source: libcfa/src/concurrency/kernel.cfa@ bffcd66

Last change on this file since bffcd66 was 09d4b22, checked in by Peter A. Buhr <pabuhr@…>:

move stack for preemptive pthread from TLS to static variable

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Dec  5 16:25:52 2019
// Update Count     : 52
//

#define __cforall_thread__

// C Includes
#include <stddef.h>
#include <errno.h>
#include <string.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
#include <limits.h>					// PTHREAD_STACK_MIN
#include <sys/mman.h>					// mprotect
}
// CFA Includes
#include "time.hfa"
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "startup.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	#define CtxGet( ctx ) \
		__asm__ volatile ( \
			"movl %%esp,%0\n" \
			"movl %%ebp,%1\n" \
			: "=rm" (ctx.SP), \
			  "=rm" (ctx.FP) \
		)

	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr), \
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr), \
			   "m" (__fcw) \
		)

#elif defined( __x86_64 )
	#define CtxGet( ctx ) \
		__asm__ volatile ( \
			"movq %%rsp,%0\n" \
			"movq %%rbp,%1\n" \
			: "=rm" (ctx.SP), \
			  "=rm" (ctx.FP) \
		)

	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr), \
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr), \
			   "m" (__fcw) \
		)

#elif defined( __ARM_ARCH )
	#define CtxGet( ctx ) __asm__ ( \
		"mov %0,%%sp\n" \
		"mov %1,%%r11\n" \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
	#error unknown hardware architecture
#endif
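
// Usage sketch (added commentary, not part of the original source): CtxGet
// captures the current stack and frame pointers into a __stack_context_t.
// The constructor of current_stack_info_t below uses it in exactly this way
// to "steal" the currently executing stack:
//
//	__stack_context_t ctx;
//	CtxGet( ctx );				// ctx.SP / ctx.FP now describe this frame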

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster,     mainCluster);
KERNEL_STORAGE(processor,   mainProcessor);
KERNEL_STORAGE(thread_desc, mainThread);
KERNEL_STORAGE(__stack_t,   mainThreadCtx);

cluster     * mainCluster;
processor   * mainProcessor;
thread_desc * mainThread;

extern "C" {
	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

size_t __page_size = 0;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
	NULL,						// cannot use 0p
	NULL,
	{ 1, false, false },
	6u						// this should be seeded better, but due to a bug calling rdtsc at this point does not work
};
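
// Added note (not in the original): kernel code reads this TLS block either
// directly, e.g. kernelTLS.this_processor, or through the TL_GET macro used
// later in this file, e.g. verify( TL_GET( preemption_state.enabled ) ).
// The "initial-exec" TLS model makes each access a fixed offset from the
// thread pointer, avoiding a __tls_get_addr call on these hot paths.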

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
	__stack_t * storage;				// pointer to stack object
	void * base;					// base of stack
	void * limit;					// stack grows towards stack limit
	void * context;					// address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
	__stack_context_t ctx;
	CtxGet( ctx );
	this.base = ctx.FP;

	rlimit r;
	getrlimit( RLIMIT_STACK, &r);
	size_t size = r.rlim_cur;

	this.limit = (void *)(((intptr_t)this.base) - size);
	this.context = &storage_mainThreadCtx;
}

//-----------------------------------------------------------------------------
// Main thread construction

void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
	stack.storage = info->storage;
	with(*stack.storage) {
		limit = info->limit;
		base  = info->base;
	}
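	// Presumably (inference, not stated in the original comments): setting the
	// low-order bit of stack.storage tags this stack as user-supplied, so the
	// runtime will not try to free it when the coroutine is destroyed.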
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
	*istorage |= 0x1;
	name = "Main Thread";
	state = Start;
	starter = 0p;
	last = 0p;
	cancellation = 0p;
}

void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
	state = Start;
	self_cor{ info };
	curr_cor = &self_cor;
	curr_cluster = mainCluster;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	next = 0p;

	node.next = 0p;
	node.prev = 0p;
	doregister(curr_cluster, this);

	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t & this) {

}

// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	(this.__cor){ info };
	this.proc = proc;
}

static void start(processor * this);
void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
	this.name = name;
	this.cltr = &cltr;
	terminated{ 0 };
	do_terminate = false;
	preemption_alarm = 0p;
	pending_preemption = false;
	runner.proc = &this;

	idleLock{};

	start( &this );
}

void ^?{}(processor & this) with( this ){
	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);

		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
		wake( &this );

		P( terminated );
		verify( kernelTLS.this_processor != &this);
	}

	pthread_join( kernel_thread, 0p );
	free( this.stack );
}

void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
	this.name = name;
	this.preemption_rate = preemption_rate;
	ready_queue{};
	ready_queue_lock{};

	procs{ __get };
	idles{ __get };
	threads{ __get };

	doregister(this);
}

void ^?{}(cluster & this) {
	unregister(this);
}
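
// Usage sketch (added commentary, not part of the original source): from user
// code, clusters and processors are managed purely by construction and
// destruction. Constructing a processor starts a kernel thread running the
// scheduler loop in main(processorCtx_t &) below; destroying it signals
// termination, wakes the kernel thread if it is idle, and joins it.
// For example (names hypothetical):
//
//	cluster workCltr = { "Work Cluster", 10`ms };
//	processor worker = { "Worker", workCltr };
//	// ... threads created on workCltr are now scheduled on worker ...
//	// ^?{}(worker) runs at scope exit, terminating and joining the kernel thread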

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
static void runThread(processor * this, thread_desc * dst);
static void finishRunning(processor * this);
static void halt(processor * this);

// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we could not initialize the seed on construction
	// Do it here
	kernelTLS.rand_seed ^= rdtscl();

	processor * this = runner.proc;
	verify(this);

	__cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

	doregister(this->cltr, this);

	{
		// Setup preemption data
		preemption_scope scope = { this };

		__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

		thread_desc * readyThread = 0p;
		for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
			readyThread = nextThread( this->cltr );

			if(readyThread) {
				verify( ! kernelTLS.preemption_state.enabled );

				runThread(this, readyThread);

				verify( ! kernelTLS.preemption_state.enabled );

				// Some actions need to be taken from the kernel
				finishRunning(this);

				spin_count = 0;
			} else {
				// spin(this, &spin_count);
				halt(this);
			}
		}

		__cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
	}

	unregister(this->cltr, this);

	V( this->terminated );

	__cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
}

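// Added note (not in the original): errno is itself thread-local, so its
// address differs between kernel threads. Fetching it through a noinline
// function containing an asm barrier stops the compiler from caching the
// address across a context switch onto a different kernel thread, which is
// why returnToKernel() saves and restores errno through this helper.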
static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void runThread(processor * this, thread_desc * thrd_dst) {
	coroutine_desc * proc_cor = get_coroutine(this->runner);

	// Reset the terminating actions here
	this->finish.action_code = No_Action;

	// Update global state
	kernelTLS.this_thread = thrd_dst;

	// set state of processor coroutine to inactive and the thread to active
	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
	thrd_dst->state = Active;

	// context switch to the thread the processor is to execute
	verify( thrd_dst->context.SP );
	CtxSwitch( &proc_cor->context, &thrd_dst->context );
	// when CtxSwitch returns we are back in the processor coroutine

	// set state of processor coroutine to active and the thread to inactive
	thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
	proc_cor->state = Active;
}
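
// Added commentary (not in the original): scheduling is a two-step context
// switch. A blocking thread first switches to its processor's coroutine
// (returnToKernel below); the scheduler loop in main(processorCtx_t &) then
// picks the next ready thread and runThread switches onto it:
//
//	thread A --CtxSwitch--> processor coroutine --CtxSwitch--> thread B
//
// runThread and returnToKernel are thus matched halves of one transition.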

// KERNEL_ONLY
static void returnToKernel() {
	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
	thread_desc * thrd_src = kernelTLS.this_thread;

	// set state of current coroutine to inactive
	thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
	proc_cor->state = Active;
	int local_errno = *__volatile_errno();
	#if defined( __i386 ) || defined( __x86_64 )
		__x87_store;
	#endif

	// set new coroutine that the processor is executing
	// and context switch to it
	verify( proc_cor->context.SP );
	CtxSwitch( &thrd_src->context, &proc_cor->context );

	// set state of new coroutine to active
	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
	thrd_src->state = Active;

	#if defined( __i386 ) || defined( __x86_64 )
		__x87_load;
	#endif
	*__volatile_errno() = local_errno;
}

// KERNEL_ONLY
// Once a thread has finished running, some of
// its final actions must be executed from the kernel
static void finishRunning(processor * this) with( this->finish ) {
	verify( ! kernelTLS.preemption_state.enabled );
	choose( action_code ) {
	case No_Action:
		break;
	case Release:
		unlock( *lock );
	case Schedule:
		ScheduleThread( thrd );
	case Release_Schedule:
		unlock( *lock );
		ScheduleThread( thrd );
	case Release_Multi:
		for(int i = 0; i < lock_count; i++) {
			unlock( *locks[i] );
		}
	case Release_Multi_Schedule:
		for(int i = 0; i < lock_count; i++) {
			unlock( *locks[i] );
		}
		for(int i = 0; i < thrd_count; i++) {
			ScheduleThread( thrds[i] );
		}
	case Callback:
		callback();
	default:
		abort("KERNEL ERROR: Unexpected action to run after thread");
	}
}

// KERNEL_ONLY
// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
static void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	kernelTLS.this_processor = proc;
	kernelTLS.this_thread = 0p;
	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	__stack_t ctx;
	info.storage = &ctx;
	(proc->runner){ proc, &info };

	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

	// Set global state
	kernelTLS.this_thread = 0p;

	// We now have a proper context from which to schedule threads
	__cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it as would normally be done; it would just context
	// switch back to here. Instead, directly call main() since we are already
	// on the appropriate stack.
	get_coroutine(proc->runner)->state = Active;
	main( proc->runner );
	get_coroutine(proc->runner)->state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	__cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);

	return 0p;
}

static void Abort( int ret, const char * func ) {
	if ( ret ) {					// pthread routines return errno values
		abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
	} // if
} // Abort

void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
	pthread_attr_t attr;

	Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute

	size_t stacksize;
	// default stack size, normally defined by shell limit
	Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
	assert( stacksize >= PTHREAD_STACK_MIN );

	void * stack;
	__cfaabi_dbg_debug_do(
		stack = memalign( __page_size, stacksize + __page_size );
		// pthread has no mechanism to create the guard page in user supplied stack.
		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
		} // if
	);
	__cfaabi_dbg_no_debug_do(
		stack = malloc( stacksize );
	);

	Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );

	Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
	return stack;
}
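
// Added commentary (not in the original): because the stack is supplied to
// pthread rather than allocated by it, ownership stays with the caller: the
// returned pointer is stored in processor.stack by start() below and freed in
// ^?{}(processor &) above, after pthread_join() guarantees the kernel thread
// can no longer touch it. In the debug build, the extra page allocated here
// is mprotect'ed to PROT_NONE as a guard page, so a stack overflow faults
// immediately instead of silently corrupting adjacent memory.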

static void start(processor * this) {
	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);

	this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );

	__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
}

// KERNEL_ONLY
void kernel_first_resume( processor * this ) {
	thread_desc * src = mainThread;
	coroutine_desc * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );

	__stack_prepare( &dst->stack, 65000 );
	CtxStart(&this->runner, CtxInvokeCoroutine);

	verify( ! kernelTLS.preemption_state.enabled );

	dst->last = &src->self_cor;
	dst->starter = dst->starter ? dst->starter : &src->self_cor;

	// set state of current coroutine to inactive
	src->state = src->state == Halted ? Halted : Inactive;

	// context switch to specified coroutine
	verify( dst->context.SP );
	CtxSwitch( &src->context, &dst->context );
	// when CtxSwitch returns we are back in the src coroutine

	// set state of new coroutine to active
	src->state = Active;

	verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL_ONLY
void kernel_last_resume( processor * this ) {
	coroutine_desc * src = &mainThread->self_cor;
	coroutine_desc * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );
	verify( dst->starter == src );
	verify( dst->context.SP );

	// context switch to the processor
	CtxSwitch( &src->context, &dst->context );
}
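
// Added commentary (not in the original): these two routines bracket the life
// of the main processor. kernel_startup() uses kernel_first_resume() to give
// the processor coroutine its first activation while capturing the original
// UNIX context as the main thread's context; kernel_shutdown() uses
// kernel_last_resume() to switch into that coroutine one final time so its
// scheduler loop can exit, after which coroutine termination returns control
// to the main thread waiting in kernel_shutdown().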

//-----------------------------------------------------------------------------
// Scheduler routines

// KERNEL ONLY
void ScheduleThread( thread_desc * thrd ) {
	verify( thrd );
	verify( thrd->state != Halted );

	verify( ! kernelTLS.preemption_state.enabled );

	verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );

	with( *thrd->curr_cluster ) {
		lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
		bool was_empty = !(ready_queue != 0);
		append( ready_queue, thrd );
		unlock( ready_queue_lock );

		if(was_empty) {
			lock  (proc_list_lock __cfaabi_dbg_ctx2);
			if(idles) {
				wake_fast(idles.head);
			}
			unlock(proc_list_lock);
		}
		else if( struct processor * idle = idles.head ) {
			wake_fast(idle);
		}
	}

	verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL ONLY
thread_desc * nextThread(cluster * this) with( *this ) {
	verify( ! kernelTLS.preemption_state.enabled );
	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
	thread_desc * head = pop_head( ready_queue );
	unlock( ready_queue_lock );
	verify( ! kernelTLS.preemption_state.enabled );
	return head;
}

void BlockInternal() {
	disable_interrupts();
	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( __spinlock_t * lock ) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Release;
		finish.lock = lock;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( thread_desc * thrd ) {
	disable_interrupts();
	with( * kernelTLS.this_processor ) {
		finish.action_code = Schedule;
		finish.thrd = thrd;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
	assert(thrd);
	disable_interrupts();
	with( * kernelTLS.this_processor ) {
		finish.action_code = Release_Schedule;
		finish.lock = lock;
		finish.thrd = thrd;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__spinlock_t * locks [], unsigned short count) {
	disable_interrupts();
	with( * kernelTLS.this_processor ) {
		finish.action_code = Release_Multi;
		finish.locks = locks;
		finish.lock_count = count;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Release_Multi_Schedule;
		finish.locks = locks;
		finish.lock_count = lock_count;
		finish.thrds = thrds;
		finish.thrd_count = thrd_count;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__finish_callback_fptr_t callback) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Callback;
		finish.callback = callback;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}
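
// Usage sketch (added commentary, not part of the original source): every
// BlockInternal overload has the same shape -- disable preemption, record the
// action the kernel must perform on the caller's behalf, then returnToKernel().
// The semaphore below is the canonical caller:
//
//	lock( lock __cfaabi_dbg_ctx2 );			// protect the waiting queue
//	append( waiting, kernelTLS.this_thread );
//	BlockInternal( &lock );				// kernel releases the lock *after*
//							// the switch, so no wakeup is lost
//
// Releasing the spinlock from the kernel side (finishRunning, case Release)
// is what makes "atomically release and block" possible.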

// KERNEL ONLY
void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
	verify( ! kernelTLS.preemption_state.enabled );
	with( * kernelTLS.this_processor ) {
		finish.action_code = thrd ? Release_Schedule : Release;
		finish.lock = lock;
		finish.thrd = thrd;
	}

	returnToKernel();
}

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void kernel_startup(void) {
	verify( ! kernelTLS.preemption_state.enabled );
	__cfaabi_dbg_print_safe("Kernel : Starting\n");

	__page_size = sysconf( _SC_PAGESIZE );

	__cfa_dbg_global_clusters.list{ __get };
	__cfa_dbg_global_clusters.lock{};

	// Initialize the main cluster
	mainCluster = (cluster *)&storage_mainCluster;
	(*mainCluster){"Main Cluster"};

	__cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread
	// which will then be scheduled by the mainProcessor normally
	mainThread = (thread_desc *)&storage_mainThread;
	current_stack_info_t info;
	info.storage = (__stack_t*)&storage_mainThreadCtx;
	(*mainThread){ &info };

	__cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

	// Construct the processor context of the main processor
	void ?{}(processorCtx_t & this, processor * proc) {
		(this.__cor){ "Processor" };
		this.__cor.starter = 0p;
		this.proc = proc;
	}

	void ?{}(processor & this) with( this ) {
		name = "Main Processor";
		cltr = mainCluster;
		terminated{ 0 };
		do_terminate = false;
		preemption_alarm = 0p;
		pending_preemption = false;
		kernel_thread = pthread_self();

		runner{ &this };
		__cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
	}

	// Initialize the main processor and the main processor ctx
	// (the coroutine that contains the processing control flow)
	mainProcessor = (processor *)&storage_mainProcessor;
	(*mainProcessor){};

	// Initialize the global state variables
	kernelTLS.this_processor = mainProcessor;
	kernelTLS.this_thread = mainThread;

	// Enable preemption
	kernel_start_preemption();

	// Add the main thread to the ready queue
	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
	ScheduleThread(mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	kernel_first_resume( kernelTLS.this_processor );

	// THE SYSTEM IS NOW COMPLETELY RUNNING
	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	verify( TL_GET( preemption_state.enabled ) );
}

static void kernel_shutdown(void) {
	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

	verify( TL_GET( preemption_state.enabled ) );
	disable_interrupts();
	verify( ! kernelTLS.preemption_state.enabled );

	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread
	// which is currently here
	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
	kernel_last_resume( kernelTLS.this_processor );
	mainThread->self_cor.state = Halted;

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Disable preemption
	kernel_stop_preemption();

	// Destroy the main processor and its context in reverse order of construction
	// These were manually constructed so we need to manually destroy them
	^(mainProcessor->runner){};
	^(mainProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task it will not destroy anything
	^(mainThread){};

	^(__cfa_dbg_global_clusters.list){};
	^(__cfa_dbg_global_clusters.lock){};

	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
}

//=============================================================================================
// Kernel Quiescing
//=============================================================================================
static void halt(processor * this) with( *this ) {
	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (procs, *this);
		push_front(idles, *this);
		unlock    (proc_list_lock);
	}

	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);

	wait( idleLock );

	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);

	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (idles, *this);
		push_front(procs, *this);
		unlock    (proc_list_lock);
	}
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
static __spinlock_t kernel_abort_lock;
static bool kernel_abort_called = false;

void * kernel_abort(void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

	// first task to abort ?
	if ( kernel_abort_called ) {			// not first task to abort ?
		unlock( kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );		// block SIGALRM signals
		sigsuspend( &mask );			// block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );			// if processor unblocks before it is killed, terminate it
	}
	else {
		kernel_abort_called = true;
		unlock( kernel_abort_lock );
	}

	return kernelTLS.this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	thread_desc * thrd = kernel_data;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	this.count = count;
	(this.waiting){};
}
void ^?{}(semaphore & this) {}

void P(semaphore & this) with( this ){
	lock( lock __cfaabi_dbg_ctx2 );
	count -= 1;
	if ( count < 0 ) {
		// queue current task
		append( waiting, kernelTLS.this_thread );

		// atomically release spin lock and block
		BlockInternal( &lock );
	}
	else {
		unlock( lock );
	}
}

void V(semaphore & this) with( this ) {
	thread_desc * thrd = 0p;
	lock( lock __cfaabi_dbg_ctx2 );
	count += 1;
	if ( count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( waiting );
	}

	unlock( lock );

	// make new owner
	WakeThread( thrd );
}
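
// Usage sketch (added commentary, not part of the original source): a
// semaphore initialized with count 0 acts as a signalling primitive, and with
// count 1 as a mutual-exclusion lock. For example (names hypothetical):
//
//	semaphore sem = { 0 };
//	// in a waiting thread:    P( sem );	// blocks until a V arrives
//	// in a signalling thread: V( sem );	// wakes one waiter, if any
//
// When count is negative in P(), its magnitude is the number of blocked
// waiters, which is why V() only dequeues a thread when count <= 0.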

//-----------------------------------------------------------------------------
// Global Queues
void doregister( cluster & cltr ) {
	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	push_front( __cfa_dbg_global_clusters.list, cltr );
	unlock    ( __cfa_dbg_global_clusters.lock );
}

void unregister( cluster & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	remove( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, thread_desc & thrd ) {
	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	cltr->nthreads += 1;
	push_front(cltr->threads, thrd);
	unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, thread_desc & thrd ) {
	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->threads, thrd );
	cltr->nthreads -= 1;
	unlock(cltr->thread_list_lock);
}

void doregister( cluster * cltr, processor * proc ) {
	lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
	cltr->nprocessors += 1;
	push_front(cltr->procs, *proc);
	unlock    (cltr->proc_list_lock);
}

void unregister( cluster * cltr, processor * proc ) {
	lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->procs, *proc );
	cltr->nprocessors -= 1;
	unlock(cltr->proc_list_lock);
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS.this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) {
	return true;
}

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //