source: libcfa/src/concurrency/kernel.cfa@ f0ce5f4

Last change on this file since f0ce5f4 was f0ce5f4, checked in by Thierry Delisle <tdelisle@…>, 6 years ago

V-ing a semaphore now returns whether or not a thread was woken up.
Fix build compilation

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author : Thierry Delisle
// Created On : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb 4 13:03:15 2020
// Update Count : 58
//

#define __cforall_thread__

// C Includes
#include <stddef.h>
#include <errno.h>
#include <string.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
#include <limits.h>		// PTHREAD_STACK_MIN
#include <sys/mman.h>	// mprotect
}

// CFA Includes
#include "time.hfa"
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "startup.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	#define CtxGet( ctx ) \
		__asm__ volatile ( \
			"movl %%esp,%0\n" \
			"movl %%ebp,%1\n" \
			: "=rm" (ctx.SP), \
			  "=rm" (ctx.FP) \
		)

	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr), \
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr), \
			   "m" (__fcw) \
		)

#elif defined( __x86_64 )
	#define CtxGet( ctx ) \
		__asm__ volatile ( \
			"movq %%rsp,%0\n" \
			"movq %%rbp,%1\n" \
			: "=rm" (ctx.SP), \
			  "=rm" (ctx.FP) \
		)

	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr), \
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr), \
			   "m" (__fcw) \
		)

#elif defined( __ARM_ARCH )
	#define CtxGet( ctx ) __asm__ ( \
		"mov %0,%%sp\n" \
		"mov %1,%%r11\n" \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
	#error unknown hardware architecture
#endif
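
// Usage sketch: __x87_store / __x87_load bracket the kernel context switch (see
// returnToKernel below) so the FPU/SSE control state survives crossing into the
// processor coroutine:
//	__x87_store;
//	__cfactx_switch( &thrd_src->context, &proc_cor->context );
//	__x87_load;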

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster, mainCluster);
KERNEL_STORAGE(processor, mainProcessor);
KERNEL_STORAGE($thread, mainThread);
KERNEL_STORAGE(__stack_t, mainThreadCtx);

cluster * mainCluster;
processor * mainProcessor;
$thread * mainThread;

extern "C" {
	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

size_t __page_size = 0;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
	NULL,	// cannot use 0p
	NULL,
	{ 1, false, false },
	6u	// this should be seeded better, but due to a bug calling rdtsc doesn't work
};

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
	__stack_t * storage;	// pointer to stack object
	void * base;			// base of stack
	void * limit;			// stack grows towards stack limit
	void * context;			// address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
	__stack_context_t ctx;
	CtxGet( ctx );
	this.base = ctx.FP;

	rlimit r;
	getrlimit( RLIMIT_STACK, &r);
	size_t size = r.rlim_cur;

	this.limit = (void *)(((intptr_t)this.base) - size);
	this.context = &storage_mainThreadCtx;
}

//-----------------------------------------------------------------------------
// Main thread construction

void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
	stack.storage = info->storage;
	with(*stack.storage) {
		limit = info->limit;
		base  = info->base;
	}
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
	*istorage |= 0x1;
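	// Note (hedged): setting the low-order bit appears to tag this storage as externally
	// provided (the main thread steals the program stack); the matching check in
	// __kernel_shutdown verifies this tag, presumably so the stack is not freed when the
	// coroutine is destroyed.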
	name = "Main Thread";
	state = Start;
	starter = 0p;
	last = 0p;
	cancellation = 0p;
}

void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
	state = Start;
	self_cor{ info };
	curr_cor = &self_cor;
	curr_cluster = mainCluster;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	next = 0p;

	node.next = 0p;
	node.prev = 0p;
	doregister(curr_cluster, this);

	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t & this) {

}

// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	(this.__cor){ info };
	this.proc = proc;
}

static void * __invoke_processor(void * arg);

void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
	this.name = name;
	this.cltr = &cltr;
	terminated{ 0 };
	destroyer = 0p;
	do_terminate = false;
	preemption_alarm = 0p;
	pending_preemption = false;
	runner.proc = &this;

	idleLock{};

	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);

	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );

	__cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
}

void ^?{}(processor & this) with( this ){
	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);

		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
		wake( &this );

		P( terminated );
		verify( kernelTLS.this_processor != &this);
	}

	pthread_join( kernel_thread, 0p );
	free( this.stack );
}
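
// Termination handshake (sketch): the destructor above sets do_terminate, wakes the
// processor, and blocks on P( terminated ); the processor's main() loop (below) observes
// do_terminate, exits, and V's `terminated`, after which the kernel thread can be joined.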

void ?{}(cluster & this, const char name[], Duration preemption_rate) with( this ) {
	this.name = name;
	this.preemption_rate = preemption_rate;
	ready_queue{};
	ready_queue_lock{};

	procs{ __get };
	idles{ __get };
	threads{ __get };

	doregister(this);
}

void ^?{}(cluster & this) {
	unregister(this);
}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
static $thread * __next_thread(cluster * this);
static void __run_thread(processor * this, $thread * dst);
static void __halt(processor * this);

// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction;
	// do it here instead
	kernelTLS.rand_seed ^= rdtscl();

	processor * this = runner.proc;
	verify(this);

	__cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

	doregister(this->cltr, this);

	{
		// Setup preemption data
		preemption_scope scope = { this };

		__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

		$thread * readyThread = 0p;
		for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
			readyThread = __next_thread( this->cltr );

			if(readyThread) {
				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
				/* paranoid */ verifyf( readyThread->state == Blocked || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
				/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

				__run_thread(this, readyThread);

				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

				spin_count = 0;
			} else {
				// spin(this, &spin_count);
				__halt(this);
			}
		}

		__cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
	}

	unregister(this->cltr, this);

	bool signalled = V( this->terminated );
	if(signalled)
		__cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, $thread * thrd_dst) {
	$coroutine * proc_cor = get_coroutine(this->runner);

	// Update global state
	kernelTLS.this_thread = thrd_dst;

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING: while(true) {
		if(unlikely(thrd_dst->preempted)) {
			thrd_dst->preempted = __NO_PREEMPTION;
			verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
		} else {
			verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Blocked);
			thrd_dst->state = Active;
		}

		__cfaabi_dbg_debug_do(
			thrd_dst->park_stale   = true;
			thrd_dst->unpark_stale = true;
		)

		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor

		// set context switch to the thread that the processor is executing
		verify( thrd_dst->context.SP );
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

		// We just finished running a thread; there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted    : the thread was preempted and must be rescheduled.
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			__schedule_thread( thrd_dst );
			break RUNNING;
		}

		// set state of processor coroutine to active and the thread to inactive
		static_assert(sizeof(thrd_dst->state) == sizeof(int));
		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Blocked, __ATOMIC_SEQ_CST);
		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_state; )
		switch(old_state) {
			case Halted:
				// The thread has halted, it should never be scheduled/run again; set it back to Halted and move on
				thrd_dst->state = Halted;

				// We may need to wake someone up here: the destroyer (if set) is parked waiting for this thread to halt
				unpark( this->destroyer __cfaabi_dbg_ctx2 );
				this->destroyer = 0p;
				break RUNNING;
			case Active:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case Rerun:
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong; abort
				abort("Finished running a thread that was Blocked/Start/Primed %d\n", old_state);
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;
	kernelTLS.this_thread = 0p;
}

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
	$thread * thrd_src = kernelTLS.this_thread;

	// Run the thread on this processor
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		verify( proc_cor->context.SP );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

// KERNEL_ONLY
// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
static void * __invoke_processor(void * arg) {
	processor * proc = (processor *) arg;
	kernelTLS.this_processor = proc;
	kernelTLS.this_thread = 0p;
	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	__stack_t ctx;
	info.storage = &ctx;
	(proc->runner){ proc, &info };

	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

	// Set global state
	kernelTLS.this_thread = 0p;

	// We now have a proper context from which to schedule threads
	__cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it as we normally would; it would just context switch
	// back to here. Instead, call its main directly since we are already on the
	// appropriate stack.
	get_coroutine(proc->runner)->state = Active;
	main( proc->runner );
	get_coroutine(proc->runner)->state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	__cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);

	return 0p;
}

static void Abort( int ret, const char func[] ) {
	if ( ret ) {	// pthread routines return errno values
		abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
	} // if
} // Abort

void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
	pthread_attr_t attr;

	Abort( pthread_attr_init( &attr ), "pthread_attr_init" );	// initialize attribute

	size_t stacksize;
	// default stack size, normally defined by shell limit
	Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
	assert( stacksize >= PTHREAD_STACK_MIN );

	void * stack;
	__cfaabi_dbg_debug_do(
		stack = memalign( __page_size, stacksize + __page_size );
		// pthread has no mechanism to create the guard page in a user-supplied stack.
		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
		} // if
	);
	__cfaabi_dbg_no_debug_do(
		stack = malloc( stacksize );
	);

	Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );

	Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
	return stack;
}
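
// Note (sketch of the debug-build layout produced above): the stack is allocated one page
// larger than requested and its lowest page is made PROT_NONE, so an overflow faults
// instead of silently corrupting memory:
//	[ guard page (PROT_NONE) ][ usable stack, grows downward toward the guard ]
//	^ stack                    ^ stack + __page_size
// Since pthread_attr_setstack is passed the base including the guard page, the usable
// stack is effectively one page smaller than `stacksize`.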

// KERNEL_ONLY
static void __kernel_first_resume( processor * this ) {
	$thread * src = mainThread;
	$coroutine * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );

	kernelTLS.this_thread->curr_cor = dst;
	__stack_prepare( &dst->stack, 65000 );
	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

	verify( ! kernelTLS.preemption_state.enabled );

	dst->last = &src->self_cor;
	dst->starter = dst->starter ? dst->starter : &src->self_cor;

	// set state of current coroutine to inactive
	src->state = src->state == Halted ? Halted : Blocked;

	// context switch to specified coroutine
	verify( dst->context.SP );
	__cfactx_switch( &src->context, &dst->context );
	// when __cfactx_switch returns we are back in the src coroutine

	mainThread->curr_cor = &mainThread->self_cor;

	// set state of new coroutine to active
	src->state = Active;

	verify( ! kernelTLS.preemption_state.enabled );
}
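
// Bootstrap note (hedged reading of the code above): __cfactx_start appears to prime the
// freshly prepared processor stack so that the following __cfactx_switch enters
// __cfactx_invoke_coroutine and runs main( this->runner ), while the switch itself captures
// the current UNIX context into src->context, turning the program's original thread into
// mainThread.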

// KERNEL_ONLY
static void __kernel_last_resume( processor * this ) {
	$coroutine * src = &mainThread->self_cor;
	$coroutine * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );
	verify( dst->starter == src );
	verify( dst->context.SP );

	// context switch to the processor
	__cfactx_switch( &src->context, &dst->context );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
					"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
					"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
	bool was_empty = !(ready_queue != 0);
	append( ready_queue, thrd );
	unlock( ready_queue_lock );

	if(was_empty) {
		lock  (proc_list_lock __cfaabi_dbg_ctx2);
		if(idles) {
			wake_fast(idles.head);
		}
		unlock(proc_list_lock);
	}
	else if( struct processor * idle = idles.head ) {
		wake_fast(idle);
	}

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL ONLY
static $thread * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
	$thread * head = pop_head( ready_queue );
	unlock( ready_queue_lock );

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	return head;
}

void unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
	if( !thrd ) return;

	disable_interrupts();
	static_assert(sizeof(thrd->state) == sizeof(int));

	// record activity
	__cfaabi_dbg_record_thrd( *thrd, false, caller );

	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
	__cfaabi_dbg_debug_do( thrd->unpark_result = old_state; )
	switch(old_state) {
		case Active:
			// Wake won the race, the thread will reschedule/rerun itself
			break;
		case Blocked:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );

			// Wake lost the race, the thread has already blocked; set it back to Blocked and schedule it
			thrd->state = Blocked;
			__schedule_thread( thrd );
			break;
		case Rerun:
			abort("More than one thread attempted to schedule thread %p\n", thrd);
			break;
		case Halted:
		case Start:
		case Primed:
		default:
			// This makes no sense, something is wrong; abort
			abort();
	}
	enable_interrupts( __cfaabi_dbg_ctx );
}
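
// Race protocol between park/__run_thread and unpark (summary of the two atomic exchanges
// above and in __run_thread):
//	blocking side : old = exchange( state, Blocked ) ; Active  => blocked cleanly,
//	                                                    Rerun   => unpark already happened, run again
//	waking side   : old = exchange( state, Rerun )   ; Active  => thread still running, it will
//	                                                               observe Rerun when it blocks,
//	                                                    Blocked => waker owns the thread, schedule it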

void park( __cfaabi_dbg_ctx_param ) {
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );

	// record activity
	__cfaabi_dbg_record_thrd( *kernelTLS.this_thread, true, caller );

	returnToKernel();

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
}

// KERNEL ONLY
void __leave_thread() {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	abort();
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	$thread * thrd = kernelTLS.this_thread;
	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);

	// SKULLDUGGERY: It is possible that we are preempting this thread just before it was
	// going to park itself. If that is the case and it is already using the intrusive
	// fields, then we can't use them to preempt the thread; abandon the preemption instead.
	bool preempted = false;
	if(thrd->next == 0p) {
		preempted = true;
		thrd->preempted = reason;
		returnToKernel();
	}

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts_noPoll();
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );

	return preempted;
}

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void __kernel_startup(void) {
	verify( ! kernelTLS.preemption_state.enabled );
	__cfaabi_dbg_print_safe("Kernel : Starting\n");

	__page_size = sysconf( _SC_PAGESIZE );

	__cfa_dbg_global_clusters.list{ __get };
	__cfa_dbg_global_clusters.lock{};

	// Initialize the main cluster
	mainCluster = (cluster *)&storage_mainCluster;
	(*mainCluster){"Main Cluster"};

	__cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread
	// which will then be scheduled by the mainProcessor normally
	mainThread = ($thread *)&storage_mainThread;
	current_stack_info_t info;
	info.storage = (__stack_t*)&storage_mainThreadCtx;
	(*mainThread){ &info };

	__cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

	// Construct the processor context of the main processor
	void ?{}(processorCtx_t & this, processor * proc) {
		(this.__cor){ "Processor" };
		this.__cor.starter = 0p;
		this.proc = proc;
	}

	void ?{}(processor & this) with( this ) {
		name = "Main Processor";
		cltr = mainCluster;
		terminated{ 0 };
		do_terminate = false;
		preemption_alarm = 0p;
		pending_preemption = false;
		kernel_thread = pthread_self();

		runner{ &this };
		__cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
	}

	// Initialize the main processor and the main processor ctx
	// (the coroutine that contains the processing control flow)
	mainProcessor = (processor *)&storage_mainProcessor;
	(*mainProcessor){};

	// Initialize the global state variables
	kernelTLS.this_processor = mainProcessor;
	kernelTLS.this_thread    = mainThread;

	// Enable preemption
	kernel_start_preemption();

	// Add the main thread to the ready queue
	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
	__schedule_thread(mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	__kernel_first_resume( kernelTLS.this_processor );

	// THE SYSTEM IS NOW COMPLETELY RUNNING
	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	verify( TL_GET( preemption_state.enabled ) );
}

static void __kernel_shutdown(void) {
	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

	verify( TL_GET( preemption_state.enabled ) );
	disable_interrupts();
	verify( ! kernelTLS.preemption_state.enabled );

	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread
	// which is currently here
	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
	__kernel_last_resume( kernelTLS.this_processor );
	mainThread->self_cor.state = Halted;

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Disable preemption
	kernel_stop_preemption();

	// Destroy the main processor and its context in reverse order of construction
	// These were manually constructed so we need to manually destroy them
	^(*mainProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task it will not destroy anything
	/* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx) | 0x1));
	^(*mainThread){};

	^(__cfa_dbg_global_clusters.list){};
	^(__cfa_dbg_global_clusters.lock){};

	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
}

//=============================================================================================
// Kernel Quiescing
//=============================================================================================
static void __halt(processor * this) with( *this ) {
	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (procs, *this);
		push_front(idles, *this);
		unlock    (proc_list_lock);
	}

	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);

	wait( idleLock );

	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);

	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (idles, *this);
		push_front(procs, *this);
		unlock    (proc_list_lock);
	}
}
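
// Idle protocol (summary): a processor with no ready work moves itself from the cluster's
// `procs` list to `idles` and blocks on its idleLock; __schedule_thread wakes the head of
// `idles` (wake_fast) when new work arrives, and the woken processor moves itself back
// before resuming the scheduling loop.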

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
static __spinlock_t kernel_abort_lock;
static bool kernel_abort_called = false;

void * kernel_abort(void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

	// first task to abort ?
	if ( kernel_abort_called ) {	// not first task to abort ?
		unlock( kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );	// block SIGALRM signals
		sigaddset( &mask, SIGUSR1 );	// block SIGUSR1 signals
		sigsuspend( &mask );			// block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );			// if processor unblocks before it is killed, terminate it
	}
	else {
		kernel_abort_called = true;
		unlock( kernel_abort_lock );
	}

	return kernelTLS.this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	$thread * thrd = kernel_data;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	this.count = count;
	(this.waiting){};
}
void ^?{}(semaphore & this) {}

void P(semaphore & this) with( this ){
	lock( lock __cfaabi_dbg_ctx2 );
	count -= 1;
	if ( count < 0 ) {
		// queue current task
		append( waiting, kernelTLS.this_thread );

		// atomically release spin lock and block
		unlock( lock );
		park( __cfaabi_dbg_ctx );
	}
	else {
		unlock( lock );
	}
}

bool V(semaphore & this) with( this ) {
	$thread * thrd = 0p;
	lock( lock __cfaabi_dbg_ctx2 );
	count += 1;
	if ( count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( waiting );
	}

	unlock( lock );

	// make new owner
	unpark( thrd __cfaabi_dbg_ctx2 );

	return thrd != 0p;
}
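
// Example (illustrative sketch only, not part of this file; `sem` is a hypothetical
// semaphore initialised to 0): the boolean result of V reports whether a waiting thread
// was actually unparked, which callers can use to detect whether anyone was blocked:
//	semaphore sem = { 0 };
//	...
//	if ( V( sem ) ) {
//		// a blocked thread was removed from the waiting list and unparked
//	} else {
//		// no thread was waiting; the count was simply incremented
//	}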

//-----------------------------------------------------------------------------
// Global Queues
void doregister( cluster & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	push_front( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}

void unregister( cluster & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	remove( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, $thread & thrd ) {
	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	cltr->nthreads += 1;
	push_front(cltr->threads, thrd);
	unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, $thread & thrd ) {
	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->threads, thrd );
	cltr->nthreads -= 1;
	unlock(cltr->thread_list_lock);
}

void doregister( cluster * cltr, processor * proc ) {
	lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
	cltr->nprocessors += 1;
	push_front(cltr->procs, *proc);
	unlock    (cltr->proc_list_lock);
}

void unregister( cluster * cltr, processor * proc ) {
	lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->procs, *proc );
	cltr->nprocessors -= 1;
	unlock(cltr->proc_list_lock);
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS.this_thread;
		}

		void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]) {
			if(park) {
				this.park_caller = prev_name;
				this.park_stale  = false;
			}
			else {
				this.unpark_caller = prev_name;
				this.unpark_stale  = false;
			}
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //