source: libcfa/src/concurrency/kernel.cfa @ 58e280f4

Last change on this file since 58e280f4 was 2909b51, checked in by Peter A. Buhr <pabuhr@…>, 4 years ago

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Nov 21 16:46:59 2019
// Update Count     : 27
//

#define __cforall_thread__

// C Includes
#include <stddef.h>
#include <errno.h>
#include <string.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
}

// CFA Includes
#include "time.hfa"
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "startup.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"
//-----------------------------------------------------------------------------
// Some assembly required
#if   defined( __i386 )
    #define CtxGet( ctx )        \
        __asm__ volatile (       \
            "movl %%esp,%0\n"    \
            "movl %%ebp,%1\n"    \
            : "=rm" (ctx.SP),    \
              "=rm" (ctx.FP)     \
        )

    // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
    // fcw  : X87 FPU control word (preserved across function calls)
    #define __x87_store          \
        uint32_t __mxcr;         \
        uint16_t __fcw;          \
        __asm__ volatile (       \
            "stmxcsr %0\n"       \
            "fnstcw  %1\n"       \
            : "=m" (__mxcr),     \
              "=m" (__fcw)       \
        )

    #define __x87_load           \
        __asm__ volatile (       \
            "fldcw  %1\n"        \
            "ldmxcsr %0\n"       \
            :: "m" (__mxcr),     \
               "m" (__fcw)       \
        )

#elif defined( __x86_64 )
    #define CtxGet( ctx )        \
        __asm__ volatile (       \
            "movq %%rsp,%0\n"    \
            "movq %%rbp,%1\n"    \
            : "=rm" (ctx.SP),    \
              "=rm" (ctx.FP)     \
        )

    #define __x87_store          \
        uint32_t __mxcr;         \
        uint16_t __fcw;          \
        __asm__ volatile (       \
            "stmxcsr %0\n"       \
            "fnstcw  %1\n"       \
            : "=m" (__mxcr),     \
              "=m" (__fcw)       \
        )

    #define __x87_load           \
        __asm__ volatile (       \
            "fldcw  %1\n"        \
            "ldmxcsr %0\n"       \
            :: "m" (__mxcr),     \
               "m" (__fcw)       \
        )

#elif defined( __ARM_ARCH )
    #define CtxGet( ctx ) __asm__ ( \
            "mov %0,%%sp\n"      \
            "mov %1,%%r11\n"     \
        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
    #error unknown hardware architecture
#endif
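
// Note: CtxGet captures the current stack and frame pointers so an existing
// stack can be adopted by a coroutine (see current_stack_info_t below).
// __x87_store/__x87_load save and restore the SSE and x87 control words around
// a context switch: the ABI lets the compiler assume they are unchanged across
// an ordinary call, but another thread scheduled in between may have modified them.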

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster,     mainCluster);
KERNEL_STORAGE(processor,   mainProcessor);
KERNEL_STORAGE(thread_desc, mainThread);
KERNEL_STORAGE(__stack_t,   mainThreadCtx);

cluster     * mainCluster;
processor   * mainProcessor;
thread_desc * mainThread;

extern "C" {
    struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

size_t __page_size = 0;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
    NULL,
    NULL,
    { 1, false, false },
    6u // this should be seeded better, but due to a bug, calling rdtsc does not work here
};
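
// Note: the "initial-exec" TLS model lets the compiler use a direct,
// link-time-resolved offset for kernelTLS instead of a __tls_get_addr call,
// keeping accesses on the scheduling fast path cheap; it is valid here because
// the runtime is linked into the program rather than loaded with dlopen.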

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
    __stack_t * storage;  // pointer to stack object
    void * base;          // base of stack
    void * limit;         // stack grows towards stack limit
    void * context;       // address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
    __stack_context_t ctx;
    CtxGet( ctx );
    this.base = ctx.FP;

    rlimit r;
    getrlimit( RLIMIT_STACK, &r );
    size_t size = r.rlim_cur;

    this.limit = (void *)(((intptr_t)this.base) - size);
    this.context = &storage_mainThreadCtx;
}

//-----------------------------------------------------------------------------
// Main thread construction

void ?{}( coroutine_desc & this, current_stack_info_t * info ) with( this ) {
    stack.storage = info->storage;
    with(*stack.storage) {
        limit = info->limit;
        base  = info->base;
    }
    __attribute__((may_alias)) intptr_t * istorage = (intptr_t*)&stack.storage;
    *istorage |= 0x1;
    name = "Main Thread";
    state = Start;
    starter = NULL;
    last = NULL;
    cancellation = NULL;
}
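
// Note: setting the low-order bit of stack.storage appears to tag this stack as
// user-provided (here, the stolen process stack) so the runtime does not try to
// free it on destruction; this is inferred from the surrounding code rather
// than stated in it.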

void ?{}( thread_desc & this, current_stack_info_t * info ) with( this ) {
    state = Start;
    self_cor{ info };
    curr_cor = &self_cor;
    curr_cluster = mainCluster;
    self_mon.owner = &this;
    self_mon.recursion = 1;
    self_mon_p = &self_mon;
    next = NULL;

    node.next = NULL;
    node.prev = NULL;
    doregister(curr_cluster, this);

    monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t & this) {

}

// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
    (this.__cor){ info };
    this.proc = proc;
}

static void start(processor * this);
void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
    this.name = name;
    this.cltr = &cltr;
    terminated{ 0 };
    do_terminate = false;
    preemption_alarm = NULL;
    pending_preemption = false;
    runner.proc = &this;

    idleLock{};

    start( &this );
}

void ^?{}(processor & this) with( this ) {
    if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
        __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);

        __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
        wake( &this );

        P( terminated );
        verify( kernelTLS.this_processor != &this);
    }

    pthread_join( kernel_thread, NULL );
}

void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
    this.name = name;
    this.preemption_rate = preemption_rate;
    ready_queue{};
    ready_queue_lock{};

    procs{ __get };
    idles{ __get };
    threads{ __get };

    doregister(this);
}

void ^?{}(cluster & this) {
    unregister(this);
}
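
// Usage sketch (hypothetical client code, not part of this file): processors
// and clusters follow plain RAII. Declaring a processor starts its kernel
// thread; its destructor requests termination and joins the pthread.
//
//   cluster cl = { "my-cluster", 10`ms };  // assumed Duration literal syntax
//   processor p = { "worker", cl };        // starts a kernel thread on cl
//   // user threads created on cl are now scheduled on p until p goes out of scope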

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
static void runThread(processor * this, thread_desc * dst);
static void finishRunning(processor * this);
static void halt(processor * this);

// Main of the processor contexts
void main(processorCtx_t & runner) {
    // Because of a bug, we could not initialize the seed on construction.
    // Do it here.
    kernelTLS.rand_seed ^= rdtscl();

    processor * this = runner.proc;
    verify(this);

    __cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

    doregister(this->cltr, this);

    {
        // Setup preemption data
        preemption_scope scope = { this };

        __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

        thread_desc * readyThread = NULL;
        for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
        {
            readyThread = nextThread( this->cltr );

            if(readyThread)
            {
                verify( ! kernelTLS.preemption_state.enabled );

                runThread(this, readyThread);

                verify( ! kernelTLS.preemption_state.enabled );

                // Some actions need to be taken from the kernel
                finishRunning(this);

                spin_count = 0;
            }
            else
            {
                // spin(this, &spin_count);
                halt(this);
            }
        }

        __cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
    }

    unregister(this->cltr, this);

    V( this->terminated );

    __cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void runThread(processor * this, thread_desc * thrd_dst) {
    coroutine_desc * proc_cor = get_coroutine(this->runner);

    // Reset the terminating actions here
    this->finish.action_code = No_Action;

    // Update global state
    kernelTLS.this_thread = thrd_dst;

    // set state of processor coroutine to inactive and the thread to active
    proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    thrd_dst->state = Active;

    // context switch to the thread the processor is to execute
    verify( thrd_dst->context.SP );
    CtxSwitch( &proc_cor->context, &thrd_dst->context );
    // when CtxSwitch returns we are back in the processor coroutine

    // set state of processor coroutine to active and the thread to inactive
    thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
    proc_cor->state = Active;
}
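
// Note: scheduling uses a 2-step context switch: a blocking thread first
// switches to its processor's coroutine (returnToKernel below), which then
// switches to the next ready thread (runThread above). The intermediate step
// gives the kernel a stack of its own for cleanup actions (finishRunning) that
// must not run on the stack of the thread being blocked.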

// KERNEL_ONLY
static void returnToKernel() {
    coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    thread_desc * thrd_src = kernelTLS.this_thread;

    // set state of current coroutine to inactive
    thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
    proc_cor->state = Active;
    int local_errno = *__volatile_errno();
    #if defined( __i386 ) || defined( __x86_64 )
        __x87_store;
    #endif

    // set new coroutine that the processor is executing
    // and context switch to it
    verify( proc_cor->context.SP );
    CtxSwitch( &thrd_src->context, &proc_cor->context );

    // set state of new coroutine to active
    proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    thrd_src->state = Active;

    #if defined( __i386 ) || defined( __x86_64 )
        __x87_load;
    #endif
    *__volatile_errno() = local_errno;
}
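
// Note: errno lives in kernel-thread TLS, and a user thread that blocks here
// may resume on a different kernel thread; saving errno in a stack local across
// the switch keeps it coherent per user thread. The __volatile_errno wrapper
// (noinline plus an asm barrier) stops the compiler from caching the address of
// errno from before the context switch.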

// KERNEL_ONLY
// Once a thread has finished running, some of
// its final actions must be executed from the kernel
static void finishRunning(processor * this) with( this->finish ) {
    verify( ! kernelTLS.preemption_state.enabled );
    choose( action_code ) {
    case No_Action:
        break;
    case Release:
        unlock( *lock );
    case Schedule:
        ScheduleThread( thrd );
    case Release_Schedule:
        unlock( *lock );
        ScheduleThread( thrd );
    case Release_Multi:
        for(int i = 0; i < lock_count; i++) {
            unlock( *locks[i] );
        }
    case Release_Multi_Schedule:
        for(int i = 0; i < lock_count; i++) {
            unlock( *locks[i] );
        }
        for(int i = 0; i < thrd_count; i++) {
            ScheduleThread( thrds[i] );
        }
    case Callback:
        callback();
    default:
        abort("KERNEL ERROR: Unexpected action to run after thread");
    }
}
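
// Note: choose is CFA's switch with an implicit break at the end of each case
// clause (an explicit fall-through statement is needed to continue), so the
// cases above do not fall through even though they end without break.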

// KERNEL_ONLY
// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
static void * CtxInvokeProcessor(void * arg) {
    processor * proc = (processor *) arg;
    kernelTLS.this_processor = proc;
    kernelTLS.this_thread    = NULL;
    kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
    // SKULLDUGGERY: We want to create a context for the processor coroutine
    // which is needed for the 2-step context switch. However, there is no reason
    // to waste the perfectly valid stack created by pthread.
    current_stack_info_t info;
    __stack_t ctx;
    info.storage = &ctx;
    (proc->runner){ proc, &info };

    __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

    // Set global state
    kernelTLS.this_thread = NULL;

    // We now have a proper context from which to schedule threads
    __cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

    // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
    // resume it to start it as we normally would; it would just context switch
    // back to here. Instead, directly call main since we are already on the
    // appropriate stack.
    get_coroutine(proc->runner)->state = Active;
    main( proc->runner );
    get_coroutine(proc->runner)->state = Halted;

    // Main routine of the core returned, the core is now fully terminated
    __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);

    return NULL;
}

static void start(processor * this) {
    __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);

    pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

    __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
}

// KERNEL_ONLY
void kernel_first_resume( processor * this ) {
    thread_desc * src = mainThread;
    coroutine_desc * dst = get_coroutine(this->runner);

    verify( ! kernelTLS.preemption_state.enabled );

    __stack_prepare( &dst->stack, 65000 );
    CtxStart(&this->runner, CtxInvokeCoroutine);

    verify( ! kernelTLS.preemption_state.enabled );

    dst->last = &src->self_cor;
    dst->starter = dst->starter ? dst->starter : &src->self_cor;

    // set state of current coroutine to inactive
    src->state = src->state == Halted ? Halted : Inactive;

    // context switch to specified coroutine
    verify( dst->context.SP );
    CtxSwitch( &src->context, &dst->context );
    // when CtxSwitch returns we are back in the src coroutine

    // set state of new coroutine to active
    src->state = Active;

    verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL_ONLY
void kernel_last_resume( processor * this ) {
    coroutine_desc * src = &mainThread->self_cor;
    coroutine_desc * dst = get_coroutine(this->runner);

    verify( ! kernelTLS.preemption_state.enabled );
    verify( dst->starter == src );
    verify( dst->context.SP );

    // context switch to the processor
    CtxSwitch( &src->context, &dst->context );
}
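
// Note: kernel_first_resume hand-rolls the first resume of the main processor's
// coroutine because the main thread's context must be captured from the current
// UNIX context rather than started through CtxInvokeThread; kernel_last_resume
// is its mirror at shutdown, switching into the processor coroutine one last
// time so it can unwind and terminate.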

//-----------------------------------------------------------------------------
// Scheduler routines

// KERNEL ONLY
void ScheduleThread( thread_desc * thrd ) {
    verify( thrd );
    verify( thrd->state != Halted );

    verify( ! kernelTLS.preemption_state.enabled );

    verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

    with( *thrd->curr_cluster ) {
        lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
        bool was_empty = !(ready_queue != 0);   // queue was empty before this append
        append( ready_queue, thrd );
        unlock( ready_queue_lock );

        if(was_empty) {
            lock      (proc_list_lock __cfaabi_dbg_ctx2);
            if(idles) {
                wake_fast(idles.head);
            }
            unlock    (proc_list_lock);
        }
        else if( struct processor * idle = idles.head ) {
            wake_fast(idle);
        }
    }

    verify( ! kernelTLS.preemption_state.enabled );
}
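
// Note: the wake-up policy above looks asymmetric on purpose: when the ready
// queue was empty, the idle list is consulted under proc_list_lock; otherwise a
// lock-free peek at idles.head suffices, apparently on the grounds that a
// missed wake-up is tolerable when other work is already queued.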

// KERNEL ONLY
thread_desc * nextThread(cluster * this) with( *this ) {
    verify( ! kernelTLS.preemption_state.enabled );
    lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    thread_desc * head = pop_head( ready_queue );
    unlock( ready_queue_lock );
    verify( ! kernelTLS.preemption_state.enabled );
    return head;
}

void BlockInternal() {
    disable_interrupts();
    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );
    enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( __spinlock_t * lock ) {
    disable_interrupts();
    with( *kernelTLS.this_processor ) {
        finish.action_code = Release;
        finish.lock        = lock;
    }

    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );

    enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( thread_desc * thrd ) {
    disable_interrupts();
    with( *kernelTLS.this_processor ) {
        finish.action_code = Schedule;
        finish.thrd        = thrd;
    }

    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );

    enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
    assert(thrd);
    disable_interrupts();
    with( *kernelTLS.this_processor ) {
        finish.action_code = Release_Schedule;
        finish.lock        = lock;
        finish.thrd        = thrd;
    }

    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );

    enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__spinlock_t * locks [], unsigned short count) {
    disable_interrupts();
    with( *kernelTLS.this_processor ) {
        finish.action_code = Release_Multi;
        finish.locks       = locks;
        finish.lock_count  = count;
    }

    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );

    enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
    disable_interrupts();
    with( *kernelTLS.this_processor ) {
        finish.action_code = Release_Multi_Schedule;
        finish.locks       = locks;
        finish.lock_count  = lock_count;
        finish.thrds       = thrds;
        finish.thrd_count  = thrd_count;
    }

    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );

    enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__finish_callback_fptr_t callback) {
    disable_interrupts();
    with( *kernelTLS.this_processor ) {
        finish.action_code = Callback;
        finish.callback    = callback;
    }

    verify( ! kernelTLS.preemption_state.enabled );
    returnToKernel();
    verify( ! kernelTLS.preemption_state.enabled );

    enable_interrupts( __cfaabi_dbg_ctx );
}
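
// Note: every BlockInternal variant follows the same shape: disable preemption,
// record in this_processor->finish the action the kernel must perform on the
// blocking thread's behalf once it is off its own stack (release locks and/or
// schedule threads), switch to the kernel via returnToKernel, and re-enable
// preemption when the thread is eventually resumed. The matching actions are
// executed in finishRunning above.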

// KERNEL ONLY
void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    verify( ! kernelTLS.preemption_state.enabled );
    with( *kernelTLS.this_processor ) {
        finish.action_code = thrd ? Release_Schedule : Release;
        finish.lock        = lock;
        finish.thrd        = thrd;
    }

    returnToKernel();
}
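
// Note: unlike the BlockInternal variants, LeaveThread never re-enables
// interrupts after returnToKernel: the terminating thread never resumes, so
// control does not come back here.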

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void kernel_startup(void) {
    verify( ! kernelTLS.preemption_state.enabled );
    __cfaabi_dbg_print_safe("Kernel : Starting\n");

    __page_size = sysconf( _SC_PAGESIZE );

    __cfa_dbg_global_clusters.list{ __get };
    __cfa_dbg_global_clusters.lock{};

    // Initialize the main cluster
    mainCluster = (cluster *)&storage_mainCluster;
    (*mainCluster){"Main Cluster"};

    __cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");

    // Start by initializing the main thread
    // SKULLDUGGERY: the mainThread steals the process main thread
    // which will then be scheduled by the mainProcessor normally
    mainThread = (thread_desc *)&storage_mainThread;
    current_stack_info_t info;
    info.storage = (__stack_t*)&storage_mainThreadCtx;
    (*mainThread){ &info };

    __cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

    // Construct the processor context of the main processor
    void ?{}(processorCtx_t & this, processor * proc) {
        (this.__cor){ "Processor" };
        this.__cor.starter = NULL;
        this.proc = proc;
    }

    void ?{}(processor & this) with( this ) {
        name = "Main Processor";
        cltr = mainCluster;
        terminated{ 0 };
        do_terminate = false;
        preemption_alarm = NULL;
        pending_preemption = false;
        kernel_thread = pthread_self();

        runner{ &this };
        __cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
    }

    // Initialize the main processor and the main processor ctx
    // (the coroutine that contains the processing control flow)
    mainProcessor = (processor *)&storage_mainProcessor;
    (*mainProcessor){};

    // Initialize the global state variables
    kernelTLS.this_processor = mainProcessor;
    kernelTLS.this_thread    = mainThread;

    // Enable preemption
    kernel_start_preemption();

    // Add the main thread to the ready queue
    // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    ScheduleThread(mainThread);

    // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    // mainThread is on the ready queue when this call is made.
    kernel_first_resume( kernelTLS.this_processor );

    // THE SYSTEM IS NOW COMPLETELY RUNNING
    __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

    verify( ! kernelTLS.preemption_state.enabled );
    enable_interrupts( __cfaabi_dbg_ctx );
    verify( TL_GET( preemption_state.enabled ) );
}

static void kernel_shutdown(void) {
    __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

    verify( TL_GET( preemption_state.enabled ) );
    disable_interrupts();
    verify( ! kernelTLS.preemption_state.enabled );

    // SKULLDUGGERY: Notify the mainProcessor that it needs to terminate.
    // When its coroutine terminates, it returns control to the mainThread,
    // which is currently here.
    __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    kernel_last_resume( kernelTLS.this_processor );
    mainThread->self_cor.state = Halted;

    // THE SYSTEM IS NOW COMPLETELY STOPPED

    // Disable preemption
    kernel_stop_preemption();

    // Destroy the main processor and its context in reverse order of construction
    // These were manually constructed, so we must manually destroy them
    ^(mainProcessor->runner){};
    ^(mainProcessor){};

    // Final step, destroy the main thread since it is no longer needed
    // Since we provided a stack to this task, it will not destroy anything
    ^(mainThread){};

    ^(__cfa_dbg_global_clusters.list){};
    ^(__cfa_dbg_global_clusters.lock){};

    __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
}

//=============================================================================================
// Kernel Quiescing
//=============================================================================================
static void halt(processor * this) with( *this ) {
    // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

    with( *cltr ) {
        lock      (proc_list_lock __cfaabi_dbg_ctx2);
        remove    (procs, *this);
        push_front(idles, *this);
        unlock    (proc_list_lock);
    }

    __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);

    wait( idleLock );

    __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);

    with( *cltr ) {
        lock      (proc_list_lock __cfaabi_dbg_ctx2);
        remove    (idles, *this);
        push_front(procs, *this);
        unlock    (proc_list_lock);
    }
}
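
// Note: halt parks an idle processor: it moves itself from the cluster's procs
// list to its idles list, blocks on its private idleLock until ScheduleThread
// or the processor destructor wakes it, then moves itself back. wait/wake are
// assumed to handle the race where a wake arrives before the wait begins.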

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
static __spinlock_t kernel_abort_lock;
static bool kernel_abort_called = false;

void * kernel_abort(void) __attribute__ ((__nothrow__)) {
    // abort cannot be recursively entered by the same or different processors because all signal handlers return when
    // the globalAbort flag is true.
    lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

    // first task to abort ?
    if ( kernel_abort_called ) {            // not first task to abort ?
        unlock( kernel_abort_lock );

        sigset_t mask;
        sigemptyset( &mask );
        sigaddset( &mask, SIGALRM );        // block SIGALRM signals
        sigsuspend( &mask );                // block the processor to prevent further damage during abort
        _exit( EXIT_FAILURE );              // if processor unblocks before it is killed, terminate it
    }
    else {
        kernel_abort_called = true;
        unlock( kernel_abort_lock );
    }

    return kernelTLS.this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
    thread_desc * thrd = kernel_data;

    if(thrd) {
        int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );

        if ( &thrd->self_cor != thrd->curr_cor ) {
            len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
            __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
        }
        else {
            __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
        }
    }
    else {
        int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
    }
}

int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
    void __cfaabi_bits_acquire() {
        lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
    }

    void __cfaabi_bits_release() {
        unlock( kernel_debug_lock );
    }
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void ?{}( semaphore & this, int count = 1 ) {
    (this.lock){};
    this.count = count;
    (this.waiting){};
}
void ^?{}(semaphore & this) {}

void P(semaphore & this) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    count -= 1;
    if ( count < 0 ) {
        // queue current task
        append( waiting, kernelTLS.this_thread );

        // atomically release spin lock and block
        BlockInternal( &lock );
    }
    else {
        unlock( lock );
    }
}

void V(semaphore & this) with( this ) {
    thread_desc * thrd = NULL;
    lock( lock __cfaabi_dbg_ctx2 );
    count += 1;
    if ( count <= 0 ) {
        // remove task at head of waiting list
        thrd = pop_head( waiting );
    }

    unlock( lock );

    // make new owner
    WakeThread( thrd );
}
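
// Usage sketch (hypothetical, not part of this file): this is a classic
// counting semaphore where a negative count records the number of waiters.
//
//   semaphore s = { 0 };   // start closed
//   // consumer: P( s );   // blocks until a matching V
//   // producer: V( s );   // wakes one waiter, if any
//
// Note that V calls WakeThread( NULL ) when no thread is waiting, so WakeThread
// must tolerate a NULL argument.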

//-----------------------------------------------------------------------------
// Global Queues
void doregister( cluster & cltr ) {
    lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2 );
    push_front( __cfa_dbg_global_clusters.list, cltr );
    unlock    ( __cfa_dbg_global_clusters.lock );
}

void unregister( cluster & cltr ) {
    lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2 );
    remove( __cfa_dbg_global_clusters.list, cltr );
    unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, thread_desc & thrd ) {
    lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    cltr->nthreads += 1;
    push_front(cltr->threads, thrd);
    unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, thread_desc & thrd ) {
    lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    remove(cltr->threads, thrd );
    cltr->nthreads -= 1;
    unlock(cltr->thread_list_lock);
}

void doregister( cluster * cltr, processor * proc ) {
    lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
    cltr->nprocessors += 1;
    push_front(cltr->procs, *proc);
    unlock    (cltr->proc_list_lock);
}

void unregister( cluster * cltr, processor * proc ) {
    lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
    remove(cltr->procs, *proc );
    cltr->nprocessors -= 1;
    unlock(cltr->proc_list_lock);
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
    extern "C" {
        void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
            this.prev_name = prev_name;
            this.prev_thrd = kernelTLS.this_thread;
        }
    }
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) {
    return true;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //