source: libcfa/src/concurrency/kernel.cfa @ b798713

Last change on this file since b798713 was b798713, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Working ready queue

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Nov 21 16:46:59 2019
// Update Count     : 27
//

#define __cforall_thread__

// C Includes
#include <stddef.h>
#include <errno.h>
#include <string.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
}

// CFA Includes
#include "time.hfa"
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "startup.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//-----------------------------------------------------------------------------
// Some assembly required
#if   defined( __i386 )
	#define CtxGet( ctx )        \
		__asm__ volatile (       \
			"movl %%esp,%0\n"    \
			"movl %%ebp,%1\n"    \
			: "=rm" (ctx.SP),    \
			  "=rm" (ctx.FP)     \
		)

	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store          \
		uint32_t __mxcr;         \
		uint16_t __fcw;          \
		__asm__ volatile (       \
			"stmxcsr %0\n"       \
			"fnstcw  %1\n"       \
			: "=m" (__mxcr),     \
			  "=m" (__fcw)       \
		)

	#define __x87_load           \
		__asm__ volatile (       \
			"fldcw  %1\n"        \
			"ldmxcsr %0\n"       \
			:: "m" (__mxcr),     \
			   "m" (__fcw)       \
		)

#elif defined( __x86_64 )
	#define CtxGet( ctx )        \
		__asm__ volatile (       \
			"movq %%rsp,%0\n"    \
			"movq %%rbp,%1\n"    \
			: "=rm" (ctx.SP),    \
			  "=rm" (ctx.FP)     \
		)

	#define __x87_store          \
		uint32_t __mxcr;         \
		uint16_t __fcw;          \
		__asm__ volatile (       \
			"stmxcsr %0\n"       \
			"fnstcw  %1\n"       \
			: "=m" (__mxcr),     \
			  "=m" (__fcw)       \
		)

	#define __x87_load           \
		__asm__ volatile (       \
			"fldcw  %1\n"        \
			"ldmxcsr %0\n"       \
			:: "m" (__mxcr),     \
			   "m" (__fcw)       \
		)

#elif defined( __ARM_ARCH )
	#define CtxGet( ctx ) __asm__ ( \
		"mov %0,%%sp\n"              \
		"mov %1,%%r11\n"             \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
	#error unknown hardware architecture
#endif
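
// CtxGet captures the current stack and frame pointers of the running kernel
// thread; the runtime uses it to adopt an existing pthread stack as a
// coroutine stack (see current_stack_info_t below) instead of allocating a
// fresh one.  __x87_store / __x87_load save and restore the x87 control word
// and the SSE mxcsr around returnToKernel: the calling convention preserves
// them across calls, but another thread scheduled between the switch out and
// the switch back may legitimately change them, so each thread carries its
// own copy.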

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster,         mainCluster);
KERNEL_STORAGE(processor,       mainProcessor);
KERNEL_STORAGE(thread_desc,     mainThread);
KERNEL_STORAGE(__stack_t,       mainThreadCtx);

cluster     * mainCluster;
processor   * mainProcessor;
thread_desc * mainThread;

extern "C" {
	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

size_t __page_size = 0;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
	NULL,
	NULL,
	{ 1, false, false },
	6u // this seed is mediocre, but a bug prevents calling rdtsc here; it is re-seeded in main(processorCtx_t &)
};

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
	__stack_t * storage;  // pointer to stack object
	void * base;          // base of stack
	void * limit;         // stack grows towards stack limit
	void * context;       // address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
	__stack_context_t ctx;
	CtxGet( ctx );
	this.base = ctx.FP;

	rlimit r;
	getrlimit( RLIMIT_STACK, &r);
	size_t size = r.rlim_cur;

	this.limit = (void *)(((intptr_t)this.base) - size);
	this.context = &storage_mainThreadCtx;
}
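
// The constructor above describes the stack the caller is already running on:
// the current frame pointer approximates the base, and RLIMIT_STACK bounds
// how far it can grow.  It is only used for "stolen" stacks, those of the
// main thread and of processor kernel threads, which were created by the OS
// rather than by __stack_prepare.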

//-----------------------------------------------------------------------------
// Main thread construction

void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
	stack.storage = info->storage;
	with(*stack.storage) {
		limit     = info->limit;
		base      = info->base;
	}
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
	*istorage |= 0x1;       // tag the storage pointer to mark the stack as user-provided, so destruction does not free it
	name = "Main Thread";
	state = Start;
	starter = NULL;
	last = NULL;
	cancellation = NULL;
}

void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
	state = Start;
	self_cor{ info };
	curr_cor = &self_cor;
	curr_cluster = mainCluster;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	link.next = 0p;
	link.prev = 0p;

	node.next = NULL;
	node.prev = NULL;
	doregister(curr_cluster, this);

	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t & this) {}

// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	(this.__cor){ info };
	this.proc = proc;
}

static void start(processor * this);
void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
	this.name = name;
	this.cltr = &cltr;
	id = -1u;
	terminated{ 0 };
	do_terminate = false;
	preemption_alarm = NULL;
	pending_preemption = false;
	runner.proc = &this;

	idleLock{};

	start( &this );
}

void ^?{}(processor & this) with( this ){
	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);

		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
		wake( &this );

		P( terminated );
		verify( kernelTLS.this_processor != &this);
	}

	pthread_join( kernel_thread, NULL );
}
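
// Processor tear-down is a handshake: the destructor publishes do_terminate
// and wakes the processor in case it is idling, then blocks on the
// `terminated` semaphore until the processor's scheduling loop exits and
// posts it (the V at the end of main(processorCtx_t &)), and finally joins
// the underlying pthread.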

void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
	this.name = name;
	this.preemption_rate = preemption_rate;
	ready_queue{};
	ready_lock{};

	idles{ __get };
	threads{ __get };

	doregister(this);
}

void ^?{}(cluster & this) {
	unregister(this);
}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
static void runThread(processor * this, thread_desc * dst);
static void finishRunning(processor * this);
static void halt(processor * this);

// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, the seed could not be initialized at construction, so initialize it here
	kernelTLS.rand_seed ^= rdtscl();

	processor * this = runner.proc;
	verify(this);

	__cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

	// register the processor unless it's the main processor, which is handled in the boot sequence
	if(this != mainProcessor) {
		this->id = doregister(this->cltr, this);
		ready_queue_grow( this->cltr );
	}

	{
		// Setup preemption data
		preemption_scope scope = { this };

		__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

		thread_desc * readyThread = NULL;
		for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
		{
			readyThread = nextThread( this->cltr );

			if(readyThread)
			{
				verify( ! kernelTLS.preemption_state.enabled );

				runThread(this, readyThread);

				verify( ! kernelTLS.preemption_state.enabled );

				// Some actions need to be taken from the kernel
				finishRunning(this);

				spin_count = 0;
			}
			else
			{
				// spin(this, &spin_count);
				halt(this);
			}
		}

		__cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
	}

	V( this->terminated );

	// unregister the processor unless it's the main processor, which is handled in the boot sequence
	if(this != mainProcessor) {
		ready_queue_shrink( this->cltr );
		unregister(this->cltr, this);
	}

	__cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);

	stats_tls_tally(this->cltr);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void runThread(processor * this, thread_desc * thrd_dst) {
	coroutine_desc * proc_cor = get_coroutine(this->runner);

	// Reset the terminating actions here
	this->finish.action_code = No_Action;

	// Update global state
	kernelTLS.this_thread = thrd_dst;

	// set state of processor coroutine to inactive and the thread to active
	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
	thrd_dst->state = Active;

	// context switch to the thread the processor is meant to execute
	verify( thrd_dst->context.SP );
	CtxSwitch( &proc_cor->context, &thrd_dst->context );
	// when CtxSwitch returns we are back in the processor coroutine

	// set state of processor coroutine to active and the thread to inactive
	thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
	proc_cor->state = Active;
}
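
// Scheduling uses a 2-step context switch: a blocking thread never switches
// directly to its successor.  It first switches to its processor's coroutine
// (returnToKernel), which runs the scheduling loop in main(processorCtx_t &)
// on the processor's own stack; the loop then switches to the next ready
// thread (runThread).  The intermediate hop is what makes finishRunning safe:
// post-block actions such as unlocking and rescheduling run only once the
// kernel is off the blocked thread's stack.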

// KERNEL_ONLY
static void returnToKernel() {
	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
	thread_desc * thrd_src = kernelTLS.this_thread;

	// set state of the current thread to inactive and the processor coroutine to active
	thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
	proc_cor->state = Active;
	int local_errno = *__volatile_errno();
	#if defined( __i386 ) || defined( __x86_64 )
		__x87_store;
	#endif

	// context switch to the processor coroutine
	verify( proc_cor->context.SP );
	CtxSwitch( &thrd_src->context, &proc_cor->context );

	// back on the thread's stack: the processor coroutine is inactive again and the thread is active
	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
	thrd_src->state = Active;

	#if defined( __i386 ) || defined( __x86_64 )
		__x87_load;
	#endif
	*__volatile_errno() = local_errno;
}
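
// errno is per-kernel-thread, but many user threads share one kernel thread,
// so a thread's errno must be carried across the switch by hand: it is read
// into a local before CtxSwitch and written back after, through the noinline
// __volatile_errno helper so the compiler cannot cache its address across the
// switch.  The x87/mxcsr control state is carried the same way on x86.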

// KERNEL_ONLY
// Once a thread has finished running, some of
// its final actions must be executed from the kernel
static void finishRunning(processor * this) with( this->finish ) {
	verify( ! kernelTLS.preemption_state.enabled );
	choose( action_code ) {
	case No_Action:
		break;
	case Release:
		unlock( *lock );
	case Schedule:
		ScheduleThread( thrd );
	case Release_Schedule:
		unlock( *lock );
		ScheduleThread( thrd );
	case Release_Multi:
		for(int i = 0; i < lock_count; i++) {
			unlock( *locks[i] );
		}
	case Release_Multi_Schedule:
		for(int i = 0; i < lock_count; i++) {
			unlock( *locks[i] );
		}
		for(int i = 0; i < thrd_count; i++) {
			ScheduleThread( thrds[i] );
		}
	case Callback:
		callback();
	default:
		abort("KERNEL ERROR: Unexpected action to run after thread");
	}
}
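
// Note: Cforall's `choose` is a `switch` with an implicit break at the end of
// every case clause (falling through requires an explicit fall-through
// statement), so each action_code above runs exactly one arm; the missing
// `break`s are not C-style fall-through.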

// KERNEL_ONLY
// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
static void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	kernelTLS.this_processor = proc;
	kernelTLS.this_thread    = NULL;
	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];

	// SKULLDUGGERY: We want to create a context for the processor coroutine,
	// which is needed for the 2-step context switch. However, there is no
	// reason to waste a perfectly valid stack already created by pthread.
	current_stack_info_t info;
	__stack_t ctx;
	info.storage = &ctx;
	(proc->runner){ proc, &info };

	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

	// Set global state
	kernelTLS.this_thread = NULL;

	// We now have a proper context from which to schedule threads
	__cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't own its stack, it cannot be
	// started with a normal resume: that would simply context switch back to
	// here. Instead, call its main directly, since we are already on the
	// appropriate stack.
	get_coroutine(proc->runner)->state = Active;
	main( proc->runner );
	get_coroutine(proc->runner)->state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	__cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);

	return NULL;
}

static void start(processor * this) {
	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);

	pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

	__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
}

// KERNEL_ONLY
void kernel_first_resume( processor * this ) {
	thread_desc * src = mainThread;
	coroutine_desc * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );

	__stack_prepare( &dst->stack, 65000 );
	CtxStart(&this->runner, CtxInvokeCoroutine);

	verify( ! kernelTLS.preemption_state.enabled );

	dst->last = &src->self_cor;
	dst->starter = dst->starter ? dst->starter : &src->self_cor;

	// set state of current coroutine to inactive
	src->state = src->state == Halted ? Halted : Inactive;

	// context switch to specified coroutine
	verify( dst->context.SP );
	CtxSwitch( &src->context, &dst->context );
	// when CtxSwitch returns we are back in the src coroutine

	// set state of new coroutine to active
	src->state = Active;

	verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL_ONLY
void kernel_last_resume( processor * this ) {
	coroutine_desc * src = &mainThread->self_cor;
	coroutine_desc * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );
	verify( dst->starter == src );
	verify( dst->context.SP );

	// context switch to the processor
	CtxSwitch( &src->context, &dst->context );
}

//-----------------------------------------------------------------------------
// Scheduler routines

// KERNEL ONLY
void ScheduleThread( thread_desc * thrd ) {
	verify( thrd );
	verify( thrd->state != Halted );

	verify( ! kernelTLS.preemption_state.enabled );

	verifyf( thrd->link.next == NULL, "Expected null got %p", thrd->link.next );

	ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor);
		bool was_empty = push( thrd->curr_cluster, thrd );
	ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor);

	with( *thrd->curr_cluster ) {
		if(was_empty) {
			lock      (proc_list_lock __cfaabi_dbg_ctx2);
			if(idles) {
				wake_fast(idles.head);
			}
			unlock    (proc_list_lock);
		}
		else if( struct processor * idle = idles.head ) {
			wake_fast(idle);
		}
	}

	verify( ! kernelTLS.preemption_state.enabled );
}
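
// Wake-up policy: if the ready queue was empty before this push, the idle
// list is checked under its lock so a sleeping processor is woken reliably;
// otherwise an unlocked peek at idles.head suffices, because some processor
// is already making progress and a missed wake is benign.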

// KERNEL ONLY
thread_desc * nextThread(cluster * this) with( *this ) {
	verify( ! kernelTLS.preemption_state.enabled );

	ready_schedule_lock(this, kernelTLS.this_processor);
		thread_desc * head = pop( this );
	ready_schedule_unlock(this, kernelTLS.this_processor);

	verify( ! kernelTLS.preemption_state.enabled );
	return head;
}

void BlockInternal() {
	disable_interrupts();
	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
}
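
// Every BlockInternal variant follows the same protocol: disable preemption,
// record what the kernel must do on the caller's behalf in
// this_processor->finish (release locks, reschedule threads, or run a
// callback), then returnToKernel().  The processor loop performs the recorded
// actions in finishRunning *after* the switch, so, e.g., a lock passed to
// BlockInternal(&lock) is released only once no other processor can resume
// the blocking thread onto a stack it is still using.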

void BlockInternal( __spinlock_t * lock ) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Release;
		finish.lock        = lock;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( thread_desc * thrd ) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Schedule;
		finish.thrd        = thrd;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
	assert(thrd);
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Release_Schedule;
		finish.lock        = lock;
		finish.thrd        = thrd;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__spinlock_t * locks [], unsigned short count) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Release_Multi;
		finish.locks       = locks;
		finish.lock_count  = count;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Release_Multi_Schedule;
		finish.locks       = locks;
		finish.lock_count  = lock_count;
		finish.thrds       = thrds;
		finish.thrd_count  = thrd_count;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

void BlockInternal(__finish_callback_fptr_t callback) {
	disable_interrupts();
	with( *kernelTLS.this_processor ) {
		finish.action_code = Callback;
		finish.callback    = callback;
	}

	verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	verify( ! kernelTLS.preemption_state.enabled );

	enable_interrupts( __cfaabi_dbg_ctx );
}

// KERNEL ONLY
void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
	verify( ! kernelTLS.preemption_state.enabled );
	with( *kernelTLS.this_processor ) {
		finish.action_code = thrd ? Release_Schedule : Release;
		finish.lock        = lock;
		finish.thrd        = thrd;
	}

	returnToKernel();
}

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void kernel_startup(void) {
	verify( ! kernelTLS.preemption_state.enabled );
	__cfaabi_dbg_print_safe("Kernel : Starting\n");

	__page_size = sysconf( _SC_PAGESIZE );

	__cfa_dbg_global_clusters.list{ __get };
	__cfa_dbg_global_clusters.lock{};

	// Initialize the main cluster
	mainCluster = (cluster *)&storage_mainCluster;
	(*mainCluster){"Main Cluster"};

	__cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread
	// which will then be scheduled by the mainProcessor normally
	mainThread = (thread_desc *)&storage_mainThread;
	current_stack_info_t info;
	info.storage = (__stack_t*)&storage_mainThreadCtx;
	(*mainThread){ &info };

	__cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

	// Construct the processor context of the main processor
	void ?{}(processorCtx_t & this, processor * proc) {
		(this.__cor){ "Processor" };
		this.__cor.starter = NULL;
		this.proc = proc;
	}

	void ?{}(processor & this) with( this ) {
		name = "Main Processor";
		cltr = mainCluster;
		terminated{ 0 };
		do_terminate = false;
		preemption_alarm = NULL;
		pending_preemption = false;
		kernel_thread = pthread_self();
		id = -1u;

		runner{ &this };
		__cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
	}

	// Initialize the main processor and the main processor ctx
	// (the coroutine that contains the processing control flow)
	mainProcessor = (processor *)&storage_mainProcessor;
	(*mainProcessor){};

	mainProcessor->id = doregister(mainCluster, mainProcessor);

	// initialize the global state variables
	kernelTLS.this_processor = mainProcessor;
	kernelTLS.this_thread    = mainThread;

	// Enable preemption
	kernel_start_preemption();

	// Add the main thread to the ready queue
	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
	ScheduleThread(mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	kernel_first_resume( kernelTLS.this_processor );

	// THE SYSTEM IS NOW COMPLETELY RUNNING
	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	verify( TL_GET( preemption_state.enabled ) );
}

static void kernel_shutdown(void) {
	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

	verify( TL_GET( preemption_state.enabled ) );
	disable_interrupts();
	verify( ! kernelTLS.preemption_state.enabled );

	// SKULLDUGGERY: Notify the mainProcessor that it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread,
	// which is currently here.
	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
	kernel_last_resume( kernelTLS.this_processor );
	mainThread->self_cor.state = Halted;

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Disable preemption
	kernel_stop_preemption();

	unregister(mainCluster, mainProcessor);

	// Destroy the main processor and its context in reverse order of construction.
	// These were manually constructed, so they must be manually destroyed.
	void ^?{}(processor & this) with( this ) {
		// don't join the main thread here, that wouldn't make any sense
		__cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
	}

	^(*mainProcessor){};

	// Final step, destroy the main thread since it is no longer needed.
	// Since we provided a stack to this task, it will not destroy anything.
	^(*mainThread){};

	^(*mainCluster){};

	^(__cfa_dbg_global_clusters.list){};
	^(__cfa_dbg_global_clusters.lock){};

	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
}

//=============================================================================================
// Kernel Quiescing
//=============================================================================================
static void halt(processor * this) with( *this ) {
	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		push_front(idles, *this);
		unlock    (proc_list_lock);
	}

	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);

	wait( idleLock );

	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);

	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (idles, *this);
		unlock    (proc_list_lock);
	}
}
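
// A processor with no work publishes itself on its cluster's idle list and
// sleeps on its private idleLock; ScheduleThread and the processor destructor
// wake it through wake_fast / wake.  The processor removes itself from the
// list on wake-up, so an entry on the idle list is a processor that is asleep
// or about to be.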

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
static __spinlock_t kernel_abort_lock;
static bool kernel_abort_called = false;

void * kernel_abort(void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

	// first task to abort ?
	if ( kernel_abort_called ) {            // not first task to abort ?
		unlock( kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );        // block SIGALRM signals
		sigsuspend( &mask );                // block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );              // if processor unblocks before it is killed, terminate it
	}
	else {
		kernel_abort_called = true;
		unlock( kernel_abort_lock );
	}

	return kernelTLS.this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	thread_desc * thrd = kernel_data;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void  ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	this.count = count;
	(this.waiting){};
}
void ^?{}(semaphore & this) {}

void P(semaphore & this) with( this ){
	lock( lock __cfaabi_dbg_ctx2 );
	count -= 1;
	if ( count < 0 ) {
		// queue current task
		append( waiting, kernelTLS.this_thread );

		// atomically release spin lock and block
		BlockInternal( &lock );
	}
	else {
		unlock( lock );
	}
}

void V(semaphore & this) with( this ) {
	thread_desc * thrd = NULL;
	lock( lock __cfaabi_dbg_ctx2 );
	count += 1;
	if ( count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( waiting );
	}

	unlock( lock );

	// make new owner
	WakeThread( thrd );
}
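
// Usage sketch (hypothetical caller code): a semaphore constructed with
// count 1 acts as a mutex:
//   semaphore s = { 1 };
//   P( s );          // count 1 -> 0, proceeds immediately
//   /* critical section */
//   V( s );          // count 0 -> 1, or wakes the first waiter
// A negative count is the number of blocked waiters, which is why V wakes a
// thread whenever the incremented count is still <= 0.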

//-----------------------------------------------------------------------------
// Global Queues
void doregister( cluster     & cltr ) {
	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	push_front( __cfa_dbg_global_clusters.list, cltr );
	unlock    ( __cfa_dbg_global_clusters.lock );
}

void unregister( cluster     & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	remove( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, thread_desc & thrd ) {
	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	cltr->nthreads += 1;
	push_front(cltr->threads, thrd);
	unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, thread_desc & thrd ) {
	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->threads, thrd );
	cltr->nthreads -= 1;
	unlock(cltr->thread_list_lock);
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS.this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) {
	return true;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //