source: libcfa/src/concurrency/kernel.cfa @ 3381ed7

Branches: arm-eh, jacob/cs343-translation, jenkins-sandbox, new-ast, new-ast-unique-expr
Last change on this file since 3381ed7 was 3381ed7, checked in by Thierry Delisle <tdelisle@…>, 22 months ago

Added park/unpark primitives to threads and removed BlockInternal.
Converted monitors to use park unpark.
Intrusive Queue now mark next field when thread is inside queue.
Added several asserts to kernel and monitor.
Added a few tests for park and unpark.

  • Property mode set to 100644
File size: 29.8 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// kernel.c --
8//
9// Author           : Thierry Delisle
10// Created On       : Tue Jan 17 12:27:26 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Thu Jan 30 22:55:50 2020
13// Update Count     : 56
14//
15
16#define __cforall_thread__
17
18//C Includes
19#include <stddef.h>
20#include <errno.h>
21#include <string.h>
22extern "C" {
23#include <stdio.h>
24#include <fenv.h>
25#include <sys/resource.h>
26#include <signal.h>
27#include <unistd.h>
28#include <limits.h>                                                                             // PTHREAD_STACK_MIN
29#include <sys/mman.h>                                                                   // mprotect
30}
31
32//CFA Includes
33#include "time.hfa"
34#include "kernel_private.hfa"
35#include "preemption.hfa"
36#include "startup.hfa"
37
38//Private includes
39#define __CFA_INVOKE_PRIVATE__
40#include "invoke.h"
41
42//-----------------------------------------------------------------------------
43// Some assembly required
44#if defined( __i386 )
45        #define CtxGet( ctx )        \
46                __asm__ volatile (     \
47                        "movl %%esp,%0\n"\
48                        "movl %%ebp,%1\n"\
49                        : "=rm" (ctx.SP),\
50                                "=rm" (ctx.FP) \
51                )
52
53        // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
54        // fcw  : X87 FPU control word (preserved across function calls)
55        #define __x87_store         \
56                uint32_t __mxcr;      \
57                uint16_t __fcw;       \
58                __asm__ volatile (    \
59                        "stmxcsr %0\n"  \
60                        "fnstcw  %1\n"  \
61                        : "=m" (__mxcr),\
62                                "=m" (__fcw)  \
63                )
64
65        #define __x87_load         \
66                __asm__ volatile (   \
67                        "fldcw  %1\n"  \
68                        "ldmxcsr %0\n" \
69                        ::"m" (__mxcr),\
70                                "m" (__fcw)  \
71                )
72
73#elif defined( __x86_64 )
74        #define CtxGet( ctx )        \
75                __asm__ volatile (     \
76                        "movq %%rsp,%0\n"\
77                        "movq %%rbp,%1\n"\
78                        : "=rm" (ctx.SP),\
79                                "=rm" (ctx.FP) \
80                )
81
82        #define __x87_store         \
83                uint32_t __mxcr;      \
84                uint16_t __fcw;       \
85                __asm__ volatile (    \
86                        "stmxcsr %0\n"  \
87                        "fnstcw  %1\n"  \
88                        : "=m" (__mxcr),\
89                                "=m" (__fcw)  \
90                )
91
92        #define __x87_load          \
93                __asm__ volatile (    \
94                        "fldcw  %1\n"   \
95                        "ldmxcsr %0\n"  \
96                        :: "m" (__mxcr),\
97                                "m" (__fcw)  \
98                )
99
100
101#elif defined( __ARM_ARCH )
102#define CtxGet( ctx ) __asm__ ( \
103                "mov %0,%%sp\n"   \
104                "mov %1,%%r11\n"   \
105        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
106#else
107        #error unknown hardware architecture
108#endif
109
110//-----------------------------------------------------------------------------
111//Start and stop routine for the kernel, declared first to make sure they run first
112static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
113static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
114
115//-----------------------------------------------------------------------------
116// Kernel storage
117KERNEL_STORAGE(cluster,         mainCluster);
118KERNEL_STORAGE(processor,       mainProcessor);
119KERNEL_STORAGE(thread_desc,     mainThread);
120KERNEL_STORAGE(__stack_t,       mainThreadCtx);
121
122cluster     * mainCluster;
123processor   * mainProcessor;
124thread_desc * mainThread;
125
126extern "C" {
127        struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
128}
129
130size_t __page_size = 0;
131
132//-----------------------------------------------------------------------------
133// Global state
134thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
135        NULL,                                                                                           // cannot use 0p
136        NULL,
137        { 1, false, false },
138        6u //this should be seeded better but due to a bug calling rdtsc doesn't work
139};
140
141//-----------------------------------------------------------------------------
142// Struct to steal stack
143struct current_stack_info_t {
144        __stack_t * storage;                                                            // pointer to stack object
145        void * base;                                                                            // base of stack
146        void * limit;                                                                           // stack grows towards stack limit
147        void * context;                                                                         // address of cfa_context_t
148};
149
150void ?{}( current_stack_info_t & this ) {
151        __stack_context_t ctx;
152        CtxGet( ctx );
153        this.base = ctx.FP;
154
155        rlimit r;
156        getrlimit( RLIMIT_STACK, &r);
157        size_t size = r.rlim_cur;
158
159        this.limit = (void *)(((intptr_t)this.base) - size);
160        this.context = &storage_mainThreadCtx;
161}
162
163//-----------------------------------------------------------------------------
164// Main thread construction
165
// Construct the main thread's coroutine descriptor on top of an existing
// (stolen) stack described by info, instead of allocating a fresh stack.
166void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
167	stack.storage = info->storage;
168	with(*stack.storage) {
169		limit     = info->limit;
170		base      = info->base;
171	}
	// Tag the low bit of the storage pointer to mark the stack as
	// user-supplied — presumably so the destructor does not free it;
	// TODO confirm against the __stack_t consumers.
172	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
173	*istorage |= 0x1;
174	name = "Main Thread";
175	state = Start;
176	starter = 0p;
177	last = 0p;
178	cancellation = 0p;
179}
180
// Construct the main thread descriptor: build its coroutine from the stolen
// process stack, give it its self-monitor, and register it with mainCluster.
181void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
182	state = Start;
183	self_cor{ info };
184	curr_cor = &self_cor;
185	curr_cluster = mainCluster;
	// a thread initially owns its own monitor, with recursion depth 1
186	self_mon.owner = &this;
187	self_mon.recursion = 1;
188	self_mon_p = &self_mon;
189	next = 0p;
190
	// intrusive list links start detached
191	node.next = 0p;
192	node.prev = 0p;
193	doregister(curr_cluster, this);
194
195	monitors{ &self_mon_p, 1, (fptr_t)0 };
196}
197
198//-----------------------------------------------------------------------------
199// Processor coroutine
// Default constructor: intentionally empty — the fields are filled in later
// by the two-argument form or by the main-processor setup in kernel_startup.
200void ?{}(processorCtx_t & this) {
201
202}
203
204// Construct the processor context of non-main processors
205static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	// the coroutine adopts the stack described by info (the pthread stack)
206	(this.__cor){ info };
207	this.proc = proc;
208}
209
210static void start(processor * this);
// Construct a non-main processor on cluster cltr and immediately spawn its
// underlying kernel thread (see start/create_pthread below).
211void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
212	this.name = name;
213	this.cltr = &cltr;
	// semaphore used for the termination handshake; starts unavailable
214	terminated{ 0 };
215	do_terminate = false;
216	preemption_alarm = 0p;
217	pending_preemption = false;
218	runner.proc = &this;
219
220	idleLock{};
221
	// spawns the pthread running CtxInvokeProcessor for this processor
222	start( &this );
223}
224
// Destroy a processor: signal its scheduler loop to terminate, wait for the
// loop to acknowledge, then reap the kernel thread and release its stack.
225void ^?{}(processor & this) with( this ){
226	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
227		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
228
229		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
		// wake the processor in case it is sleeping on idleLock
230		wake( &this );
231
		// wait for main(processorCtx_t&) to V() on its way out
232		P( terminated );
		// a processor must never destroy itself from its own kernel thread
233		verify( kernelTLS.this_processor != &this);
234	}
235
236	pthread_join( kernel_thread, 0p );
	// stack was allocated by create_pthread and ownership returned to us
237	free( this.stack );
238}
239
// Construct a cluster: empty ready-queue plus empty intrusive lists of
// processors, idle processors, and threads, then register it globally.
240void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
241	this.name = name;
242	this.preemption_rate = preemption_rate;
243	ready_queue{};
244	ready_queue_lock{};
245
	// __get selects the intrusive link field used by these lists
246	procs{ __get };
247	idles{ __get };
248	threads{ __get };
249
	// add to the global (debug) cluster registry
250	doregister(this);
251}
252
// Destroy a cluster: only needs to remove it from the global registry.
253void ^?{}(cluster & this) {
254	unregister(this);
255}
256
257//=============================================================================================
258// Kernel Scheduling logic
259//=============================================================================================
260static thread_desc * nextThread(cluster * this);
261static void runThread(processor * this, thread_desc * dst);
262static void halt(processor * this);
263
264//Main of the processor contexts
// Scheduler loop of a processor: repeatedly pop a ready thread from the
// cluster and run it, until do_terminate is observed. Runs on the
// processor's own kernel thread (entered from CtxInvokeProcessor).
265void main(processorCtx_t & runner) {
266	// Because of a bug, we could not initialize the seed on construction
267	// Do it here
268	kernelTLS.rand_seed ^= rdtscl();
269
270	processor * this = runner.proc;
271	verify(this);
272
273	__cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);
274
275	doregister(this->cltr, this);
276
277	{
278		// Setup preemption data
279		preemption_scope scope = { this };
280
281		__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
282
283		thread_desc * readyThread = 0p;
284		for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
285			readyThread = nextThread( this->cltr );
286
287			if(readyThread) {
288				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
289				/* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
290				/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
291
292				runThread(this, readyThread);
293
294				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
295
296				spin_count = 0;
297			} else {
				// idle handling is currently disabled; the loop busy-polls
298				// spin(this, &spin_count);
299				// halt(this);
300			}
301		}
302
303		__cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
304	}
305
306	unregister(this->cltr, this);
307
	// release the destructor waiting in ^?{}(processor&)
308	V( this->terminated );
309
310	__cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
311}
312
// Return the address of errno through an opaque (noinline + asm barrier)
// call so the compiler cannot cache the TLS errno slot across a context
// switch — the thread may resume on a different kernel thread.
313static int * __volatile_errno() __attribute__((noinline));
314static int * __volatile_errno() { asm(""); return &errno; }
315
316// KERNEL ONLY
317// runThread runs a thread by context switching
318// from the processor coroutine to the target thread
// On return from the thread, resolves the park/unpark race by atomically
// swapping the thread's state to Inactive and inspecting what was there.
319static void runThread(processor * this, thread_desc * thrd_dst) {
320	coroutine_desc * proc_cor = get_coroutine(this->runner);
321
322	// Reset the terminating actions here
323	this->finish.action_code = No_Action;
324
325	// Update global state
326	kernelTLS.this_thread = thrd_dst;
327
328	// set state of processor coroutine to inactive
329	verify(proc_cor->state == Active);
330	proc_cor->state = Inactive;
331
332	// Actually run the thread
333	RUNNING:  while(true) {
		// a preempted thread resumes in whatever running state it had;
		// a freshly scheduled thread becomes Active here
334		if(unlikely(thrd_dst->preempted)) {
335			thrd_dst->preempted = __NO_PREEMPTION;
336			verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
337		} else {
338			verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
339			thrd_dst->state = Active;
340		}
341
342		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
343
344		// set context switch to the thread that the processor is executing
345		verify( thrd_dst->context.SP );
346		CtxSwitch( &proc_cor->context, &thrd_dst->context );
347		// when CtxSwitch returns we are back in the processor coroutine
348
349		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
350
351
352		// We just finished running a thread, there are a few things that could have happened.
353		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
354		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
355		// 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way
356		// 4 - Preempted
357		// In case 1, we may have won a race so we can't write to the state again.
358		// In case 2, we lost the race so we now own the thread.
359		// In case 3, we lost the race but can just reschedule the thread.
360
361		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
362			// The thread was preempted, reschedule it and reset the flag
363			ScheduleThread( thrd_dst );
364			break RUNNING;
365		}
366
367		// set state of processor coroutine to active and the thread to inactive
368		static_assert(sizeof(thrd_dst->state) == sizeof(int));
		// atomic exchange decides the race against a concurrent unpark()
369		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
370		switch(old_state) {
371			case Halted:
372				// The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
373				thrd_dst->state = Halted;
374				break RUNNING;
375			case Active:
376				// This is case 1, the regular case, nothing more is needed
377				break RUNNING;
378			case Rerun:
379				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
380				// In this case, just run it again.
381				continue RUNNING;
382			case Reschedule:
383				// This is case 3, someone tried to run this before it finished blocking
384				// but it must go through the ready-queue
385				thrd_dst->state = Inactive;  /*restore invariant */
386				ScheduleThread( thrd_dst );
387				break RUNNING;
388			default:
389				// This makes no sense, something is wrong, abort
390				abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
391		}
392	}
393
394	// Just before returning to the processor, set the processor coroutine to active
395	proc_cor->state = Active;
396}
397
398// KERNEL_ONLY
// Switch from the current thread back to the processor coroutine (the other
// half of runThread's CtxSwitch). errno and the x87/SSE control state are
// saved and restored around the switch because the thread may resume on a
// different kernel thread, whose TLS errno and FP state differ.
399static void returnToKernel() {
400	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
401	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
402	thread_desc * thrd_src = kernelTLS.this_thread;
403
404	// Run the thread on this processor
405	{
		// read errno through an opaque call so it is not cached (see __volatile_errno)
406		int local_errno = *__volatile_errno();
407		#if defined( __i386 ) || defined( __x86_64 )
408			__x87_store;
409		#endif
410		verify( proc_cor->context.SP );
411		CtxSwitch( &thrd_src->context, &proc_cor->context );
		// execution resumes here when this thread is next run — possibly on
		// a different processor/kernel thread
412		#if defined( __i386 ) || defined( __x86_64 )
413			__x87_load;
414		#endif
415		*__volatile_errno() = local_errno;
416	}
417
418	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
419}
420
421// KERNEL_ONLY
422// Context invoker for processors
423// This is the entry point for processors (kernel threads)
424// It effectively constructs a coroutine by stealing the pthread stack
425static void * CtxInvokeProcessor(void * arg) {
426	processor * proc = (processor *) arg;
427	kernelTLS.this_processor = proc;
428	kernelTLS.this_thread    = 0p;
	// interrupts start disabled on a new kernel thread (disable_count = 1)
429	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
430	// SKULLDUGGERY: We want to create a context for the processor coroutine
431	// which is needed for the 2-step context switch. However, there is no reason
432	// to waste the perfectly valid stack created by pthread.
433	current_stack_info_t info;
434	__stack_t ctx;
435	info.storage = &ctx;
436	(proc->runner){ proc, &info };
437
438	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
439
440	//Set global state
	// (redundant with the assignment above — kept as in the original)
441	kernelTLS.this_thread = 0p;
442
443	//We now have a proper context from which to schedule threads
444	__cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);
445
446	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
447	// resume it to start it like it normally would, it will just context switch
448	// back to here. Instead directly call the main since we already are on the
449	// appropriate stack.
450	get_coroutine(proc->runner)->state = Active;
451	main( proc->runner );
452	get_coroutine(proc->runner)->state = Halted;
453
454	// Main routine of the core returned, the core is now fully terminated
455	__cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);
456
457	return 0p;
458}
459
460static void Abort( int ret, const char * func ) {
461        if ( ret ) {                                                                            // pthread routines return errno values
462                abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
463        } // if
464} // Abort
465
// Spawn a kernel thread running start(arg) on an explicitly allocated stack.
// Returns the stack pointer; ownership transfers to the caller, who must
// free() it after joining the thread (see ^?{}(processor&)).
466void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
467	pthread_attr_t attr;
468
469	Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
470
471	size_t stacksize;
472	// default stack size, normally defined by shell limit
473	Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
474	assert( stacksize >= PTHREAD_STACK_MIN );
475
	// NOTE(review): neither memalign nor malloc results are null-checked; an
	// allocation failure surfaces later in mprotect/pthread_attr_setstack.
476	void * stack;
477	__cfaabi_dbg_debug_do(
		// debug builds: page-align the stack and place a guard page below it
478		stack = memalign( __page_size, stacksize + __page_size );
479		// pthread has no mechanism to create the guard page in user supplied stack.
480		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
481			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
482		} // if
483	);
484	__cfaabi_dbg_no_debug_do(
485		stack = malloc( stacksize );
486	);
487
488	Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
489
490	Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
491	return stack;
492}
493
// Spawn the kernel thread backing this processor; the thread enters
// CtxInvokeProcessor, and the returned stack is kept for later free().
494static void start(processor * this) {
495	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
496
497	this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );
498
499	__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
500}
501
502// KERNEL_ONLY
// First switch from the process main thread into the main processor's
// scheduler coroutine during boot. Unlike a normal resume, the destination
// stack must be prepared and started by hand here.
503void kernel_first_resume( processor * this ) {
504	thread_desc * src = mainThread;
505	coroutine_desc * dst = get_coroutine(this->runner);
506
507	verify( ! kernelTLS.preemption_state.enabled );
508
509	kernelTLS.this_thread->curr_cor = dst;
	// 65000 bytes: ad-hoc stack size for the main processor coroutine —
	// presumably "big enough"; TODO confirm why this is not a named constant
510	__stack_prepare( &dst->stack, 65000 );
511	CtxStart(main, dst, this->runner, CtxInvokeCoroutine);
512
513	verify( ! kernelTLS.preemption_state.enabled );
514
515	dst->last = &src->self_cor;
516	dst->starter = dst->starter ? dst->starter : &src->self_cor;
517
518	// set state of current coroutine to inactive
519	src->state = src->state == Halted ? Halted : Inactive;
520
521	// context switch to specified coroutine
522	verify( dst->context.SP );
523	CtxSwitch( &src->context, &dst->context );
524	// when CtxSwitch returns we are back in the src coroutine
525
526	mainThread->curr_cor = &mainThread->self_cor;
527
528	// set state of new coroutine to active
529	src->state = Active;
530
531	verify( ! kernelTLS.preemption_state.enabled );
532}
533
534// KERNEL_ONLY
// Final switch from the main thread into the main processor coroutine during
// shutdown; the processor's main() observes do_terminate and returns, which
// eventually hands control back to kernel_shutdown.
535void kernel_last_resume( processor * this ) {
536	coroutine_desc * src = &mainThread->self_cor;
537	coroutine_desc * dst = get_coroutine(this->runner);
538
539	verify( ! kernelTLS.preemption_state.enabled );
	// the main processor coroutine must have been started by this main thread
540	verify( dst->starter == src );
541	verify( dst->context.SP );
542
543	// context switch to the processor
544	CtxSwitch( &src->context, &dst->context );
545}
546
547//-----------------------------------------------------------------------------
548// Scheduler routines
549// KERNEL ONLY
// Append thrd to its cluster's ready-queue and wake an idle processor if one
// is available. Must be called with interrupts disabled.
550void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
551	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
552	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
553	/* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
554                          "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
555	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
556                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
557	/* paranoid */ #endif
558	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
559
560	lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
	// "queue != 0" is the intrusive queue's non-empty test, so the double
	// negation records whether the queue was empty before this append
561	bool was_empty = !(ready_queue != 0);
562	append( ready_queue, thrd );
563	unlock( ready_queue_lock );
564
565	if(was_empty) {
		// first ready thread: take the processor-list lock before waking
566		lock      (proc_list_lock __cfaabi_dbg_ctx2);
567		if(idles) {
568			wake_fast(idles.head);
569		}
570		unlock    (proc_list_lock);
571	}
	// NOTE(review): this branch reads idles.head without proc_list_lock —
	// appears to be a deliberate racy fast path; confirm it is benign
572	else if( struct processor * idle = idles.head ) {
573		wake_fast(idle);
574	}
575
576	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
577}
578
579// KERNEL ONLY
580static thread_desc * nextThread(cluster * this) with( *this ) {
581        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
582
583        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
584        thread_desc * head = pop_head( ready_queue );
585        unlock( ready_queue_lock );
586
587        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
588        return head;
589}
590
591void unpark( thread_desc * thrd, bool must_yield ) {
592        if( !thrd ) return;
593
594        enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
595
596        disable_interrupts();
597        static_assert(sizeof(thrd->state) == sizeof(int));
598        enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
599        switch(old_state) {
600                case Active:
601                        // Wake won the race, the thread will reschedule/rerun itself
602                        break;
603                case Inactive:
604                        /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
605
606                        // Wake lost the race,
607                        thrd->state = Inactive;
608                        ScheduleThread( thrd );
609                        break;
610                case Rerun:
611                case Reschedule:
612                        abort("More than one thread attempted to schedule thread %p\n", thrd);
613                        break;
614                case Halted:
615                case Start:
616                case Primed:
617                default:
618                        // This makes no sense, something is wrong abort
619                        abort();
620        }
621        enable_interrupts( __cfaabi_dbg_ctx );
622}
623
// Block the current thread until a matching unpark(); control returns here
// after the kernel reschedules and runs the thread again.
624void park( void ) {
625	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
626	disable_interrupts();
627	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	// a thread that parks voluntarily cannot also be marked preempted
628	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
629
630	returnToKernel();
631
632	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
633	enable_interrupts( __cfaabi_dbg_ctx );
634	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
635
636}
637
638// KERNEL ONLY
// Final switch back to the kernel when a thread terminates; presumably never
// resumes the caller (the thread's state becomes Halted) — confirm against
// the thread-destruction path.
639void LeaveThread() {
640	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
641	returnToKernel();
642}
643
644// KERNEL ONLY
// Preempt the current thread for the given reason, sending it back through
// the scheduler. Returns true if the preemption happened, false if it had to
// be abandoned because the thread is mid-park.
645bool force_yield( __Preemption_Reason reason ) {
646	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
647	disable_interrupts();
648	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
649
650	thread_desc * thrd = kernelTLS.this_thread;
651	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
652
653	// SKULLDUGGERY: It is possible that we are preempting this thread just before
654	// it was going to park itself. If that is the case and it is already using the
655	// intrusive fields then we can't use them to preempt the thread
656	// If that is the case, abandon the preemption.
657	bool preempted = false;
	// next != 0p means the thread is already marked as inside a queue
658	if(thrd->next == 0p) {
659		preempted = true;
660		thrd->preempted = reason;
661		returnToKernel();
662	}
663
664	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	// noPoll: do not run pending preemptions while unwinding from one
665	enable_interrupts_noPoll();
666	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
667
668	return preempted;
669}
670
671//=============================================================================================
672// Kernel Setup logic
673//=============================================================================================
674//-----------------------------------------------------------------------------
675// Kernel boot procedures
// Runs as a high-priority constructor before user code: builds the main
// cluster, steals the process stack for the main thread, builds the main
// processor on the current kernel thread, and starts preemption.
676static void kernel_startup(void) {
677	verify( ! kernelTLS.preemption_state.enabled );
678	__cfaabi_dbg_print_safe("Kernel : Starting\n");
679
680	__page_size = sysconf( _SC_PAGESIZE );
681
682	__cfa_dbg_global_clusters.list{ __get };
683	__cfa_dbg_global_clusters.lock{};
684
685	// Initialize the main cluster
686	mainCluster = (cluster *)&storage_mainCluster;
687	(*mainCluster){"Main Cluster"};
688
689	__cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");
690
691	// Start by initializing the main thread
692	// SKULLDUGGERY: the mainThread steals the process main thread
693	// which will then be scheduled by the mainProcessor normally
694	mainThread = (thread_desc *)&storage_mainThread;
695	current_stack_info_t info;
696	info.storage = (__stack_t*)&storage_mainThreadCtx;
697	(*mainThread){ &info };
698
699	__cfaabi_dbg_print_safe("Kernel : Main thread ready\n");
700
701
702
703	// Construct the processor context of the main processor
	// (CFA nested function: local constructor overload used only below)
704	void ?{}(processorCtx_t & this, processor * proc) {
705		(this.__cor){ "Processor" };
706		this.__cor.starter = 0p;
707		this.proc = proc;
708	}
709
	// Construct the main processor: unlike other processors it does not
	// spawn a pthread — it adopts the current (boot) kernel thread.
710	void ?{}(processor & this) with( this ) {
711		name = "Main Processor";
712		cltr = mainCluster;
713		terminated{ 0 };
714		do_terminate = false;
715		preemption_alarm = 0p;
716		pending_preemption = false;
717		kernel_thread = pthread_self();
718
719		runner{ &this };
720		__cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
721	}
722
723	// Initialize the main processor and the main processor ctx
724	// (the coroutine that contains the processing control flow)
725	mainProcessor = (processor *)&storage_mainProcessor;
726	(*mainProcessor){};
727
728	//initialize the global state variables
729	kernelTLS.this_processor = mainProcessor;
730	kernelTLS.this_thread    = mainThread;
731
732	// Enable preemption
733	kernel_start_preemption();
734
735	// Add the main thread to the ready queue
736	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
737	ScheduleThread(mainThread);
738
739	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
740	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
741	// mainThread is on the ready queue when this call is made.
742	kernel_first_resume( kernelTLS.this_processor );
743
744
745
746	// THE SYSTEM IS NOW COMPLETELY RUNNING
747	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");
748
749	verify( ! kernelTLS.preemption_state.enabled );
750	enable_interrupts( __cfaabi_dbg_ctx );
751	verify( TL_GET( preemption_state.enabled ) );
752}
753
// Runs as a destructor after user code finishes: tears down the main
// processor, stops preemption, and destroys the boot-time kernel objects in
// reverse order of construction.
754static void kernel_shutdown(void) {
755	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
756
757	verify( TL_GET( preemption_state.enabled ) );
758	disable_interrupts();
759	verify( ! kernelTLS.preemption_state.enabled );
760
761	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
762	// When its coroutine terminates, it returns control to the mainThread
763	// which is currently here
764	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
765	kernel_last_resume( kernelTLS.this_processor );
766	mainThread->self_cor.state = Halted;
767
768	// THE SYSTEM IS NOW COMPLETELY STOPPED
769
770	// Disable preemption
771	kernel_stop_preemption();
772
773	// Destroy the main processor and its context in reverse order of construction
774	// These were manually constructed so we need manually destroy them
775	^(mainProcessor->runner){};
776	^(mainProcessor){};
777
778	// Final step, destroy the main thread since it is no longer needed
779	// Since we provided a stack to this task it will not destroy anything
780	^(mainThread){};
781
782	^(__cfa_dbg_global_clusters.list){};
783	^(__cfa_dbg_global_clusters.lock){};
784
785	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
786}
787
788//=============================================================================================
789// Kernel Quiescing
790//=============================================================================================
// Put an idle processor to sleep: move it from the cluster's active-processor
// list to the idle list, block on its idleLock, then re-register it as active
// once another processor wakes it.
static void halt(processor * this) with( *this ) {
	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

	// publish this processor as idle so wake-ups can find it
	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (procs, *this);
		push_front(idles, *this);
		unlock    (proc_list_lock);
	}

	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);

	// sleep until another processor signals this processor's idleLock
	wait( idleLock );

	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);

	// move back to the active-processor list before resuming work
	with( *cltr ) {
		lock      (proc_list_lock __cfaabi_dbg_ctx2);
		remove    (idles, *this);
		push_front(procs, *this);
		unlock    (proc_list_lock);
	}
}
814
815//=============================================================================================
816// Unexpected Terminating logic
817//=============================================================================================
818static __spinlock_t kernel_abort_lock;
819static bool kernel_abort_called = false;
820
// Arbitrate which task performs the abort: the first caller proceeds and gets
// its thread descriptor back; any later caller blocks until the process dies.
void * kernel_abort(void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

	// first task to abort ?
	if ( kernel_abort_called ) {			// not first task to abort ?
		unlock( kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );		// block SIGALRM signals
		sigaddset( &mask, SIGUSR1 );		// block SIGUSR1 signals
		sigsuspend( &mask );			// block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );			// if processor unblocks before it is killed, terminate it
	}
	else {
		kernel_abort_called = true;
		unlock( kernel_abort_lock );
	}

	// hand the aborting thread's descriptor to the abort machinery (see kernel_abort_msg)
	return kernelTLS.this_thread;
}
844
// Format a description of where the abort happened into abort_text and write it
// to stderr: the faulting thread (from kernel_abort) and, when it differs from
// the thread's own coroutine, the coroutine it was executing.
void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	thread_desc * thrd = kernel_data;

	if( ! thrd ) {
		// abort happened outside of any CFA thread
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		return;
	}

	// name the faulting thread
	int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
	__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

	if ( &thrd->self_cor == thrd->curr_cor ) {
		// thread was running its own coroutine; just terminate the sentence
		__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
	}
	else {
		// thread was executing a different coroutine; name it as well
		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}
865
// Number of trailing stack frames the abort backtrace should elide: more are
// skipped when the abort originates from the main thread's coroutine.
int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	if ( get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ) {
		return 4;
	}
	return 2;
}
869
870static __spinlock_t kernel_debug_lock;
871
extern "C" {
	// Serialize low-level debug output across processors (paired with release below).
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}
881
882//=============================================================================================
883// Kernel Utilities
884//=============================================================================================
885//-----------------------------------------------------------------------------
886// Locks
// Construct a semaphore holding an initial number of units (default 1).
void  ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	(this.waiting){};
	this.count = count;
}
// Nothing extra to release; members are destroyed implicitly.
void ^?{}(semaphore & this) {}
893
// Semaphore P (acquire): take one unit, blocking the calling thread when none
// are available (count goes negative).
void P(semaphore & this) with( this ){
	lock( lock __cfaabi_dbg_ctx2 );
	count -= 1;
	if ( count < 0 ) {
		// queue current task
		append( waiting, kernelTLS.this_thread );

		// atomically release spin lock and block
		// NOTE(review): unlock and park are two separate steps, not one atomic
		// operation; this presumably relies on unpark being safe to call before
		// the target thread has parked — confirm park/unpark semantics
		unlock( lock );
		park();
	}
	else {
	    unlock( lock );
	}
}
909
// Semaphore V (release): return one unit and wake the longest-waiting thread,
// if any thread is blocked.
void V(semaphore & this) with( this ) {
	thread_desc * thrd = 0p;
	lock( lock __cfaabi_dbg_ctx2 );
	count += 1;
	if ( count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( waiting );
	}

	unlock( lock );

	// make new owner
	// NOTE(review): thrd may still be 0p here when no thread was waiting;
	// presumably unpark(0p) is a no-op — confirm
	unpark( thrd );
}
924
925//-----------------------------------------------------------------------------
926// Global Queues
// Add a cluster to the global debug registry of clusters.
void doregister( cluster     & cltr ) {
	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	push_front( __cfa_dbg_global_clusters.list, cltr );
	unlock    ( __cfa_dbg_global_clusters.lock );
}
932
// Remove a cluster from the global debug registry of clusters.
void unregister( cluster     & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	remove( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}
938
// Register a thread with its cluster: link it into the cluster's intrusive
// thread list and bump the thread count, both under the list lock.
void doregister( cluster * cltr, thread_desc & thrd ) {
	lock( cltr->thread_list_lock __cfaabi_dbg_ctx2 );
	push_front( cltr->threads, thrd );
	cltr->nthreads += 1;
	unlock( cltr->thread_list_lock );
}
945
// Deregister a thread from its cluster: drop the thread count and unlink it
// from the cluster's intrusive thread list, both under the list lock.
void unregister( cluster * cltr, thread_desc & thrd ) {
	lock( cltr->thread_list_lock __cfaabi_dbg_ctx2 );
	cltr->nthreads -= 1;
	remove( cltr->threads, thrd );
	unlock( cltr->thread_list_lock );
}
952
// Register a processor with its cluster: link it into the cluster's processor
// list and bump the processor count, both under the list lock.
void doregister( cluster * cltr, processor * proc ) {
	lock( cltr->proc_list_lock __cfaabi_dbg_ctx2 );
	push_front( cltr->procs, *proc );
	cltr->nprocessors += 1;
	unlock( cltr->proc_list_lock );
}
959
// Deregister a processor from its cluster: drop the processor count and unlink
// it from the cluster's processor list, both under the list lock.
void unregister( cluster * cltr, processor * proc ) {
	lock( cltr->proc_list_lock __cfaabi_dbg_ctx2 );
	cltr->nprocessors -= 1;
	remove( cltr->procs, *proc );
	unlock( cltr->proc_list_lock );
}
966
967//-----------------------------------------------------------------------------
968// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		// Record which thread last acquired a spinlock and from where
		// (debug builds only); useful when diagnosing lock-related hangs.
		void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS.this_thread;
		}
	}
)
977
978//-----------------------------------------------------------------------------
979// Debug
// Report whether the threaded runtime is active; this translation unit always
// reports true (a non-threaded build presumably provides a false variant — confirm).
bool threading_enabled(void) {
	return true;
}
983// Local Variables: //
984// mode: c //
985// tab-width: 4 //
986// End: //
Note: See TracBrowser for help on using the repository browser.