source: libcfa/src/concurrency/kernel.cfa @ 27273f9

Last change on this file since 27273f9 was f586539, checked in by Thierry Delisle <tdelisle@…>, 4 years ago:

Fixed an incorrect check reporting that the main thread has a corrupted stack

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 13:03:15 2020
// Update Count     : 58
//

#define __cforall_thread__

// C Includes
#include <stddef.h>
#include <errno.h>
#include <string.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
#include <limits.h>                                       // PTHREAD_STACK_MIN
#include <sys/mman.h>                                     // mprotect
}

// CFA Includes
#include "time.hfa"
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "startup.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	#define CtxGet( ctx )        \
		__asm__ volatile (     \
			"movl %%esp,%0\n"\
			"movl %%ebp,%1\n"\
			: "=rm" (ctx.SP),\
			  "=rm" (ctx.FP) \
		)

	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store         \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"  \
			"fnstcw  %1\n"  \
			: "=m" (__mxcr),\
			  "=m" (__fcw)  \
		)

	#define __x87_load         \
		__asm__ volatile (   \
			"fldcw  %1\n"  \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr),\
			   "m" (__fcw)  \
		)

#elif defined( __x86_64 )
	#define CtxGet( ctx )        \
		__asm__ volatile (     \
			"movq %%rsp,%0\n"\
			"movq %%rbp,%1\n"\
			: "=rm" (ctx.SP),\
			  "=rm" (ctx.FP) \
		)

	#define __x87_store         \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"  \
			"fnstcw  %1\n"  \
			: "=m" (__mxcr),\
			  "=m" (__fcw)  \
		)

	#define __x87_load          \
		__asm__ volatile (    \
			"fldcw  %1\n"   \
			"ldmxcsr %0\n"  \
			:: "m" (__mxcr),\
			   "m" (__fcw)  \
		)

#elif defined( __ARM_ARCH )
	#define CtxGet( ctx ) __asm__ ( \
		"mov %0,%%sp\n"  \
		"mov %1,%%r11\n" \
	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
	#error unknown hardware architecture
#endif
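
// Illustrative sketch (not part of this file): CtxGet captures the caller's
// stack and frame pointers into a __stack_context_t, which is how the kernel
// below inspects its own stack (see ?{}( current_stack_info_t & )).
//
//	void show_context( void ) {
//		__stack_context_t ctx;
//		CtxGet( ctx );                                // ctx.SP / ctx.FP now hold this frame's pointers
//		printf( "SP=%p FP=%p\n", ctx.SP, ctx.FP );    // assumes <stdio.h>, included above
//	}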

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static $thread * __next_thread(cluster * this);
static void __run_thread(processor * this, $thread * dst);
static $thread * __halt(processor * this);
static bool __wake_one(cluster * cltr, bool was_empty);
static bool __wake_proc(processor *);

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster,   mainCluster);
KERNEL_STORAGE(processor, mainProcessor);
KERNEL_STORAGE($thread,   mainThread);
KERNEL_STORAGE(__stack_t, mainThreadCtx);

cluster   * mainCluster;
processor * mainProcessor;
$thread   * mainThread;

extern "C" {
	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

size_t __page_size = 0;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
	NULL,                                                 // cannot use 0p
	NULL,
	{ 1, false, false },
	6u                                                    // this should be seeded better, but due to a bug calling rdtsc doesn't work
};

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
	__stack_t * storage;                                  // pointer to stack object
	void * base;                                          // base of stack
	void * limit;                                         // stack grows towards stack limit
	void * context;                                       // address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
	__stack_context_t ctx;
	CtxGet( ctx );
	this.base = ctx.FP;

	rlimit r;
	getrlimit( RLIMIT_STACK, &r );
	size_t size = r.rlim_cur;

	this.limit = (void *)(((intptr_t)this.base) - size);
	this.context = &storage_mainThreadCtx;
}
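
// Illustrative sketch (not part of this file): on a downward-growing stack the
// constructor above derives the usable range from the current frame pointer
// and the shell's stack limit. For example, with an 8 MiB RLIMIT_STACK:
//
//	base  = ctx.FP                                        // high address, near the top of the stack
//	limit = base - 8 MiB                                  // low address; growth past this is out of bounds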

//-----------------------------------------------------------------------------
// Main thread construction

void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
	stack.storage = info->storage;
	with(*stack.storage) {
		limit = info->limit;
		base  = info->base;
	}
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
	*istorage |= 0x1;
	name = "Main Thread";
	state = Start;
	starter = 0p;
	last = 0p;
	cancellation = 0p;
}

void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
	state = Start;
	self_cor{ info };
	curr_cor = &self_cor;
	curr_cluster = mainCluster;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	next = 0p;

	node.next = 0p;
	node.prev = 0p;
	doregister(curr_cluster, this);

	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t & this) {

}

// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	(this.__cor){ info };
	this.proc = proc;
}

static void * __invoke_processor(void * arg);

void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
	this.name = name;
	this.cltr = &cltr;
	terminated{ 0 };
	destroyer = 0p;
	do_terminate = false;
	preemption_alarm = 0p;
	pending_preemption = false;
	runner.proc = &this;

	idle{};

	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);

	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );

	__cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
}

void ^?{}(processor & this) with( this ){
	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);

		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
		__wake_proc( &this );

		P( terminated );
		verify( kernelTLS.this_processor != &this);
	}

	pthread_join( kernel_thread, 0p );
	free( this.stack );
}

void ?{}(cluster & this, const char name[], Duration preemption_rate) with( this ) {
	this.name = name;
	this.preemption_rate = preemption_rate;
	ready_queue{};
	ready_queue_lock{};

	procs{ __get };
	idles{ __get };
	threads{ __get };

	doregister(this);
}

void ^?{}(cluster & this) {
	unregister(this);
}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we could not initialize the seed on construction; do it here
	kernelTLS.rand_seed ^= rdtscl();

	processor * this = runner.proc;
	verify(this);

	__cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

	doregister(this->cltr, this);

	{
		// Setup preemption data
		preemption_scope scope = { this };

		__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

		$thread * readyThread = 0p;
		for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			// If no ready thread
			if( readyThread == 0p ) {
				// Block until a thread is ready
				readyThread = __halt(this);
			}

			// Check if we actually found a thread
			if( readyThread ) {
				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
				/* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
				/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

				// We found a thread, run it
				__run_thread(this, readyThread);

				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
			}
		}

		__cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
	}

	unregister(this->cltr, this);

	V( this->terminated );

	__cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);

	// HACK : the coroutine context switch expects this_thread to be set,
	// and it makes sense for it to be set in all other cases except here;
	// fake it
	if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, $thread * thrd_dst) {
	$coroutine * proc_cor = get_coroutine(this->runner);

	// Update global state
	kernelTLS.this_thread = thrd_dst;

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING:  while(true) {
		if(unlikely(thrd_dst->preempted)) {
			thrd_dst->preempted = __NO_PREEMPTION;
			verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
		} else {
			verify(thrd_dst->state == Blocked || thrd_dst->state == Ready); // Ready means scheduled normally, Blocked means rerun
			thrd_dst->state = Active;
		}

		__cfaabi_dbg_debug_do(
			thrd_dst->park_stale   = true;
			thrd_dst->unpark_stale = true;
		)

		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor

		// set context switch to the thread that the processor is executing
		verify( thrd_dst->context.SP );
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

		// We just finished running a thread; there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race, so we can't write to the state again.
		// In case 2, we lost the race, so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			__schedule_thread( thrd_dst );
			break RUNNING;
		}

		// set state of processor coroutine to active and the thread to inactive
		static_assert(sizeof(thrd_dst->state) == sizeof(int));
		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Blocked, __ATOMIC_SEQ_CST);
		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_state; )
		switch(old_state) {
			case Halted:
				// The thread has halted, it should never be scheduled/run again; leave it as Halted and move on
				thrd_dst->state = Halted;

				// We may need to wake someone up here, since a thread may be blocked waiting to destroy this one
				unpark( this->destroyer __cfaabi_dbg_ctx2 );
				this->destroyer = 0p;
				break RUNNING;
			case Active:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case Rerun:
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking.
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong; abort
				abort("Finished running a thread that was Blocked/Start/Primed %d\n", old_state);
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;
	kernelTLS.this_thread = 0p;
}
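
// Illustrative sketch (not part of this file): the racy case (2) above comes
// from an interleaving like the following, between a thread T parking itself
// and another thread calling unpark(T) (both defined later in this file):
//
//	T      : calls park() -> returnToKernel(), about to be switched out
//	other  : calls unpark(T); the atomic exchange sees T still Active, stores Rerun
//	kernel : T's __cfactx_switch returns; the exchange to Blocked reads Rerun
//	kernel : case Rerun above simply runs T again, so the wakeup is not lost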

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
	$thread * thrd_src = kernelTLS.this_thread;

	// Run the thread on this processor
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		verify( proc_cor->context.SP );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

// KERNEL_ONLY
// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
static void * __invoke_processor(void * arg) {
	processor * proc = (processor *) arg;
	kernelTLS.this_processor = proc;
	kernelTLS.this_thread    = 0p;
	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	__stack_t ctx;
	info.storage = &ctx;
	(proc->runner){ proc, &info };

	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

	// Set global state
	kernelTLS.this_thread = 0p;

	// We now have a proper context from which to schedule threads
	__cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it like we normally would; it would just context switch
	// back to here. Instead, directly call main since we are already on the
	// appropriate stack.
	get_coroutine(proc->runner)->state = Active;
	main( proc->runner );
	get_coroutine(proc->runner)->state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	__cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);

	return 0p;
}

static void Abort( int ret, const char func[] ) {
	if ( ret ) {                                          // pthread routines return errno values
		abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
	} // if
} // Abort

void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
	pthread_attr_t attr;

	Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute

	size_t stacksize;
	// default stack size, normally defined by shell limit
	Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
	assert( stacksize >= PTHREAD_STACK_MIN );

	void * stack;
	__cfaabi_dbg_debug_do(
		stack = memalign( __page_size, stacksize + __page_size );
		// pthread has no mechanism to create the guard page in a user-supplied stack.
		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
		} // if
	);
	__cfaabi_dbg_no_debug_do(
		stack = malloc( stacksize );
	);

	Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );

	Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
	return stack;
}
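
// Illustrative sketch (not part of this file): in debug mode the allocation
// above lays the stack out as, from low to high addresses,
//
//	[ guard page (__page_size, PROT_NONE) ][ usable stack (stacksize) ]
//
// so a kernel thread overflowing its stack (which grows downward) faults on
// the guard page instead of silently corrupting adjacent heap memory.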

// KERNEL_ONLY
static void __kernel_first_resume( processor * this ) {
	$thread * src = mainThread;
	$coroutine * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );

	kernelTLS.this_thread->curr_cor = dst;
	__stack_prepare( &dst->stack, 65000 );
	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

	verify( ! kernelTLS.preemption_state.enabled );

	dst->last = &src->self_cor;
	dst->starter = dst->starter ? dst->starter : &src->self_cor;

	// make sure the current state is still correct
	/* paranoid */ verify(src->state == Ready);

	// context switch to specified coroutine
	verify( dst->context.SP );
	__cfactx_switch( &src->context, &dst->context );
	// when __cfactx_switch returns we are back in the src coroutine

	mainThread->curr_cor = &mainThread->self_cor;

	// make sure the current state has been updated
	/* paranoid */ verify(src->state == Active);

	verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL_ONLY
static void __kernel_last_resume( processor * this ) {
	$coroutine * src = &mainThread->self_cor;
	$coroutine * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );
	verify( dst->starter == src );
	verify( dst->context.SP );

	// SKULLDUGGERY: in debug mode the processors check that the
	// stack pointer is still within the stack limits after running a thread.
	// That check doesn't make sense if we context switch to the processor using
	// coroutine semantics. Since this is a special case, use the current context
	// info to populate these fields.
	__cfaabi_dbg_debug_do(
		__stack_context_t ctx;
		CtxGet( ctx );
		mainThread->context.SP = ctx.SP;
		mainThread->context.FP = ctx.FP;
	)

	// context switch to the processor
	__cfactx_switch( &src->context, &dst->context );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
	                  "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
	                  "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
	bool was_empty = !(ready_queue != 0);
	append( ready_queue, thrd );
	unlock( ready_queue_lock );

	__wake_one(thrd->curr_cluster, was_empty);

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
}
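
// Illustrative sketch (not part of this file): __schedule_thread must be
// called with preemption disabled; callers bracket it the same way unpark
// does below:
//
//	disable_interrupts();
//	__schedule_thread( thrd );                            // append thrd to its cluster's ready queue
//	enable_interrupts( __cfaabi_dbg_ctx );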

// KERNEL ONLY
static $thread * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
	$thread * head = pop_head( ready_queue );
	unlock( ready_queue_lock );

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	return head;
}

void unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
	if( !thrd ) return;

	disable_interrupts();
	static_assert(sizeof(thrd->state) == sizeof(int));

	// record activity
	__cfaabi_dbg_record_thrd( *thrd, false, caller );

	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
	__cfaabi_dbg_debug_do( thrd->unpark_result = old_state; )
	switch(old_state) {
		case Active:
			// Wake won the race, the thread will reschedule/rerun itself
			break;
		case Blocked:
			/* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );

			// Wake lost the race, the thread is fully blocked; restore its state and schedule it
			thrd->state = Blocked;
			__schedule_thread( thrd );
			break;
		case Rerun:
			abort("More than one thread attempted to schedule thread %p\n", thrd);
			break;
		case Halted:
		case Start:
		case Primed:
		default:
			// This makes no sense, something is wrong; abort
			abort();
	}
	enable_interrupts( __cfaabi_dbg_ctx );
}

void park( __cfaabi_dbg_ctx_param ) {
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );

	// record activity
	__cfaabi_dbg_record_thrd( *kernelTLS.this_thread, true, caller );

	returnToKernel();

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
}
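
// Illustrative sketch (not part of this file): park/unpark form a one-shot
// handshake. The `waiter` variable below is hypothetical; a waiter publishes
// its handle, then parks, and a signaller unparks it. Because of the race
// handling above, the unpark may safely arrive before the park completes.
//
//	$thread * volatile waiter = 0p;
//
//	// waiting side
//	waiter = kernelTLS.this_thread;                       // publish our handle
//	park( __cfaabi_dbg_ctx );                             // block until someone unparks us
//
//	// signalling side
//	unpark( waiter __cfaabi_dbg_ctx2 );                   // no-op on 0p, wakes the waiter otherwise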

// KERNEL ONLY
void __leave_thread() {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	abort();
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	$thread * thrd = kernelTLS.this_thread;
	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);

	// SKULLDUGGERY: It is possible that we are preempting this thread just before
	// it was going to park itself. If that is the case and it is already using the
	// intrusive fields, then we can't use them to preempt the thread;
	// in that case, abandon the preemption.
	bool preempted = false;
	if(thrd->next == 0p) {
		preempted = true;
		thrd->preempted = reason;
		returnToKernel();
	}

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts_noPoll();
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );

	return preempted;
}

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void __kernel_startup(void) {
	verify( ! kernelTLS.preemption_state.enabled );
	__cfaabi_dbg_print_safe("Kernel : Starting\n");

	__page_size = sysconf( _SC_PAGESIZE );

	__cfa_dbg_global_clusters.list{ __get };
	__cfa_dbg_global_clusters.lock{};

	// Initialize the main cluster
	mainCluster = (cluster *)&storage_mainCluster;
	(*mainCluster){"Main Cluster"};

	__cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread,
	// which will then be scheduled by the mainProcessor normally
	mainThread = ($thread *)&storage_mainThread;
	current_stack_info_t info;
	info.storage = (__stack_t*)&storage_mainThreadCtx;
	(*mainThread){ &info };

	__cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

	// Construct the processor context of the main processor
	void ?{}(processorCtx_t & this, processor * proc) {
		(this.__cor){ "Processor" };
		this.__cor.starter = 0p;
		this.proc = proc;
	}

	void ?{}(processor & this) with( this ) {
		name = "Main Processor";
		cltr = mainCluster;
		terminated{ 0 };
		do_terminate = false;
		preemption_alarm = 0p;
		pending_preemption = false;
		kernel_thread = pthread_self();

		runner{ &this };
		__cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
	}

	// Initialize the main processor and the main processor ctx
	// (the coroutine that contains the processing control flow)
	mainProcessor = (processor *)&storage_mainProcessor;
	(*mainProcessor){};

	// Initialize the global state variables
	kernelTLS.this_processor = mainProcessor;
	kernelTLS.this_thread    = mainThread;

	// Enable preemption
	kernel_start_preemption();

	// Add the main thread to the ready queue;
	// once resume is called on mainProcessor->runner, the mainThread needs to be scheduled like any normal thread
	__schedule_thread(mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	__kernel_first_resume( kernelTLS.this_processor );

	// THE SYSTEM IS NOW COMPLETELY RUNNING
	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	verify( TL_GET( preemption_state.enabled ) );
}

static void __kernel_shutdown(void) {
	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

	/* paranoid */ verify( TL_GET( preemption_state.enabled ) );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread,
	// which is currently here
	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
	__kernel_last_resume( kernelTLS.this_processor );
	mainThread->self_cor.state = Halted;

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Disable preemption
	kernel_stop_preemption();

	// Destroy the main processor and its context in reverse order of construction
	// These were manually constructed, so we must manually destroy them
	^(*mainProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task, it will not destroy anything
	/* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
	^(*mainThread){};

	^(__cfa_dbg_global_clusters.list){};
	^(__cfa_dbg_global_clusters.lock){};

	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
static $thread * __halt(processor * this) with( *this ) {
	if( do_terminate ) return 0p;

	// First, lock the cluster idle lock
	lock( cltr->idle_lock __cfaabi_dbg_ctx2 );

	// Check if we can find a thread
	if( $thread * found = __next_thread( cltr ) ) {
		unlock( cltr->idle_lock );
		return found;
	}

	// Move this processor from the active list to the idle list
	move_to_front(cltr->procs, cltr->idles, *this);

	// Unlock the idle lock so we don't go to sleep with a lock
	unlock    (cltr->idle_lock);

	// We are ready to sleep
	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
	wait( idle );

	// We have woken up
	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);

	// Get ourselves off the idle list
	with( *cltr ) {
		lock  (idle_lock __cfaabi_dbg_ctx2);
		move_to_front(idles, procs, *this);
		unlock(idle_lock);
	}

	// Don't check the ready queue again, we may not be in a position to run a thread
	return 0p;
}

// Wake a processor from the front of the idle list, if there is one
static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
	// if not forcing, check cheaply whether we already know there is no one to wake
	if( !this->idles.head && !force ) return false;

	// First, lock the cluster idle lock
	lock( this->idle_lock __cfaabi_dbg_ctx2 );

	// Check if there is someone to wake up
	if( !this->idles.head ) {
		// Nope, unlock and return false
		unlock( this->idle_lock );
		return false;
	}

	// Wake them up
	post( this->idles.head->idle );

	// Unlock and return true
	unlock( this->idle_lock );
	return true;
}

// Unconditionally wake a processor
static bool __wake_proc(processor * this) {
	return post( this->idle );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
static __spinlock_t kernel_abort_lock;
static bool kernel_abort_called = false;

void * kernel_abort(void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

	// first task to abort ?
	if ( kernel_abort_called ) {                          // not first task to abort ?
		unlock( kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );                      // block SIGALRM signals
		sigaddset( &mask, SIGUSR1 );                      // block SIGUSR1 signals
		sigsuspend( &mask );                              // block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );                            // if processor unblocks before it is killed, terminate it
	}
	else {
		kernel_abort_called = true;
		unlock( kernel_abort_lock );
	}

	return kernelTLS.this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	$thread * thrd = kernel_data;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	this.count = count;
	(this.waiting){};
}
void ^?{}(semaphore & this) {}

void P(semaphore & this) with( this ){
	lock( lock __cfaabi_dbg_ctx2 );
	count -= 1;
	if ( count < 0 ) {
		// queue current task
		append( waiting, kernelTLS.this_thread );

		// atomically release spin lock and block
		unlock( lock );
		park( __cfaabi_dbg_ctx );
	}
	else {
		unlock( lock );
	}
}

bool V(semaphore & this) with( this ) {
	$thread * thrd = 0p;
	lock( lock __cfaabi_dbg_ctx2 );
	count += 1;
	if ( count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( waiting );
	}

	unlock( lock );

	// make new owner
	unpark( thrd __cfaabi_dbg_ctx2 );

	return thrd != 0p;
}
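
// Illustrative sketch (not part of this file): this semaphore already serves
// as a termination signal above (terminated{ 0 } in the processor constructor,
// V at the end of the processor's main, P in ^?{}(processor &)). A generic
// one-shot rendezvous with hypothetical names looks like:
//
//	semaphore done = { 0 };                               // start closed
//
//	// finishing side
//	V( done );                                            // unblock one waiter, if any
//
//	// waiting side
//	P( done );                                            // block until V is called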

//-----------------------------------------------------------------------------
// Global Queues
void doregister( cluster & cltr ) {
	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	push_front( __cfa_dbg_global_clusters.list, cltr );
	unlock    ( __cfa_dbg_global_clusters.lock );
}

void unregister( cluster & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	remove( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, $thread & thrd ) {
	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	cltr->nthreads += 1;
	push_front(cltr->threads, thrd);
	unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, $thread & thrd ) {
	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->threads, thrd );
	cltr->nthreads -= 1;
	unlock(cltr->thread_list_lock);
}

void doregister( cluster * cltr, processor * proc ) {
	lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
	cltr->nprocessors += 1;
	push_front(cltr->procs, *proc);
	unlock    (cltr->idle_lock);
}

void unregister( cluster * cltr, processor * proc ) {
	lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
	remove(cltr->procs, *proc );
	cltr->nprocessors -= 1;
	unlock(cltr->idle_lock);
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS.this_thread;
		}

		void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]) {
			if(park) {
				this.park_caller = prev_name;
				this.park_stale  = false;
			}
			else {
				this.unpark_caller = prev_name;
				this.unpark_stale  = false;
			}
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //