Changeset e660761 for libcfa


Timestamp: Jul 31, 2020, 4:02:04 PM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 3e2b9c9
Parents: 3ac8b9f
Message: First attempt at reducing compilation time by restructuring the code.
Notably, starting the runtime has been moved to kernel/startup.cfa
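As context for the move, the kernel.cfa diff below drops the runtime's boot hooks, which are ordinary constructor/destructor functions run at a fixed priority before and after main(). A minimal sketch of that pattern, copied from the declarations removed from kernel.cfa below (their definitions now live in the new kernel/startup.cfa):

    // run automatically at program start/end, ordered by STARTUP_PRIORITY_KERNEL
    static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));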

Location: libcfa/src
Files: 2 added, 8 edited

Legend: the diffs below are shown in unified format; lines prefixed with '-' appear only in the parent revision (r3ac8b9f, removed), lines prefixed with '+' appear only in this changeset (re660761, added), and unprefixed lines are unmodified context.
  • libcfa/src/bits/defs.hfa

--- libcfa/src/bits/defs.hfa (r3ac8b9f)
+++ libcfa/src/bits/defs.hfa (re660761)
@@ -16,6 +16,4 @@
 #pragma once
 
-#include <stdbool.h>
-#include <stddef.h>
 #include <stdint.h>
 
     
@@ -54,74 +52,2 @@
     return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
 }
-
-// #define __CFA_NO_BIT_TEST_AND_SET__
-
-#if defined( __i386 )
-static inline bool __atomic_bts(volatile unsigned long int * target, unsigned long int bit ) {
-        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
-        unsigned long int mask = 1ul << bit;
-        unsigned long int ret = __atomic_fetch_or(target, mask, (int)__ATOMIC_RELAXED);
-        return (ret & mask) != 0;
-    #else
-        int result = 0;
-        asm volatile(
-            "LOCK btsl %[bit], %[target]\n\t"
-            : "=@ccc" (result)
-            : [target] "m" (*target), [bit] "r" (bit)
-        );
-        return result != 0;
-    #endif
-}
-
-static inline bool __atomic_btr(volatile unsigned long int * target, unsigned long int bit ) {
-        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
-        unsigned long int mask = 1ul << bit;
-        unsigned long int ret = __atomic_fetch_and(target, ~mask, (int)__ATOMIC_RELAXED);
-        return (ret & mask) != 0;
-        #else
-        int result = 0;
-        asm volatile(
-            "LOCK btrl %[bit], %[target]\n\t"
-            :"=@ccc" (result)
-            : [target] "m" (*target), [bit] "r" (bit)
-        );
-        return result != 0;
-    #endif
-}
-#elif defined( __x86_64 )
-static inline bool __atomic_bts(volatile unsigned long long int * target, unsigned long long int bit ) {
-        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
-        unsigned long long int mask = 1ul << bit;
-        unsigned long long int ret = __atomic_fetch_or(target, mask, (int)__ATOMIC_RELAXED);
-        return (ret & mask) != 0;
-    #else
-        int result = 0;
-        asm volatile(
-            "LOCK btsq %[bit], %[target]\n\t"
-            : "=@ccc" (result)
-            : [target] "m" (*target), [bit] "r" (bit)
-        );
-        return result != 0;
-    #endif
-}
-
-static inline bool __atomic_btr(volatile unsigned long long int * target, unsigned long long int bit ) {
-        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
-        unsigned long long int mask = 1ul << bit;
-        unsigned long long int ret = __atomic_fetch_and(target, ~mask, (int)__ATOMIC_RELAXED);
-        return (ret & mask) != 0;
-        #else
-        int result = 0;
-        asm volatile(
-            "LOCK btrq %[bit], %[target]\n\t"
-            :"=@ccc" (result)
-            : [target] "m" (*target), [bit] "r" (bit)
-        );
-        return result != 0;
-    #endif
-}
-#elif defined( __ARM_ARCH )
-    #error __atomic_bts and __atomic_btr not implemented for arm
-#else
-        #error uknown hardware architecture
-#endif
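The helpers removed above implement atomic bit test-and-set/test-and-reset: __atomic_bts sets the given bit in *target and returns the bit's previous value, while __atomic_btr clears it and likewise returns the previous value. A minimal usage sketch, assuming the x86_64 signatures above (the slot-claiming wrappers and the ready_mask variable are illustrative only, not part of libcfa):

    static volatile unsigned long long int ready_mask = 0;

    // Attempt to claim slot idx: returns true if it was already claimed.
    static inline bool claim_slot( unsigned long long int idx ) {
        return __atomic_bts( &ready_mask, idx );   // previous value of the bit
    }

    // Release slot idx: returns true if the bit was set before being cleared.
    static inline bool release_slot( unsigned long long int idx ) {
        return __atomic_btr( &ready_mask, idx );
    }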
  • libcfa/src/concurrency/alarm.cfa

--- libcfa/src/concurrency/alarm.cfa (r3ac8b9f)
+++ libcfa/src/concurrency/alarm.cfa (re660761)
@@ -23,5 +23,5 @@
 
 #include "alarm.hfa"
-#include "kernel_private.hfa"
+#include "kernel/fwd.hfa"
 #include "preemption.hfa"
 
  • libcfa/src/concurrency/io.cfa

--- libcfa/src/concurrency/io.cfa (r3ac8b9f)
+++ libcfa/src/concurrency/io.cfa (re660761)
@@ -16,5 +16,5 @@
 #if defined(__CFA_DEBUG__)
         // #define __CFA_DEBUG_PRINT_IO__
-        #define __CFA_DEBUG_PRINT_IO_CORE__
+        // #define __CFA_DEBUG_PRINT_IO_CORE__
 #endif
 
     
@@ -173,5 +173,5 @@
 } iopoll;
 
-void __kernel_io_startup() {
+void __kernel_io_startup(void) {
         __cfaabi_dbg_print_safe( "Kernel : Creating EPOLL instance\n" );
 
@@ -187,5 +187,5 @@
 }
 
-void __kernel_io_shutdown() {
+void __kernel_io_shutdown(void) {
         // Notify the io poller thread of the shutdown
         iopoll.run = false;
  • libcfa/src/concurrency/kernel.cfa

--- libcfa/src/concurrency/kernel.cfa (r3ac8b9f)
+++ libcfa/src/concurrency/kernel.cfa (re660761)
@@ -18,22 +18,12 @@
 
 //C Includes
-#include <stddef.h>
 #include <errno.h>
-#include <string.h>
 #include <stdio.h>
-#include <fenv.h>
 #include <signal.h>
 #include <unistd.h>
-#include <limits.h>                                                                             // PTHREAD_STACK_MIN
-#include <sys/mman.h>                                                                   // mprotect
-extern "C" {
-#include <sys/resource.h>
-}
 
 //CFA Includes
-#include "time.hfa"
 #include "kernel_private.hfa"
 #include "preemption.hfa"
-#include "startup.hfa"
 
 //Private includes
     
@@ -45,12 +35,4 @@
 // Some assembly required
 #if defined( __i386 )
-        #define CtxGet( ctx )        \
-                __asm__ volatile (     \
-                        "movl %%esp,%0\n"\
-                        "movl %%ebp,%1\n"\
-                        : "=rm" (ctx.SP),\
-                                "=rm" (ctx.FP) \
-                )
-
         // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
         // fcw  : X87 FPU control word (preserved across function calls)
     
@@ -74,12 +56,4 @@
 
 #elif defined( __x86_64 )
-        #define CtxGet( ctx )        \
-                __asm__ volatile (     \
-                        "movq %%rsp,%0\n"\
-                        "movq %%rbp,%1\n"\
-                        : "=rm" (ctx.SP),\
-                                "=rm" (ctx.FP) \
-                )
-
         #define __x87_store         \
                 uint32_t __mxcr;      \
     
@@ -102,16 +76,10 @@
 
 #elif defined( __ARM_ARCH )
-#define CtxGet( ctx ) __asm__ ( \
-                "mov %0,%%sp\n"   \
-                "mov %1,%%r11\n"   \
-        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
 #else
         #error unknown hardware architecture
 #endif
 
-//-----------------------------------------------------------------------------
-//Start and stop routine for the kernel, declared first to make sure they run first
-static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
-static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+extern $thread * mainThread;
+extern processor * mainProcessor;
 
 //-----------------------------------------------------------------------------
     
@@ -120,251 +88,7 @@
 static bool __has_next_thread(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static bool __wake_proc(processor *);
 static bool __wake_one(struct __processor_id_t * id, cluster * cltr);
 static void __halt(processor * this);
-
-//-----------------------------------------------------------------------------
-// Kernel storage
-KERNEL_STORAGE(cluster,              mainCluster);
-KERNEL_STORAGE(processor,            mainProcessor);
-KERNEL_STORAGE($thread,              mainThread);
-KERNEL_STORAGE(__stack_t,            mainThreadCtx);
-KERNEL_STORAGE(io_context,           mainPollerThread);
-KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
-#if !defined(__CFA_NO_STATISTICS__)
-KERNEL_STORAGE(__stats_t, mainProcStats);
-#endif
-
-cluster              * mainCluster;
-processor            * mainProcessor;
-$thread              * mainThread;
-__scheduler_RWLock_t * __scheduler_lock;
-
-extern "C" {
-        struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
-}
-
-size_t __page_size = 0;
-
-//-----------------------------------------------------------------------------
-// Global state
-thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) @= {
-        NULL,                                                                                           // cannot use 0p
-        NULL,
-        NULL,
-        { 1, false, false },
-};
-
-//-----------------------------------------------------------------------------
-// Struct to steal stack
-struct current_stack_info_t {
-        __stack_t * storage;                                                            // pointer to stack object
-        void * base;                                                                            // base of stack
-        void * limit;                                                                           // stack grows towards stack limit
-        void * context;                                                                         // address of cfa_context_t
-};
-
-void ?{}( current_stack_info_t & this ) {
-        __stack_context_t ctx;
-        CtxGet( ctx );
-        this.base = ctx.FP;
-
-        rlimit r;
-        getrlimit( RLIMIT_STACK, &r);
-        size_t size = r.rlim_cur;
-
-        this.limit = (void *)(((intptr_t)this.base) - size);
-        this.context = &storage_mainThreadCtx;
-}
-
-//-----------------------------------------------------------------------------
-// Main thread construction
-
-void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
-        stack.storage = info->storage;
-        with(*stack.storage) {
-                limit     = info->limit;
-                base      = info->base;
-        }
-        __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
-        *istorage |= 0x1;
-        name = "Main Thread";
-        state = Start;
-        starter = 0p;
-        last = 0p;
-        cancellation = 0p;
-}
-
-void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
-        ticket = 1;
-        state = Start;
-        self_cor{ info };
-        curr_cor = &self_cor;
-        curr_cluster = mainCluster;
-        self_mon.owner = &this;
-        self_mon.recursion = 1;
-        self_mon_p = &self_mon;
-        link.next = 0p;
-        link.prev = 0p;
-
-        node.next = 0p;
-        node.prev = 0p;
-        doregister(curr_cluster, this);
-
-        monitors{ &self_mon_p, 1, (fptr_t)0 };
-}
-
-//-----------------------------------------------------------------------------
-// Processor coroutine
-void ?{}(processorCtx_t & this) {
-
-}
-
-// Construct the processor context of non-main processors
-static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
-        (this.__cor){ info };
-        this.proc = proc;
-}
-
-static void * __invoke_processor(void * arg);
-
-static init(processor & this, const char name[], cluster & _cltr) with( this ) {
-        this.name = name;
-        this.cltr = &_cltr;
-        id = -1u;
-        destroyer = 0p;
-        do_terminate = false;
-        preemption_alarm = 0p;
-        pending_preemption = false;
-
-        #if !defined(__CFA_NO_STATISTICS__)
-                print_stats = 0;
-                print_halts = false;
-        #endif
-
-        int target = __atomic_add_fetch( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-
-        id = doregister((__processor_id_t*)&this);
-
-        // Lock the RWlock so no-one pushes/pops while we are changing the queue
-        uint_fast32_t last_size = ready_mutate_lock();
-
-                // Adjust the ready queue size
-                ready_queue_grow( cltr, target );
-
-        // Unlock the RWlock
-        ready_mutate_unlock( last_size );
-
-        __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
-}
-
-// Not a ctor, it just preps the destruction but should not destroy members
-void deinit(processor & this) {
-
-        int target = __atomic_sub_fetch( &this.cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-
-        // Lock the RWlock so no-one pushes/pops while we are changing the queue
-        uint_fast32_t last_size = ready_mutate_lock();
-
-                // Adjust the ready queue size
-                ready_queue_shrink( this.cltr, target );
-
-                // Make sure we aren't on the idle queue
-                unsafe_remove( this.cltr->idles, &this );
-
-        // Unlock the RWlock
-        ready_mutate_unlock( last_size );
-
-        // Finally we don't need the read_lock any more
-        unregister((__processor_id_t*)&this);
-}
-
-void ?{}(processor & this, const char name[], cluster & _cltr) {
-        ( this.idle ){};
-        ( this.terminated ){ 0 };
-        ( this.runner ){};
-        init( this, name, _cltr );
-
-        __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
-
-        this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
-
-}
-
-void ^?{}(processor & this) with( this ){
-        if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
-                __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
-
-                __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
-                __wake_proc( &this );
-
-                P( terminated );
-                verify( kernelTLS.this_processor != &this);
-        }
-
-        int err = pthread_join( kernel_thread, 0p );
-        if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err));
-
-        free( this.stack );
-
-        deinit( this );
-}
-
-void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
-        this.name = name;
-        this.preemption_rate = preemption_rate;
-        this.nprocessors = 0;
-        ready_queue{};
-
-        #if !defined(__CFA_NO_STATISTICS__)
-                print_stats = 0;
-                stats = alloc();
-                __init_stats( stats );
-        #endif
-
-        threads{ __get };
-
-        doregister(this);
-
-        // Lock the RWlock so no-one pushes/pops while we are changing the queue
-        uint_fast32_t last_size = ready_mutate_lock();
-
-                // Adjust the ready queue size
-                ready_queue_grow( &this, 0 );
-
-        // Unlock the RWlock
-        ready_mutate_unlock( last_size );
-
-        this.io.cnt  = num_io;
-        this.io.ctxs = aalloc(num_io);
-        for(i; this.io.cnt) {
-                (this.io.ctxs[i]){ this, io_params };
-        }
-}
-
-void ^?{}(cluster & this) {
-        for(i; this.io.cnt) {
-                ^(this.io.ctxs[i]){ true };
-        }
-        free(this.io.ctxs);
-
-        // Lock the RWlock so no-one pushes/pops while we are changing the queue
-        uint_fast32_t last_size = ready_mutate_lock();
-
-                // Adjust the ready queue size
-                ready_queue_shrink( &this, 0 );
-
-        // Unlock the RWlock
-        ready_mutate_unlock( last_size );
-
-        #if !defined(__CFA_NO_STATISTICS__)
-                if( 0 != this.print_stats ) {
-                        __print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
-                }
-                free( this.stats );
-        #endif
-
-        unregister(this);
-}
+bool __wake_proc(processor *);
 
 //=============================================================================================
     
@@ -557,147 +281,4 @@
 }
 
-// KERNEL_ONLY
-// Context invoker for processors
-// This is the entry point for processors (kernel threads)
-// It effectively constructs a coroutine by stealing the pthread stack
-static void * __invoke_processor(void * arg) {
-        #if !defined( __CFA_NO_STATISTICS__ )
-                __stats_t local_stats;
-                __init_stats( &local_stats );
-                kernelTLS.this_stats = &local_stats;
-        #endif
-
-        processor * proc = (processor *) arg;
-        kernelTLS.this_processor = proc;
-        kernelTLS.this_thread    = 0p;
-        kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
-        // SKULLDUGGERY: We want to create a context for the processor coroutine
-        // which is needed for the 2-step context switch. However, there is no reason
-        // to waste the perfectly valid stack create by pthread.
-        current_stack_info_t info;
-        __stack_t ctx;
-        info.storage = &ctx;
-        (proc->runner){ proc, &info };
-
-        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
-
-        //Set global state
-        kernelTLS.this_thread = 0p;
-
-        //We now have a proper context from which to schedule threads
-        __cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);
-
-        // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
-        // resume it to start it like it normally would, it will just context switch
-        // back to here. Instead directly call the main since we already are on the
-        // appropriate stack.
-        get_coroutine(proc->runner)->state = Active;
-        main( proc->runner );
-        get_coroutine(proc->runner)->state = Halted;
-
-        // Main routine of the core returned, the core is now fully terminated
-        __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);
-
-        #if !defined(__CFA_NO_STATISTICS__)
-                __tally_stats(proc->cltr->stats, &local_stats);
-                if( 0 != proc->print_stats ) {
-                        __print_stats( &local_stats, proc->print_stats, true, proc->name, (void*)proc );
-                }
-        #endif
-
-        return 0p;
-}
-
-static void Abort( int ret, const char func[] ) {
-        if ( ret ) {                                                                            // pthread routines return errno values
-                abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
-        } // if
-} // Abort
-
-void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
-        pthread_attr_t attr;
-
-        Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
-
-        size_t stacksize;
-        // default stack size, normally defined by shell limit
-        Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
-        assert( stacksize >= PTHREAD_STACK_MIN );
-
-        void * stack;
-        __cfaabi_dbg_debug_do(
-                stack = memalign( __page_size, stacksize + __page_size );
-                // pthread has no mechanism to create the guard page in user supplied stack.
-                if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
-                        abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
-                } // if
-        );
-        __cfaabi_dbg_no_debug_do(
-                stack = malloc( stacksize );
-        );
-
-        Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
-
-        Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
-        return stack;
-}
-
-// KERNEL_ONLY
-static void __kernel_first_resume( processor * this ) {
-        $thread * src = mainThread;
-        $coroutine * dst = get_coroutine(this->runner);
-
-        verify( ! kernelTLS.preemption_state.enabled );
-
-        kernelTLS.this_thread->curr_cor = dst;
-        __stack_prepare( &dst->stack, 65000 );
-        __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);
-
-        verify( ! kernelTLS.preemption_state.enabled );
-
-        dst->last = &src->self_cor;
-        dst->starter = dst->starter ? dst->starter : &src->self_cor;
-
-        // make sure the current state is still correct
-        /* paranoid */ verify(src->state == Ready);
-
-        // context switch to specified coroutine
-        verify( dst->context.SP );
-        __cfactx_switch( &src->context, &dst->context );
-        // when __cfactx_switch returns we are back in the src coroutine
-
-        mainThread->curr_cor = &mainThread->self_cor;
-
-        // make sure the current state has been update
-        /* paranoid */ verify(src->state == Active);
-
-        verify( ! kernelTLS.preemption_state.enabled );
-}
-
-// KERNEL_ONLY
-static void __kernel_last_resume( processor * this ) {
-        $coroutine * src = &mainThread->self_cor;
-        $coroutine * dst = get_coroutine(this->runner);
-
-        verify( ! kernelTLS.preemption_state.enabled );
-        verify( dst->starter == src );
-        verify( dst->context.SP );
-
-        // SKULLDUGGERY in debug the processors check that the
-        // stack is still within the limit of the stack limits after running a thread.
-        // that check doesn't make sense if we context switch to the processor using the
-        // coroutine semantics. Since this is a special case, use the current context
-        // info to populate these fields.
-        __cfaabi_dbg_debug_do(
-                __stack_context_t ctx;
-                CtxGet( ctx );
-                mainThread->context.SP = ctx.SP;
-                mainThread->context.FP = ctx.FP;
-        )
-
-        // context switch to the processor
-        __cfactx_switch( &src->context, &dst->context );
-}
-
 //-----------------------------------------------------------------------------
 // Scheduler routines
     
@@ -841,157 +422,4 @@
 
 //=============================================================================================
-// Kernel Setup logic
-//=============================================================================================
-//-----------------------------------------------------------------------------
-// Kernel boot procedures
-static void __kernel_startup(void) {
-        verify( ! kernelTLS.preemption_state.enabled );
-        __cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
-
-        __page_size = sysconf( _SC_PAGESIZE );
-
-        __cfa_dbg_global_clusters.list{ __get };
-        __cfa_dbg_global_clusters.lock{};
-
-        // Initialize the global scheduler lock
-        __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
-        (*__scheduler_lock){};
-
-        // Initialize the main cluster
-        mainCluster = (cluster *)&storage_mainCluster;
-        (*mainCluster){"Main Cluster", 0};
-
-        __cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n");
-
-        // Start by initializing the main thread
-        // SKULLDUGGERY: the mainThread steals the process main thread
-        // which will then be scheduled by the mainProcessor normally
-        mainThread = ($thread *)&storage_mainThread;
-        current_stack_info_t info;
-        info.storage = (__stack_t*)&storage_mainThreadCtx;
-        (*mainThread){ &info };
-
-        __cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n");
-
-
-
-        // Construct the processor context of the main processor
-        void ?{}(processorCtx_t & this, processor * proc) {
-                (this.__cor){ "Processor" };
-                this.__cor.starter = 0p;
-                this.proc = proc;
-        }
-
-        void ?{}(processor & this) with( this ) {
-                ( this.idle ){};
-                ( this.terminated ){ 0 };
-                ( this.runner ){};
-                init( this, "Main Processor", *mainCluster );
-                kernel_thread = pthread_self();
-
-                runner{ &this };
-                __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
-        }
-
-        // Initialize the main processor and the main processor ctx
-        // (the coroutine that contains the processing control flow)
-        mainProcessor = (processor *)&storage_mainProcessor;
-        (*mainProcessor){};
-
-        //initialize the global state variables
-        kernelTLS.this_processor = mainProcessor;
-        kernelTLS.this_thread    = mainThread;
-
-        #if !defined( __CFA_NO_STATISTICS__ )
-                kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
-                __init_stats( kernelTLS.this_stats );
-        #endif
-
-        // Start IO
-        __kernel_io_startup();
-
-        // Enable preemption
-        kernel_start_preemption();
-
-        // Add the main thread to the ready queue
-        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-        __schedule_thread((__processor_id_t *)mainProcessor, mainThread);
-
-        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
-        // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
-        // mainThread is on the ready queue when this call is made.
-        __kernel_first_resume( kernelTLS.this_processor );
-
-
-        // THE SYSTEM IS NOW COMPLETELY RUNNING
-
-
-        // Now that the system is up, finish creating systems that need threading
-        mainCluster->io.ctxs = (io_context *)&storage_mainPollerThread;
-        mainCluster->io.cnt  = 1;
-        (*mainCluster->io.ctxs){ *mainCluster };
-
-        __cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n");
-
-        verify( ! kernelTLS.preemption_state.enabled );
-        enable_interrupts( __cfaabi_dbg_ctx );
-        verify( TL_GET( preemption_state.enabled ) );
-}
-
-static void __kernel_shutdown(void) {
-        //Before we start shutting things down, wait for systems that need threading to shutdown
-        ^(*mainCluster->io.ctxs){};
-        mainCluster->io.cnt  = 0;
-        mainCluster->io.ctxs = 0p;
-
-        /* paranoid */ verify( TL_GET( preemption_state.enabled ) );
-        disable_interrupts();
-        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-        __cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n");
-
-        // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
-        // When its coroutine terminates, it return control to the mainThread
-        // which is currently here
-        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-        __kernel_last_resume( kernelTLS.this_processor );
-        mainThread->self_cor.state = Halted;
-
-        // THE SYSTEM IS NOW COMPLETELY STOPPED
-
-        // Disable preemption
-        kernel_stop_preemption();
-
-        // Stop IO
-        __kernel_io_shutdown();
-
-        // Destroy the main processor and its context in reverse order of construction
-        // These were manually constructed so we need manually destroy them
-        void ^?{}(processor & this) with( this ){
-                deinit( this );
-
-                /* paranoid */ verify( this.do_terminate == true );
-                __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
-        }
-
-        ^(*mainProcessor){};
-
-        // Final step, destroy the main thread since it is no longer needed
-
-        // Since we provided a stack to this taxk it will not destroy anything
-        /* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
-        ^(*mainThread){};
-
-        ^(*mainCluster){};
-
-        ^(*__scheduler_lock){};
-
-        ^(__cfa_dbg_global_clusters.list){};
-        ^(__cfa_dbg_global_clusters.lock){};
-
-        __cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n");
-}
-
-//=============================================================================================
 // Kernel Idle Sleep
 //=============================================================================================
     
@@ -1013,5 +441,5 @@
 
 // Unconditionnaly wake a thread
-static bool __wake_proc(processor * this) {
+bool __wake_proc(processor * this) {
         __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
 
     
@@ -1189,32 +617,4 @@
 
 //-----------------------------------------------------------------------------
-// Global Queues
-void doregister( cluster     & cltr ) {
-        lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
-        push_front( __cfa_dbg_global_clusters.list, cltr );
-        unlock    ( __cfa_dbg_global_clusters.lock );
-}
-
-void unregister( cluster     & cltr ) {
-        lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
-        remove( __cfa_dbg_global_clusters.list, cltr );
-        unlock( __cfa_dbg_global_clusters.lock );
-}
-
-void doregister( cluster * cltr, $thread & thrd ) {
-        lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
-        cltr->nthreads += 1;
-        push_front(cltr->threads, thrd);
-        unlock    (cltr->thread_list_lock);
-}
-
-void unregister( cluster * cltr, $thread & thrd ) {
-        lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
-        remove(cltr->threads, thrd );
-        cltr->nthreads -= 1;
-        unlock(cltr->thread_list_lock);
-}
-
-//-----------------------------------------------------------------------------
 // Debug
 __cfaabi_dbg_debug_do(
  • libcfa/src/concurrency/kernel.hfa

--- libcfa/src/concurrency/kernel.hfa (r3ac8b9f)
+++ libcfa/src/concurrency/kernel.hfa (re660761)
@@ -16,7 +16,4 @@
 #pragma once
 
-#include <stdbool.h>
-#include <stdint.h>
-
 #include "invoke.h"
 #include "time_t.hfa"
     
@@ -27,5 +24,4 @@
 extern "C" {
 #include <pthread.h>
-#include <semaphore.h>
 }
 
  • libcfa/src/concurrency/kernel_private.hfa

--- libcfa/src/concurrency/kernel_private.hfa (r3ac8b9f)
+++ libcfa/src/concurrency/kernel_private.hfa (re660761)
@@ -52,19 +52,4 @@
 
 
-
-struct event_kernel_t {
-        alarm_list_t alarms;
-        __spinlock_t lock;
-};
-
-extern event_kernel_t * event_kernel;
-
-struct __cfa_kernel_preemption_state_t {
-        bool enabled;
-        bool in_progress;
-        unsigned short disable_count;
-};
-
-extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" )));
 
 extern cluster * mainCluster;
     
@@ -104,6 +89,4 @@
 //-----------------------------------------------------------------------------
 // Utils
-#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
-
 static inline uint64_t __tls_rand() {
         #if defined(__SIZEOF_INT128__)
     
@@ -113,8 +96,4 @@
         #endif
 }
-
-
-void doregister( struct cluster & cltr );
-void unregister( struct cluster & cltr );
 
 void doregister( struct cluster * cltr, struct $thread & thrd );
  • libcfa/src/concurrency/preemption.cfa

--- libcfa/src/concurrency/preemption.cfa (r3ac8b9f)
+++ libcfa/src/concurrency/preemption.cfa (re660761)
@@ -26,4 +26,5 @@
 
 #include "bits/signal.hfa"
+#include "kernel_private.hfa"
 
 #if !defined(__CFA_DEFAULT_PREEMPTION__)
     
@@ -60,4 +61,7 @@
 #error unknown hardware architecture
 #endif
+
+#warning duplicated in startup.cfa
+#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
 
 KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
     
@@ -293,4 +297,4 @@
 // Startup routine to activate preemption
 // Called from kernel_startup
-void kernel_start_preemption() {
+void __kernel_alarm_startup() {
         __cfaabi_dbg_print_safe( "Kernel : Starting preemption\n" );
    297301
     
    315319// Shutdown routine to deactivate preemption
    316320// Called from kernel_shutdown
    317 void kernel_stop_preemption() {
     321void __kernel_alarm_shutdown() {
    318322        __cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n" );
    319323
  • libcfa/src/concurrency/preemption.hfa

    r3ac8b9f re660761  
    1616#pragma once
    1717
     18#include "bits/locks.hfa"
    1819#include "alarm.hfa"
    19 #include "kernel_private.hfa"
    2020
    21 void kernel_start_preemption();
    22 void kernel_stop_preemption();
     21struct event_kernel_t {
     22        alarm_list_t alarms;
     23        __spinlock_t lock;
     24};
     25
     26extern event_kernel_t * event_kernel;
     27
    2328void update_preemption( processor * this, Duration duration );
    2429