Changeset summary: 1 file edited.

Legend for diff lines below:
  Unmodified — context line present in both revisions
  Added      — line introduced by this changeset
  Removed    — line deleted by this changeset
  • libcfa/src/concurrency/kernel.cfa

    rffe2fad rafc2427  
    3636#include "invoke.h"
    3737
    38 //-----------------------------------------------------------------------------
    39 // Some assembly required
    40 #if   defined( __i386 )
    41         #define CtxGet( ctx )        \
    42                 __asm__ volatile (     \
    43                         "movl %%esp,%0\n"\
    44                         "movl %%ebp,%1\n"\
    45                         : "=rm" (ctx.SP),\
    46                                 "=rm" (ctx.FP) \
    47                 )
    48 
    49         // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
    50         // fcw  : X87 FPU control word (preserved across function calls)
    51         #define __x87_store         \
    52                 uint32_t __mxcr;      \
    53                 uint16_t __fcw;       \
    54                 __asm__ volatile (    \
    55                         "stmxcsr %0\n"  \
    56                         "fnstcw  %1\n"  \
    57                         : "=m" (__mxcr),\
    58                                 "=m" (__fcw)  \
    59                 )
    60 
    61         #define __x87_load         \
    62                 __asm__ volatile (   \
    63                         "fldcw  %1\n"  \
    64                         "ldmxcsr %0\n" \
    65                         ::"m" (__mxcr),\
    66                                 "m" (__fcw)  \
    67                 )
    68 
    69 #elif defined( __x86_64 )
    70         #define CtxGet( ctx )        \
    71                 __asm__ volatile (     \
    72                         "movq %%rsp,%0\n"\
    73                         "movq %%rbp,%1\n"\
    74                         : "=rm" (ctx.SP),\
    75                                 "=rm" (ctx.FP) \
    76                 )
    77 
    78         #define __x87_store         \
    79                 uint32_t __mxcr;      \
    80                 uint16_t __fcw;       \
    81                 __asm__ volatile (    \
    82                         "stmxcsr %0\n"  \
    83                         "fnstcw  %1\n"  \
    84                         : "=m" (__mxcr),\
    85                                 "=m" (__fcw)  \
    86                 )
    87 
    88         #define __x87_load          \
    89                 __asm__ volatile (    \
    90                         "fldcw  %1\n"   \
    91                         "ldmxcsr %0\n"  \
    92                         :: "m" (__mxcr),\
    93                                 "m" (__fcw)  \
    94                 )
    95 
    96 
    97 #elif defined( __ARM_ARCH )
    98 #define CtxGet( ctx ) __asm__ ( \
    99                 "mov %0,%%sp\n"   \
    100                 "mov %1,%%r11\n"   \
    101         : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    102 #else
    103         #error unknown hardware architecture
    104 #endif
    105 
    106 //-----------------------------------------------------------------------------
    10738//Start and stop routine for the kernel, declared first to make sure they run first
    10839static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     
    11142//-----------------------------------------------------------------------------
    11243// Kernel storage
    113 KERNEL_STORAGE(cluster,         mainCluster);
    114 KERNEL_STORAGE(processor,       mainProcessor);
    115 KERNEL_STORAGE(thread_desc,     mainThread);
    116 KERNEL_STORAGE(__stack_t,       mainThreadCtx);
     44KERNEL_STORAGE(cluster,           mainCluster);
     45KERNEL_STORAGE(processor,         mainProcessor);
     46KERNEL_STORAGE(thread_desc,       mainThread);
     47KERNEL_STORAGE(machine_context_t, mainThreadCtx);
    11748
    11849cluster     * mainCluster;
     
    12354struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    12455}
    125 
    126 size_t __page_size = 0;
    12756
    12857//-----------------------------------------------------------------------------
     
    13160        NULL,
    13261        NULL,
     62        NULL,
    13363        { 1, false, false }
    13464};
     
    13767// Struct to steal stack
    13868struct current_stack_info_t {
    139         __stack_t * storage;            // pointer to stack object
     69        machine_context_t ctx;
     70        unsigned int size;              // size of stack
    14071        void *base;                             // base of stack
     72        void *storage;                  // pointer to stack
    14173        void *limit;                    // stack grows towards stack limit
    14274        void *context;                  // address of cfa_context_t
     75        void *top;                              // address of top of storage
    14376};
    14477
    14578void ?{}( current_stack_info_t & this ) {
    146         __stack_context_t ctx;
    147         CtxGet( ctx );
    148         this.base = ctx.FP;
     79        CtxGet( this.ctx );
     80        this.base = this.ctx.FP;
     81        this.storage = this.ctx.SP;
    14982
    15083        rlimit r;
    15184        getrlimit( RLIMIT_STACK, &r);
    152         size_t size = r.rlim_cur;
    153 
    154         this.limit = (void *)(((intptr_t)this.base) - size);
     85        this.size = r.rlim_cur;
     86
     87        this.limit = (void *)(((intptr_t)this.base) - this.size);
    15588        this.context = &storage_mainThreadCtx;
     89        this.top = this.base;
    15690}
    15791
    15892//-----------------------------------------------------------------------------
    15993// Main thread construction
     94void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
     95        size      = info->size;
     96        storage   = info->storage;
     97        limit     = info->limit;
     98        base      = info->base;
     99        context   = info->context;
     100        top       = info->top;
     101        userStack = true;
     102}
    160103
    161104void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    162         stack.storage = info->storage;
    163         with(*stack.storage) {
    164                 limit     = info->limit;
    165                 base      = info->base;
    166         }
    167         __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
    168         *istorage |= 0x1;
     105        stack{ info };
    169106        name = "Main Thread";
     107        errno_ = 0;
    170108        state = Start;
    171109        starter = NULL;
    172         last = NULL;
    173         cancellation = NULL;
    174110}
    175111
    176112void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
    177         state = Start;
    178113        self_cor{ info };
    179114        curr_cor = &self_cor;
     
    306241}
    307242
    308 static int * __volatile_errno() __attribute__((noinline));
    309 static int * __volatile_errno() { asm(""); return &errno; }
    310 
    311243// KERNEL ONLY
    312244// runThread runs a thread by context switching
    313245// from the processor coroutine to the target thread
    314 static void runThread(processor * this, thread_desc * thrd_dst) {
     246static void runThread(processor * this, thread_desc * dst) {
     247        assert(dst->curr_cor);
    315248        coroutine_desc * proc_cor = get_coroutine(this->runner);
     249        coroutine_desc * thrd_cor = dst->curr_cor;
    316250
    317251        // Reset the terminating actions here
     
    319253
    320254        // Update global state
    321         kernelTLS.this_thread = thrd_dst;
    322 
    323         // set state of processor coroutine to inactive and the thread to active
    324         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    325         thrd_dst->state = Active;
    326 
    327         // set context switch to the thread that the processor is executing
    328         verify( thrd_dst->context.SP );
    329         CtxSwitch( &proc_cor->context, &thrd_dst->context );
    330         // when CtxSwitch returns we are back in the processor coroutine
    331 
    332         // set state of processor coroutine to active and the thread to inactive
    333         thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
    334         proc_cor->state = Active;
     255        kernelTLS.this_thread = dst;
     256
     257        // Context Switch to the thread
     258        ThreadCtxSwitch(proc_cor, thrd_cor);
     259        // when ThreadCtxSwitch returns we are back in the processor coroutine
    335260}
    336261
     
    338263static void returnToKernel() {
    339264        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    340         thread_desc * thrd_src = kernelTLS.this_thread;
    341 
    342         // set state of current coroutine to inactive
    343         thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
    344         proc_cor->state = Active;
    345         int local_errno = *__volatile_errno();
    346         #if defined( __i386 ) || defined( __x86_64 )
    347                 __x87_store;
    348         #endif
    349 
    350         // set new coroutine that the processor is executing
    351         // and context switch to it
    352         verify( proc_cor->context.SP );
    353         CtxSwitch( &thrd_src->context, &proc_cor->context );
    354 
    355         // set state of new coroutine to active
    356         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    357         thrd_src->state = Active;
    358 
    359         #if defined( __i386 ) || defined( __x86_64 )
    360                 __x87_load;
    361         #endif
    362         *__volatile_errno() = local_errno;
     265        coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
     266        ThreadCtxSwitch(thrd_cor, proc_cor);
    363267}
    364268
     
    403307        processor * proc = (processor *) arg;
    404308        kernelTLS.this_processor = proc;
     309        kernelTLS.this_coroutine = NULL;
    405310        kernelTLS.this_thread    = NULL;
    406311        kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
     
    409314        // to waste the perfectly valid stack create by pthread.
    410315        current_stack_info_t info;
    411         __stack_t ctx;
    412         info.storage = &ctx;
     316        machine_context_t ctx;
     317        info.context = &ctx;
    413318        (proc->runner){ proc, &info };
    414319
    415         __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
     320        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
    416321
    417322        //Set global state
     323        kernelTLS.this_coroutine = get_coroutine(proc->runner);
    418324        kernelTLS.this_thread    = NULL;
    419325
     
    444350
    445351// KERNEL_ONLY
    446 void kernel_first_resume( processor * this ) {
    447         thread_desc * src = mainThread;
     352void kernel_first_resume(processor * this) {
     353        coroutine_desc * src = kernelTLS.this_coroutine;
    448354        coroutine_desc * dst = get_coroutine(this->runner);
    449355
    450356        verify( ! kernelTLS.preemption_state.enabled );
    451357
    452         __stack_prepare( &dst->stack, 65000 );
     358        create_stack(&dst->stack, dst->stack.size);
    453359        CtxStart(&this->runner, CtxInvokeCoroutine);
    454360
    455361        verify( ! kernelTLS.preemption_state.enabled );
    456362
    457         dst->last = &src->self_cor;
    458         dst->starter = dst->starter ? dst->starter : &src->self_cor;
     363        dst->last = src;
     364        dst->starter = dst->starter ? dst->starter : src;
    459365
    460366        // set state of current coroutine to inactive
    461367        src->state = src->state == Halted ? Halted : Inactive;
    462368
     369        // set new coroutine that task is executing
     370        kernelTLS.this_coroutine = dst;
     371
     372        // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
     373        // Therefore, when first creating a coroutine, interrupts are enable before calling the main.
     374        // This is consistent with thread creation. However, when creating the main processor coroutine,
     375        // we wan't interrupts to be disabled. Therefore, we double-disable interrupts here so they will
     376        // stay disabled.
     377        disable_interrupts();
     378
    463379        // context switch to specified coroutine
    464         verify( dst->context.SP );
    465         CtxSwitch( &src->context, &dst->context );
     380        assert( src->stack.context );
     381        CtxSwitch( src->stack.context, dst->stack.context );
    466382        // when CtxSwitch returns we are back in the src coroutine
    467383
     
    470386
    471387        verify( ! kernelTLS.preemption_state.enabled );
    472 }
    473 
    474 // KERNEL_ONLY
    475 void kernel_last_resume( processor * this ) {
    476         coroutine_desc * src = &mainThread->self_cor;
    477         coroutine_desc * dst = get_coroutine(this->runner);
    478 
    479         verify( ! kernelTLS.preemption_state.enabled );
    480         verify( dst->starter == src );
    481         verify( dst->context.SP );
    482 
    483         // context switch to the processor
    484         CtxSwitch( &src->context, &dst->context );
    485388}
    486389
     
    491394void ScheduleThread( thread_desc * thrd ) {
    492395        verify( thrd );
    493         verify( thrd->state != Halted );
     396        verify( thrd->self_cor.state != Halted );
    494397
    495398        verify( ! kernelTLS.preemption_state.enabled );
     
    648551        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    649552
    650         __page_size = sysconf( _SC_PAGESIZE );
    651 
    652553        __cfa_dbg_global_clusters.list{ __get };
    653554        __cfa_dbg_global_clusters.lock{};
     
    664565        mainThread = (thread_desc *)&storage_mainThread;
    665566        current_stack_info_t info;
    666         info.storage = (__stack_t*)&storage_mainThreadCtx;
    667567        (*mainThread){ &info };
    668568
     
    699599        kernelTLS.this_processor = mainProcessor;
    700600        kernelTLS.this_thread    = mainThread;
     601        kernelTLS.this_coroutine = &mainThread->self_cor;
    701602
    702603        // Enable preemption
     
    733634        // which is currently here
    734635        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    735         kernel_last_resume( kernelTLS.this_processor );
     636        returnToKernel();
    736637        mainThread->self_cor.state = Halted;
    737638
     
    819720                __cfaabi_dbg_bits_write( abort_text, len );
    820721
    821                 if ( &thrd->self_cor != thrd->curr_cor ) {
    822                         len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
     722                if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
     723                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
    823724                        __cfaabi_dbg_bits_write( abort_text, len );
    824725                }
Note: See TracChangeset for help on using the changeset viewer.