Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel.cfa

    rdeca0f5 r212c2187  
    3636#include "invoke.h"
    3737
    38 //-----------------------------------------------------------------------------
    39 // Some assembly required
    40 #if   defined( __i386 )
    41         #define CtxGet( ctx )        \
    42                 __asm__ volatile (     \
    43                         "movl %%esp,%0\n"\
    44                         "movl %%ebp,%1\n"\
    45                         : "=rm" (ctx.SP),\
    46                                 "=rm" (ctx.FP) \
    47                 )
    48 
    49         // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
    50         // fcw  : X87 FPU control word (preserved across function calls)
    51         #define __x87_store         \
    52                 uint32_t __mxcr;      \
    53                 uint16_t __fcw;       \
    54                 __asm__ volatile (    \
    55                         "stmxcsr %0\n"  \
    56                         "fnstcw  %1\n"  \
    57                         : "=m" (__mxcr),\
    58                                 "=m" (__fcw)  \
    59                 )
    60 
    61         #define __x87_load         \
    62                 __asm__ volatile (   \
    63                         "fldcw  %1\n"  \
    64                         "ldmxcsr %0\n" \
    65                         ::"m" (__mxcr),\
    66                                 "m" (__fcw)  \
    67                 )
    68 
    69 #elif defined( __x86_64 )
    70         #define CtxGet( ctx )        \
    71                 __asm__ volatile (     \
    72                         "movq %%rsp,%0\n"\
    73                         "movq %%rbp,%1\n"\
    74                         : "=rm" (ctx.SP),\
    75                                 "=rm" (ctx.FP) \
    76                 )
    77 
    78         #define __x87_store         \
    79                 uint32_t __mxcr;      \
    80                 uint16_t __fcw;       \
    81                 __asm__ volatile (    \
    82                         "stmxcsr %0\n"  \
    83                         "fnstcw  %1\n"  \
    84                         : "=m" (__mxcr),\
    85                                 "=m" (__fcw)  \
    86                 )
    87 
    88         #define __x87_load          \
    89                 __asm__ volatile (    \
    90                         "fldcw  %1\n"   \
    91                         "ldmxcsr %0\n"  \
    92                         :: "m" (__mxcr),\
    93                                 "m" (__fcw)  \
    94                 )
    95 
    96 
    97 #elif defined( __ARM_ARCH )
    98 #define CtxGet( ctx ) __asm__ ( \
    99                 "mov %0,%%sp\n"   \
    100                 "mov %1,%%r11\n"   \
    101         : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    102 #else
    103         #error unknown hardware architecture
    104 #endif
    105 
    106 //-----------------------------------------------------------------------------
    10738//Start and stop routine for the kernel, declared first to make sure they run first
    10839static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     
    11142//-----------------------------------------------------------------------------
    11243// Kernel storage
    113 KERNEL_STORAGE(cluster,         mainCluster);
    114 KERNEL_STORAGE(processor,       mainProcessor);
    115 KERNEL_STORAGE(thread_desc,     mainThread);
    116 KERNEL_STORAGE(__stack_t,       mainThreadCtx);
     44KERNEL_STORAGE(cluster,           mainCluster);
     45KERNEL_STORAGE(processor,         mainProcessor);
     46KERNEL_STORAGE(thread_desc,       mainThread);
     47KERNEL_STORAGE(machine_context_t, mainThreadCtx);
    11748
    11849cluster     * mainCluster;
     
    12354struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    12455}
    125 
    126 size_t __page_size = 0;
    12756
    12857//-----------------------------------------------------------------------------
     
    13766// Struct to steal stack
    13867struct current_stack_info_t {
    139         __stack_t * storage;            // pointer to stack object
     68        machine_context_t ctx;
     69        unsigned int size;              // size of stack
    14070        void *base;                             // base of stack
     71        void *storage;                  // pointer to stack
    14172        void *limit;                    // stack grows towards stack limit
    14273        void *context;                  // address of cfa_context_t
     74        void *top;                              // address of top of storage
    14375};
    14476
    14577void ?{}( current_stack_info_t & this ) {
    146         __stack_context_t ctx;
    147         CtxGet( ctx );
    148         this.base = ctx.FP;
     78        CtxGet( this.ctx );
     79        this.base = this.ctx.FP;
     80        this.storage = this.ctx.SP;
    14981
    15082        rlimit r;
    15183        getrlimit( RLIMIT_STACK, &r);
    152         size_t size = r.rlim_cur;
    153 
    154         this.limit = (void *)(((intptr_t)this.base) - size);
     84        this.size = r.rlim_cur;
     85
     86        this.limit = (void *)(((intptr_t)this.base) - this.size);
    15587        this.context = &storage_mainThreadCtx;
     88        this.top = this.base;
    15689}
    15790
    15891//-----------------------------------------------------------------------------
    15992// Main thread construction
     93void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
     94        size      = info->size;
     95        storage   = info->storage;
     96        limit     = info->limit;
     97        base      = info->base;
     98        context   = info->context;
     99        top       = info->top;
     100        userStack = true;
     101}
    160102
    161103void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    162         stack.storage = info->storage;
    163         with(*stack.storage) {
    164                 limit     = info->limit;
    165                 base      = info->base;
    166         }
    167         *((intptr_t*)&stack.storage) |= 0x1;
     104        stack{ info };
    168105        name = "Main Thread";
     106        errno_ = 0;
    169107        state = Start;
    170108        starter = NULL;
    171         last = NULL;
    172         cancellation = NULL;
    173109}
    174110
    175111void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
    176         state = Start;
    177112        self_cor{ info };
    178113        curr_cor = &self_cor;
     
    305240}
    306241
    307 static int * __volatile_errno() __attribute__((noinline));
    308 static int * __volatile_errno() { asm(""); return &errno; }
    309 
    310242// KERNEL ONLY
    311243// runThread runs a thread by context switching
    312244// from the processor coroutine to the target thread
    313 static void runThread(processor * this, thread_desc * thrd_dst) {
     245static void runThread(processor * this, thread_desc * dst) {
     246        assert(dst->curr_cor);
    314247        coroutine_desc * proc_cor = get_coroutine(this->runner);
     248        coroutine_desc * thrd_cor = dst->curr_cor;
    315249
    316250        // Reset the terminating actions here
     
    318252
    319253        // Update global state
    320         kernelTLS.this_thread = thrd_dst;
    321 
    322         // set state of processor coroutine to inactive and the thread to active
    323         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    324         thrd_dst->state = Active;
    325 
    326         // set context switch to the thread that the processor is executing
    327         verify( thrd_dst->context.SP );
    328         CtxSwitch( &proc_cor->context, &thrd_dst->context );
    329         // when CtxSwitch returns we are back in the processor coroutine
    330 
    331         // set state of processor coroutine to active and the thread to inactive
    332         thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
    333         proc_cor->state = Active;
     254        kernelTLS.this_thread = dst;
     255
     256        // Context Switch to the thread
     257        ThreadCtxSwitch(proc_cor, thrd_cor);
     258        // when ThreadCtxSwitch returns we are back in the processor coroutine
    334259}
    335260
     
    337262static void returnToKernel() {
    338263        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    339         thread_desc * thrd_src = kernelTLS.this_thread;
    340 
    341         // set state of current coroutine to inactive
    342         thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
    343         proc_cor->state = Active;
    344         int local_errno = *__volatile_errno();
    345         #if defined( __i386 ) || defined( __x86_64 )
    346                 __x87_store;
    347         #endif
    348 
    349         // set new coroutine that the processor is executing
    350         // and context switch to it
    351         verify( proc_cor->context.SP );
    352         CtxSwitch( &thrd_src->context, &proc_cor->context );
    353 
    354         // set state of new coroutine to active
    355         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    356         thrd_src->state = Active;
    357 
    358         #if defined( __i386 ) || defined( __x86_64 )
    359                 __x87_load;
    360         #endif
    361         *__volatile_errno() = local_errno;
     264        coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
     265        ThreadCtxSwitch(thrd_cor, proc_cor);
    362266}
    363267
     
    408312        // to waste the perfectly valid stack created by pthread.
    409313        current_stack_info_t info;
    410         __stack_t ctx;
    411         info.storage = &ctx;
     314        machine_context_t ctx;
     315        info.context = &ctx;
    412316        (proc->runner){ proc, &info };
    413317
    414         __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
     318        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
    415319
    416320        //Set global state
     
    443347
    444348// KERNEL_ONLY
    445 void kernel_first_resume( processor * this ) {
    446         thread_desc * src = mainThread;
     349void kernel_first_resume(processor * this) {
     350        coroutine_desc * src = mainThread->curr_cor;
    447351        coroutine_desc * dst = get_coroutine(this->runner);
    448352
    449353        verify( ! kernelTLS.preemption_state.enabled );
    450354
    451         __stack_prepare( &dst->stack, 65000 );
     355        create_stack(&dst->stack, dst->stack.size);
    452356        CtxStart(&this->runner, CtxInvokeCoroutine);
    453357
    454358        verify( ! kernelTLS.preemption_state.enabled );
    455359
    456         dst->last = &src->self_cor;
    457         dst->starter = dst->starter ? dst->starter : &src->self_cor;
     360        dst->last = src;
     361        dst->starter = dst->starter ? dst->starter : src;
    458362
    459363        // set state of current coroutine to inactive
    460364        src->state = src->state == Halted ? Halted : Inactive;
    461365
     366        // SKULLDUGGERY normally interrupts are enabled before leaving a coroutine ctxswitch.
     367        // Therefore, when first creating a coroutine, interrupts are enabled before calling the main.
     368        // This is consistent with thread creation. However, when creating the main processor coroutine,
     369        // we want interrupts to be disabled. Therefore, we double-disable interrupts here so they will
     370        // stay disabled.
     371        disable_interrupts();
     372
    462373        // context switch to specified coroutine
    463         verify( dst->context.SP );
    464         CtxSwitch( &src->context, &dst->context );
     374        assert( src->stack.context );
     375        CtxSwitch( src->stack.context, dst->stack.context );
    465376        // when CtxSwitch returns we are back in the src coroutine
    466377
     
    469380
    470381        verify( ! kernelTLS.preemption_state.enabled );
    471 }
    472 
    473 // KERNEL_ONLY
    474 void kernel_last_resume( processor * this ) {
    475         coroutine_desc * src = &mainThread->self_cor;
    476         coroutine_desc * dst = get_coroutine(this->runner);
    477 
    478         verify( ! kernelTLS.preemption_state.enabled );
    479         verify( dst->starter == src );
    480         verify( dst->context.SP );
    481 
    482         // context switch to the processor
    483         CtxSwitch( &src->context, &dst->context );
    484382}
    485383
     
    490388void ScheduleThread( thread_desc * thrd ) {
    491389        verify( thrd );
    492         verify( thrd->state != Halted );
     390        verify( thrd->self_cor.state != Halted );
    493391
    494392        verify( ! kernelTLS.preemption_state.enabled );
     
    647545        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    648546
    649         __page_size = sysconf( _SC_PAGESIZE );
    650 
    651547        __cfa_dbg_global_clusters.list{ __get };
    652548        __cfa_dbg_global_clusters.lock{};
     
    663559        mainThread = (thread_desc *)&storage_mainThread;
    664560        current_stack_info_t info;
    665         info.storage = (__stack_t*)&storage_mainThreadCtx;
    666561        (*mainThread){ &info };
    667562
     
    732627        // which is currently here
    733628        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    734         kernel_last_resume( kernelTLS.this_processor );
     629        returnToKernel();
    735630        mainThread->self_cor.state = Halted;
    736631
Note: See TracChangeset for help on using the changeset viewer.