Timestamp: May 24, 2019, 10:19:41 AM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: d908563
Parents: 6a9d4b4, 292642a
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to either parent individually.
Message: Merge branch 'master' into cleanup-dtors

File: 1 edited

Legend: added lines are prefixed with +, removed lines with -, unchanged context lines have no marker; … marks unchanged lines omitted between hunks.
  • libcfa/src/concurrency/kernel.cfa

Diff from r6a9d4b4 to r933f32f:

 #include "invoke.h"

+//-----------------------------------------------------------------------------
+// Some assembly required
+#if   defined( __i386 )
+        #define CtxGet( ctx )        \
+                __asm__ volatile (     \
+                        "movl %%esp,%0\n"\
+                        "movl %%ebp,%1\n"\
+                        : "=rm" (ctx.SP),\
+                                "=rm" (ctx.FP) \
+                )
+
+        // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
+        // fcw  : X87 FPU control word (preserved across function calls)
+        #define __x87_store         \
+                uint32_t __mxcr;      \
+                uint16_t __fcw;       \
+                __asm__ volatile (    \
+                        "stmxcsr %0\n"  \
+                        "fnstcw  %1\n"  \
+                        : "=m" (__mxcr),\
+                                "=m" (__fcw)  \
+                )
+
+        #define __x87_load         \
+                __asm__ volatile (   \
+                        "fldcw  %1\n"  \
+                        "ldmxcsr %0\n" \
+                        ::"m" (__mxcr),\
+                                "m" (__fcw)  \
+                )
+
+#elif defined( __x86_64 )
+        #define CtxGet( ctx )        \
+                __asm__ volatile (     \
+                        "movq %%rsp,%0\n"\
+                        "movq %%rbp,%1\n"\
+                        : "=rm" (ctx.SP),\
+                                "=rm" (ctx.FP) \
+                )
+
+        #define __x87_store         \
+                uint32_t __mxcr;      \
+                uint16_t __fcw;       \
+                __asm__ volatile (    \
+                        "stmxcsr %0\n"  \
+                        "fnstcw  %1\n"  \
+                        : "=m" (__mxcr),\
+                                "=m" (__fcw)  \
+                )
+
+        #define __x87_load          \
+                __asm__ volatile (    \
+                        "fldcw  %1\n"   \
+                        "ldmxcsr %0\n"  \
+                        :: "m" (__mxcr),\
+                                "m" (__fcw)  \
+                )
+
+
+#elif defined( __ARM_ARCH )
+#define CtxGet( ctx ) __asm__ ( \
+                "mov %0,%%sp\n"   \
+                "mov %1,%%r11\n"   \
+        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
+#else
+        #error unknown hardware architecture
+#endif
+
+//-----------------------------------------------------------------------------
 //Start and stop routine for the kernel, declared first to make sure they run first
 static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
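The macros added above give the kernel a portable way to capture the current stack and frame pointers (CtxGet) and, on x86, to save and restore the floating-point control state. As the comments note, the x87 control word and the MXCSR control bits are the only floating-point state the x86 calling conventions treat as preserved across calls, so a context switch between threads has to carry them along explicitly. A minimal stand-alone sketch of that save/restore idea in plain GCC inline assembly; the helper name and the bracketed call are illustrative, not part of the CFA kernel:

    #include <stdint.h>

    /* Hypothetical helper: preserve the callee-saved FP control state
       (x87 control word + MXCSR) around an operation that may clobber it,
       for example switching to another execution context. */
    static void with_fp_control_preserved( void (*op)(void) ) {
        uint32_t mxcsr;
        uint16_t fcw;
        __asm__ volatile ( "stmxcsr %0\n"      /* save SSE control/status register */
                           "fnstcw  %1\n"      /* save x87 control word */
                           : "=m" (mxcsr), "=m" (fcw) );
        op();
        __asm__ volatile ( "fldcw   %1\n"      /* restore x87 control word */
                           "ldmxcsr %0\n"      /* restore MXCSR */
                           :: "m" (mxcsr), "m" (fcw) );
    }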
     
…
 //-----------------------------------------------------------------------------
 // Kernel storage
-KERNEL_STORAGE(cluster,           mainCluster);
-KERNEL_STORAGE(processor,         mainProcessor);
-KERNEL_STORAGE(thread_desc,       mainThread);
-KERNEL_STORAGE(machine_context_t, mainThreadCtx);
+KERNEL_STORAGE(cluster,         mainCluster);
+KERNEL_STORAGE(processor,       mainProcessor);
+KERNEL_STORAGE(thread_desc,     mainThread);
+KERNEL_STORAGE(__stack_t,       mainThreadCtx);

 cluster     * mainCluster;
…
 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
 }
+
+size_t __page_size = 0;

 //-----------------------------------------------------------------------------
…
         NULL,
         NULL,
-        NULL,
         { 1, false, false }
 };
…
 // Struct to steal stack
 struct current_stack_info_t {
-        machine_context_t ctx;
-        unsigned int size;              // size of stack
+        __stack_t * storage;            // pointer to stack object
         void *base;                     // base of stack
-        void *storage;                  // pointer to stack
         void *limit;                    // stack grows towards stack limit
         void *context;                  // address of cfa_context_t
-        void *top;                      // address of top of storage
 };

 void ?{}( current_stack_info_t & this ) {
-        CtxGet( this.ctx );
-        this.base = this.ctx.FP;
-        this.storage = this.ctx.SP;
+        __stack_context_t ctx;
+        CtxGet( ctx );
+        this.base = ctx.FP;

         rlimit r;
         getrlimit( RLIMIT_STACK, &r);
-        this.size = r.rlim_cur;
-
-        this.limit = (void *)(((intptr_t)this.base) - this.size);
+        size_t size = r.rlim_cur;
+
+        this.limit = (void *)(((intptr_t)this.base) - size);
         this.context = &storage_mainThreadCtx;
-        this.top = this.base;
 }

 //-----------------------------------------------------------------------------
 // Main thread construction
-void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
-        size      = info->size;
-        storage   = info->storage;
-        limit     = info->limit;
-        base      = info->base;
-        context   = info->context;
-        top       = info->top;
-        userStack = true;
-}

 void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
-        stack{ info };
+        stack.storage = info->storage;
+        with(*stack.storage) {
+                limit     = info->limit;
+                base      = info->base;
+        }
+        __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
+        *istorage |= 0x1;
         name = "Main Thread";
-        errno_ = 0;
         state = Start;
         starter = NULL;
+        last = NULL;
+        cancellation = NULL;
 }

 void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
+        state = Start;
         self_cor{ info };
         curr_cor = &self_cor;
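One detail worth calling out in the main-thread constructor above: after pointing stack.storage at the externally supplied stack object, the code sets the low bit of that pointer (*istorage |= 0x1), presumably to mark the stack as one the runtime did not allocate and must not free. This low-bit tagging works because the stack object is aligned, so bit 0 of a genuine address is always zero. A small self-contained sketch of the technique, with hypothetical names rather than the actual CFA stack layout:

    #include <stdint.h>
    #include <assert.h>

    typedef struct stack_obj stack_obj;     /* opaque, assumed at least 2-byte aligned */

    /* Tag bit 0 to record "user-supplied stack" inside the pointer itself. */
    static inline stack_obj * tag_user_stack( stack_obj * p ) {
        assert( ((uintptr_t)p & 0x1) == 0 );                /* must be aligned */
        return (stack_obj *)((uintptr_t)p | 0x1);
    }
    static inline int is_user_stack( const stack_obj * p ) {
        return ((uintptr_t)p & 0x1) != 0;
    }
    static inline stack_obj * strip_tag( stack_obj * p ) {  /* recover the real address */
        return (stack_obj *)((uintptr_t)p & ~(uintptr_t)0x1);
    }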
     
…
 }

+static int * __volatile_errno() __attribute__((noinline));
+static int * __volatile_errno() { asm(""); return &errno; }
+
 // KERNEL ONLY
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
-static void runThread(processor * this, thread_desc * dst) {
-        assert(dst->curr_cor);
+static void runThread(processor * this, thread_desc * thrd_dst) {
         coroutine_desc * proc_cor = get_coroutine(this->runner);
-        coroutine_desc * thrd_cor = dst->curr_cor;

         // Reset the terminating actions here
…

         // Update global state
-        kernelTLS.this_thread = dst;
-
-        // Context Switch to the thread
-        ThreadCtxSwitch(proc_cor, thrd_cor);
-        // when ThreadCtxSwitch returns we are back in the processor coroutine
+        kernelTLS.this_thread = thrd_dst;
+
+        // set state of processor coroutine to inactive and the thread to active
+        proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
+        thrd_dst->state = Active;
+
+        // set context switch to the thread that the processor is executing
+        verify( thrd_dst->context.SP );
+        CtxSwitch( &proc_cor->context, &thrd_dst->context );
+        // when CtxSwitch returns we are back in the processor coroutine
+
+        // set state of processor coroutine to active and the thread to inactive
+        thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
+        proc_cor->state = Active;
 }

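The rewritten runThread above replaces the old coroutine-level ThreadCtxSwitch with a direct CtxSwitch plus an explicit state handshake: the processor coroutine is marked Inactive (unless it has already Halted) and the target thread Active before the switch, and the two are flipped back once CtxSwitch returns. A condensed sketch of that symmetric pattern; the enum and function names are illustrative, not the CFA API:

    /* Hypothetical condensation of the state handshake around a context switch. */
    typedef enum { Start, Active, Inactive, Halted } State;

    static void run_with_states( State * scheduler, State * target,
                                 void (*switch_to)(void) ) {
        *scheduler = (*scheduler == Halted) ? Halted : Inactive;  /* never revive a halted side */
        *target    = Active;

        switch_to();                /* returns when control comes back to the scheduler */

        *target    = (*target == Halted) ? Halted : Inactive;     /* the thread may have finished */
        *scheduler = Active;
    }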
     
…
 static void returnToKernel() {
         coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
-        coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
-        ThreadCtxSwitch(thrd_cor, proc_cor);
+        thread_desc * thrd_src = kernelTLS.this_thread;
+
+        // set state of current coroutine to inactive
+        thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
+        proc_cor->state = Active;
+        int local_errno = *__volatile_errno();
+        #if defined( __i386 ) || defined( __x86_64 )
+                __x87_store;
+        #endif
+
+        // set new coroutine that the processor is executing
+        // and context switch to it
+        verify( proc_cor->context.SP );
+        CtxSwitch( &thrd_src->context, &proc_cor->context );
+
+        // set state of new coroutine to active
+        proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
+        thrd_src->state = Active;
+
+        #if defined( __i386 ) || defined( __x86_64 )
+                __x87_load;
+        #endif
+        *__volatile_errno() = local_errno;
 }

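returnToKernel above now also carries errno across the switch: the value is read through __volatile_errno, a noinline wrapper around &errno whose empty asm keeps the compiler from caching the address across the call, and written back through a second call after CtxSwitch. This matters because errno is thread-local to the underlying kernel thread, and a user-level thread that blocks here may later be resumed by a different processor, where errno lives at a different address. A small sketch demonstrating the thread-local nature of errno, using only standard POSIX facilities:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Each thread has its own errno, hence its own &errno. */
    static void * show_errno_addr( void * unused ) {
        (void)unused;
        printf( "worker &errno = %p\n", (void *)&errno );
        return NULL;
    }

    int main( void ) {
        printf( "main   &errno = %p\n", (void *)&errno );
        pthread_t t;
        pthread_create( &t, NULL, show_errno_addr, NULL );
        pthread_join( t, NULL );
        return 0;                   /* build with: cc -pthread example.c */
    }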
     
…
         processor * proc = (processor *) arg;
         kernelTLS.this_processor = proc;
-        kernelTLS.this_coroutine = NULL;
         kernelTLS.this_thread    = NULL;
         kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
…
         // to waste the perfectly valid stack create by pthread.
         current_stack_info_t info;
-        machine_context_t ctx;
-        info.context = &ctx;
+        __stack_t ctx;
+        info.storage = &ctx;
         (proc->runner){ proc, &info };

-        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
+        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

         //Set global state
-        kernelTLS.this_coroutine = get_coroutine(proc->runner);
         kernelTLS.this_thread    = NULL;

…

 // KERNEL_ONLY
-void kernel_first_resume(processor * this) {
-        coroutine_desc * src = kernelTLS.this_coroutine;
+void kernel_first_resume( processor * this ) {
+        thread_desc * src = mainThread;
         coroutine_desc * dst = get_coroutine(this->runner);

         verify( ! kernelTLS.preemption_state.enabled );

-        create_stack(&dst->stack, dst->stack.size);
+        __stack_prepare( &dst->stack, 65000 );
         CtxStart(&this->runner, CtxInvokeCoroutine);

         verify( ! kernelTLS.preemption_state.enabled );

-        dst->last = src;
-        dst->starter = dst->starter ? dst->starter : src;
+        dst->last = &src->self_cor;
+        dst->starter = dst->starter ? dst->starter : &src->self_cor;

         // set state of current coroutine to inactive
         src->state = src->state == Halted ? Halted : Inactive;

-        // set new coroutine that task is executing
-        kernelTLS.this_coroutine = dst;
-
-        // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
-        // Therefore, when first creating a coroutine, interrupts are enable before calling the main.
-        // This is consistent with thread creation. However, when creating the main processor coroutine,
-        // we wan't interrupts to be disabled. Therefore, we double-disable interrupts here so they will
-        // stay disabled.
-        disable_interrupts();
-
         // context switch to specified coroutine
-        assert( src->stack.context );
-        CtxSwitch( src->stack.context, dst->stack.context );
+        verify( dst->context.SP );
+        CtxSwitch( &src->context, &dst->context );
         // when CtxSwitch returns we are back in the src coroutine

…

         verify( ! kernelTLS.preemption_state.enabled );
+}
+
+// KERNEL_ONLY
+void kernel_last_resume( processor * this ) {
+        coroutine_desc * src = &mainThread->self_cor;
+        coroutine_desc * dst = get_coroutine(this->runner);
+
+        verify( ! kernelTLS.preemption_state.enabled );
+        verify( dst->starter == src );
+        verify( dst->context.SP );
+
+        // context switch to the processor
+        CtxSwitch( &src->context, &dst->context );
 }

…
 void ScheduleThread( thread_desc * thrd ) {
         verify( thrd );
-        verify( thrd->self_cor.state != Halted );
+        verify( thrd->state != Halted );

         verify( ! kernelTLS.preemption_state.enabled );
…
         __cfaabi_dbg_print_safe("Kernel : Starting\n");

+        __page_size = sysconf( _SC_PAGESIZE );
+
         __cfa_dbg_global_clusters.list{ __get };
         __cfa_dbg_global_clusters.lock{};
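The startup hunk above caches the system page size once into the new __page_size global via sysconf(_SC_PAGESIZE), presumably so later stack-related code can use it without re-querying the system. A trivial sketch of that query together with a hypothetical helper that rounds a requested stack size up to whole pages (the helper is illustrative, not part of this changeset):

    #include <unistd.h>
    #include <stddef.h>

    static size_t page_size = 0;

    /* Query the page size once at startup; sysconf(_SC_PAGESIZE) is standard POSIX. */
    static void init_page_size( void ) {
        page_size = (size_t)sysconf( _SC_PAGESIZE );
    }

    /* Hypothetical helper: round a byte count up to a whole number of pages. */
    static size_t round_to_pages( size_t bytes ) {
        return (bytes + page_size - 1) / page_size * page_size;
    }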
     
…
         mainThread = (thread_desc *)&storage_mainThread;
         current_stack_info_t info;
+        info.storage = (__stack_t*)&storage_mainThreadCtx;
         (*mainThread){ &info };

…
         kernelTLS.this_processor = mainProcessor;
         kernelTLS.this_thread    = mainThread;
-        kernelTLS.this_coroutine = &mainThread->self_cor;

         // Enable preemption
…
         // which is currently here
         __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-        returnToKernel();
+        kernel_last_resume( kernelTLS.this_processor );
         mainThread->self_cor.state = Halted;

…
                 __cfaabi_dbg_bits_write( abort_text, len );

-                if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
-                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
+                if ( &thrd->self_cor != thrd->curr_cor ) {
+                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
                         __cfaabi_dbg_bits_write( abort_text, len );
                 }