Changes in / [8278abf:ec28948]


Ignore:
Files:
5 deleted
16 edited

Legend:

Unmodified
Added
Removed
  • benchmark/ctxswitch/cfa_cor.cfa

    r8278abf rec28948  
    1111}
    1212
    13 void main( __attribute__((unused)) GreatSuspender & this ) {
     13void main( GreatSuspender & this ) {
    1414        while( true ) {
    1515                suspend();
  • benchmark/ctxswitch/cfa_thrd2.cfa

    r8278abf rec28948  
    88thread Fibre {};
    99
    10 void main(__attribute__((unused)) Fibre & this) {
     10void main(Fibre & this) {
    1111        while(!done) {
    1212                yield();
  • libcfa/src/bits/containers.hfa

    r8278abf rec28948  
    186186
    187187        forall(dtype T | is_node(T))
    188         static inline bool ?!=?( __queue(T) & this, __attribute__((unused)) zero_t zero ) {
     188        static inline bool ?!=?( __queue(T) & this, zero_t zero ) {
    189189                return this.head != 0;
    190190        }
     
    196196//-----------------------------------------------------------------------------
    197197#ifdef __cforall
    198         forall(dtype TYPE)
     198        forall(dtype TYPE | sized(TYPE))
    199199        #define T TYPE
    200200        #define __getter_t * [T * & next, T * & prev] ( T & )
     
    268268
    269269        forall(dtype T | sized(T))
    270         static inline bool ?!=?( __dllist(T) & this, __attribute__((unused)) zero_t zero ) {
     270        static inline bool ?!=?( __dllist(T) & this, zero_t zero ) {
    271271                return this.head != 0;
    272272        }
  • libcfa/src/concurrency/CtxSwitch-i386.S

    r8278abf rec28948  
    4141#define PC_OFFSET       ( 2 * PTR_BYTE )
    4242
    43         .text
     43.text
    4444        .align 2
    45         .globl CtxSwitch
    46         .type  CtxSwitch, @function
     45.globl  CtxSwitch
    4746CtxSwitch:
    4847
     
    5150
    5251        movl 4(%esp),%eax
     52
     53        // Save floating & SSE control words on the stack.
     54
     55        sub    $8,%esp
     56        stmxcsr 0(%esp)         // 4 bytes
     57        fnstcw  4(%esp)         // 2 bytes
    5358
    5459        // Save volatile registers on the stack.
     
    6267        movl %esp,SP_OFFSET(%eax)
    6368        movl %ebp,FP_OFFSET(%eax)
     69//      movl 4(%ebp),%ebx       // save previous eip for debugger
     70//      movl %ebx,PC_OFFSET(%eax)
    6471
    6572        // Copy the "to" context argument from the stack to register eax
     
    8087        popl %ebx
    8188
     89        // Load floating & SSE control words from the stack.
     90
     91        fldcw   4(%esp)
     92        ldmxcsr 0(%esp)
     93        add    $8,%esp
     94
    8295        // Return to thread.
    8396
    8497        ret
    85         .size  CtxSwitch, .-CtxSwitch
    8698
    8799// Local Variables: //
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    r8278abf rec28948  
    3939#define SP_OFFSET       ( 0 * PTR_BYTE )
    4040#define FP_OFFSET       ( 1 * PTR_BYTE )
     41#define PC_OFFSET       ( 2 * PTR_BYTE )
    4142
    42 //-----------------------------------------------------------------------------
    43 // Regular context switch routine which enables switching from one context to anouther
    44         .text
     43.text
    4544        .align 2
    46         .globl CtxSwitch
    47         .type  CtxSwitch, @function
     45.globl  CtxSwitch
    4846CtxSwitch:
     47
     48        // Save floating & SSE control words on the stack.
     49
     50        subq   $8,%rsp
     51        stmxcsr 0(%rsp)         // 4 bytes
     52        fnstcw  4(%rsp)         // 2 bytes
    4953
    5054        // Save volatile registers on the stack.
     
    7478        popq %r15
    7579
    76         // Return to thread.
     80        // Load floating & SSE control words from the stack.
    7781
    78         ret
    79         .size  CtxSwitch, .-CtxSwitch
    80 
    81 //-----------------------------------------------------------------------------
    82 // Part of a 2 part context switch routine, use with CtxRet, stores the current context and then makes a function call
    83         .text
    84         .align 2
    85         .globl CtxStore
    86         .type  CtxStore, @function
    87 CtxStore:
    88 
    89         // Save volatile registers on the stack.
    90 
    91         pushq %r15
    92         pushq %r14
    93         pushq %r13
    94         pushq %r12
    95         pushq %rbx
    96 
    97         // Save old context in the "from" area.
    98 
    99         movq %rsp,SP_OFFSET(%rdi)
    100         movq %rbp,FP_OFFSET(%rdi)
    101 
    102         // Don't load a new context, directly jump to the desired function
    103 
    104         jmp *%rsi
    105         .size  CtxStore, .-CtxStore
    106 
    107 //-----------------------------------------------------------------------------
    108 // Part of a 2 part context switch routine, use with CtxStore, context switches to the desired target without saving the current context
    109         .text
    110         .align 2
    111         .globl CtxRet
    112         .type  CtxRet, @function
    113 CtxRet:
    114         // Load new context from the "to" area.
    115 
    116         movq SP_OFFSET(%rdi),%rsp
    117         movq FP_OFFSET(%rdi),%rbp
    118 
    119         // Load volatile registers from the stack.
    120 
    121         popq %rbx
    122         popq %r12
    123         popq %r13
    124         popq %r14
    125         popq %r15
     82        fldcw   4(%rsp)
     83        ldmxcsr 0(%rsp)
     84        addq   $8,%rsp
    12685
    12786        // Return to thread.
    12887
    12988        ret
    130         .size  CtxRet, .-CtxRet
     89
     90//.text
     91//      .align 2
     92//.globl        CtxStore
     93//CtxStore:
     94//      // Save floating & SSE control words on the stack.
     95//
     96//      subq   $8,%rsp
     97//      stmxcsr 0(%rsp)         // 4 bytes
     98//      fnstcw  4(%rsp)         // 2 bytes
     99//
     100//      // Save volatile registers on the stack.
     101//
     102//      pushq %r15
     103//      pushq %r14
     104//      pushq %r13
     105//      pushq %r12
     106//      pushq %rbx
     107//
     108//      // Save old context in the "from" area.
     109//
     110//      movq %rsp,SP_OFFSET(%rdi)
     111//      movq %rbp,FP_OFFSET(%rdi)
     112//
     113//      // Return to thread
     114//
     115//      ret
     116//
     117//.text
     118//      .align 2
     119//.globl        CtxRet
     120//CtxRet:
     121//      // Load new context from the "to" area.
     122//
     123//      movq SP_OFFSET(%rdi),%rsp
     124//      movq FP_OFFSET(%rdi),%rbp
     125//
     126//      // Load volatile registers from the stack.
     127//
     128//      popq %rbx
     129//      popq %r12
     130//      popq %r13
     131//      popq %r14
     132//      popq %r15
     133//
     134//      // Load floating & SSE control words from the stack.
     135//
     136//      fldcw   4(%rsp)
     137//      ldmxcsr 0(%rsp)
     138//      addq   $8,%rsp
     139//
     140//      // Return to thread.
     141//
     142//      ret
    131143
    132144
    133 //-----------------------------------------------------------------------------
    134 // Stub used to create new stacks which are ready to be context switched to
    135         .text
     145.text
    136146        .align 2
    137         .globl CtxInvokeStub
    138         .type    CtxInvokeStub, @function
     147.globl  CtxInvokeStub
    139148CtxInvokeStub:
    140149        movq %rbx, %rdi
    141150        jmp *%r12
    142         .size  CtxInvokeStub, .-CtxInvokeStub
    143151
    144152// Local Variables: //
  • libcfa/src/concurrency/coroutine.cfa

    r8278abf rec28948  
    3535
    3636extern "C" {
    37         void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    38         static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
    39         static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
    40                 abort();
    41         }
     37      void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     38      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
     39      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
     40            abort();
     41      }
    4242}
    4343
     
    4747// minimum feasible stack size in bytes
    4848#define MinStackSize 1000
    49 extern size_t __page_size;                              // architecture pagesize HACK, should go in proper runtime singleton
    50 
    51 void __stack_prepare( __stack_info_t * this, size_t create_size );
     49static size_t pageSize = 0;                             // architecture pagesize HACK, should go in proper runtime singleton
    5250
    5351//-----------------------------------------------------------------------------
    5452// Coroutine ctors and dtors
    55 void ?{}( __stack_info_t & this, void * storage, size_t storageSize ) {
    56         this.storage   = (__stack_t *)storage;
    57 
    58         // Did we get a piece of storage ?
    59         if (this.storage || storageSize != 0) {
    60                 // We either got a piece of storage or the user asked for a specific size
    61                 // Immediately create the stack
    62                 // (This is slightly unintuitive that non-default sized coroutines create are eagerly created
    63                 // but it avoids that all coroutines carry an unnecessary size)
    64                 verify( storageSize != 0 );
    65                 __stack_prepare( &this, storageSize );
    66         }
    67 }
    68 
    69 void ^?{}(__stack_info_t & this) {
    70         bool userStack = ((intptr_t)this.storage & 0x1) != 0;
    71         if ( ! userStack && this.storage ) {
    72                 __attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
    73                 *istorage &= (intptr_t)-1;
    74 
    75                 void * storage = this.storage->limit;
    76                 __cfaabi_dbg_debug_do(
    77                         storage = (char*)(storage) - __page_size;
    78                         if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
    79                                 abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
    80                         }
    81                 );
    82                 __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
    83                 free( storage );
    84         }
     53void ?{}( coStack_t & this, void * storage, size_t storageSize ) with( this ) {
     54      size               = storageSize == 0 ? 65000 : storageSize; // size of stack
     55      this.storage = storage;                                // pointer to stack
     56      limit              = NULL;                                   // stack grows towards stack limit
     57      base               = NULL;                                   // base of stack
     58      context    = NULL;                                   // address of cfa_context_t
     59      top                = NULL;                                   // address of top of storage
     60      userStack  = storage != NULL;
     61}
     62
     63void ^?{}(coStack_t & this) {
     64      if ( ! this.userStack && this.storage ) {
     65            __cfaabi_dbg_debug_do(
     66                  if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) {
     67                        abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
     68                  }
     69            );
     70            free( this.storage );
     71      }
    8572}
    8673
    8774void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
    88         (this.context){NULL, NULL};
    89         (this.stack){storage, storageSize};
    90         this.name = name;
    91         state = Start;
    92         starter = NULL;
    93         last = NULL;
    94         cancellation = NULL;
     75      (this.stack){storage, storageSize};
     76      this.name = name;
     77      errno_ = 0;
     78      state = Start;
     79      starter = NULL;
     80      last = NULL;
     81      cancellation = NULL;
    9582}
    9683
    9784void ^?{}(coroutine_desc& this) {
    98         if(this.state != Halted && this.state != Start) {
    99                 coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    100                 coroutine_desc * dst = &this;
    101 
    102                 struct _Unwind_Exception storage;
    103                 storage.exception_class = -1;
    104                 storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
    105                 this.cancellation = &storage;
    106                 this.last = src;
    107 
    108                 // not resuming self ?
    109                 if ( src == dst ) {
    110                         abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
    111                 }
    112 
    113                 CoroutineCtxSwitch( src, dst );
    114         }
     85      if(this.state != Halted && this.state != Start) {
     86            coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     87            coroutine_desc * dst = &this;
     88
     89            struct _Unwind_Exception storage;
     90            storage.exception_class = -1;
     91            storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
     92            this.cancellation = &storage;
     93            this.last = src;
     94
     95              // not resuming self ?
     96              if ( src == dst ) {
     97                      abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
     98            }
     99
     100              CoroutineCtxSwitch( src, dst );
     101      }
    115102}
    116103
     
    119106forall(dtype T | is_coroutine(T))
    120107void prime(T& cor) {
    121         coroutine_desc* this = get_coroutine(cor);
    122         assert(this->state == Start);
    123 
    124         this->state = Primed;
    125         resume(cor);
    126 }
    127 
    128 [void *, size_t] __stack_alloc( size_t storageSize ) {
    129         static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    130         assert(__page_size != 0l);
    131         size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
    132 
    133         // If we are running debug, we also need to allocate a guardpage to catch stack overflows.
    134         void * storage;
    135         __cfaabi_dbg_debug_do(
    136                 storage = memalign( __page_size, size + __page_size );
    137         );
    138         __cfaabi_dbg_no_debug_do(
    139                 storage = (void*)malloc(size);
    140         );
    141 
    142         __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
    143         __cfaabi_dbg_debug_do(
    144                 if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
    145                         abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
    146                 }
    147                 storage = (void *)(((intptr_t)storage) + __page_size);
    148         );
    149 
    150         verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
    151         return [storage, size];
    152 }
    153 
    154 void __stack_prepare( __stack_info_t * this, size_t create_size ) {
    155         static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    156         bool userStack;
    157         void * storage;
    158         size_t size;
    159         if ( !this->storage ) {
    160                 userStack = false;
    161                 [storage, size] = __stack_alloc( create_size );
    162         } else {
    163                 userStack = true;
    164                 __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%zd bytes)\n", this, this->storage, (intptr_t)this->storage->limit - (intptr_t)this->storage->base);
    165 
    166                 // The stack must be aligned, advance the pointer to the next align data
    167                 storage = (void*)libCeiling( (intptr_t)this->storage, libAlign());
    168 
    169                 // The size needs to be shrinked to fit all the extra data structure and be aligned
    170                 ptrdiff_t diff = (intptr_t)storage - (intptr_t)this->storage;
    171                 size = libFloor(create_size - stack_data_size - diff, libAlign());
    172         } // if
    173         assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
    174 
    175         this->storage = (__stack_t *)((intptr_t)storage + size);
    176         this->storage->limit = storage;
    177         this->storage->base  = (void*)((intptr_t)storage + size);
    178         __attribute__((may_alias)) intptr_t * istorage = (intptr_t*)&this->storage;
    179         *istorage |= userStack ? 0x1 : 0x0;
     108      coroutine_desc* this = get_coroutine(cor);
     109      assert(this->state == Start);
     110
     111      this->state = Primed;
     112      resume(cor);
     113}
     114
     115// Wrapper for co
     116void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
     117      // Safety note : Preemption must be disabled since there is a race condition
     118      // kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
     119      verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     120      disable_interrupts();
     121
     122      // set state of current coroutine to inactive
     123      src->state = src->state == Halted ? Halted : Inactive;
     124
     125      // set new coroutine that task is executing
     126      TL_GET( this_thread )->curr_cor = dst;
     127
     128      // context switch to specified coroutine
     129      assert( src->stack.context );
     130      CtxSwitch( src->stack.context, dst->stack.context );
     131      // when CtxSwitch returns we are back in the src coroutine
     132
     133      // set state of new coroutine to active
     134      src->state = Active;
     135
     136      enable_interrupts( __cfaabi_dbg_ctx );
     137      verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     138
     139
     140      if( unlikely(src->cancellation != NULL) ) {
     141            _CtxCoroutine_Unwind(src->cancellation, src);
     142      }
     143} //ctxSwitchDirect
     144
     145void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
     146      //TEMP HACK do this on proper kernel startup
     147      if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
     148
     149      size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
     150
     151      if ( !storage ) {
     152            __cfaabi_dbg_print_safe("Kernel : Creating stack of size %zu for stack obj %p\n", cxtSize + size + 8, this);
     153
     154            userStack = false;
     155            size = libCeiling( storageSize, 16 );
     156            // use malloc/memalign because "new" raises an exception for out-of-memory
     157
     158            // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
     159            __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
     160            __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
     161
     162            __cfaabi_dbg_debug_do(
     163                  if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
     164                        abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
     165                  } // if
     166            );
     167
     168            if ( (intptr_t)storage == 0 ) {
     169                  abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
     170            } // if
     171
     172            __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
     173            __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment
     174
     175      } else {
     176            __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%u bytes)\n", this, storage, storageSize);
     177
     178            assertf( ((size_t)storage & (libAlign() - 1)) == 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
     179            userStack = true;
     180            size = storageSize - cxtSize;
     181
     182            if ( size % 16 != 0u ) size -= 8;
     183
     184            limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
     185      } // if
     186      assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
     187
     188      base = (char *)limit + size;
     189      context = base;
     190      top = (char *)context + cxtSize;
    180191}
    181192
     
    183194// is not inline (We can't inline Cforall in C)
    184195extern "C" {
    185         void __suspend_internal(void) {
    186                 suspend();
    187         }
    188 
    189         void __leave_coroutine( coroutine_desc * src ) {
    190                 coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
    191 
    192                 src->state = Halted;
    193 
    194                 assertf( starter != 0,
    195                         "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
    196                         "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
    197                         src->name, src );
    198                 assertf( starter->state != Halted,
    199                         "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
    200                         "Possible cause is terminated coroutine's main routine has already returned.",
    201                         src->name, src, starter->name, starter );
    202 
    203                 CoroutineCtxSwitch( src, starter );
    204         }
     196      void __suspend_internal(void) {
     197            suspend();
     198      }
     199
     200      void __leave_coroutine( coroutine_desc * src ) {
     201            coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
     202
     203            src->state = Halted;
     204
     205            assertf( starter != 0,
     206                  "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
     207                  "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
     208                  src->name, src );
     209            assertf( starter->state != Halted,
     210                  "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
     211                  "Possible cause is terminated coroutine's main routine has already returned.",
     212                  src->name, src, starter->name, starter );
     213
     214            CoroutineCtxSwitch( src, starter );
     215      }
    205216}
    206217
  • libcfa/src/concurrency/coroutine.hfa

    r8278abf rec28948  
    6464      forall(dtype T | is_coroutine(T))
    6565      void CtxStart(T * this, void ( *invoke)(T *));
    66 
    67         extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    68 
    69         extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    70         extern void CtxStore ( struct __stack_context_t * from, __attribute__((noreturn)) void (*__callback)(void) ) asm ("CtxStore");
    71         extern void CtxRet   ( struct __stack_context_t * to ) asm ("CtxRet") __attribute__ ((__noreturn__));
    7266}
    7367
    7468// Private wrappers for context switch and stack creation
    75 // Wrapper for co
    76 static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    77         // set state of current coroutine to inactive
    78         src->state = src->state == Halted ? Halted : Inactive;
    79 
    80         // set new coroutine that task is executing
    81         TL_GET( this_thread )->curr_cor = dst;
    82 
    83         // context switch to specified coroutine
    84         verify( dst->context.SP );
    85         CtxSwitch( &src->context, &dst->context );
    86         // when CtxSwitch returns we are back in the src coroutine
    87 
    88         // set state of new coroutine to active
    89         src->state = Active;
    90 
    91         if( unlikely(src->cancellation != NULL) ) {
    92                 _CtxCoroutine_Unwind(src->cancellation, src);
    93         }
    94 }
    95 
    96 extern void __stack_prepare   ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
     69extern void CoroutineCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
     70extern void create_stack( coStack_t * this, unsigned int storageSize );
    9771
    9872// Suspend implementation inlined for performance
     
    128102        coroutine_desc * dst = get_coroutine(cor);
    129103
    130         if( unlikely(dst->context.SP == NULL) ) {
    131                 __stack_prepare(&dst->stack, 65000);
     104        if( unlikely(!dst->stack.base) ) {
     105                create_stack(&dst->stack, dst->stack.size);
    132106                CtxStart(&cor, CtxInvokeCoroutine);
    133107        }
     
    172146}
    173147
    174 static inline void suspend_then(fptr_t call) {
    175         // optimization : read TLS once and reuse it
    176         // Safety note: this is preemption safe since if
    177         // preemption occurs after this line, the pointer
    178         // will also migrate which means this value will
    179         // stay in syn with the TLS
    180         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    181 
    182         assertf( src->last != 0,
    183                 "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n"
    184                 "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
    185                 src->name, src );
    186         assertf( src->last->state != Halted,
    187                 "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n"
    188                 "Possible cause is terminated coroutine's main routine has already returned.",
    189                 src->name, src, src->last->name, src->last );
    190 
    191         src->state = PreInactive;
    192 
    193       // context switch to specified coroutine
    194       assert( src->context.SP );
    195 
    196         __attribute__((noreturn)) void __suspend_callback(void) {
    197                 call();
    198 
    199                 // set state of current coroutine to inactive
    200                 src->state = src->state == Halted ? Halted : Inactive;
    201 
    202                 TL_GET( this_thread )->curr_cor = src->last;
    203 
    204                 // context switch to specified coroutine
    205                 assert( src->last->context.SP );
    206                 CtxRet( &src->last->context );
    207 
    208                 abort();
    209         }
    210       CtxStore( &src->context, __suspend_callback );
    211         // when CtxStore returns we are back in the src coroutine
    212 
    213         // set state of new coroutine to active
    214         src->state = Active;
    215 
    216         if( unlikely(src->cancellation != NULL) ) {
    217                 _CtxCoroutine_Unwind(src->cancellation, src);
    218         }
    219 
    220         return;
    221 }
     148
     149
     150// static inline bool suspend_checkpoint(void) {
     151//      // optimization : read TLS once and reuse it
     152//      // Safety note: this is preemption safe since if
     153//      // preemption occurs after this line, the pointer
     154//      // will also migrate which means this value will
     155//      // stay in syn with the TLS
     156//      // set state of current coroutine to inactive
     157//       this->state = Checkpoint;
     158
     159//       // context switch to specified coroutine
     160//       assert( src->stack.context );
     161
     162//       CtxStore(src->stack.context);
     163
     164//      bool ret = this->state == Checkpoint;
     165
     166//       // set state of new coroutine to active
     167//       src->state = Active;
     168
     169//       enable_interrupts( __cfaabi_dbg_ctx );
     170//       // Safety note : This could cause some false positives due to preemption
     171//       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     172
     173//       if( unlikely(src->cancellation != NULL) ) {
     174//             _CtxCoroutine_Unwind(src->cancellation);
     175//       }
     176
     177//      return ret;
     178// }
    222179
    223180// static inline void suspend_return(void) {
  • libcfa/src/concurrency/invoke.c

    r8278abf rec28948  
    2929extern void __suspend_internal(void);
    3030extern void __leave_coroutine( struct coroutine_desc * );
    31 extern void __finish_creation( struct thread_desc * );
     31extern void __finish_creation( struct coroutine_desc * );
    3232extern void __leave_thread_monitor( struct thread_desc * this );
    3333extern void disable_interrupts();
     
    4646
    4747        cor->state = Active;
     48
     49        enable_interrupts( __cfaabi_dbg_ctx );
    4850
    4951        main( this );
     
    9193        // First suspend, once the thread arrives here,
    9294        // the function pointer to main can be invalidated without risk
    93         __finish_creation( thrd );
     95        __finish_creation(&thrd->self_cor);
     96
     97        // Restore the last to NULL, we clobbered because of the thunk problem
     98        thrd->self_cor.last = NULL;
    9499
    95100        // Officially start the thread by enabling preemption
     
    117122        void (*invoke)(void *)
    118123) {
    119         struct coroutine_desc * cor = get_coroutine( this );
    120         struct __stack_t * stack = cor->stack.storage;
     124        struct coStack_t* stack = &get_coroutine( this )->stack;
    121125
    122126#if defined( __i386 )
     
    124128        struct FakeStack {
    125129            void *fixedRegisters[3];                    // fixed registers ebx, edi, esi (popped on 1st uSwitch, values unimportant)
     130            uint32_t mxcr;                        // SSE Status and Control bits (control bits are preserved across function calls)
     131            uint16_t fcw;                         // X87 FPU control word (preserved across function calls)
    126132            void *rturn;                          // where to go on return from uSwitch
    127133            void *dummyReturn;                          // fake return compiler would have pushed on call to uInvoke
     
    130136        };
    131137
    132         cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
    133         cor->context.FP = NULL;         // terminate stack with NULL fp
     138        ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
     139        ((struct machine_context_t *)stack->context)->FP = NULL;                // terminate stack with NULL fp
    134140
    135         struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
    136 
    137         fs->dummyReturn = NULL;
    138         fs->argument[0] = this;     // argument to invoke
    139         fs->rturn = invoke;
     141        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
     142        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->argument[0] = this;     // argument to invoke
     143        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
     144        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
     145        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    140146
    141147#elif defined( __x86_64 )
     
    143149        struct FakeStack {
    144150                void *fixedRegisters[5];            // fixed registers rbx, r12, r13, r14, r15
     151                uint32_t mxcr;                      // SSE Status and Control bits (control bits are preserved across function calls)
     152                uint16_t fcw;                       // X87 FPU control word (preserved across function calls)
    145153                void *rturn;                        // where to go on return from uSwitch
    146154                void *dummyReturn;                  // NULL return address to provide proper alignment
    147155        };
    148156
    149         cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
    150         cor->context.FP = NULL;         // terminate stack with NULL fp
     157        ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
     158        ((struct machine_context_t *)stack->context)->FP = NULL;                // terminate stack with NULL fp
    151159
    152         struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
    153 
    154         fs->dummyReturn = NULL;
    155         fs->rturn = CtxInvokeStub;
    156         fs->fixedRegisters[0] = this;
    157         fs->fixedRegisters[1] = invoke;
     160        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
     161        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = CtxInvokeStub;
     162        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[0] = this;
     163        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
     164        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
     165        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    158166
    159167#elif defined( __ARM_ARCH )
     
    165173        };
    166174
    167         cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
    168         cor->context.FP = NULL;
     175        ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
     176        ((struct machine_context_t *)stack->context)->FP = NULL;
    169177
    170         struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
     178        struct FakeStack *fs = (struct FakeStack *)((struct machine_context_t *)stack->context)->SP;
    171179
    172180        fs->intRegs[8] = CtxInvokeStub;
  • libcfa/src/concurrency/invoke.h

    r8278abf rec28948  
    6262        #endif
    6363
    64         struct __stack_context_t {
    65                 void * SP;
    66                 void * FP;
    67         };
    68 
    69         // low adresses  :           +----------------------+ <- start of allocation
    70         //                           |  optional guard page |
    71         //                           +----------------------+ <- __stack_t.limit
    72         //                           |                      |
    73         //                           |       /\ /\ /\       |
    74         //                           |       || || ||       |
    75         //                           |                      |
    76         //                           |    program  stack    |
    77         //                           |                      |
    78         // __stack_info_t.storage -> +----------------------+ <- __stack_t.base
    79         //                           |      __stack_t       |
    80         // high adresses :           +----------------------+ <- end of allocation
    81 
    82         struct __stack_t {
    83                 // stack grows towards stack limit
    84                 void * limit;
    85 
    86                 // base of stack
    87                 void * base;
    88         };
    89 
    90         struct __stack_info_t {
    91                 // pointer to stack
    92                 struct __stack_t * storage;
    93         };
    94 
    95         enum coroutine_state { Halted, Start, Inactive, Active, Primed, PreInactive };
     64        struct coStack_t {
     65                size_t size;                                                                    // size of stack
     66                void * storage;                                                                 // pointer to stack
     67                void * limit;                                                                   // stack grows towards stack limit
     68                void * base;                                                                    // base of stack
     69                void * context;                                                                 // address of cfa_context_t
     70                void * top;                                                                             // address of top of storage
     71                bool userStack;                                                                 // whether or not the user allocated the stack
     72        };
     73
     74        enum coroutine_state { Halted, Start, Inactive, Active, Primed };
    9675
    9776        struct coroutine_desc {
    98                 // context that is switch during a CtxSwitch
    99                 struct __stack_context_t context;
    100 
    10177                // stack information of the coroutine
    102                 struct __stack_info_t stack;
    103 
    104                 // textual name for coroutine/task
     78                struct coStack_t stack;
     79
     80                // textual name for coroutine/task, initialized by uC++ generated code
    10581                const char * name;
     82
     83                // copy of global UNIX variable errno
     84                int errno_;
    10685
    10786                // current execution status for coroutine
    10887                enum coroutine_state state;
    109 
    11088                // first coroutine to resume this one
    11189                struct coroutine_desc * starter;
     
    161139        struct thread_desc {
    162140                // Core threading fields
    163                 // context that is switch during a CtxSwitch
    164                 struct __stack_context_t context;
    165 
    166                 // current execution status for coroutine
    167                 enum coroutine_state state;
    168 
    169                 //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
    170 
    171141                // coroutine body used to store context
    172142                struct coroutine_desc  self_cor;
     
    199169        #ifdef __cforall
    200170        extern "Cforall" {
    201                 static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
    202                 static inline struct thread_desc    * active_thread   () { return TL_GET( this_thread    ); }
    203                 static inline struct processor      * active_processor() { return TL_GET( this_processor ); } // UNSAFE
     171                static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_thread )->curr_cor; }
     172                static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
     173                static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
    204174
    205175                static inline thread_desc * & get_next( thread_desc & this ) {
     
    260230        // assembler routines that performs the context switch
    261231        extern void CtxInvokeStub( void );
    262         extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
     232        void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
    263233        // void CtxStore ( void * this ) asm ("CtxStore");
    264234        // void CtxRet   ( void * dst  ) asm ("CtxRet");
     235
     236        #if   defined( __i386 )
     237        #define CtxGet( ctx ) __asm__ ( \
     238                        "movl %%esp,%0\n"   \
     239                        "movl %%ebp,%1\n"   \
     240                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     241        #elif defined( __x86_64 )
     242        #define CtxGet( ctx ) __asm__ ( \
     243                        "movq %%rsp,%0\n"   \
     244                        "movq %%rbp,%1\n"   \
     245                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     246        #elif defined( __ARM_ARCH )
     247        #define CtxGet( ctx ) __asm__ ( \
     248                        "mov %0,%%sp\n"   \
     249                        "mov %1,%%r11\n"   \
     250                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     251        #else
     252                #error unknown hardware architecture
     253        #endif
    265254
    266255#endif //_INVOKE_PRIVATE_H_
  • libcfa/src/concurrency/kernel.cfa

    r8278abf rec28948  
    3636#include "invoke.h"
    3737
    38 //-----------------------------------------------------------------------------
    39 // Some assembly required
    40 #if   defined( __i386 )
    41         #define CtxGet( ctx )        \
    42                 __asm__ volatile (     \
    43                         "movl %%esp,%0\n"\
    44                         "movl %%ebp,%1\n"\
    45                         : "=rm" (ctx.SP),\
    46                                 "=rm" (ctx.FP) \
    47                 )
    48 
    49         // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
    50         // fcw  : X87 FPU control word (preserved across function calls)
    51         #define __x87_store         \
    52                 uint32_t __mxcr;      \
    53                 uint16_t __fcw;       \
    54                 __asm__ volatile (    \
    55                         "stmxcsr %0\n"  \
    56                         "fnstcw  %1\n"  \
    57                         : "=m" (__mxcr),\
    58                                 "=m" (__fcw)  \
    59                 )
    60 
    61         #define __x87_load         \
    62                 __asm__ volatile (   \
    63                         "fldcw  %1\n"  \
    64                         "ldmxcsr %0\n" \
    65                         ::"m" (__mxcr),\
    66                                 "m" (__fcw)  \
    67                 )
    68 
    69 #elif defined( __x86_64 )
    70         #define CtxGet( ctx )        \
    71                 __asm__ volatile (     \
    72                         "movq %%rsp,%0\n"\
    73                         "movq %%rbp,%1\n"\
    74                         : "=rm" (ctx.SP),\
    75                                 "=rm" (ctx.FP) \
    76                 )
    77 
    78         #define __x87_store         \
    79                 uint32_t __mxcr;      \
    80                 uint16_t __fcw;       \
    81                 __asm__ volatile (    \
    82                         "stmxcsr %0\n"  \
    83                         "fnstcw  %1\n"  \
    84                         : "=m" (__mxcr),\
    85                                 "=m" (__fcw)  \
    86                 )
    87 
    88         #define __x87_load          \
    89                 __asm__ volatile (    \
    90                         "fldcw  %1\n"   \
    91                         "ldmxcsr %0\n"  \
    92                         :: "m" (__mxcr),\
    93                                 "m" (__fcw)  \
    94                 )
    95 
    96 
    97 #elif defined( __ARM_ARCH )
    98 #define CtxGet( ctx ) __asm__ ( \
    99                 "mov %0,%%sp\n"   \
    100                 "mov %1,%%r11\n"   \
    101         : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    102 #else
    103         #error unknown hardware architecture
    104 #endif
    105 
    106 //-----------------------------------------------------------------------------
    10738//Start and stop routine for the kernel, declared first to make sure they run first
    10839static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     
    11142//-----------------------------------------------------------------------------
    11243// Kernel storage
    113 KERNEL_STORAGE(cluster,         mainCluster);
    114 KERNEL_STORAGE(processor,       mainProcessor);
    115 KERNEL_STORAGE(thread_desc,     mainThread);
    116 KERNEL_STORAGE(__stack_t,       mainThreadCtx);
     44KERNEL_STORAGE(cluster,           mainCluster);
     45KERNEL_STORAGE(processor,         mainProcessor);
     46KERNEL_STORAGE(thread_desc,       mainThread);
     47KERNEL_STORAGE(machine_context_t, mainThreadCtx);
    11748
    11849cluster     * mainCluster;
     
    12354struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    12455}
    125 
    126 size_t __page_size = 0;
    12756
    12857//-----------------------------------------------------------------------------
     
    13766// Struct to steal stack
    13867struct current_stack_info_t {
    139         __stack_t * storage;            // pointer to stack object
     68        machine_context_t ctx;
     69        unsigned int size;              // size of stack
    14070        void *base;                             // base of stack
     71        void *storage;                  // pointer to stack
    14172        void *limit;                    // stack grows towards stack limit
    14273        void *context;                  // address of cfa_context_t
     74        void *top;                              // address of top of storage
    14375};
    14476
    14577void ?{}( current_stack_info_t & this ) {
    146         __stack_context_t ctx;
    147         CtxGet( ctx );
    148         this.base = ctx.FP;
     78        CtxGet( this.ctx );
     79        this.base = this.ctx.FP;
     80        this.storage = this.ctx.SP;
    14981
    15082        rlimit r;
    15183        getrlimit( RLIMIT_STACK, &r);
    152         size_t size = r.rlim_cur;
    153 
    154         this.limit = (void *)(((intptr_t)this.base) - size);
     84        this.size = r.rlim_cur;
     85
     86        this.limit = (void *)(((intptr_t)this.base) - this.size);
    15587        this.context = &storage_mainThreadCtx;
     88        this.top = this.base;
    15689}
    15790
    15891//-----------------------------------------------------------------------------
    15992// Main thread construction
     93void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
     94        size      = info->size;
     95        storage   = info->storage;
     96        limit     = info->limit;
     97        base      = info->base;
     98        context   = info->context;
     99        top       = info->top;
     100        userStack = true;
     101}
    160102
    161103void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    162         stack.storage = info->storage;
    163         with(*stack.storage) {
    164                 limit     = info->limit;
    165                 base      = info->base;
    166         }
    167         __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
    168         *istorage |= 0x1;
     104        stack{ info };
    169105        name = "Main Thread";
     106        errno_ = 0;
    170107        state = Start;
    171108        starter = NULL;
    172         last = NULL;
    173         cancellation = NULL;
    174109}
    175110
    176111void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
    177         state = Start;
    178112        self_cor{ info };
    179113        curr_cor = &self_cor;
     
    306240}
    307241
    308 static int * __volatile_errno() __attribute__((noinline));
    309 static int * __volatile_errno() { asm(""); return &errno; }
    310 
    311242// KERNEL ONLY
    312243// runThread runs a thread by context switching
    313244// from the processor coroutine to the target thread
    314 static void runThread(processor * this, thread_desc * thrd_dst) {
     245static void runThread(processor * this, thread_desc * dst) {
     246        assert(dst->curr_cor);
    315247        coroutine_desc * proc_cor = get_coroutine(this->runner);
     248        coroutine_desc * thrd_cor = dst->curr_cor;
    316249
    317250        // Reset the terminating actions here
     
    319252
    320253        // Update global state
    321         kernelTLS.this_thread = thrd_dst;
    322 
    323         // set state of processor coroutine to inactive and the thread to active
    324         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    325         thrd_dst->state = Active;
    326 
    327         // set context switch to the thread that the processor is executing
    328         verify( thrd_dst->context.SP );
    329         CtxSwitch( &proc_cor->context, &thrd_dst->context );
    330         // when CtxSwitch returns we are back in the processor coroutine
    331 
    332         // set state of processor coroutine to active and the thread to inactive
    333         thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
    334         proc_cor->state = Active;
     254        kernelTLS.this_thread = dst;
     255
     256        // Context Switch to the thread
     257        ThreadCtxSwitch(proc_cor, thrd_cor);
     258        // when ThreadCtxSwitch returns we are back in the processor coroutine
    335259}
    336260
     
    338262static void returnToKernel() {
    339263        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    340         thread_desc * thrd_src = kernelTLS.this_thread;
    341 
    342         // set state of current coroutine to inactive
    343         thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
    344         proc_cor->state = Active;
    345         int local_errno = *__volatile_errno();
    346         #if defined( __i386 ) || defined( __x86_64 )
    347                 __x87_store;
    348         #endif
    349 
    350         // set new coroutine that the processor is executing
    351         // and context switch to it
    352         verify( proc_cor->context.SP );
    353         CtxSwitch( &thrd_src->context, &proc_cor->context );
    354 
    355         // set state of new coroutine to active
    356         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    357         thrd_src->state = Active;
    358 
    359         #if defined( __i386 ) || defined( __x86_64 )
    360                 __x87_load;
    361         #endif
    362         *__volatile_errno() = local_errno;
     264        coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
     265        ThreadCtxSwitch(thrd_cor, proc_cor);
    363266}
    364267
     
    409312        // to waste the perfectly valid stack create by pthread.
    410313        current_stack_info_t info;
    411         __stack_t ctx;
    412         info.storage = &ctx;
     314        machine_context_t ctx;
     315        info.context = &ctx;
    413316        (proc->runner){ proc, &info };
    414317
    415         __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
     318        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
    416319
    417320        //Set global state
     
    444347
    445348// KERNEL_ONLY
    446 void kernel_first_resume( processor * this ) {
    447         thread_desc * src = mainThread;
     349void kernel_first_resume(processor * this) {
     350        coroutine_desc * src = mainThread->curr_cor;
    448351        coroutine_desc * dst = get_coroutine(this->runner);
    449352
    450353        verify( ! kernelTLS.preemption_state.enabled );
    451354
    452         __stack_prepare( &dst->stack, 65000 );
     355        create_stack(&dst->stack, dst->stack.size);
    453356        CtxStart(&this->runner, CtxInvokeCoroutine);
    454357
    455358        verify( ! kernelTLS.preemption_state.enabled );
    456359
    457         dst->last = &src->self_cor;
    458         dst->starter = dst->starter ? dst->starter : &src->self_cor;
     360        dst->last = src;
     361        dst->starter = dst->starter ? dst->starter : src;
    459362
    460363        // set state of current coroutine to inactive
    461364        src->state = src->state == Halted ? Halted : Inactive;
    462365
      366        // SKULLDUGGERY normally interrupts are enabled before leaving a coroutine ctxswitch.
      367        // Therefore, when first creating a coroutine, interrupts are enabled before calling the main.
      368        // This is consistent with thread creation. However, when creating the main processor coroutine,
      369        // we want interrupts to be disabled. Therefore, we double-disable interrupts here so they will
     370        // stay disabled.
     371        disable_interrupts();
     372
    463373        // context switch to specified coroutine
    464         verify( dst->context.SP );
    465         CtxSwitch( &src->context, &dst->context );
     374        assert( src->stack.context );
     375        CtxSwitch( src->stack.context, dst->stack.context );
    466376        // when CtxSwitch returns we are back in the src coroutine
    467377
     
    470380
    471381        verify( ! kernelTLS.preemption_state.enabled );
    472 }
    473 
    474 // KERNEL_ONLY
    475 void kernel_last_resume( processor * this ) {
    476         coroutine_desc * src = &mainThread->self_cor;
    477         coroutine_desc * dst = get_coroutine(this->runner);
    478 
    479         verify( ! kernelTLS.preemption_state.enabled );
    480         verify( dst->starter == src );
    481         verify( dst->context.SP );
    482 
    483         // context switch to the processor
    484         CtxSwitch( &src->context, &dst->context );
    485382}
    486383
     
    491388void ScheduleThread( thread_desc * thrd ) {
    492389        verify( thrd );
    493         verify( thrd->state != Halted );
     390        verify( thrd->self_cor.state != Halted );
    494391
    495392        verify( ! kernelTLS.preemption_state.enabled );
     
    648545        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    649546
    650         __page_size = sysconf( _SC_PAGESIZE );
    651 
    652547        __cfa_dbg_global_clusters.list{ __get };
    653548        __cfa_dbg_global_clusters.lock{};
     
    664559        mainThread = (thread_desc *)&storage_mainThread;
    665560        current_stack_info_t info;
    666         info.storage = (__stack_t*)&storage_mainThreadCtx;
    667561        (*mainThread){ &info };
    668562
     
    733627        // which is currently here
    734628        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    735         kernel_last_resume( kernelTLS.this_processor );
     629        returnToKernel();
    736630        mainThread->self_cor.state = Halted;
    737631
  • libcfa/src/concurrency/thread.cfa

    r8278abf rec28948  
    3131// Thread ctors and dtors
    3232void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
    33         context{ NULL, NULL };
    3433        self_cor{ name, storage, storageSize };
    35         state = Start;
     34        verify(&self_cor);
    3635        curr_cor = &self_cor;
    3736        self_mon.owner = &this;
     
    7473forall( dtype T | is_thread(T) )
    7574void __thrd_start( T& this ) {
    76         thread_desc * this_thrd = get_thread(this);
    77         thread_desc * curr_thrd = TL_GET( this_thread );
     75        coroutine_desc* thrd_c = get_coroutine(this);
     76        thread_desc   * thrd_h = get_thread   (this);
     77        thrd_c->last = TL_GET( this_thread )->curr_cor;
     78
     79        // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
    7880
    7981        disable_interrupts();
     82        create_stack(&thrd_c->stack, thrd_c->stack.size);
    8083        CtxStart(&this, CtxInvokeThread);
    81         this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
    82         verify( this_thrd->context.SP );
    83         CtxSwitch( &curr_thrd->context, &this_thrd->context );
     84        assert( thrd_c->last->stack.context );
     85        CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
    8486
    85         ScheduleThread(this_thrd);
     87        ScheduleThread(thrd_h);
    8688        enable_interrupts( __cfaabi_dbg_ctx );
    8789}
     
    8991extern "C" {
    9092        // KERNEL ONLY
    91         void __finish_creation(thread_desc * this) {
    92                 // set new coroutine that the processor is executing
    93                 // and context switch to it
    94                 verify( kernelTLS.this_thread != this );
    95                 verify( kernelTLS.this_thread->context.SP );
    96                 CtxSwitch( &this->context, &kernelTLS.this_thread->context );
     93        void __finish_creation(coroutine_desc * thrd_c) {
     94                ThreadCtxSwitch( thrd_c, thrd_c->last );
    9795        }
    9896}
     
    112110}
    113111
     112// KERNEL ONLY
     113void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
     114        // set state of current coroutine to inactive
     115        src->state = src->state == Halted ? Halted : Inactive;
     116        dst->state = Active;
     117
     118        // set new coroutine that the processor is executing
     119        // and context switch to it
     120        assert( src->stack.context );
     121        CtxSwitch( src->stack.context, dst->stack.context );
     122
     123        // set state of new coroutine to active
     124        dst->state = dst->state == Halted ? Halted : Inactive;
     125        src->state = Active;
     126}
     127
    114128// Local Variables: //
    115129// mode: c //
  • libcfa/src/concurrency/thread.hfa

    r8278abf rec28948  
    6161void ^?{}(thread_desc & this);
    6262
    63 static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
     63static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 0 }; }
    6464static inline void ?{}(thread_desc & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
    6565static inline void ?{}(thread_desc & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
    66 static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 65000 }; }
    67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, NULL, stackSize }; }
     66static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 0 }; }
     67static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0, stackSize }; }
    6868static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
    69 static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 65000 }; }
    70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 65000 }; }
     69static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 0 }; }
     70static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 0 }; }
    7171static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
    7272
  • libcfa/src/time.hfa

    r8278abf rec28948  
    3030
    3131static inline {
    32         Duration ?=?( Duration & dur, __attribute__((unused)) zero_t ) { return dur{ 0 }; }
     32        Duration ?=?( Duration & dur, zero_t ) { return dur{ 0 }; }
    3333
    3434        Duration +?( Duration rhs ) with( rhs ) {       return (Duration)@{ +tv }; }
     
    5959        bool ?>=?( Duration lhs, Duration rhs ) { return lhs.tv >= rhs.tv; }
    6060
    61         bool ?==?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv == 0; }
    62         bool ?!=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv != 0; }
    63         bool ?<? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv <  0; }
    64         bool ?<=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv <= 0; }
    65         bool ?>? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv >  0; }
    66         bool ?>=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv >= 0; }
     61        bool ?==?( Duration lhs, zero_t ) { return lhs.tv == 0; }
     62        bool ?!=?( Duration lhs, zero_t ) { return lhs.tv != 0; }
     63        bool ?<? ( Duration lhs, zero_t ) { return lhs.tv <  0; }
     64        bool ?<=?( Duration lhs, zero_t ) { return lhs.tv <= 0; }
     65        bool ?>? ( Duration lhs, zero_t ) { return lhs.tv >  0; }
     66        bool ?>=?( Duration lhs, zero_t ) { return lhs.tv >= 0; }
    6767
    6868        Duration abs( Duration rhs ) { return rhs.tv >= 0 ? rhs : -rhs; }
     
    101101        void ?{}( timeval & t, time_t sec, suseconds_t usec ) { t.tv_sec = sec; t.tv_usec = usec; }
    102102        void ?{}( timeval & t, time_t sec ) { t{ sec, 0 }; }
    103         void ?{}( timeval & t, __attribute__((unused)) zero_t ) { t{ 0, 0 }; }
    104 
    105         timeval ?=?( timeval & t, __attribute__((unused)) zero_t ) { return t{ 0 }; }
     103        void ?{}( timeval & t, zero_t ) { t{ 0, 0 }; }
     104
     105        timeval ?=?( timeval & t, zero_t ) { return t{ 0 }; }
    106106        timeval ?+?( timeval lhs, timeval rhs ) { return (timeval)@{ lhs.tv_sec + rhs.tv_sec, lhs.tv_usec + rhs.tv_usec }; }
    107107        timeval ?-?( timeval lhs, timeval rhs ) { return (timeval)@{ lhs.tv_sec - rhs.tv_sec, lhs.tv_usec - rhs.tv_usec }; }
     
    116116        void ?{}( timespec & t, time_t sec, __syscall_slong_t nsec ) { t.tv_sec = sec; t.tv_nsec = nsec; }
    117117        void ?{}( timespec & t, time_t sec ) { t{ sec, 0}; }
    118         void ?{}( timespec & t, __attribute__((unused)) zero_t ) { t{ 0, 0 }; }
    119 
    120         timespec ?=?( timespec & t, __attribute__((unused)) zero_t ) { return t{ 0 }; }
     118        void ?{}( timespec & t, zero_t ) { t{ 0, 0 }; }
     119
     120        timespec ?=?( timespec & t, zero_t ) { return t{ 0 }; }
    121121        timespec ?+?( timespec lhs, timespec rhs ) { return (timespec)@{ lhs.tv_sec + rhs.tv_sec, lhs.tv_nsec + rhs.tv_nsec }; }
    122122        timespec ?-?( timespec lhs, timespec rhs ) { return (timespec)@{ lhs.tv_sec - rhs.tv_sec, lhs.tv_nsec - rhs.tv_nsec }; }
     
    145145void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 );
    146146static inline {
    147         Time ?=?( Time & time, __attribute__((unused)) zero_t ) { return time{ 0 }; }
     147        Time ?=?( Time & time, zero_t ) { return time{ 0 }; }
    148148
    149149        void ?{}( Time & time, timeval t ) with( time ) { tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; }
  • libcfa/src/time_t.hfa

    r8278abf rec28948  
    2424
    2525static inline void ?{}( Duration & dur ) with( dur ) { tv = 0; }
    26 static inline void ?{}( Duration & dur, __attribute__((unused)) zero_t ) with( dur ) { tv = 0; }
     26static inline void ?{}( Duration & dur, zero_t ) with( dur ) { tv = 0; }
    2727
    2828
     
    3434
    3535static inline void ?{}( Time & time ) with( time ) { tv = 0; }
    36 static inline void ?{}( Time & time, __attribute__((unused)) zero_t ) with( time ) { tv = 0; }
     36static inline void ?{}( Time & time, zero_t ) with( time ) { tv = 0; }
    3737
    3838// Local Variables: //
  • tests/Makefile.am

    r8278abf rec28948  
    2222debug=yes
    2323installed=no
    24 
    25 INSTALL_FLAGS=-in-tree
    26 DEBUG_FLAGS=-debug -O0
    2724
    2825quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
  • tests/Makefile.in

    r8278abf rec28948  
    375375debug = yes
    376376installed = no
    377 INSTALL_FLAGS = -in-tree
    378 DEBUG_FLAGS = -debug -O0
    379377quick_test = avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
    380378concurrent =
Note: See TracChangeset for help on using the changeset viewer.