Changes in / [2fabdc02:bee653c]


Ignore:
Location:
libcfa/src/concurrency
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    r2fabdc02 rbee653c  
    8888        ret
    8989
    90 //.text
    91 //      .align 2
    92 //.globl        CtxStore
    93 //CtxStore:
    94 //      // Save floating & SSE control words on the stack.
    95 //
    96 //      subq   $8,%rsp
    97 //      stmxcsr 0(%rsp)         // 4 bytes
    98 //      fnstcw  4(%rsp)         // 2 bytes
    99 //
    100 //      // Save volatile registers on the stack.
    101 //
    102 //      pushq %r15
    103 //      pushq %r14
    104 //      pushq %r13
    105 //      pushq %r12
    106 //      pushq %rbx
    107 //
    108 //      // Save old context in the "from" area.
    109 //
    110 //      movq %rsp,SP_OFFSET(%rdi)
    111 //      movq %rbp,FP_OFFSET(%rdi)
    112 //
    113 //      // Return to thread
    114 //
    115 //      ret
    116 //
    117 //.text
    118 //      .align 2
    119 //.globl        CtxRet
    120 //CtxRet:
    121 //      // Load new context from the "to" area.
    122 //
    123 //      movq SP_OFFSET(%rdi),%rsp
    124 //      movq FP_OFFSET(%rdi),%rbp
    125 //
    126 //      // Load volatile registers from the stack.
    127 //
    128 //      popq %rbx
    129 //      popq %r12
    130 //      popq %r13
    131 //      popq %r14
    132 //      popq %r15
    133 //
    134 //      // Load floating & SSE control words from the stack.
    135 //
    136 //      fldcw   4(%rsp)
    137 //      ldmxcsr 0(%rsp)
    138 //      addq   $8,%rsp
    139 //
    140 //      // Return to thread.
    141 //
    142 //      ret
    143 
    144 
    145 90 .text
    14691        .align 2
  • libcfa/src/concurrency/coroutine.cfa

    r2fabdc02 rbee653c  
    3535
    3636extern "C" {
    37       void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     37      void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) __attribute__ ((__noreturn__));
    3838      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
    3939      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
     
    8484void ^?{}(coroutine_desc& this) {
    8585      if(this.state != Halted && this.state != Start) {
    86             coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     86            coroutine_desc * src = TL_GET( this_coroutine );
    8787            coroutine_desc * dst = &this;
    8888
     
    115115// Wrapper for co
    116116void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    117       // Safety note : Preemption must be disabled since there is a race condition
    118       // kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
     117      // Safety note : This could cause some false positives due to preemption
    119118      verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    120119      disable_interrupts();
     
    124123
    125124      // set new coroutine that task is executing
    126       TL_GET( this_thread )->curr_cor = dst;
     125      kernelTLS.this_coroutine = dst;
    127126
    128127      // context switch to specified coroutine
     
    135134
    136135      enable_interrupts( __cfaabi_dbg_ctx );
     136      // Safety note : This could cause some false positives due to preemption
    137137      verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    138138
    139 
    140139      if( unlikely(src->cancellation != NULL) ) {
    141             _CtxCoroutine_Unwind(src->cancellation, src);
     140            _CtxCoroutine_Unwind(src->cancellation);
    142141      }
    143142} //ctxSwitchDirect
     
    198197      }
    199198
    200       void __leave_coroutine( coroutine_desc * src ) {
     199      void __leave_coroutine() {
     200            coroutine_desc * src = TL_GET( this_coroutine ); // optimization
    201201            coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
    202202
  • libcfa/src/concurrency/coroutine.hfa

    r2fabdc02 rbee653c  
    7777        // will also migrate which means this value will
    7878        // stay in syn with the TLS
    79         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     79        coroutine_desc * src = TL_GET( this_coroutine );
    8080
    8181        assertf( src->last != 0,
     
    9999        // will also migrate which means this value will
    100100        // stay in syn with the TLS
    101         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     101        coroutine_desc * src = TL_GET( this_coroutine );
    102102        coroutine_desc * dst = get_coroutine(cor);
    103103
     
    129129        // will also migrate which means this value will
    130130        // stay in syn with the TLS
    131         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     131        coroutine_desc * src = TL_GET( this_coroutine );
    132132
    133133        // not resuming self ?
     
    146146}
    147147
    148 
    149 
    150 // static inline bool suspend_checkpoint(void) {
    151 //      // optimization : read TLS once and reuse it
    152 //      // Safety note: this is preemption safe since if
    153 //      // preemption occurs after this line, the pointer
    154 //      // will also migrate which means this value will
    155 //      // stay in syn with the TLS
    156 //      // set state of current coroutine to inactive
    157 //       this->state = Checkpoint;
    158 
    159 //       // context switch to specified coroutine
    160 //       assert( src->stack.context );
    161 
    162 //       CtxStore(src->stack.context);
    163 
    164 //      bool ret = this->state == Checkpoint;
    165 
    166 //       // set state of new coroutine to active
    167 //       src->state = Active;
    168 
    169 //       enable_interrupts( __cfaabi_dbg_ctx );
    170 //       // Safety note : This could cause some false positives due to preemption
    171 //       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    172 
    173 //       if( unlikely(src->cancellation != NULL) ) {
    174 //             _CtxCoroutine_Unwind(src->cancellation);
    175 //       }
    176 
    177 //      return ret;
    178 // }
    179 
    180 // static inline void suspend_return(void) {
    181 //      // optimization : read TLS once and reuse it
    182 //      // Safety note: this is preemption safe since if
    183 //      // preemption occurs after this line, the pointer
    184 //      // will also migrate which means this value will
    185 //      // stay in syn with the TLS
    186 //      coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    187 
    188 //      assertf( src->last != 0,
    189 //              "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n"
    190 //              "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
    191 //              src->name, src );
    192 //      assertf( src->last->state != Halted,
    193 //              "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n"
    194 //              "Possible cause is terminated coroutine's main routine has already returned.",
    195 //              src->name, src, src->last->name, src->last );
    196 
    197 //      // Safety note : Preemption must be disabled here since kernelTLS.this_coroutine must always be up to date
    198 //       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    199 //       disable_interrupts();
    200 
    201 //       // set state of current coroutine to inactive
    202 //       src->state = src->state == Halted ? Halted : Inactive;
    203 
    204 //       // set new coroutine that task is executing
    205 //       kernelTLS.this_coroutine = dst;
    206 
    207 //       // context switch to specified coroutine
    208 //       assert( src->stack.context );
    209 //      CtxRet( src->stack.context );
    210 
    211 //      abort();
    212 // }
    213 
    214148// Local Variables: //
    215149// mode: c //
  • libcfa/src/concurrency/invoke.c

    r2fabdc02 rbee653c  
    2828
    2929extern void __suspend_internal(void);
    30 extern void __leave_coroutine( struct coroutine_desc * );
    31 extern void __finish_creation( struct coroutine_desc * );
     30extern void __leave_coroutine(void);
     31extern void __finish_creation(void);
    3232extern void __leave_thread_monitor( struct thread_desc * this );
    3333extern void disable_interrupts();
     
    5252
    5353        //Final suspend, should never return
    54         __leave_coroutine( cor );
     54        __leave_coroutine();
    5555        __cabi_abort( "Resumed dead coroutine" );
    5656}
     
    6262        __attribute((__unused__)) struct _Unwind_Exception * unwind_exception,
    6363        __attribute((__unused__)) struct _Unwind_Context * context,
    64         void * param
     64        __attribute((__unused__)) void * param
    6565) {
    6666        if( actions & _UA_END_OF_STACK  ) {
    6767                // We finished unwinding the coroutine,
    6868                // leave it
    69                 __leave_coroutine( param );
     69                __leave_coroutine();
    7070                __cabi_abort( "Resumed dead coroutine" );
    7171        }
     
    7575}
    7676
    77 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
    78 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
    79         _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
     77void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) __attribute__ ((__noreturn__));
     78void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) {
     79        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, NULL );
    8080        printf("UNWIND ERROR %d after force unwind\n", ret);
    8181        abort();
     
    8888        void *this
    8989) {
     90        // First suspend, once the thread arrives here,
     91        // the function pointer to main can be invalidated without risk
     92        __finish_creation();
     93
    9094        // Fetch the thread handle from the user defined thread structure
    9195        struct thread_desc* thrd = get_thread( this );
    92 
    93         // First suspend, once the thread arrives here,
    94         // the function pointer to main can be invalidated without risk
    95         __finish_creation(&thrd->self_cor);
    96 
    97         // Restore the last to NULL, we clobbered because of the thunk problem
    9896        thrd->self_cor.last = NULL;
    9997
  • libcfa/src/concurrency/invoke.h

    r2fabdc02 rbee653c  
    5050
    5151                extern thread_local struct KernelThreadData {
     52                        struct coroutine_desc * volatile this_coroutine;
    5253                        struct thread_desc    * volatile this_thread;
    5354                        struct processor      * volatile this_processor;
     
    6061                } kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
    6162        }
     63
     64        static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
     65        static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
     66        static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
    6267        #endif
    6368
     
    165170                        struct thread_desc * prev;
    166171                } node;
    167         };
    168 
    169         #ifdef __cforall
    170         extern "Cforall" {
    171                 static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_thread )->curr_cor; }
    172                 static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
    173                 static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
    174 
     172     };
     173
     174     #ifdef __cforall
     175     extern "Cforall" {
    175176                static inline thread_desc * & get_next( thread_desc & this ) {
    176177                        return this.next;
     
    231232        extern void CtxInvokeStub( void );
    232233        void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
    233         // void CtxStore ( void * this ) asm ("CtxStore");
    234         // void CtxRet   ( void * dst  ) asm ("CtxRet");
    235234
    236235        #if   defined( __i386 )
  • libcfa/src/concurrency/kernel.cfa

    r2fabdc02 rbee653c  
    6060        NULL,
    6161        NULL,
     62        NULL,
    6263        { 1, false, false }
    6364};
     
    262263static void returnToKernel() {
    263264        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    264         coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
     265        coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
    265266        ThreadCtxSwitch(thrd_cor, proc_cor);
    266267}
     
    306307        processor * proc = (processor *) arg;
    307308        kernelTLS.this_processor = proc;
     309        kernelTLS.this_coroutine = NULL;
    308310        kernelTLS.this_thread    = NULL;
    309311        kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
     
    319321
    320322        //Set global state
     323        kernelTLS.this_coroutine = get_coroutine(proc->runner);
    321324        kernelTLS.this_thread    = NULL;
    322325
     
    348351// KERNEL_ONLY
    349352void kernel_first_resume(processor * this) {
    350         coroutine_desc * src = mainThread->curr_cor;
     353        coroutine_desc * src = kernelTLS.this_coroutine;
    351354        coroutine_desc * dst = get_coroutine(this->runner);
    352355
     
    363366        // set state of current coroutine to inactive
    364367        src->state = src->state == Halted ? Halted : Inactive;
     368
     369        // set new coroutine that task is executing
     370        kernelTLS.this_coroutine = dst;
    365371
    366372        // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
     
    593599        kernelTLS.this_processor = mainProcessor;
    594600        kernelTLS.this_thread    = mainThread;
     601        kernelTLS.this_coroutine = &mainThread->self_cor;
    595602
    596603        // Enable preemption
     
    713720                __cfaabi_dbg_bits_write( abort_text, len );
    714721
    715                 if ( &thrd->self_cor != thrd->curr_cor ) {
    716                         len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
     722                if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
     723                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
    717724                        __cfaabi_dbg_bits_write( abort_text, len );
    718725                }
  • libcfa/src/concurrency/thread.cfa

    r2fabdc02 rbee653c  
    7575        coroutine_desc* thrd_c = get_coroutine(this);
    7676        thread_desc   * thrd_h = get_thread   (this);
    77         thrd_c->last = TL_GET( this_thread )->curr_cor;
     77        thrd_c->last = TL_GET( this_coroutine );
    7878
    7979        // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     
    8181        disable_interrupts();
    8282        create_stack(&thrd_c->stack, thrd_c->stack.size);
     83        kernelTLS.this_coroutine = thrd_c;
    8384        CtxStart(&this, CtxInvokeThread);
    8485        assert( thrd_c->last->stack.context );
     
    9192extern "C" {
    9293        // KERNEL ONLY
    93         void __finish_creation(coroutine_desc * thrd_c) {
     94        void __finish_creation(void) {
     95                coroutine_desc* thrd_c = kernelTLS.this_coroutine;
    9496                ThreadCtxSwitch( thrd_c, thrd_c->last );
    9597        }
     
    118120        // set new coroutine that the processor is executing
    119121        // and context switch to it
     122        kernelTLS.this_coroutine = dst;
    120123        assert( src->stack.context );
    121124        CtxSwitch( src->stack.context, dst->stack.context );
     125        kernelTLS.this_coroutine = src;
    122126
    123127        // set state of new coroutine to active
Note: See TracChangeset for help on using the changeset viewer.