Changeset 16a6a617


Ignore:
Timestamp:
Apr 2, 2019, 3:26:38 PM (3 years ago)
Author:
Peter A. Buhr <pabuhr@…>
Branches:
arm-eh, cleanup-dtors, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr
Children:
98319ad
Parents:
9be2b60 (diff), 2fabdc02 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
libcfa/src/concurrency
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    r9be2b60 r16a6a617  
    8888        ret
    8989
     90//.text
     91//      .align 2
     92//.globl        CtxStore
     93//CtxStore:
     94//      // Save floating & SSE control words on the stack.
     95//
     96//      subq   $8,%rsp
     97//      stmxcsr 0(%rsp)         // 4 bytes
     98//      fnstcw  4(%rsp)         // 2 bytes
     99//
     100//      // Save volatile registers on the stack.
     101//
     102//      pushq %r15
     103//      pushq %r14
     104//      pushq %r13
     105//      pushq %r12
     106//      pushq %rbx
     107//
     108//      // Save old context in the "from" area.
     109//
     110//      movq %rsp,SP_OFFSET(%rdi)
     111//      movq %rbp,FP_OFFSET(%rdi)
     112//
     113//      // Return to thread
     114//
     115//      ret
     116//
     117//.text
     118//      .align 2
     119//.globl        CtxRet
     120//CtxRet:
     121//      // Load new context from the "to" area.
     122//
     123//      movq SP_OFFSET(%rdi),%rsp
     124//      movq FP_OFFSET(%rdi),%rbp
     125//
     126//      // Load volatile registers from the stack.
     127//
     128//      popq %rbx
     129//      popq %r12
     130//      popq %r13
     131//      popq %r14
     132//      popq %r15
     133//
     134//      // Load floating & SSE control words from the stack.
     135//
     136//      fldcw   4(%rsp)
     137//      ldmxcsr 0(%rsp)
     138//      addq   $8,%rsp
     139//
     140//      // Return to thread.
     141//
     142//      ret
     143
     144
    90145.text
    91146        .align 2
  • libcfa/src/concurrency/coroutine.cfa

    r9be2b60 r16a6a617  
    3535
    3636extern "C" {
    37       void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) __attribute__ ((__noreturn__));
     37      void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    3838      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
    3939      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
     
    8484void ^?{}(coroutine_desc& this) {
    8585      if(this.state != Halted && this.state != Start) {
    86             coroutine_desc * src = TL_GET( this_coroutine );
     86            coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    8787            coroutine_desc * dst = &this;
    8888
     
    115115// Wrapper for co
    116116void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    117       // Safety note : This could cause some false positives due to preemption
     117      // Safety note : Preemption must be disabled since there is a race condition
     118      // kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
    118119      verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    119120      disable_interrupts();
     
    123124
    124125      // set new coroutine that task is executing
    125       kernelTLS.this_coroutine = dst;
     126      TL_GET( this_thread )->curr_cor = dst;
    126127
    127128      // context switch to specified coroutine
     
    134135
    135136      enable_interrupts( __cfaabi_dbg_ctx );
    136       // Safety note : This could cause some false positives due to preemption
    137137      verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    138138
     139
    139140      if( unlikely(src->cancellation != NULL) ) {
    140             _CtxCoroutine_Unwind(src->cancellation);
     141            _CtxCoroutine_Unwind(src->cancellation, src);
    141142      }
    142143} //ctxSwitchDirect
     
    197198      }
    198199
    199       void __leave_coroutine() {
    200             coroutine_desc * src = TL_GET( this_coroutine ); // optimization
     200      void __leave_coroutine( coroutine_desc * src ) {
    201201            coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
    202202
  • libcfa/src/concurrency/coroutine.hfa

    r9be2b60 r16a6a617  
    7777        // will also migrate which means this value will
    7878        // stay in syn with the TLS
    79         coroutine_desc * src = TL_GET( this_coroutine );
     79        coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    8080
    8181        assertf( src->last != 0,
     
    9999        // will also migrate which means this value will
    100100        // stay in syn with the TLS
    101         coroutine_desc * src = TL_GET( this_coroutine );
     101        coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    102102        coroutine_desc * dst = get_coroutine(cor);
    103103
     
    129129        // will also migrate which means this value will
    130130        // stay in syn with the TLS
    131         coroutine_desc * src = TL_GET( this_coroutine );
     131        coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    132132
    133133        // not resuming self ?
     
    146146}
    147147
     148
     149
     150// static inline bool suspend_checkpoint(void) {
     151//      // optimization : read TLS once and reuse it
     152//      // Safety note: this is preemption safe since if
     153//      // preemption occurs after this line, the pointer
     154//      // will also migrate which means this value will
     155//      // stay in syn with the TLS
     156//      // set state of current coroutine to inactive
     157//       this->state = Checkpoint;
     158
     159//       // context switch to specified coroutine
     160//       assert( src->stack.context );
     161
     162//       CtxStore(src->stack.context);
     163
     164//      bool ret = this->state == Checkpoint;
     165
     166//       // set state of new coroutine to active
     167//       src->state = Active;
     168
     169//       enable_interrupts( __cfaabi_dbg_ctx );
     170//       // Safety note : This could cause some false positives due to preemption
     171//       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     172
     173//       if( unlikely(src->cancellation != NULL) ) {
     174//             _CtxCoroutine_Unwind(src->cancellation);
     175//       }
     176
     177//      return ret;
     178// }
     179
     180// static inline void suspend_return(void) {
     181//      // optimization : read TLS once and reuse it
     182//      // Safety note: this is preemption safe since if
     183//      // preemption occurs after this line, the pointer
     184//      // will also migrate which means this value will
     185//      // stay in syn with the TLS
     186//      coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     187
     188//      assertf( src->last != 0,
     189//              "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n"
     190//              "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
     191//              src->name, src );
     192//      assertf( src->last->state != Halted,
     193//              "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n"
     194//              "Possible cause is terminated coroutine's main routine has already returned.",
     195//              src->name, src, src->last->name, src->last );
     196
     197//      // Safety note : Preemption must be disabled here since kernelTLS.this_coroutine must always be up to date
     198//       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     199//       disable_interrupts();
     200
     201//       // set state of current coroutine to inactive
     202//       src->state = src->state == Halted ? Halted : Inactive;
     203
     204//       // set new coroutine that task is executing
     205//       kernelTLS.this_coroutine = dst;
     206
     207//       // context switch to specified coroutine
     208//       assert( src->stack.context );
     209//      CtxRet( src->stack.context );
     210
     211//      abort();
     212// }
     213
    148214// Local Variables: //
    149215// mode: c //
  • libcfa/src/concurrency/invoke.c

    r9be2b60 r16a6a617  
    2828
    2929extern void __suspend_internal(void);
    30 extern void __leave_coroutine(void);
    31 extern void __finish_creation(void);
     30extern void __leave_coroutine( struct coroutine_desc * );
     31extern void __finish_creation( struct coroutine_desc * );
    3232extern void __leave_thread_monitor( struct thread_desc * this );
    3333extern void disable_interrupts();
     
    5252
    5353        //Final suspend, should never return
    54         __leave_coroutine();
     54        __leave_coroutine( cor );
    5555        __cabi_abort( "Resumed dead coroutine" );
    5656}
     
    6262        __attribute((__unused__)) struct _Unwind_Exception * unwind_exception,
    6363        __attribute((__unused__)) struct _Unwind_Context * context,
    64         __attribute((__unused__)) void * param
     64        void * param
    6565) {
    6666        if( actions & _UA_END_OF_STACK  ) {
    6767                // We finished unwinding the coroutine,
    6868                // leave it
    69                 __leave_coroutine();
     69                __leave_coroutine( param );
    7070                __cabi_abort( "Resumed dead coroutine" );
    7171        }
     
    7575}
    7676
    77 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) __attribute__ ((__noreturn__));
    78 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) {
    79         _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, NULL );
     77void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
     78void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
     79        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
    8080        printf("UNWIND ERROR %d after force unwind\n", ret);
    8181        abort();
     
    8888        void *this
    8989) {
     90        // Fetch the thread handle from the user defined thread structure
     91        struct thread_desc* thrd = get_thread( this );
     92
    9093        // First suspend, once the thread arrives here,
    9194        // the function pointer to main can be invalidated without risk
    92         __finish_creation();
     95        __finish_creation(&thrd->self_cor);
    9396
    94         // Fetch the thread handle from the user defined thread structure
    95         struct thread_desc* thrd = get_thread( this );
     97        // Restore the last to NULL, we clobbered because of the thunk problem
    9698        thrd->self_cor.last = NULL;
    9799
  • libcfa/src/concurrency/invoke.h

    r9be2b60 r16a6a617  
    5050
    5151                extern thread_local struct KernelThreadData {
    52                         struct coroutine_desc * volatile this_coroutine;
    5352                        struct thread_desc    * volatile this_thread;
    5453                        struct processor      * volatile this_processor;
     
    6160                } kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
    6261        }
    63 
    64         static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
    65         static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
    66         static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
    6762        #endif
    6863
     
    170165                        struct thread_desc * prev;
    171166                } node;
    172      };
    173 
    174      #ifdef __cforall
    175      extern "Cforall" {
     167        };
     168
     169        #ifdef __cforall
     170        extern "Cforall" {
     171                static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_thread )->curr_cor; }
     172                static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
     173                static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
     174
    176175                static inline thread_desc * & get_next( thread_desc & this ) {
    177176                        return this.next;
     
    232231        extern void CtxInvokeStub( void );
    233232        void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
     233        // void CtxStore ( void * this ) asm ("CtxStore");
     234        // void CtxRet   ( void * dst  ) asm ("CtxRet");
    234235
    235236        #if   defined( __i386 )
  • libcfa/src/concurrency/kernel.cfa

    r9be2b60 r16a6a617  
    6060        NULL,
    6161        NULL,
    62         NULL,
    6362        { 1, false, false }
    6463};
     
    263262static void returnToKernel() {
    264263        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    265         coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
     264        coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
    266265        ThreadCtxSwitch(thrd_cor, proc_cor);
    267266}
     
    307306        processor * proc = (processor *) arg;
    308307        kernelTLS.this_processor = proc;
    309         kernelTLS.this_coroutine = NULL;
    310308        kernelTLS.this_thread    = NULL;
    311309        kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
     
    321319
    322320        //Set global state
    323         kernelTLS.this_coroutine = get_coroutine(proc->runner);
    324321        kernelTLS.this_thread    = NULL;
    325322
     
    351348// KERNEL_ONLY
    352349void kernel_first_resume(processor * this) {
    353         coroutine_desc * src = kernelTLS.this_coroutine;
     350        coroutine_desc * src = mainThread->curr_cor;
    354351        coroutine_desc * dst = get_coroutine(this->runner);
    355352
     
    366363        // set state of current coroutine to inactive
    367364        src->state = src->state == Halted ? Halted : Inactive;
    368 
    369         // set new coroutine that task is executing
    370         kernelTLS.this_coroutine = dst;
    371365
    372366        // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
     
    599593        kernelTLS.this_processor = mainProcessor;
    600594        kernelTLS.this_thread    = mainThread;
    601         kernelTLS.this_coroutine = &mainThread->self_cor;
    602595
    603596        // Enable preemption
     
    720713                __cfaabi_dbg_bits_write( abort_text, len );
    721714
    722                 if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
    723                         len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
     715                if ( &thrd->self_cor != thrd->curr_cor ) {
     716                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
    724717                        __cfaabi_dbg_bits_write( abort_text, len );
    725718                }
  • libcfa/src/concurrency/thread.cfa

    r9be2b60 r16a6a617  
    7575        coroutine_desc* thrd_c = get_coroutine(this);
    7676        thread_desc   * thrd_h = get_thread   (this);
    77         thrd_c->last = TL_GET( this_coroutine );
     77        thrd_c->last = TL_GET( this_thread )->curr_cor;
    7878
    7979        // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     
    8181        disable_interrupts();
    8282        create_stack(&thrd_c->stack, thrd_c->stack.size);
    83         kernelTLS.this_coroutine = thrd_c;
    8483        CtxStart(&this, CtxInvokeThread);
    8584        assert( thrd_c->last->stack.context );
     
    9291extern "C" {
    9392        // KERNEL ONLY
    94         void __finish_creation(void) {
    95                 coroutine_desc* thrd_c = kernelTLS.this_coroutine;
     93        void __finish_creation(coroutine_desc * thrd_c) {
    9694                ThreadCtxSwitch( thrd_c, thrd_c->last );
    9795        }
     
    120118        // set new coroutine that the processor is executing
    121119        // and context switch to it
    122         kernelTLS.this_coroutine = dst;
    123120        assert( src->stack.context );
    124121        CtxSwitch( src->stack.context, dst->stack.context );
    125         kernelTLS.this_coroutine = src;
    126122
    127123        // set state of new coroutine to active
Note: See TracChangeset for help on using the changeset viewer.