- Timestamp: Apr 1, 2019, 2:04:14 PM (6 years ago)
- Changeset: 212c2187
- Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 2fabdc02
- Parents: b611fc3
- Location: libcfa/src/concurrency
- Files: 7 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
libcfa/src/concurrency/CtxSwitch-x86_64.S
 	ret
 
+//.text
+//	.align 2
+//.globl CtxStore
+//CtxStore:
+//	// Save floating & SSE control words on the stack.
+//
+//	subq	$8,%rsp
+//	stmxcsr	0(%rsp)		// 4 bytes
+//	fnstcw	4(%rsp)		// 2 bytes
+//
+//	// Save volatile registers on the stack.
+//
+//	pushq	%r15
+//	pushq	%r14
+//	pushq	%r13
+//	pushq	%r12
+//	pushq	%rbx
+//
+//	// Save old context in the "from" area.
+//
+//	movq	%rsp,SP_OFFSET(%rdi)
+//	movq	%rbp,FP_OFFSET(%rdi)
+//
+//	// Return to thread
+//
+//	ret
+//
+//.text
+//	.align 2
+//.globl CtxRet
+//CtxRet:
+//	// Load new context from the "to" area.
+//
+//	movq	SP_OFFSET(%rdi),%rsp
+//	movq	FP_OFFSET(%rdi),%rbp
+//
+//	// Load volatile registers from the stack.
+//
+//	popq	%rbx
+//	popq	%r12
+//	popq	%r13
+//	popq	%r14
+//	popq	%r15
+//
+//	// Load floating & SSE control words from the stack.
+//
+//	fldcw	4(%rsp)
+//	ldmxcsr	0(%rsp)
+//	addq	$8,%rsp
+//
+//	// Return to thread.
+//
+//	ret
+
+
 .text
 	.align 2
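The added block is the interesting part of this file: it sketches, in commented-out form, a split of CtxSwitch into a save-only half (CtxStore) and a restore-only half (CtxRet), supporting the checkpoint/return experiment that also appears commented out in coroutine.hfa and invoke.h below. Below is a hedged C-level view of the intended interface; only the CtxStore/CtxRet names and asm-label declarations come from this changeset, while the machine_context_sketch layout is an assumption drawn from the assembly storing just %rsp and %rbp into the context area. Nothing here is in the build.

	// Sketch only: these declarations mirror the commented-out ones in invoke.h.
	struct machine_context_sketch {
		void * SP;	// written at SP_OFFSET(%rdi) by CtxStore
		void * FP;	// written at FP_OFFSET(%rdi) by CtxStore
	};

	void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");	// existing: save "from", restore "to"
	// void CtxStore ( void * this ) asm ("CtxStore");		// proposed: save half only (checkpoint)
	// void CtxRet   ( void * dst  ) asm ("CtxRet");		// proposed: restore half only (resume)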
libcfa/src/concurrency/coroutine.cfa
 
 extern "C" {
-	void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) __attribute__ ((__noreturn__));
+	void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
 	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
 	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
…
 void ^?{}(coroutine_desc& this) {
 	if(this.state != Halted && this.state != Start) {
-		coroutine_desc * src = TL_GET( this_coroutine );
+		coroutine_desc * src = TL_GET( this_thread )->curr_cor;
 		coroutine_desc * dst = &this;
 
…
 // Wrapper for co
 void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
-	// Safety note : This could cause some false positives due to preemption
+	// Safety note : Preemption must be disabled since there is a race condition
+	// kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
 	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
 	disable_interrupts();
…
 
 	// set new coroutine that task is executing
-	kernelTLS.this_coroutine = dst;
+	TL_GET( this_thread )->curr_cor = dst;
 
 	// context switch to specified coroutine
…
 
 	enable_interrupts( __cfaabi_dbg_ctx );
-	// Safety note : This could cause some false positives due to preemption
 	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
 
+
 	if( unlikely(src->cancellation != NULL) ) {
-		_CtxCoroutine_Unwind(src->cancellation);
+		_CtxCoroutine_Unwind(src->cancellation, src);
 	}
 } //ctxSwitchDirect
…
 }
 
-void __leave_coroutine() {
-	coroutine_desc * src = TL_GET( this_coroutine ); // optimization
+void __leave_coroutine( coroutine_desc * src ) {
 	coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
 
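The rewritten safety note states the invariant this whole changeset protects: the coroutine the thread believes it is running (this_thread->curr_cor) and the stack actually installed in $rsp/$rbp must never be observed disagreeing, for example by a preemption handler. The following is a condensed sketch of the switch path as it stands after the patch; the names are the real ones from the diff, the body is abridged.

	void CoroutineCtxSwitch( coroutine_desc * src, coroutine_desc * dst ) {
		disable_interrupts();	// close the window where the two views could diverge
		// ... state bookkeeping elided ...
		TL_GET( this_thread )->curr_cor = dst;	// update the thread's notion of "current" ...
		CtxSwitch( src->stack.context, dst->stack.context );	// ... and swap $rsp/$rbp to match
		enable_interrupts( __cfaabi_dbg_ctx );	// re-enable only once both views agree again
	}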
libcfa/src/concurrency/coroutine.hfa
 	// will also migrate which means this value will
 	// stay in syn with the TLS
-	coroutine_desc * src = TL_GET( this_coroutine );
+	coroutine_desc * src = TL_GET( this_thread )->curr_cor;
 
 	assertf( src->last != 0,
…
 	// will also migrate which means this value will
 	// stay in syn with the TLS
-	coroutine_desc * src = TL_GET( this_coroutine );
+	coroutine_desc * src = TL_GET( this_thread )->curr_cor;
 	coroutine_desc * dst = get_coroutine(cor);
 
…
 	// will also migrate which means this value will
 	// stay in syn with the TLS
-	coroutine_desc * src = TL_GET( this_coroutine );
+	coroutine_desc * src = TL_GET( this_thread )->curr_cor;
 
 	// not resuming self ?
…
 }
 
+
+
+// static inline bool suspend_checkpoint(void) {
+// 	// optimization : read TLS once and reuse it
+// 	// Safety note: this is preemption safe since if
+// 	// preemption occurs after this line, the pointer
+// 	// will also migrate which means this value will
+// 	// stay in syn with the TLS
+// 	// set state of current coroutine to inactive
+// 	this->state = Checkpoint;
+
+// 	// context switch to specified coroutine
+// 	assert( src->stack.context );
+
+// 	CtxStore(src->stack.context);
+
+// 	bool ret = this->state == Checkpoint;
+
+// 	// set state of new coroutine to active
+// 	src->state = Active;
+
+// 	enable_interrupts( __cfaabi_dbg_ctx );
+// 	// Safety note : This could cause some false positives due to preemption
+// 	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
+
+// 	if( unlikely(src->cancellation != NULL) ) {
+// 		_CtxCoroutine_Unwind(src->cancellation);
+// 	}
+
+// 	return ret;
+// }
+
+// static inline void suspend_return(void) {
+// 	// optimization : read TLS once and reuse it
+// 	// Safety note: this is preemption safe since if
+// 	// preemption occurs after this line, the pointer
+// 	// will also migrate which means this value will
+// 	// stay in syn with the TLS
+// 	coroutine_desc * src = TL_GET( this_thread )->curr_cor;
+
+// 	assertf( src->last != 0,
+// 		"Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n"
+// 		"Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
+// 		src->name, src );
+// 	assertf( src->last->state != Halted,
+// 		"Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n"
+// 		"Possible cause is terminated coroutine's main routine has already returned.",
+// 		src->name, src, src->last->name, src->last );
+
+// 	// Safety note : Preemption must be disabled here since kernelTLS.this_coroutine must always be up to date
+// 	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
+// 	disable_interrupts();
+
+// 	// set state of current coroutine to inactive
+// 	src->state = src->state == Halted ? Halted : Inactive;
+
+// 	// set new coroutine that task is executing
+// 	kernelTLS.this_coroutine = dst;
+
+// 	// context switch to specified coroutine
+// 	assert( src->stack.context );
+// 	CtxRet( src->stack.context );
+
+// 	abort();
+// }
+
 // Local Variables: //
 // mode: c //
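The repeated "read TLS once and reuse it" pattern in this header survives the change for a reason worth spelling out: curr_cor is now a field of the thread descriptor rather than a separate per-kernel-thread TLS cell, so if the thread is preempted and migrates to another processor right after the read, the cached pointer is still correct because it travels with the thread. A two-line illustration, assuming nothing beyond the diff itself:

	// Before: a separate TLS cell that every context switch had to keep in sync.
	// coroutine_desc * src = TL_GET( this_coroutine );
	// After: one TLS read to find the thread, then a field that migrates with it.
	coroutine_desc * src = TL_GET( this_thread )->curr_cor;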
libcfa/src/concurrency/invoke.c
 
 extern void __suspend_internal(void);
-extern void __leave_coroutine( void );
-extern void __finish_creation( void );
+extern void __leave_coroutine( struct coroutine_desc * );
+extern void __finish_creation( struct coroutine_desc * );
 extern void __leave_thread_monitor( struct thread_desc * this );
 extern void disable_interrupts();
…
 
 	//Final suspend, should never return
-	__leave_coroutine();
+	__leave_coroutine( cor );
 	__cabi_abort( "Resumed dead coroutine" );
 }
…
 	__attribute((__unused__)) struct _Unwind_Exception * unwind_exception,
 	__attribute((__unused__)) struct _Unwind_Context * context,
-	__attribute((__unused__)) void * param
+	void * param
 ) {
 	if( actions & _UA_END_OF_STACK ) {
 		// We finished unwinding the coroutine,
 		// leave it
-		__leave_coroutine();
+		__leave_coroutine( param );
 		__cabi_abort( "Resumed dead coroutine" );
 	}
…
 }
 
-void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) __attribute__ ((__noreturn__));
-void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage) {
-	_Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, NULL );
+void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
+void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
+	_Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
 	printf("UNWIND ERROR %d after force unwind\n", ret);
 	abort();
…
 	void *this
 ) {
+	// Fetch the thread handle from the user defined thread structure
+	struct thread_desc* thrd = get_thread( this );
+
 	// First suspend, once the thread arrives here,
 	// the function pointer to main can be invalidated without risk
-	__finish_creation();
+	__finish_creation(&thrd->self_cor);
 
-	// Fetch the thread handle from the user defined thread structure
-	struct thread_desc* thrd = get_thread( this );
+	// Restore the last to NULL, we clobbered because of the thunk problem
 	thrd->self_cor.last = NULL;
 
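The invoke.c changes thread the coroutine descriptor through libgcc's forced-unwind machinery instead of recovering it from TLS at the bottom of the stack: the third argument of _Unwind_ForcedUnwind reappears verbatim as the param of the stop function on every frame. A minimal self-contained sketch of that plumbing, using only the documented <unwind.h> interface; stop_fn and cancel are hypothetical names standing in for _CtxCoroutine_UnwindStop and _CtxCoroutine_Unwind above.

	#include <unwind.h>

	static _Unwind_Reason_Code stop_fn(
			int version, _Unwind_Action actions, _Unwind_Exception_Class cls,
			struct _Unwind_Exception * exc, struct _Unwind_Context * ctx,
			void * param ) {	// the cookie handed to _Unwind_ForcedUnwind
		if ( actions & _UA_END_OF_STACK ) {
			// Bottom of the coroutine's stack: param identifies which
			// coroutine to leave, so no TLS lookup is needed here.
		}
		return _URC_NO_REASON;	// keep unwinding through intermediate frames
	}

	static void cancel( struct _Unwind_Exception * storage, void * cor ) {
		_Unwind_ForcedUnwind( storage, stop_fn, cor );	// cookie = coroutine descriptor
	}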
libcfa/src/concurrency/invoke.h
 
 	extern thread_local struct KernelThreadData {
-		struct coroutine_desc * volatile this_coroutine;
 		struct thread_desc * volatile this_thread;
 		struct processor * volatile this_processor;
…
 	} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
 }
-
-static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
-static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
-static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
 #endif
 
…
 		struct thread_desc * prev;
 	} node;
 };
 
 #ifdef __cforall
 extern "Cforall" {
+	static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+	static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
+	static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
+
 	static inline thread_desc * & get_next( thread_desc & this ) {
 		return this.next;
…
 	extern void CtxInvokeStub( void );
 	void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
+	// void CtxStore ( void * this ) asm ("CtxStore");
+	// void CtxRet ( void * dst ) asm ("CtxRet");
 
 	#if defined( __i386 )
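Net effect of the invoke.h edits on the TLS layout: one pointer disappears from KernelThreadData, and active_coroutine() becomes a derived value. Sketched below under the assumption that only the shown fields matter; the real struct has more members.

	struct KernelThreadData_after {	// sketch, not the full struct
		struct thread_desc * volatile this_thread;	// still stored in TLS
		struct processor   * volatile this_processor;	// still stored in TLS
		// struct coroutine_desc * volatile this_coroutine;	// removed by this changeset
	};

	// The accessor is computed rather than stored, so context switches have
	// one less TLS field to keep consistent:
	//	active_coroutine() == TL_GET( this_thread )->curr_cor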
libcfa/src/concurrency/kernel.cfa
 	NULL,
 	NULL,
-	NULL,
 	{ 1, false, false }
 };
…
 static void returnToKernel() {
 	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
-	coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
+	coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
 	ThreadCtxSwitch(thrd_cor, proc_cor);
 }
…
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;
-	kernelTLS.this_coroutine = NULL;
 	kernelTLS.this_thread = NULL;
 	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
…
 
 	//Set global state
-	kernelTLS.this_coroutine = get_coroutine(proc->runner);
 	kernelTLS.this_thread = NULL;
 
…
 // KERNEL_ONLY
 void kernel_first_resume(processor * this) {
-	coroutine_desc * src = kernelTLS.this_coroutine;
+	coroutine_desc * src = mainThread->curr_cor;
 	coroutine_desc * dst = get_coroutine(this->runner);
 
…
 
 	// set state of current coroutine to inactive
 	src->state = src->state == Halted ? Halted : Inactive;
-
-	// set new coroutine that task is executing
-	kernelTLS.this_coroutine = dst;
 
 	// SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
…
 	kernelTLS.this_processor = mainProcessor;
 	kernelTLS.this_thread = mainThread;
-	kernelTLS.this_coroutine = &mainThread->self_cor;
 
 	// Enable preemption
…
 	__cfaabi_dbg_bits_write( abort_text, len );
 
-	if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
-		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
+	if ( &thrd->self_cor != thrd->curr_cor ) {
+		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
 		__cfaabi_dbg_bits_write( abort_text, len );
 	}
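With the TLS pointer gone, kernel.cfa answers "which coroutine is this thread really running?" structurally: a thread executes its own self_cor unless it has resumed another coroutine, in which case curr_cor differs. The abort path above uses exactly this test; restated for emphasis:

	if ( &thrd->self_cor != thrd->curr_cor ) {
		// The fault occurred on a borrowed coroutine's stack, so the abort
		// message names that coroutine rather than the thread itself.
	}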
libcfa/src/concurrency/thread.cfa
 	coroutine_desc* thrd_c = get_coroutine(this);
 	thread_desc * thrd_h = get_thread (this);
-	thrd_c->last = TL_GET( this_coroutine );
+	thrd_c->last = TL_GET( this_thread )->curr_cor;
 
 	// __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
…
 	disable_interrupts();
 	create_stack(&thrd_c->stack, thrd_c->stack.size);
-	kernelTLS.this_coroutine = thrd_c;
 	CtxStart(&this, CtxInvokeThread);
 	assert( thrd_c->last->stack.context );
…
 extern "C" {
 	// KERNEL ONLY
-	void __finish_creation(void) {
-		coroutine_desc* thrd_c = kernelTLS.this_coroutine;
+	void __finish_creation(coroutine_desc * thrd_c) {
 		ThreadCtxSwitch( thrd_c, thrd_c->last );
 	}
…
 	// set new coroutine that the processor is executing
 	// and context switch to it
-	kernelTLS.this_coroutine = dst;
 	assert( src->stack.context );
 	CtxSwitch( src->stack.context, dst->stack.context );
-	kernelTLS.this_coroutine = src;
 
 	// set state of new coroutine to active
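Taken together with the invoke.c hunk above, the creation handshake now looks like this from the child's side: CtxInvokeThread derives the coroutine from the thread object it was handed and passes it to __finish_creation explicitly, instead of trusting a TLS pointer the parent had to pre-seed before CtxStart. Abridged sketch; the thunk parameters are elided.

	void CtxInvokeThread( /* main/get_thread thunks elided */ void * this ) {
		struct thread_desc * thrd = get_thread( this );	// fetch the handle first
		__finish_creation( &thrd->self_cor );	// context switch back to the creator
		thrd->self_cor.last = NULL;	// restore last, clobbered by the thunk problem
		// ... invoke the thread main, then __leave_thread_monitor( thrd ) ...
	}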