- Timestamp:
- May 8, 2018, 5:22:38 PM (6 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children:
- b4a835d, de94a60
- Parents:
- 4990812
- Location:
- src/libcfa/concurrency
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/coroutine.c
r4990812 → rafd550c
@@ lines 85-107 @@
 void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
     // Safety note : This could cause some false positives due to preemption
-    verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
+    verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     disable_interrupts();
 ...
     enable_interrupts( __cfaabi_dbg_ctx );
     // Safety note : This could cause some false positives due to preemption
-    verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
+    verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
 } //ctxSwitchDirect
src/libcfa/concurrency/invoke.c
r4990812 → rafd550c
@@ lines 69-73 @@
 // Fetch the thread handle from the user defined thread structure
 struct thread_desc* thrd = get_thread( this );
+thrd->self_cor.last = NULL;

 // Officially start the thread by enabling preemption
src/libcfa/concurrency/invoke.h
r4990812 → rafd550c
@@ lines 49-53 @@
 static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
 static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
-//static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
+static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
 #endif
src/libcfa/concurrency/kernel.c
r4990812 → rafd550c
@@ lines 422-428 @@
 }

-verify( ! preemption_state.enabled );
+verify( ! kernelTLS.preemption_state.enabled );
 returnToKernel();
-verify( ! preemption_state.enabled );
+verify( ! kernelTLS.preemption_state.enabled );

 enable_interrupts( __cfaabi_dbg_ctx );
@@ lines 578-581 @@
 verify( ! kernelTLS.preemption_state.enabled );
 enable_interrupts( __cfaabi_dbg_ctx );
-verify( TL_GET( preemption_state ).enabled );
+verify( TL_GET( preemption_state.enabled ) );
 }
@@ lines 584-588 @@
 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

-verify( TL_GET( preemption_state ).enabled );
+verify( TL_GET( preemption_state.enabled ) );
 disable_interrupts();
 verify( ! kernelTLS.preemption_state.enabled );
@@ lines 642-646 @@
 static bool kernel_abort_called = false;

-void * kernel_abort (void) __attribute__ ((__nothrow__)) {
+void * kernel_abort(void) __attribute__ ((__nothrow__)) {
 // abort cannot be recursively entered by the same or different processors because all signal handlers return when
 // the globalAbort flag is true.
src/libcfa/concurrency/monitor.c
r4990812 → rafd550c
@@ lines 480-484 @@

 // Create the node specific to this wait operation
-wait_ctx_primed( TL_GET( this_thread ), 0 )
+wait_ctx_primed( kernelTLS.this_thread, 0 )

 //save contexts
src/libcfa/concurrency/preemption.c
r4990812 → rafd550c
@@ lines 149-153 @@
 // Disable interrupts by incrementing the counter
 void disable_interrupts() {
-    with( TL_GET( preemption_state ) ) {
+    with( kernelTLS.preemption_state ) {
         enabled = false;
         __attribute__((unused)) unsigned short new_val = disable_count + 1;
@@ lines 160-167 @@
 // If counter reaches 0, execute any pending CtxSwitch
 void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-    processor   * proc = TL_GET( this_processor );      // Cache the processor now since interrupts can start happening after the atomic add
-    thread_desc * thrd = TL_GET( this_thread );         // Cache the thread now since interrupts can start happening after the atomic add
-
-    with( TL_GET( preemption_state ) ){
+    processor   * proc = kernelTLS.this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
+    thread_desc * thrd = kernelTLS.this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
+
+    with( kernelTLS.preemption_state ){
         unsigned short prev = disable_count;
         disable_count -= 1;
@@ lines 185-193 @@
 // Don't execute any pending CtxSwitch even if counter reaches 0
 void enable_interrupts_noPoll() {
-    unsigned short prev = TL_GET( preemption_state ).disable_count;
-    TL_GET( preemption_state ).disable_count -= 1;
+    unsigned short prev = kernelTLS.preemption_state.disable_count;
+    kernelTLS.preemption_state.disable_count -= 1;
     verifyf( prev != 0u, "Incremented from %u\n", prev );               // If this triggers someone is enabled already enabled interrupts
     if( prev == 1 ) {
-        TL_GET( preemption_state ).enabled = true;
+        kernelTLS.preemption_state.enabled = true;
     }
 }
src/libcfa/concurrency/thread.c
r4990812 → rafd550c
@@ lines 100-106 @@
 void yield( void ) {
     // Safety note : This could cause some false positives due to preemption
-    verify( TL_GET( preemption_state ).enabled );
+    verify( TL_GET( preemption_state.enabled ) );
     BlockInternal( TL_GET( this_thread ) );
     // Safety note : This could cause some false positives due to preemption
-    verify( TL_GET( preemption_state ).enabled );
+    verify( TL_GET( preemption_state.enabled ) );
 }
Note: See TracChangeset
for help on using the changeset viewer.