Changeset b4a835d for src/libcfa/concurrency/preemption.c
- Timestamp:
- May 8, 2018, 9:27:25 PM (6 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children:
- df22130
- Parents:
- 3d60c08 (diff), afd550c (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/preemption.c
r3d60c08 rb4a835d 149 149 // Disable interrupts by incrementing the counter 150 150 void disable_interrupts() { 151 with( TL_GET( preemption_state )) {151 with( kernelTLS.preemption_state ) { 152 152 enabled = false; 153 153 __attribute__((unused)) unsigned short new_val = disable_count + 1; … … 160 160 // If counter reaches 0, execute any pending CtxSwitch 161 161 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 162 processor * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add163 thread_desc * thrd = TL_GET( this_thread ); // Cache the thread now since interrupts can start happening after the atomic add164 165 with( TL_GET( preemption_state )){162 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add 163 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic add 164 165 with( kernelTLS.preemption_state ){ 166 166 unsigned short prev = disable_count; 167 167 disable_count -= 1; … … 185 185 // Don't execute any pending CtxSwitch even if counter reaches 0 186 186 void enable_interrupts_noPoll() { 187 unsigned short prev = TL_GET( preemption_state ).disable_count;188 TL_GET( preemption_state ).disable_count -= 1;187 unsigned short prev = kernelTLS.preemption_state.disable_count; 188 kernelTLS.preemption_state.disable_count -= 1; 189 189 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 190 190 if( prev == 1 ) { 191 TL_GET( preemption_state ).enabled = true;191 kernelTLS.preemption_state.enabled = true; 192 192 } 193 193 } … … 234 234 } 235 235 236 236 // KERNEL ONLY 237 237 // Check if a CtxSwitch signal handler shoud defer 238 238 // If true : preemption is safe 239 239 // If false : preemption is unsafe and marked as pending 240 240 static inline bool preemption_ready() { 241 bool ready = 
TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe 242 TL_GET( this_processor )->pending_preemption = !ready; // Adjust the pending flag accordingly 241 // Check if preemption is safe 242 bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress; 243 244 // Adjust the pending flag accordingly 245 kernelTLS.this_processor->pending_preemption = !ready; 243 246 return ready; 244 247 } … … 254 257 255 258 // Start with preemption disabled until ready 256 TL_GET( preemption_state ).enabled = false;257 TL_GET( preemption_state ).disable_count = 1;259 kernelTLS.preemption_state.enabled = false; 260 kernelTLS.preemption_state.disable_count = 1; 258 261 259 262 // Initialize the event kernel … … 320 323 // before the kernel thread has even started running. When that happens an iterrupt 321 324 // we a null 'this_processor' will be caught, just ignore it. 322 if(! TL_GET( this_processor )) return;325 if(! 
kernelTLS.this_processor ) return; 323 326 324 327 choose(sfp->si_value.sival_int) { 325 328 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 326 case PREEMPT_TERMINATE: verify( TL_GET( this_processor )->do_terminate);329 case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate); 327 330 default: 328 331 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 332 335 if( !preemption_ready() ) { return; } 333 336 334 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", TL_GET( this_processor ), TL_GET( this_thread ) ); 335 336 TL_GET( preemption_state ).in_progress = true; // Sync flag : prevent recursive calls to the signal handler 337 signal_unblock( SIGUSR1 ); // We are about to CtxSwitch out of the signal handler, let other handlers in 338 TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag 337 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread ); 338 339 // Sync flag : prevent recursive calls to the signal handler 340 kernelTLS.preemption_state.in_progress = true; 341 342 // We are about to CtxSwitch out of the signal handler, let other handlers in 343 signal_unblock( SIGUSR1 ); 344 345 // TODO: this should go in finish action 346 // Clear the in progress flag 347 kernelTLS.preemption_state.in_progress = false; 339 348 340 349 // Preemption can occur here 341 350 342 BlockInternal( (thread_desc*)TL_GET( this_thread )); // Do the actual CtxSwitch351 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch 343 352 } 344 353 … … 409 418 410 419 void __cfaabi_check_preemption() { 411 bool ready = TL_GET( preemption_state ).enabled;420 bool ready = kernelTLS.preemption_state.enabled; 412 421 if(!ready) { abort("Preemption should be ready"); } 413 422
Note: See TracChangeset
for help on using the changeset viewer.