Changeset ff29f08 for src/libcfa/concurrency/preemption.c
- Timestamp:
- May 18, 2018, 2:09:21 PM (7 years ago)
- Branches:
- new-env, with_gc
- Children:
- 2472a19
- Parents:
- f6f0cca3 (diff), c7d8100c (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/preemption.c
rf6f0cca3 rff29f08 149 149 // Disable interrupts by incrementing the counter 150 150 void disable_interrupts() { 151 TL_GET( preemption_state ).enabled = false; 152 __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1; 153 TL_GET( preemption_state ).disable_count = new_val; 154 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 151 with( kernelTLS.preemption_state ) { 152 enabled = false; 153 __attribute__((unused)) unsigned short new_val = disable_count + 1; 154 disable_count = new_val; 155 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 156 } 155 157 } 156 158 … … 158 160 // If counter reaches 0, execute any pending CtxSwitch 159 161 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 160 processor * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add 161 thread_desc * thrd = TL_GET( this_thread ); // Cache the thread now since interrupts can start happening after the atomic add 162 163 unsigned short prev = TL_GET( preemption_state ).disable_count; 164 TL_GET( preemption_state ).disable_count -= 1; 165 verify( prev != 0u ); // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 166 167 // Check if we need to prempt the thread because an interrupt was missed 168 if( prev == 1 ) { 169 TL_GET( preemption_state ).enabled = true; 170 if( proc->pending_preemption ) { 171 proc->pending_preemption = false; 172 BlockInternal( thrd ); 162 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add 163 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic add 164 165 with( kernelTLS.preemption_state ){ 166 unsigned short prev = disable_count; 167 disable_count -= 1; 168 verify( prev != 0u ); // If 
this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 169 170 // Check if we need to prempt the thread because an interrupt was missed 171 if( prev == 1 ) { 172 enabled = true; 173 if( proc->pending_preemption ) { 174 proc->pending_preemption = false; 175 BlockInternal( thrd ); 176 } 173 177 } 174 178 } … … 181 185 // Don't execute any pending CtxSwitch even if counter reaches 0 182 186 void enable_interrupts_noPoll() { 183 unsigned short prev = TL_GET( preemption_state ).disable_count;184 TL_GET( preemption_state ).disable_count -= 1;187 unsigned short prev = kernelTLS.preemption_state.disable_count; 188 kernelTLS.preemption_state.disable_count -= 1; 185 189 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 186 190 if( prev == 1 ) { 187 TL_GET( preemption_state ).enabled = true;191 kernelTLS.preemption_state.enabled = true; 188 192 } 189 193 } … … 230 234 } 231 235 232 236 // KERNEL ONLY 233 237 // Check if a CtxSwitch signal handler shoud defer 234 238 // If true : preemption is safe 235 239 // If false : preemption is unsafe and marked as pending 236 240 static inline bool preemption_ready() { 237 bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe 238 TL_GET( this_processor )->pending_preemption = !ready; // Adjust the pending flag accordingly 241 // Check if preemption is safe 242 bool ready = kernelTLS.preemption_state.enabled && ! 
kernelTLS.preemption_state.in_progress; 243 244 // Adjust the pending flag accordingly 245 kernelTLS.this_processor->pending_preemption = !ready; 239 246 return ready; 240 247 } … … 250 257 251 258 // Start with preemption disabled until ready 252 TL_GET( preemption_state ).enabled = false;253 TL_GET( preemption_state ).disable_count = 1;259 kernelTLS.preemption_state.enabled = false; 260 kernelTLS.preemption_state.disable_count = 1; 254 261 255 262 // Initialize the event kernel … … 316 323 // before the kernel thread has even started running. When that happens an iterrupt 317 324 // we a null 'this_processor' will be caught, just ignore it. 318 if(! TL_GET( this_processor )) return;325 if(! kernelTLS.this_processor ) return; 319 326 320 327 choose(sfp->si_value.sival_int) { 321 328 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 322 case PREEMPT_TERMINATE: verify( TL_GET( this_processor )->do_terminate);329 case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate); 323 330 default: 324 331 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 328 335 if( !preemption_ready() ) { return; } 329 336 330 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread); 331 332 TL_GET( preemption_state ).in_progress = true; // Sync flag : prevent recursive calls to the signal handler 333 signal_unblock( SIGUSR1 ); // We are about to CtxSwitch out of the signal handler, let other handlers in 334 TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag 337 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread ); 338 339 // Sync flag : prevent recursive calls to the signal handler 340 kernelTLS.preemption_state.in_progress = true; 341 342 // We are about to CtxSwitch out of the signal handler, let other handlers in 343 signal_unblock( SIGUSR1 ); 344 345 // TODO: this should go in finish action 346 // Clear 
the in progress flag 347 kernelTLS.preemption_state.in_progress = false; 335 348 336 349 // Preemption can occur here 337 350 338 BlockInternal( (thread_desc*)TL_GET( this_thread )); // Do the actual CtxSwitch351 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch 339 352 } 340 353 … … 344 357 // Block sigalrms to control when they arrive 345 358 sigset_t mask; 359 sigfillset(&mask); 360 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 361 abort( "internal error, pthread_sigmask" ); 362 } 363 346 364 sigemptyset( &mask ); 347 365 sigaddset( &mask, SIGALRM ); 348 349 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {350 abort( "internal error, pthread_sigmask" );351 }352 366 353 367 // Main loop … … 400 414 } 401 415 416 //============================================================================================= 417 // Kernel Signal Debug 418 //============================================================================================= 419 420 void __cfaabi_check_preemption() { 421 bool ready = kernelTLS.preemption_state.enabled; 422 if(!ready) { abort("Preemption should be ready"); } 423 424 sigset_t oldset; 425 int ret; 426 ret = sigprocmask(0, NULL, &oldset); 427 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 428 429 ret = sigismember(&oldset, SIGUSR1); 430 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 431 432 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 433 } 434 402 435 // Local Variables: // 403 436 // mode: c //
Note:
See TracChangeset
for help on using the changeset viewer.