Files: 1 edited

Legend:
    unmodified
  + added
  - removed
src/libcfa/concurrency/preemption.c
(diff from rb69ea6b to r381fdee)

  // Created On       : Mon Jun  5 14:20:42 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Feb  9 16:38:13 2018
- // Update Count     : 14
+ // Last Modified On : Fri Feb  9 14:42:34 2018
+ // Update Count     : 25
  //
…
  }

- enum {
-     PREEMPT_NORMAL    = 0,
-     PREEMPT_TERMINATE = 1,
- };
-
  //=============================================================================================
  // Kernel Preemption logic
…
  // Disable interrupts by incrementing the counter
  void disable_interrupts() {
-     preemption.enabled = false;
-     __attribute__((unused)) unsigned short new_val = preemption.disable_count + 1;
-     preemption.disable_count = new_val;
+     preemption_enabled = false;
+     __attribute__((unused)) unsigned short new_val = disable_preempt_count + 1;
+     disable_preempt_count = new_val;
      verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
  }
…
      thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add

-     unsigned short prev = preemption.disable_count;
-     preemption.disable_count -= 1;
+     unsigned short prev = disable_preempt_count;
+     disable_preempt_count -= 1;
      verify( prev != 0u );                     // If this triggers someone is enabled already enabled interrupts

      // Check if we need to prempt the thread because an interrupt was missed
      if( prev == 1 ) {
-         preemption.enabled = true;
+         preemption_enabled = true;
          if( proc->pending_preemption ) {
              proc->pending_preemption = false;
…
  // Don't execute any pending CtxSwitch even if counter reaches 0
  void enable_interrupts_noPoll() {
-     unsigned short prev = preemption.disable_count;
-     preemption.disable_count -= 1;
+     unsigned short prev = disable_preempt_count;
+     disable_preempt_count -= 1;
      verifyf( prev != 0u, "Incremented from %u\n", prev );   // If this triggers someone is enabled already enabled interrupts
      if( prev == 1 ) {
-         preemption.enabled = true;
+         preemption_enabled = true;
      }
  }
…

      if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
-         abort( "internal error, pthread_sigmask" );
+         abortf( "internal error, pthread_sigmask" );
      }
  }
…

      if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
-         abort( "internal error, pthread_sigmask" );
+         abortf( "internal error, pthread_sigmask" );
      }
  }
…
  // kill wrapper : signal a processor
  static void preempt( processor * this ) {
-     sigval_t value = { PREEMPT_NORMAL };
-     pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
- }
-
- // kill wrapper : signal a processor
- void terminate(processor * this) {
-     this->do_terminate = true;
-     sigval_t value = { PREEMPT_TERMINATE };
-     pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
+     pthread_kill( this->kernel_thread, SIGUSR1 );
  }
…
  // If false : preemption is unsafe and marked as pending
  static inline bool preemption_ready() {
-     bool ready = preemption.enabled && !preemption.in_progress; // Check if preemption is safe
+     bool ready = preemption_enabled && !preemption_in_progress; // Check if preemption is safe
      this_processor->pending_preemption = !ready;                // Adjust the pending flag accordingly
      return ready;
…
  // Called from kernel_startup
  void kernel_start_preemption() {
-     __cfaabi_dbg_print_safe( "Kernel : Starting preemption\n");
+     __cfaabi_dbg_print_safe("Kernel : Starting preemption\n");

      // Start with preemption disabled until ready
-     preemption.enabled = false;
-     preemption.disable_count = 1;
+     preemption_enabled = false;
+     disable_preempt_count = 1;

      // Initialize the event kernel
…
  // Called from kernel_shutdown
  void kernel_stop_preemption() {
-     __cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n");
+     __cfaabi_dbg_print_safe("Kernel : Preemption stopping\n");

      // Block all signals since we are already shutting down
…
      // Preemption is now fully stopped

-     __cfaabi_dbg_print_safe( "Kernel : Preemption stopped\n");
+     __cfaabi_dbg_print_safe("Kernel : Preemption stopped\n");
  }

…
  // Used by thread to control when they want to receive preemption signals
  void ?{}( preemption_scope & this, processor * proc ) {
-     (this.alarm){ proc, 0`cfa_s, 0`cfa_s };
+     (this.alarm){ proc, zero_time, zero_time };
      this.proc = proc;
      this.proc->preemption_alarm = &this.alarm;
…
      disable_interrupts();

-     update_preemption( this.proc, 0`cfa_s );
+     update_preemption( this.proc, zero_time );
  }

…
      __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )

-     // SKULLDUGGERY: if a thread creates a processor and the immediately deletes it,
-     // the interrupt that is supposed to force the kernel thread to preempt might arrive
-     // before the kernel thread has even started running. When that happens an iterrupt
-     // we a null 'this_processor' will be caught, just ignore it.
-     if(!this_processor) return;
-
-     choose(sfp->si_value.sival_int) {
-         case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
-         case PREEMPT_TERMINATE: verify(this_processor->do_terminate);
-         default:
-             abort( "internal error, signal value is %d", sfp->si_value.sival_int );
-     }
-
      // Check if it is safe to preempt here
      if( !preemption_ready() ) { return; }

-     __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
+     __cfaabi_dbg_print_buffer_decl(" KERNEL: preempting core %p (%p).\n", this_processor, this_thread);

-     preemption.in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
+     preemption_in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
      signal_unblock( SIGUSR1 );      // We are about to CtxSwitch out of the signal handler, let other handlers in
-     preemption.in_progress = false; // Clear the in progress flag
+     preemption_in_progress = false; // Clear the in progress flag

      // Preemption can occur here
…

      if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
-         abort( "internal error, pthread_sigmask" );
+         abortf( "internal error, pthread_sigmask" );
      }

…
              continue;
          case EINVAL :
-             abort("Timeout was invalid.");
+             abortf("Timeout was invalid.");
          default:
-             abort("Unhandled error %d", err);
+             abortf("Unhandled error %d", err);
      }
…
      assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

-     // __cfaabi_dbg_print_safe( "Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
+     // __cfaabi_dbg_print_safe("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
      // Switch on the code (a.k.a. the sender) to
      switch( info.si_code )
…
          case SI_TIMER:
          case SI_KERNEL:
-             // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n");
+             // __cfaabi_dbg_print_safe("Kernel : Preemption thread tick\n");
              lock( event_kernel->lock __cfaabi_dbg_ctx2 );
              tick_preemption();
…

  EXIT:
-     __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n");
+     __cfaabi_dbg_print_safe("Kernel : Preemption thread stopping\n");
      return NULL;
  }
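
Aside from renaming the preemption state flags (preemption.enabled, preemption.disable_count, and preemption.in_progress become preemption_enabled, disable_preempt_count, and preemption_in_progress), the main functional change is that preempt() no longer queues SIGUSR1 with an integer payload through pthread_sigqueue(); it delivers the bare signal with pthread_kill(), which also removes the need for the PREEMPT_NORMAL / PREEMPT_TERMINATE codes and the choose block in the signal handler. Below is a minimal standalone C sketch of that signalling pattern; it is an illustration only, not code from this changeset, and the names (kernel_thread_main, preempt_requested, sigusr1_handler) are invented for the example.

// Sketch: delivering SIGUSR1 to a specific pthread with pthread_kill(),
// as the new preempt() does, instead of queueing it with pthread_sigqueue().
#define _GNU_SOURCE            // only needed for the commented-out pthread_sigqueue() variant
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t preempt_requested = 0;

// With pthread_sigqueue() the handler could also inspect info->si_value.sival_int
// (what the removed PREEMPT_NORMAL / PREEMPT_TERMINATE codes were for); with
// pthread_kill() there is no payload, only the signal itself.
static void sigusr1_handler( int sig, siginfo_t * info, void * ctx ) {
    (void)sig; (void)info; (void)ctx;
    preempt_requested = 1;
}

// Stand-in for a processor's kernel-thread main loop.
static void * kernel_thread_main( void * arg ) {
    (void)arg;
    while( ! preempt_requested ) sleep( 1 );   // sleep is interrupted when the signal arrives
    return NULL;
}

int main( void ) {
    struct sigaction act = { 0 };
    act.sa_sigaction = sigusr1_handler;
    act.sa_flags     = SA_SIGINFO;
    sigemptyset( &act.sa_mask );
    sigaction( SIGUSR1, &act, NULL );

    pthread_t kernel_thread;
    pthread_create( &kernel_thread, NULL, kernel_thread_main, NULL );

    // Old style (GNU extension): queue SIGUSR1 with an integer payload.
    //   sigval_t value = { 0 };
    //   pthread_sigqueue( kernel_thread, SIGUSR1, value );

    // New style: just deliver SIGUSR1 to that specific thread.
    pthread_kill( kernel_thread, SIGUSR1 );

    pthread_join( kernel_thread, NULL );
    printf( "kernel thread observed the preemption signal\n" );
    return 0;
}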