Changeset eef8dfb for libcfa/src/concurrency/preemption.cfa
- Timestamp: Jan 7, 2021, 2:55:57 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 58fe85a
- Parents: bdfc032, 44e37ef

Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to all the changes relative to each parent.

- Files: 1 edited
Legend: unmodified lines have no prefix; added lines are prefixed with "+"; removed lines are prefixed with "-".
libcfa/src/concurrency/preemption.cfa
rbdfc032 -> reef8dfb

@@ -10 +10 @@
  // Created On       : Mon Jun  5 14:20:42 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Thu Dec  5 16:34:05 2019
- // Update Count     : 43
+ // Last Modified On : Fri Nov  6 07:42:13 2020
+ // Update Count     : 54
  //

@@ -19 +19 @@
  #include <assert.h>

- extern "C" {
  #include <errno.h>
  #include <stdio.h>
  ...
  #include <unistd.h>
  #include <limits.h>                       // PTHREAD_STACK_MIN
- }

  #include "bits/signal.hfa"
+ #include "kernel_private.hfa"

  #if !defined(__CFA_DEFAULT_PREEMPTION__)

@@ -39 +38 @@
  // FwdDeclarations : timeout handlers
  static void preempt( processor   * this );
- static void timeout( thread_desc * this );
+ static void timeout( $thread * this );

  // FwdDeclarations : Signal handlers
  static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
+ static void sigHandler_alarm    ( __CFA_SIGPARMS__ );
  static void sigHandler_segv     ( __CFA_SIGPARMS__ );
  static void sigHandler_ill      ( __CFA_SIGPARMS__ );

@@ -56 +56 @@
  #elif defined( __x86_64 )
  #define CFA_REG_IP gregs[REG_RIP]
- #elif defined( __ARM_ARCH )
+ #elif defined( __arm__ )
  #define CFA_REG_IP arm_pc
+ #elif defined( __aarch64__ )
+ #define CFA_REG_IP pc
  #else
- #error unknown hardware architecture
+ #error unsupported hardware architecture
  #endif

@@ -83 +85 @@
  // Get next expired node
  static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
-     if( !alarms->head ) return 0p;                          // If no alarms return null
-     if( alarms->head->alarm >= currtime ) return 0p;        // If alarms head not expired return null
+     if( ! & (*alarms)`first ) return 0p;                    // If no alarms return null
+     if( (*alarms)`first.alarm >= currtime ) return 0p;      // If alarms head not expired return null
      return pop(alarms);                                     // Otherwise just pop head
  }

  // Tick one frame of the Discrete Event Simulation for alarms
- static void tick_preemption() {
+ static void tick_preemption(void) {
      alarm_node_t * node = 0p;                               // Used in the while loop but cannot be declared in the while condition
      alarm_list_t * alarms = &event_kernel->alarms;          // Local copy for ease of reading
  ...
      while( node = get_expired( alarms, currtime ) ) {
          // __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" );
+         Duration period = node->period;
+         if( period == 0 ) {
+             node->set = false;                              // Node is one-shot, just mark it as not pending
+         }

          // Check if this is a kernel
-         if( node->kernel_alarm ) {
+         if( node->type == Kernel ) {
              preempt( node->proc );
          }
+         else if( node->type == User ) {
+             timeout( node->thrd );
+         }
          else {
-             timeout( node->thrd );
+             node->callback(*node);
          }

          // Check if this is a periodic alarm
-         Duration period = node->period;
          if( period > 0 ) {
              // __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv );
  ...
              insert( alarms, node );                         // Reinsert the node for the next time it triggers
          }
-         else {
-             node->set = false;                              // Node is one-shot, just mark it as not pending
-         }
      }

      // If there are still alarms pending, reset the timer
-     if( alarms->head ) {
-         __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
-         Duration delta = alarms->head->alarm - currtime;
-         Duration caped = max(delta, 50`us);
+     if( & (*alarms)`first ) {
+         __cfadbg_print_buffer_decl(preemption, " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
+         Duration delta = (*alarms)`first.alarm - currtime;
+         Duration capped = max(delta, 50`us);
          // itimerval tim  = { caped };
          // __cfaabi_dbg_print_buffer_local( "    Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);

-         __kernel_set_timer( caped );
+         __kernel_set_timer( capped );
      }
  }

@@ -158 +163 @@
  // Kernel Signal Tools
  //=============================================================================================
-
- __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )
+ // In a user-level threading system, there are a handful of thread-local variables where this problem occurs on the ARM.
+ //
+ // For each kernel thread running user-level threads, there is a flag variable to indicate if interrupts are
+ // enabled/disabled for that kernel thread. Therefore, this variable is made thread local.
+ //
+ // For example, this code fragment sets the state of the "interrupt" variable in thread-local memory.
+ //
+ //    _Thread_local volatile int interrupts;
+ //    int main() {
+ //        interrupts = 0;                                   // disable interrupts
+ //    }
+ //
+ // which generates the following code on the ARM
+ //
+ //    (gdb) disassemble main
+ //    Dump of assembler code for function main:
+ //       0x0000000000000610 <+0>:  mrs  x1, tpidr_el0
+ //       0x0000000000000614 <+4>:  mov  w0, #0x0           // #0
+ //       0x0000000000000618 <+8>:  add  x1, x1, #0x0, lsl #12
+ //       0x000000000000061c <+12>: add  x1, x1, #0x10
+ //       0x0000000000000620 <+16>: str  wzr, [x1]
+ //       0x0000000000000624 <+20>: ret
+ //
+ // The mrs moves a pointer from coprocessor register tpidr_el0 into register x1. Register w0 is set to 0. The two adds
+ // increase the TLS pointer with the displacement (offset) 0x10, which is the location in the TLS of variable
+ // "interrupts". Finally, 0 is stored into "interrupts" through the pointer in register x1 that points into the
+ // TLS. Now once x1 has the pointer to the location of the TLS for kernel thread N, it can be preempted at the
+ // user level and the user thread is put on the user-level ready-queue. When the preempted thread gets to the front of
+ // the user-level ready-queue it is run on kernel thread M. It now stores 0 into "interrupts" back on kernel thread N,
+ // turning off interrupts on the wrong kernel thread.
+ //
+ // On the x86, the following code is generated for the same code fragment.
+ //
+ //    (gdb) disassemble main
+ //    Dump of assembler code for function main:
+ //       0x0000000000400420 <+0>:  movl $0x0,%fs:0xfffffffffffffffc
+ //       0x000000000040042c <+12>: xor  %eax,%eax
+ //       0x000000000040042e <+14>: retq
+ //
+ // and there is base-displacement addressing used to atomically reset variable "interrupts" off of the TLS pointer in
+ // register "fs".
+ //
+ // Hence, the ARM has base-displacement addressing for the general-purpose registers, BUT not for the coprocessor
+ // registers. As a result, generating the address for the write into variable "interrupts" is no longer atomic.
+ //
+ // Note this problem does NOT occur when just using multiple kernel threads because the preemption ALWAYS restarts the
+ // thread on the same kernel thread.
+ //
+ // The obvious question is why does ARM use a coprocessor register to store the TLS pointer given that coprocessor
+ // registers are second-class registers with respect to the instruction set. One possible answer is that they did not
+ // want to dedicate one of the general registers to hold the TLS pointer and there was a free coprocessor register
+ // available.
+
+ //-----------------------------------------------------------------------------
+ // Some assembly required
+ #define __cfaasm_label(label, when) when: asm volatile goto(".global __cfaasm_" #label "_" #when "\n" "__cfaasm_" #label "_" #when ":":::"memory":when)
+
+ //----------
+ // special case for preemption since used often
+ bool __preemption_enabled() {
+     // create an assembler label before
+     // marked as clobber all to avoid movement
+     __cfaasm_label(check, before);
+
+     // access tls as normal
+     bool enabled = __cfaabi_tls.preemption_state.enabled;
+
+     // create an assembler label after
+     // marked as clobber all to avoid movement
+     __cfaasm_label(check, after);
+     return enabled;
+ }
+
+ struct asm_region {
+     void * before;
+     void * after;
+ };
+
+ static inline bool __cfaasm_in( void * ip, struct asm_region & region ) {
+     return ip >= region.before && ip <= region.after;
+ }
+
+ //----------
+ // Get data from the TLS block
+ // struct asm_region __cfaasm_get;
+ uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); // no inline to avoid problems
+ uintptr_t __cfatls_get( unsigned long int offset ) {
+     // create an assembler label before
+     // marked as clobber all to avoid movement
+     __cfaasm_label(get, before);
+
+     // access tls as normal (except for pointer arithmetic)
+     uintptr_t val = *(uintptr_t*)((uintptr_t)&__cfaabi_tls + offset);
+
+     // create an assembler label after
+     // marked as clobber all to avoid movement
+     __cfaasm_label(get, after);
+     return val;
+ }

  extern "C" {
      // Disable interrupts by incrementing the counter
      void disable_interrupts() {
-         with( kernelTLS.preemption_state ) {
+         // create an assembler label before
+         // marked as clobber all to avoid movement
+         __cfaasm_label(dsable, before);
+
+         with( __cfaabi_tls.preemption_state ) {
              #if GCC_VERSION > 50000
              static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
  ...
              verify( new_val < 65_000u );                    // If this triggers someone is disabling interrupts without enabling them
          }
+
+         // create an assembler label after
+         // marked as clobber all to avoid movement
+         __cfaasm_label(dsable, after);
+
      }

      // Enable interrupts by decrementing the counter
-     // If counter reaches 0, execute any pending CtxSwitch
+     // If counter reaches 0, execute any pending __cfactx_switch
      void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-         processor   * proc = kernelTLS.this_processor;      // Cache the processor now since interrupts can start happening after the atomic store
-         thread_desc * thrd = kernelTLS.this_thread;         // Cache the thread now since interrupts can start happening after the atomic store
-
-         with( kernelTLS.preemption_state ){
+         // Cache the processor now since interrupts can start happening after the atomic store
+         processor * proc = __cfaabi_tls.this_processor;
+         /* paranoid */ verify( proc );
+
+         with( __cfaabi_tls.preemption_state ){
              unsigned short prev = disable_count;
              disable_count -= 1;
-             verify( prev != 0u );                           // If this triggers, someone is enabling already-enabled interrupts
+
+             // If this triggers, someone is enabling already-enabled interrupts
+             /* paranoid */ verify( prev != 0u );

              // Check if we need to preempt the thread because an interrupt was missed
              if( prev == 1 ) {
                  #if GCC_VERSION > 50000
                  static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
                  #endif
  ...
                  if( proc->pending_preemption ) {
                      proc->pending_preemption = false;
-                     BlockInternal( thrd );
+                     force_yield( __POLL_PREEMPTION );
                  }
              }
  ...

      // Enable interrupts by decrementing the counter
-     // Don't execute any pending CtxSwitch even if counter reaches 0
+     // Don't execute any pending __cfactx_switch even if counter reaches 0
      void enable_interrupts_noPoll() {
-         unsigned short prev = kernelTLS.preemption_state.disable_count;
-         kernelTLS.preemption_state.disable_count -= 1;
-         verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers, someone is enabling already-enabled interrupts
+         unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
+         __cfaabi_tls.preemption_state.disable_count -= 1;
+         // If this triggers, someone is enabling already-enabled interrupts
+         /* paranoid */ verifyf( prev != 0u, "Incremented from %u\n", prev );
          if( prev == 1 ) {
              #if GCC_VERSION > 50000
-             static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
+             static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
              #endif
              // Set enabled flag to true
              // should be atomic to avoid preemption in the middle of the operation.
              // use memory order RELAXED since there are no inter-thread requirements on this variable
-             __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
+             __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED);

              // Signal the compiler that a fence is needed but only for signal handlers
  ...
          }
      }
  }
+
+ //-----------------------------------------------------------------------------
+ // Kernel Signal Debug
+ void __cfaabi_check_preemption() {
+     bool ready = __preemption_enabled();
+     if(!ready) { abort("Preemption should be ready"); }
+
+     __cfaasm_label(debug, before);
+
+     sigset_t oldset;
+     int ret;
+     ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary
+     if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
+
+     ret = sigismember(&oldset, SIGUSR1);
+     if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
+     if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
+
+     ret = sigismember(&oldset, SIGALRM);
+     if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
+     if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
+
+     ret = sigismember(&oldset, SIGTERM);
+     if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
+     if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
+
+     __cfaasm_label(debug, after);
+ }
+
+ #ifdef __CFA_WITH_VERIFY__
+ bool __cfaabi_dbg_in_kernel() {
+     return !__preemption_enabled();
+ }
+ #endif
+
+ #undef __cfaasm_label
+
+ //-----------------------------------------------------------------------------
+ // Signal handling

  // sigprocmask wrapper : unblock a single signal
  ...

      if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
          abort( "internal error, pthread_sigmask" );
      }
  }
  ...

  // reserved for future use
- static void timeout( thread_desc * this ) {
-     //TODO : implement waking threads
- }
+ static void timeout( $thread * this ) {
+     unpark( this );
+ }
+
+ //-----------------------------------------------------------------------------
+ // Some assembly required
+ #if defined( __i386 )
+     #ifdef __PIC__
+         #define RELOC_PRELUDE( label ) \
+             "calll   .Lcfaasm_prelude_" #label "$pb\n\t" \
+             ".Lcfaasm_prelude_" #label "$pb:\n\t" \
+             "popl    %%eax\n\t" \
+             ".Lcfaasm_prelude_" #label "_end:\n\t" \
+             "addl    $_GLOBAL_OFFSET_TABLE_+(.Lcfaasm_prelude_" #label "_end-.Lcfaasm_prelude_" #label "$pb), %%eax\n\t"
+         #define RELOC_PREFIX ""
+         #define RELOC_SUFFIX "@GOT(%%eax)"
+     #else
+         #define RELOC_PREFIX "$"
+         #define RELOC_SUFFIX ""
+     #endif
+     #define __cfaasm_label( label ) struct asm_region label = \
+         ({ \
+             struct asm_region region; \
+             asm( \
+                 RELOC_PRELUDE( label ) \
+                 "movl " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \
+                 "movl " RELOC_PREFIX "__cfaasm_" #label "_after"  RELOC_SUFFIX ", %[va]\n\t" \
+                 : [vb]"=r"(region.before), [va]"=r"(region.after) \
+             ); \
+             region; \
+         });
+ #elif defined( __x86_64 )
+     #ifdef __PIC__
+         #define RELOC_PREFIX ""
+         #define RELOC_SUFFIX "@GOTPCREL(%%rip)"
+     #else
+         #define RELOC_PREFIX "$"
+         #define RELOC_SUFFIX ""
+     #endif
+     #define __cfaasm_label( label ) struct asm_region label = \
+         ({ \
+             struct asm_region region; \
+             asm( \
+                 "movq " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \
+                 "movq " RELOC_PREFIX "__cfaasm_" #label "_after"  RELOC_SUFFIX ", %[va]\n\t" \
+                 : [vb]"=r"(region.before), [va]"=r"(region.after) \
+             ); \
+             region; \
+         });
+ #elif defined( __aarch64__ )
+     #ifdef __PIC__
+         // Note that this works only for gcc
+         #define __cfaasm_label( label ) struct asm_region label = \
+             ({ \
+                 struct asm_region region; \
+                 asm( \
+                     "adrp %[vb], _GLOBAL_OFFSET_TABLE_"                              "\n\t" \
+                     "ldr  %[vb], [%[vb], #:gotpage_lo15:__cfaasm_" #label "_before]" "\n\t" \
+                     "adrp %[va], _GLOBAL_OFFSET_TABLE_"                              "\n\t" \
+                     "ldr  %[va], [%[va], #:gotpage_lo15:__cfaasm_" #label "_after]"  "\n\t" \
+                     : [vb]"=r"(region.before), [va]"=r"(region.after) \
+                 ); \
+                 region; \
+             });
+     #else
+         #error this is not the right thing to do
+         /*
+         #define __cfaasm_label( label ) struct asm_region label = \
+             ({ \
+                 struct asm_region region; \
+                 asm( \
+                     "adrp %[vb], __cfaasm_" #label "_before"              "\n\t" \
+                     "add  %[vb], %[vb], :lo12:__cfaasm_" #label "_before" "\n\t" \
+                     "adrp %[va], :got:__cfaasm_" #label "_after"          "\n\t" \
+                     "add  %[va], %[va], :lo12:__cfaasm_" #label "_after"  "\n\t" \
+                     : [vb]"=r"(region.before), [va]"=r"(region.after) \
+                 ); \
+                 region; \
+             });
+         */
+     #endif
+ #else
+     #error unknown hardware architecture
+ #endif

  // KERNEL ONLY
- // Check if a CtxSwitch signal handler should defer
+ // Check if a __cfactx_switch signal handler should defer
  // If true  : preemption is safe
  // If false : preemption is unsafe and marked as pending
- static inline bool preemption_ready() {
+ static inline bool preemption_ready( void * ip ) {
+     // Get all the regions for which it is not safe to preempt
+     __cfaasm_label( get    );
+     __cfaasm_label( check  );
+     __cfaasm_label( dsable );
+     __cfaasm_label( debug  );
+
      // Check if preemption is safe
-     bool ready = kernelTLS.preemption_state.enabled && !kernelTLS.preemption_state.in_progress;
+     bool ready = true;
+     if( __cfaasm_in( ip, get    ) ) { ready = false; goto EXIT; };
+     if( __cfaasm_in( ip, check  ) ) { ready = false; goto EXIT; };
+     if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; };
+     if( __cfaasm_in( ip, debug  ) ) { ready = false; goto EXIT; };
+     if( !__cfaabi_tls.preemption_state.enabled ) { ready = false; goto EXIT; };
+     if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
+
+ EXIT:
      // Adjust the pending flag accordingly
-     kernelTLS.this_processor->pending_preemption = !ready;
+     __cfaabi_tls.this_processor->pending_preemption = !ready;
      return ready;
  }

@@ -291 +539 @@
  // Startup routine to activate preemption
  // Called from kernel_startup
- void kernel_start_preemption() {
+ void __kernel_alarm_startup() {
      __cfaabi_dbg_print_safe( "Kernel : Starting preemption\n" );

      // Start with preemption disabled until ready
-     kernelTLS.preemption_state.enabled = false;
-     kernelTLS.preemption_state.disable_count = 1;
+     __cfaabi_tls.preemption_state.enabled = false;
+     __cfaabi_tls.preemption_state.disable_count = 1;

      // Initialize the event kernel
  ...

      // Setup proper signal handlers
-     __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler
+     __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler
+     __cfaabi_sigaction( SIGALRM, sigHandler_alarm    , SA_SIGINFO | SA_RESTART ); // debug handler

      signal_block( SIGALRM );

-     alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
+     alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p );
  }

  // Shutdown routine to deactivate preemption
  // Called from kernel_shutdown
- void kernel_stop_preemption() {
+ void __kernel_alarm_shutdown() {
      __cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n" );
  ...
      // Wait for the preemption thread to finish

-     pthread_join( alarm_thread, 0p );
-     free( alarm_stack );
+     __destroy_pthread( alarm_thread, alarm_stack, 0p );

      // Preemption is now fully stopped
  ...

@@ -353 +601 @@
  // Kernel Signal Handlers
  //=============================================================================================
+ __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )

  // Context switch signal handler
  // Receives SIGUSR1 signal and causes the current thread to yield
  static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
-     __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )
+     void * ip = (void *)(cxt->uc_mcontext.CFA_REG_IP);
+     __cfaabi_dbg_debug_do( last_interrupt = ip; )

      // SKULLDUGGERY: if a thread creates a processor and then immediately deletes it,
      // the interrupt that is supposed to force the kernel thread to preempt might arrive
-     // before the kernel thread has even started running. When that happens an iterrupt
-     // we a null 'this_processor' will be caught, just ignore it.
-     if(! kernelTLS.this_processor ) return;
+     // before the kernel thread has even started running. When that happens, an interrupt
+     // with a null 'this_processor' will be caught, just ignore it.
+     if(! __cfaabi_tls.this_processor ) return;

      choose(sfp->si_value.sival_int) {
          case PREEMPT_NORMAL   : ;                           // Normal case, nothing to do here
-         case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
+         case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
          default:
              abort( "internal error, signal value is %d", sfp->si_value.sival_int );
  ...

      // Check if it is safe to preempt here
-     if( !preemption_ready() ) { return; }
-
-     __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
+     if( !preemption_ready( ip ) ) { return; }
+
+     __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );

      // Sync flag : prevent recursive calls to the signal handler
-     kernelTLS.preemption_state.in_progress = true;
+     __cfaabi_tls.preemption_state.in_progress = true;

      // Clear sighandler mask before context switching.
  ...
      }

-     // TODO: this should go in finish action
      // Clear the in progress flag
-     kernelTLS.preemption_state.in_progress = false;
+     __cfaabi_tls.preemption_state.in_progress = false;

      // Preemption can occur here

-     BlockInternal( kernelTLS.this_thread );                 // Do the actual CtxSwitch
- }
+     force_yield( __ALARM_PREEMPTION );                      // Do the actual __cfactx_switch
+ }
+
+ static void sigHandler_alarm( __CFA_SIGPARMS__ ) {
+     abort("SIGALRM should never reach the signal handler");
+ }
+
+ #if !defined(__CFA_NO_STATISTICS__)
+     int __print_alarm_stats = 0;
+ #endif

  // Main of the alarm thread
  // Waits on SIGALRM and sends SIGUSR1 to whoever needs it
  static void * alarm_loop( __attribute__((unused)) void * args ) {
+     __processor_id_t id;
+     id.full_proc = false;
+     id.id = doregister(&id);
+     __cfaabi_tls.this_proc_id = &id;
+
+     #if !defined(__CFA_NO_STATISTICS__)
+         struct __stats_t local_stats;
+         __cfaabi_tls.this_stats = &local_stats;
+         __init_stats( &local_stats );
+     #endif
+
      // Block sigalrms to control when they arrive
      sigset_t mask;
  ...
  EXIT:
      __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
+     unregister(&id);
+
+     #if !defined(__CFA_NO_STATISTICS__)
+         if( 0 != __print_alarm_stats ) {
+             __print_stats( &local_stats, __print_alarm_stats, "Alarm", "Thread", 0p );
+         }
+     #endif
      return 0p;
  }
-
- //=============================================================================================
- // Kernel Signal Debug
- //=============================================================================================
-
- void __cfaabi_check_preemption() {
-     bool ready = kernelTLS.preemption_state.enabled;
-     if(!ready) { abort("Preemption should be ready"); }
-
-     sigset_t oldset;
-     int ret;
-     ret = pthread_sigmask(0, 0p, &oldset);
-     if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
-
-     ret = sigismember(&oldset, SIGUSR1);
-     if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-     if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
-
-     ret = sigismember(&oldset, SIGALRM);
-     if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-     if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
-
-     ret = sigismember(&oldset, SIGTERM);
-     if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-     if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
- }
-
- #ifdef __CFA_WITH_VERIFY__
- bool __cfaabi_dbg_in_kernel() {
-     return !kernelTLS.preemption_state.enabled;
- }
- #endif

  // Local Variables: //
  ...