Changeset 58fe85a for libcfa/src/concurrency/preemption.cfa
- Timestamp:
- Jan 7, 2021, 3:27:00 PM (5 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 2b4daf2, 64aeca0
- Parents:
- 3c64c668 (diff), eef8dfb (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/preemption.cfa
r3c64c668 r58fe85a 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Dec 5 16:34:05 201913 // Update Count : 4312 // Last Modified On : Fri Nov 6 07:42:13 2020 13 // Update Count : 54 14 14 // 15 15 … … 19 19 #include <assert.h> 20 20 21 extern "C" {22 21 #include <errno.h> 23 22 #include <stdio.h> … … 25 24 #include <unistd.h> 26 25 #include <limits.h> // PTHREAD_STACK_MIN 27 }28 26 29 27 #include "bits/signal.hfa" 28 #include "kernel_private.hfa" 30 29 31 30 #if !defined(__CFA_DEFAULT_PREEMPTION__) … … 43 42 // FwdDeclarations : Signal handlers 44 43 static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ); 44 static void sigHandler_alarm ( __CFA_SIGPARMS__ ); 45 45 static void sigHandler_segv ( __CFA_SIGPARMS__ ); 46 46 static void sigHandler_ill ( __CFA_SIGPARMS__ ); … … 56 56 #elif defined( __x86_64 ) 57 57 #define CFA_REG_IP gregs[REG_RIP] 58 #elif defined( __ ARM_ARCH)58 #elif defined( __arm__ ) 59 59 #define CFA_REG_IP arm_pc 60 #elif defined( __aarch64__ ) 61 #define CFA_REG_IP pc 60 62 #else 61 #error un knownhardware architecture63 #error unsupported hardware architecture 62 64 #endif 63 65 … … 83 85 // Get next expired node 84 86 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) { 85 if( ! alarms->head) return 0p; // If no alarms return null86 if( alarms->head->alarm >= currtime ) return 0p; // If alarms head not expired return null87 if( ! 
& (*alarms)`first ) return 0p; // If no alarms return null 88 if( (*alarms)`first.alarm >= currtime ) return 0p; // If alarms head not expired return null 87 89 return pop(alarms); // Otherwise just pop head 88 90 } 89 91 90 92 // Tick one frame of the Discrete Event Simulation for alarms 91 static void tick_preemption( ) {93 static void tick_preemption(void) { 92 94 alarm_node_t * node = 0p; // Used in the while loop but cannot be declared in the while condition 93 95 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading … … 97 99 while( node = get_expired( alarms, currtime ) ) { 98 100 // __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" ); 101 Duration period = node->period; 102 if( period == 0) { 103 node->set = false; // Node is one-shot, just mark it as not pending 104 } 99 105 100 106 // Check if this is a kernel 101 if( node-> kernel_alarm) {107 if( node->type == Kernel ) { 102 108 preempt( node->proc ); 103 109 } 110 else if( node->type == User ) { 111 timeout( node->thrd ); 112 } 104 113 else { 105 timeout( node->thrd);114 node->callback(*node); 106 115 } 107 116 108 117 // Check if this is a periodic alarm 109 Duration period = node->period;110 118 if( period > 0 ) { 111 119 // __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv ); … … 113 121 insert( alarms, node ); // Reinsert the node for the next time it triggers 114 122 } 115 else {116 node->set = false; // Node is one-shot, just mark it as not pending117 }118 123 } 119 124 120 125 // If there are still alarms pending, reset the timer 121 if( alarms->head) {122 __cfa abi_dbg_print_buffer_decl(" KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);123 Duration delta = alarms->head->alarm - currtime;124 Duration cap ed = max(delta, 50`us);126 if( & (*alarms)`first ) { 127 __cfadbg_print_buffer_decl(preemption, " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, 
__kernel_get_time().tv, (alarms->head->alarm - currtime).tv); 128 Duration delta = (*alarms)`first.alarm - currtime; 129 Duration capped = max(delta, 50`us); 125 130 // itimerval tim = { caped }; 126 131 // __cfaabi_dbg_print_buffer_local( " Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec); 127 132 128 __kernel_set_timer( cap ed );133 __kernel_set_timer( capped ); 129 134 } 130 135 } … … 158 163 // Kernel Signal Tools 159 164 //============================================================================================= 160 161 __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; ) 165 // In a user-level threading system, there are handful of thread-local variables where this problem occurs on the ARM. 166 // 167 // For each kernel thread running user-level threads, there is a flag variable to indicate if interrupts are 168 // enabled/disabled for that kernel thread. Therefore, this variable is made thread local. 169 // 170 // For example, this code fragment sets the state of the "interrupt" variable in thread-local memory. 171 // 172 // _Thread_local volatile int interrupts; 173 // int main() { 174 // interrupts = 0; // disable interrupts } 175 // 176 // which generates the following code on the ARM 177 // 178 // (gdb) disassemble main 179 // Dump of assembler code for function main: 180 // 0x0000000000000610 <+0>: mrs x1, tpidr_el0 181 // 0x0000000000000614 <+4>: mov w0, #0x0 // #0 182 // 0x0000000000000618 <+8>: add x1, x1, #0x0, lsl #12 183 // 0x000000000000061c <+12>: add x1, x1, #0x10 184 // 0x0000000000000620 <+16>: str wzr, [x1] 185 // 0x0000000000000624 <+20>: ret 186 // 187 // The mrs moves a pointer from coprocessor register tpidr_el0 into register x1. Register w0 is set to 0. The two adds 188 // increase the TLS pointer with the displacement (offset) 0x10, which is the location in the TSL of variable 189 // "interrupts". 
Finally, 0 is stored into "interrupts" through the pointer in register x1 that points into the 190 // TLS. Now once x1 has the pointer to the location of the TLS for kernel thread N, it can be preempted at a 191 // user-level and the user thread is put on the user-level ready-queue. When the preempted thread gets to the front of 192 // the user-level ready-queue it is run on kernel thread M. It now stores 0 into "interrupts" back on kernel thread N, 193 // turning off interrupt on the wrong kernel thread. 194 // 195 // On the x86, the following code is generated for the same code fragment. 196 // 197 // (gdb) disassemble main 198 // Dump of assembler code for function main: 199 // 0x0000000000400420 <+0>: movl $0x0,%fs:0xfffffffffffffffc 200 // 0x000000000040042c <+12>: xor %eax,%eax 201 // 0x000000000040042e <+14>: retq 202 // 203 // and there is base-displacement addressing used to atomically reset variable "interrupts" off of the TLS pointer in 204 // register "fs". 205 // 206 // Hence, the ARM has base-displacement address for the general purpose registers, BUT not to the coprocessor 207 // registers. As a result, generating the address for the write into variable "interrupts" is no longer atomic. 208 // 209 // Note this problem does NOT occur when just using multiple kernel threads because the preemption ALWAYS restarts the 210 // thread on the same kernel thread. 211 // 212 // The obvious question is why does ARM use a coprocessor register to store the TLS pointer given that coprocessor 213 // registers are second-class registers with respect to the instruction set. One possible answer is that they did not 214 // want to dedicate one of the general registers to hold the TLS pointer and there was a free coprocessor register 215 // available. 
216 217 //----------------------------------------------------------------------------- 218 // Some assembly required 219 #define __cfaasm_label(label, when) when: asm volatile goto(".global __cfaasm_" #label "_" #when "\n" "__cfaasm_" #label "_" #when ":":::"memory":when) 220 221 //---------- 222 // special case for preemption since used often 223 bool __preemption_enabled() { 224 // create a assembler label before 225 // marked as clobber all to avoid movement 226 __cfaasm_label(check, before); 227 228 // access tls as normal 229 bool enabled = __cfaabi_tls.preemption_state.enabled; 230 231 // create a assembler label after 232 // marked as clobber all to avoid movement 233 __cfaasm_label(check, after); 234 return enabled; 235 } 236 237 struct asm_region { 238 void * before; 239 void * after; 240 }; 241 242 static inline bool __cfaasm_in( void * ip, struct asm_region & region ) { 243 return ip >= region.before && ip <= region.after; 244 } 245 246 247 //---------- 248 // Get data from the TLS block 249 // struct asm_region __cfaasm_get; 250 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems 251 uintptr_t __cfatls_get( unsigned long int offset ) { 252 // create a assembler label before 253 // marked as clobber all to avoid movement 254 __cfaasm_label(get, before); 255 256 // access tls as normal (except for pointer arithmetic) 257 uintptr_t val = *(uintptr_t*)((uintptr_t)&__cfaabi_tls + offset); 258 259 // create a assembler label after 260 // marked as clobber all to avoid movement 261 __cfaasm_label(get, after); 262 return val; 263 } 162 264 163 265 extern "C" { 164 266 // Disable interrupts by incrementing the counter 165 267 void disable_interrupts() { 166 with( kernelTLS.preemption_state ) { 268 // create a assembler label before 269 // marked as clobber all to avoid movement 270 __cfaasm_label(dsable, before); 271 272 with( __cfaabi_tls.preemption_state ) { 167 273 #if GCC_VERSION > 50000 168 274 
static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); … … 181 287 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 182 288 } 289 290 // create a assembler label after 291 // marked as clobber all to avoid movement 292 __cfaasm_label(dsable, after); 293 183 294 } 184 295 … … 186 297 // If counter reaches 0, execute any pending __cfactx_switch 187 298 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 188 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 189 190 with( kernelTLS.preemption_state ){ 299 // Cache the processor now since interrupts can start happening after the atomic store 300 processor * proc = __cfaabi_tls.this_processor; 301 /* paranoid */ verify( proc ); 302 303 with( __cfaabi_tls.preemption_state ){ 191 304 unsigned short prev = disable_count; 192 305 disable_count -= 1; 193 verify( prev != 0u ); // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 306 307 // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 308 /* paranoid */ verify( prev != 0u ); 194 309 195 310 // Check if we need to prempt the thread because an interrupt was missed 196 311 if( prev == 1 ) { 197 312 #if GCC_VERSION > 50000 198 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");313 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); 199 314 #endif 200 315 … … 220 335 // Don't execute any pending __cfactx_switch even if counter reaches 0 221 336 void enable_interrupts_noPoll() { 222 unsigned short prev = kernelTLS.preemption_state.disable_count; 223 kernelTLS.preemption_state.disable_count -= 1; 224 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 337 unsigned short prev = 
__cfaabi_tls.preemption_state.disable_count; 338 __cfaabi_tls.preemption_state.disable_count -= 1; 339 // If this triggers someone is enabled already enabled interrupts 340 /* paranoid */ verifyf( prev != 0u, "Incremented from %u\n", prev ); 225 341 if( prev == 1 ) { 226 342 #if GCC_VERSION > 50000 227 static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");343 static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free"); 228 344 #endif 229 345 // Set enabled flag to true 230 346 // should be atomic to avoid preemption in the middle of the operation. 231 347 // use memory order RELAXED since there is no inter-thread on this variable requirements 232 __atomic_store_n(& kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);348 __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED); 233 349 234 350 // Signal the compiler that a fence is needed but only for signal handlers … … 237 353 } 238 354 } 355 356 //----------------------------------------------------------------------------- 357 // Kernel Signal Debug 358 void __cfaabi_check_preemption() { 359 bool ready = __preemption_enabled(); 360 if(!ready) { abort("Preemption should be ready"); } 361 362 __cfaasm_label(debug, before); 363 364 sigset_t oldset; 365 int ret; 366 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 367 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 368 369 ret = sigismember(&oldset, SIGUSR1); 370 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 371 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 372 373 ret = sigismember(&oldset, SIGALRM); 374 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 375 if(ret == 0) { abort("ERROR SIGALRM is enabled"); } 376 377 ret = sigismember(&oldset, SIGTERM); 
378 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 379 if(ret == 1) { abort("ERROR SIGTERM is disabled"); } 380 381 __cfaasm_label(debug, after); 382 } 383 384 #ifdef __CFA_WITH_VERIFY__ 385 bool __cfaabi_dbg_in_kernel() { 386 return !__preemption_enabled(); 387 } 388 #endif 389 390 #undef __cfaasm_label 391 392 //----------------------------------------------------------------------------- 393 // Signal handling 239 394 240 395 // sigprocmask wrapper : unblock a single signal … … 256 411 257 412 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 258 413 abort( "internal error, pthread_sigmask" ); 259 414 } 260 415 } … … 268 423 // reserved for future use 269 424 static void timeout( $thread * this ) { 270 //TODO : implement waking threads 271 } 425 unpark( this ); 426 } 427 428 //----------------------------------------------------------------------------- 429 // Some assembly required 430 #if defined( __i386 ) 431 #ifdef __PIC__ 432 #define RELOC_PRELUDE( label ) \ 433 "calll .Lcfaasm_prelude_" #label "$pb\n\t" \ 434 ".Lcfaasm_prelude_" #label "$pb:\n\t" \ 435 "popl %%eax\n\t" \ 436 ".Lcfaasm_prelude_" #label "_end:\n\t" \ 437 "addl $_GLOBAL_OFFSET_TABLE_+(.Lcfaasm_prelude_" #label "_end-.Lcfaasm_prelude_" #label "$pb), %%eax\n\t" 438 #define RELOC_PREFIX "" 439 #define RELOC_SUFFIX "@GOT(%%eax)" 440 #else 441 #define RELOC_PREFIX "$" 442 #define RELOC_SUFFIX "" 443 #endif 444 #define __cfaasm_label( label ) struct asm_region label = \ 445 ({ \ 446 struct asm_region region; \ 447 asm( \ 448 RELOC_PRELUDE( label ) \ 449 "movl " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \ 450 "movl " RELOC_PREFIX "__cfaasm_" #label "_after" RELOC_SUFFIX ", %[va]\n\t" \ 451 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 452 ); \ 453 region; \ 454 }); 455 #elif defined( __x86_64 ) 456 #ifdef __PIC__ 457 #define RELOC_PREFIX "" 458 #define RELOC_SUFFIX "@GOTPCREL(%%rip)" 459 #else 460 #define RELOC_PREFIX "$" 461 #define RELOC_SUFFIX 
"" 462 #endif 463 #define __cfaasm_label( label ) struct asm_region label = \ 464 ({ \ 465 struct asm_region region; \ 466 asm( \ 467 "movq " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \ 468 "movq " RELOC_PREFIX "__cfaasm_" #label "_after" RELOC_SUFFIX ", %[va]\n\t" \ 469 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 470 ); \ 471 region; \ 472 }); 473 #elif defined( __aarch64__ ) 474 #ifdef __PIC__ 475 // Note that this works only for gcc 476 #define __cfaasm_label( label ) struct asm_region label = \ 477 ({ \ 478 struct asm_region region; \ 479 asm( \ 480 "adrp %[vb], _GLOBAL_OFFSET_TABLE_" "\n\t" \ 481 "ldr %[vb], [%[vb], #:gotpage_lo15:__cfaasm_" #label "_before]" "\n\t" \ 482 "adrp %[va], _GLOBAL_OFFSET_TABLE_" "\n\t" \ 483 "ldr %[va], [%[va], #:gotpage_lo15:__cfaasm_" #label "_after]" "\n\t" \ 484 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 485 ); \ 486 region; \ 487 }); 488 #else 489 #error this is not the right thing to do 490 /* 491 #define __cfaasm_label( label ) struct asm_region label = \ 492 ({ \ 493 struct asm_region region; \ 494 asm( \ 495 "adrp %[vb], __cfaasm_" #label "_before" "\n\t" \ 496 "add %[vb], %[vb], :lo12:__cfaasm_" #label "_before" "\n\t" \ 497 "adrp %[va], :got:__cfaasm_" #label "_after" "\n\t" \ 498 "add %[va], %[va], :lo12:__cfaasm_" #label "_after" "\n\t" \ 499 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 500 ); \ 501 region; \ 502 }); 503 */ 504 #endif 505 #else 506 #error unknown hardware architecture 507 #endif 272 508 273 509 // KERNEL ONLY … … 275 511 // If true : preemption is safe 276 512 // If false : preemption is unsafe and marked as pending 277 static inline bool preemption_ready() { 513 static inline bool preemption_ready( void * ip ) { 514 // Get all the region for which it is not safe to preempt 515 __cfaasm_label( get ); 516 __cfaasm_label( check ); 517 __cfaasm_label( dsable ); 518 __cfaasm_label( debug ); 519 278 520 // Check if preemption is safe 279 bool ready = 
kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress; 280 521 bool ready = true; 522 if( __cfaasm_in( ip, get ) ) { ready = false; goto EXIT; }; 523 if( __cfaasm_in( ip, check ) ) { ready = false; goto EXIT; }; 524 if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; }; 525 if( __cfaasm_in( ip, debug ) ) { ready = false; goto EXIT; }; 526 if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; }; 527 if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; }; 528 529 EXIT: 281 530 // Adjust the pending flag accordingly 282 kernelTLS.this_processor->pending_preemption = !ready;531 __cfaabi_tls.this_processor->pending_preemption = !ready; 283 532 return ready; 284 533 } … … 290 539 // Startup routine to activate preemption 291 540 // Called from kernel_startup 292 void kernel_start_preemption() {541 void __kernel_alarm_startup() { 293 542 __cfaabi_dbg_print_safe( "Kernel : Starting preemption\n" ); 294 543 295 544 // Start with preemption disabled until ready 296 kernelTLS.preemption_state.enabled = false;297 kernelTLS.preemption_state.disable_count = 1;545 __cfaabi_tls.preemption_state.enabled = false; 546 __cfaabi_tls.preemption_state.disable_count = 1; 298 547 299 548 // Initialize the event kernel … … 303 552 // Setup proper signal handlers 304 553 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler 554 __cfaabi_sigaction( SIGALRM, sigHandler_alarm , SA_SIGINFO | SA_RESTART ); // debug handler 305 555 306 556 signal_block( SIGALRM ); … … 311 561 // Shutdown routine to deactivate preemption 312 562 // Called from kernel_shutdown 313 void kernel_stop_preemption() {563 void __kernel_alarm_shutdown() { 314 564 __cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n" ); 315 565 … … 325 575 // Wait for the preemption thread to finish 326 576 327 pthread_join( alarm_thread, 0p ); 328 free( alarm_stack ); 577 __destroy_pthread( alarm_thread, 
alarm_stack, 0p ); 329 578 330 579 // Preemption is now fully stopped … … 352 601 // Kernel Signal Handlers 353 602 //============================================================================================= 603 __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; ) 354 604 355 605 // Context switch signal handler 356 606 // Receives SIGUSR1 signal and causes the current thread to yield 357 607 static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) { 358 __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); ) 608 void * ip = (void *)(cxt->uc_mcontext.CFA_REG_IP); 609 __cfaabi_dbg_debug_do( last_interrupt = ip; ) 359 610 360 611 // SKULLDUGGERY: if a thread creates a processor and the immediately deletes it, 361 612 // the interrupt that is supposed to force the kernel thread to preempt might arrive 362 // before the kernel thread has even started running. When that happens an iterrupt363 // w ea null 'this_processor' will be caught, just ignore it.364 if(! kernelTLS.this_processor ) return;613 // before the kernel thread has even started running. When that happens, an interrupt 614 // with a null 'this_processor' will be caught, just ignore it. 615 if(! 
__cfaabi_tls.this_processor ) return; 365 616 366 617 choose(sfp->si_value.sival_int) { 367 618 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 368 case PREEMPT_TERMINATE: verify( __atomic_load_n( & kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );619 case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) ); 369 620 default: 370 621 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 372 623 373 624 // Check if it is safe to preempt here 374 if( !preemption_ready( ) ) { return; }375 376 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );625 if( !preemption_ready( ip ) ) { return; } 626 627 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) ); 377 628 378 629 // Sync flag : prevent recursive calls to the signal handler 379 kernelTLS.preemption_state.in_progress = true;630 __cfaabi_tls.preemption_state.in_progress = true; 380 631 381 632 // Clear sighandler mask before context switching. 
… … 387 638 } 388 639 389 // TODO: this should go in finish action390 640 // Clear the in progress flag 391 kernelTLS.preemption_state.in_progress = false;641 __cfaabi_tls.preemption_state.in_progress = false; 392 642 393 643 // Preemption can occur here … … 395 645 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch 396 646 } 647 648 static void sigHandler_alarm( __CFA_SIGPARMS__ ) { 649 abort("SIGALRM should never reach the signal handler"); 650 } 651 652 #if !defined(__CFA_NO_STATISTICS__) 653 int __print_alarm_stats = 0; 654 #endif 397 655 398 656 // Main of the alarm thread 399 657 // Waits on SIGALRM and send SIGUSR1 to whom ever needs it 400 658 static void * alarm_loop( __attribute__((unused)) void * args ) { 659 __processor_id_t id; 660 id.full_proc = false; 661 id.id = doregister(&id); 662 __cfaabi_tls.this_proc_id = &id; 663 664 #if !defined(__CFA_NO_STATISTICS__) 665 struct __stats_t local_stats; 666 __cfaabi_tls.this_stats = &local_stats; 667 __init_stats( &local_stats ); 668 #endif 669 401 670 // Block sigalrms to control when they arrive 402 671 sigset_t mask; … … 456 725 EXIT: 457 726 __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" ); 727 unregister(&id); 728 729 #if !defined(__CFA_NO_STATISTICS__) 730 if( 0 != __print_alarm_stats ) { 731 __print_stats( &local_stats, __print_alarm_stats, "Alarm", "Thread", 0p ); 732 } 733 #endif 458 734 return 0p; 459 735 } 460 461 //=============================================================================================462 // Kernel Signal Debug463 //=============================================================================================464 465 void __cfaabi_check_preemption() {466 bool ready = kernelTLS.preemption_state.enabled;467 if(!ready) { abort("Preemption should be ready"); }468 469 sigset_t oldset;470 int ret;471 ret = pthread_sigmask(0, 0p, &oldset);472 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }473 474 ret = sigismember(&oldset, SIGUSR1);475 
if(ret < 0) { abort("ERROR sigismember returned %d", ret); }476 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }477 478 ret = sigismember(&oldset, SIGALRM);479 if(ret < 0) { abort("ERROR sigismember returned %d", ret); }480 if(ret == 0) { abort("ERROR SIGALRM is enabled"); }481 482 ret = sigismember(&oldset, SIGTERM);483 if(ret < 0) { abort("ERROR sigismember returned %d", ret); }484 if(ret == 1) { abort("ERROR SIGTERM is disabled"); }485 }486 487 #ifdef __CFA_WITH_VERIFY__488 bool __cfaabi_dbg_in_kernel() {489 return !kernelTLS.preemption_state.enabled;490 }491 #endif492 736 493 737 // Local Variables: //
Note:
See TracChangeset
for help on using the changeset viewer.