File: 1 edited
Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/preemption.cfa
rb443db0 r428adbc 238 238 //---------- 239 239 // special case for preemption since used often 240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_ nopreempt libcfa_public {240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public { 241 241 // create a assembler label before 242 242 // marked as clobber all to avoid movement … … 272 272 } 273 273 274 extern "C" {275 __attribute__((visibility("hidden"))) extern void * const __start_cfatext_nopreempt;276 __attribute__((visibility("hidden"))) extern void * const __stop_cfatext_nopreempt;277 278 extern const __cfa_nopreempt_region __libcfa_nopreempt;279 __attribute__((visibility("protected"))) const __cfa_nopreempt_region __libcfathrd_nopreempt @= {280 (void * const)&__start_cfatext_nopreempt,281 (void * const)&__stop_cfatext_nopreempt282 };283 }284 285 static inline bool __cfaabi_in( void * const ip, const struct __cfa_nopreempt_region & const region ) {286 return ip >= region.start && ip <= region.stop;287 }288 289 274 290 275 //---------- 291 276 // Get data from the TLS block 292 277 // struct asm_region __cfaasm_get; 293 uintptr_t __cfatls_get( unsigned long int offset ) libcfa_nopreempt libcfa_public; //no inline to avoid problems278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems 294 279 uintptr_t __cfatls_get( unsigned long int offset ) { 295 280 // create a assembler label before … … 310 295 extern "C" { 311 296 // Disable interrupts by incrementing the counter 312 void disable_interrupts() libcfa_nopreemptlibcfa_public {297 __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public { 313 298 // create a assembler label before 314 299 // marked as clobber all to avoid movement … … 341 326 // Enable interrupts by decrementing the counter 342 327 // If counter reaches 0, execute any pending __cfactx_switch 343 void 
enable_interrupts( bool poll ) libcfa_ nopreempt libcfa_public {328 void enable_interrupts( bool poll ) libcfa_public { 344 329 // Cache the processor now since interrupts can start happening after the atomic store 345 330 processor * proc = __cfaabi_tls.this_processor; … … 373 358 } 374 359 } 375 376 // Check whether or not there is pending preemption377 // force_yield( __POLL_PREEMPTION ) if appropriate378 // return true if the thread was in an interruptable state379 // i.e. on a real processor and not in the kernel380 // (can return true even if no preemption was pending)381 bool poll_interrupts() libcfa_public {382 // Cache the processor now since interrupts can start happening after the atomic store383 processor * proc = publicTLS_get( this_processor );384 if ( ! proc ) return false;385 if ( ! __preemption_enabled() ) return false;386 387 with( __cfaabi_tls.preemption_state ){388 // Signal the compiler that a fence is needed but only for signal handlers389 __atomic_signal_fence(__ATOMIC_RELEASE);390 if( proc->pending_preemption ) {391 proc->pending_preemption = false;392 force_yield( __POLL_PREEMPTION );393 }394 }395 396 return true;397 }398 360 } 399 361 … … 406 368 sigset_t oldset; 407 369 int ret; 408 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary370 ret = real_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 409 371 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 410 372 … … 439 401 sigaddset( &mask, sig ); 440 402 441 if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {403 if ( real_pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) { 442 404 abort( "internal error, pthread_sigmask" ); 443 405 } … … 450 412 sigaddset( &mask, sig ); 451 413 452 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {414 if ( real_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 453 415 abort( "internal error, pthread_sigmask" ); 454 416 } 
… … 458 420 static void preempt( processor * this ) { 459 421 sigval_t value = { PREEMPT_NORMAL }; 460 pthread_sigqueue( this->kernel_thread, SIGUSR1, value );422 real_pthread_sigqueue( this->kernel_thread, SIGUSR1, value ); 461 423 } 462 424 … … 469 431 sigset_t oldset; 470 432 int ret; 471 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary433 ret = real_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 472 434 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 473 435 … … 488 450 sigset_t oldset; 489 451 int ret; 490 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary452 ret = real_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 491 453 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 492 454 … … 501 463 502 464 //----------------------------------------------------------------------------- 465 // Some assembly required 466 #if defined( __i386 ) 467 #ifdef __PIC__ 468 #define RELOC_PRELUDE( label ) \ 469 "calll .Lcfaasm_prelude_" #label "$pb\n\t" \ 470 ".Lcfaasm_prelude_" #label "$pb:\n\t" \ 471 "popl %%eax\n\t" \ 472 ".Lcfaasm_prelude_" #label "_end:\n\t" \ 473 "addl $_GLOBAL_OFFSET_TABLE_+(.Lcfaasm_prelude_" #label "_end-.Lcfaasm_prelude_" #label "$pb), %%eax\n\t" 474 #define RELOC_PREFIX "" 475 #define RELOC_SUFFIX "@GOT(%%eax)" 476 #else 477 #define RELOC_PREFIX "$" 478 #define RELOC_SUFFIX "" 479 #endif 480 #define __cfaasm_label( label ) struct asm_region label = \ 481 ({ \ 482 struct asm_region region; \ 483 asm( \ 484 RELOC_PRELUDE( label ) \ 485 "movl " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \ 486 "movl " RELOC_PREFIX "__cfaasm_" #label "_after" RELOC_SUFFIX ", %[va]\n\t" \ 487 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 488 ); \ 489 region; \ 490 }); 491 #elif 
defined( __x86_64 ) 492 #ifdef __PIC__ 493 #define RELOC_PREFIX "" 494 #define RELOC_SUFFIX "@GOTPCREL(%%rip)" 495 #else 496 #define RELOC_PREFIX "$" 497 #define RELOC_SUFFIX "" 498 #endif 499 #define __cfaasm_label( label ) struct asm_region label = \ 500 ({ \ 501 struct asm_region region; \ 502 asm( \ 503 "movq " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \ 504 "movq " RELOC_PREFIX "__cfaasm_" #label "_after" RELOC_SUFFIX ", %[va]\n\t" \ 505 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 506 ); \ 507 region; \ 508 }); 509 #elif defined( __aarch64__ ) 510 #ifdef __PIC__ 511 // Note that this works only for gcc 512 #define __cfaasm_label( label ) struct asm_region label = \ 513 ({ \ 514 struct asm_region region; \ 515 asm( \ 516 "adrp %[vb], _GLOBAL_OFFSET_TABLE_" "\n\t" \ 517 "ldr %[vb], [%[vb], #:gotpage_lo15:__cfaasm_" #label "_before]" "\n\t" \ 518 "adrp %[va], _GLOBAL_OFFSET_TABLE_" "\n\t" \ 519 "ldr %[va], [%[va], #:gotpage_lo15:__cfaasm_" #label "_after]" "\n\t" \ 520 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 521 ); \ 522 region; \ 523 }); 524 #else 525 #error this is not the right thing to do 526 /* 527 #define __cfaasm_label( label ) struct asm_region label = \ 528 ({ \ 529 struct asm_region region; \ 530 asm( \ 531 "adrp %[vb], __cfaasm_" #label "_before" "\n\t" \ 532 "add %[vb], %[vb], :lo12:__cfaasm_" #label "_before" "\n\t" \ 533 "adrp %[va], :got:__cfaasm_" #label "_after" "\n\t" \ 534 "add %[va], %[va], :lo12:__cfaasm_" #label "_after" "\n\t" \ 535 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 536 ); \ 537 region; \ 538 }); 539 */ 540 #endif 541 #else 542 #error unknown hardware architecture 543 #endif 544 503 545 // KERNEL ONLY 504 546 // Check if a __cfactx_switch signal handler shoud defer … … 506 548 // If false : preemption is unsafe and marked as pending 507 549 static inline bool preemption_ready( void * ip ) { 550 // Get all the region for which it is not safe to preempt 551 __cfaasm_label( get 
); 552 __cfaasm_label( check ); 553 __cfaasm_label( dsable ); 554 // __cfaasm_label( debug ); 555 508 556 // Check if preemption is safe 509 557 bool ready = true; 510 if( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; }; 511 if( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; }; 512 558 if( __cfaasm_in( ip, get ) ) { ready = false; goto EXIT; }; 559 if( __cfaasm_in( ip, check ) ) { ready = false; goto EXIT; }; 560 if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; }; 561 // if( __cfaasm_in( ip, debug ) ) { ready = false; goto EXIT; }; 513 562 if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; }; 514 563 if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; }; … … 559 608 sigval val; 560 609 val.sival_int = 0; 561 pthread_sigqueue( alarm_thread, SIGALRM, val );610 real_pthread_sigqueue( alarm_thread, SIGALRM, val ); 562 611 563 612 // Wait for the preemption thread to finish … … 594 643 // Kernel Signal Handlers 595 644 //============================================================================================= 596 __cfaabi_dbg_debug_do( static __threadvoid * last_interrupt = 0; )645 __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; ) 597 646 598 647 // Context switch signal handler … … 633 682 static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" ); 634 683 #endif 635 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) {684 if ( real_pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) { 636 685 abort( "internal error, sigprocmask" ); 637 686 } … … 661 710 sigset_t mask; 662 711 sigfillset(&mask); 663 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {712 if ( real_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 664 713 abort( "internal error, pthread_sigmask" ); 665 714 }
Note: See TracChangeset for help on using the changeset viewer.