Changeset cfff639 for libcfa/src/concurrency
- Timestamp: Apr 24, 2021, 7:45:02 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: fb0be05
- Parents: 89eff25 (diff), 254ad1b (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src/concurrency
- Files: 10 edited
Legend:
- Unmodified (unmarked context lines)
- Added (+)
- Removed (-)
libcfa/src/concurrency/alarm.cfa
r89eff25 → rcfff639

 	unlock( event_kernel->lock );
 	this->set = true;
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 }
…
 	}
 	unlock( event_kernel->lock );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 	this->set = false;
 }
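This same substitution recurs throughout the merge: the debug-context argument of enable_interrupts() disappears, and (as the later diffs show) enable_interrupts_noPoll() is folded into the same entry point behind a boolean poll flag. A hypothetical call-site mapping, sketched against the plain-C signature that invoke.c declares below; example_call_sites is an illustrative name, not part of the runtime:

	extern void enable_interrupts( _Bool poll );

	void example_call_sites( void ) {
		// old: enable_interrupts( __cfaabi_dbg_ctx );  -- enable, then poll for deferred preemption
		enable_interrupts( 1 );

		// old: enable_interrupts_noPoll();             -- enable without polling
		enable_interrupts( 0 );
	}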
libcfa/src/concurrency/clib/cfathread.cfa
r89eff25 → rcfff639

 	this_thrd->state = Ready;
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 }
libcfa/src/concurrency/invoke.c
r89eff25 → rcfff639

 extern void disable_interrupts() OPTIONAL_THREAD;
-extern void enable_interrupts( __cfaabi_dbg_ctx_param );
+extern void enable_interrupts( _Bool poll );
 
 void __cfactx_invoke_coroutine(
…
 ) {
 	// Officially start the thread by enabling preemption
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts( true );
 
 	// Call the main of the thread
libcfa/src/concurrency/io.cfa
r89eff25 → rcfff639

 		// Allocation was successful
 		__STATS__( true, io.alloc.fast += 1; )
-		enable_interrupts( __cfaabi_dbg_ctx );
+		enable_interrupts();
 
 		__cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
…
 	// Fast path failed, fallback on arbitration
 	__STATS__( true, io.alloc.slow += 1; )
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 
 	$io_arbiter * ioarb = proc->cltr->io.arbiter;
…
 		// Mark the instance as no longer in-use, re-enable interrupts and return
 		__STATS__( true, io.submit.fast += 1; )
-		enable_interrupts( __cfaabi_dbg_ctx );
+		enable_interrupts();
 
 		__cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
…
 	// Fast path failed, fallback on arbitration
 	__STATS__( true, io.submit.slow += 1; )
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 
 	__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
libcfa/src/concurrency/kernel.cfa
r89eff25 → rcfff639

 static $thread * __next_thread(cluster * this);
 static $thread * __next_thread_slow(cluster * this);
+static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
 static void __run_thread(processor * this, $thread * dst);
 static void __wake_one(cluster * cltr);
…
 extern void __disable_interrupts_hard();
 extern void __enable_interrupts_hard();
+
+static inline void __disable_interrupts_checked() {
+	/* paranoid */ verify( __preemption_enabled() );
+	disable_interrupts();
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static inline void __enable_interrupts_checked( bool poll = true ) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	enable_interrupts( poll );
+	/* paranoid */ verify( __preemption_enabled() );
+}
 
 //=============================================================================================
…
 		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// The thread was preempted, reschedule it and reset the flag
-			__schedule_thread( thrd_dst );
+			schedule_thread$( thrd_dst );
 			break RUNNING;
 		}
…
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( kernelTLS().this_proc_id );
+	/* paranoid */ verify( ready_schedule_islocked());
 	/* paranoid */ verify( thrd );
 	/* paranoid */ verify( thrd->state != Halted );
…
 	__STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
 
-	ready_schedule_lock();
-		// push the thread to the cluster ready-queue
-		push( cl, thrd );
-
-		// variable thrd is no longer safe to use
-
-		// wake the cluster using the save variable.
-		__wake_one( cl );
-	ready_schedule_unlock();
+	// push the thread to the cluster ready-queue
+	push( cl, thrd );
+
+	// variable thrd is no longer safe to use
+	thrd = 0xdeaddeaddeaddeadp;
+
+	// wake the cluster using the save variable.
+	__wake_one( cl );
 
 	#if !defined(__CFA_NO_STATISTICS__)
…
 	#endif
 
-	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( ready_schedule_islocked());
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+void schedule_thread$( $thread * thrd ) {
+	ready_schedule_lock();
+		__schedule_thread( thrd );
+	ready_schedule_unlock();
 }
…
 }
 
-void unpark( $thread * thrd ) {
-	if( !thrd ) return;
-
+static inline bool __must_unpark( $thread * thrd ) {
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
 	switch(old_ticket) {
 		case TICKET_RUNNING:
 			// Wake won the race, the thread will reschedule/rerun itself
-			break;
+			return false;
 		case TICKET_BLOCKED:
 			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
 			/* paranoid */ verify( thrd->state == Blocked );
-
-			{
-				/* paranoid */ verify( publicTLS_get(this_proc_id) );
-				disable_interrupts();
-
-				/* paranoid */ verify( ! __preemption_enabled() );
-
-				// Wake lost the race,
-				__schedule_thread( thrd );
-
-				/* paranoid */ verify( ! __preemption_enabled() );
-
-				enable_interrupts_noPoll();
-				/* paranoid */ verify( publicTLS_get(this_proc_id) );
-			}
-
-			break;
+			return true;
 		default:
 			// This makes no sense, something is wrong abort
…
 }
 
+void unpark( $thread * thrd ) {
+	if( !thrd ) return;
+
+	if(__must_unpark(thrd)) {
+		disable_interrupts();
+			// Wake lost the race,
+			schedule_thread$( thrd );
+		enable_interrupts(false);
+	}
+}
+
 void park( void ) {
-	/* paranoid */ verify( __preemption_enabled() );
-	disable_interrupts();
-	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
-
-	returnToKernel();
-
-	/* paranoid */ verify( ! __preemption_enabled() );
-	enable_interrupts( __cfaabi_dbg_ctx );
-	/* paranoid */ verify( __preemption_enabled() );
+	__disable_interrupts_checked();
+	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
+	returnToKernel();
+	__enable_interrupts_checked();
 
 }
…
 // KERNEL ONLY
 bool force_yield( __Preemption_Reason reason ) {
-	/* paranoid */ verify( __preemption_enabled() );
-	disable_interrupts();
-	/* paranoid */ verify( ! __preemption_enabled() );
-
-	$thread * thrd = kernelTLS().this_thread;
-	/* paranoid */ verify(thrd->state == Active);
-
-	// SKULLDUGGERY: It is possible that we are preempting this thread just before
-	// it was going to park itself. If that is the case and it is already using the
-	// intrusive fields then we can't use them to preempt the thread
-	// If that is the case, abandon the preemption.
-	bool preempted = false;
-	if(thrd->link.next == 0p) {
-		preempted = true;
-		thrd->preempted = reason;
-		returnToKernel();
-	}
-
-	/* paranoid */ verify( ! __preemption_enabled() );
-	enable_interrupts_noPoll();
-	/* paranoid */ verify( __preemption_enabled() );
-
+	__disable_interrupts_checked();
+	$thread * thrd = kernelTLS().this_thread;
+	/* paranoid */ verify(thrd->state == Active);
+
+	// SKULLDUGGERY: It is possible that we are preempting this thread just before
+	// it was going to park itself. If that is the case and it is already using the
+	// intrusive fields then we can't use them to preempt the thread
+	// If that is the case, abandon the preemption.
+	bool preempted = false;
+	if(thrd->link.next == 0p) {
+		preempted = true;
+		thrd->preempted = reason;
+		returnToKernel();
+	}
+	__enable_interrupts_checked( false );
 	return preempted;
 }
…
 	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
 
-	disable_interrupts();
+	__disable_interrupts_checked();
 	/* paranoid */ verify( ! __preemption_enabled() );
 	eventfd_t val;
 	val = 1;
 	eventfd_write( this->idle, val );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	__enable_interrupts_checked();
 }
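The notable refactoring in kernel.cfa splits unpark() in two: __must_unpark() resolves the wake/block race with a single atomic ticket increment, and only the losing side pays for disabling interrupts and calling schedule_thread$(). A stand-alone C sketch of that handshake; the TICKET_* encodings and the struct layout are assumptions for illustration, not the runtime's actual definitions:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	enum { TICKET_BLOCKED = 0, TICKET_RUNNING = 1 };  // assumed encodings

	struct thread_stub { atomic_int ticket; };

	// The waker atomically bumps the ticket; the previous value says whether the
	// target has already blocked (the waker must reschedule it) or is still
	// running (it will observe the wake and reschedule itself).
	static bool must_unpark( struct thread_stub * thrd ) {
		int old_ticket = atomic_fetch_add( &thrd->ticket, 1 );
		switch( old_ticket ) {
		case TICKET_RUNNING: return false;  // wake won the race
		case TICKET_BLOCKED: return true;   // wake lost the race: caller schedules
		default:             abort();       // corrupted ticket state
		}
	}

Note that the re-enable on the unpark slow path passes poll == false, preserving the behaviour of the enable_interrupts_noPoll() call it replaces.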
libcfa/src/concurrency/kernel/fwd.hfa
r89eff25 → rcfff639

 extern void disable_interrupts();
-extern void enable_interrupts_noPoll();
-extern void enable_interrupts( __cfaabi_dbg_ctx_param );
+extern void enable_interrupts( bool poll = false );
 
 extern "Cforall" {
…
 				__VA_ARGS__ \
 			} \
-			if( !(in_kernel) ) enable_interrupts( __cfaabi_dbg_ctx ); \
+			if( !(in_kernel) ) enable_interrupts(); \
 		}
 	#else
libcfa/src/concurrency/kernel/startup.cfa
r89eff25 → rcfff639

 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread(mainThread);
+	schedule_thread$(mainThread);
 
 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
…
 
 	/* paranoid */ verify( ! __preemption_enabled() );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 	/* paranoid */ verify( __preemption_enabled() );
 
…
 	disable_interrupts();
 		init( this, name, _cltr, initT );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
…
 	disable_interrupts();
 		deinit( this );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 }
…
 	// Unlock the RWlock
 	ready_mutate_unlock( last_size );
-	enable_interrupts_noPoll(); // Don't poll, could be in main cluster
+	enable_interrupts( false ); // Don't poll, could be in main cluster
 }
…
 	// Unlock the RWlock
 	ready_mutate_unlock( last_size );
-	enable_interrupts_noPoll(); // Don't poll, could be in main cluster
+	enable_interrupts( false ); // Don't poll, could be in main cluster
 
 	#if !defined(__CFA_NO_STATISTICS__)
libcfa/src/concurrency/kernel_private.hfa
r89eff25 → rcfff639

 extern "C" {
 	void disable_interrupts() OPTIONAL_THREAD;
-	void enable_interrupts_noPoll();
-	void enable_interrupts( __cfaabi_dbg_ctx_param );
-}
-
-void __schedule_thread( $thread * )
-	#if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (1)))
-	#endif
-;
+	void enable_interrupts( bool poll = true );
+}
+
+void schedule_thread$( $thread * ) __attribute__((nonnull (1)));
 
 extern bool __preemption_enabled();
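One detail worth noticing across these headers: the new poll parameter gets different defaults in different scopes. kernel/fwd.hfa (above) declares bool poll = false, this private header declares bool poll = true, and the plain-C declaration in invoke.c has no default, so the flag there is always explicit. A sketch of what a bare call means, assuming exactly one of these declarations is in scope; bare_call_examples is an illustrative name:

	void bare_call_examples( void ) {
		// with kernel_private.hfa in scope (kernel-internal code):
		enable_interrupts();        // == enable_interrupts( true ): poll before returning

		// with kernel/fwd.hfa in scope (library-facing code):
		enable_interrupts();        // == enable_interrupts( false ): defer the poll

		// from plain C (invoke.c): no default argument, the flag is explicit
		enable_interrupts( true );
	}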
libcfa/src/concurrency/preemption.cfa
r89eff25 → rcfff639

 // Enable interrupts by decrementing the counter
 // If counter reaches 0, execute any pending __cfactx_switch
-void enable_interrupts( __cfaabi_dbg_ctx_param ) {
+void enable_interrupts( bool poll ) {
 	// Cache the processor now since interrupts can start happening after the atomic store
 	processor * proc = __cfaabi_tls.this_processor;
-	/* paranoid */ verify( proc );
+	/* paranoid */ verify( !poll || proc );
 
 	with( __cfaabi_tls.preemption_state ){
…
 			// Signal the compiler that a fence is needed but only for signal handlers
 			__atomic_signal_fence(__ATOMIC_RELEASE);
-			if( proc->pending_preemption ) {
+			if( poll && proc->pending_preemption ) {
 				proc->pending_preemption = false;
 				force_yield( __POLL_PREEMPTION );
 			}
 		}
-	}
-
-	// For debugging purposes : keep track of the last person to enable the interrupts
-	__cfaabi_dbg_debug_do( proc->last_enable = caller; )
-}
-
-// Disable interrupts by incrementint the counter
-// Don't execute any pending __cfactx_switch even if counter reaches 0
-void enable_interrupts_noPoll() {
-	unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
-	__cfaabi_tls.preemption_state.disable_count -= 1;
-	// If this triggers someone is enabled already enabled interrupts
-	/* paranoid */ verifyf( prev != 0u, "Incremented from %u\n", prev );
-	if( prev == 1 ) {
-		#if GCC_VERSION > 50000
-		static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
-		#endif
-		// Set enabled flag to true
-		// should be atomic to avoid preemption in the middle of the operation.
-		// use memory order RELAXED since there is no inter-thread on this variable requirements
-		__atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED);
-
-		// Signal the compiler that a fence is needed but only for signal handlers
-		__atomic_signal_fence(__ATOMIC_RELEASE);
-	}
+	}
 }
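With enable_interrupts_noPoll() deleted, the counter discipline lives in a single function. A condensed, stand-alone sketch of that discipline under simplified names; the real implementation stores the enabled flag atomically, issues a signal fence, and reads pending_preemption from the cached processor:

	#include <stdbool.h>

	// Stand-ins for __cfaabi_tls.preemption_state and proc->pending_preemption.
	static _Thread_local struct {
		unsigned short disable_count;
		bool enabled;
		bool pending_preemption;
	} pstate;

	// disable/enable calls nest; only the outermost enable (count 1 -> 0) turns
	// preemption back on, and the poll flag decides whether a preemption that
	// was deferred while disabled is serviced now or at a later poll point.
	static void enable_interrupts_sketch( bool poll ) {
		unsigned short prev = pstate.disable_count;
		pstate.disable_count -= 1;          // must balance a prior disable
		if( prev == 1 ) {                   // outermost enable
			pstate.enabled = true;          // atomic store + signal fence in the real code
			if( poll && pstate.pending_preemption ) {
				pstate.pending_preemption = false;
				// the runtime calls force_yield( __POLL_PREEMPTION ) here
			}
		}
	}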
libcfa/src/concurrency/thread.cfa
r89eff25 → rcfff639

 	/* paranoid */ verify( this_thrd->context.SP );
 
-	__schedule_thread( this_thrd );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	schedule_thread$( this_thrd );
+	enable_interrupts();
 }
…
 	disable_interrupts();
 	uint64_t ret = __tls_rand();
-	enable_interrupts( __cfaabi_dbg_ctx );
+	enable_interrupts();
 	return ret;
 }