File: 1 edited

Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed

(Note: several modified lines in this changeset differ only in whitespace, so their '-' and '+' forms read identically below.)
libcfa/src/concurrency/kernel.cfa
--- rb93bf85
+++ r6b33e89

@@ -10 +10 @@
 // Created On : Tue Jan 17 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Jan 9 08:42:05 2023
-// Update Count : 77
+// Last Modified On : Fri Apr 25 07:02:42 2025
+// Update Count : 82
 //

@@ -45 +45 @@
 #pragma GCC diagnostic pop

-#if ! defined(__CFA_NO_STATISTICS__)
+#if ! defined(__CFA_NO_STATISTICS__)
 #define __STATS_DEF( ...) __VA_ARGS__
 #else

@@ -158 +158 @@

 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
-#if ! defined(__CFA_NO_STATISTICS__)
-if ( this->print_halts ) {
+#if ! defined(__CFA_NO_STATISTICS__)
+if ( this->print_halts ) {
 __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
 }

@@ -169 +169 @@

 // if we need to run some special setup, now is the time to do it.
-if (this->init.thrd) {
+if (this->init.thrd) {
 this->init.thrd->curr_cluster = this->cltr;
 __run_thread(this, this->init.thrd);

@@ -185 +185 @@
 readyThread = __next_thread( this->cltr );

-if ( !readyThread ) {
+if ( ! readyThread ) {
 // there is no point in holding submissions if we are idle
 __IO_STATS__(true, io.flush.idle++; )

@@ -196 +196 @@
 }

-if ( !readyThread ) for(5) {
+if ( ! readyThread ) for(5) {
 readyThread = __next_thread_slow( this->cltr );

-if ( readyThread ) break;
+if ( readyThread ) break;

 // It's unlikely we still I/O to submit, but the arbiter could

@@ -210 +210 @@

 HALT:
-if ( !readyThread ) {
+if ( ! readyThread ) {
 // Don't block if we are done
-if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

 // Push self to idle stack
-if (!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
+if ( ! mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;

 // Confirm the ready-queue is empty
 readyThread = __next_thread_search( this->cltr );
-if ( readyThread ) {
+if ( readyThread ) {
 // A thread was found, cancel the halt
 mark_awake(this->cltr->procs, * this);

@@ -247 +247 @@

 // Are we done?
-if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

-if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
+if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && ! __atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
 __IO_STATS__(true, io.flush.dirty++; )
 __cfa_io_flush( this );

@@ -263 +263 @@
 post( this->terminated );

-if (this == mainProcessor) {
+if (this == mainProcessor) {
 // HACK : the coroutine context switch expects this_thread to be set
 // and it make sense for it to be set in all other cases except here

@@ -294 +294 @@

 // Actually run the thread
-RUNNING: while(true) {
+RUNNING:
+while( true ) {
 thrd_dst->preempted = __NO_PREEMPTION;

@@ -339 +340 @@
 // In case 2, we lost the race so we now own the thread.

-if (unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
+if (unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 // Reset the this_thread now that we know
 // the state isn't active anymore

@@ -349 +350 @@
 }

-if (unlikely(thrd_dst->state == Halting)) {
+if (unlikely(thrd_dst->state == Halting)) {
 // Reset the this_thread now that we know
 // the state isn't active anymore

@@ -418 +419 @@
 }

-#if ! defined(__CFA_NO_STATISTICS__)
+#if ! defined(__CFA_NO_STATISTICS__)
 /* paranoid */ verify( thrd_src->last_proc != 0p );
-if (thrd_src->last_proc != kernelTLS().this_processor) {
+if (thrd_src->last_proc != kernelTLS().this_processor) {
 __tls_stats()->ready.threads.migration++;
 }

@@ -440 +441 @@
 /* paranoid */ verify( thrd->curr_cluster );
 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-/* paranoid */ if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+/* paranoid */ if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-/* paranoid */ if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
+/* paranoid */ if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 /* paranoid */ #endif

@@ -463 +464 @@
 __wake_one( cl );

-#if ! defined(__CFA_NO_STATISTICS__)
-if ( kernelTLS().this_stats ) {
+#if ! defined(__CFA_NO_STATISTICS__)
+if ( kernelTLS().this_stats ) {
 __tls_stats()->ready.threads.threads++;
-if (outside) {
+if (outside) {
 __tls_stats()->ready.threads.extunpark++;
 }

@@ -542 +543 @@
 /* paranoid */ verify( ready_schedule_islocked());

-if ( !thrd ) return;
+if ( ! thrd ) return;

-if (__must_unpark(thrd)) {
+if (__must_unpark(thrd)) {
 // Wake lost the race,
 __schedule_thread( thrd, hint );

@@ -554 +555 @@

 void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
-if ( !thrd ) return;
+if ( ! thrd ) return;

-if (__must_unpark(thrd)) {
+if (__must_unpark(thrd)) {
 disable_interrupts();
 // Wake lost the race,

@@ -592 +593 @@
 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

-if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
-if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
-if ( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
+if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
+if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
+if ( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

 thrd->state = Halting;

@@ -618 +619 @@
 // If that is the case, abandon the preemption.
 bool preempted = false;
-if (thrd->rdy_link.next == 0p) {
+if (thrd->rdy_link.next == 0p) {
 preempted = true;
 thrd->preempted = reason;

@@ -641 +642 @@

 // If no one is sleeping: we are done
-if ( fdp == 0p ) return;
+if ( fdp == 0p ) return;

 int fd = 1;
-if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
+if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
 fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
 }

@@ -652 +653 @@
 case 0:
 // If the processor isn't ready to sleep then the exchange will already wake it up
-#if ! defined(__CFA_NO_STATISTICS__)
-if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
+#if ! defined(__CFA_NO_STATISTICS__)
+if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
 } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
 #endif

@@ -659 +660 @@
 case 1:
 // If someone else already said they will wake them: we are done
-#if ! defined(__CFA_NO_STATISTICS__)
-if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
+#if ! defined(__CFA_NO_STATISTICS__)
+if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
 } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
 #endif

@@ -670 +671 @@
 /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );

-#if ! defined(__CFA_NO_STATISTICS__)
-if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
+#if ! defined(__CFA_NO_STATISTICS__)
+if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
 } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
 #endif

@@ -710 +711 @@

 // Someone already told us to wake-up! No time for a nap.
-if (expected == 1) { return; }
+if (expected == 1) { return; }

 // Try to mark that we are going to sleep
-if (__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+if (__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
 // Every one agreed, taking a nap
 break;

@@ -720 +721 @@


-#if ! defined(__CFA_NO_STATISTICS__)
-if (this->print_halts) {
+#if ! defined(__CFA_NO_STATISTICS__)
+if (this->print_halts) {
 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
 }

@@ -731 +732 @@
 eventfd_t val;
 ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
-if (ret < 0) {
+if (ret < 0) {
 switch((int)errno) {
 case EAGAIN:

@@ -746 +747 @@
 }

-#if ! defined(__CFA_NO_STATISTICS__)
-if (this->print_halts) {
+#if ! defined(__CFA_NO_STATISTICS__)
+if (this->print_halts) {
 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
 }

@@ -759 +760 @@

 /* paranoid */ verify( ! __preemption_enabled() );
-if (!try_lock( this )) return false;
+if ( ! try_lock( this )) return false;
 this.idle++;
 /* paranoid */ verify( this.idle <= this.total );

@@ -784 +785 @@
 // update the pointer to the head wait context
 struct __fd_waitctx * wctx = 0;
-if (!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
+if ( ! isEmpty( this.idles )) wctx = &first( this.idles ).idle_wctx;
 __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
 }

@@ -798 +799 @@
 thread$ * thrd = __cfaabi_tls.this_thread;

-if (thrd) {
+if (thrd) {
 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
 __cfaabi_bits_write( STDERR_FILENO, abort_text, len );

@@ -847 +848 @@
 //-----------------------------------------------------------------------------
 // Statistics
-#if ! defined(__CFA_NO_STATISTICS__)
+#if ! defined(__CFA_NO_STATISTICS__)
 void print_halts( processor & this ) libcfa_public {
 this.print_halts = true;

@@ -855 +856 @@
 /* paranoid */ verify( cltr->stats );

-processor * it = & list`first;
+processor * it = &first( list );
 for(unsigned i = 0; i < count; i++) {
 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
 // __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
 __tally_stats( cltr->stats, it->local_data->this_stats );
-it = & (*it)`next;
+it = &next( *it );
 }
 }
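For readers unfamiliar with the CFA syntax replaced in the last two hunks: CFA supports a backtick postfix-call form, where expr`f is sugar for f( expr ). A minimal, hypothetical sketch of the two forms (the node type, first accessor, and demo function are illustrative, not the kernel's):

struct node { int val; };

int first( node & n ) { return n.val; }    // hypothetical accessor, not the kernel's

int demo( node & n ) {
	int a = n`first;       // postfix-call form: sugar for first( n )
	int b = first( n );    // plain-call form, as adopted by this changeset
	return a + b;          // both calls are equivalent
}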