libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa	(r6b33e89)
+++ libcfa/src/concurrency/kernel.cfa	(rb93bf85)
@@ -10,5 +10,5 @@
 // Created On       : Tue Jan 17 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Apr 25 07:02:42 2025
-// Update Count     : 82
+// Last Modified On : Mon Jan  9 08:42:05 2023
+// Update Count     : 77
 //
@@ -45,5 +45,5 @@
 #pragma GCC diagnostic pop
 
-#if ! defined(__CFA_NO_STATISTICS__)
+#if !defined(__CFA_NO_STATISTICS__)
 	#define __STATS_DEF( ...) __VA_ARGS__
 #else
@@ -158,6 +158,6 @@
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
-	#if ! defined(__CFA_NO_STATISTICS__)
-		if ( this->print_halts ) {
+	#if !defined(__CFA_NO_STATISTICS__)
+		if( this->print_halts ) {
 			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
 		}
@@ -169,5 +169,5 @@
 
 	// if we need to run some special setup, now is the time to do it.
-	if ( this->init.thrd ) {
+	if(this->init.thrd) {
 		this->init.thrd->curr_cluster = this->cltr;
 		__run_thread(this, this->init.thrd);
@@ -185,5 +185,5 @@
 			readyThread = __next_thread( this->cltr );
 
-			if ( !readyThread ) {
+			if( !readyThread ) {
 				// there is no point in holding submissions if we are idle
 				__IO_STATS__(true, io.flush.idle++; )
@@ -196,8 +196,8 @@
 			}
 
-			if ( !readyThread ) for(5) {
+			if( !readyThread ) for(5) {
 				readyThread = __next_thread_slow( this->cltr );
 
-				if ( readyThread ) break;
+				if( readyThread ) break;
 
 				// It's unlikely we still I/O to submit, but the arbiter could
@@ -210,14 +210,14 @@
 
 			HALT:
-			if ( !readyThread ) {
+			if( !readyThread ) {
 				// Don't block if we are done
-				if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
 
 				// Push self to idle stack
-				if ( !mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
+				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
 
 				// Confirm the ready-queue is empty
 				readyThread = __next_thread_search( this->cltr );
-				if ( readyThread ) {
+				if( readyThread ) {
 					// A thread was found, cancel the halt
 					mark_awake(this->cltr->procs, * this);
@@ -247,7 +247,7 @@
 
 			// Are we done?
-			if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-			if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
+			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+			if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
 				__IO_STATS__(true, io.flush.dirty++; )
 				__cfa_io_flush( this );
@@ -263,5 +263,5 @@
 	post( this->terminated );
 
-	if ( this == mainProcessor ) {
+	if(this == mainProcessor) {
 		// HACK : the coroutine context switch expects this_thread to be set
 		// and it make sense for it to be set in all other cases except here
@@ -294,6 +294,5 @@
 
 	// Actually run the thread
-	RUNNING:
-	while( true ) {
+	RUNNING: while(true) {
 		thrd_dst->preempted = __NO_PREEMPTION;
 
@@ -340,5 +339,5 @@
 	// In case 2, we lost the race so we now own the thread.
 
-	if ( unlikely(thrd_dst->preempted != __NO_PREEMPTION) ) {
+	if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 		// Reset the this_thread now that we know
 		// the state isn't active anymore
@@ -350,5 +349,5 @@
 	}
 
-	if ( unlikely(thrd_dst->state == Halting) ) {
+	if(unlikely(thrd_dst->state == Halting)) {
 		// Reset the this_thread now that we know
 		// the state isn't active anymore
@@ -419,7 +418,7 @@
 	}
 
-	#if ! defined(__CFA_NO_STATISTICS__)
+	#if !defined(__CFA_NO_STATISTICS__)
 		/* paranoid */ verify( thrd_src->last_proc != 0p );
-		if ( thrd_src->last_proc != kernelTLS().this_processor ) {
+		if(thrd_src->last_proc != kernelTLS().this_processor) {
 			__tls_stats()->ready.threads.migration++;
 		}
@@ -441,7 +440,7 @@
 	/* paranoid */ verify( thrd->curr_cluster );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-	/* paranoid */ if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
 		"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-	/* paranoid */ if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
+	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
 		"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
@@ -464,8 +463,8 @@
 	__wake_one( cl );
 
-	#if ! defined(__CFA_NO_STATISTICS__)
-		if ( kernelTLS().this_stats ) {
+	#if !defined(__CFA_NO_STATISTICS__)
+		if( kernelTLS().this_stats ) {
 			__tls_stats()->ready.threads.threads++;
-			if ( outside ) {
+			if(outside) {
 				__tls_stats()->ready.threads.extunpark++;
 			}
@@ -543,7 +542,7 @@
 	/* paranoid */ verify( ready_schedule_islocked());
 
-	if ( !thrd ) return;
-
-	if ( __must_unpark(thrd) ) {
+	if( !thrd ) return;
+
+	if(__must_unpark(thrd)) {
 		// Wake lost the race,
 		__schedule_thread( thrd, hint );
@@ -555,7 +554,7 @@
 
 void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
-	if ( !thrd ) return;
-
-	if ( __must_unpark(thrd) ) {
+	if( !thrd ) return;
+
+	if(__must_unpark(thrd)) {
 		disable_interrupts();
 		// Wake lost the race,
@@ -593,7 +592,7 @@
 	/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
 
-	if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
-	if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
-	if ( this->recursion != 1 ) { abort( "Thread internal monitor has unbalanced recursion" ); }
+	if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
+	if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
+	if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
 
 	thrd->state = Halting;
@@ -619,5 +618,5 @@
 	// If that is the case, abandon the preemption.
 	bool preempted = false;
-	if ( thrd->rdy_link.next == 0p ) {
+	if(thrd->rdy_link.next == 0p) {
 		preempted = true;
 		thrd->preempted = reason;
@@ -642,8 +641,8 @@
 
 	// If no one is sleeping: we are done
-	if ( fdp == 0p ) return;
+	if( fdp == 0p ) return;
 
 	int fd = 1;
-	if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
+	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
 		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
 	}
@@ -653,6 +652,6 @@
 		case 0:
 			// If the processor isn't ready to sleep then the exchange will already wake it up
-			#if ! defined(__CFA_NO_STATISTICS__)
-				if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
+			#if !defined(__CFA_NO_STATISTICS__)
+				if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
 				} else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
 			#endif
@@ -660,6 +659,6 @@
 		case 1:
 			// If someone else already said they will wake them: we are done
-			#if ! defined(__CFA_NO_STATISTICS__)
-				if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
+			#if !defined(__CFA_NO_STATISTICS__)
+				if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
 				} else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
 			#endif
@@ -671,6 +670,6 @@
 			/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
 
-			#if ! defined(__CFA_NO_STATISTICS__)
-				if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
+			#if !defined(__CFA_NO_STATISTICS__)
+				if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
 				} else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
 			#endif
@@ -711,8 +710,8 @@
 
 		// Someone already told us to wake-up! No time for a nap.
-		if ( expected == 1 ) { return; }
+		if(expected == 1) { return; }
 
 		// Try to mark that we are going to sleep
-		if ( __atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
 			// Every one agreed, taking a nap
 			break;
@@ -721,6 +720,6 @@
 
 
-	#if ! defined(__CFA_NO_STATISTICS__)
-		if ( this->print_halts ) {
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this->print_halts) {
 			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
 		}
@@ -732,5 +731,5 @@
 		eventfd_t val;
 		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
-		if ( ret < 0 ) {
+		if(ret < 0) {
 			switch((int)errno) {
 				case EAGAIN:
@@ -747,6 +746,6 @@
 	}
 
-	#if ! defined(__CFA_NO_STATISTICS__)
-		if ( this->print_halts ) {
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this->print_halts) {
 			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
 		}
@@ -760,5 +759,5 @@
 
 	/* paranoid */ verify( ! __preemption_enabled() );
-	if ( !try_lock( this )) return false;
+	if(!try_lock( this )) return false;
 	this.idle++;
 	/* paranoid */ verify( this.idle <= this.total );
@@ -785,5 +784,5 @@
 	// update the pointer to the head wait context
 	struct __fd_waitctx * wctx = 0;
-	if ( ! isEmpty( this.idles )) wctx = &first( this.idles ).idle_wctx;
+	if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
 	__atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
 }
@@ -799,5 +798,5 @@
 	thread$ * thrd = __cfaabi_tls.this_thread;
 
-	if ( thrd ) {
+	if(thrd) {
 		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
 		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
@@ -848,5 +847,5 @@
 //-----------------------------------------------------------------------------
 // Statistics
-#if ! defined(__CFA_NO_STATISTICS__)
+#if !defined(__CFA_NO_STATISTICS__)
 	void print_halts( processor & this ) libcfa_public {
 		this.print_halts = true;
@@ -856,10 +855,10 @@
 		/* paranoid */ verify( cltr->stats );
 
-		processor * it = &first( list );
+		processor * it = &list`first;
 		for(unsigned i = 0; i < count; i++) {
 			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
 			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
 			__tally_stats( cltr->stats, it->local_data->this_stats );
-			it = &next( *it );
+			it = &(*it)`next;
 		}
 	}
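Beyond the if (/if( and #if ! defined/#if !defined whitespace churn, the one substantive difference on the rb93bf85 side is the intrusive-list API: plain helper calls such as first( list ), isEmpty( this.idles ), and next( *it ) are written as CFA postfix calls list`first, this.idles`isEmpty, and (*it)`next. A minimal sketch of the postfix-call form, using a hypothetical ?`twice routine rather than anything from libcfa:

{{{#!cfa
#include <fstream.hfa>

// hypothetical postfix routine, not from libcfa: the call 3`twice resolves
// to ?`twice( 3 ), the same way list`first resolves to ?`first( list )
int ?`twice( int x ) { return 2 * x; }

int main() {
	sout | 3`twice;        // postfix spelling: prints 6
	sout | ?`twice( 3 );   // equivalent prefix spelling: prints 6
}
}}}

Both spellings name the same routine; the backquoted form simply puts the argument before the call, which reads naturally for list probes like idles`isEmpty.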
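The hunks around old lines 642-676 and 711-764 all touch the idle-sleep handshake: a processor commits to napping by CAS-ing its idle_wctx.sem from 0 to its eventfd, while the wake path claims the right to wake it with an atomic exchange and writes the eventfd only when the sleeper had actually committed. The following stand-alone sketch of that tri-state protocol uses hypothetical names (waitctx, wake_one, idle_sleep) and a simplified reset-to-zero step; it mirrors the calls visible in the diff, not kernel.cfa verbatim.

{{{#!cfa
#include <sys/eventfd.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

// illustrative stand-in for the processor's idle_wctx
struct waitctx {
	volatile int sem;   // 0 = awake, 1 = wake-up promised, evfd = committed to sleep
	int evfd;           // eventfd the sleeper blocks on
};

// waker side, after the diff's __wake_one: claim the context with an
// exchange, and only write the eventfd if the sleeper had committed
void wake_one( struct waitctx * wctx ) {
	int fd = 1;
	if ( __atomic_load_n( &wctx->sem, __ATOMIC_SEQ_CST ) != 1 ) {
		fd = __atomic_exchange_n( &wctx->sem, 1, __ATOMIC_RELAXED );
	}
	switch ( fd ) {
	  case 0:  break;   // sleeper never committed: the exchange already cancels its nap
	  case 1:  break;   // someone else promised the wake-up: nothing to do
	  default:          // sleeper is blocked reading fd: post one tick
		eventfd_write( fd, 1 );
	}
}

// sleeper side, after the diff's idle-sleep path: CAS 0 -> evfd to commit,
// then block on read; a promised wake-up means skip the nap entirely
void idle_sleep( struct waitctx * wctx ) {
	for () {
		int expected = __atomic_load_n( &wctx->sem, __ATOMIC_SEQ_CST );
		if ( expected == 1 ) {                    // already told to wake: no time for a nap
			__atomic_store_n( &wctx->sem, 0, __ATOMIC_SEQ_CST );
			return;
		}
		if ( __atomic_compare_exchange_n( &wctx->sem, &expected, wctx->evfd,
				false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break;
	}
	eventfd_t val;
	while ( read( wctx->evfd, &val, sizeof(val) ) < 0 ) {
		if ( errno != EAGAIN && errno != EINTR ) { perror( "eventfd read" ); break; }
	}
	__atomic_store_n( &wctx->sem, 0, __ATOMIC_SEQ_CST );  // reset for the next nap
}

void * waker( void * arg ) {
	usleep( 100000 );                             // give the sleeper time to block
	wake_one( (struct waitctx *)arg );
	return 0p;
}

int main() {
	struct waitctx wctx = { 0, eventfd( 0, 0 ) };
	pthread_t t;
	pthread_create( &t, 0p, waker, &wctx );
	idle_sleep( &wctx );                          // blocks until wake_one posts
	pthread_join( t, 0p );
	printf( "woken\n" );
}
}}}

The exchange makes the wake-up race benign: if the sleeper has not yet committed (sem == 0), flipping sem to 1 cancels the nap before the read; if it has (sem == evfd), the exchange hands the waker the very fd it must write.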