Changeset 63be3387 for libcfa/src/concurrency/kernel.cfa
- Timestamp: Nov 14, 2022, 11:52:44 AM (3 years ago)
- Branches: ADT, ast-experimental, master
- Children: 7d9598d8
- Parents: b77f0e1 (diff), 19a8c40 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- File: 1 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines are shown with a leading +
- Removed lines are shown with a leading -
libcfa/src/concurrency/kernel.cfa
rb77f0e1 r63be3387

 extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
 extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
-extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
-
-#if defined(CFA_WITH_IO_URING_IDLE)
-	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
-#endif
 
 extern void __disable_interrupts_hard();
…
 	verify(this);
 
-	/* paranoid */ verify( this->idle_wctx.ftr != 0p );
-	/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
-
-	// used for idle sleep when io_uring is present
-	// mark it as already fulfilled so we know if there is a pending request or not
-	this->idle_wctx.ftr->self.ptr = 1p;
-
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
 	#if !defined(__CFA_NO_STATISTICS__)
…
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
-	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
+	/* paranoid */ verifyf( thrd_dst->rdy_link.next == 0p, "Expected null got %p", thrd_dst->rdy_link.next );
 	__builtin_prefetch( thrd_dst->context.SP );
…
 	/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
 	/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
+	/* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, this, __ATOMIC_SEQ_CST) == 0p );
 	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
…
 	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
+	/* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, 0p, __ATOMIC_SEQ_CST) == this );
 	/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
 	/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
+	/* paranoid */ verify( thrd_dst->state != Halted );
 	/* paranoid */ verify( thrd_dst->context.SP );
-	/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
 	/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
 	/* paranoid */ verify( ! __preemption_enabled() );
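The two added "executing" checks bracket the context switch and act as an ownership assertion: at most one processor may be running a given thread, and the atomic exchange turns any violation into a failed verify instead of silent corruption. A minimal sketch of the same pattern in plain C; the worker/task names are illustrative stand-ins, not the CFA runtime API:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stddef.h>

	struct worker;                                // stands in for the CFA processor
	struct task {                                 // stands in for thread$
		struct worker * _Atomic executing;    // which worker currently runs this task
	};

	static void run_task(struct worker * self, struct task * t) {
		// Claim the task: the previous owner must be NULL, i.e. nobody else is running it.
		assert(atomic_exchange(&t->executing, self) == NULL);

		// ... context switch into the task and back ...

		// Release the task: the owner must still be this worker, i.e. ownership never leaked.
		assert(atomic_exchange(&t->executing, NULL) == self);
	}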
…
 		"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
-	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
+	/* paranoid */ verifyf( thrd->rdy_link.next == 0p, "Expected null got %p", thrd->rdy_link.next );
 	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
…
 	/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
 
-	thrd->state = Halting;
 	if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
 	if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
 	if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
+
+	thrd->state = Halting;
+	thrd->ticket = TICKET_DEAD;
 
 	// Leave the thread
…
 	// If that is the case, abandon the preemption.
 	bool preempted = false;
-	if(thrd->link.next == 0p) {
+	if(thrd->rdy_link.next == 0p) {
 		preempted = true;
 		thrd->preempted = reason;
…
-	#if !defined(CFA_WITH_IO_URING_IDLE)
-		#if !defined(__CFA_NO_STATISTICS__)
-			if(this->print_halts) {
-				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
-			}
-		#endif
-
-		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
-
-		{
-			eventfd_t val;
-			ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
-			if(ret < 0) {
-				switch((int)errno) {
-				case EAGAIN:
-				#if EAGAIN != EWOULDBLOCK
-				case EWOULDBLOCK:
-				#endif
-				case EINTR:
-					// No need to do anything special here, just assume it's a legitimate wake-up
-					break;
-				default:
-					abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-				}
-			}
-		}
-
-		#if !defined(__CFA_NO_STATISTICS__)
-			if(this->print_halts) {
-				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-			}
-		#endif
-	#else
-		__cfa_io_idle( this );
-	#endif
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this->print_halts) {
+			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
+		}
+	#endif
+
+	__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
+
+	{
+		eventfd_t val;
+		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
+		if(ret < 0) {
+			switch((int)errno) {
+			case EAGAIN:
+			#if EAGAIN != EWOULDBLOCK
+			case EWOULDBLOCK:
+			#endif
+			case EINTR:
+				// No need to do anything special here, just assume it's a legitimate wake-up
+				break;
+			default:
+				abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+			}
+		}
+	}
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this->print_halts) {
+			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+		}
+	#endif
 }
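After this change the idle path no longer depends on CFA_WITH_IO_URING_IDLE: an idle processor always parks on a blocking read of its eventfd, and EAGAIN/EWOULDBLOCK/EINTR are treated as legitimate wake-ups. A self-contained C sketch of that park/wake pattern, using a bare eventfd rather than the CFA wait-context structures:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/eventfd.h>
	#include <unistd.h>

	// Park the caller until another thread writes to the eventfd.
	static void idle_park(int evfd) {
		eventfd_t val;
		ssize_t ret = read(evfd, &val, sizeof(val));
		if (ret < 0) {
			switch (errno) {
			case EAGAIN:
			#if EAGAIN != EWOULDBLOCK
			case EWOULDBLOCK:
			#endif
			case EINTR:
				break;      // spurious or interrupted wake-up: return and rescan for work
			default:
				fprintf(stderr, "read failure on idle eventfd: %s\n", strerror(errno));
				abort();
			}
		}
	}

	// Wake one parked processor by bumping the eventfd counter.
	static void idle_wake(int evfd) {
		eventfd_write(evfd, 1);
	}

	int main(void) {
		int evfd = eventfd(0, 0);   // counting eventfd, blocking reads
		if (evfd < 0) { perror("eventfd"); return 1; }
		idle_wake(evfd);            // pretend another processor posted work
		idle_park(evfd);            // returns immediately because the counter is non-zero
		close(evfd);
		return 0;
	}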
…
 	insert_first(this.idles, proc);
 
+	// update the pointer to the head wait context, which should now point to this proc.
 	__atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
 	unlock( this );
…
 	{
+		// update the pointer to the head wait context
 		struct __fd_waitctx * wctx = 0;
 		if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
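Both added comments describe the same idea: this.fdw tracks the wait context of the processor at the head of the idle list, published with an atomic store so other processors can read it without walking the list. A rough C sketch of that publish/notify pattern; idle_list, wait_ctx, and the field names are illustrative, not the CFA cluster layout:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stddef.h>
	#include <sys/eventfd.h>

	struct wait_ctx { int evfd; };                // per-processor wait context

	struct idle_list {
		pthread_mutex_t lock;                 // protects list membership
		struct wait_ctx * head;               // wait context of the first idle processor
		_Atomic(struct wait_ctx *) fdw;       // published copy, readable without the lock
	};

	// A processor about to go idle becomes the head and publishes its wait context.
	static void mark_idle(struct idle_list * l, struct wait_ctx * wctx) {
		pthread_mutex_lock(&l->lock);
		l->head = wctx;                                              // insert_first(...)
		atomic_store_explicit(&l->fdw, wctx, memory_order_seq_cst);  // publish for wakers
		pthread_mutex_unlock(&l->lock);
	}

	// A waker that found new work notifies the head idle processor, if any.
	static void wake_one(struct idle_list * l) {
		struct wait_ctx * wctx = atomic_load_explicit(&l->fdw, memory_order_seq_cst);
		if (wctx) eventfd_write(wctx->evfd, 1);
	}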
Note: See TracChangeset for help on using the changeset viewer.