Changeset 7cf3b1d
- Timestamp: Jan 25, 2022, 4:16:00 PM
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum
- Children: 4fcbf26
- Parents: b200492
- Location: libcfa/src/concurrency
- Files: 5 edited
Legend: context lines are shown unprefixed, lines added in 7cf3b1d are prefixed with "+", and lines removed from b200492 are prefixed with "-".
libcfa/src/concurrency/kernel.cfa
     // Don't block if we are done
     if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-    #if !defined(__CFA_NO_STATISTICS__)
-        __tls_stats()->ready.sleep.halts++;
-    #endif
 
     // Push self to idle stack
…
 // Wake a thread from the front if there are any
 static void __wake_one(cluster * this) {
+    eventfd_t val;
+
     /* paranoid */ verify( ! __preemption_enabled() );
     /* paranoid */ verify( ready_schedule_islocked() );
 
     // Check if there is a sleeping processor
-    // int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST);
-    int fd = 0;
-    if( __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST) != 0 ) {
-        fd = __atomic_exchange_n(&this->procs.fd, 0, __ATOMIC_RELAXED);
-    }
-
-    // If no one is sleeping, we are done
-    if( fd == 0 ) return;
-
-    // We found a processor, wake it up
-    eventfd_t val;
-    val = 1;
-    eventfd_write( fd, val );
-
-    #if !defined(__CFA_NO_STATISTICS__)
-        if( kernelTLS().this_stats ) {
-            __tls_stats()->ready.sleep.wakes++;
-        }
-        else {
-            __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
-        }
-    #endif
+    struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);
+
+    // If no one is sleeping: we are done
+    if( fdp == 0p ) return;
+
+    int fd = 1;
+    if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
+        fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
+    }
+
+    switch(fd) {
+    case 0:
+        // If the processor isn't ready to sleep then the exchange will already wake it up
+        #if !defined(__CFA_NO_STATISTICS__)
+            if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
+            } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
+        #endif
+        break;
+    case 1:
+        // If someone else already said they will wake them: we are done
+        #if !defined(__CFA_NO_STATISTICS__)
+            if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
+            } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
+        #endif
+        break;
+    default:
+        // If the processor was ready to sleep, we need to wake it up with an actual write
+        val = 1;
+        eventfd_write( fd, val );
+
+        #if !defined(__CFA_NO_STATISTICS__)
+            if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
+            } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
+        #endif
+        break;
+    }
 
     /* paranoid */ verify( ready_schedule_islocked() );
…
     __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
+
+    this->idle_wctx.fd = 1;
 
     eventfd_t val;
…
 static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
+    // Tell everyone we are ready to go do sleep
+    for() {
+        int expected = this->idle_wctx.fd;
+
+        // Someone already told us to wake-up! No time for a nap.
+        if(expected == 1) { return; }
+
+        // Try to mark that we are going to sleep
+        if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+            // Every one agreed, taking a nap
+            break;
+        }
+    }
+
+
     #if !defined(CFA_WITH_IO_URING_IDLE)
         #if !defined(__CFA_NO_STATISTICS__)
…
 static bool mark_idle(__cluster_proc_list & this, processor & proc) {
+    #if !defined(__CFA_NO_STATISTICS__)
+        __tls_stats()->ready.sleep.halts++;
+    #endif
+
+    proc.idle_wctx.fd = 0;
+
     /* paranoid */ verify( ! __preemption_enabled() );
     if(!try_lock( this )) return false;
…
     insert_first(this.idles, proc);
 
-    __atomic_store_n(&this.fd , proc.idle_fd, __ATOMIC_SEQ_CST);
+    __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
     unlock( this );
     /* paranoid */ verify( ! __preemption_enabled() );
…
     {
-        int fd = 0;
-        if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;
-        __atomic_store_n(&this.fd , fd, __ATOMIC_SEQ_CST);
+        struct __fd_waitctx * wctx = 0;
+        if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
+        __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
     }
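In short, waking an idle processor is no longer a single store/exchange on a cluster-wide fd; it is a handshake on a per-processor word that holds one of three values: 0 (the owner is running), 1 (a wake-up has already been requested), or the owner's eventfd (the owner is parked and must be written to). The sketch below is a minimal plain-C model of that handshake and nothing more; the names wctx_fd, idle_fd, waker and sleeper are invented for the example, and the real code keeps the word inside struct __fd_waitctx and adds the statistics shown above.

/*
 * Minimal plain-C sketch of the three-valued wake/sleep handshake.
 * Values of wctx_fd:  0  -> processor running, nothing pending
 *                     1  -> a wake-up was already requested
 *                     >1 -> processor is parked on that eventfd
 * 0 and 1 are usable as sentinels because a fresh eventfd never
 * receives descriptor 0 or 1 while stdin/stdout are open.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

static volatile int wctx_fd = 0;   // shared wait-context word
static int idle_fd;                // the sleeper's private eventfd

// Wake side: mirrors the new __wake_one() logic.
static void * waker(void * arg) {
    (void)arg;
    sleep(1);                                                   // let the sleeper park first (usually)
    int fd = __atomic_exchange_n(&wctx_fd, 1, __ATOMIC_SEQ_CST);
    if(fd == 0) {
        // "early": the sleeper had not published its eventfd yet; it will see the 1 and skip the nap
        printf("early wake: exchange alone is enough\n");
    } else if(fd == 1) {
        // "seen": another waker already requested the wake-up
        printf("wake already requested by someone else\n");
    } else {
        // sleeper is parked on fd; an actual write is needed
        printf("sleeper parked on fd %d, writing to its eventfd\n", fd);
        eventfd_write(fd, 1);
    }
    return NULL;
}

// Sleep side: mirrors the new idle_sleep() prologue.
static void sleeper(void) {
    for(;;) {
        int expected = wctx_fd;
        if(expected == 1) {                                     // wake-up already pending, no nap
            printf("sleeper: wake-up already pending\n");
            return;
        }
        // Publish "I am going to sleep on idle_fd"; retry if a waker races us.
        if(__atomic_compare_exchange_n(&wctx_fd, &expected, idle_fd, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) break;
    }
    eventfd_t val;
    eventfd_read(idle_fd, &val);                                // actually park
    printf("sleeper: woken up\n");
}

int main(void) {
    idle_fd = eventfd(0, 0);
    pthread_t t;
    pthread_create(&t, NULL, waker, NULL);
    sleeper();
    pthread_join(t, NULL);
    return 0;
}

Because the wake side always exchanges with 1 and the sleep side only parks after a successful compare-and-swap from 0 to its eventfd, the early, seen and wake outcomes counted by the new statistics are mutually exclusive.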
libcfa/src/concurrency/kernel.hfa
 coroutine processorCtx_t {
     struct processor * proc;
+};
+
+
+struct __fd_waitctx {
+    volatile int fd;
 };
 
…
     int idle_fd;
 
+    // Idle waitctx
+    struct __fd_waitctx idle_wctx;
+
     // Termination synchronisation (user semaphore)
     oneshot terminated;
…
     // FD to use to wake a processor
-    volatile int fd;
+    struct __fd_waitctx * volatile fdw;
 
     // Total number of processors
libcfa/src/concurrency/kernel/startup.cfa
     }
 
+    this.idle_wctx.fd = 0;
+
+    // I'm assuming these two are reserved for standard input and output
+    // so I'm using them as sentinels with idle_wctx.
+    /* paranoid */ verify( this.idle_fd != 0 );
+    /* paranoid */ verify( this.idle_fd != 1 );
+
     #if !defined(__CFA_NO_STATISTICS__)
         print_stats = 0;
…
 // Cluster
 static void ?{}(__cluster_proc_list & this) {
-    this.fd  = 0;
+    this.fdw = 0p;
     this.idle = 0;
     this.total = 0;
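The two paranoid verify()s added above lean on descriptors 0 and 1 being held by standard input and output, which frees those values to serve as the idle_wctx sentinels. A tiny stand-alone check of that assumption (plain C, illustrative only):

#include <assert.h>
#include <sys/eventfd.h>

int main(void) {
    // POSIX hands out the lowest free descriptor; with stdin and stdout
    // still open, a fresh eventfd can therefore never be 0 or 1.
    int fd = eventfd(0, 0);
    assert(fd > 1);
    return 0;
}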
libcfa/src/concurrency/stats.cfa
     stats->ready.sleep.halts   = 0;
     stats->ready.sleep.cancels = 0;
+    stats->ready.sleep.early   = 0;
     stats->ready.sleep.wakes   = 0;
+    stats->ready.sleep.seen    = 0;
     stats->ready.sleep.exits   = 0;
…
     tally_one( &cltr->ready.sleep.halts   , &proc->ready.sleep.halts   );
     tally_one( &cltr->ready.sleep.cancels , &proc->ready.sleep.cancels );
+    tally_one( &cltr->ready.sleep.early   , &proc->ready.sleep.early   );
     tally_one( &cltr->ready.sleep.wakes   , &proc->ready.sleep.wakes   );
+    tally_one( &cltr->ready.sleep.seen    , &proc->ready.sleep.wakes   );
     tally_one( &cltr->ready.sleep.exits   , &proc->ready.sleep.exits   );
…
         | " (" | eng3(ready.pop.search.attempt) | " try)";
 
-    sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel," | eng3(ready.sleep.wakes) | "wake," | eng3(ready.sleep.exits) | "exit";
+    sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel,"
+         | eng3(ready.sleep.wakes + ready.sleep.early) | '(' | eng3(ready.sleep.early) | ',' | eng3(ready.sleep.seen) | ')' | " wake(early, seen),"
+         | eng3(ready.sleep.exits) | "exit";
     sstr | nl;
 }
libcfa/src/concurrency/stats.hfa
         volatile uint64_t halts;
         volatile uint64_t cancels;
+        volatile uint64_t early;
         volatile uint64_t wakes;
+        volatile uint64_t seen;
         volatile uint64_t exits;
     } sleep;