Changeset 2e9b59b for libcfa/src/concurrency/kernel.cfa
- Timestamp: Apr 19, 2022, 3:00:04 PM (3 years ago)
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 5b84a321
- Parents: ba897d21 (diff), bb7c77d (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
Legend: context lines are prefixed with a space, added lines with "+", removed lines with "-".
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (ba897d21)
+++ libcfa/src/concurrency/kernel.cfa (2e9b59b)

@@ -19 +19 @@
 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__
 
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
 //C Includes
 #include <errno.h>
@@ -25 +28 @@
 #include <signal.h>
 #include <unistd.h>
+
 extern "C" {
 	#include <sys/eventfd.h>
@@ -31 +35 @@
 
 //CFA Includes
-#include "kernel_private.hfa"
+#include "kernel/private.hfa"
 #include "preemption.hfa"
 #include "strstream.hfa"
@@ -40 +44 @@
 #define __CFA_INVOKE_PRIVATE__
 #include "invoke.h"
+#pragma GCC diagnostic pop
 
 #if !defined(__CFA_NO_STATISTICS__)
@@ -127 +132 @@
 static void __wake_one(cluster * cltr);
 
-static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
+static void idle_sleep(processor * proc);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
 
-extern void __cfa_io_start( processor * );
-extern bool __cfa_io_drain( processor * );
-extern bool __cfa_io_flush( processor *, int min_comp );
-extern void __cfa_io_stop ( processor * );
-static inline bool __maybe_io_drain( processor * );
+extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
+extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
+extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
 
 #if defined(CFA_WITH_IO_URING_IDLE)
@@ -159 +162 @@
 	verify(this);
 
-	io_future_t future; // used for idle sleep when io_uring is present
-	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
-	eventfd_t idle_val;
-	iovec idle_iovec = { &idle_val, sizeof(idle_val) };
-
-	__cfa_io_start( this );
+	/* paranoid */ verify( this->idle_wctx.ftr != 0p );
+	/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
+
+	// used for idle sleep when io_uring is present
+	// mark it as already fulfilled so we know if there is a pending request or not
+	this->idle_wctx.ftr->self.ptr = 1p;
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
@@ -189 +192 @@
 	for() {
 		// Check if there is pending io
-		__maybe_io_drain( this );
+		__cfa_io_drain( this );
 
 		// Try to get the next thread
@@ -195 +198 @@
 
 		if( !readyThread ) {
+			// there is no point in holding submissions if we are idle
 			__IO_STATS__(true, io.flush.idle++; )
-			__cfa_io_flush( this, 0 );
+			__cfa_io_flush( this );
+
+			// drain again in case something showed up
+			__cfa_io_drain( this );
 
 			readyThread = __next_thread( this->cltr );
@@ -202 +209 @@
 
 		if( !readyThread ) for(5) {
+			readyThread = __next_thread_slow( this->cltr );
+
+			if( readyThread ) break;
+
+			// It's unlikely we still I/O to submit, but the arbiter could
 			__IO_STATS__(true, io.flush.idle++; )
-
-			readyThread = __next_thread_slow( this->cltr );
-
-			if( readyThread ) break;
-
-			__cfa_io_flush( this, 0 );
+			__cfa_io_flush( this );
+
+			// drain again in case something showed up
+			__cfa_io_drain( this );
 		}
@@ -231 +241 @@
 		}
 
-		idle_sleep( this, future, idle_iovec );
+		idle_sleep( this );
 
 		// We were woken up, remove self from idle
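Aside: the reworked `__cfa_io_*` declarations above now carry `__attribute__((nonnull (1)))`. As a minimal sketch of what that attribute buys (plain C with hypothetical names, not the CFA runtime API):

#include <stdbool.h>
#include <stddef.h>

// Parameter 1 is declared nonnull: GCC and Clang emit a -Wnonnull
// diagnostic when a caller passes an explicit NULL, and may elide
// internal null checks on that parameter.
extern bool io_drain( void * proc ) __attribute__((nonnull (1)));

bool poll_once( void * proc ) {
	return io_drain( proc );  // ok: the caller vouches that proc != NULL
	// io_drain( NULL );      // would be diagnosed at compile time
}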
@@ -251 +261 @@
 		if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
 
-		if( this->io.pending && !this->io.dirty ) {
+		if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
 			__IO_STATS__(true, io.flush.dirty++; )
-			__cfa_io_flush( this, 0 );
+			__cfa_io_flush( this );
 		}
 	}
@@ -259 +269 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
 }
-
-	for(int i = 0; !available(future); i++) {
-		if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);
-		__cfa_io_flush( this, 1 );
-	}
-
-	__cfa_io_stop( this );
 
 	post( this->terminated );
@@ -634 +637 @@
 
 	int fd = 1;
-	if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
-		fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
+	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
+		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
 	}
 
 	switch(fd) {
+		__attribute__((unused)) int ret;
 	case 0:
 		// If the processor isn't ready to sleep then the exchange will already wake it up
@@ -656 +660 @@
 		// If the processor was ready to sleep, we need to wake it up with an actual write
 		val = 1;
-		eventfd_write( fd, val );
+		ret = eventfd_write( fd, val );
+		/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
 
 		#if !defined(__CFA_NO_STATISTICS__)
@@ -677 +682 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
 
-	this->idle_wctx.fd = 1;
+	this->idle_wctx.sem = 1;
+
+	this->idle_wctx.wake__time = rdtscl();
 
 	eventfd_t val;
 	val = 1;
-	eventfd_write( this->idle_fd, val );
-
-	/* paranoid */ verify( ! __preemption_enabled() );
-}
-
-static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
+	__attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );
+
+	/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static void idle_sleep(processor * this) {
+	/* paranoid */ verify( this->idle_wctx.evfd != 1 );
+	/* paranoid */ verify( this->idle_wctx.evfd != 2 );
+
 	// Tell everyone we are ready to go do sleep
 	for() {
-		int expected = this->idle_wctx.fd;
+		int expected = this->idle_wctx.sem;
 
 		// Someone already told us to wake-up! No time for a nap.
@@ -695 +706 @@
 
 		// Try to mark that we are going to sleep
-		if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
 			// Every one agreed, taking a nap
 			break;
@@ -713 +724 @@
 	{
 		eventfd_t val;
-		ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
 		if(ret < 0) {
 			switch((int)errno) {
@@ -735 +746 @@
 	#endif
 #else
-	// Do we already have a pending read
-	if(available(future)) {
-		// There is no pending read, we need to add one
-		reset(future);
-
-		__kernel_read(this, future, iov, this->idle_fd );
-	}
-
-	__cfa_io_flush( this, 1 );
+	__cfa_io_idle( this );
 #endif
 }
@@ -750 +753 @@
 	__STATS__(true, ready.sleep.halts++; )
 
-	proc.idle_wctx.fd = 0;
+	proc.idle_wctx.sem = 0;
 
 	/* paranoid */ verify( ! __preemption_enabled() );
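Aside: the renamed `idle_wctx.sem` field drives a small handshake between `__wake_one` and `idle_sleep`, visible in the hunks above: 0 means the processor is awake, 1 means a wake-up was posted before it slept, and any other value is the eventfd it is parked on. A self-contained sketch of that protocol in plain C (illustrative names, single attempt instead of the runtime's retry loop, not the CFA API):

#include <stdatomic.h>
#include <unistd.h>
#include <sys/eventfd.h>

struct idle_ctx {
	atomic_int sem;  // 0 = awake, 1 = wake-up posted early, else = evfd value
	int evfd;        // eventfd the idle processor blocks on
};

// Processor side: advertise the eventfd in sem, then block on it.
static void sleeper(struct idle_ctx * ctx) {
	int expected = atomic_load(&ctx->sem);
	if (expected != 1 &&
	    atomic_compare_exchange_strong(&ctx->sem, &expected, ctx->evfd)) {
		eventfd_t val;
		(void)read(ctx->evfd, &val, sizeof(val));  // sleeps until a write
	}
	atomic_store(&ctx->sem, 0);  // back awake
}

// Waker side: exchange sem with 1; if the old value was a descriptor,
// the sleeper is already parked and needs an actual eventfd write.
static void waker(struct idle_ctx * ctx) {
	int prev = atomic_exchange(&ctx->sem, 1);
	if (prev != 0 && prev != 1)
		(void)eventfd_write(prev, 1);
}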
@@ -831 +834 @@
 #endif
 
-static inline bool __maybe_io_drain( processor * proc ) {
-	bool ret = false;
-	#if defined(CFA_HAVE_LINUX_IO_URING_H)
-		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
-
-		// Check if we should drain the queue
-		$io_context * ctx = proc->io.ctx;
-		unsigned head = *ctx->cq.head;
-		unsigned tail = *ctx->cq.tail;
-		if(head == tail) return false;
-		ready_schedule_lock();
-		ret = __cfa_io_drain( proc );
-		ready_schedule_unlock();
-	#endif
-	return ret;
-}
 
 
 //-----------------------------------------------------------------------------
@@ -903 +891 @@
 	void print_stats_now( cluster & this, int flags ) {
 		crawl_cluster_stats( this );
-		__print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
+		__print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
 	}
 #endif
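Aside: the deleted `__maybe_io_drain` wrapper peeked at the completion-queue head and tail indices to skip pointless drains; that responsibility now falls to `__cfa_io_drain` itself. The same emptiness test expressed against liburing (an assumption for illustration; the CFA runtime reads the raw ring indices directly, as the removed code shows):

#include <stdbool.h>
#include <liburing.h>

// The CQ is empty exactly when head == tail. io_uring_cq_ready()
// returns tail - head, loading the kernel-written tail with acquire
// semantics, so a nonzero result means completions are waiting.
static bool cq_has_completions(struct io_uring * ring) {
	return io_uring_cq_ready(ring) > 0;
}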