Changeset d672350 for libcfa/src/concurrency/kernel.cfa
- Timestamp:
- Mar 21, 2022, 1:44:06 PM (4 years ago)
- Branches:
- ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children:
- a76202d
- Parents:
- ef3c383 (diff), dbe2533 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel.cfa
ref3c383 rd672350 19 19 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__ 20 20 21 #pragma GCC diagnostic push 22 #pragma GCC diagnostic ignored "-Waddress-of-packed-member" 23 21 24 //C Includes 22 25 #include <errno.h> … … 25 28 #include <signal.h> 26 29 #include <unistd.h> 30 27 31 extern "C" { 28 32 #include <sys/eventfd.h> … … 31 35 32 36 //CFA Includes 33 #include "kernel _private.hfa"37 #include "kernel/private.hfa" 34 38 #include "preemption.hfa" 35 39 #include "strstream.hfa" … … 40 44 #define __CFA_INVOKE_PRIVATE__ 41 45 #include "invoke.h" 46 #pragma GCC diagnostic pop 42 47 43 48 #if !defined(__CFA_NO_STATISTICS__) … … 131 136 static void mark_awake(__cluster_proc_list & idles, processor & proc); 132 137 133 extern void __cfa_io_start( processor * ); 134 extern bool __cfa_io_drain( processor * ); 138 extern bool __cfa_io_drain( $io_context * ); 135 139 extern bool __cfa_io_flush( processor *, int min_comp ); 136 extern void __cfa_io_stop ( processor * );137 140 static inline bool __maybe_io_drain( processor * ); 138 141 … … 159 162 verify(this); 160 163 161 io_future_t future; // used for idle sleep when io_uring is present 162 future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not 163 eventfd_t idle_val; 164 iovec idle_iovec = { &idle_val, sizeof(idle_val) }; 165 166 __cfa_io_start( this ); 164 /* paranoid */ verify( this->idle_wctx.ftr != 0p ); 165 /* paranoid */ verify( this->idle_wctx.rdbuf != 0p ); 166 167 // used for idle sleep when io_uring is present 168 // mark it as already fulfilled so we know if there is a pending request or not 169 this->idle_wctx.ftr->self.ptr = 1p; 170 iovec idle_iovec = { this->idle_wctx.rdbuf, sizeof(eventfd_t) }; 167 171 168 172 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this); … … 231 235 } 232 236 233 idle_sleep( this, future, idle_iovec );237 idle_sleep( this, *this->idle_wctx.ftr, idle_iovec ); 234 238 235 239 // We were woken up, remove self from idle 
… … 251 255 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 252 256 253 if( this->io.pending && !this->io.dirty) {257 if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) { 254 258 __IO_STATS__(true, io.flush.dirty++; ) 255 259 __cfa_io_flush( this, 0 ); … … 259 263 __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this); 260 264 } 261 262 for(int i = 0; !available(future); i++) {263 if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);264 __cfa_io_flush( this, 1 );265 }266 267 __cfa_io_stop( this );268 265 269 266 post( this->terminated ); … … 634 631 635 632 int fd = 1; 636 if( __atomic_load_n(&fdp-> fd, __ATOMIC_SEQ_CST) != 1 ) {637 fd = __atomic_exchange_n(&fdp-> fd, 1, __ATOMIC_RELAXED);633 if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) { 634 fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED); 638 635 } 639 636 … … 677 674 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this); 678 675 679 this->idle_wctx. fd= 1;676 this->idle_wctx.sem = 1; 680 677 681 678 eventfd_t val; 682 679 val = 1; 683 eventfd_write( this->idle_ fd, val );680 eventfd_write( this->idle_wctx.evfd, val ); 684 681 685 682 /* paranoid */ verify( ! __preemption_enabled() ); … … 689 686 // Tell everyone we are ready to go do sleep 690 687 for() { 691 int expected = this->idle_wctx. fd;688 int expected = this->idle_wctx.sem; 692 689 693 690 // Someone already told us to wake-up! No time for a nap. … … 695 692 696 693 // Try to mark that we are going to sleep 697 if(__atomic_compare_exchange_n(&this->idle_wctx. 
fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {694 if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) { 698 695 // Every one agreed, taking a nap 699 696 break; … … 713 710 { 714 711 eventfd_t val; 715 ssize_t ret = read( this->idle_ fd, &val, sizeof(val) );712 ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) ); 716 713 if(ret < 0) { 717 714 switch((int)errno) { … … 740 737 reset(future); 741 738 742 __kernel_read(this, future, iov, this->idle_ fd );739 __kernel_read(this, future, iov, this->idle_wctx.evfd ); 743 740 } 744 741 … … 750 747 __STATS__(true, ready.sleep.halts++; ) 751 748 752 proc.idle_wctx. fd= 0;749 proc.idle_wctx.sem = 0; 753 750 754 751 /* paranoid */ verify( ! __preemption_enabled() ); … … 842 839 if(head == tail) return false; 843 840 ready_schedule_lock(); 844 ret = __cfa_io_drain( proc);841 ret = __cfa_io_drain( ctx ); 845 842 ready_schedule_unlock(); 846 843 #endif
Note:
See TracChangeset
for help on using the changeset viewer.