Changeset 5235d49 for libcfa/src/concurrency
- Timestamp: Jan 1, 2022, 11:14:35 AM
- Branches: ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum, stuck-waitfor-destruct
- Children: 12c1eef
- Parents: 7770cc8 (diff), db1ebed (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src/concurrency
- Files: 4 edited
  - io.cfa (modified) (6 diffs)
  - io/setup.cfa (modified) (2 diffs)
  - kernel.cfa (modified) (13 diffs)
  - kernel_private.hfa (modified) (1 diff)
Legend: in the diffs below, unmodified context lines are prefixed with a space, added lines with '+', and removed lines with '-'; '…' separates hunks.
libcfa/src/concurrency/io.cfa
--- libcfa/src/concurrency/io.cfa (r7770cc8)
+++ libcfa/src/concurrency/io.cfa (r5235d49)

 #include <sys/syscall.h>
 #include <sys/eventfd.h>
+#include <sys/uio.h>

 #include <linux/io_uring.h>
…
 }

-bool __cfa_io_flush( processor * proc, bool wait ) {
+bool __cfa_io_flush( processor * proc, int min_comp ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( proc );
…
 	__STATS__( true, io.calls.flush++; )
-	int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, wait ? 1 : 0, 0, (sigset_t *)0p, _NSIG / 8);
+	int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8);
 	if( ret < 0 ) {
 		switch((int)errno) {
…
 	ctx->proc->io.dirty = true;
 	if(sq.to_submit > 30 || !lazy) {
-		__cfa_io_flush( ctx->proc, false );
+		__cfa_io_flush( ctx->proc, 0 );
 	}
 }
…
 }

-#if defined(IO_URING_IDLE)
-	bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd) {
+#if defined(CFA_WITH_IO_URING_IDLE)
+	bool __kernel_read(processor * proc, io_future_t & future, iovec & iov, int fd) {
 		$io_context * ctx = proc->io.ctx;
 		/* paranoid */ verify( ! __preemption_enabled() );
…
 		__fill( &sqe, 1, &idx, ctx );

-		sqe->opcode = IORING_OP_READ;
 		sqe->user_data = (uintptr_t)&future;
 		sqe->flags = 0;
+		sqe->fd = fd;
+		sqe->off = 0;
 		sqe->ioprio = 0;
-		sqe->fd = 0;
-		sqe->off = 0;
 		sqe->fsync_flags = 0;
 		sqe->__pad2[0] = 0;
 		sqe->__pad2[1] = 0;
 		sqe->__pad2[2] = 0;
-		sqe->addr = (uintptr_t)buf;
-		sqe->len = sizeof(uint64_t);
+
+		#if defined(CFA_HAVE_IORING_OP_READ)
+			sqe->opcode = IORING_OP_READ;
+			sqe->addr = (uint64_t)iov.iov_base;
+			sqe->len = iov.iov_len;
+		#elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV)
+			sqe->opcode = IORING_OP_READV;
+			sqe->addr = (uintptr_t)&iov;
+			sqe->len = 1;
+		#else
+			#error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined
+		#endif

 		asm volatile("": : :"memory");
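The core of this diff is the switch from a boolean wait flag to an integer minimum-completion count: io_uring_enter(2) only honours its min_complete argument when the IORING_ENTER_GETEVENTS flag is passed, so threading min_comp through lets callers choose anywhere between a pure submit (0) and a blocking wait. Below is a minimal sketch of the new call shape in plain C, outside the CFA runtime; flush_sketch is a hypothetical helper, not part of libcfa:

    #include <signal.h>          // sigset_t, _NSIG
    #include <unistd.h>          // syscall
    #include <sys/syscall.h>     // __NR_io_uring_enter
    #include <linux/io_uring.h>  // IORING_ENTER_GETEVENTS

    // min_comp == 0: submit pending sqes and return immediately.
    // min_comp  > 0: submit, then block for at least min_comp completions;
    // the kernel ignores min_complete unless IORING_ENTER_GETEVENTS is set.
    static int flush_sketch( int ring_fd, unsigned to_submit, int min_comp ) {
        unsigned flags = min_comp > 0 ? IORING_ENTER_GETEVENTS : 0;
        return syscall( __NR_io_uring_enter, ring_fd, to_submit, min_comp,
                        flags, (sigset_t *)0, _NSIG / 8 );
    }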
libcfa/src/concurrency/io/setup.cfa
--- libcfa/src/concurrency/io/setup.cfa (r7770cc8)
+++ libcfa/src/concurrency/io/setup.cfa (r5235d49)

 void __cfa_io_start( processor * proc ) {}
-bool __cfa_io_flush( processor * proc, bool ) {}
+bool __cfa_io_flush( processor * proc, int ) {}
 void __cfa_io_stop ( processor * proc ) {}
…
 cq.cqes = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

-#if !defined(IO_URING_IDLE)
+#if !defined(CFA_WITH_IO_URING_IDLE)
 	// Step 4 : eventfd
 	// io_uring_register is so f*cking slow on some machine that it
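The renamed guard still protects the same fallback: when the io_uring idle path is compiled out, setup continues to "Step 4" and wakes idle processors through an eventfd tied to the ring. A rough sketch of what such a registration looks like against a raw ring fd; register_wakeup_fd is a hypothetical helper, not the runtime's actual code:

    #include <unistd.h>          // syscall, close
    #include <sys/eventfd.h>     // eventfd
    #include <sys/syscall.h>     // __NR_io_uring_register
    #include <linux/io_uring.h>  // IORING_REGISTER_EVENTFD

    // After registration, every completion posted to the ring also signals
    // efd, so an idle processor can block on efd instead of the ring itself.
    static int register_wakeup_fd( int ring_fd ) {
        int efd = eventfd( 0, 0 );
        if( efd < 0 ) return -1;
        if( syscall( __NR_io_uring_register, ring_fd,
                     IORING_REGISTER_EVENTFD, &efd, 1 ) < 0 ) {
            close( efd );
            return -1;
        }
        return efd;
    }

The diff's own comment notes that io_uring_register can be painfully slow on some machines, which is part of the motivation for the io_uring-based idle path that skips this step entirely.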
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (r7770cc8)
+++ libcfa/src/concurrency/kernel.cfa (r5235d49)

 extern "C" {
 	#include <sys/eventfd.h>
+	#include <sys/uio.h>
 }
…
 static void __wake_one(cluster * cltr);

-static void idle_sleep(processor * proc, io_future_t & future, char buf[]);
+static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);

 extern void __cfa_io_start( processor * );
 extern bool __cfa_io_drain( processor * );
-extern bool __cfa_io_flush( processor *, bool wait );
+extern bool __cfa_io_flush( processor *, int min_comp );
 extern void __cfa_io_stop ( processor * );
 static inline bool __maybe_io_drain( processor * );

-#if defined(IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
-	extern bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd);
+#if defined(CFA_WITH_IO_URING_IDLE)
+	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
 #endif
…
 	io_future_t future; // used for idle sleep when io_uring is present
 	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
-	char buf[sizeof(uint64_t)];
+	eventfd_t idle_val;
+	iovec idle_iovec = { &idle_val, sizeof(idle_val) };

 	__cfa_io_start( this );
…
 	if( !readyThread ) {
-		__cfa_io_flush( this, false );
+		__cfa_io_flush( this, 0 );

 		readyThread = __next_thread_slow( this->cltr );
…
 	}

-	idle_sleep( this, future, buf );
+	idle_sleep( this, future, idle_iovec );

 	// We were woken up, remove self from idle
…
 	if(this->io.pending && !this->io.dirty) {
-		__cfa_io_flush( this, false );
+		__cfa_io_flush( this, 0 );
 	}
…
 	// If we can't find a thread, might as well flush any outstanding I/O
-	if(this->io.pending) { __cfa_io_flush( this, false ); }
+	if(this->io.pending) { __cfa_io_flush( this, 0 ); }

 	// Spin a little on I/O, just in case
…
 	if(this->io.pending && !this->io.dirty) {
-		__cfa_io_flush( this, false );
+		__cfa_io_flush( this, 0 );
 	}
…
 		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
+	}
+
+	for(int i = 0; !available(future); i++) {
+		if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has been spinning on a flush after exit loop.\n", 60);
+		__cfa_io_flush( this, 1 );
 	}
…
 }

-static void idle_sleep(processor * this, io_future_t & future, char buf[]) {
-	#if !defined(IO_URING_IDLE) || !defined(CFA_HAVE_LINUX_IO_URING_H)
+static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
+	#if !defined(CFA_WITH_IO_URING_IDLE)
 		#if !defined(__CFA_NO_STATISTICS__)
 			if(this->print_halts) {
…
 		#endif
 	#else
-		#if !defined(CFA_HAVE_IORING_OP_READ)
-			#error this is only implemented if the read is present
-		#endif
 		// Do we already have a pending read
 		if(available(future)) {
…
 			reset(future);

-			__kernel_read(this, future, buf, this->idle_fd );
+			__kernel_read(this, future, iov, this->idle_fd );
 		}

-		__cfa_io_flush( this, true );
+		__cfa_io_flush( this, 1 );
 	#endif
 }
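The replaced char buf[sizeof(uint64_t)] and the new eventfd_t/iovec pair describe the same eight bytes, but the iovec form lets one declaration serve both read opcodes. A standalone sketch of the idiom, assuming only the standard headers:

    #include <sys/eventfd.h>  // eventfd_t is a uint64_t
    #include <sys/uio.h>      // struct iovec

    // A read(2) of an eventfd must transfer exactly 8 bytes, so the idle
    // buffer is typed as the counter itself rather than as raw chars.
    eventfd_t idle_val;
    struct iovec idle_iovec = { &idle_val, sizeof(idle_val) };
    // IORING_OP_READ consumes iov_base/iov_len directly, while
    // IORING_OP_READV takes the one-element vector; __kernel_read in
    // io.cfa picks whichever opcode the configure checks advertise.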
libcfa/src/concurrency/kernel_private.hfa
--- libcfa/src/concurrency/kernel_private.hfa (r7770cc8)
+++ libcfa/src/concurrency/kernel_private.hfa (r5235d49)

 }

-// #define IO_URING_IDLE
+// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
+#define CFA_WANT_IO_URING_IDLE
+
+// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
+#if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
+	#if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
+		#define CFA_WITH_IO_URING_IDLE
+	#endif
+#endif

 //-----------------------------------------------------------------------------
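The commented-out IO_URING_IDLE toggle grows into a three-layer scheme: a hand-set *WANT* policy macro, configure-detected *HAVE* capability macros, and a derived *WITH* macro that is the only one the rest of the runtime tests. A generic illustration of the pattern, with hypothetical DEMO_* names standing in for the CFA macros:

    // Policy: a maintainer opts in by hand.
    #define DEMO_WANT_FEATURE

    // Capability: DEMO_HAVE_FEATURE would normally come from a configure
    // check rather than being defined here.
    // Effective switch: on only when policy and capability agree.
    #if defined(DEMO_WANT_FEATURE) && defined(DEMO_HAVE_FEATURE)
        #define DEMO_WITH_FEATURE
    #endif

    #if defined(DEMO_WITH_FEATURE)
        // feature path (here: io_uring-based idle sleep)
    #else
        // portable fallback
    #endif

Keeping consumers on the single derived macro means io.cfa and kernel.cfa never repeat the capability checks, and the #error in the old idle_sleep becomes unreachable by construction.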