Changes in libcfa/src/concurrency/io.cfa [e8ac228:d3605f8]
- File:
-
- 1 edited
-
libcfa/src/concurrency/io.cfa (modified) (8 diffs)
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/io.cfa
re8ac228 rd3605f8 33 33 #include <sys/syscall.h> 34 34 #include <sys/eventfd.h> 35 #include <sys/uio.h> 35 36 36 37 #include <linux/io_uring.h> … … 133 134 } 134 135 135 void __cfa_io_flush( processor * proc) {136 bool __cfa_io_flush( processor * proc, int min_comp ) { 136 137 /* paranoid */ verify( ! __preemption_enabled() ); 137 138 /* paranoid */ verify( proc ); … … 141 142 $io_context & ctx = *proc->io.ctx; 142 143 143 // for(i; 2) {144 // unsigned idx = proc->rdq.id + i;145 // cltr->ready_queue.lanes.tscs[idx].tv = -1ull;146 // }147 148 144 __ioarbiter_flush( ctx ); 149 145 150 146 __STATS__( true, io.calls.flush++; ) 151 int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, 0,0, (sigset_t *)0p, _NSIG / 8);147 int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8); 152 148 if( ret < 0 ) { 153 149 switch((int)errno) { … … 157 153 // Update statistics 158 154 __STATS__( false, io.calls.errors.busy ++; ) 159 // for(i; 2) { 160 // unsigned idx = proc->rdq.id + i; 161 // cltr->ready_queue.lanes.tscs[idx].tv = rdtscl(); 162 // } 163 return; 155 return false; 164 156 default: 165 157 abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) ); … … 182 174 183 175 ctx.proc->io.pending = false; 184 185 __cfa_io_drain( proc ); 186 // for(i; 2) { 187 // unsigned idx = proc->rdq.id + i; 188 // cltr->ready_queue.lanes.tscs[idx].tv = rdtscl(); 189 // } 176 ready_schedule_lock(); 177 bool ret = __cfa_io_drain( proc ); 178 ready_schedule_unlock(); 179 return ret; 190 180 } 191 181 … … 291 281 } 292 282 293 294 283 //============================================================================================= 295 284 // submission … … 314 303 ctx->proc->io.dirty = true; 315 304 if(sq.to_submit > 30 || !lazy) { 316 ready_schedule_lock(); 317 __cfa_io_flush( ctx->proc ); 318 ready_schedule_unlock(); 305 __cfa_io_flush( ctx->proc, 0 ); 319 306 } 320 307 } 
… … 515 502 } 516 503 } 504 505 #if defined(CFA_WITH_IO_URING_IDLE) 506 bool __kernel_read(processor * proc, io_future_t & future, iovec & iov, int fd) { 507 $io_context * ctx = proc->io.ctx; 508 /* paranoid */ verify( ! __preemption_enabled() ); 509 /* paranoid */ verify( proc == __cfaabi_tls.this_processor ); 510 /* paranoid */ verify( ctx ); 511 512 __u32 idx; 513 struct io_uring_sqe * sqe; 514 515 // We can proceed to the fast path 516 if( !__alloc(ctx, &idx, 1) ) return false; 517 518 // Allocation was successful 519 __fill( &sqe, 1, &idx, ctx ); 520 521 sqe->user_data = (uintptr_t)&future; 522 sqe->flags = 0; 523 sqe->fd = fd; 524 sqe->off = 0; 525 sqe->ioprio = 0; 526 sqe->fsync_flags = 0; 527 sqe->__pad2[0] = 0; 528 sqe->__pad2[1] = 0; 529 sqe->__pad2[2] = 0; 530 531 #if defined(CFA_HAVE_IORING_OP_READ) 532 sqe->opcode = IORING_OP_READ; 533 sqe->addr = (uint64_t)iov.iov_base; 534 sqe->len = iov.iov_len; 535 #elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV) 536 sqe->opcode = IORING_OP_READV; 537 sqe->addr = (uintptr_t)&iov; 538 sqe->len = 1; 539 #else 540 #error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined 541 #endif 542 543 asm volatile("": : :"memory"); 544 545 /* paranoid */ verify( sqe->user_data == (uintptr_t)&future ); 546 __submit( ctx, &idx, 1, true ); 547 548 /* paranoid */ verify( proc == __cfaabi_tls.this_processor ); 549 /* paranoid */ verify( ! __preemption_enabled() ); 550 } 551 #endif 517 552 #endif
Note: See TracChangeset for help on using the changeset viewer.
for help on using the changeset viewer.