//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>
		#include <sys/eventfd.h>
		#include <sys/uio.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "kernel/private.hfa"
	#include "io/types.hfa"

	__attribute__((unused)) static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};

	static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want );
	static void __ioarbiter_submit( $io_context * , __u32 idxs[], __u32 have, bool lazy );
	static void __ioarbiter_flush ( $io_context & );
	static inline void __ioarbiter_notify( $io_context & ctx );
	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static inline unsigned __flush( struct $io_context & );
	static inline __u32 __release_sqes( struct $io_context & );
	extern void __kernel_unpark( thread$ * thrd, unpark_hint );

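	// Drain the completion queue: walk the cqes between head and tail, fulfil the future
	// attached to each completion, and publish the new head back to the kernel.
	// Returns true if at least one completion was processed.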
	bool __cfa_io_drain( processor * proc ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		// Drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
		const __u32 mask = *ctx->cq.mask;

		__u32 count = tail - head;
		__STATS__( false, io.calls.drain++; io.calls.completed += count; )

		if(count == 0) return false;

		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];

			/* paranoid */ verify(&cqe);

			struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
			__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

			__kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);

		// Mark to the kernel that the cqes have been seen.
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );

		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );

		return true;
	}

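	// Flush this processor's io_uring instance: hand any external submissions to the ring,
	// call io_uring_enter to submit the pending sqes (also waiting for min_comp completions
	// when min_comp > 0), release the sqes io_uring has consumed, then drain the completions.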
	bool __cfa_io_flush( processor * proc, int min_comp ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		__attribute__((unused)) cluster * cltr = proc->cltr;
		$io_context & ctx = *proc->io.ctx;

		__ioarbiter_flush( ctx );

		if(ctx.sq.to_submit != 0 || min_comp > 0) {

			__STATS__( true, io.calls.flush++; )
			int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
				case EBUSY:
					// Update statistics
					__STATS__( false, io.calls.errors.busy ++; )
					return false;
				default:
					abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
				}
			}

			__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
			__STATS__( true, io.calls.submitted += ret; )
			/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
			/* paranoid */ verify( ctx.sq.to_submit >= ret );

			ctx.sq.to_submit -= ret;

			/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

			// Release the consumed SQEs
			__release_sqes( ctx );

			/* paranoid */ verify( ! __preemption_enabled() );

			__atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
		}

		ready_schedule_lock();
		bool ret = __cfa_io_drain( proc );
		ready_schedule_unlock();
		return ret;
	}

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps :
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible by the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry, this is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach consensus on two things at the same time:
	//     A - The order in which entries are listed in the array: no two threads must pick the
	//         same index for their entries
	//     B - When can the tail be updated for the kernel. EVERY entry in the array between
	//         head and tail must be fully filled and shouldn't ever be touched again.
	//
	// A sketch of this three-step sequence is shown below.
	//
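	// Typical use of the three steps, as a sketch only (error handling and the exact sqe
	// fields depend on the operation; 'some_future' is a hypothetical io_future_t):
	//
	//     __u32 idx;
	//     struct io_uring_sqe * sqe;
	//     struct $io_context * ctx = cfa_io_allocate( &sqe, &idx, 1 ); // step 1: allocate
	//     sqe->opcode = IORING_OP_NOP;                                 // step 2: fill the entry
	//     sqe->user_data = (uintptr_t)&some_future;
	//     cfa_io_submit( ctx, &idx, 1, false );                        // step 3: submit eagerly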
	//=============================================================================================
	// Allocation
	// For the user's convenience, fill the sqes from the indexes.
	static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct $io_context * ctx) {
		struct io_uring_sqe * sqes = ctx->sq.sqes;
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
			out_sqes[i] = &sqes[idxs[i]];
		}
	}

	// Try to directly allocate from a given context
	// Not thread-safe
	static inline bool __alloc(struct $io_context * ctx, __u32 idxs[], __u32 want) {
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 fhead = sq.free_ring.head; // get the current head of the queue
		__u32 ftail = sq.free_ring.tail; // get the current tail of the queue

		// If we don't have enough sqes, fail
		if((ftail - fhead) < want) { return false; }

		// copy all the indexes we want from the available list
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
			idxs[i] = sq.free_ring.array[(fhead + i) & mask];
		}

		// Advance the head to mark the indexes as consumed
		__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

		// return success
		return true;
	}

	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe
	// sqe == &sqes[idx]
	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		__cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

		// We can proceed to the fast path
		if( __alloc(ctx, idxs, want) ) {
			// Allocation was successful
			__STATS__( true, io.alloc.fast += 1; )
			enable_interrupts();

			__cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

			__fill( sqes, want, idxs, ctx );
			return ctx;
		}
		// The fast path failed, fallback
		__STATS__( true, io.alloc.fail += 1; )

		// Fast path failed, fallback on arbitration
		__STATS__( true, io.alloc.slow += 1; )
		enable_interrupts();

		$io_arbiter * ioarb = proc->cltr->io.arbiter;
		/* paranoid */ verify( ioarb );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

		struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want);

		__cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);

		__fill( sqes, want, idxs, ret );
		return ret;
	}

	//=============================================================================================
	// submission
	static inline void __submit_only( struct $io_context * ctx, __u32 idxs[], __u32 have) {
		// We can proceed to the fast path
		// Get the right objects
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 tail = *sq.kring.tail;

		// Add the sqes to the array
		for( i; have ) {
			__cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
			sq.kring.array[ (tail + i) & mask ] = idxs[i];
		}

		// Make the sqes visible to the submitter
		__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
		sq.to_submit += have;

		__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
		__atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED);
	}

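	// Submit and decide whether to flush immediately: flush when enough sqes have accumulated
	// (more than 30, presumably a heuristic threshold) or when the caller asked for an eager
	// submission.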
	static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {
		__sub_ring_t & sq = ctx->sq;
		__submit_only(ctx, idxs, have);

		if(sq.to_submit > 30) {
			__tls_stats()->io.flush.full++;
			__cfa_io_flush( ctx->proc, 0 );
		}
		if(!lazy) {
			__tls_stats()->io.flush.eager++;
			__cfa_io_flush( ctx->proc, 0 );
		}
	}

	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		// Can we proceed to the fast path?
		if( ctx == inctx ) // We have the right instance?
		{
			__submit(ctx, idxs, have, lazy);

			// Mark the instance as no longer in-use, re-enable interrupts and return
			__STATS__( true, io.submit.fast += 1; )
			enable_interrupts();

			__cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
			return;
		}

		// Fast path failed, fallback on arbitration
		__STATS__( true, io.submit.slow += 1; )
		enable_interrupts();

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

		__ioarbiter_submit(inctx, idxs, have, lazy);
	}

	//=============================================================================================
	// Flushing
	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring
	// This cannot be done by multiple threads
	static __u32 __release_sqes( struct $io_context & ctx ) {
		const __u32 mask = *ctx.sq.mask;

		__attribute__((unused))
		__u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
		__u32 chead = *ctx.sq.kring.head;    // get the current head of the queue
		__u32 phead = ctx.sq.kring.released; // get the head the last time we were here

		__u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the queue

		// the 3 fields are organized like this diagram
		// except it's a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		//    phead    chead    ctail
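		// For example (illustrative numbers only): with mask == 15, phead == 30 and chead == 33,
		// count == 3 below and the release loop recycles the indexes stored at array slots
		// 30 & 15 == 14, 31 & 15 == 15 and 32 & 15 == 0, wrapping around the ring.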

		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		if(count == 0) {
			return 0;
		}

		// We acquired a previous-head/current-head range
		// go through the range and release the sqes
		for( i; count ) {
			__cfadbg_print_safe(io, "Kernel I/O : release loop\n");
			__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
			ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
		}

		ctx.sq.kring.released = chead; // note up to where we processed
		__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

		__ioarbiter_notify(ctx);

		return count;
	}

	//=============================================================================================
	// I/O Arbiter
	//=============================================================================================
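	// The arbiter handles requests that cannot be served by the local $io_context:
	// - allocations that failed locally park on 'pending' until __release_sqes frees entries;
	// - submissions to a context the caller no longer runs on are queued on that context's
	//   'ext_sq' and handed over during __ioarbiter_flush.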
	static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
		bool was_empty;

		// Lock the list, it's not thread-safe
		lock( queue.lock __cfaabi_dbg_ctx2 );
		{
			was_empty = empty(queue.queue);

			// Add our request to the list
			add( queue.queue, item );

			// Mark as pending
			__atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
		}
		unlock( queue.lock );

		return was_empty;
	}

	static inline bool empty(__outstanding_io_queue & queue ) {
		return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
	}

	static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) {
		__cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

		__STATS__( false, io.alloc.block += 1; )

		// No one has any resources left, wait for something to finish
		// We need to add ourselves to a list of pending allocs and wait for an answer
		__pending_alloc pa;
		pa.idxs = idxs;
		pa.want = want;

		enqueue(this.pending, (__outstanding_io&)pa);

		wait( pa.sem );

		return pa.ctx;
	}

	static void __ioarbiter_notify( $io_arbiter & this, $io_context * ctx ) {
		/* paranoid */ verify( !empty(this.pending.queue) );

		lock( this.pending.lock __cfaabi_dbg_ctx2 );
		{
			while( !empty(this.pending.queue) ) {
				__cfadbg_print_safe(io, "Kernel I/O : notifying\n");
				__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
				__pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );

				// stop if the oldest request wants more entries than are currently free
				if( have < pa.want ) goto DONE;
				drop( this.pending.queue );

				/* paranoid */ __attribute__((unused)) bool ret = __alloc(ctx, pa.idxs, pa.want);
				/* paranoid */ verify( ret );

				pa.ctx = ctx;

				post( pa.sem );
			}

			this.pending.empty = true;
			DONE:;
		}
		unlock( this.pending.lock );
	}

	static void __ioarbiter_notify( $io_context & ctx ) {
		if(!empty( ctx.arbiter->pending )) {
			__ioarbiter_notify( *ctx.arbiter, &ctx );
		}
	}

	// Simply append to the pending external submissions
	static void __ioarbiter_submit( $io_context * ctx, __u32 idxs[], __u32 have, bool lazy ) {
		__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);

		__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

		__external_io ei;
		ei.idxs = idxs;
		ei.have = have;
		ei.lazy = lazy;

		bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);

		__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);

		if( we ) {
			sigval_t value = { PREEMPT_IO };
			pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
		}

		wait( ei.sem );

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
	}

	static void __ioarbiter_flush( $io_context & ctx ) {
		if(!empty( ctx.ext_sq )) {
			__STATS__( false, io.flush.external += 1; )

			__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

			lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
			{
				while( !empty(ctx.ext_sq.queue) ) {
					__external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );

					__submit_only(&ctx, ei.idxs, ei.have);

					post( ei.sem );
				}

				ctx.ext_sq.empty = true;
			}
			unlock(ctx.ext_sq.lock );
		}
	}

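	// __kernel_read submits a single read on the processor's own ring; it is used when idle
	// sleep is implemented over io_uring (CFA_WITH_IO_URING_IDLE), presumably with the
	// processor's idle fd, so the processor blocks until a wake-up is written to that fd.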
	#if defined(CFA_WITH_IO_URING_IDLE)
		bool __kernel_read(processor * proc, io_future_t & future, iovec & iov, int fd) {
			$io_context * ctx = proc->io.ctx;
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
			/* paranoid */ verify( ctx );

			__u32 idx;
			struct io_uring_sqe * sqe;

			// We can proceed to the fast path
			if( !__alloc(ctx, &idx, 1) ) return false;

			// Allocation was successful
			__fill( &sqe, 1, &idx, ctx );

			sqe->user_data = (uintptr_t)&future;
			sqe->flags = 0;
			sqe->fd = fd;
			sqe->off = 0;
			sqe->ioprio = 0;
			sqe->fsync_flags = 0;
			sqe->__pad2[0] = 0;
			sqe->__pad2[1] = 0;
			sqe->__pad2[2] = 0;

			#if defined(CFA_HAVE_IORING_OP_READ)
				sqe->opcode = IORING_OP_READ;
				sqe->addr = (uint64_t)iov.iov_base;
				sqe->len = iov.iov_len;
			#elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV)
				sqe->opcode = IORING_OP_READV;
				sqe->addr = (uintptr_t)&iov;
				sqe->len = 1;
			#else
				#error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined
			#endif

			asm volatile("": : :"memory");

			/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
			__submit( ctx, &idx, 1, true );

			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
			/* paranoid */ verify( ! __preemption_enabled() );

			return true;
		}
	#endif
#endif