//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif

#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>
		#include <sys/eventfd.h>
		#include <sys/uio.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "kernel/private.hfa"
	#include "kernel/cluster.hfa"
	#include "io/types.hfa"

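	// Human-readable names for the io_uring opcodes, indexed by the IORING_OP_* values;
	// used only by debug prints.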
	__attribute__((unused)) static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};

	static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want );
	static void __ioarbiter_submit( $io_context * , __u32 idxs[], __u32 have, bool lazy );
	static void __ioarbiter_flush ( $io_context & );
	static inline void __ioarbiter_notify( $io_context & ctx );
	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static inline unsigned __flush( struct $io_context & );
	static inline __u32 __release_sqes( struct $io_context & );
	extern void __kernel_unpark( thread$ * thrd, unpark_hint );

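	// Drain one completion queue: read every CQE between head and tail, fulfil the future
	// stored in each CQE's user_data, then publish the new head so the kernel can reuse
	// those CQE slots.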
	static bool __cfa_do_drain( $io_context * ctx, cluster * cltr ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ctx );

		const __u32 mask = *ctx->cq.mask;

		{
			const __u32 head = *ctx->cq.head;
			const __u32 tail = *ctx->cq.tail;

			if(head == tail) return false;
		}

		// Drain the queue
		if(!__atomic_try_acquire(&ctx->cq.lock)) {
			__STATS__( false, io.calls.locked++; )
			return false;
		}

		unsigned long long ts_prev = ctx->cq.ts;

		// re-read the head and tail in case they have already changed.
		const __u32 head = *ctx->cq.head;
		const __u32 tail = *ctx->cq.tail;
		const __u32 count = tail - head;
		__STATS__( false, io.calls.drain++; io.calls.completed += count; )

		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];

			/* paranoid */ verify(&cqe);

			struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
			__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

			__kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);
		unsigned long long ts_next = ctx->cq.ts = rdtscl();

		// Mark to the kernel that the cqes have been seen.
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );

		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );

		__atomic_unlock(&ctx->cq.lock);

		touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next );

		return true;
	}

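	// Top-level drain: with the completion queues sharded across the cluster, a processor
	// occasionally picks a victim context (biased towards contexts sharing its CPU cache)
	// and help-drains it if it has not been drained recently, then always drains its local
	// queue.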
	bool __cfa_io_drain( processor * proc ) {
		bool local = false;
		bool remote = false;

		cluster * const cltr = proc->cltr;
		$io_context * const ctx = proc->io.ctx;
		/* paranoid */ verify( cltr );
		/* paranoid */ verify( ctx );

		with(cltr->sched) {
			const size_t ctxs_count = io.count;

			/* paranoid */ verify( ready_schedule_islocked() );
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( active_processor() == proc );
			/* paranoid */ verify( __shard_factor.io > 0 );
			/* paranoid */ verify( ctxs_count > 0 );
			/* paranoid */ verify( ctx->cq.id < ctxs_count );

			const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
			const unsigned long long ctsc = rdtscl();

			if(proc->io.target == MAX) {
				uint64_t chaos = __tls_rand();
				unsigned ext = chaos & 0xff;
				unsigned other = (chaos >> 8) % (ctxs_count);

				// accept a cache-local victim, or any victim with small probability (3/256)
				if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
					proc->io.target = other;
				}
			}
			else {
				const unsigned target = proc->io.target;
				/* paranoid */ verify( io.tscs[target].tv != MAX );
				if(target < ctxs_count) {
					const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
					const unsigned long long age = moving_average(ctsc, io.tscs[target].tv, io.tscs[target].ma);
					// __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
					if(age > cutoff) {
						remote = __cfa_do_drain( io.data[target], cltr );
						if(remote) __STATS__( false, io.calls.helped++; )
					}
				}
				proc->io.target = MAX;
			}
		}

		// Drain the local queue
		local = __cfa_do_drain( proc->io.ctx, cltr );

		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( active_processor() == proc );
		return local || remote;
	}

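	// Flush the submission ring: first inject any submissions queued through the arbiter,
	// then call io_uring_enter if there is anything to submit (or completions to wait for),
	// recycle the SQE indexes the kernel has consumed, and finish with a drain pass.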
	bool __cfa_io_flush( processor * proc, int min_comp ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		cluster * cltr = proc->cltr;
		$io_context & ctx = *proc->io.ctx;

		__ioarbiter_flush( ctx );

		if(ctx.sq.to_submit != 0 || min_comp > 0) {
			__STATS__( true, io.calls.flush++; )
			int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, min_comp > 0 ? IORING_ENTER_GETEVENTS : 0, (sigset_t *)0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
				case EBUSY:
					// Update statistics
					__STATS__( false, io.calls.errors.busy ++; )
					return false;
				default:
					abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
				}
			}

			__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
			__STATS__( true, io.calls.submitted += ret; )
			/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
			/* paranoid */ verify( ctx.sq.to_submit >= ret );

			ctx.sq.to_submit -= ret;

			/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

			// Release the consumed SQEs
			__release_sqes( ctx );

			/* paranoid */ verify( ! __preemption_enabled() );

			__atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
		}

		ready_schedule_lock();
		bool ret = __cfa_io_drain( proc );
		ready_schedule_unlock();
		return ret;
	}

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps :
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry, this is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach consensus on two things at the same time:
	//     A - The order in which entries are listed in the array: no two threads must pick the
	//         same index for their entries
	//     B - When the tail can be updated for the kernel. EVERY entry in the array between
	//         head and tail must be fully filled and shouldn't ever be touched again.
	//
	//=============================================================================================
	// Allocation
	// for the user's convenience, fill the sqes from the indexes
	static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct $io_context * ctx) {
		struct io_uring_sqe * sqes = ctx->sq.sqes;
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
			out_sqes[i] = &sqes[idxs[i]];
		}
	}

	// Try to directly allocate from a given context
	// Not thread-safe
	static inline bool __alloc(struct $io_context * ctx, __u32 idxs[], __u32 want) {
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask  = *sq.mask;
		__u32 fhead = sq.free_ring.head;    // get the current head of the queue
		__u32 ftail = sq.free_ring.tail;    // get the current tail of the queue

		// If we don't have enough sqes, fail
		if((ftail - fhead) < want) { return false; }

		// copy all the indexes we want from the available list
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
			idxs[i] = sq.free_ring.array[(fhead + i) & mask];
		}

		// Advance the head to mark the indexes as consumed
		__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

		// return success
		return true;
	}

	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe
	// sqe == &sqes[idx]
	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		__cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

		// We can proceed to the fast path
		if( __alloc(ctx, idxs, want) ) {
			// Allocation was successful
			__STATS__( true, io.alloc.fast += 1; )
			enable_interrupts();

			__cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

			__fill( sqes, want, idxs, ctx );
			return ctx;
		}

		// The fast path failed, fallback on arbitration
		__STATS__( true, io.alloc.fail += 1; )
		__STATS__( true, io.alloc.slow += 1; )
		enable_interrupts();

		$io_arbiter * ioarb = proc->cltr->io.arbiter;
		/* paranoid */ verify( ioarb );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

		struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want);

		__cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);

		__fill( sqes, want, idxs, ret );
		return ret;
	}

	//=============================================================================================
	// submission
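	// Append already-filled SQE indexes to the kernel-visible ring and publish the new tail;
	// the release store guarantees the array writes are visible before the tail moves.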
	static inline void __submit_only( struct $io_context * ctx, __u32 idxs[], __u32 have) {
		// We can proceed to the fast path
		// Get the right objects
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask  = *sq.mask;
		__u32 tail = *sq.kring.tail;

		// Add the sqes to the array
		for( i; have ) {
			__cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
			sq.kring.array[ (tail + i) & mask ] = idxs[i];
		}

		// Make the sqes visible to the submitter
		__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
		sq.to_submit += have;

		__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
		__atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED);
	}

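	// Submit and decide whether to flush now: flush when the backlog passes an arbitrary
	// threshold of 30 SQEs, or when the caller asked for eager submission.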
	static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {
		__sub_ring_t & sq = ctx->sq;
		__submit_only(ctx, idxs, have);

		if(sq.to_submit > 30) {
			__tls_stats()->io.flush.full++;
			__cfa_io_flush( ctx->proc, 0 );
		}
		if(!lazy) {
			__tls_stats()->io.flush.eager++;
			__cfa_io_flush( ctx->proc, 0 );
		}
	}

	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		// Can we proceed to the fast path
		if( ctx == inctx ) // We have the right instance?
		{
			__submit(ctx, idxs, have, lazy);

			// Mark the instance as no longer in-use, re-enable interrupts and return
			__STATS__( true, io.submit.fast += 1; )
			enable_interrupts();

			__cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
			return;
		}

		// Fast path failed, fallback on arbitration
		__STATS__( true, io.submit.slow += 1; )
		enable_interrupts();

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

		__ioarbiter_submit(inctx, idxs, have, lazy);
	}

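	// Usage sketch (not part of the runtime): a caller pairs allocation and submission
	// around filling the SQE, with the completion delivered through the future placed in
	// user_data. `fd`, `buf` and `len` are hypothetical placeholders, and the final
	// `wait( f )` assumes the usual future interface from kernel/fwd.hfa.
	//
	//     io_future_t f;
	//     __u32 idx;
	//     struct io_uring_sqe * sqe;
	//     struct $io_context * ctx = cfa_io_allocate( &sqe, &idx, 1 );
	//     sqe->opcode    = IORING_OP_READ;
	//     sqe->user_data = (uintptr_t)&f;    // the drain loop fulfils this future with cqe.res
	//     sqe->fd        = fd;
	//     sqe->addr      = (uint64_t)buf;
	//     sqe->len       = len;
	//     cfa_io_submit( ctx, &idx, 1, true );
	//     wait( f );                         // f now holds the result (or -errno)
	//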
	//=============================================================================================
	// Flushing
	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring
	// This cannot be done by multiple threads
	static __u32 __release_sqes( struct $io_context & ctx ) {
		const __u32 mask = *ctx.sq.mask;

		__attribute__((unused))
		__u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
		__u32 chead = *ctx.sq.kring.head;    // get the current head of the queue
		__u32 phead = ctx.sq.kring.released; // get the head the last time we were here

		__u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the free queue

		// the 3 fields are organized like this diagram
		// except it is a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		// phead    chead    ctail

		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		if(count == 0) {
			return 0;
		}

		// We acquired a previous-head/current-head range
		// go through the range and release the sqes
		for( i; count ) {
			__cfadbg_print_safe(io, "Kernel I/O : release loop\n");
			__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
			ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
		}

		ctx.sq.kring.released = chead; // note up to where we processed
		__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

		__ioarbiter_notify(ctx);

		return count;
	}

	//=============================================================================================
	// I/O Arbiter
	//=============================================================================================
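	// Helpers for the arbiter queues (pending allocations and external submissions).
	// enqueue returns whether the queue was empty before the insertion, i.e. whether the
	// queue's consumer may need to be woken.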
	static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
		bool was_empty;

		// Lock the list, it's not thread-safe
		lock( queue.lock __cfaabi_dbg_ctx2 );
		{
			was_empty = empty(queue.queue);

			// Add our request to the list
			add( queue.queue, item );

			// Mark as pending
			__atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
		}
		unlock( queue.lock );

		return was_empty;
	}

	static inline bool empty(__outstanding_io_queue & queue ) {
		return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
	}

	static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) {
		__cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

		__STATS__( false, io.alloc.block += 1; )

		// No one has any resources left, wait for something to finish
		// We need to add ourselves to a list of pending allocs and wait for an answer
		__pending_alloc pa;
		pa.idxs = idxs;
		pa.want = want;

		enqueue(this.pending, (__outstanding_io&)pa);

		wait( pa.sem );

		return pa.ctx;
	}

	static void __ioarbiter_notify( $io_arbiter & this, $io_context * ctx ) {
		/* paranoid */ verify( !empty(this.pending.queue) );

		lock( this.pending.lock __cfaabi_dbg_ctx2 );
		{
			while( !empty(this.pending.queue) ) {
				__cfadbg_print_safe(io, "Kernel I/O : notifying\n");
				__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
				__pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );

				// stop if there are not enough free entries left to satisfy this request
				if( have < pa.want ) goto DONE;
				drop( this.pending.queue );

				/* paranoid */ __attribute__((unused)) bool ret =
					__alloc(ctx, pa.idxs, pa.want);
				/* paranoid */ verify( ret );

				pa.ctx = ctx;

				post( pa.sem );
			}

			this.pending.empty = true;
			DONE:;
		}
		unlock( this.pending.lock );
	}

	static void __ioarbiter_notify( $io_context & ctx ) {
		if(!empty( ctx.arbiter->pending )) {
			__ioarbiter_notify( *ctx.arbiter, &ctx );
		}
	}

	// Simply append to the list of pending external submissions
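	// If this request made the external queue non-empty, the owning processor may be
	// sleeping or running user code, so poke it with SIGUSR1 (PREEMPT_IO) to force a flush.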
	static void __ioarbiter_submit( $io_context * ctx, __u32 idxs[], __u32 have, bool lazy ) {
		__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);
		__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

		__external_io ei;
		ei.idxs = idxs;
		ei.have = have;
		ei.lazy = lazy;

		bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);

		__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);

		if( we ) {
			sigval_t value = { PREEMPT_IO };
			pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
		}

		wait( ei.sem );

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
	}

	static void __ioarbiter_flush( $io_context & ctx ) {
		if(!empty( ctx.ext_sq )) {
			__STATS__( false, io.flush.external += 1; )

			__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

			lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
			{
				while( !empty(ctx.ext_sq.queue) ) {
					__external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );

					__submit_only(&ctx, ei.idxs, ei.have);

					post( ei.sem );
				}

				ctx.ext_sq.empty = true;
			}
			unlock(ctx.ext_sq.lock );
		}
	}

	#if defined(CFA_WITH_IO_URING_IDLE)
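		// Used when idle sleep is routed through io_uring (CFA_WITH_IO_URING_IDLE):
		// queue a single read on this processor's own ring, completing the given future.
		// Returns false if no SQE is currently free.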
		bool __kernel_read(processor * proc, io_future_t & future, iovec & iov, int fd) {
			$io_context * ctx = proc->io.ctx;
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
			/* paranoid */ verify( ctx );

			__u32 idx;
			struct io_uring_sqe * sqe;

			// We can proceed to the fast path
			if( !__alloc(ctx, &idx, 1) ) return false;

			// Allocation was successful
			__fill( &sqe, 1, &idx, ctx );

			sqe->user_data = (uintptr_t)&future;
			sqe->flags = 0;
			sqe->fd = fd;
			sqe->off = 0;
			sqe->ioprio = 0;
			sqe->fsync_flags = 0;
			sqe->__pad2[0] = 0;
			sqe->__pad2[1] = 0;
			sqe->__pad2[2] = 0;

			#if defined(CFA_HAVE_IORING_OP_READ)
				sqe->opcode = IORING_OP_READ;
				sqe->addr = (uint64_t)iov.iov_base;
				sqe->len = iov.iov_len;
			#elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV)
				sqe->opcode = IORING_OP_READV;
				sqe->addr = (uintptr_t)&iov;
				sqe->len = 1;
			#else
				#error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined
			#endif

			// compiler barrier: make sure every SQE field is written before it is submitted
			asm volatile("": : :"memory");

			/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
			__submit( ctx, &idx, 1, true );

			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
			/* paranoid */ verify( ! __preemption_enabled() );

			return true;
		}
	#endif
#endif