//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif

#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

extern "C" {
	#include <sys/syscall.h>
	#include <sys/eventfd.h>
	#include <sys/uio.h>

	#include <linux/io_uring.h>
}

#include "stats.hfa"
#include "kernel.hfa"
#include "kernel/fwd.hfa"
#include "kernel/private.hfa"
#include "kernel/cluster.hfa"
#include "io/types.hfa"

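// Human-readable names for the io_uring opcodes, indexed by their IORING_OP_* value;
// kept only for debug/diagnostic printing (hence the unused attribute). Lookups of opcodes
// newer than this table should clamp to the final "INVALID_OP" entry.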
__attribute__((unused)) static const char * opcodes[] = {
	"OP_NOP",
	"OP_READV",
	"OP_WRITEV",
	"OP_FSYNC",
	"OP_READ_FIXED",
	"OP_WRITE_FIXED",
	"OP_POLL_ADD",
	"OP_POLL_REMOVE",
	"OP_SYNC_FILE_RANGE",
	"OP_SENDMSG",
	"OP_RECVMSG",
	"OP_TIMEOUT",
	"OP_TIMEOUT_REMOVE",
	"OP_ACCEPT",
	"OP_ASYNC_CANCEL",
	"OP_LINK_TIMEOUT",
	"OP_CONNECT",
	"OP_FALLOCATE",
	"OP_OPENAT",
	"OP_CLOSE",
	"OP_FILES_UPDATE",
	"OP_STATX",
	"OP_READ",
	"OP_WRITE",
	"OP_FADVISE",
	"OP_MADVISE",
	"OP_SEND",
	"OP_RECV",
	"OP_OPENAT2",
	"OP_EPOLL_CTL",
	"OP_SPLICE",
	"OP_PROVIDE_BUFFERS",
	"OP_REMOVE_BUFFERS",
	"OP_TEE",
	"INVALID_OP"
};

static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want );
static void __ioarbiter_submit( io_context$ * , __u32 idxs[], __u32 have, bool lazy );
static void __ioarbiter_flush ( io_context$ &, bool kernel );
static inline void __ioarbiter_notify( io_context$ & ctx );
//=============================================================================================
// I/O Polling
//=============================================================================================
static inline unsigned __flush( struct io_context$ & );
static inline __u32 __release_sqes( struct io_context$ & );
extern void __kernel_unpark( thread$ * thrd, unpark_hint );

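// post the oneshot and wake the thread that was blocked on it;
// the 'kernel' flag indicates the call comes from inside the runtime kernel, in which case the
// thread must be woken with __kernel_unpark instead of the regular unpark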
static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
	thread$ * t = post( this, false );
	if(kernel) __kernel_unpark( t, hint );
	else unpark( t, hint );
}

// actual system call of io_uring
// wrapped so everything that needs to happen around it is always done
// i.e., stats, bookkeeping, sqe reclamation, etc.
static void ioring_syscall( struct io_context$ & ctx, unsigned int min_comp, unsigned int flags ) {
	__STATS__( true, io.calls.flush++; )
	int ret;
	for() {
		// do the system call in a loop, repeat on interrupts
		ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
		if( ret < 0 ) {
			switch((int)errno) {
			case EINTR:
				continue;
			case EAGAIN:
			case EBUSY:
				// Update statistics
				__STATS__( false, io.calls.errors.busy ++; )
				return;
			default:
				abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
			}
		}
		break;
	}

	__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
	__STATS__( true, io.calls.submitted += ret; )
	/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
	/* paranoid */ verify( ctx.sq.to_submit >= ret );

	// keep track of how many still need submitting
	__atomic_fetch_sub(&ctx.sq.to_submit, ret, __ATOMIC_SEQ_CST);

	/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

	// Release the consumed SQEs
	__release_sqes( ctx );

	/* paranoid */ verify( ! __preemption_enabled() );

	// mark that there is no pending io left
	__atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
}


// try to acquire an io context for draining; because of helping we never *need* to drain, it can always be done later
static bool try_acquire( io_context$ * ctx ) __attribute__((nonnull(1))) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	{
		// if there is nothing to drain there is no point in acquiring anything
		const __u32 head = *ctx->cq.head;
		const __u32 tail = *ctx->cq.tail;

		if(head == tail) return false;
	}

	// try a simple spinlock acquire, it's likely there are completions to drain
	if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
		// some other processor already has it
		__STATS__( false, io.calls.locked++; )
		return false;
	}

	// acquired!!
	return true;
}

// actually drain the completion queue
static bool __cfa_do_drain( io_context$ * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ctx->cq.try_lock == true );

	// get all the invariants and initial state
	const __u32 mask = *ctx->cq.mask;
	const __u32 num  = *ctx->cq.num;
	unsigned long long ts_prev = ctx->cq.ts;
	unsigned long long ts_next;

	// We might need to do this multiple times if more events completed than can fit in the queue.
	for() {
		// re-read the head and tail in case they already changed.
		// count the difference between the two
		const __u32 head = *ctx->cq.head;
		const __u32 tail = *ctx->cq.tail;
		const __u32 count = tail - head;
		__STATS__( false, io.calls.drain++; io.calls.completed += count; )

		// drain everything between head and tail
		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];

			/* paranoid */ verify(&cqe);

			// find the future in the completion
			struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
			// __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

			// don't directly fulfil the future, preemption is disabled so we need to use kernel_unpark
			__kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
		}

		// update the timestamps accordingly
		// keep a local copy so we can update the relaxed copy
		ts_next = ctx->cq.ts = rdtscl();

		// Mark to the kernel that the cqes have been seen.
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
		ctx->proc->idle_wctx.drain_time = ts_next;

		// we finished draining the completions... unless the ring buffer was full and there are more completions waiting in the kernel.
		if(likely(count < num)) break;

		// the ring buffer was full, there could be more stuff in the kernel.
		ioring_syscall( *ctx, 0, IORING_ENTER_GETEVENTS);
	}

	__cfadbg_print_safe(io, "Kernel I/O : completions drained, age %llu\n", ts_next);
	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	// everything is drained, we can release the lock
	__atomic_unlock(&ctx->cq.try_lock);

	// update the relaxed timestamp
	touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next, false );

	return true;
}

// call from a processor to flush
// contains all the bookkeeping a proc must do, not just the barebones flushing logic
void __cfa_do_flush( io_context$ & ctx, bool kernel ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// flush any external requests
	ctx.sq.last_external = false; // clear the external bit, the arbiter will reset it if needed
	__ioarbiter_flush( ctx, kernel );

	// if anything needs to be submitted, do the system call
	if(ctx.sq.to_submit != 0) {
		ioring_syscall(ctx, 0, 0);
	}
}

// call from a processor to drain
// contains all the bookkeeping a proc must do, not just the barebones draining logic
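// Draining is sharded: each processor normally drains only the completion queue of its own io
// context. So that contexts do not go stale while their processor is busy elsewhere, a processor
// also periodically picks another context, preferring one sharing its cache, and helps flush and
// drain it when that context has not been drained recently (based on the timestamps below).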
bool __cfa_io_drain( struct processor * proc ) {
	bool local = false;
	bool remote = false;

	// make sure no one creates/destroys io contexts
	ready_schedule_lock();

	cluster * const cltr = proc->cltr;
	io_context$ * const ctx = proc->io.ctx;
	/* paranoid */ verify( cltr );
	/* paranoid */ verify( ctx );

	// Help if needed
	with(cltr->sched) {
		const size_t ctxs_count = io.count;

		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( active_processor() == proc );
		/* paranoid */ verify( __shard_factor.io > 0 );
		/* paranoid */ verify( ctxs_count > 0 );
		/* paranoid */ verify( ctx->cq.id < ctxs_count );

		const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
		const unsigned long long ctsc = rdtscl();

		// only help once every other time
		// pick a target when not helping
		if(proc->io.target == UINT_MAX) {
			uint64_t chaos = __tls_rand();
			// choose who to help and whether to accept helping far processors
			unsigned ext = chaos & 0xff;
			unsigned other = (chaos >> 8) % (ctxs_count);

			// if the target is on the same cache, or this processor gets lucky ( 3 out of 256 odds ), help it
			if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
				proc->io.target = other;
			}
		}
		else {
			// a target was picked last time, help it
			const unsigned target = proc->io.target;
			/* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
			// make sure the target hasn't stopped existing since last time
			HELP: if(target < ctxs_count) {
				// calculate its age and how young it could be before we give up on helping
				const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
				const __readyQ_avg_t age = moving_average(ctsc, io.tscs[target].t.tv, io.tscs[target].t.ma, false);
				__cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
				// is the target older than the cutoff? recall 0 is oldest and bigger ints are younger
				if(age <= cutoff) break HELP;

				// attempt to help the submission side
				__cfa_do_flush( *io.data[target], true );

				// attempt to help the completion side
				if(!try_acquire(io.data[target])) break HELP; // already acquired, no help needed

				// actually help
				if(!__cfa_do_drain( io.data[target], cltr )) break HELP;

				// track that we did help someone
				remote = true;
				__STATS__( true, io.calls.helped++; )
			}

			// reset the target
			proc->io.target = UINT_MAX;
		}
	}

	// Drain the local queue
	if(try_acquire( proc->io.ctx )) {
		local = __cfa_do_drain( proc->io.ctx, cltr );
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( active_processor() == proc );

	ready_schedule_unlock();

	// return true if some completion entry, local or remote, was drained
	return local || remote;
}


// call from a processor to flush
// contains all the bookkeeping a proc must do, not just the barebones flushing logic
bool __cfa_io_flush( struct processor * proc ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( proc );
	/* paranoid */ verify( proc->io.ctx );

	__cfa_do_flush( *proc->io.ctx, false );

	// also drain since some operations will complete immediately
	return __cfa_io_drain( proc );
}

//=============================================================================================
// I/O Submissions
//=============================================================================================

// Submission steps :
// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
//     need to write an allocator that allows allocating concurrently.
//
// 2 - Actually fill the submit entry, this is the only simple and straightforward step.
//
// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
//     needs to reach consensus on two things at the same time:
//     A - The order in which entries are listed in the array: no two threads must pick the
//         same index for their entries
//     B - When the tail can be updated for the kernel. EVERY entry in the array between
//         head and tail must be fully filled and shouldn't ever be touched again.
//
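// For orientation, a caller of this API (e.g., the io wrappers built on top of this file) is
// expected to drive the three steps roughly as sketched below. This is an illustrative sketch
// only, not code from those wrappers; the sqe fields shown and the use of an io_future_t as
// user_data simply mirror what the drain loop above expects.
//
//     __u32 idx;
//     struct io_uring_sqe * sqe;
//     io_future_t f;
//     struct io_context$ * ctx = cfa_io_allocate( &sqe, &idx, 1 );  // step 1 : reserve an sqe
//     sqe->opcode    = IORING_OP_READ;                              // step 2 : fill it in
//     sqe->fd        = fd;
//     sqe->addr      = (__u64)(uintptr_t)buf;
//     sqe->len       = len;
//     sqe->user_data = (__u64)(uintptr_t)&f;                        // drained via fulfil( f, cqe.res )
//     cfa_io_submit( ctx, &idx, 1, true );                          // step 3 : publish, lazy flush
//     wait( f );                                                    // block until the completion arrives
//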
//=============================================================================================
// Allocation
// for the user's convenience, fill the sqes from the indexes
static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct io_context$ * ctx) {
	struct io_uring_sqe * sqes = ctx->sq.sqes;
	for(i; want) {
		// __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
		out_sqes[i] = &sqes[idxs[i]];
	}
}

// Try to allocate directly from a given context
// Not thread-safe
static inline bool __alloc(struct io_context$ * ctx, __u32 idxs[], __u32 want) {
	__sub_ring_t & sq = ctx->sq;
	const __u32 mask  = *sq.mask;
	__u32 fhead = sq.free_ring.head;    // get the current head of the queue
	__u32 ftail = sq.free_ring.tail;    // get the current tail of the queue

	// If we don't have enough sqes, fail
	if((ftail - fhead) < want) { return false; }

	// copy all the indexes we want from the available list
	for(i; want) {
		// __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
		idxs[i] = sq.free_ring.array[(fhead + i) & mask];
	}

	// Advance the head to mark the indexes as consumed
	__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

	// return success
	return true;
}

// Allocate a submit queue entry.
// The kernel cannot see these entries until they are submitted, but other threads must be
// able to see which entries can be used and which are already in use by another thread.
// For convenience, return both the index and the pointer to the sqe
// sqe == &sqes[idx]
struct io_context$ * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
	// __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

	disable_interrupts();
	struct processor * proc = __cfaabi_tls.this_processor;
	io_context$ * ctx = proc->io.ctx;
	/* paranoid */ verify( __cfaabi_tls.this_processor );
	/* paranoid */ verify( ctx );

	// __cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

	// We can proceed to the fast path
	if( __alloc(ctx, idxs, want) ) {
		// Allocation was successful
		__STATS__( true, io.alloc.fast += 1; )
		enable_interrupts();

		// __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

		__fill( sqes, want, idxs, ctx );
		return ctx;
	}
	// The fast path failed, fallback
	__STATS__( true, io.alloc.fail += 1; )

	// Fast path failed, fallback on arbitration
	__STATS__( true, io.alloc.slow += 1; )
	enable_interrupts();

	io_arbiter$ * ioarb = proc->cltr->io.arbiter;
	/* paranoid */ verify( ioarb );

	// __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

	struct io_context$ * ret = __ioarbiter_allocate(*ioarb, idxs, want);

	// __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);

	__fill( sqes, want, idxs, ret );
	return ret;
}

//=============================================================================================
// submission
// barebones logic to submit a group of sqes
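// the 'lock' parameter indicates whether the caller already holds ctx->ext_sq.lock;
// when it is false, the lock is acquired and released here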
static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
	if(!lock)
		lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );
	// We can proceed to the fast path
	// Get the right objects
	__sub_ring_t & sq = ctx->sq;
	const __u32 mask  = *sq.mask;
	__u32 tail = *sq.kring.tail;

	// Add the sqes to the array
	for( i; have ) {
		// __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
		sq.kring.array[ (tail + i) & mask ] = idxs[i];
	}

	// Make the sqes visible to the submitter
	__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
	__atomic_fetch_add(&sq.to_submit, have, __ATOMIC_SEQ_CST);

	// set the bits to mark that things need to be flushed
	__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
	__atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED);

	if(!lock)
		unlock( ctx->ext_sq.lock );
}

// submission logic + maybe flushing
static inline void __submit( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy) {
	__sub_ring_t & sq = ctx->sq;
	__submit_only(ctx, idxs, have, false);

	if(sq.to_submit > 30) {
		__tls_stats()->io.flush.full++;
		__cfa_io_flush( ctx->proc );
	}
	if(!lazy) {
		__tls_stats()->io.flush.eager++;
		__cfa_io_flush( ctx->proc );
	}
}

// call from a processor to submit
// might require arbitration if the thread was migrated after the allocation
void cfa_io_submit( struct io_context$ * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
	// __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");

	disable_interrupts();
	__STATS__( true, if(!lazy) io.submit.eagr += 1; )
	struct processor * proc = __cfaabi_tls.this_processor;
	io_context$ * ctx = proc->io.ctx;
	/* paranoid */ verify( __cfaabi_tls.this_processor );
	/* paranoid */ verify( ctx );

	// Can we proceed to the fast path
	if( ctx == inctx ) // We have the right instance?
	{
		// yes! fast submit
		__submit(ctx, idxs, have, lazy);

		// Mark the instance as no longer in-use, re-enable interrupts and return
		__STATS__( true, io.submit.fast += 1; )
		enable_interrupts();

		// __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
		return;
	}

	// Fast path failed, fallback on arbitration
	__STATS__( true, io.submit.slow += 1; )
	enable_interrupts();

	// __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

	__ioarbiter_submit(inctx, idxs, have, lazy);
}

//=============================================================================================
// Flushing
// Go through the ring's submit queue and release everything that has already been consumed
// by io_uring
// This cannot be done by multiple threads
static __u32 __release_sqes( struct io_context$ & ctx ) {
	const __u32 mask = *ctx.sq.mask;

	__attribute__((unused))
	__u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
	__u32 chead = *ctx.sq.kring.head;    // get the current head of the queue
	__u32 phead = ctx.sq.kring.released; // get the head the last time we were here

	__u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the free queue

	// the 3 fields are organized like this diagram
	// except it's a ring
	// ---+--------+--------+----
	// ---+--------+--------+----
	//    ^        ^        ^
	//  phead    chead    ctail

	// make sure ctail doesn't wrap around and reach phead
	/* paranoid */ verify(
		   (ctail >= chead && chead >= phead)
		|| (chead >= phead && phead >= ctail)
		|| (phead >= ctail && ctail >= chead)
	);

	// find the range we need to clear
	__u32 count = chead - phead;

	if(count == 0) {
		return 0;
	}

	// We acquired a previous-head/current-head range
	// go through the range and release the sqes
	for( i; count ) {
		// __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
		__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
		ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
	}

	ctx.sq.kring.released = chead;  // note up to where we processed
	__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

	// notify the allocator that new allocations can be made
	__ioarbiter_notify(ctx);

	return count;
}

//=============================================================================================
// I/O Arbiter
//=============================================================================================
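// Append an outstanding io operation/allocation to the given queue.
// Returns true if the queue was empty before the insertion, i.e. the caller is responsible for
// notifying whoever consumes the queue.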
static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
	bool was_empty;

	// Lock the list, it's not thread safe
	lock( queue.lock __cfaabi_dbg_ctx2 );
	{
		was_empty = empty(queue.queue);

		// Add our request to the list
		add( queue.queue, item );

		// Mark as pending
		__atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
	}
	unlock( queue.lock );

	return was_empty;
}

static inline bool empty(__outstanding_io_queue & queue ) {
	return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
}

static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want ) {
	// __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

	__STATS__( false, io.alloc.block += 1; )

	// No one has any resources left, wait for something to finish
	// We need to add ourselves to a list of pending allocs and wait for an answer
	__pending_alloc pa;
	pa.idxs = idxs;
	pa.want = want;

	enqueue(this.pending, (__outstanding_io&)pa);

	wait( pa.waitctx );

	return pa.ctx;
}

// notify the arbiter that new allocations are available
static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
	/* paranoid */ verify( !empty(this.pending.queue) );
	/* paranoid */ verify( __preemption_enabled() );

	// mutual exclusion is needed
	lock( this.pending.lock __cfaabi_dbg_ctx2 );
	{
		__cfadbg_print_safe(io, "Kernel I/O : notifying\n");

		// as long as there are pending allocations try to satisfy them
		// for simplicity do it in FIFO order
		while( !empty(this.pending.queue) ) {
			// get the first pending alloc
			__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
			__pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );

			// if there aren't enough free entries to satisfy the request, stop
			if( have < pa.want ) goto DONE;

			// there are enough allocations, so the request can be dropped from the queue
			drop( this.pending.queue );

			// actually do the alloc
			/* paranoid */ __attribute__((unused)) bool ret = __alloc(ctx, pa.idxs, pa.want);
			/* paranoid */ verify( ret );

			// write out which context satisfied the request and post it
			pa.ctx = ctx;
			post( pa.waitctx );
		}

		this.pending.empty = true;
		DONE:;
	}
	unlock( this.pending.lock );

	/* paranoid */ verify( __preemption_enabled() );
}

// shorthand that skips the mutual exclusion when the pending queue is already empty
static void __ioarbiter_notify( io_context$ & ctx ) {
	if(empty( ctx.arbiter->pending )) return;
	__ioarbiter_notify( *ctx.arbiter, &ctx );
}

// Submit from outside the local processor: append to the outstanding list
static void __ioarbiter_submit( io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy ) {
	__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);

	__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

	// create the intrusive object to append
	__external_io ei;
	ei.idxs = idxs;
	ei.have = have;
	ei.lazy = lazy;

	// enqueue the io
	bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);

	// mark pending
	__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);

	// if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
	// if it's not the first enqueue, a signal is already in transit
	if( we ) {
		sigval_t value = { PREEMPT_IO };
		__cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
		__STATS__( false, io.flush.signal += 1; )
	}
	__STATS__( false, io.submit.extr += 1; )

	// to avoid dynamic allocation/memory reclamation headaches, wait for it to have been submitted
	wait( ei.waitctx );

	__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
}

// flush the io arbiter: move all external io operations to the submission ring
static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
	// if there are no external operations just return
	if(empty( ctx.ext_sq )) return;

	// stats and logs
	__STATS__( false, io.flush.external += 1; )
	__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

	// this can happen from multiple processors, mutual exclusion is needed
	lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
	{
		// pop each operation one at a time.
		// There is no wait morphing because of the io sq ring
		while( !empty(ctx.ext_sq.queue) ) {
			// drop the element from the queue
			__external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );

			// submit it
			__submit_only(&ctx, ei.idxs, ei.have, true);

			// wake the thread that was waiting on it
			// since this can be called both from kernel and user, check the flag before posting
			__post( ei.waitctx, kernel, UNPARK_LOCAL );
		}

		// mark the queue as empty
		ctx.ext_sq.empty = true;
		ctx.sq.last_external = true;
	}
	unlock(ctx.ext_sq.lock );
}

extern "C" {
	// debug functions used for gdb
	// io_uring isn't yet supported by gdb, so the kernel-shared data structures aren't viewable in gdb
	// these functions read the data that gdb can't and should be removed once the support is added
	static __u32 __cfagdb_cq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.head; }
	static __u32 __cfagdb_cq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.tail; }
	static __u32 __cfagdb_cq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.mask; }
	static __u32 __cfagdb_sq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.head; }
	static __u32 __cfagdb_sq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.tail; }
	static __u32 __cfagdb_sq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.mask; }
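
	// From a live gdb session these can be invoked directly, for example
	// (assuming 'ctx' is an io_context$ * in scope):
	//     (gdb) call __cfagdb_cq_head( ctx )
	//     (gdb) call __cfagdb_sq_at( ctx, __cfagdb_sq_head( ctx ) )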

	// fancier version that reads an sqe and copies it out.
	static struct io_uring_sqe __cfagdb_sq_at( io_context$ * ctx, __u32 at ) __attribute__((nonnull(1),used,noinline)) {
		__u32 ax = at & *ctx->sq.mask;
		__u32 ix = ctx->sq.kring.array[ax];
		return ctx->sq.sqes[ix];
	}
}
#endif