//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#define _GNU_SOURCE /* See feature_test_macros(7) */
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>
		#include <sys/eventfd.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "io/types.hfa"

	__attribute__((unused)) static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};
|
	static $io_context * __ioarbiter_allocate( $io_arbiter & mutex this, processor *, __u32 idxs[], __u32 want );
	static void __ioarbiter_submit( $io_arbiter & mutex this, $io_context *, __u32 idxs[], __u32 have, bool lazy );
	static void __ioarbiter_flush ( $io_arbiter & mutex this, $io_context * );
	static inline void __ioarbiter_notify( $io_context & ctx );

	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static inline unsigned __flush( struct $io_context & );
	static inline __u32 __release_sqes( struct $io_context & );

	void __cfa_io_drain( processor * proc ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		// Drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
		const __u32 mask = *ctx->cq.mask;

		__u32 count = tail - head;
		__STATS__( false, io.calls.drain++; io.calls.completed += count; )

		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];

			/* paranoid */ verify(&cqe);

			struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
			__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

			fulfil( *future, cqe.res );
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);

		// Signal to the kernel that the cqes have been seen.
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );

		/* paranoid */ verify( ! __preemption_enabled() );

		return;
	}
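
	// For reference only: a minimal sketch of the same drain pattern, written against
	// liburing's userspace API instead of the raw mmapped rings used above
	// (`handle` is a hypothetical callback; this runtime does not use liburing):
	//
	//     struct io_uring_cqe * cqe;
	//     unsigned head, seen = 0;
	//     io_uring_for_each_cqe(&ring, head, cqe) {
	//         handle((struct io_future_t *)(uintptr_t)cqe->user_data, cqe->res);
	//         seen++;
	//     }
	//     io_uring_cq_advance(&ring, seen); // publishes the new head, like the store above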
|
	void __cfa_io_flush( processor * proc ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		$io_context & ctx = *proc->io.ctx;

		if(!ctx.ext_sq.empty) {
			__ioarbiter_flush( *ctx.arbiter, &ctx );
		}

		__STATS__( true, io.calls.flush++; )
		int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, 0, 0, (sigset_t *)0p, _NSIG / 8);
		if( ret < 0 ) {
			switch((int)errno) {
			case EAGAIN:
			case EINTR:
			case EBUSY:
				// Update statistics
				__STATS__( false, io.calls.errors.busy ++; )
				return;
			default:
				abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
			}
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
		__STATS__( true, io.calls.submitted += ret; )
		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
		/* paranoid */ verify( ctx.sq.to_submit >= ret );

		ctx.sq.to_submit -= ret;

		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

		// Release the consumed SQEs
		__release_sqes( ctx );

		/* paranoid */ verify( ! __preemption_enabled() );

		ctx.proc->io.pending = false;
	}
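
	// Note on the raw syscall above: io_uring_enter(2) has the shape
	//     io_uring_enter( fd, to_submit, min_complete, flags, sigset, sigset_size )
	// With min_complete == 0 and flags == 0 it only submits pending sqes and never
	// blocks waiting for completions; the trailing `_NSIG / 8` is the size in bytes
	// of the (null) signal mask expected by the raw syscall.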
|
	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps:
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry; this is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach two consensus decisions at the same time:
	//     A - The order in which entries are listed in the array: no two threads must pick the
	//         same index for their entries.
	//     B - When the tail can be updated for the kernel: every entry in the array between
	//         head and tail must be fully filled and should never be touched again.
	//
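	// A caller-side sketch of these three steps, using the entry points defined below
	// (`fill_sqe` is a hypothetical helper standing in for step 2):
	//
	//     __u32 idx;
	//     struct io_uring_sqe * sqe;
	//     struct $io_context * ctx = cfa_io_allocate( &sqe, &idx, 1 ); // step 1: allocate
	//     fill_sqe( sqe, /* opcode, fd, buffer, ... */ );              // step 2: fill
	//     cfa_io_submit( ctx, &idx, 1, true );                        // step 3: append & submit
	//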
|
	//=============================================================================================
	// Allocation
	// For the user's convenience, fill the sqes from the indexes.
	static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct $io_context * ctx) {
		struct io_uring_sqe * sqes = ctx->sq.sqes;
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
			out_sqes[i] = &sqes[idxs[i]];
		}
	}

	// Try to directly allocate from a given context
	// Not thread-safe
	static inline bool __alloc(struct $io_context * ctx, __u32 idxs[], __u32 want) {
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 fhead = sq.free_ring.head; // get the current head of the queue
		__u32 ftail = sq.free_ring.tail; // get the current tail of the queue

		// If we don't have enough sqes, fail
		// (unsigned arithmetic handles ring wrap-around correctly)
		if((ftail - fhead) < want) { return false; }

		// copy all the indexes we want from the available list
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
			idxs[i] = sq.free_ring.array[(fhead + i) & mask];
		}

		// Advance the head to mark the indexes as consumed
		__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

		// return success
		return true;
	}
|
	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe:
	// sqe == &sqes[idx]
	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		__cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

		// We can proceed to the fast path
		if( __alloc(ctx, idxs, want) ) {
			// Allocation was successful
			__STATS__( true, io.alloc.fast += 1; )
			enable_interrupts( __cfaabi_dbg_ctx );

			__cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

			__fill( sqes, want, idxs, ctx );
			return ctx;
		}

		// The fast path failed, fallback on arbitration
		__STATS__( true, io.alloc.fail += 1; )
		__STATS__( true, io.alloc.slow += 1; )
		enable_interrupts( __cfaabi_dbg_ctx );

		$io_arbiter * ioarb = proc->cltr->io.arbiter;
		/* paranoid */ verify( ioarb );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

		struct $io_context * ret = __ioarbiter_allocate(*ioarb, proc, idxs, want);

		__cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);

		__fill( sqes, want, idxs, ret );
		return ret;
	}
|

	//=============================================================================================
	// Submission
	static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {
		// Get the right objects
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 tail = *sq.kring.tail;

		// Add the sqes to the array
		for( i; have ) {
			__cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
			sq.kring.array[ (tail + i) & mask ] = idxs[i];
		}

		// Make the sqes visible to the submitter
		__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
		sq.to_submit++;

		ctx->proc->io.pending = true;
		ctx->proc->io.dirty = true;
		// Flush eagerly if requested, or once enough submissions have accumulated
		if(sq.to_submit > 30 || !lazy) {
			__cfa_io_flush( ctx->proc );
		}
	}
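
	// Note: the release store to the kring tail above pairs with the kernel's acquire
	// read of that index during io_uring_enter, guaranteeing that every array slot and
	// sqe written before the store is visible to the kernel once the new tail is observed.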
|
	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		// Can we proceed to the fast path
		if( ctx == inctx ) // We have the right instance?
		{
			__submit(ctx, idxs, have, lazy);

			// Mark the instance as no longer in-use, re-enable interrupts and return
			__STATS__( true, io.submit.fast += 1; )
			enable_interrupts( __cfaabi_dbg_ctx );

			__cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
			return;
		}

		// Fast path failed, fallback on arbitration
		__STATS__( true, io.submit.slow += 1; )
		enable_interrupts( __cfaabi_dbg_ctx );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

		__ioarbiter_submit(*inctx->arbiter, inctx, idxs, have, lazy);
	}
|
	//=============================================================================================
	// Flushing
	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring.
	// This cannot be done by multiple threads.
	static __u32 __release_sqes( struct $io_context & ctx ) {
		const __u32 mask = *ctx.sq.mask;

		__attribute__((unused))
		__u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
		__u32 chead = *ctx.sq.kring.head;    // get the current head of the queue
		__u32 phead = ctx.sq.kring.released; // get the head the last time we were here

		__u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the free queue

		// the 3 fields are organized like this diagram
		// except it is a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		//  phead    chead    ctail
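		//
		// Worked example (assuming a ring of 32 entries): if phead == 4, chead == 9 and
		// ctail == 12, then entries 4..8 (count == chead - phead == 5) have been consumed
		// by the kernel since the last call and can be recycled onto the free ring, while
		// entries 9..11 are in the ring but not yet consumed.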
|
		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		if(count == 0) {
			return 0;
		}

		// We acquired a previous-head/current-head range;
		// go through the range and release the sqes
		for( i; count ) {
			__cfadbg_print_safe(io, "Kernel I/O : release loop\n");
			__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
			ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
		}

		ctx.sq.kring.released = chead; // note up to where we processed
		__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

		__ioarbiter_notify(ctx);

		return count;
	}
|
	//=============================================================================================
	// I/O Arbiter
	//=============================================================================================
	static $io_context * __ioarbiter_allocate( $io_arbiter & mutex this, processor * proc, __u32 idxs[], __u32 want ) {
		__cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

		__STATS__( false, io.alloc.block += 1; )

		// No one has any resources left, wait for something to finish
		// Mark as pending
		__atomic_store_n( &this.pending.flag, true, __ATOMIC_SEQ_CST );

		// Wait for our turn to submit
		wait( this.pending.blocked, want );

		__attribute__((unused)) bool ret =
			__alloc( this.pending.ctx, idxs, want );
		/* paranoid */ verify( ret );

		return this.pending.ctx;
	}
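
	// Reminder on the CFA monitor semantics used here: a `mutex this` parameter means
	// the arbiter's monitor lock is held for the duration of the call, and `wait`
	// atomically releases that lock while blocking, so __ioarbiter_notify can run and
	// publish pending.ctx before the waiter resumes.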
|
	static void __ioarbiter_notify( $io_arbiter & mutex this, $io_context * ctx ) {
		/* paranoid */ verify( !is_empty(this.pending.blocked) );
		this.pending.ctx = ctx;

		while( !is_empty(this.pending.blocked) ) {
			__cfadbg_print_safe(io, "Kernel I/O : notifying\n");
			__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
			__u32 want = front( this.pending.blocked );

			// Stop once there are not enough free entries to satisfy the next waiter.
			if( have < want ) return;

			signal_block( this.pending.blocked );
		}

		this.pending.flag = false;
	}

	static void __ioarbiter_notify( $io_context & ctx ) {
		if(__atomic_load_n( &ctx.arbiter->pending.flag, __ATOMIC_SEQ_CST)) {
			__ioarbiter_notify( *ctx.arbiter, &ctx );
		}
	}
|
	// Simply append to the pending external submissions
	static void __ioarbiter_submit( $io_arbiter & mutex this, $io_context * ctx, __u32 idxs[], __u32 have, bool lazy ) {
		__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);

		/* paranoid */ verify( &this == ctx->arbiter );

		// Mark as pending
		__atomic_store_n( &ctx->ext_sq.empty, false, __ATOMIC_SEQ_CST );

		__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

		// Wait for our turn to submit
		wait( ctx->ext_sq.blocked );

		// Submit our indexes
		__submit(ctx, idxs, have, lazy);

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
	}

	static void __ioarbiter_flush( $io_arbiter & mutex this, $io_context * ctx ) {
		/* paranoid */ verify( &this == ctx->arbiter );

		__STATS__( false, io.flush.external += 1; )

		__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

		condition & blocked = ctx->ext_sq.blocked;
		/* paranoid */ verify( ctx->ext_sq.empty == is_empty( blocked ) );
		while(!is_empty( blocked )) {
			signal_block( blocked );
		}

		ctx->ext_sq.empty = true;
	}
#endif