//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif

#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#define _GNU_SOURCE	/* See feature_test_macros(7) */
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "io/types.hfa"

	__attribute__((unused)) static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};

	//=============================================================================================
	// I/O Syscall
	//=============================================================================================
	static int __io_uring_enter( struct $io_context & ctx, unsigned to_submit, bool get ) {
		__STATS__( false, io.calls.count++; )
		bool need_sys_to_submit = false;
		bool need_sys_to_complete = false;
		unsigned flags = 0;

		TO_SUBMIT:
		if( to_submit > 0 ) {
			if( !(ctx.ring_flags & IORING_SETUP_SQPOLL) ) {
				need_sys_to_submit = true;
				break TO_SUBMIT;
			}
			if( (*ctx.sq.flags) & IORING_SQ_NEED_WAKEUP ) {
				need_sys_to_submit = true;
				flags |= IORING_ENTER_SQ_WAKEUP;
			}
		}

		if( get && !(ctx.ring_flags & IORING_SETUP_SQPOLL) ) {
			flags |= IORING_ENTER_GETEVENTS;
			if( (ctx.ring_flags & IORING_SETUP_IOPOLL) ) {
				need_sys_to_complete = true;
			}
		}

		int ret = 0;
		if( need_sys_to_submit || need_sys_to_complete ) {
			__cfadbg_print_safe(io_core, "Kernel I/O : IO_URING enter %d %u %u\n", ctx.fd, to_submit, flags);
			__STATS__( false, io.calls.blocks++; )
			ret = syscall( __NR_io_uring_enter, ctx.fd, to_submit, 0, flags, (sigset_t *)0p, _NSIG / 8);
			__cfadbg_print_safe(io_core, "Kernel I/O : IO_URING %d returned %d\n", ctx.fd, ret);
		}

		// Memory barrier
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		return ret;
	}
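
	// Quick summary of the logic above (a reading aid, not a full statement of io_uring(7)
	// semantics): without IORING_SETUP_SQPOLL, submitting always requires the syscall; with
	// SQPOLL, the syscall is only needed to wake the kernel poller thread (IORING_SQ_NEED_WAKEUP
	// maps to IORING_ENTER_SQ_WAKEUP); and completions only require the syscall when
	// IORING_SETUP_IOPOLL forces polling, since otherwise the CQ ring can be read directly
	// from user space.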

	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static inline unsigned __flush( struct $io_context & );
	static inline __u32 __release_sqes( struct $io_context & );

	static bool __drain_io( struct $io_context & ctx ) {
		unsigned to_submit = __flush( ctx );
		int ret = __io_uring_enter( ctx, to_submit, true );
		if( ret < 0 ) {
			switch((int)errno) {
			case EAGAIN:
			case EINTR:
			case EBUSY:
				// Update statistics
				__STATS__( false, io.calls.errors.busy ++; )
				return true;
				break;
			default:
				abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
			}
		}

		// Update statistics
		if (to_submit > 0) {
			__STATS__( false, io.calls.submitted += ret; )
			/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

			/* paranoid */ verify( ctx.sq.to_submit >= ret );
			ctx.sq.to_submit -= ret;

			/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

			if(ret) {
				__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring\n", ret);
			}
		}

		// Release the consumed SQEs
		__release_sqes( ctx );

		// Drain the queue
		unsigned head = *ctx.cq.head;
		unsigned tail = *ctx.cq.tail;
		const __u32 mask = *ctx.cq.mask;

		// Nothing new was drained; report whether submissions are still pending
		if (head == tail) {
			return ctx.sq.to_submit > 0;
		}

		__u32 count = tail - head;
		/* paranoid */ verify( count != 0 );
		__STATS__( false, io.calls.completed += count; )

		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ctx.cq.cqes[idx];

			/* paranoid */ verify(&cqe);

			struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
			__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

			fulfil( *future, cqe.res );
		}

		if(count) {
			__cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);
		}

		// Mark the CQEs as seen by the kernel:
		// ensure the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_store_n( ctx.cq.head, head + count, __ATOMIC_SEQ_CST );

		return count > 0 || to_submit > 0;
	}
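
	// Worked example of the CQ arithmetic above: with a 32-entry ring (*ctx.cq.mask == 31),
	// head == 30 and tail == 34 give count == 4, and the loop visits physical slots
	// 30, 31, 0, 1 -- head and tail increase monotonically and are only masked on access,
	// so unsigned subtraction handles wrap-around for free.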

	void main( $io_context & this ) {
		__cfadbg_print_safe(io_core, "Kernel I/O : IO poller %d (%p) ready\n", this.fd, &this);

		const int reset_cnt = 5;
		int reset = reset_cnt;
		// Then loop until we need to stop
		LOOP:
		while() {
			waitfor( ^?{} : this) {
				break LOOP;
			}
			or else {}

			// Drain the io
			bool again = __drain_io( this );

			if(!again) reset--;

			// If we got something, just yield and check again
			if(reset > 1) {
				yield();
				continue LOOP;
			}

			// We already failed to find completed entries a few times.
			if(reset == 1) {
				// Rearm the context so it can block,
				// but don't block right away:
				// we need to retry one last time in case
				// something completed *just now*.
				__ioctx_prepare_block( this );
				continue LOOP;
			}

			__STATS__( false,
				io.poller.sleeps += 1;
			)
			__cfadbg_print_safe(io_core, "Kernel I/O : Parking io poller %d (%p)\n", this.fd, &this);

			// Block this thread
			wait( this.sem );

			// Restore the counter
			reset = reset_cnt;
		}

		__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller %d (%p) stopping\n", this.fd, &this);
	}
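
	// Timeline of the backoff above with reset_cnt == 5: the first three consecutive empty
	// drains only yield (reset 5 -> 4 -> 3 -> 2); the fourth (reset reaching 1) re-arms the
	// context with __ioctx_prepare_block but retries once more, in case a completion raced
	// with the re-arming; only a fifth empty drain parks the poller on its semaphore, where
	// it sleeps until a post( this.sem ) from a submission wakes it.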

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps:
	// 1 - Allocate a queue entry. The ring already has memory for all entries, but only the
	//     ones listed in sq.array are visible to the kernel. For those not listed, the kernel
	//     does not offer any assurance that an entry is not being filled by multiple threads.
	//     Therefore, we need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry. This is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach two points of consensus at the same time:
	//     A - The order in which entries are listed in the array: no two threads may pick the
	//         same index for their entries.
	//     B - When the tail can be updated for the kernel: EVERY entry in the array between
	//         head and tail must be fully filled and must never be touched again.
	//
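
	// A sketch of the user-facing sequence that results (illustrative only, using
	// cfa_io_allocate and cfa_io_submit defined below; error handling and the declaration
	// of the caller's io_future_t are elided):
	//
	//   __u32 idxs[1];
	//   struct io_uring_sqe * sqes[1];
	//   struct $io_context * ctx = cfa_io_allocate( sqes, idxs, 1 );	// step 1: allocate
	//   sqes[0]->opcode    = IORING_OP_NOP;                         	// step 2: fill
	//   sqes[0]->user_data = (__u64)(uintptr_t)&future;
	//   cfa_io_submit( ctx, idxs, 1 );                              	// step 3: append & submit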
	static $io_context * __ioarbiter_allocate( $io_arbiter & mutex this, processor *, __u32 idxs[], __u32 want );
	static void __ioarbiter_submit ( $io_arbiter & mutex this, $io_context * , __u32 idxs[], __u32 have );
	static void __ioarbiter_flush  ( $io_arbiter & mutex this, $io_context * );
	static inline void __ioarbiter_notify( $io_context & ctx );

	//=============================================================================================
	// Allocation
	// For the user's convenience, fill the sqes from the indexes
	static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct $io_context * ctx) {
		struct io_uring_sqe * sqes = ctx->sq.sqes;
		for(i; want) {
			out_sqes[i] = &sqes[idxs[i]];
		}
	}

	// Try to directly allocate from a given context
	// Not thread-safe
	static inline bool __alloc(struct $io_context * ctx, __u32 idxs[], __u32 want) {
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 fhead = sq.free_ring.head;	// get the current head of the queue
		__u32 ftail = sq.free_ring.tail;	// get the current tail of the queue

		// If we don't have enough sqes, fail
		if((ftail - fhead) < want) { return false; }

		// Copy all the indexes we want from the available list
		for(i; want) {
			idxs[i] = sq.free_ring.array[(fhead + i) & mask];
		}

		// Advance the head to mark the indexes as consumed
		__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

		// Return success
		return true;
	}
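
	// Note on the arithmetic above: free_ring.head and free_ring.tail increase monotonically
	// and are only masked on access, so `ftail - fhead` counts the free entries even across
	// 32-bit overflow; e.g. fhead == 0xfffffffe and ftail == 0x00000002 still yields 4.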

	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe,
	// i.e. sqe == &sqes[idx]
	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( proc->io.lock == false );

		__atomic_store_n( &proc->io.lock, true, __ATOMIC_SEQ_CST );
		$io_context * ctx = proc->io.ctx;
		$io_arbiter * ioarb = proc->cltr->io.arbiter;
		/* paranoid */ verify( ioarb );

		// Can we proceed to the fast path?
		if( ctx 				// We already have an instance?
		    && !ctx->revoked )	// Our instance is still valid?
		{
			__cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

			// We can proceed to the fast path
			if( __alloc(ctx, idxs, want) ) {
				// Allocation was successful.
				// Mark the instance as no longer in-use and re-enable interrupts
				__atomic_store_n( &proc->io.lock, false, __ATOMIC_RELEASE );
				__STATS__( true, io.alloc.fast += 1; )
				enable_interrupts( __cfaabi_dbg_ctx );

				__cfadbg_print_safe(io, "Kernel I/O : fast allocation successful\n");

				__fill( sqes, want, idxs, ctx );
				return ctx;
			}
			// The fast path failed, fallback
			__STATS__( true, io.alloc.fail += 1; )
		}

		// Fast path failed, fallback on arbitration
		__atomic_store_n( &proc->io.lock, false, __ATOMIC_RELEASE );
		__STATS__( true, io.alloc.slow += 1; )
		enable_interrupts( __cfaabi_dbg_ctx );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

		struct $io_context * ret = __ioarbiter_allocate(*ioarb, proc, idxs, want);

		__cfadbg_print_safe(io, "Kernel I/O : slow allocation completed\n");

		__fill( sqes, want, idxs, ret );
		return ret;
	}

	//=============================================================================================
	// Submission
	static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have) {
		// We can proceed to the fast path
		// Get the right objects
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 tail = sq.kring.ready;

		// Add the sqes to the array
		for( i; have ) {
			sq.kring.array[ (tail + i) & mask ] = idxs[i];
		}

		// Make the sqes visible to the submitter
		__atomic_store_n(&sq.kring.ready, tail + have, __ATOMIC_RELEASE);

		// Make sure the poller is awake
		__cfadbg_print_safe(io, "Kernel I/O : waking the poller\n");
		post( ctx->sem );
	}

	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have ) __attribute__((nonnull (1))) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u\n", have);

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( proc->io.lock == false );

		__atomic_store_n( &proc->io.lock, true, __ATOMIC_SEQ_CST );
		$io_context * ctx = proc->io.ctx;

		// Can we proceed to the fast path?
		if( ctx 				// We already have an instance?
		    && !ctx->revoked	// Our instance is still valid?
		    && ctx == inctx )	// We have the right instance?
		{
			__submit(ctx, idxs, have);

			// Mark the instance as no longer in-use, re-enable interrupts and return
			__atomic_store_n( &proc->io.lock, false, __ATOMIC_RELEASE );
			__STATS__( true, io.submit.fast += 1; )
			enable_interrupts( __cfaabi_dbg_ctx );

			__cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
			return;
		}

		// Fast path failed, fallback on arbitration
		__atomic_store_n( &proc->io.lock, false, __ATOMIC_RELEASE );
		__STATS__( true, io.submit.slow += 1; )
		enable_interrupts( __cfaabi_dbg_ctx );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

		__ioarbiter_submit(*inctx->arbiter, inctx, idxs, have);
	}

	//=============================================================================================
	// Flushing
	static unsigned __flush( struct $io_context & ctx ) {
		// First check for external submissions
		if( !__atomic_load_n(&ctx.ext_sq.empty, __ATOMIC_SEQ_CST) ) {
			// We have external submissions, delegate to the arbiter
			__ioarbiter_flush( *ctx.arbiter, &ctx );
		}

		__u32 tail = *ctx.sq.kring.tail;
		__u32 ready = ctx.sq.kring.ready;

		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
		ctx.sq.to_submit += (ready - tail);
		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

		if(ctx.sq.to_submit) {
			__cfadbg_print_safe(io, "Kernel I/O : %u ready to submit\n", ctx.sq.to_submit);
		}

		__atomic_store_n(ctx.sq.kring.tail, ready, __ATOMIC_RELEASE);

		return ctx.sq.to_submit;
	}
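
	// Example: if the kernel-visible tail is at 8 and __submit has published entries up to
	// ready == 11, then to_submit grows by 3 and the kernel tail is advanced to 11. The
	// release store here pairs with __submit's release store on kring.ready, so the kernel
	// never observes an index whose sqe is not yet fully filled (consensus B above).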

	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring.
	// This cannot be done by multiple threads.
	static __u32 __release_sqes( struct $io_context & ctx ) {
		const __u32 mask = *ctx.sq.mask;

		__attribute__((unused))
		__u32 ctail = *ctx.sq.kring.tail;	// get the current tail of the queue
		__u32 chead = *ctx.sq.kring.head;	// get the current head of the queue
		__u32 phead = ctx.sq.kring.released;	// get the head the last time we were here

		__u32 ftail = ctx.sq.free_ring.tail;	// get the current tail of the free queue

		// the 3 fields are organized like this diagram,
		// except it's a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		//  phead    chead    ctail

		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		if(count == 0) {
			return 0;
		}

		// We acquired a previous-head/current-head range:
		// go through the range and release the sqes
		for( i; count ) {
			__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
			ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
		}

		ctx.sq.kring.released = chead;	// note up to where we processed
		__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

		__ioarbiter_notify(ctx);

		return count;
	}
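
	// Example: with released == 4 (phead) and a kernel head that has advanced to 7 (chead),
	// count == 3 and the indexes stored at kring.array slots 4, 5 and 6 are appended to the
	// free ring at slots ftail, ftail+1 and ftail+2; entries between chead and ctail are
	// still owned by the kernel and must not be recycled yet.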

	//=============================================================================================
	// I/O Arbiter
	//=============================================================================================
	static inline void __revoke( $io_arbiter & this, $io_context * ctx ) {
		if(ctx->revoked) return;

		/* paranoid */ verify( ctx->proc );
		remove( this.assigned, *ctx );

		// Mark as revoked
		__atomic_store_n(&ctx->revoked, true, __ATOMIC_SEQ_CST);

		// Wait for the processor to no longer use it
		while(ctx->proc->io.lock) Pause();

		// Remove the coupling with the processor
		ctx->proc->io.ctx = 0p;
		ctx->proc = 0p;

		// Add to the available contexts
		addHead( this.available, *ctx );
	}

	static inline void __assign( $io_arbiter & this, $io_context * ctx, processor * proc ) {
		remove( this.available, *ctx );

		ctx->revoked = false;
		ctx->proc = proc;
		__atomic_store_n(&proc->io.ctx, ctx, __ATOMIC_SEQ_CST);

		// Add to the assigned contexts
		addTail( this.assigned, *ctx );
	}

	static $io_context * __ioarbiter_allocate( $io_arbiter & mutex this, processor * proc, __u32 idxs[], __u32 want ) {
		__cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

		SeqIter($io_context) iter;
		$io_context & ci;
		// Do we already have something available?
		for( over( iter, this.available ); iter | ci; ) {
			__cfadbg_print_safe(io, "Kernel I/O : attempting available context\n");

			$io_context * c = &ci;
			if(__alloc(c, idxs, want)) {
				__assign( this, c, proc );
				return c;
			}
		}

		// Otherwise, we have no choice but to revoke everyone to check if other instances have available data
		for( over( iter, this.assigned ); iter | ci; ) {
			__cfadbg_print_safe(io, "Kernel I/O : revoking context for allocation\n");

			$io_context * c = &ci;
			__revoke( this, c );

			__STATS__( false, io.alloc.revoke += 1; )

			if(__alloc(c, idxs, want)) {
				__assign( this, c, proc );
				return c;
			}
		}

		__cfadbg_print_safe(io, "Kernel I/O : waiting for available resources\n");

		__STATS__( false, io.alloc.block += 1; )

		// No one has any resources left, wait for something to finish.
		// Mark as pending
		__atomic_store_n( &this.pending.flag, true, __ATOMIC_SEQ_CST );

		// Wait for our turn to allocate
		wait( this.pending.blocked, want );

		__attribute__((unused)) bool ret =
			__alloc( this.pending.ctx, idxs, want );
		/* paranoid */ verify( ret );

		__assign( this, this.pending.ctx, proc );
		return this.pending.ctx;
	}

	static void __ioarbiter_notify( $io_arbiter & mutex this, $io_context * ctx ) {
		/* paranoid */ verify( !is_empty(this.pending.blocked) );
		this.pending.ctx = ctx;

		while( !is_empty(this.pending.blocked) ) {
			__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
			__u32 want = front( this.pending.blocked );

			// Stop if there are not enough free entries to satisfy the oldest waiter
			if( have < want ) return;

			signal_block( this.pending.blocked );
		}

		this.pending.flag = false;
	}

	static void __ioarbiter_notify( $io_context & ctx ) {
		if(__atomic_load_n( &ctx.arbiter->pending.flag, __ATOMIC_SEQ_CST)) {
			__ioarbiter_notify( *ctx.arbiter, &ctx );
		}
	}

	// Simply append to the pending submissions
	static void __ioarbiter_submit( $io_arbiter & mutex this, $io_context * ctx, __u32 idxs[], __u32 have ) {
		__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);

		/* paranoid */ verify( &this == ctx->arbiter );

		// Mark as pending
		__atomic_store_n( &ctx->ext_sq.empty, false, __ATOMIC_SEQ_CST );

		// Wake-up the poller
		post( ctx->sem );

		__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

		// Wait for our turn to submit
		wait( ctx->ext_sq.blocked );

		// Submit our indexes
		__submit(ctx, idxs, have);

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
	}

	static void __ioarbiter_flush( $io_arbiter & mutex this, $io_context * ctx ) {
		/* paranoid */ verify( &this == ctx->arbiter );

		__STATS__( false, io.flush.external += 1; )

		__revoke( this, ctx );

		__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

		condition & blocked = ctx->ext_sq.blocked;
		/* paranoid */ verify( ctx->ext_sq.empty == is_empty( blocked ) );
		while(!is_empty( blocked )) {
			signal_block( blocked );
		}

		ctx->ext_sq.empty = true;
	}

	void __ioarbiter_register( $io_arbiter & mutex this, $io_context & ctx ) {
		__cfadbg_print_safe(io, "Kernel I/O : registering new context\n");

		ctx.arbiter = &this;

		// Add to the available contexts
		addHead( this.available, ctx );

		// Check if this solves pending allocations
		if(this.pending.flag) {
			__ioarbiter_notify( ctx );
		}
	}

	void __ioarbiter_unregister( $io_arbiter & mutex this, $io_context & ctx ) {
		/* paranoid */ verify( &this == ctx.arbiter );

		__revoke( this, &ctx );

		remove( this.available, ctx );
	}
#endif