//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#define _GNU_SOURCE         /* See feature_test_macros(7) */
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "io/types.hfa"

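	// Map io_uring opcode numbers (sqe->opcode) to printable names for the
	// debug logs below; the final entry is a catch-all for unknown opcodes.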
	static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};

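	// The leader lock packs its state into a single pointer-sized word:
	//   0p       : unlocked
	//   1p       : locked, with no thread queued to become the next leader
	//   thrd | 1 : locked, with thread `thrd` parked as the next leader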
	// returns true if acquired as leader or as the next leader
	static inline bool try_lock( __leaderlock_t & this ) {
		const uintptr_t thrd = 1z | (uintptr_t)active_thread();
		bool block;
		disable_interrupts();
		for() {
			struct $thread * expected = this.value;
			if( 1p != expected && 0p != expected ) {
				/* paranoid */ verify( thrd != (uintptr_t)expected ); // We better not already be the next leader
				enable_interrupts( __cfaabi_dbg_ctx );
				return false;
			}
			struct $thread * desired;
			if( 0p == expected ) {
				// If the lock isn't locked, acquire it; no need to block
				desired = 1p;
				block = false;
			}
			else {
				// If the lock is already locked, try becoming the next leader
				desired = (struct $thread *)thrd;
				block = true;
			}
			if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
		}
		if( block ) {
			enable_interrupts( __cfaabi_dbg_ctx );
			park();
			disable_interrupts();
		}
		return true;
	}
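
	// A thread for which try_lock returned true eventually becomes the leader
	// and must call next() when done: next() either hands the lock to the
	// parked next leader (returning true) or releases it (returning false).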

	static inline bool next( __leaderlock_t & this ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		struct $thread * nextt;
		for() {
			struct $thread * expected = this.value;
			/* paranoid */ verify( (1 & (uintptr_t)expected) == 1 ); // The lock better be locked

			struct $thread * desired;
			if( 1p == expected ) {
				// No next leader, just unlock
				desired = 0p;
				nextt   = 0p;
			}
			else {
				// There is a next leader, take it out of the word but keep the lock held
				desired = 1p;
				nextt   = (struct $thread *)(~1z & (uintptr_t)expected);
			}
			if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
		}

		if(nextt) {
			unpark( nextt );
			enable_interrupts( __cfaabi_dbg_ctx );
			return true;
		}
		enable_interrupts( __cfaabi_dbg_ctx );
		return false;
	}

	//=============================================================================================
	// I/O Syscall
	//=============================================================================================
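	// A call to io_uring_enter(2) is only needed in some configurations:
	// - without IORING_SETUP_SQPOLL, submitting always requires the syscall;
	// - with SQPOLL, the kernel polls the submit queue itself and only needs a
	//   wakeup (IORING_ENTER_SQ_WAKEUP) if its poller thread went to sleep;
	// - completions require the syscall only under IORING_SETUP_IOPOLL, since
	//   otherwise the kernel posts CQEs without being asked.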
	static int __io_uring_enter( struct __io_data & ring, unsigned to_submit, bool get ) {
		bool need_sys_to_submit = false;
		bool need_sys_to_complete = false;
		unsigned flags = 0;

		TO_SUBMIT:
		if( to_submit > 0 ) {
			if( !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
				need_sys_to_submit = true;
				break TO_SUBMIT;
			}
			if( (*ring.submit_q.flags) & IORING_SQ_NEED_WAKEUP ) {
				need_sys_to_submit = true;
				flags |= IORING_ENTER_SQ_WAKEUP;
			}
		}

		if( get && !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
			flags |= IORING_ENTER_GETEVENTS;
			if( (ring.ring_flags & IORING_SETUP_IOPOLL) ) {
				need_sys_to_complete = true;
			}
		}

		int ret = 0;
		if( need_sys_to_submit || need_sys_to_complete ) {
			__cfadbg_print_safe(io_core, "Kernel I/O : IO_URING enter %d %u %u\n", ring.fd, to_submit, flags);
			ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, 0, flags, (sigset_t *)0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
					ret = -1;
					break;
				default:
					abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
				}
			}
		}

		// Memory barrier
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		return ret;
	}
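
	// Return convention: -1 on the transient EAGAIN/EINTR errors, the result of
	// io_uring_enter (the number of SQEs consumed) when a syscall was made, and
	// 0 when no syscall was needed; any other errno aborts the program.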

	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static unsigned __collect_submitions( struct __io_data & ring );
	static __u32 __release_consumed_submission( struct __io_data & ring );
	static inline void __clean( volatile struct io_uring_sqe * sqe );

	// Process a single completion message from the io_uring
	// This is NOT thread-safe
	static inline void process( volatile struct io_uring_cqe & cqe ) {
		struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
		__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

		fulfil( *future, cqe.res );
	}
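
	// Drain the completion queue: returns a tuple of [how many CQEs were
	// processed, whether any progress was made], where the second component
	// tells the poller to spin again instead of heading towards blocking.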

	static [int, bool] __drain_io( & struct __io_data ring ) {
		/* paranoid */ verify( ! __preemption_enabled() );

		unsigned to_submit = 0;
		if( ring.poller_submits ) {
			// If the poller thread also submits, then we need to aggregate the submissions which are ready
			to_submit = __collect_submitions( ring );
		}

		int ret = __io_uring_enter(ring, to_submit, true);
		if( ret < 0 ) {
			return [0, true];
		}

		// update statistics
		if (to_submit > 0) {
			__STATS__( true,
				if( to_submit > 0 ) {
					io.submit_q.submit_avg.rdy += to_submit;
					io.submit_q.submit_avg.csm += ret;
					io.submit_q.submit_avg.cnt += 1;
				}
			)
		}

		__atomic_thread_fence( __ATOMIC_SEQ_CST );

		// Release the consumed SQEs
		__release_consumed_submission( ring );

		// Drain the queue
		unsigned head = *ring.completion_q.head;
		unsigned tail = *ring.completion_q.tail;
		const __u32 mask = *ring.completion_q.mask;

		// Nothing new was available, return 0
		if (head == tail) {
			return [0, to_submit > 0];
		}

		__u32 count = tail - head;
		/* paranoid */ verify( count != 0 );
		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ring.completion_q.cqes[idx];

			/* paranoid */ verify(&cqe);

			process( cqe );
		}

		// Mark to the kernel that the cqes have been seen
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_fetch_add( ring.completion_q.head, count, __ATOMIC_SEQ_CST );

		return [count, count > 0 || to_submit > 0];
	}

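	// Main loop of the I/O poller thread. A small countdown (reset) acts as
	// backoff: spin and yield while completions keep arriving, re-arm the
	// context one iteration before blocking so a completion racing with the
	// decision is not missed, and only then park on the semaphore.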
	void main( $io_ctx_thread & this ) {
		__ioctx_register( this );

		__cfadbg_print_safe(io_core, "Kernel I/O : IO poller %d (%p) ready\n", this.ring->fd, &this);

		const int reset_cnt = 5;
		int reset = reset_cnt;
		// Then loop until we need to stop
		LOOP:
		while(!__atomic_load_n(&this.done, __ATOMIC_SEQ_CST)) {
			// Drain the I/O
			int count;
			bool again;
			disable_interrupts();
				[count, again] = __drain_io( *this.ring );

				if(!again) reset--;

				// Update statistics
				__STATS__( true,
					io.complete_q.completed_avg.val += count;
					io.complete_q.completed_avg.cnt += 1;
				)
			enable_interrupts( __cfaabi_dbg_ctx );

			// If we got something, just yield and check again
			if(reset > 1) {
				yield();
				continue LOOP;
			}

			// We already failed to find completed entries a few times.
			if(reset == 1) {
				// Rearm the context so it can block,
				// but don't block right away;
				// we need to retry one last time in case
				// something completed *just now*
				__ioctx_prepare_block( this );
				continue LOOP;
			}

			__STATS__( false,
				io.complete_q.blocks += 1;
			)
			__cfadbg_print_safe(io_core, "Kernel I/O : Parking io poller %d (%p)\n", this.ring->fd, &this);

			// block this thread
			wait( this.sem );

			// restore counter
			reset = reset_cnt;
		}

		__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller %d (%p) stopping\n", this.ring->fd, &this);
	}

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps:
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry. This is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach two points of consensus at the same time:
	//     A - The order in which entries are listed in the array: no two threads must pick the
	//         same index for their entries.
	//     B - When the tail can be updated for the kernel. EVERY entry in the array between
	//         head and tail must be fully filled and should never be touched again.
	//

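	// As a sketch, the three steps map onto the helpers in this file roughly as
	// (illustrative only; the real call sites are generated elsewhere in the runtime):
	//
	//     volatile struct io_uring_sqe * sqe; __u32 idx;
	//     [sqe, idx] = __submit_alloc( ring, (__u64)&future );   // step 1: claim an sqe
	//     sqe->opcode = IORING_OP_READ; sqe->fd = fd; /* ... */  // step 2: fill it
	//     __submit( ctx, idx );                                  // step 3: publish it
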
	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe
	// sqe == &sqes[idx]
	[* volatile struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data ) {
		/* paranoid */ verify( data != 0 );

		// Prepare the data we need
		__attribute((unused)) int len   = 0;
		__attribute((unused)) int block = 0;
		__u32 cnt = *ring.submit_q.num;
		__u32 mask = *ring.submit_q.mask;

		__u32 off = thread_rand();

		// Loop around looking for an available spot
		for() {
			// Look through the list starting at some offset
			for(i; cnt) {
				__u64 expected = 3;
				__u32 idx = (i + off) & mask; // Get an index starting from the random offset
				volatile struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx];
				volatile __u64 * udata = &sqe->user_data;

				// Allocate the entry by CASing the user_data field from the free sentinel (3) to the future address
				if( *udata == expected &&
					__atomic_compare_exchange_n( udata, &expected, data, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) )
				{
					// update statistics
					__STATS__( false,
						io.submit_q.alloc_avg.val   += len;
						io.submit_q.alloc_avg.block += block;
						io.submit_q.alloc_avg.cnt   += 1;
					)

					// debug log
					__cfadbg_print_safe( io, "Kernel I/O : allocated [%p, %u] for %p (%p)\n", sqe, idx, active_thread(), (void*)data );

					// Success, zero the entry and return the data
					sqe->opcode = 0;
					sqe->flags = 0;
					sqe->ioprio = 0;
					sqe->fd = 0;
					sqe->off = 0;
					sqe->addr = 0;
					sqe->len = 0;
					sqe->accept_flags = 0;
					sqe->__pad2[0] = 0;
					sqe->__pad2[1] = 0;
					sqe->__pad2[2] = 0;
					return [sqe, idx];
				}
				verify(expected != data);

				// This entry was in use, try the next one
				len ++;
			}

			block++;

			abort( "Kernel I/O : all submit queue entries used, yielding\n" );

			yield();
		}
	}
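
	// Life cycle of sqe->user_data: 3 marks a free entry (a value that cannot
	// collide with a real future address), the future's address marks an
	// allocated entry, and __clean resets a consumed entry back to 3 (after
	// thrashing it with 0xde bytes in debug builds to catch use-after-reclaim).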
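	// Publish a freshly filled sqe index into the shared "ready" array so the
	// submission leader (or poller) can batch it: slots hold -1ul32 when empty
	// and a CAS from -1ul32 to idx claims a slot; when every slot is taken,
	// release already-consumed SQEs and yield before retrying.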
	static inline __u32 __submit_to_ready_array( struct __io_data & ring, __u32 idx, const __u32 mask ) {
		/* paranoid */ verify( idx <= mask   );
		/* paranoid */ verify( idx != -1ul32 );

		// We need to find a spot in the ready array
		__attribute((unused)) int len   = 0;
		__attribute((unused)) int block = 0;
		__u32 ready_mask = ring.submit_q.ready_cnt - 1;

		__u32 off = thread_rand();

		__u32 picked;
		LOOKING: for() {
			for(i; ring.submit_q.ready_cnt) {
				picked = (i + off) & ready_mask;
				__u32 expected = -1ul32;
				if( __atomic_compare_exchange_n( &ring.submit_q.ready[picked], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
					break LOOKING;
				}
				verify(expected != idx);

				len ++;
			}

			block++;

			__u32 released = __release_consumed_submission( ring );
			if( released == 0 ) {
				yield();
			}
		}

		// update statistics
		__STATS__( false,
			io.submit_q.look_avg.val   += len;
			io.submit_q.look_avg.block += block;
			io.submit_q.look_avg.cnt   += 1;
		)

		return picked;
	}

	void __submit( struct io_context * ctx, __u32 idx ) __attribute__((nonnull (1))) {
		__io_data & ring = *ctx->thrd.ring;

		{
			__attribute__((unused)) volatile struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx];
			__cfadbg_print_safe( io,
				"Kernel I/O : submitting %u (%p) for %p\n"
				"   data: %p\n"
				"   opcode: %s\n"
				"   fd: %d\n"
				"   flags: %d\n"
				"   prio: %d\n"
				"   off: %p\n"
				"   addr: %p\n"
				"   len: %d\n"
				"   other flags: %d\n"
				"   splice fd: %d\n"
				"   pad[0]: %llu\n"
				"   pad[1]: %llu\n"
				"   pad[2]: %llu\n",
				idx, sqe,
				active_thread(),
				(void*)sqe->user_data,
				opcodes[sqe->opcode],
				sqe->fd,
				sqe->flags,
				sqe->ioprio,
				sqe->off,
				sqe->addr,
				sqe->len,
				sqe->accept_flags,
				sqe->splice_fd_in,
				sqe->__pad2[0],
				sqe->__pad2[1],
				sqe->__pad2[2]
			);
		}


		// Now get the data we definitely need
		volatile __u32 * const tail = ring.submit_q.tail;
		const __u32 mask = *ring.submit_q.mask;

		// There are 2 submission schemes, check which one we are using
		if( ring.poller_submits ) {
			// If the poller thread submits, then we just need to add this to the ready array
			__submit_to_ready_array( ring, idx, mask );

			post( ctx->thrd.sem );

			__cfadbg_print_safe( io, "Kernel I/O : Added %u to ready for %p\n", idx, active_thread() );
		}
		else if( ring.eager_submits ) {
			__u32 picked = __submit_to_ready_array( ring, idx, mask );

			#if defined(LEADER_LOCK)
				if( !try_lock(ring.submit_q.submit_lock) ) {
					__STATS__( false,
						io.submit_q.helped += 1;
					)
					return;
				}
				/* paranoid */ verify( ! __preemption_enabled() );
				__STATS__( true,
					io.submit_q.leader += 1;
				)
			#else
				for() {
					yield();

					if( try_lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2) ) {
						__STATS__( false,
							io.submit_q.leader += 1;
						)
						break;
					}

					// If someone else collected our index, we are done
					#warning ABA problem
					if( ring.submit_q.ready[picked] != idx ) {
						__STATS__( false,
							io.submit_q.helped += 1;
						)
						return;
					}

					__STATS__( false,
						io.submit_q.busy += 1;
					)
				}
			#endif

			// We got the lock
			// Collect the submissions
			unsigned to_submit = __collect_submitions( ring );

			// Actually submit
			int ret = __io_uring_enter( ring, to_submit, false );

			#if defined(LEADER_LOCK)
				/* paranoid */ verify( ! __preemption_enabled() );
				next(ring.submit_q.submit_lock);
			#else
				unlock(ring.submit_q.submit_lock);
			#endif
			if( ret < 0 ) {
				return;
			}

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.rdy += to_submit;
				io.submit_q.submit_avg.csm += ret;
				io.submit_q.submit_avg.cnt += 1;
			)

			__cfadbg_print_safe( io, "Kernel I/O : submitted %u (among %u) for %p\n", idx, ret, active_thread() );
		}
		else
		{
			// get mutual exclusion
			#if defined(LEADER_LOCK)
				while(!try_lock(ring.submit_q.submit_lock));
			#else
				lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2);
			#endif

			/* paranoid */ verifyf( ring.submit_q.sqes[ idx ].user_data != 3ul64,
			/* paranoid */ "index %u already reclaimed\n"
			/* paranoid */ "head %u, prev %u, tail %u\n"
			/* paranoid */ "[-0: %u,-1: %u,-2: %u,-3: %u]\n",
			/* paranoid */ idx,
			/* paranoid */ *ring.submit_q.head, ring.submit_q.prev_head, *tail
			/* paranoid */ ,ring.submit_q.array[ ((*ring.submit_q.head) - 0) & (*ring.submit_q.mask) ]
			/* paranoid */ ,ring.submit_q.array[ ((*ring.submit_q.head) - 1) & (*ring.submit_q.mask) ]
			/* paranoid */ ,ring.submit_q.array[ ((*ring.submit_q.head) - 2) & (*ring.submit_q.mask) ]
			/* paranoid */ ,ring.submit_q.array[ ((*ring.submit_q.head) - 3) & (*ring.submit_q.mask) ]
			/* paranoid */ );

			// Append directly to the kernel-visible submit array

			/* paranoid */ verify( idx <= mask );
			ring.submit_q.array[ (*tail) & mask ] = idx;
			__atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

			// Submit however many entries need to be submitted
			int ret = __io_uring_enter( ring, 1, false );
			if( ret < 0 ) {
				switch((int)errno) {
				default:
					abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
				}
			}

			/* paranoid */ verify(ret == 1);

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.csm += 1;
				io.submit_q.submit_avg.cnt += 1;
			)

			{
				__attribute__((unused)) volatile __u32 * const head = ring.submit_q.head;
				__attribute__((unused)) __u32 last_idx = ring.submit_q.array[ ((*head) - 1) & mask ];
				__attribute__((unused)) volatile struct io_uring_sqe * sqe = &ring.submit_q.sqes[last_idx];

				__cfadbg_print_safe( io,
					"Kernel I/O : last submitted is %u (%p)\n"
					"   data: %p\n"
					"   opcode: %s\n"
					"   fd: %d\n"
					"   flags: %d\n"
					"   prio: %d\n"
					"   off: %p\n"
					"   addr: %p\n"
					"   len: %d\n"
					"   other flags: %d\n"
					"   splice fd: %d\n"
					"   pad[0]: %llu\n"
					"   pad[1]: %llu\n"
					"   pad[2]: %llu\n",
					last_idx, sqe,
					(void*)sqe->user_data,
					opcodes[sqe->opcode],
					sqe->fd,
					sqe->flags,
					sqe->ioprio,
					sqe->off,
					sqe->addr,
					sqe->len,
					sqe->accept_flags,
					sqe->splice_fd_in,
					sqe->__pad2[0],
					sqe->__pad2[1],
					sqe->__pad2[2]
				);
			}

			__atomic_thread_fence( __ATOMIC_SEQ_CST );
			// Release the consumed SQEs
			__release_consumed_submission( ring );
			// ring.submit_q.sqes[idx].user_data = 3ul64;

			#if defined(LEADER_LOCK)
				next(ring.submit_q.submit_lock);
			#else
				unlock(ring.submit_q.submit_lock);
			#endif

			__cfadbg_print_safe( io, "Kernel I/O : submitted %u for %p\n", idx, active_thread() );
		}
	}

	// #define PARTIAL_SUBMIT 32
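	// When PARTIAL_SUBMIT is defined, each collection pass only scans a window
	// of that many ready slots (advancing prev_ready) rather than the whole
	// array, bounding the work done while holding the submit lock.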

	// go through the list of submissions in the ready array and move them into
	// the ring's submit queue
	static unsigned __collect_submitions( struct __io_data & ring ) {
		/* paranoid */ verify( ring.submit_q.ready != 0p );
		/* paranoid */ verify( ring.submit_q.ready_cnt > 0 );

		unsigned to_submit = 0;
		__u32 tail = *ring.submit_q.tail;
		const __u32 mask = *ring.submit_q.mask;
		#if defined(PARTIAL_SUBMIT)
			#if defined(LEADER_LOCK)
				#error PARTIAL_SUBMIT and LEADER_LOCK cannot co-exist
			#endif
			const __u32 cnt = ring.submit_q.ready_cnt > PARTIAL_SUBMIT ? PARTIAL_SUBMIT : ring.submit_q.ready_cnt;
			const __u32 offset = ring.submit_q.prev_ready;
			ring.submit_q.prev_ready += cnt;
		#else
			const __u32 cnt = ring.submit_q.ready_cnt;
			const __u32 offset = 0;
		#endif

		// Go through the list of ready submissions
		for( c; cnt ) {
			__u32 i = (offset + c) % ring.submit_q.ready_cnt;

			// replace any submission with the sentinel, to consume it.
			__u32 idx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED);

			// If it was already the sentinel, then we are done
			if( idx == -1ul32 ) continue;

			// If we got a real submission, append it to the list
			ring.submit_q.array[ (tail + to_submit) & mask ] = idx & mask;
			to_submit++;
		}

		// Increment the tail based on how many we are ready to submit
		__atomic_fetch_add(ring.submit_q.tail, to_submit, __ATOMIC_SEQ_CST);

		return to_submit;
	}

	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring
	static __u32 __release_consumed_submission( struct __io_data & ring ) {
		const __u32 smask = *ring.submit_q.mask;

		// We need the lock to copy the old head and new head
		if( !try_lock(ring.submit_q.release_lock __cfaabi_dbg_ctx2) ) return 0;
		__attribute__((unused))
		__u32 ctail = *ring.submit_q.tail;        // get the current tail of the queue
		__u32 chead = *ring.submit_q.head;        // get the current head of the queue
		__u32 phead = ring.submit_q.prev_head;    // get the head the last time we were here
		ring.submit_q.prev_head = chead;          // note up to where we processed
		unlock(ring.submit_q.release_lock);

		// the 3 fields are organized like this diagram,
		// except the layout is a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		// phead    chead    ctail

		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		// We acquired a previous-head/current-head range,
		// go through the range and release the sqes
		for( i; count ) {
			__u32 idx = ring.submit_q.array[ (phead + i) & smask ];

			/* paranoid */ verify( 0 != ring.submit_q.sqes[ idx ].user_data );
			__clean( &ring.submit_q.sqes[ idx ] );
		}
		return count;
	}

	void __sqe_clean( volatile struct io_uring_sqe * sqe ) {
		__clean( sqe );
	}

	static inline void __clean( volatile struct io_uring_sqe * sqe ) {
		// If we are in debug mode, thrash the fields to make sure we catch reclamation errors
		__cfaabi_dbg_debug_do(
			memset(sqe, 0xde, sizeof(*sqe));
			sqe->opcode = IORING_OP_LAST;
		);

		// Mark the entry as unused
		__atomic_store_n(&sqe->user_data, 3ul64, __ATOMIC_SEQ_CST);
	}
#endif