//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#define _GNU_SOURCE         /* See feature_test_macros(7) */
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/epoll.h>
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "io/types.hfa"

	// returns true if acquired as leader or second leader
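	// Summary comment (added; a reading of the two functions below, not part of
	// the original source): __leaderlock_t packs its whole state into one
	// pointer-sized word:
	//   0p                 - unlocked
	//   1p                 - locked, no thread waiting to take over as leader
	//   thread-ptr | 1     - locked, low bit tags the parked thread that will
	//                        become the next leader when next() is called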
	static inline bool try_lock( __leaderlock_t & this ) {
		const uintptr_t thrd = 1z | (uintptr_t)active_thread();
		bool block;
		disable_interrupts();
		for() {
			struct $thread * expected = this.value;
			if( 1p != expected && 0p != expected ) {
				/* paranoid */ verify( thrd != (uintptr_t)expected ); // We better not already be the next leader
				enable_interrupts( __cfaabi_dbg_ctx );
				return false;
			}
			struct $thread * desired;
			if( 0p == expected ) {
				// If the lock isn't locked, acquire it; no need to block
				desired = 1p;
				block = false;
			}
			else {
				// If the lock is already locked, try becoming the next leader
				desired = (struct $thread *)thrd;
				block = true;
			}
			if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
		}
		if( block ) {
			enable_interrupts( __cfaabi_dbg_ctx );
			park( __cfaabi_dbg_ctx );
			disable_interrupts();
		}
		return true;
	}

	static inline bool next( __leaderlock_t & this ) {
		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
		struct $thread * nextt;
		for() {
			struct $thread * expected = this.value;
			/* paranoid */ verify( (1 & (uintptr_t)expected) == 1 ); // The lock better be locked

			struct $thread * desired;
			if( 1p == expected ) {
				// No next leader, just unlock
				desired = 0p;
				nextt   = 0p;
			}
			else {
				// There is a next leader, remove it but keep the lock held
				desired = 1p;
				nextt   = (struct $thread *)(~1z & (uintptr_t)expected);
			}
			if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
		}

		if(nextt) {
			unpark( nextt __cfaabi_dbg_ctx2 );
			enable_interrupts( __cfaabi_dbg_ctx );
			return true;
		}
		enable_interrupts( __cfaabi_dbg_ctx );
		return false;
	}
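	// Usage sketch for the pair above, assumed from the eager-submit path later
	// in this file (illustrative only):
	//   if( !try_lock( lock ) ) return;  // another leader holds it and will do our work
	//   ... perform the batched operation ...
	//   next( lock );                    // unlock, or hand off to the parked next leader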

	//=============================================================================================
	// I/O Syscall
	//=============================================================================================
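	// Decision summary (added; a reading of the logic below, not in the original
	// source):
	//  - submissions need a syscall unless the ring was created with
	//    IORING_SETUP_SQPOLL, in which case a kernel thread polls the SQ and we
	//    only enter to wake it when IORING_SQ_NEED_WAKEUP is set;
	//  - completions need IORING_ENTER_GETEVENTS, and an actual syscall only when
	//    the ring uses IORING_SETUP_IOPOLL, where completions are reaped inside
	//    io_uring_enter rather than posted asynchronously to the CQ ring.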
	static int __io_uring_enter( struct __io_data & ring, unsigned to_submit, bool get ) {
		bool need_sys_to_submit = false;
		bool need_sys_to_complete = false;
		unsigned flags = 0;

		TO_SUBMIT:
		if( to_submit > 0 ) {
			if( !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
				need_sys_to_submit = true;
				break TO_SUBMIT;
			}
			if( (*ring.submit_q.flags) & IORING_SQ_NEED_WAKEUP ) {
				need_sys_to_submit = true;
				flags |= IORING_ENTER_SQ_WAKEUP;
			}
		}

		if( get && !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
			flags |= IORING_ENTER_GETEVENTS;
			if( (ring.ring_flags & IORING_SETUP_IOPOLL) ) {
				need_sys_to_complete = true;
			}
		}

		int ret = 0;
		if( need_sys_to_submit || need_sys_to_complete ) {
			ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, 0, flags, 0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
					ret = -1;
					break;
				default:
					abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
				}
			}
		}

		// Memory barrier
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		return ret;
	}

	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static unsigned __collect_submitions( struct __io_data & ring );
	static __u32 __release_consumed_submission( struct __io_data & ring );

	static inline void process( struct io_uring_cqe & cqe ) {
		struct __io_user_data_t * data = (struct __io_user_data_t *)(uintptr_t)cqe.user_data;
		__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", data, cqe.res, data->thrd );

		data->result = cqe.res;
		post( data->sem );
	}

	// Process a single completion message from the io_uring
	// This is NOT thread-safe
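	// Returns [count, again]: how many completions were drained, and whether this
	// round made progress so the poller should spin again rather than count toward
	// parking (summary added here; inferred from the uses in main() below).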
	static [int, bool] __drain_io( & struct __io_data ring ) {
		/* paranoid */ verify( !kernelTLS.preemption_state.enabled );

		unsigned to_submit = 0;
		if( ring.poller_submits ) {
			// If the poller thread also submits, then we need to aggregate the submissions which are ready
			to_submit = __collect_submitions( ring );
		}

		int ret = __io_uring_enter(ring, to_submit, true);
		if( ret < 0 ) {
			return [0, true];
		}

		// update statistics
		if (to_submit > 0) {
			__STATS__( true,
				if( to_submit > 0 ) {
					io.submit_q.submit_avg.rdy += to_submit;
					io.submit_q.submit_avg.csm += ret;
					io.submit_q.submit_avg.cnt += 1;
				}
			)
		}

		// Release the consumed SQEs
		__release_consumed_submission( ring );

		// Drain the queue
		unsigned head = *ring.completion_q.head;
		unsigned tail = *ring.completion_q.tail;
		const __u32 mask = *ring.completion_q.mask;

		// Nothing new, return 0
		if (head == tail) {
			return [0, to_submit > 0];
		}

		__u32 count = tail - head;
		/* paranoid */ verify( count != 0 );
		for(i; count) {
			unsigned idx = (head + i) & mask;
			struct io_uring_cqe & cqe = ring.completion_q.cqes[idx];

			/* paranoid */ verify(&cqe);

			process( cqe );
		}

		// Mark to the kernel that the cqes have been seen
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		__atomic_fetch_add( ring.completion_q.head, count, __ATOMIC_RELAXED );

		return [count, count > 0 || to_submit > 0];
	}

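	// Fast poller thread main loop (summary comment added): repeatedly drain the
	// ring, yielding between rounds; after 5 rounds without progress it arms the
	// epoll_event registered by __ioctx_register and parks on its semaphore until
	// the slow path wakes it again.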
	void main( $io_ctx_thread & this ) {
		epoll_event ev;
		__ioctx_register( this, ev );

		__cfadbg_print_safe(io_core, "Kernel I/O : IO poller %p for ring %p ready\n", &this, &this.ring);

		int reset = 0;
		// Then loop until we need to stop
		while(!__atomic_load_n(&this.done, __ATOMIC_SEQ_CST)) {
			// Drain the io
			int count;
			bool again;
			disable_interrupts();
			[count, again] = __drain_io( *this.ring );

			if(!again) reset++;

			// Update statistics
			__STATS__( true,
				io.complete_q.completed_avg.val += count;
				io.complete_q.completed_avg.cnt += 1;
			)
			enable_interrupts( __cfaabi_dbg_ctx );

			// If we got something, just yield and check again
			if(reset < 5) {
				yield();
			}
			// We didn't get anything, baton-pass to the slow poller
			else {
				__STATS__( false,
					io.complete_q.blocks += 1;
				)
				__cfadbg_print_safe(io_core, "Kernel I/O : Parking io poller %p\n", &this.self);
				reset = 0;

				// block this thread
				__ioctx_prepare_block( this, ev );
				wait( this.sem );
			}
		}

		__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller for ring %p stopping\n", &this.ring);
	}

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps:
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible by the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry. This is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach two consensus decisions at the same time:
	//     A - The order in which entries are listed in the array: no two threads must pick the
	//         same index for their entries.
	//     B - When the tail can be updated for the kernel: every entry in the array between
	//         head and tail must be fully filled and must never be touched again.
	//
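	// Putting the three steps together, a rough sketch of the expected call
	// sequence (illustrative only, using the functions defined below):
	//   [sqe, idx] = __submit_alloc( ring, (__u64)(uintptr_t)&ud ); // step 1: claim an sqe
	//   ... fill in *sqe: opcode, fd, buffer, etc. ...              // step 2
	//   __submit( ctx, idx );                                       // step 3: publish & submit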
	[* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data ) {
		/* paranoid */ verify( data != 0 );

		// Prepare the data we need
		__attribute((unused)) int len   = 0;
		__attribute((unused)) int block = 0;
		__u32 cnt = *ring.submit_q.num;
		__u32 mask = *ring.submit_q.mask;

		disable_interrupts();
		__u32 off = __tls_rand();
		enable_interrupts( __cfaabi_dbg_ctx );

		// Loop around looking for an available spot
		for() {
			// Look through the list starting at some offset
			for(i; cnt) {
				__u64 expected = 0;
				__u32 idx = (i + off) & mask;
				struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx];
				volatile __u64 * udata = &sqe->user_data;

				if( *udata == expected &&
					__atomic_compare_exchange_n( udata, &expected, data, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) )
				{
					// update statistics
					__STATS__( false,
						io.submit_q.alloc_avg.val   += len;
						io.submit_q.alloc_avg.block += block;
						io.submit_q.alloc_avg.cnt   += 1;
					)

					// Success, return the data
					return [sqe, idx];
				}
				verify(expected != data);

				len ++;
			}

			block++;
			yield();
		}
	}

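	// Publish a filled sqe's index into a random free slot of the ready array so a
	// later leader can collect it; free slots hold the sentinel -1ul32. When the
	// array is full, try reclaiming consumed SQEs and yield only if nothing was
	// released. (Header comment added; behaviour as per the body below.)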
	static inline __u32 __submit_to_ready_array( struct __io_data & ring, __u32 idx, const __u32 mask ) {
		/* paranoid */ verify( idx <= mask   );
		/* paranoid */ verify( idx != -1ul32 );

		// We need to find a spot in the ready array
		__attribute((unused)) int len   = 0;
		__attribute((unused)) int block = 0;
		__u32 ready_mask = ring.submit_q.ready_cnt - 1;

		disable_interrupts();
		__u32 off = __tls_rand();
		enable_interrupts( __cfaabi_dbg_ctx );

		__u32 picked;
		LOOKING: for() {
			for(i; ring.submit_q.ready_cnt) {
				picked = (i + off) & ready_mask;
				__u32 expected = -1ul32;
				if( __atomic_compare_exchange_n( &ring.submit_q.ready[picked], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
					break LOOKING;
				}
				verify(expected != idx);

				len ++;
			}

			block++;

			__u32 released = __release_consumed_submission( ring );
			if( released == 0 ) {
				yield();
			}
		}

		// update statistics
		__STATS__( false,
			io.submit_q.look_avg.val   += len;
			io.submit_q.look_avg.block += block;
			io.submit_q.look_avg.cnt   += 1;
		)

		return picked;
	}

	void __submit( struct io_context * ctx, __u32 idx ) __attribute__((nonnull (1))) {
		__io_data & ring = *ctx->thrd.ring;
		// Get the data we definitely need now
		volatile __u32 * const tail = ring.submit_q.tail;
		const __u32 mask = *ring.submit_q.mask;

		// There are 2 submission schemes, check which one we are using
		if( ring.poller_submits ) {
			// If the poller thread submits, then we just need to add this to the ready array
			__submit_to_ready_array( ring, idx, mask );

			post( ctx->thrd.sem );

			__cfadbg_print_safe( io, "Kernel I/O : Added %u to ready for %p\n", idx, active_thread() );
		}
		else if( ring.eager_submits ) {
			__u32 picked = __submit_to_ready_array( ring, idx, mask );

			#if defined(LEADER_LOCK)
				if( !try_lock(ring.submit_q.submit_lock) ) {
					__STATS__( false,
						io.submit_q.helped += 1;
					)
					return;
				}
				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
				__STATS__( true,
					io.submit_q.leader += 1;
				)
			#else
				for() {
					yield();

					if( try_lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2) ) {
						__STATS__( false,
							io.submit_q.leader += 1;
						)
						break;
					}

					// If someone else collected our index, we are done
					#warning ABA problem
					if( ring.submit_q.ready[picked] != idx ) {
						__STATS__( false,
							io.submit_q.helped += 1;
						)
						return;
					}

					__STATS__( false,
						io.submit_q.busy += 1;
					)
				}
			#endif

			// We got the lock
			// Collect the submissions
			unsigned to_submit = __collect_submitions( ring );

			// Actually submit
			int ret = __io_uring_enter( ring, to_submit, false );

			#if defined(LEADER_LOCK)
				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
				next(ring.submit_q.submit_lock);
			#else
				unlock(ring.submit_q.submit_lock);
			#endif
			if( ret < 0 ) return;

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.rdy += to_submit;
				io.submit_q.submit_avg.csm += ret;
				io.submit_q.submit_avg.cnt += 1;
			)
		}
		else {
			// get mutual exclusion
			#if defined(LEADER_LOCK)
				while(!try_lock(ring.submit_q.submit_lock));
			#else
				lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2);
			#endif

			/* paranoid */ verifyf( ring.submit_q.sqes[ idx ].user_data != 0,
			/* paranoid */          "index %u already reclaimed\n"
			/* paranoid */          "head %u, prev %u, tail %u\n"
			/* paranoid */          "[-0: %u,-1: %u,-2: %u,-3: %u]\n",
			/* paranoid */          idx,
			/* paranoid */          *ring.submit_q.head, ring.submit_q.prev_head, *tail
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 0) & (*ring.submit_q.mask) ]
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 1) & (*ring.submit_q.mask) ]
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 2) & (*ring.submit_q.mask) ]
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 3) & (*ring.submit_q.mask) ]
			/* paranoid */ );

			// Append to the list of ready entries

			/* paranoid */ verify( idx <= mask );
			ring.submit_q.array[ (*tail) & mask ] = idx;
			__atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

			// Submit however many entries need to be submitted
			int ret = __io_uring_enter( ring, 1, false );
			if( ret < 0 ) {
				switch((int)errno) {
				default:
					abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
				}
			}

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.csm += 1;
				io.submit_q.submit_avg.cnt += 1;
			)

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			#if defined(LEADER_LOCK)
				next(ring.submit_q.submit_lock);
			#else
				unlock(ring.submit_q.submit_lock);
			#endif

			__cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
		}
	}

	// #define PARTIAL_SUBMIT 32
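	// Swap ready-array slots back to the sentinel, append the collected sqe
	// indices to the kernel-visible sq.array, advance the tail, and return how
	// many entries are ready to pass to io_uring_enter. PARTIAL_SUBMIT, when
	// enabled, caps the batch per call. (Header comment added.)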
	static unsigned __collect_submitions( struct __io_data & ring ) {
		/* paranoid */ verify( ring.submit_q.ready != 0p );
		/* paranoid */ verify( ring.submit_q.ready_cnt > 0 );

		unsigned to_submit = 0;
		__u32 tail = *ring.submit_q.tail;
		const __u32 mask = *ring.submit_q.mask;
		#if defined(PARTIAL_SUBMIT)
			#if defined(LEADER_LOCK)
				#error PARTIAL_SUBMIT and LEADER_LOCK cannot co-exist
			#endif
			const __u32 cnt = ring.submit_q.ready_cnt > PARTIAL_SUBMIT ? PARTIAL_SUBMIT : ring.submit_q.ready_cnt;
			const __u32 offset = ring.submit_q.prev_ready;
			ring.submit_q.prev_ready += cnt;
		#else
			const __u32 cnt = ring.submit_q.ready_cnt;
			const __u32 offset = 0;
		#endif

		// Go through the list of ready submissions
		for( c; cnt ) {
			__u32 i = (offset + c) % ring.submit_q.ready_cnt;

			// replace any submission with the sentinel, to consume it.
			__u32 idx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED);

			// If it was already the sentinel, then we are done
			if( idx == -1ul32 ) continue;

			// If we got a real submission, append it to the list
			ring.submit_q.array[ (tail + to_submit) & mask ] = idx & mask;
			to_submit++;
		}

		// Increment the tail based on how many we are ready to submit
		__atomic_fetch_add(ring.submit_q.tail, to_submit, __ATOMIC_SEQ_CST);

		return to_submit;
	}

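	// Reclaim SQEs the kernel has already consumed, i.e. those between the
	// previously seen head and the current head, by zeroing their user_data so
	// __submit_alloc can reuse them; returns the number reclaimed. (Header
	// comment added.)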
	static __u32 __release_consumed_submission( struct __io_data & ring ) {
		const __u32 smask = *ring.submit_q.mask;

		if( !try_lock(ring.submit_q.release_lock __cfaabi_dbg_ctx2) ) return 0;
		__u32 chead = *ring.submit_q.head;
		__u32 phead = ring.submit_q.prev_head;
		ring.submit_q.prev_head = chead;
		unlock(ring.submit_q.release_lock);

		__u32 count = chead - phead;
		for( i; count ) {
			__u32 idx = ring.submit_q.array[ (phead + i) & smask ];
			ring.submit_q.sqes[ idx ].user_data = 0;
		}
		return count;
	}
#endif