//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
        // #define __CFA_DEBUG_PRINT_IO__
        // #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
        #define _GNU_SOURCE         /* See feature_test_macros(7) */
        #include <errno.h>
        #include <signal.h>
        #include <stdint.h>
        #include <string.h>
        #include <unistd.h>

        extern "C" {
                #include <sys/epoll.h>
                #include <sys/syscall.h>

                #include <linux/io_uring.h>
        }

        #include "stats.hfa"
        #include "kernel.hfa"
        #include "kernel/fwd.hfa"
        #include "io/types.hfa"

        // returns true if acquired as leader or second leader
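        // Lock value encoding, as used by the code below: 0p means unlocked, 1p
        // means locked with no thread waiting to be the next leader, and any other
        // value is the next-leader thread pointer with its low bit set.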
        static inline bool try_lock( __leaderlock_t & this ) {
                const uintptr_t thrd = 1z | (uintptr_t)active_thread();
                bool block;
                disable_interrupts();
                for() {
                        struct $thread * expected = this.value;
                        if( 1p != expected && 0p != expected ) {
                                /* paranoid */ verify( thrd != (uintptr_t)expected ); // We better not already be the next leader
                                enable_interrupts( __cfaabi_dbg_ctx );
                                return false;
                        }
                        struct $thread * desired;
                        if( 0p == expected ) {
                                // If the lock isn't locked, acquire it; no need to block
                                desired = 1p;
                                block = false;
                        }
                        else {
                                // If the lock is already locked, try becoming the next leader
                                desired = (struct $thread *)thrd;
                                block = true;
                        }
                        if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
                }
                if( block ) {
                        enable_interrupts( __cfaabi_dbg_ctx );
                        park( __cfaabi_dbg_ctx );
                        disable_interrupts();
                }
                return true;
        }

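        // Pass the lock along: wake the next leader if one is waiting (keeping the
        // lock held for it), otherwise release the lock outright.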
        static inline bool next( __leaderlock_t & this ) {
                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
                struct $thread * nextt;
                for() {
                        struct $thread * expected = this.value;
                        /* paranoid */ verify( (1 & (uintptr_t)expected) == 1 ); // The lock better be locked

                        struct $thread * desired;
                        if( 1p == expected ) {
                                // No next leader, just unlock
                                desired = 0p;
                                nextt   = 0p;
                        }
                        else {
                                // There is a next leader, remove it but keep the lock locked
                                desired = 1p;
                                nextt   = (struct $thread *)(~1z & (uintptr_t)expected);
                        }
                        if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
                }

                if(nextt) {
                        unpark( nextt __cfaabi_dbg_ctx2 );
                        enable_interrupts( __cfaabi_dbg_ctx );
                        return true;
                }
                enable_interrupts( __cfaabi_dbg_ctx );
                return false;
        }

//=============================================================================================
// I/O Syscall
//=============================================================================================
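        // Decide whether an io_uring_enter syscall is needed at all: without
        // IORING_SETUP_SQPOLL a syscall is always needed to submit; with it, the
        // kernel polls the submission queue itself and only needs waking
        // (IORING_ENTER_SQ_WAKEUP) when it has gone to sleep. On the completion
        // side, a syscall is only required with IORING_SETUP_IOPOLL, where the
        // kernel reaps completions during the call.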
        static int __io_uring_enter( struct __io_data & ring, unsigned to_submit, bool get ) {
                bool need_sys_to_submit = false;
                bool need_sys_to_complete = false;
                unsigned flags = 0;

                TO_SUBMIT:
                if( to_submit > 0 ) {
                        if( !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
                                need_sys_to_submit = true;
                                break TO_SUBMIT;
                        }
                        if( (*ring.submit_q.flags) & IORING_SQ_NEED_WAKEUP ) {
                                need_sys_to_submit = true;
                                flags |= IORING_ENTER_SQ_WAKEUP;
                        }
                }

                if( get && !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
                        flags |= IORING_ENTER_GETEVENTS;
                        if( (ring.ring_flags & IORING_SETUP_IOPOLL) ) {
                                need_sys_to_complete = true;
                        }
                }

                int ret = 0;
                if( need_sys_to_submit || need_sys_to_complete ) {
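                        // Raw syscall: io_uring_enter(fd, to_submit, min_complete, flags, sig, sigsz).
                        // min_complete is 0 because completions are read directly from the shared
                        // CQ ring; no signal mask is passed (0p, with size _NSIG / 8).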
                        ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, 0, flags, 0p, _NSIG / 8);
                        if( ret < 0 ) {
                                switch((int)errno) {
                                case EAGAIN:
                                case EINTR:
                                        ret = -1;
                                        break;
                                default:
                                        abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
                                }
                        }
                }

                // Memory barrier
                __atomic_thread_fence( __ATOMIC_SEQ_CST );
                return ret;
        }

//=============================================================================================
// I/O Polling
//=============================================================================================
        static unsigned __collect_submitions( struct __io_data & ring );
        static __u32 __release_consumed_submission( struct __io_data & ring );

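        // Publish the result of a completed request and wake the thread waiting on it.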
        static inline void process(struct io_uring_cqe & cqe ) {
                struct __io_user_data_t * data = (struct __io_user_data_t *)(uintptr_t)cqe.user_data;
                __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", data, cqe.res, data->thrd );

                data->result = cqe.res;
                post( data->sem );
        }

        // Process the pending completion messages from the io_uring
        // This is NOT thread-safe
        static [int, bool] __drain_io( & struct __io_data ring ) {
                /* paranoid */ verify( !kernelTLS.preemption_state.enabled );

                unsigned to_submit = 0;
                if( ring.poller_submits ) {
                        // If the poller thread also submits, then we need to aggregate the submissions which are ready
                        to_submit = __collect_submitions( ring );
                }

                int ret = __io_uring_enter(ring, to_submit, true);
                if( ret < 0 ) {
                        return [0, true];
                }

                // update statistics
                if (to_submit > 0) {
                        __STATS__( true,
                                if( to_submit > 0 ) {
                                        io.submit_q.submit_avg.rdy += to_submit;
                                        io.submit_q.submit_avg.csm += ret;
                                        io.submit_q.submit_avg.cnt += 1;
                                }
                        )
                }

                // Release the consumed SQEs
                __release_consumed_submission( ring );

                // Drain the queue
                unsigned head = *ring.completion_q.head;
                unsigned tail = *ring.completion_q.tail;
                const __u32 mask = *ring.completion_q.mask;

                // Nothing new, return 0
                if (head == tail) {
                        return [0, to_submit > 0];
                }

                __u32 count = tail - head;
                /* paranoid */ verify( count != 0 );
                for(i; count) {
                        unsigned idx = (head + i) & mask;
                        struct io_uring_cqe & cqe = ring.completion_q.cqes[idx];

                        /* paranoid */ verify(&cqe);

                        process( cqe );
                }

                // Mark to the kernel that the cqes have been seen
                // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
                __atomic_thread_fence( __ATOMIC_SEQ_CST );
                __atomic_fetch_add( ring.completion_q.head, count, __ATOMIC_RELAXED );

                return [count, count > 0 || to_submit > 0];
        }

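        // Fast poller thread: repeatedly drain the ring, yielding between attempts;
        // after 5 consecutive drains with no progress, register with epoll via
        // __ioctx_prepare_block and park until woken.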
        void main( $io_ctx_thread & this ) {
                epoll_event ev;
                __ioctx_register( this, ev );

                __cfadbg_print_safe(io_core, "Kernel I/O : IO poller %p for ring %p ready\n", &this, &this.ring);

                int reset = 0;
                // Then loop until we need to stop
                while(!__atomic_load_n(&this.done, __ATOMIC_SEQ_CST)) {
                        // Drain the io
                        int count;
                        bool again;
                        disable_interrupts();
                                [count, again] = __drain_io( *this.ring );

                                if(!again) reset++;

                                // Update statistics
                                __STATS__( true,
                                        io.complete_q.completed_avg.val += count;
                                        io.complete_q.completed_avg.cnt += 1;
                                )
                        enable_interrupts( __cfaabi_dbg_ctx );

                        // If we got something, just yield and check again
                        if(reset < 5) {
                                yield();
                        }
                        // We didn't get anything, baton-pass to the slow poller
                        else {
                                __STATS__( false,
                                        io.complete_q.blocks += 1;
                                )
                                __cfadbg_print_safe(io_core, "Kernel I/O : Parking io poller %p\n", &this.self);
                                reset = 0;

                                // block this thread
                                __ioctx_prepare_block( this, ev );
                                wait( this.sem );
                        }
                }

                __cfadbg_print_safe(io_core, "Kernel I/O : Fast poller for ring %p stopping\n", &this.ring);
        }

//=============================================================================================
// I/O Submissions
//=============================================================================================

// Submission steps:
// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
//     need to write an allocator that allows allocating concurrently.
//
// 2 - Actually fill the submit entry; this is the only simple and straightforward step.
//
// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
//     needs to reach two consensus decisions at the same time:
//     A - The order in which entries are listed in the array: no two threads must pick the
//         same index for their entries.
//     B - When the tail can be updated for the kernel. EVERY entry in the array between
//         head and tail must be fully filled and shouldn't ever be touched again.
//

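        // Step 1: find a free sqe by racing a CAS on its user_data field (0 means
        // free), starting the search at a random offset to reduce contention.
        // Usage sketch (with a hypothetical waiter of type __io_user_data_t, whose
        // address becomes the user_data):
        //   [sqe, idx] = __submit_alloc( ring, (__u64)&waiter );
        //   ... fill in *sqe ...
        //   __submit( ctx, idx );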
        [* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data ) {
                /* paranoid */ verify( data != 0 );

                // Prepare the data we need
                __attribute((unused)) int len   = 0;
                __attribute((unused)) int block = 0;
                __u32 cnt = *ring.submit_q.num;
                __u32 mask = *ring.submit_q.mask;

                disable_interrupts();
                        __u32 off = __tls_rand();
                enable_interrupts( __cfaabi_dbg_ctx );

                // Loop around looking for an available spot
                for() {
                        // Look through the list starting at some offset
                        for(i; cnt) {
                                __u64 expected = 0;
                                __u32 idx = (i + off) & mask;
                                struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx];
                                volatile __u64 * udata = &sqe->user_data;

                                if( *udata == expected &&
                                        __atomic_compare_exchange_n( udata, &expected, data, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) )
                                {
                                        // update statistics
                                        __STATS__( false,
                                                io.submit_q.alloc_avg.val   += len;
                                                io.submit_q.alloc_avg.block += block;
                                                io.submit_q.alloc_avg.cnt   += 1;
                                        )

                                        // Success, return the data
                                        return [sqe, idx];
                                }
                                verify(expected != data);

                                len ++;
                        }

                        block++;
                        yield();
                }
        }

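        // Publish an allocated sqe index into the 'ready' side-array, where the
        // thread holding the submit lock will collect it. Returns the slot used,
        // so the caller can later check whether the index was already collected.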
        static inline __u32 __submit_to_ready_array( struct __io_data & ring, __u32 idx, const __u32 mask ) {
                /* paranoid */ verify( idx <= mask   );
                /* paranoid */ verify( idx != -1ul32 );

                // We need to find a spot in the ready array
                __attribute((unused)) int len   = 0;
                __attribute((unused)) int block = 0;
                __u32 ready_mask = ring.submit_q.ready_cnt - 1;

                disable_interrupts();
                        __u32 off = __tls_rand();
                enable_interrupts( __cfaabi_dbg_ctx );

                __u32 picked;
                LOOKING: for() {
                        for(i; ring.submit_q.ready_cnt) {
                                picked = (i + off) & ready_mask;
                                __u32 expected = -1ul32;
                                if( __atomic_compare_exchange_n( &ring.submit_q.ready[picked], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
                                        break LOOKING;
                                }
                                verify(expected != idx);

                                len ++;
                        }

                        block++;

                        __u32 released = __release_consumed_submission( ring );
                        if( released == 0 ) {
                                yield();
                        }
                }

                // update statistics
                __STATS__( false,
                        io.submit_q.look_avg.val   += len;
                        io.submit_q.look_avg.block += block;
                        io.submit_q.look_avg.cnt   += 1;
                )

                return picked;
        }

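        // Step 3: hand a filled sqe off to the kernel. Three schemes are supported:
        // poller submits (publish to the ready array and wake the poller thread),
        // eager submits (publish, then race for the submit lock and batch-submit on
        // behalf of everyone), or direct (serialize behind the submit lock and
        // submit this one entry immediately).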
        void __submit( struct io_context * ctx, __u32 idx ) __attribute__((nonnull (1))) {
                __io_data & ring = *ctx->thrd.ring;
                // Get the data we definitely need now
                volatile __u32 * const tail = ring.submit_q.tail;
                const __u32 mask  = *ring.submit_q.mask;

                // There are 2 submission schemes, check which one we are using
                if( ring.poller_submits ) {
                        // If the poller thread submits, then we just need to add this to the ready array
                        __submit_to_ready_array( ring, idx, mask );

                        post( ctx->thrd.sem );

                        __cfadbg_print_safe( io, "Kernel I/O : Added %u to ready for %p\n", idx, active_thread() );
                }
                else if( ring.eager_submits ) {
                        __u32 picked = __submit_to_ready_array( ring, idx, mask );

                        #if defined(LEADER_LOCK)
                                if( !try_lock(ring.submit_q.submit_lock) ) {
                                        __STATS__( false,
                                                io.submit_q.helped += 1;
                                        )
                                        return;
                                }
                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
                                __STATS__( true,
                                        io.submit_q.leader += 1;
                                )
                        #else
                                for() {
                                        yield();

                                        if( try_lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2) ) {
                                                __STATS__( false,
                                                        io.submit_q.leader += 1;
                                                )
                                                break;
                                        }

                                        // If someone else collected our index, we are done
                                        #warning ABA problem
                                        if( ring.submit_q.ready[picked] != idx ) {
                                                __STATS__( false,
                                                        io.submit_q.helped += 1;
                                                )
                                                return;
                                        }

                                        __STATS__( false,
                                                io.submit_q.busy += 1;
                                        )
                                }
                        #endif

                        // We got the lock
                        // Collect the submissions
                        unsigned to_submit = __collect_submitions( ring );

                        // Actually submit
                        int ret = __io_uring_enter( ring, to_submit, false );

                        #if defined(LEADER_LOCK)
                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
                                next(ring.submit_q.submit_lock);
                        #else
                                unlock(ring.submit_q.submit_lock);
                        #endif
                        if( ret < 0 ) return;

                        // Release the consumed SQEs
                        __release_consumed_submission( ring );

                        // update statistics
                        __STATS__( false,
                                io.submit_q.submit_avg.rdy += to_submit;
                                io.submit_q.submit_avg.csm += ret;
                                io.submit_q.submit_avg.cnt += 1;
                        )
                }
                else {
                        // get mutual exclusion
                        #if defined(LEADER_LOCK)
                                while(!try_lock(ring.submit_q.submit_lock));
                        #else
                                lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2);
                        #endif

                        /* paranoid */ verifyf( ring.submit_q.sqes[ idx ].user_data != 0,
                        /* paranoid */  "index %u already reclaimed\n"
                        /* paranoid */  "head %u, prev %u, tail %u\n"
                        /* paranoid */  "[-0: %u,-1: %u,-2: %u,-3: %u]\n",
                        /* paranoid */  idx,
                        /* paranoid */  *ring.submit_q.head, ring.submit_q.prev_head, *tail
                        /* paranoid */  ,ring.submit_q.array[ ((*ring.submit_q.head) - 0) & (*ring.submit_q.mask) ]
                        /* paranoid */  ,ring.submit_q.array[ ((*ring.submit_q.head) - 1) & (*ring.submit_q.mask) ]
                        /* paranoid */  ,ring.submit_q.array[ ((*ring.submit_q.head) - 2) & (*ring.submit_q.mask) ]
                        /* paranoid */  ,ring.submit_q.array[ ((*ring.submit_q.head) - 3) & (*ring.submit_q.mask) ]
                        /* paranoid */ );

                        // Append to the list of ready entries

                        /* paranoid */ verify( idx <= mask );
                        ring.submit_q.array[ (*tail) & mask ] = idx;
                        __atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

                        // Submit however many entries need to be submitted
                        int ret = __io_uring_enter( ring, 1, false );
                        if( ret < 0 ) {
                                switch((int)errno) {
                                default:
                                        abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
                                }
                        }

                        // update statistics
                        __STATS__( false,
                                io.submit_q.submit_avg.csm += 1;
                                io.submit_q.submit_avg.cnt += 1;
                        )

                        // Release the consumed SQEs
                        __release_consumed_submission( ring );

                        #if defined(LEADER_LOCK)
                                next(ring.submit_q.submit_lock);
                        #else
                                unlock(ring.submit_q.submit_lock);
                        #endif

                        __cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
                }
        }

        // #define PARTIAL_SUBMIT 32
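        // Collect the indices published in the ready array (consuming each slot by
        // swapping in the -1 sentinel), append them to sq.array, and advance the
        // tail so the kernel sees them. Callers serialize this, either through the
        // submit lock or by being the single poller thread.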
        static unsigned __collect_submitions( struct __io_data & ring ) {
                /* paranoid */ verify( ring.submit_q.ready != 0p );
                /* paranoid */ verify( ring.submit_q.ready_cnt > 0 );

                unsigned to_submit = 0;
                __u32 tail = *ring.submit_q.tail;
                const __u32 mask = *ring.submit_q.mask;
                #if defined(PARTIAL_SUBMIT)
                        #if defined(LEADER_LOCK)
                                #error PARTIAL_SUBMIT and LEADER_LOCK cannot co-exist
                        #endif
                        const __u32 cnt = ring.submit_q.ready_cnt > PARTIAL_SUBMIT ? PARTIAL_SUBMIT : ring.submit_q.ready_cnt;
                        const __u32 offset = ring.submit_q.prev_ready;
                        ring.submit_q.prev_ready += cnt;
                #else
                        const __u32 cnt = ring.submit_q.ready_cnt;
                        const __u32 offset = 0;
                #endif

                // Go through the list of ready submissions
                for( c; cnt ) {
                        __u32 i = (offset + c) % ring.submit_q.ready_cnt;

                        // replace any submission with the sentinel, to consume it.
                        __u32 idx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED);

                        // If it was already the sentinel, then we are done
                        if( idx == -1ul32 ) continue;

                        // If we got a real submission, append it to the list
                        ring.submit_q.array[ (tail + to_submit) & mask ] = idx & mask;
                        to_submit++;
                }

                // Increment the tail based on how many we are ready to submit
                __atomic_fetch_add(ring.submit_q.tail, to_submit, __ATOMIC_SEQ_CST);

                return to_submit;
        }

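        // Reclaim the sqes the kernel has consumed: zero the user_data of every
        // entry between the previously recorded head and the current head, making
        // those sqes available to __submit_alloc again. Returns the number reclaimed.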
        static __u32 __release_consumed_submission( struct __io_data & ring ) {
                const __u32 smask = *ring.submit_q.mask;

                if( !try_lock(ring.submit_q.release_lock __cfaabi_dbg_ctx2) ) return 0;
                __u32 chead = *ring.submit_q.head;
                __u32 phead = ring.submit_q.prev_head;
                ring.submit_q.prev_head = chead;
                unlock(ring.submit_q.release_lock);

                __u32 count = chead - phead;
                for( i; count ) {
                        __u32 idx = ring.submit_q.array[ (phead + i) & smask ];
                        ring.submit_q.sqes[ idx ].user_data = 0;
                }
                return count;
        }
#endif