//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

// #define __CFA_DEBUG_PRINT_IO__
// #define __CFA_DEBUG_PRINT_IO_CORE__

#include "kernel.hfa"
#include "bitmanip.hfa"

#if !defined(HAVE_LINUX_IO_URING_H)
	void __kernel_io_startup( cluster &, unsigned, bool ) {
		// Nothing to do without io_uring
	}

	void __kernel_io_finish_start( cluster & ) {
		// Nothing to do without io_uring
	}

	void __kernel_io_prepare_stop( cluster & ) {
		// Nothing to do without io_uring
	}

	void __kernel_io_shutdown( cluster &, bool ) {
		// Nothing to do without io_uring
	}

#else
	#define _GNU_SOURCE         /* See feature_test_macros(7) */
	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>

	extern "C" {
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "bits/signal.hfa"
	#include "kernel_private.hfa"
	#include "thread.hfa"

	uint32_t entries_per_cluster() {
		return 256;
	}

	static void * __io_poller_slow( void * arg );

	// Weirdly, some systems that do support io_uring don't actually define these
	#ifdef __alpha__
		/*
		 * alpha is the only exception, all other architectures
		 * have common numbers for new system calls.
		 */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    535
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    536
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 537
		#endif
	#else /* !__alpha__ */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    425
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    426
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 427
		#endif
	#endif

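	// Note: since this file does not depend on liburing, io_uring is driven below
	// through raw syscall(2) invocations of the numbers above. A minimal sketch of
	// the two calls used in this file (mirroring __kernel_io_startup and __drain_io):
	//
	//     int fd  = syscall( __NR_io_uring_setup, nentries, &params );
	//     int ret = syscall( __NR_io_uring_enter, fd, to_submit, min_complete,
	//                        IORING_ENTER_GETEVENTS, mask, _NSIG / 8 );
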
	// Fast poller user-thread
	// Not using the "thread" keyword because we want to control
	// more carefully when to start/stop it
	struct __io_poller_fast {
		struct __io_data * ring;
		$thread thrd;
	};

	void ?{}( __io_poller_fast & this, struct cluster & cltr ) {
		this.ring = cltr.io;
		(this.thrd){ "Fast I/O Poller", cltr };
	}
	void ^?{}( __io_poller_fast & mutex this );
	void main( __io_poller_fast & this );
	static inline $thread * get_thread( __io_poller_fast & this ) { return &this.thrd; }
	void ^?{}( __io_poller_fast & mutex this ) {}

	struct __submition_data {
		// Head and tail of the ring (associated with array)
		volatile uint32_t * head;
		volatile uint32_t * tail;
		volatile uint32_t prev_head;

		// The actual kernel ring which uses head/tail
		// indexes into the sqes arrays
		uint32_t * array;

		// number of entries and mask to go with it
		const uint32_t * num;
		const uint32_t * mask;

		// Submission flags, updated by the kernel (e.g. IORING_SQ_NEED_WAKEUP)
		uint32_t * flags;

		// number of sqes the kernel dropped because they were invalid
		uint32_t * dropped;

		// Like head/tail but not seen by the kernel
		volatile uint32_t * ready;
		uint32_t ready_cnt;

		__spinlock_t lock;
		__spinlock_t release_lock;

		// A buffer of sqes (not the actual ring)
		struct io_uring_sqe * sqes;

		// The location and size of the mmaped area
		void * ring_ptr;
		size_t ring_sz;
	};

	struct __completion_data {
		// Head and tail of the ring
		volatile uint32_t * head;
		volatile uint32_t * tail;

		// number of entries and mask to go with it
		const uint32_t * mask;
		const uint32_t * num;

		// number of cqes the kernel dropped because the completion queue was full
		uint32_t * overflow;

		// the kernel ring
		struct io_uring_cqe * cqes;

		// The location and size of the mmaped area
		void * ring_ptr;
		size_t ring_sz;
	};

	struct __io_data {
		struct __submition_data submit_q;
		struct __completion_data completion_q;
		uint32_t ring_flags;
		int cltr_flags;
		int fd;
		semaphore submit;
		volatile bool done;
		struct {
			struct {
				__processor_id_t id;
				void * stack;
				pthread_t kthrd;
				volatile bool blocked;
			} slow;
			__io_poller_fast fast;
			__bin_sem_t sem;
		} poller;
	};
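
	// Polling uses the two cooperating pollers declared above: a "slow" pthread
	// that can block in io_uring_enter waiting for completions, and a "fast"
	// user-level thread that spins/yields on the cluster. When the slow poller
	// finds work it batons it to the fast poller; when the fast poller runs dry
	// it posts poller.sem and parks, handing the ring back to the slow poller.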

	//=============================================================================================
	// I/O Startup / Shutdown logic
	//=============================================================================================
	void __kernel_io_startup( cluster & this, unsigned io_flags, bool main_cluster ) {
		if( (io_flags & CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS) && (io_flags & CFA_CLUSTER_IO_EAGER_SUBMITS) ) {
			abort("CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS and CFA_CLUSTER_IO_EAGER_SUBMITS cannot be mixed\n");
		}

		this.io = malloc();

		// Step 1 : call to setup
		struct io_uring_params params;
		memset(&params, 0, sizeof(params));
		if( io_flags & CFA_CLUSTER_IO_KERNEL_POLL_SUBMITS   ) params.flags |= IORING_SETUP_SQPOLL;
		if( io_flags & CFA_CLUSTER_IO_KERNEL_POLL_COMPLETES ) params.flags |= IORING_SETUP_IOPOLL;

		uint32_t nentries = entries_per_cluster();

		int fd = syscall(__NR_io_uring_setup, nentries, &params );
		if(fd < 0) {
			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
		}

		// Step 2 : mmap result
		memset( this.io, 0, sizeof(struct __io_data) );
		struct __submition_data  & sq = this.io->submit_q;
		struct __completion_data & cq = this.io->completion_q;

		// calculate the right ring size
		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned)           );
		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// adjust the size according to the parameters
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
			}
		#endif

		// mmap the Submit Queue into existence
		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
		if (sq.ring_ptr == (void*)MAP_FAILED) {
			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
		}

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// mmap the Completion Queue into existence (may or may not be needed)
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_ptr = sq.ring_ptr;
			}
			else
		#endif
		{
			// We need a second call to mmap
			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
			if (cq.ring_ptr == (void*)MAP_FAILED) {
				munmap(sq.ring_ptr, sq.ring_sz);
				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
			}
		}

		// mmap the submit queue entries
		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
			munmap(sq.ring_ptr, sq.ring_sz);
			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
		}

		// Get the pointers from the kernel to fill the structure
		// submit queue
		sq.head    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
		sq.tail    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
		sq.mask    = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
		sq.num     = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
		sq.flags   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
		sq.dropped = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);
		sq.array   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
		sq.prev_head = *sq.head;

		{
			const uint32_t num = *sq.num;
			for( i; num ) {
				sq.sqes[i].user_data = 0ul64;
			}
		}

		(sq.lock){};
		(sq.release_lock){};

		if( io_flags & ( CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS | CFA_CLUSTER_IO_EAGER_SUBMITS ) ) {
			/* paranoid */ verify( is_pow2( io_flags >> CFA_CLUSTER_IO_BUFFLEN_OFFSET ) || ((io_flags >> CFA_CLUSTER_IO_BUFFLEN_OFFSET) < 8) );
			sq.ready_cnt = max(io_flags >> CFA_CLUSTER_IO_BUFFLEN_OFFSET, 8);
			sq.ready = alloc_align( 64, sq.ready_cnt );
			for(i; sq.ready_cnt) {
				sq.ready[i] = -1ul32;
			}
		}
		else {
			sq.ready_cnt = 0;
			sq.ready = 0p;
		}

		// completion queue
		cq.head     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
		cq.tail     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
		cq.mask     = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
		cq.num      = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
		cq.overflow = (         uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
		cq.cqes     = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

		// some paranoid checks
		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask );
		/* paranoid */ verifyf( (*cq.num)  >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );

		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
		/* paranoid */ verifyf( (*sq.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.head );
		/* paranoid */ verifyf( (*sq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.tail );

		// Update the global ring info
		this.io->ring_flags = params.flags;
		this.io->cltr_flags = io_flags;
		this.io->fd         = fd;
		this.io->done       = false;
		(this.io->submit){ min(*sq.num, *cq.num) };

		if(!main_cluster) {
			__kernel_io_finish_start( this );
		}
	}

	void __kernel_io_finish_start( cluster & this ) {
		if( this.io->cltr_flags & CFA_CLUSTER_IO_POLLER_USER_THREAD ) {
			__cfadbg_print_safe(io_core, "Kernel I/O : Creating fast poller for cluster %p\n", &this);
			(this.io->poller.fast){ this };
			__thrd_start( this.io->poller.fast, main );
		}

		// Create the poller thread
		__cfadbg_print_safe(io_core, "Kernel I/O : Creating slow poller for cluster %p\n", &this);
		this.io->poller.slow.blocked = false;
		this.io->poller.slow.stack = __create_pthread( &this.io->poller.slow.kthrd, __io_poller_slow, &this );
	}

	void __kernel_io_prepare_stop( cluster & this ) {
		__cfadbg_print_safe(io_core, "Kernel I/O : Stopping pollers for cluster %p\n", &this);
		// Notify the poller thread of the shutdown
		__atomic_store_n(&this.io->done, true, __ATOMIC_SEQ_CST);

		// Stop the IO Poller
		sigval val = { 1 };
		pthread_sigqueue( this.io->poller.slow.kthrd, SIGUSR1, val );
		post( this.io->poller.sem );

		// Wait for the poller thread to finish
		pthread_join( this.io->poller.slow.kthrd, 0p );
		free( this.io->poller.slow.stack );

		__cfadbg_print_safe(io_core, "Kernel I/O : Slow poller stopped for cluster %p\n", &this);

		if( this.io->cltr_flags & CFA_CLUSTER_IO_POLLER_USER_THREAD ) {
			with( this.io->poller.fast ) {
				/* paranoid */ verify( this.nprocessors == 0 || &this == mainCluster );
				/* paranoid */ verify( !ready_mutate_islocked() );

				// We need to adjust the clean-up based on where the thread is
				if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {

					ready_schedule_lock( (struct __processor_id_t *)active_processor() );

					// This is the tricky case
					// The thread was preempted and now it is on the ready queue
					// The thread should be the last on the list
					/* paranoid */ verify( thrd.link.next != 0p );

					// Remove the thread from the ready queue of this cluster
					__attribute__((unused)) bool removed = remove_head( &this, &thrd );
					/* paranoid */ verify( removed );
					thrd.link.next = 0p;
					thrd.link.prev = 0p;
					__cfaabi_dbg_debug_do( thrd.unpark_stale = true );

					// Fixup the thread state
					thrd.state = Blocked;
					thrd.ticket = 0;
					thrd.preempted = __NO_PREEMPTION;

					ready_schedule_unlock( (struct __processor_id_t *)active_processor() );

					// Pretend like the thread was blocked all along
				}
				// !!! This is not an else if !!!
				if( thrd.state == Blocked ) {

					// This is the "easy case"
					// The thread is parked and can easily be moved to the active cluster
					verify( thrd.curr_cluster != active_cluster() || thrd.curr_cluster == mainCluster );
					thrd.curr_cluster = active_cluster();

					// unpark the fast io_poller
					unpark( &thrd __cfaabi_dbg_ctx2 );
				}
				else {

					// The thread is in a weird state
					// I don't know what to do here
					abort("Fast poller thread is in unexpected state, cannot clean-up correctly\n");
				}

			}

			^(this.io->poller.fast){};

			__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller stopped for cluster %p\n", &this);
		}
	}

	void __kernel_io_shutdown( cluster & this, bool main_cluster ) {
		if(!main_cluster) {
			__kernel_io_prepare_stop( this );
		}

		// Shutdown the io rings
		struct __submition_data  & sq = this.io->submit_q;
		struct __completion_data & cq = this.io->completion_q;

		// unmap the submit queue entries
		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));

		// unmap the Submit Queue ring
		munmap(sq.ring_ptr, sq.ring_sz);

		// unmap the Completion Queue ring, if it is different
		if (cq.ring_ptr != sq.ring_ptr) {
			munmap(cq.ring_ptr, cq.ring_sz);
		}

		// close the file descriptor
		close(this.io->fd);

		free( this.io->submit_q.ready ); // Maybe null, doesn't matter
		free( this.io );
	}

	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static unsigned __collect_submitions( struct __io_data & ring );
	static uint32_t __release_consumed_submission( struct __io_data & ring );

	// Process pending completion messages from the io_uring instance
	// This is NOT thread-safe
	static [int, bool] __drain_io( & struct __io_data ring, * sigset_t mask, int waitcnt, bool in_kernel ) {
		/* paranoid */ verify( !kernelTLS.preemption_state.enabled );

		unsigned to_submit = 0;
		if( ring.cltr_flags & CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS ) {
			// If the poller thread also submits, then we need to aggregate the submissions which are ready
			to_submit = __collect_submitions( ring );
		}

		if (to_submit > 0 || waitcnt > 0) {
			int ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, waitcnt, IORING_ENTER_GETEVENTS, mask, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
					return [0, true];
				default:
					abort( "KERNEL ERROR: IO_URING WAIT - %s\n", strerror(errno) );
				}
			}

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			// update statistics
			__STATS__( true,
				if( to_submit > 0 ) {
					io.submit_q.submit_avg.rdy += to_submit;
					io.submit_q.submit_avg.csm += ret;
					io.submit_q.submit_avg.cnt += 1;
				}
			)
		}

		// Memory barrier
		__atomic_thread_fence( __ATOMIC_SEQ_CST );

		// Drain the queue
		unsigned head = *ring.completion_q.head;
		unsigned tail = *ring.completion_q.tail;
		const uint32_t mask = *ring.completion_q.mask;

		// Nothing new was ready, return 0
		if (head == tail) {
			return [0, to_submit > 0];
		}

		uint32_t count = tail - head;
		/* paranoid */ verify( count != 0 );
		for(i; count) {
			unsigned idx = (head + i) & mask;
			struct io_uring_cqe & cqe = ring.completion_q.cqes[idx];

			/* paranoid */ verify(&cqe);

			struct __io_user_data_t * data = (struct __io_user_data_t *)(uintptr_t)cqe.user_data;
			__cfadbg_print_safe( io, "Kernel I/O : Performed reading io cqe %p, result %d for %p\n", data, cqe.res, data->thrd );

			data->result = cqe.res;
			if(!in_kernel) { unpark( data->thrd __cfaabi_dbg_ctx2 ); }
			else { __unpark( &ring.poller.slow.id, data->thrd __cfaabi_dbg_ctx2 ); }
		}

		// Allow new submissions to happen
		// V(ring.submit, count);

		// Mark to the kernel that the cqe has been seen
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		__atomic_fetch_add( ring.completion_q.head, count, __ATOMIC_RELAXED );

		return [count, count > 0 || to_submit > 0];
	}

	static void * __io_poller_slow( void * arg ) {
		#if !defined( __CFA_NO_STATISTICS__ )
			__stats_t local_stats;
			__init_stats( &local_stats );
			kernelTLS.this_stats = &local_stats;
		#endif

		cluster * cltr = (cluster *)arg;
		struct __io_data & ring = *cltr->io;

		ring.poller.slow.id.id = doregister( &ring.poller.slow.id );

		sigset_t mask;
		sigfillset(&mask);
		if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
			abort( "KERNEL ERROR: IO_URING - pthread_sigmask" );
		}

		sigdelset( &mask, SIGUSR1 );

		verify( (*ring.submit_q.head) == (*ring.submit_q.tail) );
		verify( (*ring.completion_q.head) == (*ring.completion_q.tail) );

		__cfadbg_print_safe(io_core, "Kernel I/O : Slow poller for ring %p ready\n", &ring);

		if( ring.cltr_flags & CFA_CLUSTER_IO_POLLER_USER_THREAD ) {
			while(!__atomic_load_n(&ring.done, __ATOMIC_SEQ_CST)) {

				__atomic_store_n( &ring.poller.slow.blocked, true, __ATOMIC_SEQ_CST );

				// In the user-thread approach drain and if anything was drained,
				// baton pass to the user-thread
				int count;
				bool again;
				[count, again] = __drain_io( ring, &mask, 1, true );

				__atomic_store_n( &ring.poller.slow.blocked, false, __ATOMIC_SEQ_CST );

				// Update statistics
				__STATS__( true,
					io.complete_q.completed_avg.val += count;
					io.complete_q.completed_avg.slow_cnt += 1;
				)

				if(again) {
					__cfadbg_print_safe(io_core, "Kernel I/O : Moving ring %p to fast poller\n", &ring);
					__unpark( &ring.poller.slow.id, &ring.poller.fast.thrd __cfaabi_dbg_ctx2 );
					wait( ring.poller.sem );
				}
			}
		}
		else {
			while(!__atomic_load_n(&ring.done, __ATOMIC_SEQ_CST)) {
				// In the naive approach, just poll the io completion queue directly
				int count;
				bool again;
				[count, again] = __drain_io( ring, &mask, 1, true );

				// Update statistics
				__STATS__( true,
					io.complete_q.completed_avg.val += count;
					io.complete_q.completed_avg.slow_cnt += 1;
				)
			}
		}

		__cfadbg_print_safe(io_core, "Kernel I/O : Slow poller for ring %p stopping\n", &ring);

		unregister( &ring.poller.slow.id );

		#if !defined(__CFA_NO_STATISTICS__)
			__tally_stats(cltr->stats, &local_stats);
		#endif

		return 0p;
	}

	void main( __io_poller_fast & this ) {
		verify( this.ring->cltr_flags & CFA_CLUSTER_IO_POLLER_USER_THREAD );

		// Start parked
		park( __cfaabi_dbg_ctx );

		__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller for ring %p ready\n", &this.ring);

		int reset = 0;

		// Then loop until we need to stop
		while(!__atomic_load_n(&this.ring->done, __ATOMIC_SEQ_CST)) {

			// Drain the io
			int count;
			bool again;
			disable_interrupts();
				[count, again] = __drain_io( *this.ring, 0p, 0, false );

				if(!again) reset++;

				// Update statistics
				__STATS__( true,
					io.complete_q.completed_avg.val += count;
					io.complete_q.completed_avg.fast_cnt += 1;
				)
			enable_interrupts( __cfaabi_dbg_ctx );

			// If we got something, just yield and check again
			if(reset < 5) {
				yield();
			}
			// We didn't get anything, baton pass to the slow poller
			else {
				__cfadbg_print_safe(io_core, "Kernel I/O : Moving ring %p to slow poller\n", &this.ring);
				reset = 0;

				// wake up the slow poller
				post( this.ring->poller.sem );

				// park this thread
				park( __cfaabi_dbg_ctx );
			}
		}

		__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller for ring %p stopping\n", &this.ring);
	}

	static inline void __wake_poller( struct __io_data & ring ) __attribute__((artificial));
	static inline void __wake_poller( struct __io_data & ring ) {
		if(!__atomic_load_n( &ring.poller.slow.blocked, __ATOMIC_SEQ_CST)) return;

		sigval val = { 1 };
		pthread_sigqueue( ring.poller.slow.kthrd, SIGUSR1, val );
	}

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps :
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry, this is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach two points of consensus at the same time:
	//     A - The order in which entries are listed in the array: no two threads must pick the
	//         same index for their entries
	//     B - When the tail can be updated for the kernel: EVERY entry in the array between
	//         head and tail must be fully filled and should never be touched again.
	//

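	// A minimal sketch of the three steps from a caller's perspective (hypothetical
	// wrapper, for illustration only; the real wrappers live with the I/O calls):
	//
	//     struct io_uring_sqe * sqe;
	//     uint32_t idx;
	//     [sqe, idx] = __submit_alloc( ring, (uint64_t)&data );  // step 1 : grab a free sqe
	//     /* fill sqe->opcode, sqe->fd, sqe->addr, ... */        // step 2 : describe the op
	//     __submit( ring, idx );                                 // step 3 : publish to the kernel
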
	[* struct io_uring_sqe, uint32_t] __submit_alloc( struct __io_data & ring, uint64_t data ) {
		/* paranoid */ verify( data != 0 );

		// Prepare the data we need
		__attribute((unused)) int len   = 0;
		__attribute((unused)) int block = 0;
		uint32_t cnt = *ring.submit_q.num;
		uint32_t mask = *ring.submit_q.mask;

		disable_interrupts();
			uint32_t off = __tls_rand();
		enable_interrupts( __cfaabi_dbg_ctx );

		// Loop around looking for an available spot
		for() {
			// Look through the list starting at some offset
			for(i; cnt) {
				uint64_t expected = 0;
				uint32_t idx = (i + off) & mask;
				struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx];
				volatile uint64_t * udata = &sqe->user_data;

				if( *udata == expected &&
					__atomic_compare_exchange_n( udata, &expected, data, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) )
				{
					// update statistics
					__STATS__( false,
						io.submit_q.alloc_avg.val   += len;
						io.submit_q.alloc_avg.block += block;
						io.submit_q.alloc_avg.cnt   += 1;
					)

					// Success, return the data
					return [sqe, idx];
				}
				verify(expected != data);

				len ++;
			}

			block++;
			yield();
		}
	}

	static inline uint32_t __submit_to_ready_array( struct __io_data & ring, uint32_t idx, const uint32_t mask ) {
		/* paranoid */ verify( idx <= mask   );
		/* paranoid */ verify( idx != -1ul32 );

		// We need to find a spot in the ready array
		__attribute((unused)) int len   = 0;
		__attribute((unused)) int block = 0;
		uint32_t ready_mask = ring.submit_q.ready_cnt - 1;

		disable_interrupts();
			uint32_t off = __tls_rand();
		enable_interrupts( __cfaabi_dbg_ctx );

		uint32_t picked;
		LOOKING: for() {
			for(i; ring.submit_q.ready_cnt) {
				picked = (i + off) & ready_mask;
				uint32_t expected = -1ul32;
				if( __atomic_compare_exchange_n( &ring.submit_q.ready[picked], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
					break LOOKING;
				}
				verify(expected != idx);

				len ++;
			}

			block++;
			if( try_lock(ring.submit_q.lock __cfaabi_dbg_ctx2) ) {
				__release_consumed_submission( ring );
				unlock( ring.submit_q.lock );
			}
			else {
				yield();
			}
		}

		// update statistics
		__STATS__( false,
			io.submit_q.look_avg.val   += len;
			io.submit_q.look_avg.block += block;
			io.submit_q.look_avg.cnt   += 1;
		)

		return picked;
	}

	void __submit( struct __io_data & ring, uint32_t idx ) {
		// Get now the data we definitely need
		uint32_t * const tail = ring.submit_q.tail;
		const uint32_t mask = *ring.submit_q.mask;

		// There are 2 submission schemes, check which one we are using
		if( ring.cltr_flags & CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS ) {
			// If the poller thread submits, then we just need to add this to the ready array
			__submit_to_ready_array( ring, idx, mask );

			__wake_poller( ring );

			__cfadbg_print_safe( io, "Kernel I/O : Added %u to ready for %p\n", idx, active_thread() );
		}
		else if( ring.cltr_flags & CFA_CLUSTER_IO_EAGER_SUBMITS ) {
			uint32_t picked = __submit_to_ready_array( ring, idx, mask );

			for() {
				yield();

				// If someone else collected our index, we are done
				#warning ABA problem
				if( ring.submit_q.ready[picked] != idx ) {
					__STATS__( false,
						io.submit_q.helped += 1;
					)
					return;
				}

				if( try_lock(ring.submit_q.lock __cfaabi_dbg_ctx2) ) {
					__STATS__( false,
						io.submit_q.leader += 1;
					)
					break;
				}

				__STATS__( false,
					io.submit_q.busy += 1;
				)
			}

			// We got the lock
			unsigned to_submit = __collect_submitions( ring );
			int ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, 0, 0, 0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
					unlock(ring.submit_q.lock);
					return;
				default:
					abort( "KERNEL ERROR: IO_URING WAIT - %s\n", strerror(errno) );
				}
			}

			/* paranoid */ verify( ret > 0 );

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			// update statistics
			__STATS__( true,
				io.submit_q.submit_avg.rdy += to_submit;
				io.submit_q.submit_avg.csm += ret;
				io.submit_q.submit_avg.cnt += 1;
			)

			unlock(ring.submit_q.lock);
		}
		else {
			// get mutual exclusion
			lock(ring.submit_q.lock __cfaabi_dbg_ctx2);

			// Append to the list of ready entries

			/* paranoid */ verify( idx <= mask );

			ring.submit_q.array[ (*tail) & mask ] = idx & mask;
			__atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

			/* paranoid */ verify( ring.submit_q.sqes[ idx ].user_data != 0 );

			// Submit however many entries need to be submitted
			int ret = syscall( __NR_io_uring_enter, ring.fd, 1, 0, 0, 0p, 0);
			if( ret < 0 ) {
				switch((int)errno) {
				default:
					abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
				}
			}

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.csm += 1;
				io.submit_q.submit_avg.cnt += 1;
			)

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			unlock(ring.submit_q.lock);

			__cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
		}
	}

	static unsigned __collect_submitions( struct __io_data & ring ) {
		/* paranoid */ verify( ring.submit_q.ready != 0p );
		/* paranoid */ verify( ring.submit_q.ready_cnt > 0 );

		unsigned to_submit = 0;
		uint32_t tail = *ring.submit_q.tail;
		const uint32_t mask = *ring.submit_q.mask;

		// Go through the list of ready submissions
		for( i; ring.submit_q.ready_cnt ) {
			// replace any submission with the sentinel, to consume it.
			uint32_t idx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED);

			// If it was already the sentinel, then we are done
			if( idx == -1ul32 ) continue;

			// If we got a real submission, append it to the list
			ring.submit_q.array[ (tail + to_submit) & mask ] = idx & mask;
			to_submit++;
		}

		// Increment the tail based on how many we are ready to submit
		__atomic_fetch_add(ring.submit_q.tail, to_submit, __ATOMIC_SEQ_CST);

		return to_submit;
	}
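
	// __release_consumed_submission (below) relies on unsigned wraparound to count
	// consumed sqes: e.g. with chead == 2 and phead == 0xfffffffe, chead - phead == 4,
	// so the four sqes consumed across the 32-bit wrap are still recycled correctly.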

	static uint32_t __release_consumed_submission( struct __io_data & ring ) {
		const uint32_t smask = *ring.submit_q.mask;

		if( !try_lock(ring.submit_q.release_lock __cfaabi_dbg_ctx2) ) return 0;
		uint32_t chead = *ring.submit_q.head;
		uint32_t phead = ring.submit_q.prev_head;
		ring.submit_q.prev_head = chead;
		unlock(ring.submit_q.release_lock);

		uint32_t count = chead - phead;
		for( i; count ) {
			uint32_t idx = ring.submit_q.array[ (phead + i) & smask ];
			ring.submit_q.sqes[ idx ].user_data = 0;
		}
		return count;
	}
#endif
---|