//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io/setup.cfa --
//
// Author           : Thierry Delisle
// Created On       : Fri Jul 31 16:25:51 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE         /* See feature_test_macros(7) */

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif

#include "io/types.hfa"
#include "kernel.hfa"

#if !defined(CFA_HAVE_LINUX_IO_URING_H)
	void __kernel_io_startup() {
		// Nothing to do without io_uring
	}

	void __kernel_io_shutdown() {
		// Nothing to do without io_uring
	}

	void ?{}(io_context_params & this) {}

	void ?{}(io_context & this, struct cluster & cl) {}
	void ?{}(io_context & this, struct cluster & cl, const io_context_params & params) {}

	void ^?{}(io_context & this) {}
	void ^?{}(io_context & this, bool cluster_context) {}

	void register_fixed_files( io_context &, int *, unsigned ) {}
	void register_fixed_files( cluster &, int *, unsigned ) {}

#else
	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <signal.h>
	#include <unistd.h>

	extern "C" {
		#include <pthread.h>
		#include <sys/epoll.h>
		#include <sys/eventfd.h>
		#include <sys/mman.h>
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "bitmanip.hfa"
	#include "kernel_private.hfa"
	#include "thread.hfa"

	void ?{}(io_context_params & this) {
		this.num_entries = 256;
		this.num_ready = 256;
		this.submit_aff = -1;
		this.eager_submits = false;
		this.poller_submits = false;
		this.poll_submit = false;
		this.poll_complete = false;
	}

	static void * __io_poller_slow( void * arg );

	// Weirdly, some systems that do support io_uring don't actually define these
	#ifdef __alpha__
		/*
		 * alpha is the only exception, all other architectures
		 * have common numbers for new system calls.
		 */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    535
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    536
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 537
		#endif
	#else /* !__alpha__ */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    425
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    426
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 427
		#endif
	#endif

	//=============================================================================================
	// I/O Startup / Shutdown logic + Master Poller
	//=============================================================================================

	// IO Master poller loop forward
	static void * iopoll_loop( __attribute__((unused)) void * args );

	static struct {
		pthread_t thrd;          // pthread handle to io poller thread
		void * stack;            // pthread stack for io poller thread
		int epollfd;             // file descriptor to the epoll instance
		volatile bool run;       // Whether or not to continue
		volatile bool stopped;   // Whether the poller has finished running
		volatile uint64_t epoch; // Epoch used for memory reclamation
	} iopoll;

	void __kernel_io_startup(void) {
		__cfadbg_print_safe(io_core, "Kernel : Creating EPOLL instance\n" );

		iopoll.epollfd = epoll_create1(0);
		if (iopoll.epollfd == -1) {
			abort( "internal error, epoll_create1\n");
		}

		__cfadbg_print_safe(io_core, "Kernel : Starting io poller thread\n" );

		iopoll.stack = __create_pthread( &iopoll.thrd, iopoll_loop, 0p );
		iopoll.run = true;
		iopoll.stopped = false;
		iopoll.epoch = 0;
	}

	void __kernel_io_shutdown(void) {
		// Notify the io poller thread of the shutdown
		iopoll.run = false;
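		// SIGUSR1 is the only signal left unblocked in the poller's epoll_pwait mask,
		// so queueing it interrupts the wait and lets the poller observe run == false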
		sigval val = { 1 };
		pthread_sigqueue( iopoll.thrd, SIGUSR1, val );

		// Wait for the io poller thread to finish

		__destroy_pthread( iopoll.thrd, iopoll.stack, 0p );

		int ret = close(iopoll.epollfd);
		if (ret == -1) {
			abort( "internal error, close epoll\n");
		}

		// Io polling is now fully stopped

		__cfadbg_print_safe(io_core, "Kernel : IO poller stopped\n" );
	}

	static void * iopoll_loop( __attribute__((unused)) void * args ) {
		__processor_id_t id;
		id.full_proc = false;
		id.id = doregister(&id);
		__cfaabi_tls.this_proc_id = &id;
		__cfadbg_print_safe(io_core, "Kernel : IO poller thread starting\n" );

		// Block signals to control when they arrive
		sigset_t mask;
		sigfillset(&mask);
		if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
			abort( "internal error, pthread_sigmask" );
		}

		sigdelset( &mask, SIGUSR1 );

		// Create sufficient events
		struct epoll_event events[10];
		// Main loop
		while( iopoll.run ) {
			__cfadbg_print_safe(io_core, "Kernel I/O - epoll : waiting on io_uring contexts\n");

			// increment the epoch to notify any deleters we are starting a new cycle
			__atomic_fetch_add(&iopoll.epoch, 1, __ATOMIC_SEQ_CST);

			// Wait for events
			int nfds = epoll_pwait( iopoll.epollfd, events, 10, -1, &mask );

			__cfadbg_print_safe(io_core, "Kernel I/O - epoll : %d io contexts events, waking up\n", nfds);

			// Check if an error occurred
			if (nfds == -1) {
				if( errno == EINTR ) continue;
				abort( "internal error, epoll_pwait" );
			}

			for(i; nfds) {
				$io_ctx_thread * io_ctx = ($io_ctx_thread *)(uintptr_t)events[i].data.u64;
				/* paranoid */ verify( io_ctx );
				__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Unparking io poller %d (%p)\n", io_ctx->ring->fd, io_ctx);
				#if !defined( __CFA_NO_STATISTICS__ )
					__cfaabi_tls.this_stats = io_ctx->self.curr_cluster->stats;
				#endif

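				// Drain the eventfd counter so the ring can signal it again on the next completion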
				eventfd_t v;
				eventfd_read(io_ctx->ring->efd, &v);

				post( io_ctx->sem );
			}
		}

		__atomic_store_n(&iopoll.stopped, true, __ATOMIC_SEQ_CST);

		__cfadbg_print_safe(io_core, "Kernel : IO poller thread stopping\n" );
		unregister(&id);
		return 0p;
	}

	//=============================================================================================
	// I/O Context Construction/Destruction
	//=============================================================================================

	void ?{}($io_ctx_thread & this, struct cluster & cl) { (this.self){ "IO Poller", cl }; }
	void main( $io_ctx_thread & this );
	static inline $thread * get_thread( $io_ctx_thread & this ) { return &this.self; }
	void ^?{}( $io_ctx_thread & mutex this ) {}

	static void __io_create ( __io_data & this, const io_context_params & params_in );
	static void __io_destroy( __io_data & this );

	void ?{}(io_context & this, struct cluster & cl, const io_context_params & params) {
		(this.thrd){ cl };
		this.thrd.ring = malloc();
		__cfadbg_print_safe(io_core, "Kernel I/O : Creating ring for io_context %p\n", &this);
		__io_create( *this.thrd.ring, params );

		__cfadbg_print_safe(io_core, "Kernel I/O : Starting poller thread for io_context %p\n", &this);
		this.thrd.done = false;
		__thrd_start( this.thrd, main );

		__cfadbg_print_safe(io_core, "Kernel I/O : io_context %p ready\n", &this);
	}

	void ?{}(io_context & this, struct cluster & cl) {
		io_context_params params;
		(this){ cl, params };
	}

	void ^?{}(io_context & this, bool cluster_context) {
		__cfadbg_print_safe(io_core, "Kernel I/O : tearing down io_context %p\n", &this);

		// Notify the thread of the shutdown
		__atomic_store_n(&this.thrd.done, true, __ATOMIC_SEQ_CST);

		// If this is an io_context within a cluster, things get trickier
		$thread & thrd = this.thrd.self;
		if( cluster_context ) {
			// We are about to do weird things with the threads,
			// so we don't want interrupts to complicate everything
			disable_interrupts();

			// Get cluster info
			cluster & cltr = *thrd.curr_cluster;
			/* paranoid */ verify( cltr.idles.total == 0 || &cltr == mainCluster );
			/* paranoid */ verify( !ready_mutate_islocked() );

			// We need to adjust the clean-up based on where the thread is
			if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {
				// This is the tricky case
				// The thread was preempted or ready to run and is now on the ready queue,
				// but the cluster is shutting down, so there aren't any processors to run the ready queue.
				// The solution is to steal the thread from the ready queue and pretend it was blocked all along.

				ready_schedule_lock();
				// The thread should be on the list
				/* paranoid */ verify( thrd.link.next != 0p );

				// Remove the thread from the ready queue of this cluster
				// The thread should be the last on the list
				__attribute__((unused)) bool removed = remove_head( &cltr, &thrd );
				/* paranoid */ verify( removed );
				thrd.link.next = 0p;
				thrd.link.prev = 0p;

				// Fixup the thread state
				thrd.state = Blocked;
				thrd.ticket = TICKET_BLOCKED;
				thrd.preempted = __NO_PREEMPTION;

				ready_schedule_unlock();

				// Pretend like the thread was blocked all along
			}
| 292 | // !!! This is not an else if !!! |
---|
[ece0e80] | 293 | // Ok, now the thread is blocked (whether we cheated to get here or not) |
---|
[3e2b9c9] | 294 | if( thrd.state == Blocked ) { |
---|
| 295 | // This is the "easy case" |
---|
| 296 | // The thread is parked and can easily be moved to active cluster |
---|
| 297 | verify( thrd.curr_cluster != active_cluster() || thrd.curr_cluster == mainCluster ); |
---|
| 298 | thrd.curr_cluster = active_cluster(); |
---|
| 299 | |
---|
| 300 | // unpark the fast io_poller |
---|
[0d72d45] | 301 | unpark( &thrd ); |
---|
[3e2b9c9] | 302 | } |
---|
| 303 | else { |
---|
| 304 | // The thread is in a weird state |
---|
| 305 | // I don't know what to do here |
---|
| 306 | abort("io_context poller thread is in unexpected state, cannot clean-up correctly\n"); |
---|
| 307 | } |
---|
[ece0e80] | 308 | |
---|
| 309 | // The weird thread kidnapping stuff is over, restore interrupts. |
---|
| 310 | enable_interrupts( __cfaabi_dbg_ctx ); |
---|
[3e2b9c9] | 311 | } else { |
---|
[3c80ccc] | 312 | post( this.thrd.sem ); |
---|
[3e2b9c9] | 313 | } |
---|
| 314 | |
---|
| 315 | ^(this.thrd){}; |
---|
| 316 | __cfadbg_print_safe(io_core, "Kernel I/O : Stopped poller thread for io_context %p\n", &this); |
---|
| 317 | |
---|
| 318 | __io_destroy( *this.thrd.ring ); |
---|
| 319 | __cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %p\n", &this); |
---|
| 320 | |
---|
| 321 | free(this.thrd.ring); |
---|
| 322 | } |
---|
| 323 | |
---|
| 324 | void ^?{}(io_context & this) { |
---|
| 325 | ^(this){ false }; |
---|
| 326 | } |
---|
| 327 | |
---|
	extern void __disable_interrupts_hard();
	extern void __enable_interrupts_hard();

	static void __io_create( __io_data & this, const io_context_params & params_in ) {
		// Step 1 : call to setup
		struct io_uring_params params;
		memset(&params, 0, sizeof(params));
		if( params_in.poll_submit   ) params.flags |= IORING_SETUP_SQPOLL;
		if( params_in.poll_complete ) params.flags |= IORING_SETUP_IOPOLL;

		__u32 nentries = params_in.num_entries != 0 ? params_in.num_entries : 256;
		if( !is_pow2(nentries) ) {
			abort("ERROR: I/O setup 'num_entries' must be a power of 2\n");
		}
		if( params_in.poller_submits && params_in.eager_submits ) {
			abort("ERROR: I/O setup 'poller_submits' and 'eager_submits' cannot be used together\n");
		}

		int fd = syscall(__NR_io_uring_setup, nentries, &params );
		if(fd < 0) {
			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
		}

		// Step 2 : mmap result
		memset( &this, 0, sizeof(struct __io_data) );
		struct __submition_data  & sq = this.submit_q;
		struct __completion_data & cq = this.completion_q;

		// calculate the right ring size
		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned)           );
		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// adjust the size according to the parameters
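			// with IORING_FEAT_SINGLE_MMAP the kernel serves both rings from a single mapping,
			// so both sizes become the larger of the two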
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
			}
		#endif

		// mmap the Submit Queue into existence
		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
		if (sq.ring_ptr == (void*)MAP_FAILED) {
			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
		}

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// mmap the Completion Queue into existence (may or may not be needed)
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_ptr = sq.ring_ptr;
			}
			else
		#endif
		{
			// We need multiple calls to mmap
			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
			if (cq.ring_ptr == (void*)MAP_FAILED) {
				munmap(sq.ring_ptr, sq.ring_sz);
				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
			}
		}

		// mmap the submit queue entries
		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
			munmap(sq.ring_ptr, sq.ring_sz);
			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
		}

		// Step 3 : Initialize the data structure
		// Get the pointers from the kernel to fill the structure
		// submit queue
		sq.head    = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
		sq.tail    = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
		sq.mask    = (   const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
		sq.num     = (   const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
		sq.flags   = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
		sq.dropped = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);
		sq.array   = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
		sq.prev_head = *sq.head;

		{
			const __u32 num = *sq.num;
			for( i; num ) {
				__sqe_clean( &sq.sqes[i] );
			}
		}

		(sq.submit_lock){};
		(sq.release_lock){};

		if( params_in.poller_submits || params_in.eager_submits ) {
			/* paranoid */ verify( is_pow2( params_in.num_ready ) || (params_in.num_ready < 8) );
			sq.ready_cnt = max( params_in.num_ready, 8 );
			sq.ready = alloc( sq.ready_cnt, 64`align );
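			// mark every slot in the ready array as unused (-1ul32)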
			for(i; sq.ready_cnt) {
				sq.ready[i] = -1ul32;
			}
			sq.prev_ready = 0;
		}
		else {
			sq.ready_cnt = 0;
			sq.ready = 0p;
			sq.prev_ready = 0;
		}

		// completion queue
		cq.head     = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
		cq.tail     = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
		cq.mask     = (   const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
		cq.num      = (   const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
		cq.overflow = (         __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
		cq.cqes     = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

		// Step 4 : eventfd
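		// The ring's completions are tied to an eventfd (IORING_REGISTER_EVENTFD below),
		// which the master poller watches through epoll to know when to wake this context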
		// io_uring_register is so f*cking slow on some machines that it
		// will never succeed if preemption isn't hard blocked
		__disable_interrupts_hard();

		int efd = eventfd(0, 0);
		if (efd < 0) {
			abort("KERNEL ERROR: IO_URING EVENTFD - %s\n", strerror(errno));
		}

		int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &efd, 1);
		if (ret < 0) {
			abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
		}

		__enable_interrupts_hard();

		// some paranoid checks
		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask );
		/* paranoid */ verifyf( (*cq.num)  >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );

		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
		/* paranoid */ verifyf( (*sq.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.head );
		/* paranoid */ verifyf( (*sq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.tail );

		// Update the global ring info
		this.ring_flags = params.flags;
		this.fd  = fd;
		this.efd = efd;
		this.eager_submits  = params_in.eager_submits;
		this.poller_submits = params_in.poller_submits;
	}

	static void __io_destroy( __io_data & this ) {
		// Shutdown the io rings
		struct __submition_data  & sq = this.submit_q;
		struct __completion_data & cq = this.completion_q;

		// unmap the submit queue entries
		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));

		// unmap the Submit Queue ring
		munmap(sq.ring_ptr, sq.ring_sz);

		// unmap the Completion Queue ring, if it is different
		if (cq.ring_ptr != sq.ring_ptr) {
			munmap(cq.ring_ptr, cq.ring_sz);
		}

		// close the file descriptors
		close(this.fd);
		close(this.efd);

		free( this.submit_q.ready ); // Maybe null, doesn't matter
	}

	//=============================================================================================
	// I/O Context Sleep
	//=============================================================================================
	static inline void __ioctx_epoll_ctl($io_ctx_thread & ctx, int op, const char * error) {
		struct epoll_event ev;
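		// EPOLLONESHOT disarms the fd after each wake-up; __ioctx_prepare_block re-arms it
		// with EPOLL_CTL_MOD before the context blocks again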
		ev.events = EPOLLIN | EPOLLONESHOT;
		ev.data.u64 = (__u64)&ctx;
		int ret = epoll_ctl(iopoll.epollfd, op, ctx.ring->efd, &ev);
		if (ret < 0) {
			abort( "KERNEL ERROR: EPOLL %s - (%d) %s\n", error, (int)errno, strerror(errno) );
		}
	}

	void __ioctx_register($io_ctx_thread & ctx) {
		__ioctx_epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
	}

	void __ioctx_prepare_block($io_ctx_thread & ctx) {
		__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.ring->fd, &ctx);
		__ioctx_epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
	}

	void __ioctx_unregister($io_ctx_thread & ctx) {
		// Read the current epoch so we know when to stop
		size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);

		// Remove the fd from the iopoller
		__ioctx_epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE");

		// Notify the io poller thread of the shutdown
		iopoll.run = false;
		sigval val = { 1 };
		pthread_sigqueue( iopoll.thrd, SIGUSR1, val );

		// Make sure all this is done
		__atomic_thread_fence(__ATOMIC_SEQ_CST);

		// Wait for the next epoch
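		// Once the epoch advances (or the poller has stopped), the poller has begun a new
		// cycle and can no longer be processing an event that refers to this context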
		while(curr == iopoll.epoch && !iopoll.stopped) Pause();
	}

	//=============================================================================================
	// I/O Context Misc Setup
	//=============================================================================================
	void register_fixed_files( io_context & ctx, int * files, unsigned count ) {
		int ret = syscall( __NR_io_uring_register, ctx.thrd.ring->fd, IORING_REGISTER_FILES, files, count );
		if( ret < 0 ) {
			abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
		}

		__cfadbg_print_safe( io_core, "Kernel I/O : Performed io_register for %p, returned %d\n", active_thread(), ret );
	}

	void register_fixed_files( cluster & cltr, int * files, unsigned count ) {
		for(i; cltr.io.cnt) {
			register_fixed_files( cltr.io.ctxs[i], files, count );
		}
	}
#endif