#include "kernel.hfa"

#if !defined(HAVE_LINUX_IO_URING_H)
	void __kernel_io_startup( cluster & this ) {
		// Nothing to do without io_uring
	}

	void __kernel_io_shutdown( cluster & this ) {
		// Nothing to do without io_uring
	}

	bool is_async( void (*)() ) {
		return false;
	}

#else
	extern "C" {
		#define _GNU_SOURCE         /* See feature_test_macros(7) */
		#include <errno.h>
		#include <stdint.h>
		#include <string.h>
		#include <unistd.h>
		#include <sys/mman.h>
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "bits/signal.hfa"
	#include "kernel_private.hfa"
	#include "thread.hfa"

	uint32_t entries_per_cluster() {
		return 256;
	}

	static void * __io_poller( void * arg );

//=============================================================================================
// I/O Startup / Shutdown logic
//=============================================================================================
	void __kernel_io_startup( cluster & this ) {
		// Step 1 : call to setup
		struct io_uring_params params;
		memset(&params, 0, sizeof(params));

		uint32_t nentries = entries_per_cluster();

		int fd = syscall(__NR_io_uring_setup, nentries, &params );
		if(fd < 0) {
			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
		}

		// Step 2 : mmap result
		memset(&this.io, 0, sizeof(struct io_ring));
		struct io_uring_sq & sq = this.io.submit_q;
		struct io_uring_cq & cq = this.io.completion_q;

		// calculate the right ring size
		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned)           );
		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));

		// Requires features
		// // adjust the size according to the parameters
		// if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
		// 	cq->ring_sz = sq->ring_sz = max(cq->ring_sz, sq->ring_sz);
		// }

		// mmap the Submit Queue into existence
		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
		if (sq.ring_ptr == (void*)MAP_FAILED) {
			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
		}

		// mmap the Completion Queue into existence (may or may not be needed)
		// Requires features
		// if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
		// 	cq->ring_ptr = sq->ring_ptr;
		// }
		// else {
			// We need multiple calls to mmap
			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
			if (cq.ring_ptr == (void*)MAP_FAILED) {
				munmap(sq.ring_ptr, sq.ring_sz);
				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
			}
		// }

		// mmap the submit queue entries
		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
			munmap(sq.ring_ptr, sq.ring_sz);
			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
		}

		// Get the pointers from the kernel to fill the structure
		// submit queue
		sq.head    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
		sq.tail    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
		sq.mask    = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
		sq.num     = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
		sq.flags   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
		sq.dropped = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);
		sq.array   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
		sq.alloc = *sq.tail;

		// completion queue
		cq.head     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
		cq.tail     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
		cq.mask     = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
		cq.num      = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
		cq.overflow = (         uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
		cq.cqes   = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

		// some paranoid checks
		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask  );
		/* paranoid */ verifyf( (*cq.num)  >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );

		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
		/* paranoid */ verifyf( (*sq.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.head );
		/* paranoid */ verifyf( (*sq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.tail );

		// Update the global ring info
		this.io.flags = params.flags;
		this.io.fd    = fd;
		this.io.done  = false;
		(this.io.submit){ min(*sq.num, *cq.num) };

		// Create the poller thread
		this.io.stack = __create_pthread( &this.io.poller, __io_poller, &this );
	}

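	// For example, once startup completes, the number of free submission slots
	// can be read directly through the mapped pointers (a sketch only; head and
	// tail are free-running counters, so unsigned subtraction handles wrap-around):
	//
	//     struct io_uring_sq & sq = this.io.submit_q;
	//     uint32_t used = *sq.tail - *sq.head;
	//     uint32_t free = *sq.num  - used;
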
	void __kernel_io_shutdown( cluster & this ) {
		// Stop the IO Poller
		// Notify the poller thread of the shutdown
		__atomic_store_n(&this.io.done, true, __ATOMIC_SEQ_CST);
		sigval val = { 1 };
		// The signal interrupts the poller's io_uring_enter call, which then
		// returns with EINTR, so the loop re-checks the done flag and exits
		pthread_sigqueue( this.io.poller, SIGUSR1, val );

		// Wait for the poller thread to finish
		pthread_join( this.io.poller, 0p );
		free( this.io.stack );

		// Shutdown the io rings
		struct io_uring_sq & sq = this.io.submit_q;
		struct io_uring_cq & cq = this.io.completion_q;

		// unmap the submit queue entries
		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));

		// unmap the Submit Queue ring
		munmap(sq.ring_ptr, sq.ring_sz);

		// unmap the Completion Queue ring, if it is different
		if (cq.ring_ptr != sq.ring_ptr) {
			munmap(cq.ring_ptr, cq.ring_sz);
		}

		// close the file descriptor
		close(this.io.fd);
	}

//=============================================================================================
// I/O Polling
//=============================================================================================
	struct io_user_data {
		int32_t result;
		$thread * thrd;
	};

	// Process a single completion message from the io_uring
	// This is NOT thread-safe
	static bool __io_process(struct io_ring & ring) {
		unsigned head = *ring.completion_q.head;
		unsigned tail = __atomic_load_n(ring.completion_q.tail, __ATOMIC_ACQUIRE);

		if (head == tail) return false;

		unsigned idx = head & (*ring.completion_q.mask);
		struct io_uring_cqe & cqe = ring.completion_q.cqes[idx];

		/* paranoid */ verify(&cqe);

		struct io_user_data * data = (struct io_user_data *)cqe.user_data;
		__cfaabi_bits_print_safe( STDERR_FILENO, "Performed reading io cqe %p, result %d for %p\n", data, cqe.res, data->thrd );

		data->result = cqe.res;
		__unpark( data->thrd __cfaabi_dbg_ctx2 );

		// Allow new submissions to happen
		V(ring.submit);

		// Mark to the kernel that the cqe has been seen
		// Release ordering ensures the kernel only sees the new value of the head index after the CQE has been read
		__atomic_fetch_add( ring.completion_q.head, 1, __ATOMIC_RELEASE );

		return true;
	}

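	// A worked example of the ring arithmetic above: with 256 entries the mask
	// is 255, and head/tail are free-running counters. If head == 300 and
	// tail == 302, two completions are pending, stored at slots
	// 300 & 255 == 44 and 301 & 255 == 45 of the cqes array.
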
	static void * __io_poller( void * arg ) {
		cluster * cltr = (cluster *)arg;
		struct io_ring & ring = cltr->io;

		// Block all signals on this kernel thread; SIGUSR1 is removed from the
		// local mask below so it can be delivered during io_uring_enter only,
		// allowing shutdown to interrupt the wait
		sigset_t mask;
		sigfillset(&mask);
		if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
			abort( "KERNEL ERROR: IO_URING - pthread_sigmask" );
		}

		sigdelset( &mask, SIGUSR1 );

		verify( (*ring.submit_q.head) == (*ring.submit_q.tail) );
		verify( (*ring.completion_q.head) == (*ring.completion_q.tail) );

		LOOP: while(!__atomic_load_n(&ring.done, __ATOMIC_SEQ_CST)) {
			// Wait for at least one completion, temporarily installing `mask`
			// (everything blocked except SIGUSR1) for the duration of the call
			int ret = syscall( __NR_io_uring_enter, ring.fd, 0, 1, IORING_ENTER_GETEVENTS, &mask, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
					continue LOOP;
				default:
					abort( "KERNEL ERROR: IO_URING WAIT - %s\n", strerror(errno) );
				}
			}

			// Drain the queue
			while(__io_process(ring)) {}
		}

		return 0p;
	}

//=============================================================================================
// I/O Submissions
//=============================================================================================

// Submission steps :
// 1 - We need to make sure we don't overflow any of the buffers. P(ring.submit) guarantees
//     that entries are available. The semaphore ensures that there are never more operations
//     in progress than there are entries in the buffer. This probably limits concurrency
//     more than necessary, since submitted-but-not-completed operations don't need any
//     entries in user space. However, I don't know what happens if we overflow the buffers
//     because too many requests completed at once. This is a safe approach in all cases.
//     Furthermore, with hundreds of entries, this may be okay.
//
// 2 - Allocate a queue entry. The ring already has memory for all entries, but only the ones
//     listed in sq.array are visible to the kernel. For those not listed, the kernel offers
//     no assurance that an entry is not being filled by multiple threads. Therefore, we
//     need to write an allocator that allows allocating concurrently.
//
// 3 - Actually fill the submit entry. This is the only simple and straightforward step.
//
// 4 - Append the entry index to the array and adjust the tail accordingly. This operation
//     needs to reach two points of consensus at the same time:
//     A - The order in which entries are listed in the array: no two threads must pick the
//         same index for their entries.
//     B - When the tail can be updated for the kernel: EVERY entry in the array between
//         head and tail must be fully filled and should never be touched again.
//
// A short sketch of these four steps is given below, before the helpers that implement them.

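// Putting the four steps together, a single submission looks roughly like this
// (a sketch only, using the helpers defined below with a NOP operation):
//
//     [sqe, idx] = __submit_alloc( ring );   // steps 1 & 2 : P() then allocate an sqe
//     (*sqe){ IORING_OP_NOP, 0 };            // step 3      : fill the entry
//     __submit( ring, idx );                 // step 4      : publish the index, bump the tail
//
// IORING_OP_NOP is used purely for illustration; the real callers below fill in
// actual operations.
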
static inline [* struct io_uring_sqe, uint32_t] __submit_alloc( struct io_ring & ring ) {
	// Wait for a spot to be available
	P(ring.submit);

	// Allocate the sqe
	uint32_t idx = __atomic_fetch_add(&ring.submit_q.alloc, 1ul32, __ATOMIC_SEQ_CST);

	// Check that nothing overflowed
	/* paranoid */ verify( true );

	// Check that it goes head -> tail -> alloc and never head -> alloc -> tail
	/* paranoid */ verify( true );

	// Return the sqe
	return [&ring.submit_q.sqes[ idx & (*ring.submit_q.mask)], idx];
}

static inline void __submit( struct io_ring & ring, uint32_t idx ) {
	// get mutual exclusion
	lock(ring.submit_q.lock __cfaabi_dbg_ctx2);

	// Append to the list of ready entries
	uint32_t * tail = ring.submit_q.tail;
	const uint32_t mask = *ring.submit_q.mask;

	ring.submit_q.array[ (*tail) & mask ] = idx & mask;
	__atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

	// Submit however many entries need to be submitted
	int ret = syscall( __NR_io_uring_enter, ring.fd, 1, 0, 0, 0p, 0);
	__cfaabi_bits_print_safe( STDERR_FILENO, "Performed io_submit, returned %d\n", ret );
	if( ret < 0 ) {
		switch((int)errno) {
		default:
			abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
		}
	}

	unlock(ring.submit_q.lock);
	// Make sure that idx was submitted
	// Be careful to not get a false positive if we cycled the entire list or someone else submitted for us
}

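// A sketch of the "idx was submitted" check mentioned above (hypothetical, not
// enabled). Since head, tail, and idx are free-running counters, an entry has
// been handed to the kernel once idx falls behind the head pointer:
//
//     uint32_t head = __atomic_load_n(ring.submit_q.head, __ATOMIC_ACQUIRE);
//     bool submitted = (idx - head) >= (*ring.submit_q.num);
//
// As the comment above warns, this yields false positives once the counters
// cycle the entire list, which is why no real check is performed here.
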
static inline void ?{}(struct io_uring_sqe & this, uint8_t opcode, int fd) {
	this.opcode = opcode;
	#if !defined(IOSQE_ASYNC)
		this.flags = 0;
	#else
		this.flags = IOSQE_ASYNC;
	#endif
	this.ioprio = 0;
	this.fd = fd;
	this.off = 0;
	this.addr = 0;
	this.len = 0;
	this.rw_flags = 0;
	this.__pad2[0] = this.__pad2[1] = this.__pad2[2] = 0;
}

static inline void ?{}(struct io_uring_sqe & this, uint8_t opcode, int fd, void * addr, uint32_t len, uint64_t off ) {
	(this){ opcode, fd };
	this.off = off;
	this.addr = (uint64_t)addr;
	this.len = len;
}

//=============================================================================================
// I/O Interface
//=============================================================================================
	extern "C" {
		#define __USE_GNU
		#define _GNU_SOURCE
		#include <fcntl.h>
		#include <sys/uio.h>
		#include <sys/socket.h>
		#include <sys/stat.h>
	}

	#define __submit_prelude \
		struct io_ring & ring = active_cluster()->io; \
		struct io_uring_sqe * sqe; \
		uint32_t idx; \
		[sqe, idx] = __submit_alloc( ring );

	#define __submit_wait \
		io_user_data data = { 0, active_thread() }; \
		__cfaabi_bits_print_safe( STDERR_FILENO, "Preparing user data %p for %p\n", &data, data.thrd ); \
		sqe->user_data = (uint64_t)&data; \
		__submit( ring, idx ); \
		park( __cfaabi_dbg_ctx ); \
		return data.result;

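	// For illustration, a call such as async_fsync(fd) below effectively expands
	// to (a sketch of the two macros spliced together):
	//
	//     struct io_ring & ring = active_cluster()->io;
	//     struct io_uring_sqe * sqe;
	//     uint32_t idx;
	//     [sqe, idx] = __submit_alloc( ring );           // __submit_prelude
	//     (*sqe){ IORING_OP_FSYNC, fd };                 // operation-specific setup
	//     io_user_data data = { 0, active_thread() };    // __submit_wait
	//     sqe->user_data = (uint64_t)&data;
	//     __submit( ring, idx );
	//     park( __cfaabi_dbg_ctx );                      // unparked by __io_process with data.result set
	//     return data.result;
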
//-----------------------------------------------------------------------------
// Asynchronous operations
	ssize_t async_preadv2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
		#if !defined(IORING_OP_READV)
			return preadv2(fd, iov, iovcnt, offset, flags);
		#else
			__submit_prelude

			(*sqe){ IORING_OP_READV, fd, iov, iovcnt, offset };

			__submit_wait
		#endif
	}

	ssize_t async_pwritev2(int fd, const struct iovec *iov, int iovcnt, off_t offset, int flags) {
		#if !defined(IORING_OP_WRITEV)
			return pwritev2(fd, iov, iovcnt, offset, flags);
		#else
			__submit_prelude

			(*sqe){ IORING_OP_WRITEV, fd, iov, iovcnt, offset };

			__submit_wait
		#endif
	}

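	// Example use (a sketch): read the first 512 bytes of an open file while
	// only blocking the calling user-level thread:
	//
	//     char buf[512];
	//     struct iovec iov = { buf, 512 };
	//     ssize_t r = async_preadv2( fd, &iov, 1, 0, 0 );
	//     // on failure r is the negated errno carried in cqe.res
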
	int async_fsync(int fd) {
		#if !defined(IORING_OP_FSYNC)
			return fsync(fd);
		#else
			__submit_prelude

			(*sqe){ IORING_OP_FSYNC, fd };

			__submit_wait
		#endif
	}

	int async_sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags) {
		#if !defined(IORING_OP_SYNC_FILE_RANGE)
			return sync_file_range(fd, offset, nbytes, flags);
		#else
			__submit_prelude

			(*sqe){ IORING_OP_SYNC_FILE_RANGE, fd };
			sqe->off = offset;
			sqe->len = nbytes;
			sqe->sync_range_flags = flags;

			__submit_wait
		#endif
	}

	ssize_t async_sendmsg(int sockfd, const struct msghdr *msg, int flags) {
		#if !defined(IORING_OP_SENDMSG)
			return sendmsg(sockfd, msg, flags);
		#else
			__submit_prelude

			(*sqe){ IORING_OP_SENDMSG, sockfd, msg, 1, 0 };
			sqe->msg_flags = flags;

			__submit_wait
		#endif
	}

	ssize_t async_recvmsg(int sockfd, struct msghdr *msg, int flags) {
		#if !defined(IORING_OP_RECVMSG)
			return recvmsg(sockfd, msg, flags);
		#else
			__submit_prelude

			(*sqe){ IORING_OP_RECVMSG, sockfd, msg, 1, 0 };
			sqe->msg_flags = flags;

			__submit_wait
		#endif
	}

	ssize_t async_send(int sockfd, const void *buf, size_t len, int flags) {
		#if !defined(IORING_OP_SEND)
			return send( sockfd, buf, len, flags );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_SEND, sockfd };
			sqe->addr = (uint64_t)buf;
			sqe->len = len;
			sqe->msg_flags = flags;

			__submit_wait
		#endif
	}

	ssize_t async_recv(int sockfd, void *buf, size_t len, int flags) {
		#if !defined(IORING_OP_RECV)
			return recv( sockfd, buf, len, flags );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_RECV, sockfd };
			sqe->addr = (uint64_t)buf;
			sqe->len = len;
			sqe->msg_flags = flags;

			__submit_wait
		#endif
	}

	int async_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags) {
		#if !defined(IORING_OP_ACCEPT)
			__SOCKADDR_ARG _addr;
			_addr.__sockaddr__ = addr;
			return accept4( sockfd, _addr, addrlen, flags );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_ACCEPT, sockfd };
			sqe->addr = (uint64_t)addr;
			sqe->addr2 = (uint64_t)addrlen;
			sqe->accept_flags = flags;

			__submit_wait
		#endif
	}

	int async_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) {
		#if !defined(IORING_OP_CONNECT)
			__CONST_SOCKADDR_ARG _addr;
			_addr.__sockaddr__ = addr;
			return connect( sockfd, _addr, addrlen );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_CONNECT, sockfd };
			sqe->addr = (uint64_t)addr;
			sqe->off = addrlen; // the kernel reads the address length from addr2, which unions with off

			__submit_wait
		#endif
	}

	int async_fallocate(int fd, int mode, uint64_t offset, uint64_t len) {
		#if !defined(IORING_OP_FALLOCATE)
			return fallocate( fd, mode, offset, len );
		#else
			__submit_prelude

			// the kernel reads the fallocate arguments as: off = offset, addr = len, len = mode
			(*sqe){ IORING_OP_FALLOCATE, fd };
			sqe->off = offset;
			sqe->addr = len;
			sqe->len = mode;

			__submit_wait
		#endif
	}

	int async_fadvise(int fd, uint64_t offset, uint64_t len, int advice) {
		#if !defined(IORING_OP_FADVISE)
			return posix_fadvise( fd, offset, len, advice );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_FADVISE, fd };
			sqe->off = (uint64_t)offset;
			sqe->len = len;
			sqe->fadvise_advice = advice;

			__submit_wait
		#endif
	}

	int async_madvise(void *addr, size_t length, int advice) {
		#if !defined(IORING_OP_MADVISE)
			return madvise( addr, length, advice );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_MADVISE, 0 };
			sqe->addr = (uint64_t)addr;
			sqe->len = length;
			sqe->fadvise_advice = advice;

			__submit_wait
		#endif
	}

	int async_openat(int dirfd, const char *pathname, int flags, mode_t mode) {
		#if !defined(IORING_OP_OPENAT)
			return openat( dirfd, pathname, flags, mode );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_OPENAT, dirfd };
			sqe->addr = (uint64_t)pathname;
			sqe->open_flags = flags;
			sqe->len = mode; // the kernel reads the open mode from the len field

			__submit_wait
		#endif
	}

	int async_close(int fd) {
		#if !defined(IORING_OP_CLOSE)
			return close( fd );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_CLOSE, fd };

			__submit_wait
		#endif
	}

	int async_statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) {
		#if !defined(IORING_OP_STATX)
			return statx( dirfd, pathname, flags, mask, statxbuf );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_STATX, dirfd };
			sqe->addr = (uint64_t)pathname;
			sqe->statx_flags = flags;
			sqe->len = mask;
			sqe->off = (uint64_t)statxbuf; // the kernel reads the buffer pointer from addr2, which unions with off

			__submit_wait
		#endif
	}

	ssize_t async_read(int fd, void *buf, size_t count) {
		#if !defined(IORING_OP_READ)
			return read( fd, buf, count );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_READ, fd, buf, count, 0 };

			__submit_wait
		#endif
	}

	ssize_t async_write(int fd, void *buf, size_t count) {
		#if !defined(IORING_OP_WRITE)
			return write( fd, buf, count );
		#else
			__submit_prelude

			(*sqe){ IORING_OP_WRITE, fd, buf, count, 0 };

			__submit_wait
		#endif
	}

//-----------------------------------------------------------------------------
// Check if a function is asynchronous

// Macro magic to reduce the size of the following tests
	#define IS_DEFINED_APPLY(f, ...) f(__VA_ARGS__)
	#define IS_DEFINED_SECOND(first, second, ...) second
	#define IS_DEFINED_TEST(expansion) _CFA_IO_FEATURE_##expansion
	#define IS_DEFINED(macro) IS_DEFINED_APPLY( IS_DEFINED_SECOND,IS_DEFINED_TEST(macro) false, true)

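// How the magic works, step by step: IS_DEFINED(IORING_OP_READV) first expands
// its argument. When <linux/io_uring.h> defines IORING_OP_READV (say, as 1),
// IS_DEFINED_TEST pastes it into the unknown token _CFA_IO_FEATURE_1, so the
// second argument is `true`:
//
//     IS_DEFINED_SECOND( _CFA_IO_FEATURE_1 false, true )   =>  true
//
// When the op is not a defined macro, the paste produces
// _CFA_IO_FEATURE_IORING_OP_READV, which the code below defines as a lone
// comma; that comma shifts `false` into the second argument slot:
//
//     IS_DEFINED_SECOND( , false, true )                   =>  false
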
	bool is_async( fptr_t func ) {

		if( /*func == (fptr_t)preadv2 || */
			func == (fptr_t)async_preadv2 )
			#define _CFA_IO_FEATURE_IORING_OP_READV ,
			return IS_DEFINED(IORING_OP_READV);

		if( /*func == (fptr_t)pwritev2 || */
			func == (fptr_t)async_pwritev2 )
			#define _CFA_IO_FEATURE_IORING_OP_WRITEV ,
			return IS_DEFINED(IORING_OP_WRITEV);

		if( /*func == (fptr_t)fsync || */
			func == (fptr_t)async_fsync )
			#define _CFA_IO_FEATURE_IORING_OP_FSYNC ,
			return IS_DEFINED(IORING_OP_FSYNC);

		if( /*func == (fptr_t)sync_file_range || */
			func == (fptr_t)async_sync_file_range )
			#define _CFA_IO_FEATURE_IORING_OP_SYNC_FILE_RANGE ,
			return IS_DEFINED(IORING_OP_SYNC_FILE_RANGE);

		if( /*func == (fptr_t)sendmsg || */
			func == (fptr_t)async_sendmsg )
			#define _CFA_IO_FEATURE_IORING_OP_SENDMSG ,
			return IS_DEFINED(IORING_OP_SENDMSG);

		if( /*func == (fptr_t)recvmsg || */
			func == (fptr_t)async_recvmsg )
			#define _CFA_IO_FEATURE_IORING_OP_RECVMSG ,
			return IS_DEFINED(IORING_OP_RECVMSG);

		if( /*func == (fptr_t)send || */
			func == (fptr_t)async_send )
			#define _CFA_IO_FEATURE_IORING_OP_SEND ,
			return IS_DEFINED(IORING_OP_SEND);

		if( /*func == (fptr_t)recv || */
			func == (fptr_t)async_recv )
			#define _CFA_IO_FEATURE_IORING_OP_RECV ,
			return IS_DEFINED(IORING_OP_RECV);

		if( /*func == (fptr_t)accept4 || */
			func == (fptr_t)async_accept4 )
			#define _CFA_IO_FEATURE_IORING_OP_ACCEPT ,
			return IS_DEFINED(IORING_OP_ACCEPT);

		if( /*func == (fptr_t)connect || */
			func == (fptr_t)async_connect )
			#define _CFA_IO_FEATURE_IORING_OP_CONNECT ,
			return IS_DEFINED(IORING_OP_CONNECT);

		if( /*func == (fptr_t)fallocate || */
			func == (fptr_t)async_fallocate )
			#define _CFA_IO_FEATURE_IORING_OP_FALLOCATE ,
			return IS_DEFINED(IORING_OP_FALLOCATE);

		if( /*func == (fptr_t)posix_fadvise || */
			func == (fptr_t)async_fadvise )
			#define _CFA_IO_FEATURE_IORING_OP_FADVISE ,
			return IS_DEFINED(IORING_OP_FADVISE);

		if( /*func == (fptr_t)madvise || */
			func == (fptr_t)async_madvise )
			#define _CFA_IO_FEATURE_IORING_OP_MADVISE ,
			return IS_DEFINED(IORING_OP_MADVISE);

		if( /*func == (fptr_t)openat || */
			func == (fptr_t)async_openat )
			#define _CFA_IO_FEATURE_IORING_OP_OPENAT ,
			return IS_DEFINED(IORING_OP_OPENAT);

		if( /*func == (fptr_t)close || */
			func == (fptr_t)async_close )
			#define _CFA_IO_FEATURE_IORING_OP_CLOSE ,
			return IS_DEFINED(IORING_OP_CLOSE);

		if( /*func == (fptr_t)statx || */
			func == (fptr_t)async_statx )
			#define _CFA_IO_FEATURE_IORING_OP_STATX ,
			return IS_DEFINED(IORING_OP_STATX);

		if( /*func == (fptr_t)read || */
			func == (fptr_t)async_read )
			#define _CFA_IO_FEATURE_IORING_OP_READ ,
			return IS_DEFINED(IORING_OP_READ);

		if( /*func == (fptr_t)write || */
			func == (fptr_t)async_write )
			#define _CFA_IO_FEATURE_IORING_OP_WRITE ,
			return IS_DEFINED(IORING_OP_WRITE);

		return false;
	}

#endif