source: libcfa/src/concurrency/io.cfa@ 5908fb4

Last change on this file since 5908fb4 was 1afd9ccb, checked in by Peter A. Buhr <pabuhr@…>, 2 years ago

update call.cfa.in with generic casts for io-uring field types

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>
		#include <sys/eventfd.h>
		#include <sys/uio.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "kernel/private.hfa"
	#include "kernel/cluster.hfa"
	#include "io/types.hfa"

	__attribute__((unused)) static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};

	static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want );
	static void __ioarbiter_submit( io_context$ * , __u32 idxs[], __u32 have, bool lazy );
	static void __ioarbiter_flush ( io_context$ &, bool kernel );
	static inline void __ioarbiter_notify( io_context$ & ctx );
//=============================================================================================
// I/O Polling
//=============================================================================================
	static inline unsigned __flush( struct io_context$ & );
	static inline __u32 __release_sqes( struct io_context$ & );
	extern void __kernel_unpark( thread$ * thrd, unpark_hint );

	static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
		thread$ * t = post( this, false );
		if(kernel) __kernel_unpark( t, hint );
		else unpark( t, hint );
	}

	// actual system call of io_uring
	// wrapped so that everything that needs to happen around it is always done
	// i.e., stats, bookkeeping, sqe reclamation, etc.
	static void ioring_syscsll( struct io_context$ & ctx, unsigned int min_comp, unsigned int flags ) {
		__STATS__( true, io.calls.flush++; )
		int ret;
		for() {
			// do the system call in a loop, repeat on interrupts
			ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EINTR:
					continue;
				case EAGAIN:
				case EBUSY:
					// Update statistics
					__STATS__( false, io.calls.errors.busy ++; )
					return;
				default:
					abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
				}
			}
			break;
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
		__STATS__( true, io.calls.submitted += ret; )
		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
		/* paranoid */ verify( ctx.sq.to_submit >= ret );

		// keep track of how many still need submitting
		__atomic_fetch_sub(&ctx.sq.to_submit, ret, __ATOMIC_SEQ_CST);

		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

		// Release the consumed SQEs
		__release_sqes( ctx );

		/* paranoid */ verify( ! __preemption_enabled() );

		// mark that there is no pending io left
		__atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
	}
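
	// For reference, the raw syscall above is the io_uring_enter(2) entry point. A hedged
	// sketch of the man-page signature, for readers unfamiliar with the raw interface (this
	// file calls syscall(2) directly and does not use the liburing wrapper):
	//
	//     int io_uring_enter(unsigned int fd, unsigned int to_submit,
	//                        unsigned int min_complete, unsigned int flags,
	//                        sigset_t * sig);
	//
	// the trailing _NSIG / 8 argument is the sigset size expected by the raw 6-argument syscall.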

	// try to acquire an io context for draining; helping means we never *need* to drain, we can always do it later
	static bool try_acquire( io_context$ * ctx ) __attribute__((nonnull(1))) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( ready_schedule_islocked() );

		{
			// if there is nothing to drain there is no point in acquiring anything
			const __u32 head = *ctx->cq.head;
			const __u32 tail = *ctx->cq.tail;

			if(head == tail) return false;
		}

		// try a simple spinlock acquire, it's likely there are completions to drain
		if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
			// some other processor already has it
			__STATS__( false, io.calls.locked++; )
			return false;
		}

		// acquired!!
		return true;
	}

	// actually drain the completion queue
	static bool __cfa_do_drain( io_context$ * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ctx->cq.try_lock == true );

		// get all the invariants and initial state
		const __u32 mask = *ctx->cq.mask;
		const __u32 num  = *ctx->cq.num;
		unsigned long long ts_prev = ctx->cq.ts;
		unsigned long long ts_next;

		// We might need to do this multiple times if more events completed than can fit in the queue.
		__u32 count = 0;
		for() {
			// re-read the head and tail in case they already changed.
			// count the difference between the two
			const __u32 head = *ctx->cq.head;
			const __u32 tail = *ctx->cq.tail;
			count = tail - head;
			__STATS__( false, io.calls.drain++; io.calls.completed += count; )

			// for everything between head and tail, drain it
			for(i; count) {
				unsigned idx = (head + i) & mask;
				volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];

				/* paranoid */ verify(&cqe);

				// find the future in the completion
				struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
				// __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

				// don't directly fulfil the future: preemption is disabled, so we need to use kernel_unpark
				__kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
			}

			// update the timestamps accordingly
			// keep a local copy so we can update the relaxed copy
			ts_next = ctx->cq.ts = rdtscl();

			// Mark to the kernel that the cqes have been seen.
			// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
			__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
			ctx->proc->idle_wctx.drain_time = ts_next;

			// we finished draining the completions... unless the ring buffer was full and more completions are pending in the kernel.
			if(likely(count < num)) break;

			// the ring buffer was full, there could be more stuff in the kernel.
			ioring_syscsll( *ctx, 0, IORING_ENTER_GETEVENTS);
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next);
		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );

		// everything is drained, we can release the lock
		__atomic_unlock(&ctx->cq.try_lock);

		// update the relaxed timestamp
		touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next, false );

		return true;
	}

	// called from a processor to flush
	// contains all the bookkeeping a proc must do, not just the barebones flushing logic
	void __cfa_do_flush( io_context$ & ctx, bool kernel ) {
		/* paranoid */ verify( ! __preemption_enabled() );

		// flush any external requests
		ctx.sq.last_external = false; // clear the external bit, the arbiter will reset it if needed
		__ioarbiter_flush( ctx, kernel );

		// if anything is pending submission, do the system call
		if(ctx.sq.to_submit != 0) {
			ioring_syscsll(ctx, 0, 0);
		}
	}

	// called from a processor to drain
	// contains all the bookkeeping a proc must do, not just the barebones draining logic
	bool __cfa_io_drain( struct processor * proc ) {
		bool local = false;
		bool remote = false;

		// make sure no one creates/destroys io contexts
		ready_schedule_lock();

		cluster * const cltr = proc->cltr;
		io_context$ * const ctx = proc->io.ctx;
		/* paranoid */ verify( cltr );
		/* paranoid */ verify( ctx );

		// Help if needed
		with(cltr->sched) {
			const size_t ctxs_count = io.count;

			/* paranoid */ verify( ready_schedule_islocked() );
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( active_processor() == proc );
			/* paranoid */ verify( __shard_factor.io > 0 );
			/* paranoid */ verify( ctxs_count > 0 );
			/* paranoid */ verify( ctx->cq.id < ctxs_count );

			const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
			const unsigned long long ctsc = rdtscl();

			// only help once every other call:
			// when no target is picked, pick one; when one was picked last time, help it
			if(proc->io.target == UINT_MAX) {
				uint64_t chaos = __tls_rand();
				// choose who to help and whether to accept helping far processors
				unsigned ext = chaos & 0xff;
				unsigned other = (chaos >> 8) % (ctxs_count);

				// if the processor is on the same cache or is lucky ( 3 out of 256 odds ), help it
				if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
					proc->io.target = other;
				}
			}
			else {
				// a target was picked last time, help it
				const unsigned target = proc->io.target;
				/* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
				// make sure the target hasn't stopped existing since last time
				HELP: if(target < ctxs_count) {
					// calculate its age and how young it could be before we give up on helping
					const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
					const __readyQ_avg_t age = moving_average(ctsc, io.tscs[target].t.tv, io.tscs[target].t.ma, false);
					__cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
					// is the target older than the cutoff? recall 0 is oldest and bigger ints are younger
					if(age <= cutoff) break HELP;

					// attempt to help the submission side
					__cfa_do_flush( *io.data[target], true );

					// attempt to help the completion side
					if(!try_acquire(io.data[target])) break HELP; // already acquired, no help needed

					// actually help
					if(!__cfa_do_drain( io.data[target], cltr )) break HELP;

					// track that we did help someone
					remote = true;
					__STATS__( true, io.calls.helped++; )
				}

				// reset the target
				proc->io.target = UINT_MAX;
			}
		}

		// Drain the local queue
		if(try_acquire( proc->io.ctx )) {
			local = __cfa_do_drain( proc->io.ctx, cltr );
		}

		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( active_processor() == proc );

		ready_schedule_unlock();

		// return true if some completion entry, local or remote, was drained
		return local || remote;
	}

	// called from a processor to flush
	// contains all the bookkeeping a proc must do, not just the barebones flushing logic
	bool __cfa_io_flush( struct processor * proc ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		__cfa_do_flush( *proc->io.ctx, false );

		// also drain since some stuff will immediately complete
		return __cfa_io_drain( proc );
	}

//=============================================================================================
// I/O Submissions
//=============================================================================================

// Submission steps :
// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
//     need to write an allocator that allows allocating concurrently.
//
// 2 - Actually fill the submit entry. This is the only simple and straightforward step.
//
// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
//     needs to reach two points of consensus at the same time:
//     A - The order in which entries are listed in the array: no two threads must pick the
//         same index for their entries.
//     B - When the tail can be updated for the kernel: EVERY entry in the array between
//         head and tail must be fully filled and should never be touched again.
//
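// As an illustration only (a hedged sketch, not code from this file; the user-facing wrappers
// generated from call.cfa.in package this pattern, and `fd`, `buf` and `len` are hypothetical):
//
//     io_future_t fut;
//     struct io_uring_sqe * sqe;
//     __u32 idx;
//     struct io_context$ * ctx = cfa_io_allocate( &sqe, &idx, 1 );  // step 1 : allocate
//     sqe->opcode    = IORING_OP_READ;                              // step 2 : fill
//     sqe->fd        = fd;
//     sqe->addr      = (__u64)(uintptr_t)buf;
//     sqe->len       = len;
//     sqe->user_data = (__u64)(uintptr_t)&fut;                      // read back from cqe.user_data when draining
//     cfa_io_submit( ctx, &idx, 1, true /* lazy */ );               // step 3 : append and publish
//     wait( fut );                                                  // fulfilled by __cfa_do_drain
//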
	//=============================================================================================
	// Allocation
	// for the user's convenience, fill the sqes from the indexes
	static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct io_context$ * ctx) {
		struct io_uring_sqe * sqes = ctx->sq.sqes;
		for(i; want) {
			// __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
			out_sqes[i] = &sqes[idxs[i]];
		}
	}

	// Try to directly allocate from a given context
	// Not thread-safe
	static inline bool __alloc(struct io_context$ * ctx, __u32 idxs[], __u32 want) {
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask  = *sq.mask;
		__u32 fhead = sq.free_ring.head; // get the current head of the queue
		__u32 ftail = sq.free_ring.tail; // get the current tail of the queue

		// If we don't have enough sqes, fail
		if((ftail - fhead) < want) { return false; }

		// copy all the indexes we want from the available list
		for(i; want) {
			// __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
			idxs[i] = sq.free_ring.array[(fhead + i) & mask];
		}

		// Advance the head to mark the indexes as consumed
		__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

		// return success
		return true;
	}
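
	// Note on the free-ring arithmetic above: head and tail are free-running unsigned counters,
	// masked only on array access, so `ftail - fhead` counts the available entries even across
	// wrap-around. A concrete (hypothetical) example with 32-bit counters:
	//
	//     fhead = 0xFFFFFFFE, ftail = 0x00000002
	//     ftail - fhead == 4                   // unsigned subtraction wraps correctly
	//     slot for i    == (fhead + i) & mask  // masking picks the actual array index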

	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe,
	// where sqe == &sqes[idx]
	struct io_context$ * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
		// __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

		disable_interrupts();
		struct processor * proc = __cfaabi_tls.this_processor;
		io_context$ * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		// __cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

		// We can proceed to the fast path
		if( __alloc(ctx, idxs, want) ) {
			// Allocation was successful
			__STATS__( true, io.alloc.fast += 1; )
			enable_interrupts();

			// __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

			__fill( sqes, want, idxs, ctx );
			return ctx;
		}

		// The fast path failed, fallback on arbitration
		__STATS__( true, io.alloc.fail += 1; )
		__STATS__( true, io.alloc.slow += 1; )
		enable_interrupts();

		io_arbiter$ * ioarb = proc->cltr->io.arbiter;
		/* paranoid */ verify( ioarb );

		// __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

		struct io_context$ * ret = __ioarbiter_allocate(*ioarb, idxs, want);

		// __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);

		__fill( sqes, want, idxs, ret );
		return ret;
	}

	//=============================================================================================
	// submission
	// barebones logic to submit a group of sqes
	// `lock` is true when the caller already holds ext_sq.lock
	static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
		if(!lock)
			lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );

		// Get the right objects
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask  = *sq.mask;
		__u32 tail = *sq.kring.tail;

		// Add the sqes to the array
		for( i; have ) {
			// __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
			sq.kring.array[ (tail + i) & mask ] = idxs[i];
		}

		// Make the sqes visible to the submitter
		__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
		__atomic_fetch_add(&sq.to_submit, have, __ATOMIC_SEQ_CST);

		// set the bits to mark that things need to be flushed
		__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
		__atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED);

		if(!lock)
			unlock( ctx->ext_sq.lock );
	}
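
	// Note on the ordering above: the consumer of this ring (the kernel, entered via
	// ioring_syscsll) reads kring.tail and then the array entries before it, so the array
	// stores must become visible no later than the new tail value; the __ATOMIC_RELEASE store
	// provides exactly that guarantee. This is editorial commentary on the standard io_uring
	// SQ-ring protocol, not additional logic.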

	// submission logic + maybe flushing
	static inline void __submit( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy) {
		__sub_ring_t & sq = ctx->sq;
		__submit_only(ctx, idxs, have, false);

		if(sq.to_submit > 30) {
			__tls_stats()->io.flush.full++;
			__cfa_io_flush( ctx->proc );
		}
		if(!lazy) {
			__tls_stats()->io.flush.eager++;
			__cfa_io_flush( ctx->proc );
		}
	}

	// entry point for submitting a group of previously allocated sqes
	// might require arbitration if the thread was migrated after the allocation
	void cfa_io_submit( struct io_context$ * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
		// __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");

		disable_interrupts();
		__STATS__( true, if(!lazy) io.submit.eagr += 1; )
		struct processor * proc = __cfaabi_tls.this_processor;
		io_context$ * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		// Can we proceed to the fast path?
		if( ctx == inctx ) // We have the right instance?
		{
			// yes! fast submit
			__submit(ctx, idxs, have, lazy);

			// Mark the instance as no longer in-use, re-enable interrupts and return
			__STATS__( true, io.submit.fast += 1; )
			enable_interrupts();

			// __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
			return;
		}

		// Fast path failed, fallback on arbitration
		__STATS__( true, io.submit.slow += 1; )
		enable_interrupts();

		// __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

		__ioarbiter_submit(inctx, idxs, have, lazy);
	}

	//=============================================================================================
	// Flushing
	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring.
	// This cannot be done by multiple threads.
	static __u32 __release_sqes( struct io_context$ & ctx ) {
		const __u32 mask = *ctx.sq.mask;

		__attribute__((unused))
		__u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
		__u32 chead = *ctx.sq.kring.head;    // get the current head of the queue
		__u32 phead = ctx.sq.kring.released; // get the head the last time we were here

		__u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the free queue

		// the 3 fields are organized like this diagram,
		// except it is a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		//  phead    chead    ctail
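		//
		// a concrete (hypothetical) example: with released = 10, kring.head = 14 and
		// kring.tail = 16, the kernel has consumed entries 10..13, so count = chead - phead = 4
		// indexes are recycled onto the free ring and released is then advanced to 14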

		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		if(count == 0) {
			return 0;
		}

		// We acquired a previous-head/current-head range
		// go through the range and release the sqes
		for( i; count ) {
			// __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
			__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
			ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
		}

		ctx.sq.kring.released = chead; // note up to where we processed
		__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

		// notify the allocator that new allocations can be made
		__ioarbiter_notify(ctx);

		return count;
	}

//=============================================================================================
// I/O Arbiter
//=============================================================================================
	static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
		bool was_empty;

		// Lock the list, it's not thread safe
		lock( queue.lock __cfaabi_dbg_ctx2 );
		{
			was_empty = empty(queue.queue);

			// Add our request to the list
			add( queue.queue, item );

			// Mark as pending
			__atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
		}
		unlock( queue.lock );

		return was_empty;
	}

	static inline bool empty(__outstanding_io_queue & queue ) {
		return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
	}

	static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want ) {
		// __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

		__STATS__( false, io.alloc.block += 1; )

		// No one has any resources left, wait for something to finish
		// We need to add ourselves to a list of pending allocs and wait for an answer
		__pending_alloc pa;
		pa.idxs = idxs;
		pa.want = want;

		enqueue(this.pending, (__outstanding_io&)pa);

		wait( pa.waitctx );

		return pa.ctx;
	}

	// notify the arbiter that new allocations are available
	static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
		/* paranoid */ verify( !empty(this.pending.queue) );
		/* paranoid */ verify( __preemption_enabled() );

		// mutual exclusion is needed
		lock( this.pending.lock __cfaabi_dbg_ctx2 );
		{
			__cfadbg_print_safe(io, "Kernel I/O : notifying\n");

			// as long as there are pending allocations try to satisfy them
			// for simplicity do it in FIFO order
			while( !empty(this.pending.queue) ) {
				// get the first pending alloc
				__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
				__pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );

				// if we don't have enough free sqes to satisfy the request, stop
				if( have < pa.want ) goto DONE;

				// there are enough allocations, so we can drop the request from the queue
				drop( this.pending.queue );

				// actually do the alloc
				/* paranoid */ __attribute__((unused)) bool ret = __alloc(ctx, pa.idxs, pa.want);
				/* paranoid */ verify( ret );

				// write out which context satisfied the request and post it
				pa.ctx = ctx;
				post( pa.waitctx );
			}

			this.pending.empty = true;
			DONE:;
		}
		unlock( this.pending.lock );

		/* paranoid */ verify( __preemption_enabled() );
	}

	// shorthand that avoids the mutual exclusion when the pending queue is empty
	static void __ioarbiter_notify( io_context$ & ctx ) {
		if(empty( ctx.arbiter->pending )) return;
		__ioarbiter_notify( *ctx.arbiter, &ctx );
	}

	// Submit from outside the local processor: append to the outstanding list
	static void __ioarbiter_submit( io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy ) {
		__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);
		__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

		// create the intrusive object to append
		__external_io ei;
		ei.idxs = idxs;
		ei.have = have;
		ei.lazy = lazy;

		// enqueue the io
		bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);

		// mark pending
		__atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);

		// if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
		// if it's not the first enqueue, a signal is already in transit
		if( we ) {
			sigval_t value = { PREEMPT_IO };
			__cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
			__STATS__( false, io.flush.signal += 1; )
		}
		__STATS__( false, io.submit.extr += 1; )

		// to avoid dynamic allocation/memory reclamation headaches, wait for it to have been submitted
		wait( ei.waitctx );

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
	}

	// flush the io arbiter: move all external io operations to the submission ring
	static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
		// if there are no external operations just return
		if(empty( ctx.ext_sq )) return;

		// stats and logs
		__STATS__( false, io.flush.external += 1; )
		__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

		// this can happen from multiple processors, mutual exclusion is needed
		lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
		{
			// pop each operation one at a time.
			// There is no wait morphing because of the io sq ring
			while( !empty(ctx.ext_sq.queue) ) {
				// drop the element from the queue
				__external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );

				// submit it
				__submit_only(&ctx, ei.idxs, ei.have, true);

				// wake the thread that was waiting on it
				// since this can be called both from kernel and user context, check the flag before posting
				__post( ei.waitctx, kernel, UNPARK_LOCAL );
			}

			// mark the queue as empty
			ctx.ext_sq.empty = true;
			ctx.sq.last_external = true;
		}
		unlock( ctx.ext_sq.lock );
	}

	extern "C" {
		// debug functions used for gdb
		// io_uring doesn't yet support gdb, so the kernel-shared data structures aren't viewable in gdb
		// these functions read the data that gdb can't and should be removed once support is added
		static __u32 __cfagdb_cq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.head; }
		static __u32 __cfagdb_cq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.tail; }
		static __u32 __cfagdb_cq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.mask; }
		static __u32 __cfagdb_sq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.head; }
		static __u32 __cfagdb_sq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.tail; }
		static __u32 __cfagdb_sq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.mask; }

		// fancier version that reads an sqe and copies it out.
		static struct io_uring_sqe __cfagdb_sq_at( io_context$ * ctx, __u32 at ) __attribute__((nonnull(1),used,noinline)) {
			__u32 ax = at & *ctx->sq.mask;
			__u32 ix = ctx->sq.kring.array[ax];
			return ctx->sq.sqes[ix];
		}
	}
#endif