source: libcfa/src/concurrency/io.cfa@ e716aec

Last change on this file since e716aec was 26544f9, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

added helping and lock to allow remote processors to flush unresponsive procs

1//
2// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// io.cfa --
8//
9// Author : Thierry Delisle
10// Created On : Thu Apr 23 17:31:00 2020
11// Last Modified By :
12// Last Modified On :
13// Update Count :
14//
15
16#define __cforall_thread__
17#define _GNU_SOURCE
18
19#if defined(__CFA_DEBUG__)
20 // #define __CFA_DEBUG_PRINT_IO__
21 // #define __CFA_DEBUG_PRINT_IO_CORE__
22#endif
23
24
25#if defined(CFA_HAVE_LINUX_IO_URING_H)
26 #include <errno.h>
27 #include <signal.h>
28 #include <stdint.h>
29 #include <string.h>
30 #include <unistd.h>
31
32 extern "C" {
33 #include <sys/syscall.h>
34 #include <sys/eventfd.h>
35 #include <sys/uio.h>
36
37 #include <linux/io_uring.h>
38 }
39
40 #include "stats.hfa"
41 #include "kernel.hfa"
42 #include "kernel/fwd.hfa"
43 #include "kernel/private.hfa"
44 #include "kernel/cluster.hfa"
45 #include "io/types.hfa"
46
47 __attribute__((unused)) static const char * opcodes[] = {
48 "OP_NOP",
49 "OP_READV",
50 "OP_WRITEV",
51 "OP_FSYNC",
52 "OP_READ_FIXED",
53 "OP_WRITE_FIXED",
54 "OP_POLL_ADD",
55 "OP_POLL_REMOVE",
56 "OP_SYNC_FILE_RANGE",
57 "OP_SENDMSG",
58 "OP_RECVMSG",
59 "OP_TIMEOUT",
60 "OP_TIMEOUT_REMOVE",
61 "OP_ACCEPT",
62 "OP_ASYNC_CANCEL",
63 "OP_LINK_TIMEOUT",
64 "OP_CONNECT",
65 "OP_FALLOCATE",
66 "OP_OPENAT",
67 "OP_CLOSE",
68 "OP_FILES_UPDATE",
69 "OP_STATX",
70 "OP_READ",
71 "OP_WRITE",
72 "OP_FADVISE",
73 "OP_MADVISE",
74 "OP_SEND",
75 "OP_RECV",
76 "OP_OPENAT2",
77 "OP_EPOLL_CTL",
78 "OP_SPLICE",
79 "OP_PROVIDE_BUFFERS",
80 "OP_REMOVE_BUFFERS",
81 "OP_TEE",
82 "INVALID_OP"
83 };
84
85 static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want );
86 static void __ioarbiter_submit( io_context$ * , __u32 idxs[], __u32 have, bool lazy );
87 static void __ioarbiter_flush ( io_context$ &, bool kernel );
88 static inline void __ioarbiter_notify( io_context$ & ctx );
89//=============================================================================================
90// I/O Polling
91//=============================================================================================
92 static inline unsigned __flush( struct io_context$ & );
93 static inline __u32 __release_sqes( struct io_context$ & );
94 extern void __kernel_unpark( thread$ * thrd, unpark_hint );
95
96 static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
97 thread$ * t = post( this, false );
98 if(kernel) __kernel_unpark( t, hint );
99 else unpark( t, hint );
100 }
101
102 // actual system call of io_uring
103 // wrapped so everything that needs to happen around it is always done,
104 // i.e., stats, bookkeeping, sqe reclamation, etc.
105 static void ioring_syscsll( struct io_context$ & ctx, unsigned int min_comp, unsigned int flags ) {
106 __STATS__( true, io.calls.flush++; )
107 int ret;
108 for() {
109 // do the system call in a loop, repeat on interrupts
110 ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
111 if( ret < 0 ) {
112 switch((int)errno) {
113 case EINTR:
114 continue;
115 case EAGAIN:
116 case EBUSY:
117 // Update statistics
118 __STATS__( false, io.calls.errors.busy ++; )
119 return; // give up for now, the submission will be retried later
120 default:
121 abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
122 }
123 }
124 break;
125 }
126
127 __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
128 __STATS__( true, io.calls.submitted += ret; )
129 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
130 /* paranoid */ verify( ctx.sq.to_submit >= ret );
131
132 // keep track of how many still need submitting
133 __atomic_fetch_sub(&ctx.sq.to_submit, ret, __ATOMIC_SEQ_CST);
134
135 /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
136
137 // Release the consumed SQEs
138 __release_sqes( ctx );
139
140 /* paranoid */ verify( ! __preemption_enabled() );
141
142 // mark that there is no pending io left
143 __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
144 }
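	// For reference, the raw system call wrapped above is documented in io_uring_enter(2) as:
	//    int io_uring_enter(unsigned int fd, unsigned int to_submit,
	//                       unsigned int min_complete, unsigned int flags,
	//                       sigset_t * sig);
	// the extra argument passed to syscall() above is the size of the signal mask (_NSIG / 8),
	// which the raw syscall expects even when the mask itself is null.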
145
146 // try to acquire an io context for draining; since helping exists we never *need* to drain, we can always do it later
147 static bool try_acquire( io_context$ * ctx ) __attribute__((nonnull(1))) {
148 /* paranoid */ verify( ! __preemption_enabled() );
149 /* paranoid */ verify( ready_schedule_islocked() );
150
151
152 {
153 // if there is nothing to drain there is no point in acquiring anything
154 const __u32 head = *ctx->cq.head;
155 const __u32 tail = *ctx->cq.tail;
156
157 if(head == tail) return false;
158 }
159
160 // try a simple spinlock acquire, it's likely there are completions to drain
161 if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
162 // some other processor already has it
163 __STATS__( false, io.calls.locked++; )
164 return false;
165 }
166
167 // acquired!!
168 return true;
169 }
170
171 // actually drain the completion
172 static bool __cfa_do_drain( io_context$ * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
173 /* paranoid */ verify( ! __preemption_enabled() );
174 /* paranoid */ verify( ready_schedule_islocked() );
175 /* paranoid */ verify( ctx->cq.try_lock == true );
176
177 // get all the invariants and initial state
178 const __u32 mask = *ctx->cq.mask;
179 const __u32 num = *ctx->cq.num;
180 unsigned long long ts_prev = ctx->cq.ts;
181 unsigned long long ts_next;
182
183 // We might need to do this multiple times if more events completed than can fit in the queue.
184 for() {
185 // re-read the head and tail in case it already changed.
186 // count the difference between the two
187 const __u32 head = *ctx->cq.head;
188 const __u32 tail = *ctx->cq.tail;
189 const __u32 count = tail - head;
190 __STATS__( false, io.calls.drain++; io.calls.completed += count; )
191
192 // for everything between head and tail, drain it
193 for(i; count) {
194 unsigned idx = (head + i) & mask;
195 volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
196
197 /* paranoid */ verify(&cqe);
198
199 // find the future in the completion
200 struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
201 // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
202
203 // don't directly fulfill the future, preemption is disabled so we need to use kernel_unpark
204 __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
205 }
206
207 // update the timestamps accordingly
208 // keep a local copy so we can update the relaxed copy
209 ts_next = ctx->cq.ts = rdtscl();
210
211 // Mark to the kernel that the cqe has been seen
212 // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
213 __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
214 ctx->proc->idle_wctx.drain_time = ts_next;
215
216 // we finished draining the completions... unless the ring buffer was full and there are more secret completions in the kernel.
217 if(likely(count < num)) break;
218
219 // the ring buffer was full, there could be more stuff in the kernel.
220 ioring_syscsll( *ctx, 0, IORING_ENTER_GETEVENTS);
221 }
222
223 __cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next);
224 /* paranoid */ verify( ready_schedule_islocked() );
225 /* paranoid */ verify( ! __preemption_enabled() );
226
227 // everything is drained, we can release the lock
228 __atomic_unlock(&ctx->cq.try_lock);
229
230 // update the relaxed timestamp
231 touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next, false );
232
233 return true;
234 }
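	// Note on the index arithmetic above: head and tail are free-running __u32 counters, so
	// `tail - head` stays correct across wraparound thanks to unsigned arithmetic, and the actual
	// slot is always `counter & mask` (io_uring rings are powers of two, so mask == num - 1).
	// For example, with num == 256, head == 0xFFFFFFFE and tail == 0x00000002 give count == 4
	// and slots 254, 255, 0, 1.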
235
236 // call from a processor to flush
237 // contains all the bookkeeping a proc must do, not just the barebones flushing logic
238 void __cfa_do_flush( io_context$ & ctx, bool kernel ) {
239 /* paranoid */ verify( ! __preemption_enabled() );
240
241 // flush any external requests
242 ctx.sq.last_external = false; // clear the external bit, the arbiter will reset it if needed
243 __ioarbiter_flush( ctx, kernel );
244
245 // if anything needs to be submitted, do the system call
246 if(ctx.sq.to_submit != 0) {
247 ioring_syscsll(ctx, 0, 0);
248 }
249 }
250
251 // call from a processor to drain
252 // contains all the bookkeeping a proc must do, not just the barebones draining logic
253 bool __cfa_io_drain( struct processor * proc ) {
254 bool local = false;
255 bool remote = false;
256
257 // make sure no one creates/destroys io contexts
258 ready_schedule_lock();
259
260 cluster * const cltr = proc->cltr;
261 io_context$ * const ctx = proc->io.ctx;
262 /* paranoid */ verify( cltr );
263 /* paranoid */ verify( ctx );
264
265 // Help if needed
266 with(cltr->sched) {
267 const size_t ctxs_count = io.count;
268
269 /* paranoid */ verify( ready_schedule_islocked() );
270 /* paranoid */ verify( ! __preemption_enabled() );
271 /* paranoid */ verify( active_processor() == proc );
272 /* paranoid */ verify( __shard_factor.io > 0 );
273 /* paranoid */ verify( ctxs_count > 0 );
274 /* paranoid */ verify( ctx->cq.id < ctxs_count );
275
276 const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
277 const unsigned long long ctsc = rdtscl();
278
279 // only help once every other call:
280 // when no target is picked yet, pick one; otherwise help the target picked last time
281 if(proc->io.target == UINT_MAX) {
282 uint64_t chaos = __tls_rand();
283 // choose who to help and whether to accept helping far processors
284 unsigned ext = chaos & 0xff;
285 unsigned other = (chaos >> 8) % (ctxs_count);
286
287 // if the target shares our cache, or we are lucky ( 3 out of 256 odds ), pick it for helping
288 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
289 proc->io.target = other;
290 }
291 }
292 else {
293 // a target was picked last time, help it
294 const unsigned target = proc->io.target;
295 /* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
296 // make sure the target hasn't stopped existing since last time
297 HELP: if(target < ctxs_count) {
298 // calculate its age and how young it can be before we give up on helping
299 const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
300 const __readyQ_avg_t age = moving_average(ctsc, io.tscs[target].t.tv, io.tscs[target].t.ma, false);
301 __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
302 // is the target older than the cutoff, recall 0 is oldest and bigger ints are younger
303 if(age <= cutoff) break HELP;
304
305 // attempt to help the submission side
306 __cfa_do_flush( *io.data[target], true );
307
308 // attempt to help the completion side
309 if(!try_acquire(io.data[target])) break HELP; // already acquired, no help needed
310
311 // actually help
312 if(!__cfa_do_drain( io.data[target], cltr )) break HELP;
313
314 // track we did help someone
315 remote = true;
316 __STATS__( true, io.calls.helped++; )
317 }
318
319 // reset the target
320 proc->io.target = UINT_MAX;
321 }
322 }
323
324 // Drain the local queue
325 if(try_acquire( proc->io.ctx )) {
326 local = __cfa_do_drain( proc->io.ctx, cltr );
327 }
328
329 /* paranoid */ verify( ready_schedule_islocked() );
330 /* paranoid */ verify( ! __preemption_enabled() );
331 /* paranoid */ verify( active_processor() == proc );
332
333 ready_schedule_unlock();
334
335 // return true if some completion entry, local or remote, was drained
336 return local || remote;
337 }
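	// Summary of the helping scheme above: calls alternate between picking a help target and
	// acting on it. A target is only recorded when it shares this processor's cache or on a
	// 3-in-256 coin flip; on the following call that target has its submission side flushed and
	// its completion queue drained, provided the age/cutoff check says helping is warranted.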
338
339
340
341 // call from a processor to flush
342 // contains all the bookkeeping a proc must do, not just the barebones flushing logic
343 bool __cfa_io_flush( struct processor * proc ) {
344 /* paranoid */ verify( ! __preemption_enabled() );
345 /* paranoid */ verify( proc );
346 /* paranoid */ verify( proc->io.ctx );
347
348 __cfa_do_flush( *proc->io.ctx, false );
349
350 // also drain since some stuff will immediately complete
351 return __cfa_io_drain( proc );
352 }
353
354//=============================================================================================
355// I/O Submissions
356//=============================================================================================
357
358// Submission steps :
359// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
360// listed in sq.array are visible to the kernel. For those not listed, the kernel does not
361// offer any assurance that an entry is not being filled by multiple threads. Therefore, we
362// need to write an allocator that allows allocating concurrently.
363//
364// 2 - Actually fill the submit entry; this is the only simple and straightforward step.
365//
366// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
367// needs to reach two consensus points at the same time:
368// A - The order in which entries are listed in the array: no two threads may pick the
369// same index for their entries.
370// B - When the tail can be updated for the kernel. EVERY entry in the array between
371// head and tail must be fully filled and must never be touched again (as sketched below).
372//
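// An illustrative sketch (not part of this file) of how a higher-level wrapper might drive the
// three steps above through the public cfa_io_allocate/cfa_io_submit defined below; the opcode,
// sqe fields and the wait on the future are assumptions made for the example only:
//
//    io_future_t f;
//    struct io_uring_sqe * sqe;
//    __u32 idx;
//    struct io_context$ * ctx = cfa_io_allocate( &sqe, &idx, 1 );  // step 1: allocate
//    sqe->opcode    = IORING_OP_READ;                              // step 2: fill
//    sqe->fd        = fd;
//    sqe->addr      = (__u64)(uintptr_t)buf;
//    sqe->len       = len;
//    sqe->user_data = (__u64)(uintptr_t)&f;                        // drain fulfils f with cqe.res
//    cfa_io_submit( ctx, &idx, 1, false );                         // step 3: publish (eager)
//    wait( f );                                                    // block until the cqe is drained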
373 //=============================================================================================
374 // Allocation
375 // for the user's convenience, fill the sqes from the indexes
376 static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct io_context$ * ctx) {
377 struct io_uring_sqe * sqes = ctx->sq.sqes;
378 for(i; want) {
379 // __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
380 out_sqes[i] = &sqes[idxs[i]];
381 }
382 }
383
384 // Try to directly allocate from a given context
385 // Not thread-safe
386 static inline bool __alloc(struct io_context$ * ctx, __u32 idxs[], __u32 want) {
387 __sub_ring_t & sq = ctx->sq;
388 const __u32 mask = *sq.mask;
389 __u32 fhead = sq.free_ring.head; // get the current head of the queue
390 __u32 ftail = sq.free_ring.tail; // get the current tail of the queue
391
392 // If we don't have enough sqes, fail
393 if((ftail - fhead) < want) { return false; }
394
395 // copy all the indexes we want from the available list
396 for(i; want) {
397 // __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
398 idxs[i] = sq.free_ring.array[(fhead + i) & mask];
399 }
400
401 // Advance the head to mark the indexes as consumed
402 __atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);
403
404 // return success
405 return true;
406 }
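	// For example, with free_ring.head == 100 and free_ring.tail == 103 there are 3 free indexes:
	// a request for want == 4 returns false (the caller then falls back on the arbiter), while a
	// request for want == 2 hands out free_ring.array[100 & mask] and [101 & mask] and advances
	// the head to 102.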
407
408 // Allocate a submit queue entry.
409 // The kernel cannot see these entries until they are submitted, but other threads must be
410 // able to see which entries can be used and which are already in use by another thread.
411 // for convenience, return both the index and the pointer to the sqe
412 // sqe == &sqes[idx]
413 struct io_context$ * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
414 // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
415
416 disable_interrupts();
417 struct processor * proc = __cfaabi_tls.this_processor;
418 io_context$ * ctx = proc->io.ctx;
419 /* paranoid */ verify( __cfaabi_tls.this_processor );
420 /* paranoid */ verify( ctx );
421
422 // __cfadbg_print_safe(io, "Kernel I/O : attempting to fast allocation\n");
423
424 // We can proceed to the fast path
425 if( __alloc(ctx, idxs, want) ) {
426 // Allocation was successful
427 __STATS__( true, io.alloc.fast += 1; )
428 enable_interrupts();
429
430 // __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
431
432 __fill( sqes, want, idxs, ctx );
433 return ctx;
434 }
435 // The fast path failed, record the failure
436 __STATS__( true, io.alloc.fail += 1; )
437
438 // fall back on arbitration
439 __STATS__( true, io.alloc.slow += 1; )
440 enable_interrupts();
441
442 io_arbiter$ * ioarb = proc->cltr->io.arbiter;
443 /* paranoid */ verify( ioarb );
444
445 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");
446
447 struct io_context$ * ret = __ioarbiter_allocate(*ioarb, idxs, want);
448
449 // __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
450
451 __fill( sqes, want, idxs, ret );
452 return ret;
453 }
454
455 //=============================================================================================
456 // submission
457 // barebones logic to submit a group of sqes
458 static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
459 if(!lock) // take the external-submission lock unless the caller already holds it
460 lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );
461 // Get the right objects from the context
463 __sub_ring_t & sq = ctx->sq;
464 const __u32 mask = *sq.mask;
465 __u32 tail = *sq.kring.tail;
466
467 // Add the sqes to the array
468 for( i; have ) {
469 // __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
470 sq.kring.array[ (tail + i) & mask ] = idxs[i];
471 }
472
473 // Make the sqes visible for submission
474 __atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
475 __atomic_fetch_add(&sq.to_submit, have, __ATOMIC_SEQ_CST);
476
477 // set the bit to mark things need to be flushed
478 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
479 __atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED);
480
481 if(!lock) // release only if we took the lock ourselves
482 unlock( ctx->ext_sq.lock );
483 }
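	// Note: the RELEASE store on kring.tail above is what publishes the array writes; a consumer
	// that reads the new tail with acquire semantics (or the kernel, which applies its own
	// barriers when it walks the ring) is then guaranteed to also see the freshly written indexes.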
484
485 // submission logic + maybe flushing
486 static inline void __submit( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy) {
487 __sub_ring_t & sq = ctx->sq;
488 __submit_only(ctx, idxs, have, false);
489
490 if(sq.to_submit > 30) {
491 __tls_stats()->io.flush.full++;
492 __cfa_io_flush( ctx->proc );
493 }
494 if(!lazy) {
495 __tls_stats()->io.flush.eager++;
496 __cfa_io_flush( ctx->proc );
497 }
498 }
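	// Design note: a lazy submit only accumulates entries; the io_uring_enter system call is
	// deferred until either more than 30 sqes are pending, a non-lazy submit comes along, or
	// __cfa_io_flush is otherwise called for this processor.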
499
500 // external interface to submit a group of previously allocated sqes
501 // might require arbitration if the thread was migrated after the allocation
502 void cfa_io_submit( struct io_context$ * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
503 // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
504
505 disable_interrupts();
506 __STATS__( true, if(!lazy) io.submit.eagr += 1; )
507 struct processor * proc = __cfaabi_tls.this_processor;
508 io_context$ * ctx = proc->io.ctx;
509 /* paranoid */ verify( __cfaabi_tls.this_processor );
510 /* paranoid */ verify( ctx );
511
512 // Can we proceed to the fast path
513 if( ctx == inctx ) // We have the right instance?
514 {
515 // yes! fast submit
516 __submit(ctx, idxs, have, lazy);
517
518 // Record the fast-path submission, re-enable interrupts and return
519 __STATS__( true, io.submit.fast += 1; )
520 enable_interrupts();
521
522 // __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
523 return;
524 }
525
526 // Fast path failed, fallback on arbitration
527 __STATS__( true, io.submit.slow += 1; )
528 enable_interrupts();
529
530 // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
531
532 __ioarbiter_submit(inctx, idxs, have, lazy);
533 }
534
535 //=============================================================================================
536 // Flushing
537 // Go through the ring's submit queue and release everything that has already been consumed
538 // by io_uring
539 // This cannot be done by multiple threads
540 static __u32 __release_sqes( struct io_context$ & ctx ) {
541 const __u32 mask = *ctx.sq.mask;
542
543 __attribute__((unused))
544 __u32 ctail = *ctx.sq.kring.tail; // get the current tail of the queue
545 __u32 chead = *ctx.sq.kring.head; // get the current head of the queue
546 __u32 phead = ctx.sq.kring.released; // get the head the last time we were here
547
548 __u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the queue
549
550 // the 3 fields are organized like this diagram
551 // except it's a ring
552 // ---+--------+--------+----
553 // ---+--------+--------+----
554 //    ^        ^        ^
555 // phead    chead    ctail
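	// Worked example: with phead == 10, chead == 14 and ctail == 16, the kernel has consumed
	// slots 10..13 since the last call, so count == 4, those four indexes are recycled into the
	// free ring, and released is advanced to 14.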
556
557 // make sure ctail doesn't wrap around and reach phead
558 /* paranoid */ verify(
559 (ctail >= chead && chead >= phead)
560 || (chead >= phead && phead >= ctail)
561 || (phead >= ctail && ctail >= chead)
562 );
563
564 // find the range we need to clear
565 __u32 count = chead - phead;
566
567 if(count == 0) {
568 return 0;
569 }
570
571 // We acquired a previous-head/current-head range
572 // go through the range and release the sqes
573 for( i; count ) {
574 // __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
575 __u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
576 ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
577 }
578
579 ctx.sq.kring.released = chead; // note up to where we processed
580 __atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);
581
582 // notify the allocator that new allocations can be made
583 __ioarbiter_notify(ctx);
584
585 return count;
586 }
587
588//=============================================================================================
589// I/O Arbiter
590//=============================================================================================
591 static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
592 bool was_empty;
593
594 // Lock the list, it's not thread safe
595 lock( queue.lock __cfaabi_dbg_ctx2 );
596 {
597 was_empty = empty(queue.queue);
598
599 // Add our request to the list
600 add( queue.queue, item );
601
602 // Mark as pending
603 __atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
604 }
605 unlock( queue.lock );
606
607 return was_empty;
608 }
609
610 static inline bool empty(__outstanding_io_queue & queue ) {
611 return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
612 }
613
614 static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want ) {
615 // __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
616
617 __STATS__( false, io.alloc.block += 1; )
618
619 // No one has any resources left, wait for something to finish
620 // We need to add ourselves to a list of pending allocations and wait for an answer
621 __pending_alloc pa;
622 pa.idxs = idxs;
623 pa.want = want;
624
625 enqueue(this.pending, (__outstanding_io&)pa);
626
627 wait( pa.waitctx );
628
629 return pa.ctx;
630
631 }
632
633 // notify the arbiter that new allocations are available
634 static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
635 /* paranoid */ verify( !empty(this.pending.queue) );
636 /* paranoid */ verify( __preemption_enabled() );
637
638 // mutual exclusion is needed
639 lock( this.pending.lock __cfaabi_dbg_ctx2 );
640 {
641 __cfadbg_print_safe(io, "Kernel I/O : notifying\n");
642
643 // as long as there are pending allocations try to satisfy them
644 // for simplicity do it in FIFO order
645 while( !empty(this.pending.queue) ) {
646 // get first pending allocs
647 __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
648 __pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );
649
650 // check if we have enough free entries to satisfy the request; if not, stop
651 if( have < pa.want ) goto DONE;
652
653 // if there are enough allocations it means we can drop the request
654 drop( this.pending.queue );
655
656 // actually do the alloc, recording success for the sanity check below
657 /* paranoid */ __attribute__((unused)) bool ret = __alloc(ctx, pa.idxs, pa.want);
658
659
660
661 /* paranoid */ verify( ret );
662
663 // write out which context satisfied the request and post
664 // this
665 pa.ctx = ctx;
666 post( pa.waitctx );
667 }
668
669 this.pending.empty = true;
670 DONE:;
671 }
672 unlock( this.pending.lock );
673
674 /* paranoid */ verify( __preemption_enabled() );
675 }
676
677 // shorthand: skip the arbiter notification (and its mutual exclusion) when the pending queue is empty
678 static void __ioarbiter_notify( io_context$ & ctx ) {
679 if(empty( ctx.arbiter->pending )) return;
680 __ioarbiter_notify( *ctx.arbiter, &ctx );
681 }
682
683 // Submit from outside the local processor: append to the outstanding list
684 static void __ioarbiter_submit( io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy ) {
685 __cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);
686
687 __cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);
688
689 // create the intrusive object to append
690 __external_io ei;
691 ei.idxs = idxs;
692 ei.have = have;
693 ei.lazy = lazy;
694
695 // enqueue the io
696 bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);
697
698 // mark pending
699 __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);
700
701 // if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
702 // if it's not the first enqueue, a signal is already in transit
703 if( we ) {
704 sigval_t value = { PREEMPT_IO };
705 __cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
706 __STATS__( false, io.flush.signal += 1; )
707 }
708 __STATS__( false, io.submit.extr += 1; )
709
710 // to avoid dynamic allocation/memory reclamation headaches, wait for it to have been submitted
711 wait( ei.waitctx );
712
713 __cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
714 }
715
716 // flush the io arbiter: move all external io operations to the submission ring
717 static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
718 // if there are no external operations just return
719 if(empty( ctx.ext_sq )) return;
720
721 // stats and logs
722 __STATS__( false, io.flush.external += 1; )
723 __cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");
724
725 // this can happen from multiple processors, mutual exclusion is needed
726 lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
727 {
728 // pop each operation one at a time.
729 // There is no wait morphing because of the io sq ring
730 while( !empty(ctx.ext_sq.queue) ) {
731 // drop the element from the queue
732 __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );
733
734 // submit it
735 __submit_only(&ctx, ei.idxs, ei.have, true);
736
737 // wake the thread that was waiting on it
738 // since this can both be called from kernel and user, check the flag before posting
739 __post( ei.waitctx, kernel, UNPARK_LOCAL );
740 }
741
742 // mark the queue as empty
743 ctx.ext_sq.empty = true;
744 ctx.sq.last_external = true;
745 }
746 unlock(ctx.ext_sq.lock );
747 }
748
749 extern "C" {
750 // debug functions used for gdb
751 // io_uring doesn't yet support gdb so the kernel-shared data structures aren't viewable in gdb
752 // these functions read the data that gdb can't and should be removed once the support is added
753 static __u32 __cfagdb_cq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.head; }
754 static __u32 __cfagdb_cq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.tail; }
755 static __u32 __cfagdb_cq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.mask; }
756 static __u32 __cfagdb_sq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.head; }
757 static __u32 __cfagdb_sq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.tail; }
758 static __u32 __cfagdb_sq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.mask; }
759
760 // fancier version that reads an sqe and copies it out.
761 static struct io_uring_sqe __cfagdb_sq_at( io_context$ * ctx, __u32 at ) __attribute__((nonnull(1),used,noinline)) {
762 __u32 ax = at & *ctx->sq.mask;
763 __u32 ix = ctx->sq.kring.array[ax];
764 return ctx->sq.sqes[ix];
765 }
766 }
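	// Example gdb usage of the helpers above (illustrative; `ctx` stands for whatever
	// io_context$ pointer is currently in scope):
	//    (gdb) call __cfagdb_cq_head( ctx )
	//    (gdb) call __cfagdb_sq_at( ctx, 0 )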
767#endif