//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/syscall.h>
		#include <sys/eventfd.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "kernel_private.hfa"
	#include "io/types.hfa"

	__attribute__((unused)) static const char * opcodes[] = {
		"OP_NOP",
		"OP_READV",
		"OP_WRITEV",
		"OP_FSYNC",
		"OP_READ_FIXED",
		"OP_WRITE_FIXED",
		"OP_POLL_ADD",
		"OP_POLL_REMOVE",
		"OP_SYNC_FILE_RANGE",
		"OP_SENDMSG",
		"OP_RECVMSG",
		"OP_TIMEOUT",
		"OP_TIMEOUT_REMOVE",
		"OP_ACCEPT",
		"OP_ASYNC_CANCEL",
		"OP_LINK_TIMEOUT",
		"OP_CONNECT",
		"OP_FALLOCATE",
		"OP_OPENAT",
		"OP_CLOSE",
		"OP_FILES_UPDATE",
		"OP_STATX",
		"OP_READ",
		"OP_WRITE",
		"OP_FADVISE",
		"OP_MADVISE",
		"OP_SEND",
		"OP_RECV",
		"OP_OPENAT2",
		"OP_EPOLL_CTL",
		"OP_SPLICE",
		"OP_PROVIDE_BUFFERS",
		"OP_REMOVE_BUFFERS",
		"OP_TEE",
		"INVALID_OP"
	};
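
	// Hedged usage sketch for the table above: map an sqe/cqe opcode to its name for debug
	// printing, clamping out-of-range values to the "INVALID_OP" sentinel. opcode_name is a
	// hypothetical helper, not part of this file.
	//
	//   static inline const char * opcode_name( __u8 opcode ) {
	//       const __u8 last = sizeof(opcodes) / sizeof(opcodes[0]) - 1;   // index of "INVALID_OP"
	//       return opcodes[ opcode < last ? opcode : last ];
	//   }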

	static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want );
	static void __ioarbiter_submit( $io_context * , __u32 idxs[], __u32 have, bool lazy );
	static void __ioarbiter_flush ( $io_context & );
	static inline void __ioarbiter_notify( $io_context & ctx );

	//=============================================================================================
	// I/O Polling
	//=============================================================================================
	static inline unsigned __flush( struct $io_context & );
	static inline __u32 __release_sqes( struct $io_context & );
	extern void __kernel_unpark( thread$ * thrd, unpark_hint );

	bool __cfa_io_drain( processor * proc ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		// Drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
		const __u32 mask = *ctx->cq.mask;

		__u32 count = tail - head;
		__STATS__( false, io.calls.drain++; io.calls.completed += count; )

		if(count == 0) return false;

		for(i; count) {
			unsigned idx = (head + i) & mask;
			volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];

			/* paranoid */ verify(&cqe);

			struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
			__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

			__kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);

		// Mark to the kernel that the cqes have been seen.
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );

		/* paranoid */ verify( ready_schedule_islocked() );
		/* paranoid */ verify( ! __preemption_enabled() );

		return true;
	}
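
	// Worked sketch of the head/tail arithmetic above: io_uring counters are free-running
	// __u32 values, so "tail - head" stays correct across wraparound thanks to unsigned
	// modular arithmetic, and "& mask" maps a counter onto a ring slot. Example values are
	// illustrative only.
	//
	//   __u32 head = 0xfffffffe, tail = 0x00000002; // tail has wrapped past head
	//   __u32 count = tail - head;                  // == 4, as expected
	//   __u32 slot  = (head + 1) & mask;            // ring slot of the second entry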

	bool __cfa_io_flush( processor * proc, bool wait ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc );
		/* paranoid */ verify( proc->io.ctx );

		__attribute__((unused)) cluster * cltr = proc->cltr;
		$io_context & ctx = *proc->io.ctx;

		__ioarbiter_flush( ctx );

		__STATS__( true, io.calls.flush++; )
		int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, wait ? 1 : 0, 0, (sigset_t *)0p, _NSIG / 8);
		if( ret < 0 ) {
			switch((int)errno) {
			case EAGAIN:
			case EINTR:
			case EBUSY:
				// Update statistics
				__STATS__( false, io.calls.errors.busy ++; )
				return false;
			default:
				abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
			}
		}

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
		__STATS__( true, io.calls.submitted += ret; )
		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
		/* paranoid */ verify( ctx.sq.to_submit >= ret );

		ctx.sq.to_submit -= ret;

		/* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );

		// Release the consumed SQEs
		__release_sqes( ctx );

		/* paranoid */ verify( ! __preemption_enabled() );

		ctx.proc->io.pending = false;
		ready_schedule_lock();
		bool ret = __cfa_io_drain( proc );
		ready_schedule_unlock();
		return ret;
	}

	//=============================================================================================
	// I/O Submissions
	//=============================================================================================

	// Submission steps:
	// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
	//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
	//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
	//     need to write an allocator that allows allocating concurrently.
	//
	// 2 - Actually fill the submit entry. This is the only simple and straightforward step.
	//
	// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
	//     needs to reach two consensus decisions at the same time:
	//     A - The order in which entries are listed in the array: no two threads may pick the
	//         same index for their entries.
	//     B - When the tail can be updated for the kernel: EVERY entry in the array between
	//         head and tail must be fully filled and should never be touched again
	//         (see the sketch below).
	//
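	// A hedged sketch of what consensus B demands, assuming this file's design of one ring
	// per processor: slots are filled first, then the tail is published with a release store,
	// so the kernel never observes a tail that covers partially written entries.
	//
	//   for( i; have ) sq.kring.array[ (tail + i) & mask ] = idxs[i];     // fill the slots
	//   __atomic_store_n( sq.kring.tail, tail + have, __ATOMIC_RELEASE ); // then publish
	//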
	//=============================================================================================
	// Allocation
	// For the user's convenience, fill the sqes from the indexes.
	static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct $io_context * ctx) {
		struct io_uring_sqe * sqes = ctx->sq.sqes;
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
			out_sqes[i] = &sqes[idxs[i]];
		}
	}
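
	// Hedged usage sketch: pair an index allocation with __fill so the caller gets direct
	// pointers into the sqe array. __alloc is defined just below.
	//
	//   __u32 idxs[2];
	//   struct io_uring_sqe * sqes[2];
	//   if( __alloc(ctx, idxs, 2) ) __fill( sqes, 2, idxs, ctx ); // sqes[i] == &ctx->sq.sqes[idxs[i]]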

	// Try to allocate directly from a given context.
	// Not thread-safe.
	static inline bool __alloc(struct $io_context * ctx, __u32 idxs[], __u32 want) {
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 fhead = sq.free_ring.head; // get the current head of the queue
		__u32 ftail = sq.free_ring.tail; // get the current tail of the queue

		// If we don't have enough sqes, fail
		if((ftail - fhead) < want) { return false; }

		// copy all the indexes we want from the available list
		for(i; want) {
			__cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
			idxs[i] = sq.free_ring.array[(fhead + i) & mask];
		}

		// Advance the head to mark the indexes as consumed
		__atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);

		// return success
		return true;
	}
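
	// Worked sketch of the capacity check above, with illustrative numbers only:
	//
	//   __u32 fhead = 10, ftail = 13;  // 3 recycled indexes available
	//   // want == 4: (13 - 10) < 4 -> fail, the caller falls back on the arbiter
	//   // want == 2: copy array[10 & mask] and array[11 & mask], then head becomes 12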

	// Allocate a submit queue entry.
	// The kernel cannot see these entries until they are submitted, but other threads must be
	// able to see which entries can be used and which are already in use by another thread.
	// For convenience, return both the index and the pointer to the sqe,
	// i.e. sqe == &sqes[idx].
	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		__cfadbg_print_safe(io, "Kernel I/O : attempting fast allocation\n");

		// We can proceed to the fast path
		if( __alloc(ctx, idxs, want) ) {
			// Allocation was successful
			__STATS__( true, io.alloc.fast += 1; )
			enable_interrupts();

			__cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

			__fill( sqes, want, idxs, ctx );
			return ctx;
		}
		// The fast path failed, fallback on arbitration
		__STATS__( true, io.alloc.fail += 1; )
		__STATS__( true, io.alloc.slow += 1; )
		enable_interrupts();

		$io_arbiter * ioarb = proc->cltr->io.arbiter;
		/* paranoid */ verify( ioarb );

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

		struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want);

		__cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);

		__fill( sqes, want, idxs, ret );
		return ret;
	}
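
	// Hedged end-to-end sketch of the allocate/submit pairing; cfa_io_submit below is the
	// matching second half. The future variable is a hypothetical io_future_t owned by the
	// caller.
	//
	//   __u32 idx;
	//   struct io_uring_sqe * sqe;
	//   struct $io_context * ctx = cfa_io_allocate( &sqe, &idx, 1 );
	//   sqe->opcode = IORING_OP_NOP;          // fill the entry (step 2)
	//   sqe->user_data = (uintptr_t)&future;
	//   cfa_io_submit( ctx, &idx, 1, true );  // lazy submission (step 3)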

	//=============================================================================================
	// Submission
	static inline void __submit( struct $io_context * ctx, __u32 idxs[], __u32 have, bool lazy) {
		// Get the right objects
		__sub_ring_t & sq = ctx->sq;
		const __u32 mask = *sq.mask;
		__u32 tail = *sq.kring.tail;

		// Add the sqes to the array
		for( i; have ) {
			__cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
			sq.kring.array[ (tail + i) & mask ] = idxs[i];
		}

		// Make the sqes visible to the submitter
		__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
		sq.to_submit += have;

		ctx->proc->io.pending = true;
		ctx->proc->io.dirty = true;
		if(sq.to_submit > 30 || !lazy) {
			__cfa_io_flush( ctx->proc, false );
		}
	}
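
	// Design note (hedged): the "to_submit > 30" test batches lazy submissions so many small
	// calls amortize a single io_uring_enter syscall, while eager (!lazy) callers flush right
	// away. Illustrative only:
	//
	//   for( i; 31 ) cfa_io_submit( ctx, &idxs[i], 1, true ); // the 31st call crosses the
	//                                                         // threshold and triggers a flush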

	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
		__cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");

		disable_interrupts();
		processor * proc = __cfaabi_tls.this_processor;
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		// Can we proceed to the fast path?
		if( ctx == inctx ) // We have the right instance?
		{
			__submit(ctx, idxs, have, lazy);

			// Mark the instance as no longer in-use, re-enable interrupts and return
			__STATS__( true, io.submit.fast += 1; )
			enable_interrupts();

			__cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
			return;
		}

		// Fast path failed, fallback on arbitration
		__STATS__( true, io.submit.slow += 1; )
		enable_interrupts();

		__cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

		__ioarbiter_submit(inctx, idxs, have, lazy);
	}
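
	// Why "ctx == inctx" can fail, as a hedged example: a thread can be preempted and resume
	// on a different processor between cfa_io_allocate and cfa_io_submit, so the sqes it
	// filled belong to the old processor's ring and must go through that ring's arbiter.
	//
	//   ctx = cfa_io_allocate(...); // on processor A: ctx == A->io.ctx
	//   ...                         // preemption; thread migrates to processor B
	//   cfa_io_submit( ctx, ... );  // B->io.ctx != ctx -> arbiter path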

	//=============================================================================================
	// Flushing
	// Go through the ring's submit queue and release everything that has already been consumed
	// by io_uring.
	// This cannot be done by multiple threads.
	static __u32 __release_sqes( struct $io_context & ctx ) {
		const __u32 mask = *ctx.sq.mask;

		__attribute__((unused))
		__u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
		__u32 chead = *ctx.sq.kring.head;    // get the current head of the queue
		__u32 phead = ctx.sq.kring.released; // get the head the last time we were here

		__u32 ftail = ctx.sq.free_ring.tail; // get the current tail of the free queue

		// the 3 fields are organized like this diagram
		// except it's a ring
		// ---+--------+--------+----
		// ---+--------+--------+----
		//    ^        ^        ^
		//    phead    chead    ctail

		// make sure ctail doesn't wrap around and reach phead
		/* paranoid */ verify(
			   (ctail >= chead && chead >= phead)
			|| (chead >= phead && phead >= ctail)
			|| (phead >= ctail && ctail >= chead)
		);

		// find the range we need to clear
		__u32 count = chead - phead;

		if(count == 0) {
			return 0;
		}

		// We acquired a previous-head/current-head range;
		// go through the range and release the sqes
		for( i; count ) {
			__cfadbg_print_safe(io, "Kernel I/O : release loop\n");
			__u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
			ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
		}

		ctx.sq.kring.released = chead; // note up to where we processed
		__atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

		__ioarbiter_notify(ctx);

		return count;
	}
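
	// Worked sketch of the bookkeeping above, with illustrative numbers: suppose the kernel
	// consumed four entries since the last call.
	//
	//   phead == 5 (kring.released), chead == 9 (*kring.head) -> count == 4
	//   recycle kring.array[(5+i) & mask] into free_ring.array[(ftail+i) & mask], i in 0..3
	//   kring.released = 9; free_ring.tail += 4 // the store publishes the recycled indexes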

	//=============================================================================================
	// I/O Arbiter
	//=============================================================================================
	static inline void block(__outstanding_io_queue & queue, __outstanding_io & item) {
		// Lock the list; it's not thread-safe
		lock( queue.lock __cfaabi_dbg_ctx2 );
		{
			// Add our request to the list
			add( queue.queue, item );

			// Mark as pending
			__atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
		}
		unlock( queue.lock );

		wait( item.sem );
	}
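
	// Hedged sketch of the hand-off protocol: block() parks the caller on item.sem after
	// publishing the request; the matching post( item.sem ) is issued by whichever thread
	// services the queue (__ioarbiter_notify for allocations, __ioarbiter_flush for
	// submissions).
	//
	//   requester:              servicer:
	//     add( queue, item )      item = drop( queue )
	//     empty = false           ... satisfy request ...
	//     wait( item.sem )        post( item.sem )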

	static inline bool empty(__outstanding_io_queue & queue ) {
		return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
	}

	static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) {
		__cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");

		__STATS__( false, io.alloc.block += 1; )

		// No one has any resources left, wait for something to finish.
		// We need to add ourselves to a list of pending allocs and wait for an answer.
		__pending_alloc pa;
		pa.idxs = idxs;
		pa.want = want;

		block(this.pending, (__outstanding_io&)pa);

		return pa.ctx;
	}

	static void __ioarbiter_notify( $io_arbiter & this, $io_context * ctx ) {
		/* paranoid */ verify( !empty(this.pending.queue) );

		lock( this.pending.lock __cfaabi_dbg_ctx2 );
		{
			while( !empty(this.pending.queue) ) {
				__cfadbg_print_safe(io, "Kernel I/O : notifying\n");
				__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
				__pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );

				// Requests are served FIFO; stop once the ring cannot satisfy the head request
				if( have < pa.want ) goto DONE;
				drop( this.pending.queue );

				/* paranoid */ __attribute__((unused)) bool ret = __alloc(ctx, pa.idxs, pa.want);
				/* paranoid */ verify( ret );

				pa.ctx = ctx;

				post( pa.sem );
			}

			this.pending.empty = true;
			DONE:;
		}
		unlock( this.pending.lock );
	}

	static void __ioarbiter_notify( $io_context & ctx ) {
		if(!empty( ctx.arbiter->pending )) {
			__ioarbiter_notify( *ctx.arbiter, &ctx );
		}
	}

	// Simply append to the pending queue
	static void __ioarbiter_submit( $io_context * ctx, __u32 idxs[], __u32 have, bool lazy ) {
		__cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);

		__cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

		__external_io ei;
		ei.idxs = idxs;
		ei.have = have;
		ei.lazy = lazy;

		block(ctx->ext_sq, (__outstanding_io&)ei);

		__cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
	}

	static void __ioarbiter_flush( $io_context & ctx ) {
		if(!empty( ctx.ext_sq )) {
			__STATS__( false, io.flush.external += 1; )

			__cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");

			lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
			{
				while( !empty(ctx.ext_sq.queue) ) {
					__external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );

					__submit(&ctx, ei.idxs, ei.have, ei.lazy);

					post( ei.sem );
				}

				ctx.ext_sq.empty = true;
			}
			unlock( ctx.ext_sq.lock );
		}
	}

	bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd) {
		$io_context * ctx = proc->io.ctx;
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
		/* paranoid */ verify( ctx );

		__u32 idx;
		struct io_uring_sqe * sqe;

		// We can proceed to the fast path
		if( !__alloc(ctx, &idx, 1) ) return false;

		// Allocation was successful
		__fill( &sqe, 1, &idx, ctx );

		sqe->opcode = IORING_OP_READ;
		sqe->user_data = (uintptr_t)&future;
		sqe->flags = 0;
		sqe->ioprio = 0;
		sqe->fd = fd;
		sqe->off = 0;
		sqe->fsync_flags = 0;
		sqe->__pad2[0] = 0;
		sqe->__pad2[1] = 0;
		sqe->__pad2[2] = 0;
		sqe->addr = (uintptr_t)buf;
		sqe->len = sizeof(uint64_t); // reads the 8-byte eventfd counter

		asm volatile("": : :"memory");

		/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
		__submit( ctx, &idx, 1, true );

		/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
		/* paranoid */ verify( ! __preemption_enabled() );

		return true;
	}
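
	// Hedged usage sketch: the runtime uses this to arm a read on the ring's wake-up eventfd;
	// buf must be at least 8 bytes since the sqe length is sizeof(uint64_t). The efd field
	// name and the reset call on io_future_t are assumptions for illustration.
	//
	//   uint64_t val;
	//   io_future_t future;
	//   reset( future );
	//   __kernel_read( proc, future, (char *)&val, ctx->efd );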
#endif