source: libcfa/src/concurrency/io.cfa @ 466787a

Last change on this file since 466787a was 26544f9, checked in by Thierry Delisle <tdelisle@…>, 2 years ago

added helping and lock to allow remote processors to flush unresponsive procs

[ecf6b46]1//
2// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// io.cfa --
8//
9// Author           : Thierry Delisle
10// Created On       : Thu Apr 23 17:31:00 2020
11// Last Modified By :
12// Last Modified On :
13// Update Count     :
14//
15
[3e2b9c9]16#define __cforall_thread__
[43784ac]17#define _GNU_SOURCE
[3e2b9c9]18
[20ab637]19#if defined(__CFA_DEBUG__)
[d60d30e]20        // #define __CFA_DEBUG_PRINT_IO__
21        // #define __CFA_DEBUG_PRINT_IO_CORE__
[20ab637]22#endif
[4069faad]23
[f6660520]24
[3e2b9c9]25#if defined(CFA_HAVE_LINUX_IO_URING_H)
[31bb2e1]26        #include <errno.h>
[3e2b9c9]27        #include <signal.h>
[31bb2e1]28        #include <stdint.h>
29        #include <string.h>
30        #include <unistd.h>
31
[92976d9]32        extern "C" {
33                #include <sys/syscall.h>
[dddb3dd0]34                #include <sys/eventfd.h>
[d3605f8]35                #include <sys/uio.h>
[92976d9]36
37                #include <linux/io_uring.h>
38        }
39
[3e2b9c9]40        #include "stats.hfa"
41        #include "kernel.hfa"
42        #include "kernel/fwd.hfa"
[708ae38]43        #include "kernel/private.hfa"
[78a580d]44        #include "kernel/cluster.hfa"
[3e2b9c9]45        #include "io/types.hfa"
[185efe6]46
[2fab24e3]47        __attribute__((unused)) static const char * opcodes[] = {
[426f60c]48                "OP_NOP",
49                "OP_READV",
50                "OP_WRITEV",
51                "OP_FSYNC",
52                "OP_READ_FIXED",
53                "OP_WRITE_FIXED",
54                "OP_POLL_ADD",
55                "OP_POLL_REMOVE",
56                "OP_SYNC_FILE_RANGE",
57                "OP_SENDMSG",
58                "OP_RECVMSG",
59                "OP_TIMEOUT",
60                "OP_TIMEOUT_REMOVE",
61                "OP_ACCEPT",
62                "OP_ASYNC_CANCEL",
63                "OP_LINK_TIMEOUT",
64                "OP_CONNECT",
65                "OP_FALLOCATE",
66                "OP_OPENAT",
67                "OP_CLOSE",
68                "OP_FILES_UPDATE",
69                "OP_STATX",
70                "OP_READ",
71                "OP_WRITE",
72                "OP_FADVISE",
73                "OP_MADVISE",
74                "OP_SEND",
75                "OP_RECV",
76                "OP_OPENAT2",
77                "OP_EPOLL_CTL",
78                "OP_SPLICE",
79                "OP_PROVIDE_BUFFERS",
80                "OP_REMOVE_BUFFERS",
81                "OP_TEE",
82                "INVALID_OP"
83        };
84
[8bee858]85        static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want );
86        static void __ioarbiter_submit( io_context$ * , __u32 idxs[], __u32 have, bool lazy );
[26544f9]87        static void __ioarbiter_flush ( io_context$ &, bool kernel );
[8bee858]88        static inline void __ioarbiter_notify( io_context$ & ctx );
[92976d9]89//=============================================================================================
90// I/O Polling
91//=============================================================================================
[8bee858]92        static inline unsigned __flush( struct io_context$ & );
93        static inline __u32 __release_sqes( struct io_context$ & );
[24e321c]94        extern void __kernel_unpark( thread$ * thrd, unpark_hint );
[1d5e4711]95
[26544f9]96        static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
97                thread$ * t = post( this, false );
98                if(kernel) __kernel_unpark( t, hint );
99                else unpark( t, hint );
100        }
101
102                // actual system call of io_uring
103                // wrap so that everything that needs to happen around it is always done,
104                //   i.e., stats, bookkeeping, sqe reclamation, etc.
[8bee858]105        static void ioring_syscsll( struct io_context$ & ctx, unsigned int min_comp, unsigned int flags ) {
[18f7858]106                __STATS__( true, io.calls.flush++; )
[bdfd0bd]107                int ret;
108                for() {
[26544f9]109                        // do the system call in a loop, repeat on interrupts
[bdfd0bd]110                        ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
111                        if( ret < 0 ) {
112                                switch((int)errno) {
113                                case EINTR:
114                                        continue;
115                                case EAGAIN:
116                                case EBUSY:
117                                        // Update statistics
118                                        __STATS__( false, io.calls.errors.busy ++; )
119                                        return;
120                                default:
121                                        abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
122                                }
[18f7858]123                        }
[bdfd0bd]124                        break;
[18f7858]125                }
126
127                __cfadbg_print_safe(io, "Kernel I/O : %u submitted to io_uring %d\n", ret, ctx.fd);
128                __STATS__( true, io.calls.submitted += ret; )
129                /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
130                /* paranoid */ verify( ctx.sq.to_submit >= ret );
131
[26544f9]132                // keep track of how many still need submitting
133                __atomic_fetch_sub(&ctx.sq.to_submit, ret, __ATOMIC_SEQ_CST);
[18f7858]134
135                /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
136
137                // Release the consumed SQEs
138                __release_sqes( ctx );
139
[dddb3dd0]140                /* paranoid */ verify( ! __preemption_enabled() );
[6f121b8]141
[26544f9]142                // mark that there is no pending io left
[18f7858]143                __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
144        }
145
[26544f9]146        // try to acquire an io context for draining; because of helping we never *need* to drain, we can always do it later
[8bee858]147        static bool try_acquire( io_context$ * ctx ) __attribute__((nonnull(1))) {
[18f7858]148                /* paranoid */ verify( ! __preemption_enabled() );
149                /* paranoid */ verify( ready_schedule_islocked() );
[92976d9]150
[d60d30e]151
[3caf5e3]152                {
[26544f9]153                        // if there is nothing to drain there is no point in acquiring anything
[3caf5e3]154                        const __u32 head = *ctx->cq.head;
155                        const __u32 tail = *ctx->cq.tail;
156
157                        if(head == tail) return false;
158                }
[c1c95b1]159
[26544f9]160                // try a simple spinlock acquire, it's likely there are completions to drain
161                if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
162                        // some other processor already has it
[54c1196]163                        __STATS__( false, io.calls.locked++; )
[4ecc35a]164                        return false;
165                }
166
[26544f9]167                // acquired!!
[18f7858]168                return true;
169        }
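
        // Illustrative sketch (not part of the original file): the cq.try_lock used above behaves like a
        // single-word test-and-set spinlock built on the gcc atomic builtins. The real __atomic_try_acquire
        // and __atomic_unlock helpers are defined elsewhere in libcfa; the names below are hypothetical and
        // for illustration only.
        __attribute__((unused)) static inline bool __example_try_acquire( volatile bool * flag ) {
                // true only if this call flipped the flag from false to true
                return !__atomic_test_and_set( flag, __ATOMIC_ACQUIRE );
        }
        __attribute__((unused)) static inline void __example_release( volatile bool * flag ) {
                __atomic_clear( flag, __ATOMIC_RELEASE );
        }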
170
[26544f9]171        // actually drain the completion queue
[8bee858]172        static bool __cfa_do_drain( io_context$ * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
[18f7858]173                /* paranoid */ verify( ! __preemption_enabled() );
174                /* paranoid */ verify( ready_schedule_islocked() );
[26544f9]175                /* paranoid */ verify( ctx->cq.try_lock == true );
[18f7858]176
[26544f9]177                // get all the invariants and initial state
[18f7858]178                const __u32 mask = *ctx->cq.mask;
[7affcda]179                const __u32 num  = *ctx->cq.num;
[78a580d]180                unsigned long long ts_prev = ctx->cq.ts;
[7affcda]181                unsigned long long ts_next;
[78a580d]182
[7affcda]183                // We might need to do this multiple times if more events completed than can fit in the queue.
184                for() {
185                        // re-read the head and tail in case it already changed.
[26544f9]186                        // count the difference between the two
[7affcda]187                        const __u32 head = *ctx->cq.head;
188                        const __u32 tail = *ctx->cq.tail;
189                        const __u32 count = tail - head;
190                        __STATS__( false, io.calls.drain++; io.calls.completed += count; )
[3caf5e3]191
[26544f9]192                        // for everything between head and tail, drain it
[7affcda]193                        for(i; count) {
194                                unsigned idx = (head + i) & mask;
195                                volatile struct io_uring_cqe & cqe = ctx->cq.cqes[idx];
[92976d9]196
[7affcda]197                                /* paranoid */ verify(&cqe);
[92976d9]198
[26544f9]199                                // find the future in the completion
[7affcda]200                                struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
201                                // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );
[78da4ab]202
[26544f9]203                                // don't directly fulfill the future, preemption is disabled so we need to use kernel_unpark
[7affcda]204                                __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
205                        }
206
[26544f9]207                        // update the timestamps accordingly
208                        // keep a local copy so we can update the relaxed copy
[7affcda]209                        ts_next = ctx->cq.ts = rdtscl();
[78da4ab]210
[7affcda]211                        // Mark to the kernel that the cqe has been seen
212                        // Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
213                        __atomic_store_n( ctx->cq.head, head + count, __ATOMIC_SEQ_CST );
214                        ctx->proc->idle_wctx.drain_time = ts_next;
[2d8f7b0]215
[26544f9]216                        // we finished draining the completions... unless the ring buffer was full and there are more secret completions in the kernel.
[7affcda]217                        if(likely(count < num)) break;
218
[26544f9]219                        // the ring buffer was full, there could be more stuff in the kernel.
[7affcda]220                        ioring_syscsll( *ctx, 0, IORING_ENTER_GETEVENTS);
221                }
[92976d9]222
[1e6ffb44]223                __cfadbg_print_safe(io, "Kernel I/O : %u completed age %llu\n", count, ts_next);
[e9c0b4c]224                /* paranoid */ verify( ready_schedule_islocked() );
[dddb3dd0]225                /* paranoid */ verify( ! __preemption_enabled() );
226
[26544f9]227                // everything is drained, we can release the lock
228                __atomic_unlock(&ctx->cq.try_lock);
[4ecc35a]229
[26544f9]230                // update the relaxed timestamp
[5f9c42b]231                touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next, false );
[78a580d]232
[c1c95b1]233                return true;
[92976d9]234        }
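
        // Worked example (illustrative, not part of the original file) of the masked ring arithmetic used
        // in the drain loop above, assuming a power-of-two ring of num == 8 entries, hence mask == 7.
        __attribute__((unused)) static void __example_cq_indexing(void) {
                const __u32 mask = 7;                // num - 1, valid because num is a power of two
                const __u32 head = 6, tail = 10;     // the 32-bit counters only ever increase
                const __u32 count = tail - head;     // 4 completions to drain, wrap-safe subtraction
                for(i; count) {
                        __u32 idx = (head + i) & mask;   // visits slots 6, 7, 0, 1 : the index wraps around the ring
                        (void)idx;
                }
        }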
235
[26544f9]236        // call from a processor to flush
237        // contains all the bookkeeping a proc must do, not just the barebones flushing logic
238        void __cfa_do_flush( io_context$ & ctx, bool kernel ) {
239                /* paranoid */ verify( ! __preemption_enabled() );
240
241                // flush any external requests
242                ctx.sq.last_external = false; // clear the external bit, the arbiter will reset it if needed
243                __ioarbiter_flush( ctx, kernel );
244
245                // if anything needs to be submitted, do the system call
246                if(ctx.sq.to_submit != 0) {
247                        ioring_syscsll(ctx, 0, 0);
248                }
249        }
250
251        // call from a processor to drain
252        // contains all the bookkeeping a proc must do, not just the barebones draining logic
[1756e08]253        bool __cfa_io_drain( struct processor * proc ) {
[4479890]254                bool local = false;
255                bool remote = false;
256
[26544f9]257                // make sure no one creates/destroys io contexts
[18f7858]258                ready_schedule_lock();
259
[4479890]260                cluster * const cltr = proc->cltr;
[8bee858]261                io_context$ * const ctx = proc->io.ctx;
[4479890]262                /* paranoid */ verify( cltr );
263                /* paranoid */ verify( ctx );
264
[26544f9]265                // Help if needed
[4479890]266                with(cltr->sched) {
267                        const size_t ctxs_count = io.count;
268
269                        /* paranoid */ verify( ready_schedule_islocked() );
270                        /* paranoid */ verify( ! __preemption_enabled() );
271                        /* paranoid */ verify( active_processor() == proc );
272                        /* paranoid */ verify( __shard_factor.io > 0 );
273                        /* paranoid */ verify( ctxs_count > 0 );
274                        /* paranoid */ verify( ctx->cq.id < ctxs_count );
275
276                        const unsigned this_cache = cache_id(cltr, ctx->cq.id / __shard_factor.io);
277                        const unsigned long long ctsc = rdtscl();
278
[26544f9]279                        // only help once every other time
280                        // pick a target when not helping
[b035046]281                        if(proc->io.target == UINT_MAX) {
[4479890]282                                uint64_t chaos = __tls_rand();
[26544f9]283                                // choose who to help and whether to accept helping far processors
[4479890]284                                unsigned ext = chaos & 0xff;
285                                unsigned other  = (chaos >> 8) % (ctxs_count);
286
[26544f9]287                        // if the target shares our cache, or if we are lucky ( 3 out of 256 odds ), help it
[4479890]288                                if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
289                                        proc->io.target = other;
290                                }
291                        }
292                        else {
[26544f9]293                                // a target was picked last time, help it
[4479890]294                                const unsigned target = proc->io.target;
[2af1943]295                                /* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
[26544f9]296                                // make sure the target hasn't stopped existing since last time
[18f7858]297                                HELP: if(target < ctxs_count) {
[26544f9]298                                        // calculate its age and how young it could be before we give up on helping
[31c967b]299                                        const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
300                                        const __readyQ_avg_t age = moving_average(ctsc, io.tscs[target].t.tv, io.tscs[target].t.ma, false);
[edf247b]301                                        __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
[26544f9]302                                        // is the target older than the cutoff, recall 0 is oldest and bigger ints are younger
[18f7858]303                                        if(age <= cutoff) break HELP;
304
[26544f9]305                                        // attempt to help the submission side
306                                        __cfa_do_flush( *io.data[target], true );
307
308                                        // attempt to help the completion side
309                        if(!try_acquire(io.data[target])) break HELP; // already acquired, no help needed
[18f7858]310
[26544f9]311                                        // actually help
[18f7858]312                                        if(!__cfa_do_drain( io.data[target], cltr )) break HELP;
313
[26544f9]314                                        // track we did help someone
[18f7858]315                                        remote = true;
[8a5e357]316                                        __STATS__( true, io.calls.helped++; )
[4479890]317                                }
[26544f9]318
319                                // reset the target
[b035046]320                                proc->io.target = UINT_MAX;
[4479890]321                        }
322                }
323
324                // Drain the local queue
[18f7858]325                if(try_acquire( proc->io.ctx )) {
326                        local = __cfa_do_drain( proc->io.ctx, cltr );
327                }
[4479890]328
329                /* paranoid */ verify( ready_schedule_islocked() );
330                /* paranoid */ verify( ! __preemption_enabled() );
331                /* paranoid */ verify( active_processor() == proc );
[18f7858]332
333                ready_schedule_unlock();
[26544f9]334
335                // return true if some completion entry, local or remote, was drained
[4479890]336                return local || remote;
337        }
338
[26544f9]339
340
341        // call from a processor to flush
342        // contains all the bookkeeping a proc must do, not just the barebones flushing logic
[1756e08]343        bool __cfa_io_flush( struct processor * proc ) {
[dddb3dd0]344                /* paranoid */ verify( ! __preemption_enabled() );
345                /* paranoid */ verify( proc );
346                /* paranoid */ verify( proc->io.ctx );
[1539bbd]347
[26544f9]348                __cfa_do_flush( *proc->io.ctx, false );
[61dd73d]349
[26544f9]350                // also drain since some stuff will immediately complete
[18f7858]351                return __cfa_io_drain( proc );
[61dd73d]352        }
[f6660520]353
[92976d9]354//=============================================================================================
355// I/O Submissions
356//=============================================================================================
357
[2d8f7b0]358// Submission steps :
[e46c753]359// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
[2d8f7b0]360//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
361//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
362//     need to write an allocator that allows allocating concurrently.
363//
[e46c753]364// 2 - Actually fill the submit entry; this is the only simple and straightforward step.
[2d8f7b0]365//
[e46c753]366// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
[2d8f7b0]367//     needs to reach two points of consensus at the same time:
368//     A - The order in which entries are listed in the array: no two threads may pick the
369//         same index for their entries.
370//     B - When the tail can be updated for the kernel. EVERY entry in the array between
371//         head and tail must be fully filled and should never be touched again.
//
//     (An illustrative end-to-end sketch of these steps follows cfa_io_submit below.)
372//
[78da4ab]373        //=============================================================================================
374        // Allocation
375        // for the user's convenience, fill in the sqe pointers from the indexes
[8bee858]376        static inline void __fill(struct io_uring_sqe * out_sqes[], __u32 want, __u32 idxs[], struct io_context$ * ctx)  {
[78da4ab]377                struct io_uring_sqe * sqes = ctx->sq.sqes;
378                for(i; want) {
[1e6ffb44]379                        // __cfadbg_print_safe(io, "Kernel I/O : filling loop\n");
[78da4ab]380                        out_sqes[i] = &sqes[idxs[i]];
381                }
382        }
[2489d31]383
[78da4ab]384        // Try to directly allocate from a given context
385        // Not thread-safe
[8bee858]386        static inline bool __alloc(struct io_context$ * ctx, __u32 idxs[], __u32 want) {
[78da4ab]387                __sub_ring_t & sq = ctx->sq;
388                const __u32 mask  = *sq.mask;
389                __u32 fhead = sq.free_ring.head;    // get the current head of the queue
390                __u32 ftail = sq.free_ring.tail;    // get the current tail of the queue
[2489d31]391
[78da4ab]392                // If we don't have enough sqes, fail
393                if((ftail - fhead) < want) { return false; }
[426f60c]394
[78da4ab]395                // copy all the indexes we want from the available list
396                for(i; want) {
[1e6ffb44]397                        // __cfadbg_print_safe(io, "Kernel I/O : allocating loop\n");
[78da4ab]398                        idxs[i] = sq.free_ring.array[(fhead + i) & mask];
[6f121b8]399                }
[2489d31]400
[78da4ab]401                // Advance the head to mark the indexes as consumed
402                __atomic_store_n(&sq.free_ring.head, fhead + want, __ATOMIC_RELEASE);
[df40a56]403
[78da4ab]404                // return success
405                return true;
406        }
[df40a56]407
[78da4ab]408        // Allocate a submit queue entry.
409        // The kernel cannot see these entries until they are submitted, but other threads must be
410        // able to see which entries can be used and which are already in use by another thread
411        // for convenience, return both the index and the pointer to the sqe
412        // sqe == &sqes[idx]
[8bee858]413        struct io_context$ * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
[1e6ffb44]414                // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
[df40a56]415
[78da4ab]416                disable_interrupts();
[1756e08]417                struct processor * proc = __cfaabi_tls.this_processor;
[8bee858]418                io_context$ * ctx = proc->io.ctx;
[78da4ab]419                /* paranoid */ verify( __cfaabi_tls.this_processor );
[dddb3dd0]420                /* paranoid */ verify( ctx );
[78da4ab]421
[1e6ffb44]422                // __cfadbg_print_safe(io, "Kernel I/O : attempting to fast allocation\n");
[78da4ab]423
[dddb3dd0]424                // We can proceed to the fast path
425                if( __alloc(ctx, idxs, want) ) {
426                        // Allocation was successful
427                        __STATS__( true, io.alloc.fast += 1; )
[a3821fa]428                        enable_interrupts();
[df40a56]429
[1e6ffb44]430                        // __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
[2fafe7e]431
[dddb3dd0]432                        __fill( sqes, want, idxs, ctx );
433                        return ctx;
[df40a56]434                }
[dddb3dd0]435                // The fast path failed, fallback
436                __STATS__( true, io.alloc.fail += 1; )
[df40a56]437
[78da4ab]438                // Fast path failed, fallback on arbitration
[d60d30e]439                __STATS__( true, io.alloc.slow += 1; )
[a3821fa]440                enable_interrupts();
[78da4ab]441
[8bee858]442                io_arbiter$ * ioarb = proc->cltr->io.arbiter;
[dddb3dd0]443                /* paranoid */ verify( ioarb );
444
[1e6ffb44]445                // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");
[78da4ab]446
[8bee858]447                struct io_context$ * ret = __ioarbiter_allocate(*ioarb, idxs, want);
[78da4ab]448
[1e6ffb44]449                // __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
[df40a56]450
[78da4ab]451                __fill( sqes, want, idxs, ret );
452                return ret;
[df40a56]453        }
454
[78da4ab]455        //=============================================================================================
456        // submission
[26544f9]457        // barebones logic to submit a group of sqes
458        static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
459                if(!lock)
460                        lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );
[78da4ab]461                // We can proceed to the fast path
462                // Get the right objects
463                __sub_ring_t & sq = ctx->sq;
464                const __u32 mask  = *sq.mask;
[dddb3dd0]465                __u32 tail = *sq.kring.tail;
[78da4ab]466
467                // Add the sqes to the array
468                for( i; have ) {
[1e6ffb44]469                        // __cfadbg_print_safe(io, "Kernel I/O : __submit loop\n");
[78da4ab]470                        sq.kring.array[ (tail + i) & mask ] = idxs[i];
[426f60c]471                }
472
[78da4ab]473                // Make the sqes visible to the submitter
[dddb3dd0]474                __atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
[26544f9]475                __atomic_fetch_add(&sq.to_submit, have, __ATOMIC_SEQ_CST);
[426f60c]476
[26544f9]477                // set the flags to mark that things need to be flushed
[d529ad0]478                __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
479                __atomic_store_n(&ctx->proc->io.dirty  , true, __ATOMIC_RELAXED);
[26544f9]480
481                if(!lock)
482                        unlock( ctx->ext_sq.lock );
[2432e8e]483        }
484
[26544f9]485        // submission logic + maybe flushing
[8bee858]486        static inline void __submit( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy) {
[2432e8e]487                __sub_ring_t & sq = ctx->sq;
[26544f9]488                __submit_only(ctx, idxs, have, false);
[2432e8e]489
[70b4aeb9]490                if(sq.to_submit > 30) {
491                        __tls_stats()->io.flush.full++;
[18f7858]492                        __cfa_io_flush( ctx->proc );
[70b4aeb9]493                }
494                if(!lazy) {
495                        __tls_stats()->io.flush.eager++;
[18f7858]496                        __cfa_io_flush( ctx->proc );
[dddb3dd0]497                }
[78da4ab]498        }
[2489d31]499
[26544f9]500        // called by a thread to submit a group of sqes
501        // might require arbitration if the thread was migrated after the allocation
[8bee858]502        void cfa_io_submit( struct io_context$ * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
[1e6ffb44]503                // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
[5dadc9b]504
[78da4ab]505                disable_interrupts();
[7ce8873]506                __STATS__( true, if(!lazy) io.submit.eagr += 1; )
[1756e08]507                struct processor * proc = __cfaabi_tls.this_processor;
[8bee858]508                io_context$ * ctx = proc->io.ctx;
[dddb3dd0]509                /* paranoid */ verify( __cfaabi_tls.this_processor );
510                /* paranoid */ verify( ctx );
[e46c753]511
[78da4ab]512                // Can we proceed to the fast path
[dddb3dd0]513                if( ctx == inctx )              // We have the right instance?
[78da4ab]514                {
[26544f9]515                        // yes! fast submit
[dddb3dd0]516                        __submit(ctx, idxs, have, lazy);
[e46c753]517
[78da4ab]518                        // Mark the instance as no longer in-use, re-enable interrupts and return
[d60d30e]519                        __STATS__( true, io.submit.fast += 1; )
[a3821fa]520                        enable_interrupts();
[ece0e80]521
[1e6ffb44]522                        // __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
[78da4ab]523                        return;
[e46c753]524                }
[d384787]525
[78da4ab]526                // Fast path failed, fallback on arbitration
[d60d30e]527                __STATS__( true, io.submit.slow += 1; )
[a3821fa]528                enable_interrupts();
[5dadc9b]529
[1e6ffb44]530                // __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
[426f60c]531
[11054eb]532                __ioarbiter_submit(inctx, idxs, have, lazy);
[78da4ab]533        }
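
        // Illustrative end-to-end sketch (not part of the original file) of the three submission steps
        // described above, using the public API defined in this file. The sqe field names come from
        // <linux/io_uring.h>. Waiting on the io_future_t directly, as shown, is an assumption made for
        // illustration; the higher-level I/O wrappers elsewhere in libcfa normally do this on the
        // caller's behalf.
        __attribute__((unused)) static void __example_submission( int fd, char buf[], __u32 count ) {
                io_future_t f;
                __u32 idx;
                struct io_uring_sqe * sqe;

                // step 1 : allocate one entry and remember which ring it came from
                io_context$ * ctx = cfa_io_allocate( &sqe, &idx, 1 );

                // step 2 : fill the entry; it may hold stale data from a previous use, so clear it first,
                //          and stash the future so the drain loop can fulfil it
                memset( sqe, 0, sizeof(struct io_uring_sqe) );
                sqe->opcode    = IORING_OP_READ;
                sqe->fd        = fd;
                sqe->addr      = (__u64)(uintptr_t)buf;
                sqe->len       = count;
                sqe->user_data = (__u64)(uintptr_t)&f;

                // step 3 : publish the index to the kernel-visible array and flush eagerly
                cfa_io_submit( ctx, &idx, 1, false );

                // block until the completion is drained; the cqe result is written into the future
                wait( f );
        }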
[2fab24e3]534
[78da4ab]535        //=============================================================================================
536        // Flushing
[426f60c]537        // Go through the ring's submit queue and release everything that has already been consumed
538        // by io_uring
[78da4ab]539        // This cannot be done by multiple threads
[8bee858]540        static __u32 __release_sqes( struct io_context$ & ctx ) {
[78da4ab]541                const __u32 mask = *ctx.sq.mask;
[732b406]542
[426f60c]543                __attribute__((unused))
[78da4ab]544                __u32 ctail = *ctx.sq.kring.tail;    // get the current tail of the queue
545                __u32 chead = *ctx.sq.kring.head;        // get the current head of the queue
546                __u32 phead = ctx.sq.kring.released; // get the head the last time we were here
547
548                __u32 ftail = ctx.sq.free_ring.tail;  // get the current tail of the queue
[732b406]549
[426f60c]550                // the 3 fields are organized like this diagram
551                // except it's a ring
552                // ---+--------+--------+----
553                // ---+--------+--------+----
554                //    ^        ^        ^
555                // phead    chead    ctail
556
557                // make sure ctail doesn't wrap around and reach phead
558                /* paranoid */ verify(
559                           (ctail >= chead && chead >= phead)
560                        || (chead >= phead && phead >= ctail)
561                        || (phead >= ctail && ctail >= chead)
562                );
563
564                // find the range we need to clear
[4998155]565                __u32 count = chead - phead;
[426f60c]566
[78da4ab]567                if(count == 0) {
568                        return 0;
569                }
570
[426f60c]571                // We acquired a previous-head/current-head range
572                // go through the range and release the sqes
[34b61882]573                for( i; count ) {
[1e6ffb44]574                        // __cfadbg_print_safe(io, "Kernel I/O : release loop\n");
[78da4ab]575                        __u32 idx = ctx.sq.kring.array[ (phead + i) & mask ];
576                        ctx.sq.free_ring.array[ (ftail + i) & mask ] = idx;
[34b61882]577                }
[78da4ab]578
579                ctx.sq.kring.released = chead;          // note up to were we processed
580                __atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);
581
[26544f9]582                // notify the allocator that new allocations can be made
[78da4ab]583                __ioarbiter_notify(ctx);
584
[34b61882]585                return count;
586        }
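
        // Worked example (illustrative) of the release arithmetic above: with released/phead == 20, kernel
        // head/chead == 23 and kernel tail/ctail == 25, the kernel has consumed count == chead - phead == 3
        // entries; slots (20 & mask), (21 & mask) and (22 & mask) of kring.array are recycled onto the free
        // ring, released becomes 23, and the two entries between chead and ctail are left untouched because
        // the kernel may still be reading them.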
[35285fd]587
[78da4ab]588//=============================================================================================
589// I/O Arbiter
590//=============================================================================================
[9f5a71eb]591        static inline bool enqueue(__outstanding_io_queue & queue, __outstanding_io & item) {
592                bool was_empty;
593
[11054eb]594                // Lock the list, it's not thread safe
595                lock( queue.lock __cfaabi_dbg_ctx2 );
596                {
[9f5a71eb]597                        was_empty = empty(queue.queue);
598
[11054eb]599                        // Add our request to the list
600                        add( queue.queue, item );
601
602                        // Mark as pending
603                        __atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
604                }
605                unlock( queue.lock );
606
[9f5a71eb]607                return was_empty;
[11054eb]608        }
609
610        static inline bool empty(__outstanding_io_queue & queue ) {
611                return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
612        }
613
[8bee858]614        static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want ) {
[1e6ffb44]615                // __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
[78da4ab]616
[d60d30e]617                __STATS__( false, io.alloc.block += 1; )
618
[78da4ab]619                // No one has any resources left, wait for something to finish
[11054eb]620                // We need to add ourself to a list of pending allocs and wait for an answer
621                __pending_alloc pa;
622                pa.idxs = idxs;
623                pa.want = want;
[78da4ab]624
[9f5a71eb]625                enqueue(this.pending, (__outstanding_io&)pa);
626
[a55472cc]627                wait( pa.waitctx );
[78da4ab]628
[11054eb]629                return pa.ctx;
[dddb3dd0]630
[78da4ab]631        }
632
[26544f9]633        // notify the arbiter that new allocations are available
[8bee858]634        static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
[11054eb]635                /* paranoid */ verify( !empty(this.pending.queue) );
[26544f9]636                /* paranoid */ verify( __preemption_enabled() );
[78da4ab]637
[26544f9]638                // mutual exclusion is needed
[11054eb]639                lock( this.pending.lock __cfaabi_dbg_ctx2 );
640                {
[26544f9]641                        __cfadbg_print_safe(io, "Kernel I/O : notifying\n");
642
643                        // as long as there are pending allocations try to satisfy them
644                        // for simplicity do it in FIFO order
[11054eb]645                        while( !empty(this.pending.queue) ) {
[26544f9]646                                // get the first pending alloc
[11054eb]647                                __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
648                                __pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );
[78da4ab]649
[26544f9]650                                // check if we have enough to satisfy the request
[11054eb]651                                if( have > pa.want ) goto DONE;
[26544f9]652
653                                // if there are enough allocations it means we can drop the request
[11054eb]654                                drop( this.pending.queue );
[78da4ab]655
[11054eb]656                                /* paranoid */__attribute__((unused)) bool ret =
[78da4ab]657
[26544f9]658                                // actually do the alloc
[11054eb]659                                __alloc(ctx, pa.idxs, pa.want);
660
661                                /* paranoid */ verify( ret );
662
[26544f9]663                                // write out which context satisfied the request
664                                // and post to the waiting thread
[11054eb]665                                pa.ctx = ctx;
[a55472cc]666                                post( pa.waitctx );
[11054eb]667                        }
668
669                        this.pending.empty = true;
670                        DONE:;
671                }
672                unlock( this.pending.lock );
[26544f9]673
674                /* paranoid */ verify( __preemption_enabled() );
[78da4ab]675        }
676
[26544f9]677        // shorthand that skips the mutual exclusion when the pending queue is empty
[8bee858]678        static void __ioarbiter_notify( io_context$ & ctx ) {
[26544f9]679                if(empty( ctx.arbiter->pending )) return;
680                __ioarbiter_notify( *ctx.arbiter, &ctx );
[78da4ab]681        }
682
[26544f9]683        // Submit from outside the local processor: append to the outstanding list
[8bee858]684        static void __ioarbiter_submit( io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy ) {
[78da4ab]685                __cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);
686
687                __cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);
688
[26544f9]689                // create the intrusive object to append
[11054eb]690                __external_io ei;
691                ei.idxs = idxs;
692                ei.have = have;
693                ei.lazy = lazy;
[78da4ab]694
[26544f9]695                // enqueue the io
[9f5a71eb]696                bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);
697
[26544f9]698                // mark pending
[d529ad0]699                __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);
[9f5a71eb]700
[26544f9]701                // if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
702                // if it's not the first enqueue, a signal is already in transit
[9f5a71eb]703                if( we ) {
704                        sigval_t value = { PREEMPT_IO };
[95dab9e]705                        __cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
[26544f9]706                        __STATS__( false, io.flush.signal += 1; )
[9f5a71eb]707                }
[26544f9]708                __STATS__( false, io.submit.extr += 1; )
[9f5a71eb]709
[26544f9]710                // to avoid dynamic allocation/memory reclamation headaches, wait for it to have been submitted
[a55472cc]711                wait( ei.waitctx );
[78da4ab]712
713                __cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
714        }
715
[26544f9]716        // flush the io arbiter: move all external io operations to the submission ring
717        static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
718                // if there are no external operations just return
719                if(empty( ctx.ext_sq )) return;
[d60d30e]720
[26544f9]721                // stats and logs
722                __STATS__( false, io.flush.external += 1; )
723                __cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");
[78da4ab]724
[26544f9]725                // this can happen from multiple processors, mutual exclusion is needed
726                lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
727                {
728                        // pop each operation one at a time.
729                        // There is no wait morphing because of the io sq ring
730                        while( !empty(ctx.ext_sq.queue) ) {
731                                // drop the element from the queue
732                                __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );
733
734                                // submit it
735                                __submit_only(&ctx, ei.idxs, ei.have, true);
736
737                                // wake the thread that was waiting on it
738                                // since this can be called both from the kernel and from user code, check the flag before posting
739                                __post( ei.waitctx, kernel, UNPARK_LOCAL );
740                        }
[78da4ab]741
[26544f9]742                        // mark the queue as empty
743                        ctx.ext_sq.empty = true;
744                        ctx.sq.last_external = true;
745                }
746                unlock(ctx.ext_sq.lock );
747        }
[11054eb]748
[26544f9]749        extern "C" {
750                // debug functions used for gdb
751                // io_uring doesn't yet support gdb, so the kernel-shared data structures aren't viewable in gdb
752                // these functions read the data that gdb can't and should be removed once the support is added
753                static __u32 __cfagdb_cq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.head; }
754                static __u32 __cfagdb_cq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.tail; }
755                static __u32 __cfagdb_cq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.mask; }
756                static __u32 __cfagdb_sq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.head; }
757                static __u32 __cfagdb_sq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.tail; }
758                static __u32 __cfagdb_sq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.mask; }
759
760                // fancier version that reads an sqe and copies it out.
761                static struct io_uring_sqe __cfagdb_sq_at( io_context$ * ctx, __u32 at ) __attribute__((nonnull(1),used,noinline)) {
762                        __u32 ax = at & *ctx->sq.mask;
763                        __u32 ix = ctx->sq.kring.array[ax];
764                        return ctx->sq.sqes[ix];
[11054eb]765                }
[78da4ab]766        }
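
        // Example (illustrative) of using the helpers above from gdb, assuming `ctx` holds an io_context$ *
        // in the current frame:
        //     (gdb) print __cfagdb_cq_head( ctx )
        //     (gdb) print __cfagdb_cq_tail( ctx )
        //     (gdb) print __cfagdb_sq_at( ctx, __cfagdb_sq_head( ctx ) )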
[47746a2]767#endif