//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io/types.hfa -- PRIVATE
// Types used by the I/O subsystem
//
// Author           : Thierry Delisle
// Created On       : Fri Jul 31 16:22:47 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

---|
| 17 | #pragma once |
---|
| 18 | |
---|
[930e57e] | 19 | extern "C" { |
---|
| 20 | #include <linux/types.h> |
---|
| 21 | } |
---|
[4998155] | 22 | |
---|
[9db2c92] | 23 | #include "bits/locks.hfa" |
---|
[11054eb] | 24 | #include "bits/queue.hfa" |
---|
[454f478] | 25 | #include "kernel/fwd.hfa" |
---|
[3e2b9c9] | 26 | |
---|
[930e57e] | 27 | #if defined(CFA_HAVE_LINUX_IO_URING_H) |
---|
[78da4ab] | 28 | #include "bits/sequence.hfa" |
---|
| 29 | #include "monitor.hfa" |
---|
[2fafe7e] | 30 | |
---|
[78da4ab] | 31 | struct processor; |
---|
| 32 | monitor $io_arbiter; |
---|
[2fafe7e] | 33 | |
---|
[3e2b9c9] | 34 | //----------------------------------------------------------------------- |
---|
| 35 | // Ring Data structure |
---|
[78da4ab] | 36 | struct __sub_ring_t { |
---|
| 37 | struct { |
---|
| 38 | // Head and tail of the ring (associated with array) |
---|
| 39 | volatile __u32 * head; // one passed last index consumed by the kernel |
---|
| 40 | volatile __u32 * tail; // one passed last index visible to the kernel |
---|
| 41 | volatile __u32 released; // one passed last index released back to the free list |
---|
| 42 | |
---|
| 43 | // The actual kernel ring which uses head/tail |
---|
| 44 | // indexes into the sqes arrays |
---|
| 45 | __u32 * array; |
---|
| 46 | } kring; |
---|
| 47 | |
---|
| 48 | struct { |
---|
| 49 | volatile __u32 head; |
---|
| 50 | volatile __u32 tail; |
---|
| 51 | // The ring which contains free allocations |
---|
| 52 | // indexes into the sqes arrays |
---|
| 53 | __u32 * array; |
---|
| 54 | } free_ring; |
---|
| 55 | |
---|
| 56 | // number of sqes to submit on next system call. |
---|
| 57 | __u32 to_submit; |
---|
[3e2b9c9] | 58 | |
---|
| 59 | // number of entries and mask to go with it |
---|
[4998155] | 60 | const __u32 * num; |
---|
| 61 | const __u32 * mask; |
---|
[3e2b9c9] | 62 | |
---|
[78da4ab] | 63 | // Submission flags, currently only IORING_SETUP_SQPOLL |
---|
[4998155] | 64 | __u32 * flags; |
---|
[3e2b9c9] | 65 | |
---|
[78da4ab] | 66 | // number of sqes not submitted |
---|
| 67 | // From documentation : [dropped] is incremented for each invalid submission queue entry encountered in the ring buffer. |
---|
[4998155] | 68 | __u32 * dropped; |
---|
[3e2b9c9] | 69 | |
---|
| 70 | // A buffer of sqes (not the actual ring) |
---|
[78da4ab] | 71 | struct io_uring_sqe * sqes; |
---|
[3e2b9c9] | 72 | |
---|
| 73 | // The location and size of the mmaped area |
---|
| 74 | void * ring_ptr; |
---|
| 75 | size_t ring_sz; |
---|
| 76 | }; |
---|
| 77 | |
---|
[78da4ab] | 78 | struct __cmp_ring_t { |
---|
[3e2b9c9] | 79 | // Head and tail of the ring |
---|
[4998155] | 80 | volatile __u32 * head; |
---|
| 81 | volatile __u32 * tail; |
---|
[3e2b9c9] | 82 | |
---|
| 83 | // number of entries and mask to go with it |
---|
[4998155] | 84 | const __u32 * mask; |
---|
| 85 | const __u32 * num; |
---|
[3e2b9c9] | 86 | |
---|
[78da4ab] | 87 | // I don't know what this value is for |
---|
[4998155] | 88 | __u32 * overflow; |
---|
[3e2b9c9] | 89 | |
---|
| 90 | // the kernel ring |
---|
[426f60c] | 91 | volatile struct io_uring_cqe * cqes; |
---|
[3e2b9c9] | 92 | |
---|
| 93 | // The location and size of the mmaped area |
---|
| 94 | void * ring_ptr; |
---|
| 95 | size_t ring_sz; |
---|
| 96 | }; |
---|
| 97 | |
---|
[11054eb] | 98 | struct __outstanding_io { |
---|
| 99 | inline Colable; |
---|
| 100 | single_sem sem; |
---|
| 101 | }; |
---|
| 102 | static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); } |
---|
| 103 | |
---|
| 104 | struct __outstanding_io_queue { |
---|
| 105 | __spinlock_t lock; |
---|
| 106 | Queue(__outstanding_io) queue; |
---|
| 107 | volatile bool empty; |
---|
| 108 | }; |
---|
| 109 | |
---|
| 110 | struct __external_io { |
---|
| 111 | inline __outstanding_io; |
---|
| 112 | __u32 * idxs; |
---|
| 113 | __u32 have; |
---|
| 114 | bool lazy; |
---|
| 115 | }; |
---|
| 116 | |
---|
| 117 | |
---|
[78da4ab] | 118 | struct __attribute__((aligned(128))) $io_context { |
---|
| 119 | $io_arbiter * arbiter; |
---|
[dddb3dd0] | 120 | processor * proc; |
---|
[78da4ab] | 121 | |
---|
[11054eb] | 122 | __outstanding_io_queue ext_sq; |
---|
[78da4ab] | 123 | |
---|
| 124 | struct __sub_ring_t sq; |
---|
| 125 | struct __cmp_ring_t cq; |
---|
[4998155] | 126 | __u32 ring_flags; |
---|
[3e2b9c9] | 127 | int fd; |
---|
[78da4ab] | 128 | }; |
---|
| 129 | |
---|
[11054eb] | 130 | struct __pending_alloc { |
---|
| 131 | inline __outstanding_io; |
---|
| 132 | __u32 * idxs; |
---|
| 133 | __u32 want; |
---|
| 134 | $io_context * ctx; |
---|
| 135 | }; |
---|
| 136 | |
---|
| 137 | struct __attribute__((aligned(128))) $io_arbiter { |
---|
| 138 | __outstanding_io_queue pending; |
---|
[3e2b9c9] | 139 | }; |
---|
| 140 | |
---|
| 141 | //----------------------------------------------------------------------- |
---|
| 142 | // Misc |
---|
| 143 | // Weirdly, some systems that do support io_uring don't actually define these |
---|
| 144 | #ifdef __alpha__ |
---|
| 145 | /* |
---|
| 146 | * alpha is the only exception, all other architectures |
---|
| 147 | * have common numbers for new system calls. |
---|
| 148 | */ |
---|
| 149 | #ifndef __NR_io_uring_setup |
---|
| 150 | #define __NR_io_uring_setup 535 |
---|
| 151 | #endif |
---|
| 152 | #ifndef __NR_io_uring_enter |
---|
| 153 | #define __NR_io_uring_enter 536 |
---|
| 154 | #endif |
---|
| 155 | #ifndef __NR_io_uring_register |
---|
| 156 | #define __NR_io_uring_register 537 |
---|
| 157 | #endif |
---|
| 158 | #else /* !__alpha__ */ |
---|
| 159 | #ifndef __NR_io_uring_setup |
---|
| 160 | #define __NR_io_uring_setup 425 |
---|
| 161 | #endif |
---|
| 162 | #ifndef __NR_io_uring_enter |
---|
| 163 | #define __NR_io_uring_enter 426 |
---|
| 164 | #endif |
---|
| 165 | #ifndef __NR_io_uring_register |
---|
| 166 | #define __NR_io_uring_register 427 |
---|
| 167 | #endif |
---|
| 168 | #endif |
---|
| 169 | |
---|
[dddb3dd0] | 170 | // void __ioctx_prepare_block($io_context & ctx); |
---|
[930e57e] | 171 | #endif |
---|
| 172 | |
---|
| 173 | //----------------------------------------------------------------------- |
---|
| 174 | // IO user data |
---|
| 175 | struct io_future_t { |
---|
| 176 | future_t self; |
---|
| 177 | __s32 result; |
---|
| 178 | }; |
---|
| 179 | |
---|
| 180 | static inline { |
---|
[e84ab3d] | 181 | thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) { |
---|
[930e57e] | 182 | this.result = result; |
---|
[a76efc8] | 183 | return fulfil(this.self, do_unpark); |
---|
[930e57e] | 184 | } |
---|
| 185 | |
---|
| 186 | // Wait for the future to be fulfilled |
---|
[7ef162b2] | 187 | bool wait ( io_future_t & this ) { return wait (this.self); } |
---|
| 188 | void reset ( io_future_t & this ) { return reset (this.self); } |
---|
| 189 | bool available( io_future_t & this ) { return available(this.self); } |
---|
[930e57e] | 190 | } |
---|