Changeset 26544f9 for libcfa/src/concurrency/io
- Timestamp:
- Dec 15, 2022, 12:08:44 PM (3 years ago)
- Branches:
- ADT, ast-experimental, master
- Children:
- e716aec
- Parents:
- 1ab773e0
- Location:
- libcfa/src/concurrency/io
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/io/setup.cfa
r1ab773e0 r26544f9 216 216 217 217 // completion queue 218 cq. lock= false;218 cq.try_lock = false; 219 219 cq.id = MAX; 220 220 cq.ts = rdtscl(); -
libcfa/src/concurrency/io/types.hfa
r1ab773e0 r26544f9 37 37 //----------------------------------------------------------------------- 38 38 // Ring Data structure 39 struct __sub_ring_t { 39 // represent the io_uring submission ring which contains operations that will be sent to io_uring for processing 40 struct __sub_ring_t { 41 // lock needed because remote processors might need to flush the instance 42 __spinlock_t lock; 43 40 44 struct { 41 45 // Head and tail of the ring (associated with array) … … 58 62 59 63 // number of sqes to submit on next system call. 60 __u32 to_submit;64 volatile __u32 to_submit; 61 65 62 66 // number of entries and mask to go with it … … 77 81 void * ring_ptr; 78 82 size_t ring_sz; 79 }; 80 83 84 // for debug purposes, whether or not the last flush was due to a arbiter flush 85 bool last_external; 86 }; 87 88 // represent the io_uring completion ring which contains operations that have completed 81 89 struct __cmp_ring_t { 82 volatile bool lock; 83 90 // needed because remote processors can help drain the buffer 91 volatile bool try_lock; 92 93 // id of the ring, used for the helping/topology algorithms 84 94 unsigned id; 85 95 96 // timestamp from last time it was drained 86 97 unsigned long long ts; 87 98 … … 105 116 }; 106 117 118 // struct representing an io operation that still needs processing 119 // actual operations are expected to inherit from this 107 120 struct __outstanding_io { 121 // intrusive link fields 108 122 inline Colable; 123 124 // primitive on which to block until the io is processed 109 125 oneshot waitctx; 110 126 }; 111 127 static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); } 112 128 129 // queue of operations that are outstanding 113 130 struct __outstanding_io_queue { 131 // spinlock for protection 132 // TODO: changing to a lock that blocks, I haven't examined whether it should be a kernel or user lock 114 133 __spinlock_t lock; 134 135 // the actual queue 115 136 
Queue(__outstanding_io) queue; 137 138 // volatile used to avoid the need for taking the lock if it's empty 116 139 volatile bool empty; 117 140 }; 118 141 142 // struct representing an operation that was submitted 119 143 struct __external_io { 144 // inherits from outstanding io 120 145 inline __outstanding_io; 146 147 // pointer and count to an array of ids to be submitted 121 148 __u32 * idxs; 122 149 __u32 have; 150 151 // whether or not these can be accumulated before flushing the buffer 123 152 bool lazy; 124 153 }; 125 154 126 155 // complete io_context, contains all the data for io submission and completion 127 156 struct __attribute__((aligned(64))) io_context$ { 157 // arbiter, used in cases where threads are migrated at unfortunate moments 128 158 io_arbiter$ * arbiter; 159 160 // which processor the context is tied to 129 161 struct processor * proc; 130 162 163 // queue of io submissions that haven't been processed. 131 164 __outstanding_io_queue ext_sq; 132 165 166 // io_uring ring data structures 133 167 struct __sub_ring_t sq; 134 168 struct __cmp_ring_t cq; 169 170 // flags the io_uring rings were created with 135 171 __u32 ring_flags; 172 173 // file descriptor that identifies the io_uring instance 136 174 int fd; 137 175 }; 138 176 177 // shorthand to check when the io_context was last processed (io drained) 139 178 static inline unsigned long long ts(io_context$ *& this) { 140 179 const __u32 head = *this->cq.head; 141 180 const __u32 tail = *this->cq.tail; 142 181 182 // if there are no pending completions, just pretend it's infinitely recent 143 183 if(head == tail) return ULLONG_MAX; 144 184 … … 146 186 } 147 187 188 // structure representing allocations that couldn't succeed locally 148 189 struct __pending_alloc { 190 // inherits from outstanding io 149 191 inline __outstanding_io; 192 193 // array and size of the desired allocation 150 194 __u32 * idxs; 151 195 __u32 want; 196 197 // output param, the context the io was allocated from 152 
198 io_context$ * ctx; 153 199 }; 154 200 201 // arbiter that handles cases where the context tied to the local processor is unable to satisfy the io 155 202 monitor __attribute__((aligned(64))) io_arbiter$ { 203 // contains a queue of io for pending allocations 156 204 __outstanding_io_queue pending; 157 205 };
Note:
See TracChangeset
for help on using the changeset viewer.