source: libcfa/src/concurrency/io.cfa @ 9082e0f1

Last change on this file since 9082e0f1 was fe9468e2, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Added function thread_rand as a tls-safe version of tls_rand()

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Apr 23 17:31:00 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif


#if defined(CFA_HAVE_LINUX_IO_URING_H)
	#define _GNU_SOURCE  /* See feature_test_macros(7) */
	#include <errno.h>
	#include <signal.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	extern "C" {
		#include <sys/epoll.h>
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "stats.hfa"
	#include "kernel.hfa"
	#include "kernel/fwd.hfa"
	#include "io/types.hfa"

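	// Note on the leader lock used below: a __leaderlock_t packs its whole state into one
	// pointer-sized word:
	//   0p       : unlocked
	//   1p       : locked, no thread waiting to become the next leader
	//   thrd | 1 : locked, with a parked thread registered as the next leader
	// try_lock() either takes the lock outright, parks as the next leader, or fails if a next
	// leader is already registered; next() hands the lock to the parked leader if there is one,
	// otherwise it unlocks.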
	// returns true if acquired as leader or second leader
	static inline bool try_lock( __leaderlock_t & this ) {
		const uintptr_t thrd = 1z | (uintptr_t)active_thread();
		bool block;
		disable_interrupts();
		for() {
			struct $thread * expected = this.value;
			if( 1p != expected && 0p != expected ) {
				/* paranoid */ verify( thrd != (uintptr_t)expected ); // We better not already be the next leader
				enable_interrupts( __cfaabi_dbg_ctx );
				return false;
			}
			struct $thread * desired;
			if( 0p == expected ) {
				// If the lock isn't locked, acquire it, no need to block
				desired = 1p;
				block = false;
			}
			else {
				// If the lock is already locked, try becoming the next leader
				desired = (struct $thread *)thrd;
				block = true;
			}
			if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
		}
		if( block ) {
			enable_interrupts( __cfaabi_dbg_ctx );
			park();
			disable_interrupts();
		}
		return true;
	}

	static inline bool next( __leaderlock_t & this ) {
		/* paranoid */ verify( ! __preemption_enabled() );
		struct $thread * nextt;
		for() {
			struct $thread * expected = this.value;
			/* paranoid */ verify( (1 & (uintptr_t)expected) == 1 ); // The lock better be locked

			struct $thread * desired;
			if( 1p == expected ) {
				// No next leader, just unlock
				desired = 0p;
				nextt = 0p;
			}
			else {
				// There is a next leader, remove but keep locked
				desired = 1p;
				nextt = (struct $thread *)(~1z & (uintptr_t)expected);
			}
			if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break;
		}

		if(nextt) {
			unpark( nextt );
			enable_interrupts( __cfaabi_dbg_ctx );
			return true;
		}
		enable_interrupts( __cfaabi_dbg_ctx );
		return false;
	}

//=============================================================================================
// I/O Syscall
//=============================================================================================
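	// Wrapper around the io_uring_enter syscall: only enter the kernel when it is actually needed.
	// With IORING_SETUP_SQPOLL the kernel polls the submission queue itself, so a syscall is only
	// required to wake its poller when IORING_SQ_NEED_WAKEUP is set. For completions,
	// IORING_ENTER_GETEVENTS is added whenever 'get' is requested on a non-SQPOLL ring, but a
	// syscall is forced only for IORING_SETUP_IOPOLL rings; otherwise completions are read
	// directly from the shared ring and the function just issues the trailing memory fence.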
	static int __io_uring_enter( struct __io_data & ring, unsigned to_submit, bool get ) {
		bool need_sys_to_submit = false;
		bool need_sys_to_complete = false;
		unsigned flags = 0;

		TO_SUBMIT:
		if( to_submit > 0 ) {
			if( !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
				need_sys_to_submit = true;
				break TO_SUBMIT;
			}
			if( (*ring.submit_q.flags) & IORING_SQ_NEED_WAKEUP ) {
				need_sys_to_submit = true;
				flags |= IORING_ENTER_SQ_WAKEUP;
			}
		}

		if( get && !(ring.ring_flags & IORING_SETUP_SQPOLL) ) {
			flags |= IORING_ENTER_GETEVENTS;
			if( (ring.ring_flags & IORING_SETUP_IOPOLL) ) {
				need_sys_to_complete = true;
			}
		}

		int ret = 0;
		if( need_sys_to_submit || need_sys_to_complete ) {
			ret = syscall( __NR_io_uring_enter, ring.fd, to_submit, 0, flags, (sigset_t *)0p, _NSIG / 8);
			if( ret < 0 ) {
				switch((int)errno) {
				case EAGAIN:
				case EINTR:
					ret = -1;
					break;
				default:
					abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
				}
			}
		}

		// Memory barrier
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		return ret;
	}

//=============================================================================================
// I/O Polling
//=============================================================================================
	static unsigned __collect_submitions( struct __io_data & ring );
	static __u32 __release_consumed_submission( struct __io_data & ring );

	static inline void process(struct io_uring_cqe & cqe ) {
		struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
		__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

		fulfil( *future, cqe.res );
	}

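	// The completion queue is a single-producer (kernel) / single-consumer (poller) ring buffer:
	// *head and *tail are free-running counters shared with the kernel, reduced into the cqes
	// array with the power-of-two mask. __drain_io below processes every cqe between head and
	// tail, fulfilling the future stored in each cqe's user_data, and only publishes the new head
	// once all of them have been read.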
	// Process a single completion message from the io_uring
	// This is NOT thread-safe
	static [int, bool] __drain_io( & struct __io_data ring ) {
		/* paranoid */ verify( ! __preemption_enabled() );

		unsigned to_submit = 0;
		if( ring.poller_submits ) {
			// If the poller thread also submits, then we need to aggregate the submissions which are ready
			to_submit = __collect_submitions( ring );
		}

		int ret = __io_uring_enter(ring, to_submit, true);
		if( ret < 0 ) {
			return [0, true];
		}

		// update statistics
		if (to_submit > 0) {
			__STATS__( true,
				if( to_submit > 0 ) {
					io.submit_q.submit_avg.rdy += to_submit;
					io.submit_q.submit_avg.csm += ret;
					io.submit_q.submit_avg.cnt += 1;
				}
			)
		}

		// Release the consumed SQEs
		__release_consumed_submission( ring );

		// Drain the queue
		unsigned head = *ring.completion_q.head;
		unsigned tail = *ring.completion_q.tail;
		const __u32 mask = *ring.completion_q.mask;

		// Nothing new, return 0
		if (head == tail) {
			return [0, to_submit > 0];
		}

		__u32 count = tail - head;
		/* paranoid */ verify( count != 0 );
		for(i; count) {
			unsigned idx = (head + i) & mask;
			struct io_uring_cqe & cqe = ring.completion_q.cqes[idx];

			/* paranoid */ verify(&cqe);

			process( cqe );
		}

		// Mark to the kernel that the cqes have been seen
		// Ensure that the kernel only sees the new value of the head index after the CQEs have been read.
		__atomic_thread_fence( __ATOMIC_SEQ_CST );
		__atomic_fetch_add( ring.completion_q.head, count, __ATOMIC_RELAXED );

		return [count, count > 0 || to_submit > 0];
	}

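	// Note: the pair returned by __drain_io is [ completions processed, whether any work was found ].
	// main() below uses the second field to throttle itself: every drain that finds no work bumps
	// 'reset', and once 5 such drains have accumulated since the last park the poller arms its
	// epoll wakeup with __ioctx_prepare_block and blocks on its semaphore until woken.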
	void main( $io_ctx_thread & this ) {
		epoll_event ev;
		__ioctx_register( this, ev );

		__cfadbg_print_safe(io_core, "Kernel I/O : IO poller %p for ring %p ready\n", &this, &this.ring);

		int reset = 0;
		// Then loop until we need to stop
		while(!__atomic_load_n(&this.done, __ATOMIC_SEQ_CST)) {
			// Drain the io
			int count;
			bool again;
			disable_interrupts();
			[count, again] = __drain_io( *this.ring );

			if(!again) reset++;

			// Update statistics
			__STATS__( true,
				io.complete_q.completed_avg.val += count;
				io.complete_q.completed_avg.cnt += 1;
			)
			enable_interrupts( __cfaabi_dbg_ctx );

			// If we got something, just yield and check again
			if(reset < 5) {
				yield();
			}
			// We didn't get anything, baton pass to the slow poller
			else {
				__STATS__( false,
					io.complete_q.blocks += 1;
				)
				__cfadbg_print_safe(io_core, "Kernel I/O : Parking io poller %p\n", &this.self);
				reset = 0;

				// block this thread
				__ioctx_prepare_block( this, ev );
				wait( this.sem );
			}
		}

		__cfadbg_print_safe(io_core, "Kernel I/O : Fast poller for ring %p stopping\n", &this.ring);
	}

//=============================================================================================
// I/O Submissions
//=============================================================================================

// Submission steps :
// 1 - Allocate a queue entry. The ring already has memory for all entries but only the ones
//     listed in sq.array are visible to the kernel. For those not listed, the kernel does not
//     offer any assurance that an entry is not being filled by multiple threads. Therefore, we
//     need to write an allocator that allows allocating concurrently.
//
// 2 - Actually fill the submit entry, this is the only simple and straightforward step.
//
// 3 - Append the entry index to the array and adjust the tail accordingly. This operation
//     needs to reach two consensus decisions at the same time:
//     A - The order in which entries are listed in the array: no two threads must pick the
//         same index for their entries
//     B - When the tail can be updated for the kernel. EVERY entry in the array between
//         head and tail must be fully filled and shouldn't ever be touched again.
//
	[* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data ) {
		/* paranoid */ verify( data != 0 );

		// Prepare the data we need
		__attribute((unused)) int len = 0;
		__attribute((unused)) int block = 0;
		__u32 cnt = *ring.submit_q.num;
		__u32 mask = *ring.submit_q.mask;

		__u32 off = thread_rand();

		// Loop around looking for an available spot
		for() {
			// Look through the list starting at some offset
			for(i; cnt) {
				__u64 expected = 0;
				__u32 idx = (i + off) & mask;
				struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx];
				volatile __u64 * udata = &sqe->user_data;

				if( *udata == expected &&
					__atomic_compare_exchange_n( udata, &expected, data, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) )
				{
					// update statistics
					__STATS__( false,
						io.submit_q.alloc_avg.val   += len;
						io.submit_q.alloc_avg.block += block;
						io.submit_q.alloc_avg.cnt   += 1;
					)

					// Success, return the data
					return [sqe, idx];
				}
				verify(expected != data);

				len ++;
			}

			block++;
			yield();
		}
	}

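	// The ready array is a fixed, power-of-two sized staging area for sqe indices that have been
	// filled but not yet handed to the kernel. A slot holding the sentinel -1ul32 is free;
	// claiming a slot CASes the sentinel to the sqe index, so multiple threads can publish
	// concurrently without taking the submit lock.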
	static inline __u32 __submit_to_ready_array( struct __io_data & ring, __u32 idx, const __u32 mask ) {
		/* paranoid */ verify( idx <= mask );
		/* paranoid */ verify( idx != -1ul32 );

		// We need to find a spot in the ready array
		__attribute((unused)) int len = 0;
		__attribute((unused)) int block = 0;
		__u32 ready_mask = ring.submit_q.ready_cnt - 1;

		__u32 off = thread_rand();

		__u32 picked;
		LOOKING: for() {
			for(i; ring.submit_q.ready_cnt) {
				picked = (i + off) & ready_mask;
				__u32 expected = -1ul32;
				if( __atomic_compare_exchange_n( &ring.submit_q.ready[picked], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) {
					break LOOKING;
				}
				verify(expected != idx);

				len ++;
			}

			block++;

			__u32 released = __release_consumed_submission( ring );
			if( released == 0 ) {
				yield();
			}
		}

		// update statistics
		__STATS__( false,
			io.submit_q.look_avg.val   += len;
			io.submit_q.look_avg.block += block;
			io.submit_q.look_avg.cnt   += 1;
		)

		return picked;
	}

	void __submit( struct io_context * ctx, __u32 idx ) __attribute__((nonnull (1))) {
		__io_data & ring = *ctx->thrd.ring;
		// Get the data we definitely need now
		volatile __u32 * const tail = ring.submit_q.tail;
		const __u32 mask = *ring.submit_q.mask;

		// There are 3 submission schemes, check which one we are using
		if( ring.poller_submits ) {
			// If the poller thread submits, then we just need to add this to the ready array
			__submit_to_ready_array( ring, idx, mask );

			post( ctx->thrd.sem );

			__cfadbg_print_safe( io, "Kernel I/O : Added %u to ready for %p\n", idx, active_thread() );
		}
		else if( ring.eager_submits ) {
			__u32 picked = __submit_to_ready_array( ring, idx, mask );

			#if defined(LEADER_LOCK)
				if( !try_lock(ring.submit_q.submit_lock) ) {
					__STATS__( false,
						io.submit_q.helped += 1;
					)
					return;
				}
				/* paranoid */ verify( ! __preemption_enabled() );
				__STATS__( true,
					io.submit_q.leader += 1;
				)
			#else
				for() {
					yield();

					if( try_lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2) ) {
						__STATS__( false,
							io.submit_q.leader += 1;
						)
						break;
					}

					// If someone else collected our index, we are done
					#warning ABA problem
					if( ring.submit_q.ready[picked] != idx ) {
						__STATS__( false,
							io.submit_q.helped += 1;
						)
						return;
					}

					__STATS__( false,
						io.submit_q.busy += 1;
					)
				}
			#endif

			// We got the lock
			// Collect the submissions
			unsigned to_submit = __collect_submitions( ring );

			// Actually submit
			int ret = __io_uring_enter( ring, to_submit, false );

			#if defined(LEADER_LOCK)
				/* paranoid */ verify( ! __preemption_enabled() );
				next(ring.submit_q.submit_lock);
			#else
				unlock(ring.submit_q.submit_lock);
			#endif
			if( ret < 0 ) return;

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.rdy += to_submit;
				io.submit_q.submit_avg.csm += ret;
				io.submit_q.submit_avg.cnt += 1;
			)
		}
		else {
			// get mutual exclusion
			#if defined(LEADER_LOCK)
				while(!try_lock(ring.submit_q.submit_lock));
			#else
				lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2);
			#endif

			/* paranoid */ verifyf( ring.submit_q.sqes[ idx ].user_data != 0,
			/* paranoid */          "index %u already reclaimed\n"
			/* paranoid */          "head %u, prev %u, tail %u\n"
			/* paranoid */          "[-0: %u,-1: %u,-2: %u,-3: %u]\n",
			/* paranoid */          idx,
			/* paranoid */          *ring.submit_q.head, ring.submit_q.prev_head, *tail
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 0) & (*ring.submit_q.mask) ]
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 1) & (*ring.submit_q.mask) ]
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 2) & (*ring.submit_q.mask) ]
			/* paranoid */          ,ring.submit_q.array[ ((*ring.submit_q.head) - 3) & (*ring.submit_q.mask) ]
			/* paranoid */          );

			// Append to the list of ready entries

			/* paranoid */ verify( idx <= mask );
			ring.submit_q.array[ (*tail) & mask ] = idx;
			__atomic_fetch_add(tail, 1ul32, __ATOMIC_SEQ_CST);

			// Submit however many entries need to be submitted
			int ret = __io_uring_enter( ring, 1, false );
			if( ret < 0 ) {
				switch((int)errno) {
				default:
					abort( "KERNEL ERROR: IO_URING SUBMIT - %s\n", strerror(errno) );
				}
			}

			// update statistics
			__STATS__( false,
				io.submit_q.submit_avg.csm += 1;
				io.submit_q.submit_avg.cnt += 1;
			)

			// Release the consumed SQEs
			__release_consumed_submission( ring );

			#if defined(LEADER_LOCK)
				next(ring.submit_q.submit_lock);
			#else
				unlock(ring.submit_q.submit_lock);
			#endif

			__cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
		}
	}

	// #define PARTIAL_SUBMIT 32
	static unsigned __collect_submitions( struct __io_data & ring ) {
		/* paranoid */ verify( ring.submit_q.ready != 0p );
		/* paranoid */ verify( ring.submit_q.ready_cnt > 0 );

		unsigned to_submit = 0;
		__u32 tail = *ring.submit_q.tail;
		const __u32 mask = *ring.submit_q.mask;
		#if defined(PARTIAL_SUBMIT)
			#if defined(LEADER_LOCK)
				#error PARTIAL_SUBMIT and LEADER_LOCK cannot co-exist
			#endif
			const __u32 cnt = ring.submit_q.ready_cnt > PARTIAL_SUBMIT ? PARTIAL_SUBMIT : ring.submit_q.ready_cnt;
			const __u32 offset = ring.submit_q.prev_ready;
			ring.submit_q.prev_ready += cnt;
		#else
			const __u32 cnt = ring.submit_q.ready_cnt;
			const __u32 offset = 0;
		#endif

		// Go through the list of ready submissions
		for( c; cnt ) {
			__u32 i = (offset + c) % ring.submit_q.ready_cnt;

			// replace any submission with the sentinel, to consume it.
			__u32 idx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED);

			// If it was already the sentinel, then we are done
			if( idx == -1ul32 ) continue;

			// If we got a real submission, append it to the list
			ring.submit_q.array[ (tail + to_submit) & mask ] = idx & mask;
			to_submit++;
		}

		// Increment the tail based on how many we are ready to submit
		__atomic_fetch_add(ring.submit_q.tail, to_submit, __ATOMIC_SEQ_CST);

		return to_submit;
	}

	static __u32 __release_consumed_submission( struct __io_data & ring ) {
		const __u32 smask = *ring.submit_q.mask;

		if( !try_lock(ring.submit_q.release_lock __cfaabi_dbg_ctx2) ) return 0;
		__u32 chead = *ring.submit_q.head;
		__u32 phead = ring.submit_q.prev_head;
		ring.submit_q.prev_head = chead;
		unlock(ring.submit_q.release_lock);

		__u32 count = chead - phead;
		for( i; count ) {
			__u32 idx = ring.submit_q.array[ (phead + i) & smask ];
			ring.submit_q.sqes[ idx ].user_data = 0;
		}
		return count;
	}
#endif