source: libcfa/src/concurrency/io/setup.cfa@ 683cc13

Last change on this file since 683cc13 was 8bee858, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Changed io types to have trailing $ instead of leading

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io/setup.cfa --
//
// Author           : Thierry Delisle
// Created On       : Fri Jul 31 16:25:51 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif

#include "io/types.hfa"
#include "kernel.hfa"

#if !defined(CFA_HAVE_LINUX_IO_URING_H)
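	// On systems without <linux/io_uring.h>, the following no-op stubs keep the
	// rest of the runtime compiling and linking; there is simply no io_uring
	// backend to drive.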
	void ?{}(io_context_params & this) libcfa_public {}

	void ?{}(io_context$ & this, struct cluster & cl) {}
	void ^?{}(io_context$ & this) {}

	void __cfa_io_start( processor * proc ) {}
	bool __cfa_io_flush( processor * proc ) { return false; }
	bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
	void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
	void __cfa_io_stop ( processor * proc ) {}

	io_arbiter$ * create(void) { return 0p; }
	void destroy(io_arbiter$ *) {}

#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <signal.h>
	#include <unistd.h>

	extern "C" {
		#include <pthread.h>
		#include <sys/epoll.h>
		#include <sys/eventfd.h>
		#include <sys/mman.h>
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "bitmanip.hfa"
	#include "fstream.hfa"
	#include "kernel/private.hfa"
	#include "limits.hfa"
	#include "thread.hfa"
#pragma GCC diagnostic pop

	void ?{}(io_context_params & this) libcfa_public {
		this.num_entries = 256;
	}

	static void * __io_poller_slow( void * arg );

	// Weirdly, some systems that do support io_uring don't actually define these
	#ifdef __alpha__
		/*
		 * alpha is the only exception, all other architectures
		 * have common numbers for new system calls.
		 */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    535
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    536
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 537
		#endif
	#else /* !__alpha__ */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    425
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    426
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 427
		#endif
	#endif

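	// The runtime drives io_uring through raw syscall(2) with the numbers above,
	// rather than linking against liburing. A minimal sketch of such a wrapper
	// (illustrative only, not part of this file) might look like:
	#if 0
	static int raw_io_uring_setup( unsigned entries, struct io_uring_params * p ) {
		// returns a ring fd on success, or -1 with errno set
		return syscall( __NR_io_uring_setup, entries, p );
	}
	#endif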
//=============================================================================================
// I/O Context Construction/Destruction
//=============================================================================================

	static void __io_uring_setup   ( io_context$ & this, const io_context_params & params_in, int procfd );
	static void __io_uring_teardown( io_context$ & this );
	static void __epoll_register(io_context$ & ctx);
	static void __epoll_unregister(io_context$ & ctx);
	void __ioarbiter_register( io_arbiter$ & mutex, io_context$ & ctx );
	void __ioarbiter_unregister( io_arbiter$ & mutex, io_context$ & ctx );

	void ?{}(io_context$ & this, processor * proc, struct cluster & cl) {
		/* paranoid */ verify( cl.io.arbiter );
		this.proc = proc;
		this.arbiter = cl.io.arbiter;
		this.ext_sq.empty = true;
		(this.ext_sq.queue){};
		__io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd );
		__cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
	}

	void ^?{}(io_context$ & this) {
		__cfadbg_print_safe(io_core, "Kernel I/O : tearing down io_context %u\n", this.fd);

		__io_uring_teardown( this );
		__cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd);
	}

	static void __io_uring_setup( io_context$ & this, const io_context_params & params_in, int procfd ) {
		// Step 1 : call to setup
		struct io_uring_params params;
		memset(&params, 0, sizeof(params));
		// if( params_in.poll_submit   ) params.flags |= IORING_SETUP_SQPOLL;
		// if( params_in.poll_complete ) params.flags |= IORING_SETUP_IOPOLL;

		__u32 nentries = params_in.num_entries != 0 ? params_in.num_entries : 256;
		if( !is_pow2(nentries) ) {
			abort("ERROR: I/O setup 'num_entries' must be a power of 2, was %u\n", nentries);
		}

		int fd = syscall(__NR_io_uring_setup, nentries, &params );
		if(fd < 0) {
			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
		}

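		// Note: steps 1-3 hand-roll roughly what liburing's io_uring_queue_init()
		// does internally: the setup syscall, then mmap of the SQ ring, the CQ
		// ring, and the SQE array.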
		// Step 2 : mmap result
		struct __sub_ring_t & sq = this.sq;
		struct __cmp_ring_t & cq = this.cq;

		// calculate the right ring size
		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned)           );
		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// adjust the size according to the parameters
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
			}
		#endif

		// mmap the Submit Queue into existence
		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
		if (sq.ring_ptr == (void*)MAP_FAILED) {
			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
		}

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// mmap the Completion Queue into existence (may or may not be needed)
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_ptr = sq.ring_ptr;
			}
			else
		#endif
		{
			// We need multiple calls to mmap
			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
			if (cq.ring_ptr == (void*)MAP_FAILED) {
				munmap(sq.ring_ptr, sq.ring_sz);
				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
			}
		}

		// mmap the submit queue entries
		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
			munmap(sq.ring_ptr, sq.ring_sz);
			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
		}

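		// At this point up to three mappings exist:
		//   sq.ring_ptr : SQ ring metadata + index array (IORING_OFF_SQ_RING)
		//   cq.ring_ptr : CQ ring metadata + cqe array   (IORING_OFF_CQ_RING)
		//   sq.sqes     : the SQE array itself           (IORING_OFF_SQES)
		// With IORING_FEAT_SINGLE_MMAP, the first two share a single mapping.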
		// Step 3 : Initialize the data structure
		// Get the pointers from the kernel to fill the structure
		// submit queue
		sq.kring.head  = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
		sq.kring.tail  = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
		sq.kring.array = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
		sq.mask        = (   const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
		sq.num         = (   const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
		sq.flags       = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
		sq.dropped     = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);

		sq.kring.released = 0;

		sq.free_ring.head = 0;
		sq.free_ring.tail = *sq.num;
		sq.free_ring.array = alloc( *sq.num, 128`align );
		for(i; (__u32)*sq.num) {
			sq.free_ring.array[i] = i;
		}

		sq.to_submit = 0;

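		// The free_ring above is a ring buffer of available sqe indices,
		// pre-filled with [0, *sq.num). A hypothetical sketch of how an index
		// would be claimed (the real submission logic lives elsewhere, not in
		// this file):
		#if 0
		static __u32 claim_free_idx( struct __sub_ring_t & sq ) {
			__u32 head = sq.free_ring.head;                 // assumes a single consumer
			__u32 idx  = sq.free_ring.array[ head & (*sq.mask) ];
			sq.free_ring.head = head + 1;                   // indices wrap via the mask
			return idx;
		}
		#endif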
		// completion queue
		cq.lock     = false;
		cq.id       = MAX;
		cq.ts       = rdtscl();
		cq.head     = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
		cq.tail     = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
		cq.mask     = (   const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
		cq.num      = (   const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
		cq.overflow = (         __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
		cq.cqes     = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

		#if !defined(CFA_WITH_IO_URING_IDLE)
			// Step 4 : eventfd
			__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);

			int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
			if (ret < 0) {
				abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
			}

			__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
		#endif
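
		// Once registered, the kernel bumps the eventfd on each completion, so a
		// blocked processor can wait with a plain read (illustrative only; the
		// actual idle/wake path lives in the kernel code):
		#if 0
		eventfd_t count;
		eventfd_read( procfd, &count ); // blocks until at least one completion arrives
		#endif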

		// #if defined(CFA_HAVE_IORING_REGISTER_IOWQ_MAX_WORKERS)
		// 	// Step 5 : max worker count
		// 	__cfadbg_print_safe(io_core, "Kernel I/O : limiting max workers for ring %d\n", fd);

		// 	unsigned int maxes[2];
		// 	maxes[0] = 64; // max number of bounded workers (Regular files / block)
		// 	maxes[1] = 64; // max number of unbounded workers (IOSQE_ASYNC)
		// 	int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_IOWQ_MAX_WORKERS, maxes, 2);
		// 	if (ret < 0) {
		// 		abort("KERNEL ERROR: IO_URING MAX WORKER REGISTER - %s\n", strerror(errno));
		// 	}

		// 	__cfadbg_print_safe(io_core, "Kernel I/O : limited max workers for ring %d\n", fd);
		// #endif

		// some paranoid checks
		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask );
		/* paranoid */ verifyf( (*cq.num)  >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );

		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
		/* paranoid */ verifyf( (*sq.kring.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.kring.head );
		/* paranoid */ verifyf( (*sq.kring.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.kring.tail );

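		// The mask invariant checked above is what makes ring indexing cheap:
		// with 256 entries, mask == 0xFF, so a slot is found with (index & mask)
		// and head/tail can wrap freely as unsigned counters.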
		// Update the global ring info
		this.ring_flags = 0;
		this.fd         = fd;
	}

	static void __io_uring_teardown( io_context$ & this ) {
		// Shutdown the io rings
		struct __sub_ring_t & sq = this.sq;
		struct __cmp_ring_t & cq = this.cq;
		{
			__u32 fhead = sq.free_ring.head;
			__u32 ftail = sq.free_ring.tail;

			__u32 total = *sq.num;
			__u32 avail = ftail - fhead;

			if(avail != total) abort | "Processor (" | (void*)this.proc | ") tearing down ring with" | (total - avail) | "entries allocated but not submitted, out of" | total;
		}

		// unmap the submit queue entries
		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));

		// unmap the Submit Queue ring
		munmap(sq.ring_ptr, sq.ring_sz);

		// unmap the Completion Queue ring, if it is different
		if (cq.ring_ptr != sq.ring_ptr) {
			munmap(cq.ring_ptr, cq.ring_sz);
		}

		// close the file descriptor
		close(this.fd);

		free( this.sq.free_ring.array ); // may be null; free handles that
	}
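
	// Teardown mirrors setup in reverse: the sqe array first, then the rings
	// (skipping the CQ ring when it shares the SQ mapping), and finally the fd.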

	void __cfa_io_start( processor * proc ) {
		proc->io.ctx = alloc();
		(*proc->io.ctx){proc, *proc->cltr};
	}
	void __cfa_io_stop ( processor * proc ) {
		^(*proc->io.ctx){};
		free(proc->io.ctx);
	}

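	// Note: each processor owns one io_context$ (see __cfa_io_start/stop above),
	// created when the processor starts and destroyed when it stops.
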
//=============================================================================================
// I/O Context Sleep
//=============================================================================================
	// static inline void __epoll_ctl(io_context$ & ctx, int op, const char * error) {
	// 	struct epoll_event ev;
	// 	ev.events = EPOLLIN | EPOLLONESHOT;
	// 	ev.data.u64 = (__u64)&ctx;
	// 	int ret = epoll_ctl(iopoll.epollfd, op, ctx.efd, &ev);
	// 	if (ret < 0) {
	// 		abort( "KERNEL ERROR: EPOLL %s - (%d) %s\n", error, (int)errno, strerror(errno) );
	// 	}
	// }

	// static void __epoll_register(io_context$ & ctx) {
	// 	__epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
	// }

	// static void __epoll_unregister(io_context$ & ctx) {
	// 	// Read the current epoch so we know when to stop
	// 	size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);

	// 	// Remove the fd from the iopoller
	// 	__epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE");

	// 	// Notify the io poller thread of the shutdown
	// 	iopoll.run = false;
	// 	sigval val = { 1 };
	// 	pthread_sigqueue( iopoll.thrd, SIGUSR1, val );

	// 	// Make sure all this is done
	// 	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	// 	// Wait for the next epoch
	// 	while(curr == iopoll.epoch && !iopoll.stopped) Pause();
	// }

	// void __ioctx_prepare_block(io_context$ & ctx) {
	// 	__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.fd, &ctx);
	// 	__epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
	// }


//=============================================================================================
// I/O Context Misc Setup
//=============================================================================================
	void ?{}( io_arbiter$ & this ) {
		this.pending.empty = true;
	}

	void ^?{}( io_arbiter$ & mutex this ) {}

	io_arbiter$ * create(void) {
		return new();
	}
	void destroy(io_arbiter$ * arbiter) {
		delete(arbiter);
	}


#endif