source: libcfa/src/concurrency/io/setup.cfa@ fa2e183

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// io/setup.cfa --
//
// Author           : Thierry Delisle
// Created On       : Fri Jul 31 16:25:51 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

#if defined(__CFA_DEBUG__)
	// #define __CFA_DEBUG_PRINT_IO__
	// #define __CFA_DEBUG_PRINT_IO_CORE__
#endif

#include "io/types.hfa"
#include "kernel.hfa"

#if !defined(CFA_HAVE_LINUX_IO_URING_H)
	void ?{}(io_context_params & this) libcfa_public {}

	void ?{}(io_context$ & this, struct cluster & cl) {}
	void ^?{}(io_context$ & this) {}

	void __cfa_io_start( processor * proc ) {}
	bool __cfa_io_flush( processor * proc ) { return false; }
	bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
	void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
	void __cfa_io_stop ( processor * proc ) {}

	io_arbiter$ * create(void) { return 0p; }
	void destroy(io_arbiter$ *) {}

#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <signal.h>
	#include <unistd.h>

	extern "C" {
		#include <pthread.h>
		#include <sys/epoll.h>
		#include <sys/eventfd.h>
		#include <sys/mman.h>
		#include <sys/syscall.h>

		#include <linux/io_uring.h>
	}

	#include "bitmanip.hfa"
	#include "fstream.hfa"
	#include "kernel/private.hfa"
	#include "limits.hfa"
	#include "thread.hfa"
#pragma GCC diagnostic pop

	void ?{}(io_context_params & this) libcfa_public {
		this.num_entries = 256;
	}
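
	// Usage sketch (illustrative, not part of the original file): a cluster
	// embeds these parameters (see the `cl.io.params` argument passed to
	// __io_uring_setup below), so a program could plausibly override the
	// default before its processors start; `cl` is a hypothetical instance.
	//
	//     cluster cl;
	//     cl.io.params.num_entries = 1024; // must remain a power of 2 (checked below)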

	static void * __io_poller_slow( void * arg );

	// Weirdly, some systems that do support io_uring don't actually define these
	#ifdef __alpha__
		/*
		 * alpha is the only exception, all other architectures
		 * have common numbers for new system calls.
		 */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    535
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    536
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 537
		#endif
	#else /* !__alpha__ */
		#ifndef __NR_io_uring_setup
			#define __NR_io_uring_setup    425
		#endif
		#ifndef __NR_io_uring_enter
			#define __NR_io_uring_enter    426
		#endif
		#ifndef __NR_io_uring_register
			#define __NR_io_uring_register 427
		#endif
	#endif
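
	// Illustrative sketch (assumption, not code from this file): with the
	// numbers above, the io_uring entry points are reached through raw
	// syscalls, as done later in this file; `io_uring_setup_raw` is a
	// hypothetical helper name used only for illustration.
	//
	//     static inline int io_uring_setup_raw( __u32 entries, struct io_uring_params * params ) {
	//         return syscall( __NR_io_uring_setup, entries, params );
	//     }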

//=============================================================================================
// I/O Context Construction/Destruction
//=============================================================================================

	static void __io_uring_setup ( io_context$ & this, const io_context_params & params_in, int procfd );
	static void __io_uring_teardown( io_context$ & this );
	static void __epoll_register(io_context$ & ctx);
	static void __epoll_unregister(io_context$ & ctx);
	void __ioarbiter_register( io_arbiter$ & mutex, io_context$ & ctx );
	void __ioarbiter_unregister( io_arbiter$ & mutex, io_context$ & ctx );

	void ?{}(io_context$ & this, processor * proc, struct cluster & cl) {
		/* paranoid */ verify( cl.io.arbiter );
		this.proc = proc;
		this.arbiter = cl.io.arbiter;
		this.ext_sq.empty = true;
		(this.ext_sq.queue){};
		__io_uring_setup( this, cl.io.params, proc->idle_wctx.evfd );
		__cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
	}

	void ^?{}(io_context$ & this) {
		__cfadbg_print_safe(io_core, "Kernel I/O : tearing down io_context %u\n", this.fd);

		__io_uring_teardown( this );
		__cfadbg_print_safe(io_core, "Kernel I/O : Destroyed ring for io_context %u\n", this.fd);
	}

	static void __io_uring_setup( io_context$ & this, const io_context_params & params_in, int procfd ) {
		// Step 1 : call to setup
		struct io_uring_params params;
		memset(&params, 0, sizeof(params));
		// if( params_in.poll_submit   ) params.flags |= IORING_SETUP_SQPOLL;
		// if( params_in.poll_complete ) params.flags |= IORING_SETUP_IOPOLL;

		__u32 nentries = params_in.num_entries != 0 ? params_in.num_entries : 256;
		if( !is_pow2(nentries) ) {
			abort("ERROR: I/O setup 'num_entries' must be a power of 2, was %u\n", nentries);
		}

		int fd = syscall(__NR_io_uring_setup, nentries, &params );
		if(fd < 0) {
			abort("KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
		}

		// Step 2 : mmap result
		struct __sub_ring_t & sq = this.sq;
		struct __cmp_ring_t & cq = this.cq;

		// calculate the right ring size
		sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned) );
		cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// adjust the size according to the parameters
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
			}
		#endif

		// mmap the Submit Queue into existence
		sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
		if (sq.ring_ptr == (void*)MAP_FAILED) {
			abort("KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
		}

		// Requires features
		#if defined(IORING_FEAT_SINGLE_MMAP)
			// mmap the Completion Queue into existence (may or may not be needed)
			if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
				cq.ring_ptr = sq.ring_ptr;
			}
			else
		#endif
		{
			// We need multiple calls to mmap
			cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
			if (cq.ring_ptr == (void*)MAP_FAILED) {
				munmap(sq.ring_ptr, sq.ring_sz);
				abort("KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
			}
		}

		// mmap the submit queue entries
		size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
		sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
		if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
			munmap(sq.ring_ptr, sq.ring_sz);
			if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
			abort("KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
		}

		// Step 3 : Initialize the data structure
		// Get the pointers from the kernel to fill the structure
		// submit queue
		sq.kring.head  = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
		sq.kring.tail  = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
		sq.kring.array = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
		sq.mask        = (   const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
		sq.num         = (   const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
		sq.flags       = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
		sq.dropped     = (         __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);

		sq.kring.released = 0;

		sq.free_ring.head = 0;
		sq.free_ring.tail = *sq.num;
		sq.free_ring.array = alloc( *sq.num, 128`align );
		for(i; (__u32)*sq.num) {
			sq.free_ring.array[i] = i;
		}
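
		// Illustrative sketch (assumption; the real allocation path lives in
		// the I/O submission code, not in this file): `free_ring` hands out
		// free sqe indices FIFO-style, and since it has the same capacity as
		// the kernel ring, the same mask presumably applies:
		//
		//     __u32 head = sq.free_ring.head;
		//     __u32 idx  = sq.free_ring.array[ head & (*sq.mask) ]; // claim a free sqe slot
		//     sq.free_ring.head = head + 1;                         // publish the consumption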

		sq.to_submit = 0;

		// completion queue
		cq.lock     = false;
		cq.id       = MAX;
		cq.ts       = rdtscl();
		cq.head     = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
		cq.tail     = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
		cq.mask     = (   const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
		cq.num      = (   const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
		cq.overflow = (         __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
		cq.cqes     = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

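		// Illustrative sketch (assumption; the actual drain loop lives in the
		// I/O polling code, not here): with these pointers, consuming one
		// completion looks roughly like:
		//
		//     __u32 head = *cq.head;
		//     if( head != *cq.tail ) {
		//         struct io_uring_cqe & cqe = cq.cqes[ head & (*cq.mask) ];
		//         // ... dispatch on cqe.user_data / cqe.res ...
		//         __atomic_store_n( cq.head, head + 1, __ATOMIC_RELEASE );
		//     }
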
		#if !defined(CFA_WITH_IO_URING_IDLE)
		{
			// Step 4 : eventfd
			__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);

			int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
			if (ret < 0) {
				abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
			}

			__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
		}
		#endif
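
		// Illustrative sketch (assumption; the idle path that does this lives
		// in the kernel scheduling code, not here): once the eventfd is
		// registered, the kernel bumps its counter on each completion, so an
		// idle processor can block on a plain read and then drain the ring:
		//
		//     eventfd_t count;
		//     read( procfd, &count, sizeof(count) ); // blocks until a completion arrives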

		// TODO: implement a proper version of this.
		// I have not found a better maximum that works in general, but users should be
		// able to configure it the same way they configure other I/O options.
		// #if defined(CFA_HAVE_IORING_REGISTER_IOWQ_MAX_WORKERS)
		// {
		// 	// Step 5 : max worker count
		// 	__cfadbg_print_safe(io_core, "Kernel I/O : limiting max workers for ring %d\n", fd);

		// 	unsigned int maxes[2];
		// 	maxes[0] = 64; // max number of bounded workers (Regular files / block)
		// 	maxes[1] = 64; // max number of unbounded workers (IOSQE_ASYNC)
		// 	int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_IOWQ_MAX_WORKERS, maxes, 2);
		// 	if (ret < 0) {
		// 		abort("KERNEL ERROR: IO_URING MAX WORKER REGISTER - %s\n", strerror(errno));
		// 	}

		// 	__cfadbg_print_safe(io_core, "Kernel I/O : limited max workers for ring %d\n", fd);
		// }
		// #endif
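
		// Hedged sketch toward the TODO above (assumption, not existing code):
		// one way to make the limit configurable would be an extra field on
		// io_context_params; `max_workers` is a hypothetical name.
		//
		//     __u32 mw = params_in.max_workers != 0 ? params_in.max_workers : 64;
		//     unsigned int maxes[2] = { mw, mw };
		//     syscall( __NR_io_uring_register, fd, IORING_REGISTER_IOWQ_MAX_WORKERS, maxes, 2);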

		// some paranoid checks
		/* paranoid */ verifyf( (*cq.mask) == ((*cq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*cq.num) - 1ul32, *cq.num, *cq.mask );
		/* paranoid */ verifyf( (*cq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *cq.num );
		/* paranoid */ verifyf( (*cq.head) == 0, "IO_URING Expected head to be 0, got %u", *cq.head );
		/* paranoid */ verifyf( (*cq.tail) == 0, "IO_URING Expected tail to be 0, got %u", *cq.tail );

		/* paranoid */ verifyf( (*sq.mask) == ((*sq.num) - 1ul32), "IO_URING Expected mask to be %u (%u entries), was %u", (*sq.num) - 1ul32, *sq.num, *sq.mask );
		/* paranoid */ verifyf( (*sq.num) >= nentries, "IO_URING Expected %u entries, got %u", nentries, *sq.num );
		/* paranoid */ verifyf( (*sq.kring.head) == 0, "IO_URING Expected head to be 0, got %u", *sq.kring.head );
		/* paranoid */ verifyf( (*sq.kring.tail) == 0, "IO_URING Expected tail to be 0, got %u", *sq.kring.tail );

		// Update the global ring info
		this.ring_flags = 0;
		this.fd         = fd;
	}

	static void __io_uring_teardown( io_context$ & this ) {
		// Shutdown the io rings
		struct __sub_ring_t & sq = this.sq;
		struct __cmp_ring_t & cq = this.cq;
		{
			__u32 fhead = sq.free_ring.head;
			__u32 ftail = sq.free_ring.tail;

			__u32 total = *sq.num;
			__u32 avail = ftail - fhead;

			if(avail != total) abort | "Processor (" | (void*)this.proc | ") tearing down ring with" | (total - avail) | "entries allocated but not submitted, out of" | total;
		}

		// unmap the submit queue entries
		munmap(sq.sqes, (*sq.num) * sizeof(struct io_uring_sqe));

		// unmap the Submit Queue ring
		munmap(sq.ring_ptr, sq.ring_sz);

		// unmap the Completion Queue ring, if it is different
		if (cq.ring_ptr != sq.ring_ptr) {
			munmap(cq.ring_ptr, cq.ring_sz);
		}

		// close the file descriptor
		close(this.fd);

		free( this.sq.free_ring.array ); // may be null; doesn't matter
	}

	void __cfa_io_start( processor * proc ) {
		proc->io.ctx = alloc();
		(*proc->io.ctx){proc, *proc->cltr};
	}
	void __cfa_io_stop ( processor * proc ) {
		^(*proc->io.ctx){};
		free(proc->io.ctx);
	}
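
	// Usage sketch (illustrative): the runtime calls these once per processor,
	// bracketing its lifetime, so each processor owns exactly one io_context$:
	//
	//     __cfa_io_start( proc ); // allocate and construct proc->io.ctx
	//     /* ... processor runs, submitting and draining I/O ... */
	//     __cfa_io_stop ( proc ); // destruct and free proc->io.ctx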

//=============================================================================================
// I/O Context Sleep
//=============================================================================================
	// static inline void __epoll_ctl(io_context$ & ctx, int op, const char * error) {
	// 	struct epoll_event ev;
	// 	ev.events = EPOLLIN | EPOLLONESHOT;
	// 	ev.data.u64 = (__u64)&ctx;
	// 	int ret = epoll_ctl(iopoll.epollfd, op, ctx.efd, &ev);
	// 	if (ret < 0) {
	// 		abort( "KERNEL ERROR: EPOLL %s - (%d) %s\n", error, (int)errno, strerror(errno) );
	// 	}
	// }

	// static void __epoll_register(io_context$ & ctx) {
	// 	__epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
	// }

	// static void __epoll_unregister(io_context$ & ctx) {
	// 	// Read the current epoch so we know when to stop
	// 	size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);

	// 	// Remove the fd from the iopoller
	// 	__epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE");

	// 	// Notify the io poller thread of the shutdown
	// 	iopoll.run = false;
	// 	sigval val = { 1 };
	// 	__cfaabi_pthread_sigqueue( iopoll.thrd, SIGUSR1, val );

	// 	// Make sure all this is done
	// 	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	// 	// Wait for the next epoch
	// 	while(curr == iopoll.epoch && !iopoll.stopped) Pause();
	// }

	// void __ioctx_prepare_block(io_context$ & ctx) {
	// 	__cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.fd, &ctx);
	// 	__epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
	// }

//=============================================================================================
// I/O Context Misc Setup
//=============================================================================================
	void ?{}( io_arbiter$ & this ) {
		this.pending.empty = true;
	}

	void ^?{}( io_arbiter$ & mutex this ) {}

	io_arbiter$ * create(void) {
		return new();
	}
	void destroy(io_arbiter$ * arbiter) {
		delete(arbiter);
	}

#endif