source: benchmark/io/io_uring.h @ 9317419

Last change on this file since 9317419 was 43784ac, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Changed libcfathread to consistently define _GNU_SOURCE

extern "C" {
	#include <errno.h>
	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <fcntl.h>

	#include <linux/io_uring.h>
}

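// Syscall numbers for io_uring, defined manually in case the libc headers are
// too old to provide them (these are the generic numbers on most architectures).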
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup 425
#endif
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 426
#endif
#ifndef __NR_io_uring_register
#define __NR_io_uring_register 427
#endif

struct io_uring_sq {
	// Head and tail of the ring (associated with array)
	volatile uint32_t * head;
	volatile uint32_t * tail;

	// The actual kernel ring which uses head/tail
	// indexes into the sqes array
	uint32_t * array;

	// number of entries and mask to go with it
	const uint32_t * num;
	const uint32_t * mask;

	// Submission flags, set by the kernel to signal state,
	// e.g. IORING_SQ_NEED_WAKEUP when the SQPOLL thread is idle
	uint32_t * flags;

	// number of invalid sqes the kernel dropped instead of submitting
	uint32_t * dropped;

	// Like head/tail but not seen by the kernel
	volatile uint32_t alloc;

	// A buffer of sqes (not the actual ring)
	struct io_uring_sqe * sqes;

	// The location and size of the mmaped area
	void * ring_ptr;
	size_t ring_sz;
};
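
// To submit: the application fills a free slot in sqes, writes that slot's
// index into array[tail & mask], then advances *tail (with release semantics)
// so the kernel sees the fully-written sqe before it sees the new tail.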

struct io_uring_cq {
	// Head and tail of the ring
	volatile uint32_t * head;
	volatile uint32_t * tail;

	// number of entries and mask to go with it
	const uint32_t * mask;
	const uint32_t * num;

	// number of cqes the kernel dropped because the completion ring was full
	uint32_t * overflow;

	// the kernel ring
	struct io_uring_cqe * cqes;

	// The location and size of the mmaped area
	void * ring_ptr;
	size_t ring_sz;
};
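
// To reap: the kernel writes completions at *tail; the application reads the
// cqe at head & mask (after an acquire-load of *tail) and then advances *head
// to hand the slot back to the kernel.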

struct io_ring {
	struct io_uring_sq submit_q;
	struct io_uring_cq completion_q;
	uint32_t flags;
	int fd;
};

struct IO_singleton {
	io_ring io;
};

IO_singleton self;

void init_uring(uint32_t nentries) {
	// Step 1: call io_uring_setup to create the rings
	struct io_uring_params params;
	memset(&params, 0, sizeof(params));
	// params.flags = IORING_SETUP_SQPOLL;

	int fd = syscall(__NR_io_uring_setup, nentries, &params);
	if (fd < 0) {
		fprintf(stderr, "KERNEL ERROR: IO_URING SETUP - %s\n", strerror(errno));
		abort();
	}

	// Step 2: mmap the results
	memset(&self.io, 0, sizeof(struct io_ring));
	struct io_uring_sq & sq = self.io.submit_q;
	struct io_uring_cq & cq = self.io.completion_q;

	// calculate the right ring sizes
	sq.ring_sz = params.sq_off.array + (params.sq_entries * sizeof(unsigned));
	cq.ring_sz = params.cq_off.cqes  + (params.cq_entries * sizeof(struct io_uring_cqe));

	// Requires the IORING_FEAT_SINGLE_MMAP feature (Linux 5.4+):
	// // adjust the size according to the parameters
	// if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
	// 	cq.ring_sz = sq.ring_sz = max(cq.ring_sz, sq.ring_sz);
	// }

	// mmap the Submit Queue into existence
	sq.ring_ptr = mmap(0, sq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (sq.ring_ptr == (void *)MAP_FAILED) {
		fprintf(stderr, "KERNEL ERROR: IO_URING MMAP1 - %s\n", strerror(errno));
		abort();
	}

	// mmap the Completion Queue into existence
	// (a separate mapping is only needed if the kernel lacks IORING_FEAT_SINGLE_MMAP)
	// if ((params.features & IORING_FEAT_SINGLE_MMAP) != 0) {
	// 	cq.ring_ptr = sq.ring_ptr;
	// }
	// else {
	// We need multiple calls to mmap
	cq.ring_ptr = mmap(0, cq.ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
	if (cq.ring_ptr == (void *)MAP_FAILED) {
		munmap(sq.ring_ptr, sq.ring_sz);
		fprintf(stderr, "KERNEL ERROR: IO_URING MMAP2 - %s\n", strerror(errno));
		abort();
	}
	// }

	// mmap the submit queue entries
	size_t size = params.sq_entries * sizeof(struct io_uring_sqe);
	sq.sqes = (struct io_uring_sqe *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
	if (sq.sqes == (struct io_uring_sqe *)MAP_FAILED) {
		munmap(sq.ring_ptr, sq.ring_sz);
		if (cq.ring_ptr != sq.ring_ptr) munmap(cq.ring_ptr, cq.ring_sz);
		fprintf(stderr, "KERNEL ERROR: IO_URING MMAP3 - %s\n", strerror(errno));
		abort();
	}

	// Get the pointers from the kernel to fill the structure
	// submit queue
	sq.head    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.head);
	sq.tail    = (volatile uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);
	sq.mask    = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);
	sq.num     = (   const uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);
	sq.flags   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);
	sq.dropped = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);
	sq.array   = (         uint32_t *)(((intptr_t)sq.ring_ptr) + params.sq_off.array);
	sq.alloc   = *sq.tail;

	// completion queue
	cq.head     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
	cq.tail     = (volatile uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
	cq.mask     = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);
	cq.num      = (   const uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);
	cq.overflow = (         uint32_t *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);
	cq.cqes     = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);

	self.io.fd = fd;
}
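
// What follows is not part of the original file: a minimal sketch of how these
// structures could be used to submit one no-op and reap its completion. The
// function names (submit_nop, reap_one) are hypothetical, it assumes a single
// submitting thread, and it uses the GCC __atomic builtins for the ordering the
// rings require; treat it as an illustration, not as the benchmark's API.

static int submit_nop() {
	struct io_uring_sq & sq = self.io.submit_q;

	// claim the next sqe slot; single-threaded, so a plain increment suffices
	uint32_t idx = sq.alloc & (*sq.mask);
	sq.alloc++;

	struct io_uring_sqe * sqe = &sq.sqes[idx];
	memset(sqe, 0, sizeof(struct io_uring_sqe));
	sqe->opcode = IORING_OP_NOP;

	// publish: write the slot index into the ring array, then advance the
	// tail with release semantics so the kernel sees the sqe before the tail
	uint32_t tail = *sq.tail;
	sq.array[tail & (*sq.mask)] = idx;
	__atomic_store_n(sq.tail, tail + 1, __ATOMIC_RELEASE);

	// tell the kernel one sqe is ready, and wait until one cqe is available
	return syscall(__NR_io_uring_enter, self.io.fd, 1, 1, IORING_ENTER_GETEVENTS, NULL, (size_t)0);
}

static int reap_one(struct io_uring_cqe * out) {
	struct io_uring_cq & cq = self.io.completion_q;

	// acquire-load the tail so the cqe contents are visible before we read them
	uint32_t head = *cq.head;
	uint32_t tail = __atomic_load_n(cq.tail, __ATOMIC_ACQUIRE);
	if (head == tail) return 0;   // ring empty

	*out = cq.cqes[head & (*cq.mask)];

	// hand the slot back to the kernel
	__atomic_store_n(cq.head, head + 1, __ATOMIC_RELEASE);
	return 1;
}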