source: benchmark/io/http/main.cfa@ c4072d8e

ADT ast-experimental pthread-emulation qualifiedEnum
Last change on this file since c4072d8e was 329e26a, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Re-instated the isolate/multi-cluster option.

  • Property mode set to 100644
File size: 11.1 KB
Line 
1#define _GNU_SOURCE
2
3#include <errno.h>
4#include <signal.h>
5#include <stdio.h>
6#include <string.h>
7#include <unistd.h>
8extern "C" {
9 #include <sched.h>
10 #include <signal.h>
11 #include <sys/eventfd.h>
12 #include <sys/socket.h>
13 #include <netinet/in.h>
14}
15
16#include <fstream.hfa>
17#include <kernel.hfa>
18#include <locks.hfa>
19#include <iofwd.hfa>
20#include <stats.hfa>
21#include <time.hfa>
22#include <thread.hfa>
23
24#include "filecache.hfa"
25#include "options.hfa"
26#include "socket.hfa"
27#include "printer.hfa"
28#include "worker.hfa"
29
30extern void register_fixed_files( cluster &, int *, unsigned count );
31
// Runtime hook: the CFA kernel asks each program for its preemption quantum.
// Returning a zero Duration disables preemptive time-slicing entirely, which
// is what this I/O-bound benchmark wants (threads yield at I/O points anyway).
Duration default_preemption() {
	Duration no_preemption = 0;
	return no_preemption;
}
35
36//=============================================================================================
37// Globals
38//=============================================================================================
// Constructor for a ServerCluster: builds the underlying CFA cluster, spawns
// `options.clopts.nprocs` kernel processors on it, optionally registers
// per-processor statistics printing, and records the cluster instance in the
// global options table so other components (e.g. the stats printer) can find it.
void ?{}( ServerCluster & this ) {
	(this.self){ "Server Cluster", options.clopts.params };

	// Query the CPUs this process may run on; `cnt` is only consumed by the
	// disabled affinity-pinning code below, but the getaffinity call also
	// serves as an early sanity check that the sched API works.
	cpu_set_t fullset;
	CPU_ZERO(&fullset);
	int ret = sched_getaffinity(getpid(), sizeof(fullset), &fullset);
	if( ret != 0 ) abort | "sched_getaffinity failed with" | errno | strerror( errno );
	int cnt = CPU_COUNT(&fullset);

	this.procs = alloc(options.clopts.nprocs);
	for(i; options.clopts.nprocs) {
		(this.procs[i]){ "Benchmark Processor", this.self };

		// Disabled: pin processor i to the (1 + i % cnt)-th available CPU.
		// Kept for reference; re-enable to experiment with explicit affinity.
		// int c = 0;
		// int n = 1 + (i % cnt);
		// for(int j = 0; j < CPU_SETSIZE; j++) {
		// 	if(CPU_ISSET(j, &fullset)) n--;
		// 	if(n == 0) {
		// 		c = j;
		// 		break;
		// 	}
		// }
		// cpu_set_t localset;
		// CPU_ZERO(&localset);
		// CPU_SET(c, &localset);
		// ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
		// if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );

		#if !defined(__CFA_NO_STATISTICS__)
			// Per-processor stats dump at exit, if requested on the command line.
			if( options.clopts.procstats ) {
				print_stats_at_exit( *this.procs, this.self.print_stats );
			}
			if( options.clopts.viewhalts ) {
				print_halts( *this.procs );
			}
		#endif
	}

	#if !defined(__CFA_NO_STATISTICS__)
		// Cluster-wide ready-queue and I/O statistics at exit.
		print_stats_at_exit( this.self, CFA_STATS_READY_Q | CFA_STATS_IO );
	#endif

	// Publish this cluster in the global registry (indexed, then bump count).
	options.clopts.instance[options.clopts.cltr_cnt] = &this.self;
	options.clopts.cltr_cnt++;
}
84
// Destructor for a ServerCluster: tear down in reverse construction order —
// destroy every processor, release their array, then destroy the cluster.
void ^?{}( ServerCluster & this ) {
	for(int i = 0; i < options.clopts.nprocs; i++) {
		^(this.procs[i]){};
	}
	free(this.procs);

	^(this.self){};
}
93
94extern void init_protocol(void);
95extern void deinit_protocol(void);
96
97//=============================================================================================
98// REUSEPORT
99//=============================================================================================
100
size_t sockarr_size;
// One multi-producer/single-consumer queue of pending reads per acceptor.
// Aligned to 128 bytes (two typical cache lines) — presumably to prevent
// false sharing between queues owned by different acceptors; confirm against
// the target architecture's cache-line size.
struct __attribute__((aligned(128))) Q {
	mpsc_queue(PendingRead) q;
};
105
106//=============================================================================================
107// Termination
108//=============================================================================================
109
// Eventfd used to signal shutdown; main() blocks reading it in
// non-interactive mode.
int closefd;
// SIGTERM handler: post one event on `closefd` to wake the main thread and
// begin orderly shutdown.  write(2) is async-signal-safe, so the happy path
// is safe inside a handler.
void cleanstop(int) {
	eventfd_t buffer = 1;
	char * buffer_s = (char*)&buffer;
	int ret = write(closefd, buffer_s, sizeof(buffer));
	// NOTE(review): abort/strerror are not async-signal-safe, but failure
	// here is fatal anyway, so the risk is accepted.
	if(ret < 0) abort( "eventfd write error: (%d) %s\n", (int)errno, strerror(errno) );
	return;
}
118
119//=============================================================================================
120// Main
//=============================================================================================
// Benchmark entry point: parses options, optionally pre-fills the file cache,
// opens the listening socket(s), spins up one or more ServerClusters with
// acceptor/worker threads (two modes: SO_REUSEPORT with per-acceptor queues,
// or a single shared listening socket), waits for shutdown (stdin EOF in
// interactive mode, SIGTERM via eventfd otherwise), then tears everything
// down in careful reverse order.
int main( int argc, char * argv[] ) {
	int ret;
	// `1p` is CFA's literal for pointer value 1, i.e. the value of SIG_IGN:
	// ignore SIGPIPE so writes to dead connections fail with EPIPE instead of
	// killing the process.  (Spelled this way because using SIG_IGN directly
	// apparently tripped the compiler — see the SIG_ERR note below.)
	__sighandler_t s = 1p;
	signal(SIGPIPE, s);

	//===================
	// Parse args
	parse_options(argc, argv);

	//===================
	// Setup non-interactive termination: SIGTERM writes to an eventfd that
	// main blocks on further down.
	if(!options.interactive) {
		closefd = eventfd(0, 0);
		if(closefd < 0) abort( "eventfd error: (%d) %s\n", (int)errno, strerror(errno) );

		sighandler_t prev = signal(SIGTERM, cleanstop);
		intptr_t prev_workaround = (intptr_t) prev;
		// can't use SIG_ERR it crashes the compiler
		if(prev_workaround == -1) abort( "signal setup error: (%d) %s\n", (int)errno, strerror(errno) );

		sout | "Signal termination ready";
	}

	//===================
	// Open Files: pre-load the file cache from disk if a path was given.
	if( options.file_cache.path ) {
		sout | "Filling cache from" | options.file_cache.path;
		fill_cache( options.file_cache.path );
	}

	//===================
	// Open Socket
	sout | getpid() | ": Listening on port" | options.socket.port;

	struct sockaddr_in address;
	int addrlen = prepaddr(address);

	// Only used in the non-reuseport branch below.
	int server_fd;

	//===================
	// Run Server Cluster
	{
		// Two pipe fds (read/write ends) per worker, used for splice-based I/O.
		int pipe_cnt = options.clopts.nworkers * 2;
		int pipe_off;
		int * fds;
		[fds, pipe_off] = filefds( pipe_cnt );
		for(i; 0 ~ pipe_cnt ~ 2) {
			int ret = pipe(&fds[pipe_off + i]);
			if( ret < 0 ) { abort( "pipe error: (%d) %s\n", (int)errno, strerror(errno) ); }
		}

		// Disabled: register the pipes as fixed files with the io_uring layer.
		// if(options.file_cache.path && options.file_cache.fixed_fds) {
		// 	register_fixed_files(cl, fds, pipe_off);
		// }

		{
			// Stats printer makes a copy so this needs to persist longer than normal
			connection ** conns;
			AcceptWorker * aworkers = 0p;
			ChannelWorker * cworkers = 0p;
			Acceptor * acceptors = 0p;
			Q * queues = 0p;
			// Constructing these registers each cluster in options.clopts.instance.
			ServerCluster cl[options.clopts.nclusters];

			if(options.stats) {
				stats_thrd = alloc();
				(*stats_thrd){ cl };
			} else {
				stats_thrd = 0p;
			}

			init_protocol();
			{
				int nacceptors = options.clopts.nprocs * options.clopts.nclusters;
				conns = alloc(options.clopts.nworkers);
				if(options.socket.reuseport) {
					// REUSEPORT mode: one listening socket + Acceptor per
					// processor; accepted connections are queued to
					// ChannelWorkers via the per-acceptor MPSC queues.
					queues = alloc(nacceptors);
					acceptors = alloc(nacceptors);
					sout | "Creating" | nacceptors | "Acceptors";
					for(i; nacceptors) {
						// Round-robin acceptors across clusters.
						(acceptors[i]){ i % options.clopts.nclusters };
					}
					for(i; nacceptors) {
						(queues[i]){};
						{
							acceptors[i].sockfd = listener(address, addrlen);
							acceptors[i].addr = (struct sockaddr *)&address;
							acceptors[i].addrlen = (socklen_t*)&addrlen;
							acceptors[i].flags = 0;
							acceptors[i].queue = &queues[i].q;
						}
						unpark( acceptors[i] );
					}

					cworkers = anew(options.clopts.nworkers);
					for(i; options.clopts.nworkers) {
						{
							cworkers[i].conn.pipe[0] = fds[pipe_off + (i * 2) + 0];
							cworkers[i].conn.pipe[1] = fds[pipe_off + (i * 2) + 1];
							// Workers are striped across acceptor queues.
							cworkers[i].queue = &queues[i % nacceptors].q;
							conns[i] = &cworkers[i].conn;
						}
						unpark( cworkers[i] );
					}
				}
				else {
					// Single-socket mode: all AcceptWorkers accept() on one
					// shared listening socket.
					server_fd = listener(address, addrlen);
					aworkers = anew(options.clopts.nworkers);
					for(i; options.clopts.nworkers) {
						// Disabled fixed-fd variant; kept for reference.
						// if( options.file_cache.fixed_fds ) {
						// 	workers[i].pipe[0] = pipe_off + (i * 2) + 0;
						// 	workers[i].pipe[1] = pipe_off + (i * 2) + 1;
						// }
						// else
						{
							aworkers[i].conn.pipe[0] = fds[pipe_off + (i * 2) + 0];
							aworkers[i].conn.pipe[1] = fds[pipe_off + (i * 2) + 1];
							aworkers[i].sockfd = server_fd;
							aworkers[i].addr = (struct sockaddr *)&address;
							aworkers[i].addrlen = (socklen_t*)&addrlen;
							aworkers[i].flags = 0;
							conns[i] = &aworkers[i].conn;
						}
						unpark( aworkers[i] );
					}
				}
				sout | options.clopts.nworkers | "workers started on" | options.clopts.nprocs | "processors /" | options.clopts.nclusters | "clusters";
				for(i; options.clopts.nclusters) {
					sout | options.clopts.thrd_cnt[i] | nonl;
				}
				sout | nl;
				{
					// Block until shutdown is requested.
					if(options.interactive) {
						// Interactive: echo stdin lines; EOF (read == 0) stops.
						char buffer[128];
						for() {
							int ret = cfa_read(0, buffer, 128, 0);
							if(ret == 0) break;
							if(ret < 0) abort( "main read error: (%d) %s\n", (int)errno, strerror(errno) );
							sout | "User wrote '" | "" | nonl;
							write(sout, buffer, ret - 1);
							sout | "'";
						}
					}
					else {
						// Non-interactive: block on the eventfd written by the
						// SIGTERM handler (cleanstop).
						char buffer[sizeof(eventfd_t)];
						int ret = cfa_read(closefd, buffer, sizeof(eventfd_t), 0);
						if(ret < 0) abort( "main read error: (%d) %s\n", (int)errno, strerror(errno) );
					}

					sout | "Shutdown received";
				}

				//===================
				// Close Socket and join.  Order matters throughout: set done
				// flags first, then shutdown/close sockets to unblock threads,
				// then join, then drain/close the remaining resources.
				if(options.socket.reuseport) {
					sout | "Notifying connections..." | nonl; flush( sout );
					for(i; nacceptors) {
						acceptors[i].done = true;
					}
					for(i; options.clopts.nworkers) {
						cworkers[i].done = true;
					}
					sout | "done";

					sout | "Shutting down Socket..." | nonl; flush( sout );
					for(i; nacceptors) {
						ret = shutdown( acceptors[i].sockfd, SHUT_RD );
						if( ret < 0 ) {
							abort( "shutdown1 error: (%d) %s\n", (int)errno, strerror(errno) );
						}
					}
					sout | "done";

					sout | "Closing Socket..." | nonl; flush( sout );
					for(i; nacceptors) {
						ret = close( acceptors[i].sockfd );
						if( ret < 0) {
							abort( "close socket error: (%d) %s\n", (int)errno, strerror(errno) );
						}
					}
					sout | "done";

					sout | "Stopping accept threads..." | nonl; flush( sout );
					for(i; nacceptors) {
						join(acceptors[i]);
					}
					sout | "done";

					// Fail any reads still queued so workers can't block on them.
					sout | "Draining worker queues..." | nonl; flush( sout );
					for(i; nacceptors) {
						PendingRead * p = 0p;
						while(p = pop(queues[i].q)) {
							fulfil(p->f, -ECONNRESET);
						}
					}
					sout | "done";

					sout | "Stopping worker threads..." | nonl; flush( sout );
					for(i; options.clopts.nworkers) {
						for(j; 2) {
							ret = close(cworkers[i].conn.pipe[j]);
							if(ret < 0) abort( "close pipe %d error: (%d) %s\n", j, (int)errno, strerror(errno) );
						}
						join(cworkers[i]);
					}
				}
				else {
					sout | "Notifying connections..." | nonl; flush( sout );
					for(i; options.clopts.nworkers) {
						aworkers[i].done = true;
					}
					sout | "done";

					sout | "Shutting down Socket..." | nonl; flush( sout );
					ret = shutdown( server_fd, SHUT_RD );
					if( ret < 0 ) {
						abort( "shutdown2 error: (%d) %s\n", (int)errno, strerror(errno) );
					}
					sout | "done";

					sout | "Closing Socket..." | nonl; flush( sout );
					ret = close( server_fd );
					if(ret < 0) {
						abort( "close socket error: (%d) %s\n", (int)errno, strerror(errno) );
					}
					sout | "done";

					sout | "Stopping connection threads..." | nonl; flush( sout );
					for(i; options.clopts.nworkers) {
						for(j; 2) {
							ret = close(aworkers[i].conn.pipe[j]);
							if(ret < 0) abort( "close pipe %d error: (%d) %s\n", j, (int)errno, strerror(errno) );
						}
						join(aworkers[i]);
					}
				}
			}
			sout | "done";

			sout | "Stopping protocol threads..." | nonl; flush( sout );
			deinit_protocol();
			sout | "done";

			sout | "Stopping printer threads..." | nonl; flush( sout );
			if(stats_thrd) {
				notify_one(stats_thrd->var);
			}
			delete(stats_thrd);
			sout | "done";

			// Now that the stats printer is stopped, we can reclaim this
			adelete(aworkers);
			adelete(cworkers);
			adelete(acceptors);
			adelete(queues);
			free(conns);

			sout | "Stopping processors/clusters..." | nonl; flush( sout );
			// `cl` destructors run here at end of scope.
		}
		sout | "done";

		free(fds);

		sout | "Stopping processors..." | nonl; flush( sout );
	}
	sout | "done";

	//===================
	// Close Files
	if( options.file_cache.path ) {
		sout | "Closing open files..." | nonl; flush( sout );
		close_cache();
		sout | "done";
	}
}
397
// File-size buckets (bytes) for the Zipf-distributed workload — presumably
// consumed by the file cache / request generator; confirm against filecache.
// The static_assert pins this table to the externally declared zipf_cnts so
// the two cannot drift apart.
const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819, 921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200, 409600, 512000, 614400, 716800, 819200, 921600 };
static_assert(zipf_cnts == sizeof(zipf_sizes) / sizeof(zipf_sizes[0]));
Note: See TracBrowser for help on using the repository browser.