#define _GNU_SOURCE

// NOTE(review): the operands of every #include below were lost in this copy
// of the file.  The header list is reconstructed from the identifiers this
// translation unit actually uses (errno/strerror, signal, getpid,
// sched_getaffinity/CPU_*, eventfd, socket/sockaddr_in, pipe, sout, clusters,
// mpsc_queue, ...) -- verify against the build before committing.
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

extern "C" {
	#include <sched.h>
	#include <sys/eventfd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <sys/resource.h>
}

#include <fstream.hfa>
#include <kernel.hfa>
#include <locks.hfa>
#include <iofwd.hfa>
#include <stats.hfa>
#include <time.hfa>
#include <thread.hfa>

#include "filecache.hfa"
#include "options.hfa"
#include "socket.hfa"
#include "printer.hfa"
#include "worker.hfa"

extern void register_fixed_files( cluster &, int *, unsigned count );

// Returning a zero Duration disables the runtime's preemption timer for this
// program: benchmark threads are never interrupted mid-run.
Duration default_preemption() {
	return 0;
}

//=============================================================================================
// Globals
//=============================================================================================

// Constructor: build one CFA cluster plus `options.clopts.nprocs` processors,
// wire up optional statistics reporting, and register the cluster in the
// global options table so the stats printer can find it.
void ?{}( ServerCluster & this ) {
	(this.self){ "Server Cluster", options.clopts.params };

	// Snapshot the set of CPUs this process may run on.  Today `cnt` is only
	// consumed by the disabled pinning code below, but the syscall result is
	// still checked so affinity problems surface early.
	cpu_set_t fullset;
	CPU_ZERO(&fullset);
	int ret = sched_getaffinity(getpid(), sizeof(fullset), &fullset);
	if( ret != 0 ) abort | "sched_getaffinity failed with" | errno | strerror( errno );
	int cnt = CPU_COUNT(&fullset);

	this.procs = alloc(options.clopts.nprocs);
	for(i; options.clopts.nprocs) {
		(this.procs[i]){ "Benchmark Processor", this.self };

		// (disabled) pin processor i to the (1 + i % cnt)-th CPU of the
		// affinity set -- kept for future experiments.
		// int c = 0;
		// int n = 1 + (i % cnt);
		// for(int j = 0; j < CPU_SETSIZE; j++) {
		// 	if(CPU_ISSET(j, &fullset)) n--;
		// 	if(n == 0) {
		// 		c = j;
		// 		break;
		// 	}
		// }
		// cpu_set_t localset;
		// CPU_ZERO(&localset);
		// CPU_SET(c, &localset);
		// ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
		// if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );

		#if !defined(__CFA_NO_STATISTICS__)
			if( options.clopts.procstats ) {
				// NOTE(review): `*this.procs` registers procs[0] on every
				// iteration -- looks like it should be this.procs[i]; confirm
				// intent before changing (left as-is to preserve behaviour).
				print_stats_at_exit( *this.procs, this.self.print_stats );
			}
			if( options.clopts.viewhalts ) {
				print_halts( *this.procs );
			}
		#endif
	}

	#if !defined(__CFA_NO_STATISTICS__)
		print_stats_at_exit( this.self, CFA_STATS_READY_Q | CFA_STATS_IO );
	#endif

	// Record this cluster in the global instance table for the stats printer.
	options.clopts.instance[options.clopts.cltr_cnt] = &this.self;
	options.clopts.cltr_cnt++;
}

// Destructor: tear down the processors before the cluster they run on.
void ^?{}( ServerCluster & this ) {
	for(i; options.clopts.nprocs) {
		^(this.procs[i]){};
	}
	free(this.procs);
	^(this.self){};
}
// Protocol (HTTP) worker pool start/stop hooks, defined elsewhere.
extern void init_protocol(void);
extern void deinit_protocol(void);

//=============================================================================================
// REUSEPORT
//=============================================================================================

// Set elsewhere; not used in this translation unit's visible code.
size_t sockarr_size;

// One pending-read queue per acceptor.  Aligned to 128 bytes -- presumably to
// keep each queue on its own pair of cache lines and avoid false sharing
// between acceptors (TODO confirm).
struct __attribute__((aligned(128))) Q {
	mpsc_queue(PendingRead) q;
};

//=============================================================================================
// Termination
//=============================================================================================

// eventfd used to wake main() out of its blocking read when running
// non-interactively; written by the SIGTERM handler below.
int closefd;

// SIGTERM handler: signal number is ignored.  Writing any 8-byte value to the
// eventfd unblocks the cfa_read in main(), triggering a clean shutdown.
void cleanstop(int) {
	eventfd_t buffer = 1;
	char * buffer_s = (char*)&buffer;
	int ret = write(closefd, buffer_s, sizeof(buffer));
	if(ret < 0) abort( "eventfd write error: (%d) %s\n", (int)errno, strerror(errno) );
	return;
}

//=============================================================================================
// Main
//=============================================================================================

int main( int argc, char * argv[] ) {
	int ret;

	// Ignore SIGPIPE so writes to reset connections return EPIPE instead of
	// killing the process.  `1p` is the literal form of handler value 1,
	// i.e. SIG_IGN -- presumably spelled out because the macro trips the
	// compiler (cf. the SIG_ERR workaround below); verify.
	__sighandler_t s = 1p;
	signal(SIGPIPE, s);

	//===================
	// Parse args
	parse_options(argc, argv);

	//===================
	// Setup non-interactive termination: install a SIGTERM handler that
	// writes an eventfd which main() blocks on instead of stdin.
	if(!options.interactive) {
		closefd = eventfd(0, 0);
		if(closefd < 0) abort( "eventfd error: (%d) %s\n", (int)errno, strerror(errno) );

		sighandler_t prev = signal(SIGTERM, cleanstop);
		intptr_t prev_workaround = (intptr_t) prev;
		// can't use SIG_ERR it crashes the compiler
		if(prev_workaround == -1) abort( "signal setup error: (%d) %s\n", (int)errno, strerror(errno) );

		sout | "Signal termination ready";
	}

	//===================
	// Open Files: pre-load the file cache if a document root was given.
	if( options.file_cache.path ) {
		sout | "Filling cache from" | options.file_cache.path;
		fill_cache( options.file_cache.path );
	}

	//===================
	// Open Socket
	sout | getpid() | ": Listening on port" | options.socket.port;

	struct sockaddr_in address;
	int addrlen = prepaddr(address);

	// Only used on the non-REUSEPORT path (single shared listening socket).
	int server_fd;

	//===================
	// Run Server Cluster
	{
		// Two pipe fds per worker, used for splice-based file transfers.
		int pipe_cnt = options.clopts.nworkers * 2;
		int pipe_off;
		int * fds;
		[fds, pipe_off] = filefds( pipe_cnt );
		for(i; 0 ~ pipe_cnt ~ 2) {
			int ret = pipe(&fds[pipe_off + i]);
			if( ret < 0 ) { abort( "pipe error: (%d) %s\n", (int)errno, strerror(errno) ); }
		}

		// (disabled) register pipe fds with io_uring as fixed files.
		// if(options.file_cache.path && options.file_cache.fixed_fds) {
		// 	register_fixed_files(cl, fds, pipe_off);
		// }

		{
			// Stats printer makes a copy so this needs to persist longer than normal
			connection ** conns;
			AcceptWorker  * aworkers  = 0p;   // non-REUSEPORT path: workers accept themselves
			ChannelWorker * cworkers  = 0p;   // REUSEPORT path: workers consume from acceptor queues
			Acceptor      * acceptors = 0p;   // REUSEPORT path: dedicated accept threads
			Q             * queues    = 0p;   // REUSEPORT path: one pending-read queue per acceptor

			// One ServerCluster per requested cluster; constructed here,
			// destroyed when this scope exits during shutdown below.
			ServerCluster cl[options.clopts.nclusters];

			if(options.stats) {
				stats_thrd = alloc();
				(*stats_thrd){ cl };
			} else {
				stats_thrd = 0p;
			}

			init_protocol();

			{
				int nacceptors = options.clopts.nprocs * options.clopts.nclusters;
				conns = alloc(options.clopts.nworkers);
				if(options.socket.reuseport) {
					// REUSEPORT mode: one listening socket + acceptor thread
					// per (processor, cluster) pair; workers pull accepted
					// connections from the acceptors' MPSC queues.
					queues = alloc(nacceptors);
					acceptors = alloc(nacceptors);
					sout | "Creating" | nacceptors | "Acceptors";
					for(i; nacceptors) {
						// Spread acceptors round-robin across clusters.
						(acceptors[i]){ i % options.clopts.nclusters };
					}
					for(i; nacceptors) {
						(queues[i]){};
						{
							acceptors[i].sockfd = listener(address, addrlen);
							acceptors[i].addr = (struct sockaddr *)&address;
							acceptors[i].addrlen = (socklen_t*)&addrlen;
							acceptors[i].flags = 0;
							acceptors[i].queue = &queues[i].q;
						}
						// Threads are created parked; start them only once
						// their fields are fully initialized.
						unpark( acceptors[i] );
					}

					cworkers = anew(options.clopts.nworkers);
					for(i; options.clopts.nworkers) {
						{
							cworkers[i].conn.pipe[0] = fds[pipe_off + (i * 2) + 0];
							cworkers[i].conn.pipe[1] = fds[pipe_off + (i * 2) + 1];
							// Workers share acceptor queues round-robin.
							cworkers[i].queue = &queues[i % nacceptors].q;
							conns[i] = &cworkers[i].conn;
						}
						unpark( cworkers[i] );
					}
				}
				else {
					// Single-socket mode: every worker accepts directly on
					// the one shared listening socket.
					server_fd = listener(address, addrlen);
					aworkers = anew(options.clopts.nworkers);
					for(i; options.clopts.nworkers) {
						// (disabled) fixed-fd variant for io_uring registered files.
						// if( options.file_cache.fixed_fds ) {
						// 	workers[i].pipe[0] = pipe_off + (i * 2) + 0;
						// 	workers[i].pipe[1] = pipe_off + (i * 2) + 1;
						// }
						// else
						{
							aworkers[i].conn.pipe[0] = fds[pipe_off + (i * 2) + 0];
							aworkers[i].conn.pipe[1] = fds[pipe_off + (i * 2) + 1];
							aworkers[i].sockfd = server_fd;
							aworkers[i].addr = (struct sockaddr *)&address;
							aworkers[i].addrlen = (socklen_t*)&addrlen;
							aworkers[i].flags = 0;
							conns[i] = &aworkers[i].conn;
						}
						unpark( aworkers[i] );
					}
				}
				sout | options.clopts.nworkers | "workers started on" | options.clopts.nprocs | "processors /" | options.clopts.nclusters | "clusters";
				for(i; options.clopts.nclusters) {
					sout | options.clopts.thrd_cnt[i] | nonl;
				}
				sout | nl;

				// Block until told to shut down: interactively, echo stdin
				// until EOF; otherwise wait on the termination eventfd.
				{
					if(options.interactive) {
						char buffer[128];
						for() {
							int ret = cfa_read(0, buffer, 128, 0);
							if(ret == 0) break;
							if(ret < 0) abort( "main read error: (%d) %s\n", (int)errno, strerror(errno) );
							sout | "User wrote '" | "" | nonl;
							write(sout, buffer, ret - 1);
							sout | "'";
						}
					}
					else {
						char buffer[sizeof(eventfd_t)];
						int ret = cfa_read(closefd, buffer, sizeof(eventfd_t), 0);
						if(ret < 0) abort( "main read error: (%d) %s\n", (int)errno, strerror(errno) );
					}

					sout | "Shutdown received";
				}

				//===================
				// Close Socket and join.  Order matters: set done flags,
				// shutdown/close the listening sockets to unblock accepts,
				// join acceptors, drain their queues (failing any still
				// pending reads), then close worker pipes and join workers.
				if(options.socket.reuseport) {
					sout | "Notifying connections..." | nonl; flush( sout );
					for(i; nacceptors) {
						acceptors[i].done = true;
					}
					for(i; options.clopts.nworkers) {
						cworkers[i].done = true;
					}
					sout | "done";

					sout | "Shutting down Socket..." | nonl; flush( sout );
					for(i; nacceptors) {
						ret = shutdown( acceptors[i].sockfd, SHUT_RD );
						if( ret < 0 ) {
							abort( "shutdown1 error: (%d) %s\n", (int)errno, strerror(errno) );
						}
					}
					sout | "done";

					sout | "Closing Socket..." | nonl; flush( sout );
					for(i; nacceptors) {
						ret = close( acceptors[i].sockfd );
						if( ret < 0) {
							abort( "close socket error: (%d) %s\n", (int)errno, strerror(errno) );
						}
					}
					sout | "done";

					sout | "Stopping accept threads..." | nonl; flush( sout );
					for(i; nacceptors) {
						join(acceptors[i]);
					}
					sout | "done";

					// Fail any connections still queued but never picked up
					// by a worker.
					sout | "Draining worker queues..." | nonl; flush( sout );
					for(i; nacceptors) {
						PendingRead * p = 0p;
						while(p = pop(queues[i].q)) {
							fulfil(p->f, -ECONNRESET);
						}
					}
					sout | "done";

					sout | "Stopping worker threads..." | nonl; flush( sout );
					for(i; options.clopts.nworkers) {
						for(j; 2) {
							ret = close(cworkers[i].conn.pipe[j]);
							if(ret < 0) abort( "close pipe %d error: (%d) %s\n", j, (int)errno, strerror(errno) );
						}
						join(cworkers[i]);
					}
				}
				else {
					sout | "Notifying connections..." | nonl; flush( sout );
					for(i; options.clopts.nworkers) {
						aworkers[i].done = true;
					}
					sout | "done";

					sout | "Shutting down Socket..." | nonl; flush( sout );
					ret = shutdown( server_fd, SHUT_RD );
					if( ret < 0 ) {
						abort( "shutdown2 error: (%d) %s\n", (int)errno, strerror(errno) );
					}
					sout | "done";

					sout | "Closing Socket..." | nonl; flush( sout );
					ret = close( server_fd );
					if(ret < 0) {
						abort( "close socket error: (%d) %s\n", (int)errno, strerror(errno) );
					}
					sout | "done";

					sout | "Stopping connection threads..." | nonl; flush( sout );
					for(i; options.clopts.nworkers) {
						for(j; 2) {
							ret = close(aworkers[i].conn.pipe[j]);
							if(ret < 0) abort( "close pipe %d error: (%d) %s\n", j, (int)errno, strerror(errno) );
						}
						join(aworkers[i]);
					}
				}
			}
			// Prints the "done" for whichever "Stopping ... threads..." branch ran.
			sout | "done";

			sout | "Stopping protocol threads..." | nonl; flush( sout );
			deinit_protocol();
			sout | "done";

			sout | "Stopping printer threads..." | nonl; flush( sout );
			if(stats_thrd) {
				notify_one(stats_thrd->var);
			}
			delete(stats_thrd);
			sout | "done";

			// Now that the stats printer is stopped, we can reclaim this
			adelete(aworkers);
			adelete(cworkers);
			adelete(acceptors);
			adelete(queues);
			free(conns);

			sout | "Stopping processors/clusters..." | nonl; flush( sout );
		} // scope exit destroys the ServerCluster array (processors + clusters)
		sout | "done";

		free(fds);

		sout | "Stopping processors..." | nonl; flush( sout );
	}
	sout | "done";

	//===================
	// Close Files
	if( options.file_cache.path ) {
		sout | "Closing open files..." | nonl; flush( sout );
		close_cache();
		sout | "done";
	}
}

// Zipf-distributed response sizes used by the benchmark (bytes):
// ~0.1KB..1KB, 2KB..10KB, 20KB..100KB, 200KB..900KB.
const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819, 921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200, 409600, 512000, 614400, 716800, 819200, 921600 };
// zipf_cnts is declared elsewhere; keep it in sync with the table above.
static_assert(zipf_cnts == sizeof(zipf_sizes) / sizeof(zipf_sizes[0]));