#define _GNU_SOURCE
// NOTE(review): the header names of every angle-bracket #include below were
// lost when this file was extracted/converted — they must be restored from
// version control before the file can compile. The identifiers used later
// (sched_getaffinity, CPU_SET, pthread_setaffinity_np, eventfd, socket/bind,
// errno/strerror, memset) suggest the usual sched/pthread/socket/eventfd
// headers, but that is an inference — verify against the original file.
#include
#include
#include
#include
#include
extern "C" {
#include
#include
#include
#include
#include
}
#include
#include
#include
#include
#include
#include
#include "filecache.hfa"
#include "options.hfa"
#include "worker.hfa"

// Defined elsewhere; registers pipe fds with the io_uring instances of a cluster.
// (Currently only referenced from commented-out code in main below.)
extern void register_fixed_files( cluster &, int *, unsigned count );

// Returning a 0 duration disables time-slice preemption for this program.
Duration default_preemption() {
	return 0;
}

//=============================================================================================
// Stats Printer
//=============================================================================================

// Background thread that periodically dumps cluster and per-worker statistics.
// 'workers'/'worker_cnt' are filled in later by main() once the workers exist.
thread StatsPrinter {
	Worker * workers;
	int worker_cnt;
};

// Constructor: name the underlying thread and attach it to cluster 'cl'.
// worker_cnt starts at 0 so the printer is safe to run before workers are registered.
void ?{}( StatsPrinter & this, cluster & cl ) {
	((thread&)this){ "Stats Printer Thread", cl };
	this.worker_cnt = 0;
}

// mutex destructor: synchronizes with the waitfor( ^?{} ) in main below,
// which is how the printer loop is told to stop.
void ^?{}( StatsPrinter & mutex this ) {}

#define eng3(X) (ws(3, 3, unit(eng( X ))))

// Thread body: every 10 seconds print runtime stats, then aggregate and print
// the sendfile statistics across all registered workers. Exits when the
// destructor is called (waitfor on ^?{}).
void main(StatsPrinter & this) {
	LOOP: for() {
		// Non-blocking check: leave the loop if the destructor has been called,
		// otherwise ('or else {}') fall through immediately.
		waitfor( ^?{} : this) {
			break LOOP;
		}
		or else {}

		sleep(10`s);
		print_stats_now( *active_cluster(), CFA_STATS_READY_Q | CFA_STATS_IO );
		if(this.worker_cnt != 0) {
			// Totals accumulated over every worker.
			uint64_t tries = 0;
			uint64_t calls = 0;
			uint64_t header = 0;
			uint64_t splcin = 0;
			uint64_t splcot = 0;
			// Per-zipf-bucket totals of short sendfile calls and bytes written.
			struct {
				volatile uint64_t calls;
				volatile uint64_t bytes;
			} avgrd[zipf_cnts];
			memset(avgrd, 0, sizeof(avgrd));

			// Workers update their stats concurrently; this is a racy snapshot,
			// which is acceptable for periodic reporting.
			for(i; this.worker_cnt) {
				tries += this.workers[i].stats.sendfile.tries;
				calls += this.workers[i].stats.sendfile.calls;
				header += this.workers[i].stats.sendfile.header;
				splcin += this.workers[i].stats.sendfile.splcin;
				splcot += this.workers[i].stats.sendfile.splcot;
				for(j; zipf_cnts) {
					avgrd[j].calls += this.workers[i].stats.sendfile.avgrd[j].calls;
					avgrd[j].bytes += this.workers[i].stats.sendfile.avgrd[j].bytes;
				}
			}

			// Average number of sendfile attempts needed per completed call.
			double ratio = ((double)tries) / calls;

			sout | "----- Worker Stats -----";
			sout | "sendfile : " | calls | "calls," | tries | "tries (" | ratio | " try/call)";
			sout | " " | header | "header," | splcin | "splice in," | splcot | "splice out";
			sout | " - zipf sizes:";
			for(i; zipf_cnts) {
				// Guard against division by zero for buckets with no calls.
				double written = avgrd[i].calls > 0 ? ((double)avgrd[i].bytes) / avgrd[i].calls : 0;
				sout | " " | zipf_sizes[i] | "bytes," | avgrd[i].calls | "shorts," | written | "written";
			}
		}
		else {
			sout | "No Workers!";
		}
	}
}

//=============================================================================================
// Globals
//=============================================================================================

// One benchmark cluster: the CFA cluster itself, its processors (kernel
// threads), and an optional stats-printer thread (null when stats are off).
struct ServerCluster {
	cluster self;
	processor * procs;
	// io_context * ctxs;
	StatsPrinter * prnt;
};

// Constructor: build the cluster, spawn options.clopts.nprocs processors and
// pin each to a CPU (round-robin over the set CPUs of the process affinity
// mask), optionally start the stats printer, and register the cluster in the
// global options so workers can be distributed over clusters later.
void ?{}( ServerCluster & this ) {
	(this.self){ "Server Cluster", options.clopts.params };

	// Query which CPUs this process may run on; pinning chooses among these.
	cpu_set_t fullset;
	CPU_ZERO(&fullset);
	int ret = sched_getaffinity(getpid(), sizeof(fullset), &fullset);
	if( ret != 0 ) abort | "sched_getaffinity failed with" | errno | strerror( errno );
	int cnt = CPU_COUNT(&fullset);

	this.procs = alloc(options.clopts.nprocs);
	for(i; options.clopts.nprocs) {
		(this.procs[i]){ "Benchmark Processor", this.self };

		// Find the (1 + i mod cnt)-th set bit of the affinity mask: processor i
		// is pinned to CPU 'c', cycling through the available CPUs.
		int c = 0;
		int n = 1 + (i % cnt);
		for(int j = 0; j < CPU_SETSIZE; j++) {
			if(CPU_ISSET(j, &fullset)) n--;
			if(n == 0) {
				c = j;
				break;
			}
		}
		cpu_set_t localset;
		CPU_ZERO(&localset);
		CPU_SET(c, &localset);
		// NOTE(review): the abort message below says "sched_getaffinity" but the
		// failing call is pthread_setaffinity_np — copy-paste error in the message.
		// Also note pthread_setaffinity_np reports errors via its return value,
		// not errno, which is why 'ret' (not errno) is printed here.
		ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
		if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );

		#if !defined(__CFA_NO_STATISTICS__)
			if( options.clopts.procstats ) {
				// NOTE(review): '*this.procs' is this.procs[0] on every iteration;
				// 'this.procs[i]' looks intended — confirm against upstream.
				print_stats_at_exit( *this.procs, this.self.print_stats );
			}
			if( options.clopts.viewhalts ) {
				print_halts( *this.procs );
			}
		#endif
	}

	// Stats printer is optional; 0p (CFA null pointer) when disabled.
	if(options.stats) {
		this.prnt = alloc();
		(*this.prnt){ this.self };
	} else {
		this.prnt = 0p;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		print_stats_at_exit( this.self, CFA_STATS_READY_Q | CFA_STATS_IO );
	#endif

	// Record this cluster globally so workers can be assigned to it.
	options.clopts.instance[options.clopts.cltr_cnt] = &this.self;
	options.clopts.cltr_cnt++;
}

// Destructor: stop the printer (delete handles 0p), tear down every processor,
// then the cluster itself. Order matters: processors must stop before the
// cluster is destroyed.
void ^?{}( ServerCluster & this ) {
	delete(this.prnt);
	for(i; options.clopts.nprocs) {
		^(this.procs[i]){};
	}
	free(this.procs);
	^(this.self){};
}

// Protocol (HTTP) module setup/teardown, defined elsewhere.
extern void init_protocol(void);
extern void
deinit_protocol(void);

//=============================================================================================
// Termination
//=============================================================================================

// eventfd used to request shutdown in non-interactive mode; written by the
// SIGTERM handler, read by main.
int closefd;

// SIGTERM handler: wake main() by writing to the shutdown eventfd.
void cleanstop(int) {
	eventfd_t buffer = 1;
	char * buffer_s = (char*)&buffer;
	int ret = write(closefd, buffer_s, sizeof(buffer));
	if(ret < 0) abort( "eventfd write error: (%d) %s\n", (int)errno, strerror(errno) );
	return;
}

//=============================================================================================
// Main
//=============================================================================================

// Program entry: parse options, set up termination, fill the file cache, open
// and bind the listening socket, spin up clusters + workers, wait for a stop
// request (stdin EOF or SIGTERM via eventfd), then shut everything down in
// dependency order.
int main( int argc, char * argv[] ) {
	// Ignore SIGPIPE (broken client connections must not kill the server).
	// 1p is the CFA spelling of the integer-constant handler SIG_IGN;
	// presumably used because the SIG_IGN macro trips the CFA compiler — the
	// SIG_ERR comment below suggests the same class of workaround. Confirm.
	__sighandler_t s = 1p;
	signal(SIGPIPE, s);

	//===================
	// Parse args
	parse_options(argc, argv);

	//===================
	// Setup non-interactive termination
	if(!options.interactive) {
		closefd = eventfd(0, 0);
		if(closefd < 0) abort( "eventfd error: (%d) %s\n", (int)errno, strerror(errno) );

		sighandler_t prev = signal(SIGTERM, cleanstop);
		// signal() returns SIG_ERR ((sighandler_t)-1) on failure; compare via
		// intptr_t instead.
		intptr_t prev_workaround = (intptr_t) prev;
		// can't use SIG_ERR it crashes the compiler
		if(prev_workaround == -1) abort( "signal setup error: (%d) %s\n", (int)errno, strerror(errno) );

		sout | "Signal termination ready";
	}

	//===================
	// Open Files
	if( options.file_cache.path ) {
		sout | "Filling cache from" | options.file_cache.path;
		fill_cache( options.file_cache.path );
	}

	//===================
	// Open Socket
	sout | getpid() | ": Listening on port" | options.socket.port;
	int server_fd = socket(AF_INET, SOCK_STREAM, 0);
	if(server_fd < 0) {
		abort( "socket error: (%d) %s\n", (int)errno, strerror(errno) );
	}

	int ret = 0;
	struct sockaddr_in address;
	// This addrlen (and &address) is shared with every worker below; the
	// addrlen inside the bind loop is a separate, shadowing variable.
	int addrlen = sizeof(address);
	// CFA's polymorphic two-argument memset fills the whole object —
	// presumably equivalent to memset(&address, 0, sizeof(address)); confirm.
	memset( (char *)&address, '\0' );
	address.sin_family = AF_INET;
	address.sin_addr.s_addr = htonl(INADDR_ANY);
	address.sin_port = htons( options.socket.port );

	// Retry bind once per second while the port is in TIME_WAIT/in use
	// (interactive mode only; non-interactive aborts immediately).
	int waited = 0;
	for() {
		int sockfd = server_fd;
		// glibc transparent-union argument type for bind's sockaddr parameter.
		__CONST_SOCKADDR_ARG addr;
		addr.__sockaddr__ = (struct sockaddr *)&address;
		socklen_t addrlen = sizeof(address);
		ret = bind( sockfd, addr, addrlen );
		if(ret < 0) {
			if(errno == EADDRINUSE) {
				if(waited == 0) {
					if(!options.interactive) abort | "Port already in use in non-interactive mode. Aborting";
					sout | "Waiting for port";
				} else {
					// Overwrite the same console line with the wait counter.
					sout | "\r" | waited | nonl;
					flush( sout );
				}
				waited ++;
				sleep( 1`s );
				continue;
			}
			abort( "bind error: (%d) %s\n", (int)errno, strerror(errno) );
		}
		break;
	}

	ret = listen( server_fd, options.socket.backlog );
	if(ret < 0) {
		abort( "listen error: (%d) %s\n", (int)errno, strerror(errno) );
	}

	//===================
	// Run Server Cluster
	{
		// Two fds (read/write end) per worker for splice-based sendfile.
		int pipe_cnt = options.clopts.nworkers * 2;
		int pipe_off;
		int * fds;
		[fds, pipe_off] = filefds( pipe_cnt );
		// Stride-2 loop: each pipe() fills a consecutive fd pair.
		for(i; 0 ~ pipe_cnt ~ 2) {
			int ret = pipe(&fds[pipe_off + i]);
			if( ret < 0 ) { abort( "pipe error: (%d) %s\n", (int)errno, strerror(errno) ); }
		}

		// if(options.file_cache.path && options.file_cache.fixed_fds) {
		// 	register_fixed_files(cl, fds, pipe_off);
		// }

		{
			// Stats printer makes a copy so this needs to persist longer than normal
			Worker * workers;
			ServerCluster cl[options.clopts.nclusters];

			init_protocol();
			{
				workers = anew(options.clopts.nworkers);
				// NOTE(review): only cl[0]'s printer is given the worker list, and
				// cl[0].prnt is dereferenced unconditionally — this crashes when
				// options.stats is off (prnt == 0p). Confirm whether stats are
				// mandatory here or a null check is missing.
				cl[0].prnt->workers = workers;
				cl[0].prnt->worker_cnt = options.clopts.nworkers;
				for(i; options.clopts.nworkers) {
					// if( options.file_cache.fixed_fds ) {
					// 	workers[i].pipe[0] = pipe_off + (i * 2) + 0;
					// 	workers[i].pipe[1] = pipe_off + (i * 2) + 1;
					// }
					// else
					{
						// Hand each worker its pipe pair plus the shared listening
						// socket and address buffer.
						workers[i].pipe[0] = fds[pipe_off + (i * 2) + 0];
						workers[i].pipe[1] = fds[pipe_off + (i * 2) + 1];
						workers[i].sockfd = server_fd;
						workers[i].addr = (struct sockaddr *)&address;
						workers[i].addrlen = (socklen_t*)&addrlen;
						workers[i].flags = 0;
					}
					// Worker threads are created parked; start them now that their
					// fields are populated.
					unpark( workers[i] );
				}
				sout | options.clopts.nworkers | "workers started on" | options.clopts.nprocs | "processors /" | options.clopts.nclusters | "clusters";
				for(i; options.clopts.nclusters) {
					sout | options.clopts.thrd_cnt[i] | nonl;
				}
				sout | nl;
				{
					// Block until shutdown is requested: stdin EOF in interactive
					// mode, or the eventfd written by the SIGTERM handler otherwise.
					if(options.interactive) {
						char buffer[128];
						for() {
							int ret = cfa_read(0, buffer, 128, 0);
							if(ret == 0) break;
							if(ret < 0) abort( "main read error: (%d) %s\n", (int)errno, strerror(errno) );
							sout | "User wrote '" | "" | nonl;
							// ret - 1 drops the trailing newline from the echo.
							write(sout, buffer, ret - 1);
							sout | "'";
						}
					} else {
						char buffer[sizeof(eventfd_t)];
						int ret = cfa_read(closefd, buffer, sizeof(eventfd_t), 0);
						if(ret < 0) abort( "main read error: (%d) %s\n", (int)errno, strerror(errno) );
					}

					sout | "Shutdown received";
				}

				// Shutdown sequence — order matters:
				// 1) flag workers done, 2) shutdown the listening socket so blocked
				// accepts return, 3) close it, 4) join the workers.
				sout | "Notifying connections..." | nonl; flush( sout );
				for(i; options.clopts.nworkers) {
					workers[i].done = true;
				}
				sout | "done";

				sout | "Shutting down socket..." | nonl; flush( sout );
				int ret = shutdown( server_fd, SHUT_RD );
				if( ret < 0 ) { abort( "shutdown error: (%d) %s\n", (int)errno, strerror(errno) ); }
				sout | "done";

				//===================
				// Close Socket
				sout | "Closing Socket..." | nonl; flush( sout );
				ret = close( server_fd );
				if(ret < 0) {
					abort( "close socket error: (%d) %s\n", (int)errno, strerror(errno) );
				}
				sout | "done";

				sout | "Stopping connection threads..." | nonl; flush( sout );
				for(i; options.clopts.nworkers) {
					join(workers[i]);
				}
			}
			sout | "done";

			sout | "Stopping protocol threads..." | nonl; flush( sout );
			deinit_protocol();
			sout | "done";

			sout | "Stopping printer threads..." | nonl; flush( sout );
			for(i; options.clopts.nclusters) {
				StatsPrinter * p = cl[i].prnt;
				if(p) join(*p);
			}
			sout | "done";

			// Now that the stats printer is stopped, we can reclaim this
			adelete(workers);

			sout | "Stopping processors/clusters..." | nonl; flush( sout );
			// Closing this scope runs the ServerCluster destructors.
		}
		sout | "done";

		sout | "Closing splice fds..." | nonl; flush( sout );
		for(i; pipe_cnt) {
			ret = close( fds[pipe_off + i] );
			if(ret < 0) {
				abort( "close pipe error: (%d) %s\n", (int)errno, strerror(errno) );
			}
		}
		free(fds);
		sout | "done";

		sout | "Stopping processors..." | nonl; flush( sout );
	}
	sout | "done";

	//===================
	// Close Files
	if( options.file_cache.path ) {
		sout | "Closing open files..." | nonl; flush( sout );
		close_cache();
		sout | "done";
	}
}

// Zipf-distributed response sizes (bytes) used to bucket sendfile statistics;
// must stay in sync with zipf_cnts (checked by the static_assert).
const size_t zipf_sizes[] = { 102, 204, 307, 409, 512, 614, 716, 819, 921, 1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192, 9216, 10240, 20480, 30720, 40960, 51200, 61440, 71680, 81920, 92160, 102400, 204800, 307200, 409600, 512000, 614400, 716800, 819200, 921600 };
static_assert(zipf_cnts == sizeof(zipf_sizes) / sizeof(zipf_sizes[0]));