Index: benchmark/io/http/Makefile.am
===================================================================
--- benchmark/io/http/Makefile.am	(revision 8c58e73044352ccaa70169474c50cd256956baae)
+++ benchmark/io/http/Makefile.am	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -37,4 +37,6 @@
 	options.cfa \
 	options.hfa \
+	printer.cfa \
+	printer.hfa \
 	protocol.cfa \
 	protocol.hfa \
Index: benchmark/io/http/main.cfa
===================================================================
--- benchmark/io/http/main.cfa	(revision 8c58e73044352ccaa70169474c50cd256956baae)
+++ benchmark/io/http/main.cfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -25,4 +25,5 @@
 #include "options.hfa"
 #include "socket.hfa"
+#include "printer.hfa"
 #include "worker.hfa"
 
@@ -31,74 +32,4 @@
 Duration default_preemption() {
 	return 0;
-}
-
-//=============================================================================================
-// Stats Printer
-//============================================================================================='
-
-thread StatsPrinter {
-	connection ** conns;
-	volatile int conn_cnt;
-	condition_variable(fast_block_lock) var;
-};
-
-void ?{}( StatsPrinter & this, cluster & cl ) {
-	((thread&)this){ "Stats Printer Thread", cl };
-	this.conn_cnt = 0;
-}
-
-void ^?{}( StatsPrinter & mutex this ) {}
-
-#define eng3(X) (ws(3, 3, unit(eng( X ))))
-
-void main(StatsPrinter & this) {
-	LOOP: for() {
-		waitfor( ^?{} : this) {
-			break LOOP;
-		}
-		or else {}
-
-		wait(this.var, 10`s);
-
-		print_stats_now( *active_cluster(), CFA_STATS_READY_Q | CFA_STATS_IO );
-		if(this.conn_cnt != 0) {
-			uint64_t tries = 0;
-			uint64_t calls = 0;
-			uint64_t header = 0;
-			uint64_t splcin = 0;
-			uint64_t splcot = 0;
-			struct {
-				volatile uint64_t calls;
-				volatile uint64_t bytes;
-			} avgrd[zipf_cnts];
-			memset(avgrd, 0, sizeof(avgrd));
-
-			for(i; this.conn_cnt) {
-				tries += this.conns[i]->stats.sendfile.tries;
-				calls += this.conns[i]->stats.sendfile.calls;
-				header += this.conns[i]->stats.sendfile.header;
-				splcin += this.conns[i]->stats.sendfile.splcin;
-				splcot += this.conns[i]->stats.sendfile.splcot;
-				for(j; zipf_cnts) {
-					avgrd[j].calls += this.conns[i]->stats.sendfile.avgrd[j].calls;
-					avgrd[j].bytes += this.conns[i]->stats.sendfile.avgrd[j].bytes;
-				}
-			}
-
-			double ratio = ((double)tries) / calls;
-
-			sout | "----- Connection Stats -----";
-			sout | "sendfile  : " | calls | "calls," | tries | "tries (" | ratio | " try/call)";
-			sout | "            " | header | "header," | splcin | "splice in," | splcot | "splice out";
-			sout | " - zipf sizes:";
-			for(i; zipf_cnts) {
-				double written = avgrd[i].calls > 0 ? ((double)avgrd[i].bytes) / avgrd[i].calls : 0;
-				sout | "        " | zipf_sizes[i] | "bytes," | avgrd[i].calls | "shorts," | written | "written";
-			}
-		}
-		else {
-			sout | "No Connections!";
-		}
-	}
 }
 
@@ -109,6 +40,4 @@
 	cluster self;
 	processor    * procs;
-	// io_context   * ctxs;
-	StatsPrinter * prnt;
 
 };
@@ -152,11 +81,4 @@
 	}
 
-	if(options.stats) {
-		this.prnt = alloc();
-		(*this.prnt){ this.self };
-	} else {
-		this.prnt = 0p;
-	}
-
 	#if !defined(__CFA_NO_STATISTICS__)
 		print_stats_at_exit( this.self, CFA_STATS_READY_Q | CFA_STATS_IO );
@@ -167,6 +89,4 @@
 
 void ^?{}( ServerCluster & this ) {
-	delete(this.prnt);
-
 	for(i; options.clopts.nprocs) {
 		^(this.procs[i]){};
@@ -268,4 +188,11 @@
 			Q * queues = 0p;
 			ServerCluster cl;
+
+			if(options.stats) {
+				stats_thrd = alloc();
+				(*stats_thrd){ cl.self };
+			} else {
+				stats_thrd = 0p;
+			}
 
 			init_protocol();
@@ -319,6 +246,5 @@
 					}
 				}
-				cl.prnt->conns = conns;
-				cl.prnt->conn_cnt = options.clopts.nworkers;
+
 				sout | options.clopts.nworkers | "workers started on" | options.clopts.nprocs | "processors";
 				sout | nl;
@@ -436,9 +362,8 @@
 
 			sout | "Stopping printer threads..." | nonl; flush( sout );
-			StatsPrinter * p = cl.prnt;
-			if(p) {
-				notify_one(p->var);
-				join(*p);
-			}
+			if(stats_thrd) {
+				notify_one(stats_thrd->var);
+			}
+			delete(stats_thrd);
 			sout | "done";
 
Index: benchmark/io/http/printer.cfa
===================================================================
--- benchmark/io/http/printer.cfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
+++ benchmark/io/http/printer.cfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -0,0 +1,98 @@
+#include "printer.hfa"
+
+#include <fstream.hfa>
+#include <stats.hfa>
+
+void ?{}( sendfile_stats_t & this ) {
+	this.calls = 0;
+	this.tries = 0;
+	this.header = 0;
+	this.splcin = 0;
+	this.splcot = 0;
+	for(i; zipf_cnts) {
+		this.avgrd[i].calls = 0;
+		this.avgrd[i].bytes = 0;
+	}
+}
+
+void push(sendfile_stats_t & from, sendfile_stats_t & to) {
+	__atomic_fetch_add(&to.calls, from.calls, __ATOMIC_RELAXED); from.calls = 0;
+	__atomic_fetch_add(&to.tries, from.tries, __ATOMIC_RELAXED); from.tries = 0;
+	__atomic_fetch_add(&to.header, from.header, __ATOMIC_RELAXED); from.header = 0;
+	__atomic_fetch_add(&to.splcin, from.splcin, __ATOMIC_RELAXED); from.splcin = 0;
+	__atomic_fetch_add(&to.splcot, from.splcot, __ATOMIC_RELAXED); from.splcot = 0;
+	for(i; zipf_cnts) {
+		__atomic_fetch_add(&to.avgrd[i].calls, from.avgrd[i].calls, __ATOMIC_RELAXED); from.avgrd[i].calls = 0;
+		__atomic_fetch_add(&to.avgrd[i].bytes, from.avgrd[i].bytes, __ATOMIC_RELAXED); from.avgrd[i].bytes = 0;
+	}
+}
+
+void ?{}( acceptor_stats_t & this ) {
+	this.creates = 0;
+	this.accepts = 0;
+	this.eagains = 0;
+}
+
+void push(acceptor_stats_t & from, acceptor_stats_t & to) {
+	__atomic_fetch_add(&to.creates, from.creates, __ATOMIC_RELAXED); from.creates = 0;
+	__atomic_fetch_add(&to.accepts, from.accepts, __ATOMIC_RELAXED); from.accepts = 0;
+	__atomic_fetch_add(&to.eagains, from.eagains, __ATOMIC_RELAXED); from.eagains = 0;
+}
+
+void ?{}( StatsPrinter & this, cluster & cl ) {
+	((thread&)this){ "Stats Printer Thread", cl };
+
+	memset(&this.stats, 0, sizeof(this.stats));
+}
+
+void ^?{}( StatsPrinter & mutex this ) {}
+
+#define eng3(X) (ws(3, 3, unit(eng( X ))))
+
+void main(StatsPrinter & this) {
+	LOOP: for() {
+		waitfor( ^?{} : this) {
+			break LOOP;
+		}
+		or else {}
+
+		wait(this.var, 10`s);
+
+		print_stats_now( *active_cluster(), CFA_STATS_READY_Q | CFA_STATS_IO );
+		{
+			struct {
+				volatile uint64_t calls;
+				volatile uint64_t bytes;
+			} avgrd[zipf_cnts];
+			memset(avgrd, 0, sizeof(avgrd));
+
+			uint64_t tries = this.stats.send.tries;
+			uint64_t calls  = this.stats.send.calls;
+			uint64_t header = this.stats.send.header;
+			uint64_t splcin = this.stats.send.splcin;
+			uint64_t splcot = this.stats.send.splcot;
+			for(j; zipf_cnts) {
+				avgrd[j].calls += this.stats.send.avgrd[j].calls;
+				avgrd[j].bytes += this.stats.send.avgrd[j].bytes;
+			}
+
+			double ratio = ((double)tries) / calls;
+
+			if(this.stats.accpt.accepts > 0) {
+				sout | "----- Acceptor Stats -----";
+				sout | "accept  : " | this.stats.accpt.accepts | "calls," | this.stats.accpt.eagains | "eagains," | this.stats.accpt.creates | " thrds";
+			}
+
+			sout | "----- Connection Stats -----";
+			sout | "sendfile  : " | calls | "calls," | tries | "tries (" | ratio | " try/call)";
+			sout | "            " | header | "header," | splcin | "splice in," | splcot | "splice out";
+			sout | " - zipf sizes:";
+			for(i; zipf_cnts) {
+				double written = avgrd[i].calls > 0 ? ((double)avgrd[i].bytes) / avgrd[i].calls : 0;
+				sout | "        " | zipf_sizes[i] | "bytes," | avgrd[i].calls | "shorts," | written | "written";
+			}
+		}
+	}
+}
+
+StatsPrinter * stats_thrd;
Index: benchmark/io/http/printer.hfa
===================================================================
--- benchmark/io/http/printer.hfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
+++ benchmark/io/http/printer.hfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -0,0 +1,49 @@
+#pragma once
+
+#include <stdint.h>
+
+#include <locks.hfa>
+#include <thread.hfa>
+
+extern const size_t zipf_sizes[];
+enum { zipf_cnts = 36, };
+
+struct sendfile_stats_t {
+	volatile uint64_t calls;
+	volatile uint64_t tries;
+	volatile uint64_t header;
+	volatile uint64_t splcin;
+	volatile uint64_t splcot;
+	struct {
+		volatile uint64_t calls;
+		volatile uint64_t bytes;
+	} avgrd[zipf_cnts];
+};
+
+void ?{}( sendfile_stats_t & this );
+
+void push(sendfile_stats_t & from, sendfile_stats_t & to);
+
+struct acceptor_stats_t {
+	volatile uint64_t creates;
+	volatile uint64_t accepts;
+	volatile uint64_t eagains;
+};
+
+void ?{}( acceptor_stats_t & this );
+
+void push(acceptor_stats_t & from, acceptor_stats_t & to);
+
+thread StatsPrinter {
+	struct {
+		__spinlock_t lock;
+		sendfile_stats_t send;
+		acceptor_stats_t accpt;
+	} stats;
+	condition_variable(fast_block_lock) var;
+};
+
+void ?{}( StatsPrinter & this, cluster & cl );
+void ^?{}( StatsPrinter & mutex this );
+
+extern StatsPrinter * stats_thrd;
Index: benchmark/io/http/protocol.cfa
===================================================================
--- benchmark/io/http/protocol.cfa	(revision 8c58e73044352ccaa70169474c50cd256956baae)
+++ benchmark/io/http/protocol.cfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -587,5 +587,5 @@
 
 void ?{}( DateFormater & this ) {
-	((thread&)this){ "Server Date Thread", *options.clopts.instance[0] };
+	((thread&)this){ "Server Date Thread", *options.clopts.instance };
 	this.idx = 0;
 	memset( &this.buffers[0], 0, sizeof(this.buffers[0]) );
Index: benchmark/io/http/worker.cfa
===================================================================
--- benchmark/io/http/worker.cfa	(revision 8c58e73044352ccaa70169474c50cd256956baae)
+++ benchmark/io/http/worker.cfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -14,20 +14,8 @@
 #include "filecache.hfa"
 
-void ?{}( sendfile_stats_t & this ) {
-	this.calls = 0;
-	this.tries = 0;
-	this.header = 0;
-	this.splcin = 0;
-	this.splcot = 0;
-	for(i; zipf_cnts) {
-		this.avgrd[i].calls = 0;
-		this.avgrd[i].bytes = 0;
-	}
-}
-
 //=============================================================================================
 // Generic connection handling
 //=============================================================================================
-static void handle_connection( connection & this, volatile int & fd, char * buffer, size_t len, io_future_t * f ) {
+static void handle_connection( connection & this, volatile int & fd, char * buffer, size_t len, io_future_t * f, unsigned long long & last ) {
 	REQUEST:
 	for() {
@@ -111,4 +99,15 @@
 		if( options.log ) sout | "=== Answer sent ===";
 	}
+
+	if (stats_thrd) {
+		unsigned long long next = rdtscl();
+		if(next > (last + 500000000)) {
+			if(try_lock(stats_thrd->stats.lock)) {
+				push(this.stats.sendfile, stats_thrd->stats.send);
+				unlock(stats_thrd->stats.lock);
+				last = next;
+			}
+		}
+	}
 }
 
@@ -124,4 +123,5 @@
 void main( AcceptWorker & this ) {
 	park();
+	unsigned long long last = rdtscl();
 	/* paranoid */ assert( this.conn.pipe[0] != -1 );
 	/* paranoid */ assert( this.conn.pipe[1] != -1 );
@@ -139,5 +139,5 @@
 		size_t len = options.socket.buflen;
 		char buffer[len];
-		handle_connection( this.conn, fd, buffer, len, 0p );
+		handle_connection( this.conn, fd, buffer, len, 0p, last );
 
 		if( options.log ) sout | "=== Connection closed ===";
@@ -157,4 +157,5 @@
 void main( ChannelWorker & this ) {
 	park();
+	unsigned long long last = rdtscl();
 	/* paranoid */ assert( this.conn.pipe[0] != -1 );
 	/* paranoid */ assert( this.conn.pipe[1] != -1 );
@@ -168,5 +169,5 @@
 
 		if( options.log ) sout | "=== Waiting new connection ===";
-		handle_connection( this.conn, p.out.fd, buffer, len, &p.f );
+		handle_connection( this.conn, p.out.fd, buffer, len, &p.f, last );
 
 		if( options.log ) sout | "=== Connection closed ===";
@@ -187,4 +188,5 @@
 void main( Acceptor & this ) {
 	park();
+	unsigned long long last = rdtscl();
 	if( options.log ) sout | "=== Accepting connection ===";
 	for() {
@@ -192,4 +194,5 @@
 		if(fd < 0) {
 			if( errno == EWOULDBLOCK) {
+				this.stats.eagains++;
 				yield();
 				continue;
@@ -199,4 +202,6 @@
 			abort( "accept error: (%d) %s\n", (int)errno, strerror(errno) );
 		}
+		this.stats.accepts++;
+
 		if(this.done) return;
 
@@ -210,4 +215,5 @@
 				if(p) break;
 				yield();
+				this.stats.creates++;
 			};
 
@@ -216,4 +222,15 @@
 		}
 
+		if (stats_thrd) {
+			unsigned long long next = rdtscl();
+			if(next > (last + 500000000)) {
+				if(try_lock(stats_thrd->stats.lock)) {
+					push(this.stats, stats_thrd->stats.accpt);
+					unlock(stats_thrd->stats.lock);
+					last = next;
+				}
+			}
+		}
+
 		if( options.log ) sout | "=== Accepting connection ===";
 	}
Index: benchmark/io/http/worker.hfa
===================================================================
--- benchmark/io/http/worker.hfa	(revision 8c58e73044352ccaa70169474c50cd256956baae)
+++ benchmark/io/http/worker.hfa	(revision 137974aeb3cc4ec478d6e887e15e09f3a7df22d3)
@@ -9,24 +9,9 @@
 }
 
+#include "printer.hfa"
+
 //=============================================================================================
 // Worker Thread
 //=============================================================================================
-
-extern const size_t zipf_sizes[];
-enum { zipf_cnts = 36, };
-
-struct sendfile_stats_t {
-	volatile uint64_t calls;
-	volatile uint64_t tries;
-	volatile uint64_t header;
-	volatile uint64_t splcin;
-	volatile uint64_t splcot;
-	struct {
-		volatile uint64_t calls;
-		volatile uint64_t bytes;
-	} avgrd[zipf_cnts];
-};
-
-void ?{}( sendfile_stats_t & this );
 
 struct connection {
@@ -85,4 +70,5 @@
 	int flags;
 	volatile bool done;
+	acceptor_stats_t stats;
 };
 void ?{}( Acceptor & );
