Index: benchmark/io/http/main.cfa
===================================================================
--- benchmark/io/http/main.cfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/main.cfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -37,10 +37,4 @@
 // Globals
 //=============================================================================================
-struct ServerCluster {
-	cluster self;
-	processor    * procs;
-
-};
-
 void ?{}( ServerCluster & this ) {
 	(this.self){ "Server Cluster", options.clopts.params };
@@ -56,18 +50,18 @@
 		(this.procs[i]){ "Benchmark Processor", this.self };
 
-		int c = 0;
-		int n = 1 + (i % cnt);
-		for(int j = 0; j < CPU_SETSIZE; j++) {
-			if(CPU_ISSET(j, &fullset)) n--;
-			if(n == 0) {
-				c = j;
-				break;
-			}
-		}
-		cpu_set_t localset;
-		CPU_ZERO(&localset);
-		CPU_SET(c, &localset);
-		ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
-		if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );
+		// int c = 0;
+		// int n = 1 + (i % cnt);
+		// for(int j = 0; j < CPU_SETSIZE; j++) {
+		// 	if(CPU_ISSET(j, &fullset)) n--;
+		// 	if(n == 0) {
+		// 		c = j;
+		// 		break;
+		// 	}
+		// }
+		// cpu_set_t localset;
+		// CPU_ZERO(&localset);
+		// CPU_SET(c, &localset);
+		// ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
+		// if( ret != 0 ) abort | "pthread_setaffinity_np failed with" | ret | strerror( ret );
 
 		#if !defined(__CFA_NO_STATISTICS__)
@@ -85,5 +79,6 @@
 	#endif
 
-	options.clopts.instance = &this.self;
+	options.clopts.instance[options.clopts.cltr_cnt] = &this.self;
+	options.clopts.cltr_cnt++;
 }
 
@@ -187,9 +182,9 @@
 			Acceptor * acceptors = 0p;
 			Q * queues = 0p;
-			ServerCluster cl;
+			ServerCluster cl[options.clopts.nclusters];
 
 			if(options.stats) {
 				stats_thrd = alloc();
-				(*stats_thrd){ cl.self };
+				(*stats_thrd){ cl };
 			} else {
 				stats_thrd = 0p;
@@ -198,9 +193,14 @@
 			init_protocol();
 			{
+				int nacceptors = options.clopts.nprocs * options.clopts.nclusters;
 				conns = alloc(options.clopts.nworkers);
 				if(options.socket.reuseport) {
-					queues = alloc(options.clopts.nprocs);
-					acceptors = anew(options.clopts.nprocs);
-					for(i; options.clopts.nprocs) {
+					queues = alloc(nacceptors);
+					acceptors = alloc(nacceptors);
+					sout | "Creating" | nacceptors | "Acceptors";
+					for(i; nacceptors) {
+						(acceptors[i]){ i % options.clopts.nclusters };
+					}
+					for(i; nacceptors) {
 						(queues[i]){};
 						{
@@ -219,5 +219,5 @@
 							cworkers[i].conn.pipe[0] = fds[pipe_off + (i * 2) + 0];
 							cworkers[i].conn.pipe[1] = fds[pipe_off + (i * 2) + 1];
-							cworkers[i].queue = &queues[i % options.clopts.nprocs].q;
+							cworkers[i].queue = &queues[i % nacceptors].q;
 							conns[i] = &cworkers[i].conn;
 						}
@@ -246,6 +246,8 @@
 					}
 				}
-
-				sout | options.clopts.nworkers | "workers started on" | options.clopts.nprocs | "processors";
+				sout | options.clopts.nworkers | "workers started on" | options.clopts.nprocs | "processors /" | options.clopts.nclusters | "clusters";
+				for(i; options.clopts.nclusters) {
+					sout | options.clopts.thrd_cnt[i] | nonl;
+				}
 				sout | nl;
 				{
@@ -274,5 +276,5 @@
 				if(options.socket.reuseport) {
 					sout | "Notifying connections..." | nonl; flush( sout );
-					for(i; options.clopts.nprocs) {
+					for(i; nacceptors) {
 						acceptors[i].done = true;
 					}
@@ -283,5 +285,5 @@
 
 					sout | "Shutting down Socket..." | nonl; flush( sout );
-					for(i; options.clopts.nprocs) {
+					for(i; nacceptors) {
 						ret = shutdown( acceptors[i].sockfd, SHUT_RD );
 						if( ret < 0 ) {
@@ -292,5 +294,5 @@
 
 					sout | "Closing Socket..." | nonl; flush( sout );
-					for(i; options.clopts.nprocs) {
+					for(i; nacceptors) {
 						ret = close( acceptors[i].sockfd );
 						if( ret < 0) {
@@ -301,5 +303,5 @@
 
 					sout | "Stopping accept threads..." | nonl; flush( sout );
-					for(i; options.clopts.nprocs) {
+					for(i; nacceptors) {
 						join(acceptors[i]);
 					}
@@ -307,5 +309,5 @@
 
 					sout | "Draining worker queues..." | nonl; flush( sout );
-					for(i; options.clopts.nprocs) {
+					for(i; nacceptors) {
 						PendingRead * p = 0p;
 						while(p = pop(queues[i].q)) {
Index: benchmark/io/http/options.cfa
===================================================================
--- benchmark/io/http/options.cfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/options.cfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -42,4 +42,5 @@
 
 	{ // cluster
+		1,     // nclusters;
 		1,     // nprocs;
 		1,     // nworkers;
@@ -53,8 +54,12 @@
 void parse_options( int argc, char * argv[] ) {
 	unsigned nentries = 0;
+	bool isolate = false;
+
+
 	static cfa_option opt[] = {
 		{ 'p', "port",           "Port the server will listen on", options.socket.port},
 		{ 'c', "cpus",           "Number of processors to use", options.clopts.nprocs},
 		{ 't', "threads",        "Number of worker threads to use", options.clopts.nworkers},
+		{'\0', "isolate",        "Create one cluster per processor", isolate, parse_settrue},
 		{'\0', "log",            "Enable logs", options.log, parse_settrue},
 		{'\0', "sout",           "Redirect standard out to file", options.reopen_stdout},
@@ -91,7 +96,15 @@
 		nentries = v;
 	}
+	if(isolate) {
+		options.clopts.nclusters = options.clopts.nprocs;
+		options.clopts.nprocs = 1;
+	}
 	options.clopts.params.num_entries = nentries;
-	options.clopts.instance = 0p;
-	options.clopts.thrd_cnt = 0;
+	options.clopts.instance = alloc(options.clopts.nclusters);
+	options.clopts.thrd_cnt = alloc(options.clopts.nclusters);
+	options.clopts.cltr_cnt = 0;
+	for(i; options.clopts.nclusters) {
+		options.clopts.thrd_cnt[i] = 0;
+	}
 
 
Index: benchmark/io/http/options.hfa
===================================================================
--- benchmark/io/http/options.hfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/options.hfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -31,4 +31,5 @@
 
 	struct {
+		int nclusters;
 		int nprocs;
 		int nworkers;
@@ -36,6 +37,7 @@
 		bool procstats;
 		bool viewhalts;
-		cluster * instance;
-		size_t    thrd_cnt;
+		cluster ** instance;
+		size_t   * thrd_cnt;
+		size_t     cltr_cnt;
 	} clopts;
 };
Index: benchmark/io/http/printer.cfa
===================================================================
--- benchmark/io/http/printer.cfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/printer.cfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -1,3 +1,4 @@
 #include "printer.hfa"
+#include "options.hfa"
 
 #include <fstream.hfa>
@@ -40,7 +41,7 @@
 }
 
-void ?{}( StatsPrinter & this, cluster & cl ) {
+void ?{}( StatsPrinter & this, ServerCluster * cl ) {
 	((thread&)this){ "Stats Printer Thread" };
-	&this.cl = &cl;
+	this.cl = cl;
 	memset(&this.stats, 0, sizeof(this.stats));;
 }
@@ -59,5 +60,5 @@
 		wait(this.var, 10`s);
 
-		print_stats_now( this.cl, CFA_STATS_READY_Q | CFA_STATS_IO );
+		for(i; options.clopts.nclusters) print_stats_now( this.cl[i].self, CFA_STATS_READY_Q | CFA_STATS_IO );
 		{
 			struct {
Index: benchmark/io/http/printer.hfa
===================================================================
--- benchmark/io/http/printer.hfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/printer.hfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -35,4 +35,9 @@
 void push(acceptor_stats_t & from, acceptor_stats_t & to);
 
+struct ServerCluster {
+	cluster self;
+	processor    * procs;
+};
+
 thread StatsPrinter {
 	struct {
@@ -42,8 +47,8 @@
 	} stats;
 	condition_variable(fast_block_lock) var;
-	cluster & cl;
+	ServerCluster * cl;
 };
 
-void ?{}( StatsPrinter & this, cluster & cl );
+void ?{}( StatsPrinter & this, ServerCluster * cl );
 void ^?{}( StatsPrinter & mutex this );
 
Index: benchmark/io/http/worker.cfa
===================================================================
--- benchmark/io/http/worker.cfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/worker.cfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -116,6 +116,7 @@
 //=============================================================================================
 void ?{}( AcceptWorker & this ) {
-	((thread&)this){ "Server Worker Thread", *options.clopts.instance, 64000 };
-	options.clopts.thrd_cnt++;
+	size_t cli = rand() % options.clopts.cltr_cnt;
+	((thread&)this){ "Server Worker Thread", *options.clopts.instance[cli], 64000 };
+	options.clopts.thrd_cnt[cli]++;
 	this.done = false;
 }
@@ -150,6 +151,7 @@
 //=============================================================================================
 void ?{}( ChannelWorker & this ) {
-	((thread&)this){ "Server Worker Thread", *options.clopts.instance, 64000 };
-	options.clopts.thrd_cnt++;
+	size_t cli = rand() % options.clopts.cltr_cnt;
+	((thread&)this){ "Server Worker Thread", *options.clopts.instance[cli], 64000 };
+	options.clopts.thrd_cnt[cli]++;
 	this.done = false;
 }
@@ -181,7 +183,7 @@
 }
 
-void ?{}( Acceptor & this ) {
-	((thread&)this){ "Server Acceptor Thread", *options.clopts.instance, 64000 };
-	options.clopts.thrd_cnt++;
+void ?{}( Acceptor & this, int cli ) {
+	((thread&)this){ "Server Acceptor Thread", *options.clopts.instance[cli], 64000 };
+	options.clopts.thrd_cnt[cli]++;
 	this.done = false;
 }
Index: benchmark/io/http/worker.hfa
===================================================================
--- benchmark/io/http/worker.hfa	(revision bf7c7ea7946ac946275183d79e4fdc63abc7185c)
+++ benchmark/io/http/worker.hfa	(revision 329e26a71eb5f9e64e5157e67c327196dd41508b)
@@ -72,4 +72,4 @@
 	acceptor_stats_t stats;
 };
-void ?{}( Acceptor & );
+void ?{}( Acceptor &, int cli );
 void main( Acceptor & );
