Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 7d94bfe3b6866cec94d84d615e7304e8e4346d2f)
+++ libcfa/src/concurrency/kernel.cfa	(revision 124c1b7d8164dc87fd5c283690c523ad199ce87f)
@@ -228,14 +228,12 @@
 static void * __invoke_processor(void * arg);
 
-void ?{}(processor & this, const char name[], cluster & _cltr) with( this ) {
+static void init(processor & this, const char name[], cluster & _cltr) with( this ) {
 	this.name = name;
 	this.cltr = &_cltr;
 	id = -1u;
-	terminated{ 0 };
 	destroyer = 0p;
 	do_terminate = false;
 	preemption_alarm = 0p;
 	pending_preemption = false;
-	runner.proc = &this;
 
 	#if !defined(__CFA_NO_STATISTICS__)
@@ -244,12 +242,48 @@
 	#endif
 
-	idle{};
+	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
+
+	id = doregister((__processor_id_t*)&this);
+
+	// Lock the RWlock so no-one pushes/pops while we are changing the queue
+	uint_fast32_t last_size = ready_mutate_lock();
+
+		// Adjust the ready queue size
+		ready_queue_grow( cltr );
+
+	// Unlock the RWlock
+	ready_mutate_unlock( last_size );
+
+	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
+}
+
+// Not a dtor: it preps the destruction but must not destroy members
+void deinit(processor & this) {
+	// Lock the RWlock so no-one pushes/pops while we are changing the queue
+	uint_fast32_t last_size = ready_mutate_lock();
+
+		// Adjust the ready queue size
+		ready_queue_shrink( this.cltr );
+
+		// Make sure we aren't on the idle queue
+		unsafe_remove( this.cltr->idles, &this );
+
+	// Unlock the RWlock
+	ready_mutate_unlock( last_size );
+
+	// Finally we don't need the read_lock any more
+	unregister((__processor_id_t*)&this);
+}
+
+void ?{}(processor & this, const char name[], cluster & _cltr) {
+	( this.idle ){};
+	( this.terminated ){ 0 };
+	( this.runner ){};
+	init( this, name, _cltr );
 
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
 
 	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
-	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
+
 }
 
@@ -269,4 +303,6 @@
 
 	free( this.stack );
+
+	deinit( this );
 
 	__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
@@ -318,23 +354,9 @@
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
-
-	// register the processor unless it's the main thread which is handled in the boot sequence
-	if(this != mainProcessor) {
-		this->id = doregister((__processor_id_t*)this);
-		#if !defined(__CFA_NO_STATISTICS__)
-			if( this->print_halts ) {
-				__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this);
-			}
-		#endif
-
-		// Lock the RWlock so no-one pushes/pops while we are changing the queue
-		uint_fast32_t last_size = ready_mutate_lock();
-
-			// Adjust the ready queue size
-			ready_queue_grow( this->cltr );
-
-		// Unlock the RWlock
-		ready_mutate_unlock( last_size );
-	}
+	#if !defined(__CFA_NO_STATISTICS__)
+		if( this->print_halts ) {
+			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this);
+		}
+	#endif
 
 	{
@@ -375,29 +397,5 @@
 	V( this->terminated );
 
-	// unregister the processor unless it's the main thread which is handled in the boot sequence
-	if(this != mainProcessor) {
-		// Lock the RWlock so no-one pushes/pops while we are changing the queue
-		uint_fast32_t last_size = ready_mutate_lock();
-
-			// Adjust the ready queue size
-			ready_queue_shrink( this->cltr );
-
-			// Make sure we aren't on the idle queue
-			#if !defined(__CFA_NO_STATISTICS__)
-				bool removed =
-			#endif
-			unsafe_remove( this->cltr->idles, this );
-
-			#if !defined(__CFA_NO_STATISTICS__)
-				if(removed) __tls_stats()->ready.sleep.exits++;
-			#endif
-
-		// Unlock the RWlock
-		ready_mutate_unlock( last_size );
-
-		// Finally we don't need the read_lock any more
-		unregister((__processor_id_t*)this);
-	}
-	else {
+	if(this == mainProcessor) {
 		// HACK : the coroutine context switch expects this_thread to be set
 		// and it make sense for it to be set in all other cases except here
@@ -859,22 +857,12 @@
 
 	void ?{}(processor & this) with( this ) {
-		name = "Main Processor";
-		cltr = mainCluster;
-		terminated{ 0 };
-		do_terminate = false;
-		preemption_alarm = 0p;
-		pending_preemption = false;
+		( this.idle ){};
+		( this.terminated ){ 0 };
+		( this.runner ){};
+		init( this, "Main Processor", *mainCluster );
 		kernel_thread = pthread_self();
-		id = -1u;
-
-		#if !defined(__CFA_NO_STATISTICS__)
-			print_stats = false;
-			print_halts = false;
-		#endif
 
 		runner{ &this };
 		__cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
-
-		__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 	}
 
@@ -883,6 +871,4 @@
 	mainProcessor = (processor *)&storage_mainProcessor;
 	(*mainProcessor){};
-
-	mainProcessor->id = doregister( (__processor_id_t*)mainProcessor);
 
 	//initialize the global state variables
@@ -944,9 +930,9 @@
 	kernel_stop_preemption();
 
-	unregister((__processor_id_t*)mainProcessor);
-
 	// Destroy the main processor and its context in reverse order of construction
 	// These were manually constructed so we need manually destroy them
 	void ^?{}(processor & this) with( this ){
+		deinit( this );
+
 		/* paranoid */ verify( this.do_terminate == true );
 		__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision 7d94bfe3b6866cec94d84d615e7304e8e4346d2f)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision 124c1b7d8164dc87fd5c283690c523ad199ce87f)
@@ -186,22 +186,11 @@
 //=======================================================================
 void ?{}(__ready_queue_t & this) with (this) {
-
-	lanes.data = alloc(4);
-	for( i; 4 ) {
-		(lanes.data[i]){};
-	}
-	lanes.count = 4;
-	snzi{ log2( lanes.count / 8 ) };
+	lanes.data  = 0p;
+	lanes.count = 0;
 }
 
 void ^?{}(__ready_queue_t & this) with (this) {
-	verify( 4  == lanes.count );
+	verify( 0  == lanes.count );
 	verify( !query( snzi ) );
-
-	^(snzi){};
-
-	for( i; 4 ) {
-		^(lanes.data[i]){};
-	}
 	free(lanes.data);
 }
@@ -495,4 +484,19 @@
 }
 
+#warning remove when alloc is fixed
+// Realloc-style resize: return a buffer of 'dim' elements preserving the
+// old contents (up to the new size).  The previous alloc + memcpy version
+// copied 'dim' elements out of the old array, which read past the end of
+// the old allocation whenever the array was being grown.
+forall( dtype T | sized(T) )
+static inline T * correct_alloc( T ptr[], size_t dim ) {
+	if( dim == 0 ) {
+		free(ptr);
+		return 0p;
+	}
+	T * temp = (T *)realloc( ptr, dim * sizeof(T) );
+	return temp;
+}
+
 // Grow the ready queue
 void ready_queue_grow  (struct cluster * cltr) {
@@ -513,5 +517,5 @@
 
 		// Allocate new array (uses realloc and memcpies the data)
-		lanes.data = alloc(lanes.data, ncount);
+		lanes.data = correct_alloc(lanes.data, ncount);
 
 		// Fix the moved data
@@ -558,5 +562,5 @@
 		size_t ocount = lanes.count;
 		// Check that we have some space left
-		if(ocount < 8) abort("Program attempted to destroy more Ready Queues than were created");
+		if(ocount < 4) abort("Program attempted to destroy more Ready Queues than were created");
 
 		// reduce the actual count so push doesn't use the old queues
@@ -600,5 +604,5 @@
 
 		// Allocate new array (uses realloc and memcpies the data)
-		lanes.data = alloc(lanes.data, lanes.count);
+		lanes.data = correct_alloc(lanes.data, lanes.count);
 
 		// Fix the moved data
Index: libcfa/src/concurrency/snzi.hfa
===================================================================
--- libcfa/src/concurrency/snzi.hfa	(revision 7d94bfe3b6866cec94d84d615e7304e8e4346d2f)
+++ libcfa/src/concurrency/snzi.hfa	(revision 124c1b7d8164dc87fd5c283690c523ad199ce87f)
@@ -120,4 +120,10 @@
 //--------------------------------------------------
 // SNZI object
+void  ?{}( __snzi_t & this ) {
+	this.mask = 0;
+	this.root = 0;
+	this.nodes = 0p;
+}
+
 void  ?{}( __snzi_t & this, unsigned depth ) with( this ) {
 	mask = (1 << depth) - 1;
