Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision fc59b580bf1b688acdfb86f4e29cc1f5c54fae03)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision a017ee7e5c430fc297274eb5bf2b1e7a1e81c6ad)
@@ -469,4 +469,5 @@
 	this.name = name;
 	this.cltr = &_cltr;
+	this.cltr_id = -1u; // sentinel: no ready-queue lane assigned yet; set by reassign_cltr_id once the processor registers
 	do_terminate = false;
 	preemption_alarm = 0p;
@@ -491,9 +492,9 @@
 	// Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
-		int target = this.cltr->procs.total += 1u;
+		this.cltr->procs.total += 1u; // ready_queue_grow now reads this count itself, so no 'target' to thread through
 		insert_last(this.cltr->procs.actives, this);
 
 		// Adjust the ready queue size
-		this.cltr_id = ready_queue_grow( cltr, target );
+		ready_queue_grow( cltr ); // resizes lanes from procs.total and reassigns every processor's cltr_id (incl. this one)
 
 	// Unlock the RWlock
@@ -507,9 +508,9 @@
 	// Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_lock();
-		int target = this.cltr->procs.total -= 1u;
+		this.cltr->procs.total -= 1u; // decrement first so ready_queue_shrink sees the post-removal count
 		remove(this);
 
 		// Adjust the ready queue size
-		ready_queue_shrink( this.cltr, target );
+		ready_queue_shrink( this.cltr ); // resizes lanes from procs.total and reassigns remaining processors' cltr_id
 
 	// Unlock the RWlock and unregister: we don't need the read_lock any more
@@ -586,5 +587,5 @@
 
 		// Adjust the ready queue size
-		ready_queue_grow( &this, 0 );
+		ready_queue_grow( &this ); // cluster bootstrap: procs.total is presumably 0 here (old code passed target 0) -- TODO confirm
 
 	// Unlock the RWlock
@@ -601,5 +602,5 @@
 
 		// Adjust the ready queue size
-		ready_queue_shrink( &this, 0 );
+		ready_queue_shrink( &this ); // cluster teardown: procs.total is presumably 0 here (old code passed target 0) -- TODO confirm
 
 	// Unlock the RWlock
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision fc59b580bf1b688acdfb86f4e29cc1f5c54fae03)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision a017ee7e5c430fc297274eb5bf2b1e7a1e81c6ad)
@@ -312,9 +312,9 @@
 //-----------------------------------------------------------------------
 // Increase the width of the ready queue (number of lanes) by 4
-unsigned ready_queue_grow  (struct cluster * cltr, int target);
+void ready_queue_grow  (struct cluster * cltr); // lane count is now derived from cltr->procs.total; cltr_id is set via reassign, not returned
 
 //-----------------------------------------------------------------------
 // Decrease the width of the ready queue (number of lanes) by 4
-void ready_queue_shrink(struct cluster * cltr, int target);
+void ready_queue_shrink(struct cluster * cltr); // lane count is now derived from cltr->procs.total
 
 
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision fc59b580bf1b688acdfb86f4e29cc1f5c54fae03)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision a017ee7e5c430fc297274eb5bf2b1e7a1e81c6ad)
@@ -254,4 +254,5 @@
 	__attribute__((unused)) int preferred;
 	#if defined(BIAS)
+		/* paranoid */ verify(external || kernelTLS().this_processor->cltr_id < lanes.count );
 		preferred =
 			//*
@@ -344,5 +345,5 @@
 	int preferred;
 	#if defined(BIAS)
-		// Don't bother trying locally too much
+		/* paranoid */ verify(kernelTLS().this_processor->cltr_id < lanes.count );
 		preferred = kernelTLS().this_processor->cltr_id;
 	#endif
@@ -541,8 +542,24 @@
 }
 
+static void assign_list(unsigned & value, const int inc, dlist(processor, processor) & list, unsigned count) { // give each of 'count' nodes the next id: value, value+inc, value+2*inc, ...
+	processor * it = &list`first;
+	for(unsigned i = 0; i < count; i++) {
+		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
+		it->cltr_id = value; // preferred ready-queue lane index for this processor
+		value += inc;
+		it = &(*it)`next; // Cforall dlist: advance to the next node
+	}
+}
+
+static void reassign_cltr_id(struct cluster * cltr, const int inc) { // recompute every processor's cltr_id after the lane count changed
+	unsigned preferred = 0;
+	assign_list(preferred, inc, cltr->procs.actives, cltr->procs.total - cltr->procs.idle); // actives first; assumes total >= idle -- TODO confirm
+	assign_list(preferred, inc, cltr->procs.idles  , cltr->procs.idle );
+}
+
 // Grow the ready queue
-unsigned ready_queue_grow(struct cluster * cltr, int target) {
-	unsigned preferred;
+void ready_queue_grow(struct cluster * cltr) {
 	size_t ncount;
+	int target = cltr->procs.total; // caller updated the processor count before calling, under the ready-queue write lock
 
 	/* paranoid */ verify( ready_mutate_islocked() );
@@ -562,8 +579,6 @@
 		if(target >= 2) {
 			ncount = target * 4;
-			preferred = ncount - 4;
 		} else {
 			ncount = 1;
-			preferred = 0;
 		}
 
@@ -595,4 +610,6 @@
 	}
 
+	reassign_cltr_id(cltr, 4); // 4 lanes per processor (ncount = target * 4), so consecutive ids are 4 apart
+
 	// Make sure that everything is consistent
 	/* paranoid */ check( cltr->ready_queue );
@@ -601,9 +618,8 @@
 
 	/* paranoid */ verify( ready_mutate_islocked() );
-	return preferred;
 }
 
 // Shrink the ready queue
-void ready_queue_shrink(struct cluster * cltr, int target) {
+void ready_queue_shrink(struct cluster * cltr) {
 	/* paranoid */ verify( ready_mutate_islocked() );
 	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
@@ -611,4 +627,6 @@
 	// Make sure that everything is consistent
 	/* paranoid */ check( cltr->ready_queue );
+
+	int target = cltr->procs.total; // caller decremented the processor count before calling
 
 	with( cltr->ready_queue ) {
@@ -679,4 +697,6 @@
 	}
 
+	reassign_cltr_id(cltr, 4); // keep every processor's cltr_id consistent with the reduced lane count
+
 	// Make sure that everything is consistent
 	/* paranoid */ check( cltr->ready_queue );
