Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision a33c11376e88fecef869e2f63e32b80f9410edc8)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision a7504db5aad15d98237e3e2f2520fdffee68aeae)
@@ -489,10 +489,7 @@
 	#endif
 
-	lock( this.cltr->idles );
-		int target = this.cltr->idles.total += 1u;
-	unlock( this.cltr->idles );
-
 	// Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
+		int target = this.cltr->idles.total += 1u;
 
 		// Adjust the ready queue size
@@ -507,10 +504,7 @@
 // Not a ctor, it just preps the destruction but should not destroy members
 static void deinit(processor & this) {
-	lock( this.cltr->idles );
-		int target = this.cltr->idles.total -= 1u;
-	unlock( this.cltr->idles );
-
 	// Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_lock();
+		int target = this.cltr->idles.total -= 1u;
 
 		// Adjust the ready queue size
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision a33c11376e88fecef869e2f63e32b80f9410edc8)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision a7504db5aad15d98237e3e2f2520fdffee68aeae)
@@ -89,22 +89,4 @@
 // Unregister a processor from a given cluster using its id, getting back the original pointer
 void unregister_proc_id( struct __processor_id_t * proc );
-
-//-----------------------------------------------------------------------
-// Cluster idle lock/unlock
-static inline void lock(__cluster_idles & this) {
-	for() {
-		uint64_t l = this.lock;
-		if(
-			(0 == (l % 2))
-			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-		) return;
-		Pause();
-	}
-}
-
-static inline void unlock(__cluster_idles & this) {
-	/* paranoid */ verify( 1 == (this.lock % 2) );
-	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
-}
 
 //=======================================================================
@@ -263,4 +245,40 @@
 }
 
+//-----------------------------------------------------------------------
+// Cluster idle lock/unlock
+static inline void lock(__cluster_idles & this) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	// Start by locking the global RWlock so that we know no-one is
+	// adding/removing processors while we mess with the idle lock
+	ready_schedule_lock();
+
+	// Simple counting lock, acquired by incrementing the counter
+	// to an odd number
+	for() {
+		uint64_t l = this.lock;
+		if(
+			(0 == (l % 2))
+			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+		) return;
+		Pause();
+	}
+
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static inline void unlock(__cluster_idles & this) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	/* paranoid */ verify( 1 == (this.lock % 2) );
+	// Simple counting lock, released by incrementing to an even number
+	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
+
+	// Release the global lock, which we acquired when locking
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
 //=======================================================================
 // Ready-Queue API
