Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision a1574e292b87162268e9c0e5e2e3ee9345178046)
+++ libcfa/src/concurrency/kernel.cfa	(revision 3a0ddb658a45493c4de1b2967c94eaecdb8b69a4)
@@ -124,5 +124,5 @@
 static void __wake_one(cluster * cltr);
 
-static void mark_idle (__cluster_proc_list & idles, processor & proc);
+static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
 static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );
@@ -213,5 +213,5 @@
 
 				// Push self to idle stack
-				mark_idle(this->cltr->procs, * this);
+				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
 
 				// Confirm the ready-queue is empty
@@ -331,5 +331,5 @@
 				// Push self to idle stack
 				ready_schedule_unlock();
-				mark_idle(this->cltr->procs, * this);
+				if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
 				ready_schedule_lock();
 
@@ -806,7 +806,7 @@
 }
 
-static void mark_idle(__cluster_proc_list & this, processor & proc) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	lock( this );
+static bool mark_idle(__cluster_proc_list & this, processor & proc) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	if(!try_lock( this )) return false;
 		this.idle++;
 		/* paranoid */ verify( this.idle <= this.total );
@@ -815,4 +815,6 @@
 	unlock( this );
 	/* paranoid */ verify( ! __preemption_enabled() );
+
+	return true;
 }
 
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision a1574e292b87162268e9c0e5e2e3ee9345178046)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision 3a0ddb658a45493c4de1b2967c94eaecdb8b69a4)
@@ -282,4 +282,30 @@
 }
 
+static inline bool try_lock(__cluster_proc_list & this) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	// Start by locking the global RWlock so that we know no-one is
+	// adding/removing processors while we mess with the idle lock
+	ready_schedule_lock();
+
+	// Simple counting lock, acquired by incrementing the counter
+	// to an odd number
+	uint64_t l = this.lock;
+	if(
+		(0 == (l % 2))
+		&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+	) {
+		// success
+		/* paranoid */ verify( ! __preemption_enabled() );
+		return true;
+	}
+
+	// failed to lock
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! __preemption_enabled() );
+	return false;
+}
+
 static inline void unlock(__cluster_proc_list & this) {
 	/* paranoid */ verify( ! __preemption_enabled() );
