Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 34b8cb78d5c3fe9fee27074af9317f20dd531a38)
+++ libcfa/src/concurrency/kernel.cfa	(revision b14ec5fb3dea61c99fd6a0570e26b45982e7b423)
@@ -124,5 +124,5 @@
 static void __wake_one(cluster * cltr);
 
-static void mark_idle (__cluster_proc_list & idles, processor & proc);
+static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
 
@@ -212,5 +212,5 @@
 
 				// Push self to idle stack
-				mark_idle(this->cltr->procs, * this);
+				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
 
 				// Confirm the ready-queue is empty
@@ -330,5 +330,5 @@
 				// Push self to idle stack
 				ready_schedule_unlock();
-				mark_idle(this->cltr->procs, * this);
+				if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
 				ready_schedule_lock();
 
@@ -802,7 +802,7 @@
 }
 
-static void mark_idle(__cluster_proc_list & this, processor & proc) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	lock( this );
+static bool mark_idle(__cluster_proc_list & this, processor & proc) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	if(!try_lock( this )) return false;
 		this.idle++;
 		/* paranoid */ verify( this.idle <= this.total );
@@ -813,4 +813,6 @@
 	unlock( this );
 	/* paranoid */ verify( ! __preemption_enabled() );
+
+	return true;
 }
 
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision 34b8cb78d5c3fe9fee27074af9317f20dd531a38)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision b14ec5fb3dea61c99fd6a0570e26b45982e7b423)
@@ -282,4 +282,30 @@
 }
 
+static inline bool try_lock(__cluster_proc_list & this) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	// Start by locking the global RWlock so that we know no-one is
+	// adding/removing processors while we mess with the idle lock
+	ready_schedule_lock();
+
+	// Simple counting lock, acquired by incrementing the counter
+	// to an odd number
+	uint64_t l = this.lock;
+	if(
+		(0 == (l % 2))
+		&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+	) {
+		// success
+		/* paranoid */ verify( ! __preemption_enabled() );
+		return true;
+	}
+
+	// failed to lock
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! __preemption_enabled() );
+	return false;
+}
+
 static inline void unlock(__cluster_proc_list & this) {
 	/* paranoid */ verify( ! __preemption_enabled() );
