Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision 175f9f442245bf2962d704caa69c27b31711d255)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision 753fb9784c694cd9fc5f0a571f03237cf5351f17)
@@ -219,4 +219,5 @@
 
 	// Step 2 : acquire our local lock
+	/*paranoid*/ verify(!kernelTLS().sched_lock);
 	__atomic_acquire( &kernelTLS().sched_lock );
 	/*paranoid*/ verify(kernelTLS().sched_lock);
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision 175f9f442245bf2962d704caa69c27b31711d255)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision 753fb9784c694cd9fc5f0a571f03237cf5351f17)
@@ -201,5 +201,4 @@
 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( ! kernelTLS().sched_lock );
 
 	// Step 1 : lock global lock
@@ -207,4 +206,9 @@
 	//   to simply lock their own lock and enter.
 	__atomic_acquire( &write_lock );
+
+	// Make sure we won't deadlock ourselves
+	// Checking before acquiring the writer lock isn't safe
+	// because someone else could have locked us.
+	/* paranoid */ verify( ! kernelTLS().sched_lock );
 
 	// Step 2 : lock per-proc lock
