Index: src/libcfa/bits/locks.h
===================================================================
--- src/libcfa/bits/locks.h	(revision 813ddcaa461a7899c9c16a6cdc83093b86a1589c)
+++ src/libcfa/bits/locks.h	(revision 2e9aed42d469b0fedc4f55d73dca46a3152f1743)
@@ -65,4 +65,5 @@
 	extern void yield( unsigned int );
 	extern thread_local struct thread_desc *    volatile this_thread;
+	extern thread_local struct processor *      volatile this_processor;
 
 	static inline void ?{}( __spinlock_t & this ) {
@@ -112,20 +113,20 @@
 	}
 
-	// Lock the spinlock, spin if already acquired
-	static inline void lock_yield( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
-		for ( unsigned int i = 1;; i += 1 ) {
-			if ( __lock_test_and_test_and_set( this.lock ) ) break;
-			yield( i );
-		}
-		disable_interrupts();
-		__cfaabi_dbg_debug_do(
-			this.prev_name = caller;
-			this.prev_thrd = this_thread;
-		)
-	}
+	// // Lock the spinlock, yield if already acquired
+	// static inline void lock_yield( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
+	// 	for ( unsigned int i = 1;; i += 1 ) {
+	// 		if ( __lock_test_and_test_and_set( this.lock ) ) break;
+	// 		yield( i );
+	// 	}
+	// 	disable_interrupts();
+	// 	__cfaabi_dbg_debug_do(
+	// 		this.prev_name = caller;
+	// 		this.prev_thrd = this_thread;
+	// 	)
+	// }
 
 	static inline void unlock( __spinlock_t & this ) {
+		enable_interrupts_noPoll();
 		__lock_release( this.lock );
-		enable_interrupts_noPoll();
 	}
 #endif
Index: src/libcfa/concurrency/kernel.c
===================================================================
--- src/libcfa/concurrency/kernel.c	(revision 813ddcaa461a7899c9c16a6cdc83093b86a1589c)
+++ src/libcfa/concurrency/kernel.c	(revision 2e9aed42d469b0fedc4f55d73dca46a3152f1743)
@@ -242,4 +242,5 @@
 void finishRunning(processor * this) {
 	if( this->finish.action_code == Release ) {
+		verify( disable_preempt_count > 1 );
 		unlock( *this->finish.lock );
 	}
@@ -248,8 +249,10 @@
 	}
 	else if( this->finish.action_code == Release_Schedule ) {
+		verify( disable_preempt_count > 1 );
 		unlock( *this->finish.lock );
 		ScheduleThread( this->finish.thrd );
 	}
 	else if( this->finish.action_code == Release_Multi ) {
+		verify( disable_preempt_count > this->finish.lock_count );
 		for(int i = 0; i < this->finish.lock_count; i++) {
 			unlock( *this->finish.locks[i] );
@@ -257,4 +260,5 @@
 	}
 	else if( this->finish.action_code == Release_Multi_Schedule ) {
+		verify( disable_preempt_count > this->finish.lock_count );
 		for(int i = 0; i < this->finish.lock_count; i++) {
 			unlock( *this->finish.locks[i] );
@@ -363,5 +367,5 @@
 	this_processor->finish.lock = lock;
 
-	verify( disable_preempt_count > 0 );
+	verify( disable_preempt_count > 1 );
 	suspend();
 	verify( disable_preempt_count > 0 );
@@ -391,5 +395,5 @@
 	this_processor->finish.thrd = thrd;
 
-	verify( disable_preempt_count > 0 );
+	verify( disable_preempt_count > 1 );
 	suspend();
 	verify( disable_preempt_count > 0 );
Index: src/libcfa/concurrency/monitor.c
===================================================================
--- src/libcfa/concurrency/monitor.c	(revision 813ddcaa461a7899c9c16a6cdc83093b86a1589c)
+++ src/libcfa/concurrency/monitor.c	(revision 2e9aed42d469b0fedc4f55d73dca46a3152f1743)
@@ -53,10 +53,4 @@
 static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
 
-#ifndef __CFA_LOCK_NO_YIELD
-#define DO_LOCK lock_yield
-#else
-#define DO_LOCK lock
-#endif
-
 //-----------------------------------------------------------------------------
 // Useful defines
@@ -90,6 +84,8 @@
 	static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
 		// Lock the monitor spinlock
-		DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
+		lock( this->lock __cfaabi_dbg_ctx2 );
 		thread_desc * thrd = this_thread;
+
+		verify( disable_preempt_count > 0 );
 
 		__cfaabi_dbg_print_safe("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
@@ -121,4 +117,7 @@
 			// Some one else has the monitor, wait in line for it
 			append( this->entry_queue, thrd );
+
+			verify( disable_preempt_count > 0 );
+
 			BlockInternal( &this->lock );
 
@@ -138,5 +137,5 @@
 	static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
 		// Lock the monitor spinlock
-		DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
+		lock( this->lock __cfaabi_dbg_ctx2 );
 		thread_desc * thrd = this_thread;
 
@@ -201,6 +200,6 @@
 	// Leave single monitor
 	void __leave_monitor_desc( monitor_desc * this ) {
-		// Lock the monitor spinlock, DO_LOCK to reduce contention
-		DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
+		// Lock the monitor spinlock
+		lock( this->lock __cfaabi_dbg_ctx2 );
 
 		__cfaabi_dbg_print_safe("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
@@ -248,5 +247,5 @@
 
 		// Lock the monitor now
-		DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
+		lock( this->lock __cfaabi_dbg_ctx2 );
 
 		disable_interrupts();
@@ -397,6 +396,11 @@
 	append( this.blocked, &waiter );
 
+	verify( disable_preempt_count == 0 );
+
 	// Lock all monitors (aggregates the locks as well)
 	lock_all( monitors, locks, count );
+
+	// verifyf( disable_preempt_count == count, "Got %d, expected %d\n", disable_preempt_count, count );
+	if(disable_preempt_count != count) { __cfaabi_dbg_print_buffer_decl("----------Gonna crash\n"); }
 
 	// Find the next thread(s) to run
@@ -473,6 +477,12 @@
 	monitor_ctx( this.monitors, this.monitor_count );
 
+	verify( disable_preempt_count == 0 );
+
 	// Lock all monitors (aggregates the locks them as well)
 	lock_all( monitors, locks, count );
+
+	// verify( disable_preempt_count == count );
+	if(disable_preempt_count != count) { __cfaabi_dbg_print_buffer_decl("----------Gonna crash\n"); }
+
 
 	// Create the node specific to this wait operation
@@ -737,5 +747,5 @@
 static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
-		DO_LOCK( *locks[i] __cfaabi_dbg_ctx2 );
+		lock( *locks[i] __cfaabi_dbg_ctx2 );
 	}
 }
@@ -744,5 +754,5 @@
 	for( __lock_size_t i = 0; i < count; i++ ) {
 		__spinlock_t * l = &source[i]->lock;
-		DO_LOCK( *l __cfaabi_dbg_ctx2 );
+		lock( *l __cfaabi_dbg_ctx2 );
 		if(locks) locks[i] = l;
 	}
Index: src/libcfa/concurrency/preemption.c
===================================================================
--- src/libcfa/concurrency/preemption.c	(revision 813ddcaa461a7899c9c16a6cdc83093b86a1589c)
+++ src/libcfa/concurrency/preemption.c	(revision 2e9aed42d469b0fedc4f55d73dca46a3152f1743)
@@ -169,5 +169,5 @@
 	void enable_interrupts_noPoll() {
 		__attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
-		verify( prev != 0u );                     // If this triggers someone is enabled already enabled interrupts
+		verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, interrupts were enabled while already enabled (count underflow)
 	}
 }
@@ -293,5 +293,5 @@
 	if( !preemption_ready() ) { return; }
 
-	// __cfaabi_dbg_print_buffer_decl(" KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
+	__cfaabi_dbg_print_buffer_decl(" KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
 
 	preemption_in_progress = true;                      // Sync flag : prevent recursive calls to the signal handler
