Index: libcfa/src/concurrency/clib/cfathread.cfa
===================================================================
--- libcfa/src/concurrency/clib/cfathread.cfa	(revision 5e180c24b9ff1883c850b498f89023236ad9a0a7)
+++ libcfa/src/concurrency/clib/cfathread.cfa	(revision 0640c3507d67bbce432c379b8c8ce8eef0e2e324)
@@ -439,5 +439,5 @@
 	// Mutex
 	struct cfathread_mutex {
-		linear_backoff_then_block_lock impl;
+		exp_backoff_then_block_lock impl;
 	};
 	int cfathread_mutex_init(cfathread_mutex_t *restrict mut, const cfathread_mutexattr_t *restrict) __attribute__((nonnull (1))) { *mut = new(); return 0; }
@@ -454,5 +454,5 @@
 	// Condition
 	struct cfathread_condition {
-		condition_variable(linear_backoff_then_block_lock) impl;
+		condition_variable(exp_backoff_then_block_lock) impl;
 	};
 	int cfathread_cond_init(cfathread_cond_t *restrict cond, const cfathread_condattr_t *restrict) __attribute__((nonnull (1))) { *cond = new(); return 0; }
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision 5e180c24b9ff1883c850b498f89023236ad9a0a7)
+++ libcfa/src/concurrency/locks.hfa	(revision 0640c3507d67bbce432c379b8c8ce8eef0e2e324)
@@ -38,6 +38,8 @@
 #include <unistd.h>
 
-// undef to make a number of the locks not reacquire upon waking from a condlock
-#define REACQ 1
+// C_TODO: cleanup this and locks.cfa
+// - appropriate separation of interface and impl
+// - clean up unused/unneeded locks
+// - change messy big blocking lock from inheritance to composition to remove need for flags
 
 //-----------------------------------------------------------------------------
@@ -249,14 +251,10 @@
 static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(clh_lock & this, size_t recursion ) {
-	#ifdef REACQ
-	lock(this);
-	#endif
-}
-
-
-//-----------------------------------------------------------------------------
-// Linear backoff Spinlock
-struct linear_backoff_then_block_lock {
+static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
+
+
+//-----------------------------------------------------------------------------
+// Exponential backoff then block lock
+struct exp_backoff_then_block_lock {
 	// Spin lock used for mutual exclusion
 	__spinlock_t spinlock;
@@ -269,14 +267,14 @@
 };
 
-static inline void  ?{}( linear_backoff_then_block_lock & this ) {
+static inline void  ?{}( exp_backoff_then_block_lock & this ) {
 	this.spinlock{};
 	this.blocked_threads{};
 	this.lock_value = 0;
 }
-static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
-// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
-// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
-
-static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
+// static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+// static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+
+static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
 	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
 		return true;
@@ -285,7 +283,7 @@
 }
 
-static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
-
-static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
+static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
+
+static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
 	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
 		return true;
@@ -294,5 +292,5 @@
 }
 
-static inline bool block(linear_backoff_then_block_lock & this) with(this) {
+static inline bool block(exp_backoff_then_block_lock & this) with(this) {
 	lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
 	if (lock_value != 2) {
@@ -306,5 +304,5 @@
 }
 
-static inline void lock(linear_backoff_then_block_lock & this) with(this) {
+static inline void lock(exp_backoff_then_block_lock & this) with(this) {
 	size_t compare_val = 0;
 	int spin = 4;
@@ -324,5 +322,5 @@
 }
 
-static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
+static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
 	lock( spinlock __cfaabi_dbg_ctx2 );
@@ -332,11 +330,7 @@
 }
 
-static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { 
-	#ifdef REACQ
-	lock(this);
-	#endif
-}
+static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
 
 //-----------------------------------------------------------------------------
@@ -390,11 +384,7 @@
 
 static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
-	#ifdef REACQ
-		lock( lock __cfaabi_dbg_ctx2 );
-		insert_last( blocked_threads, *t );
-		unlock( lock );
-	#else
-		unpark(t);
-	#endif
+	lock( lock __cfaabi_dbg_ctx2 );
+	insert_last( blocked_threads, *t );
+	unlock( lock );
 }
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
@@ -553,9 +543,5 @@
 }
 static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
-	#ifdef REACQ
-	lock(this);
-	#endif
-}
+static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
 
 
@@ -598,9 +584,5 @@
 static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
-	#ifdef REACQ
-	lock(this);
-	#endif
-}
+static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { lock(this); }
 
 //-----------------------------------------------------------------------------
@@ -640,5 +622,4 @@
 
 static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
-  #ifdef REACQ
 	// first we acquire internal fast_block_lock
 	lock( lock __cfaabi_dbg_ctx2 );
@@ -652,16 +633,12 @@
 	unlock( lock );
 
-  #endif
 	unpark(t);
-	
 }
 static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
-  #ifdef REACQ
 	// now we acquire the entire block_spin_lock upon waking up
 	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
 	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
 	unlock( lock ); // Now we release the internal fast_spin_lock
-  #endif
 }
 
