Index: libcfa/src/concurrency/channel.hfa
===================================================================
--- libcfa/src/concurrency/channel.hfa	(revision 1d245ea070764ba5b100c52d36b44e4fbce2b808)
+++ libcfa/src/concurrency/channel.hfa	(revision d30e3ebfc2de91ddf2b472e0097ef737bb4f3654)
@@ -2,25 +2,11 @@
 
 #include <locks.hfa>
-
-struct no_reacq_lock {
-    inline exp_backoff_then_block_lock;
-};
-
-// have to override these by hand to get around plan 9 inheritance bug where resolver can't find the appropriate routine to call
-static inline void   ?{}( no_reacq_lock & this ) { ((exp_backoff_then_block_lock &)this){}; }
-static inline bool   try_lock(no_reacq_lock & this) { return try_lock(((exp_backoff_then_block_lock &)this)); }
-static inline void   lock(no_reacq_lock & this) { lock(((exp_backoff_then_block_lock &)this)); }
-static inline void   unlock(no_reacq_lock & this) { unlock(((exp_backoff_then_block_lock &)this)); }
-static inline void   on_notify(no_reacq_lock & this, struct thread$ * t ) { on_notify(((exp_backoff_then_block_lock &)this), t); }
-static inline size_t on_wait(no_reacq_lock & this) { return on_wait(((exp_backoff_then_block_lock &)this)); }
-// override wakeup so that we don't reacquire the lock if using a condvar
-static inline void   on_wakeup( no_reacq_lock & this, size_t recursion ) {}
-
-#define __PREVENTION_CHANNEL
+#include <list.hfa>
+
+// #define __PREVENTION_CHANNEL
 #ifdef __PREVENTION_CHANNEL
 forall( T ) {
 struct channel {
-    size_t size;
-    size_t front, back, count;
+    size_t size, count, front, back;
     T * buffer;
     thread$ * chair;
@@ -87,5 +73,5 @@
         return;
     }
-    else insert_( chan, elem );
+    insert_( chan, elem );
 
     unlock( mutex_lock );
@@ -110,5 +96,5 @@
 
     // wait if buffer is empty, work will be completed by someone else
-    if ( count == 0 ) { 
+    if ( count == 0 ) {
         chair = active_thread();
         chair_elem = &retval;
@@ -121,5 +107,6 @@
     memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
     count -= 1;
-    front = (front + 1) % size;
+    front++;
+    if ( front == size ) front = 0;
 
     if ( chair != 0p ) {
@@ -142,11 +129,31 @@
 
 #ifndef __PREVENTION_CHANNEL
+
+// link field used for threads waiting on channel
+struct wait_link {
+    // used to put wait_link on a dl queue
+    inline dlink(wait_link);
+
+    // waiting thread
+    struct thread$ * t;
+
+    // handoff buffer: points at the waiter's element (producer's value to insert, or consumer's retval slot)
+    void * elem;
+};
+P9_EMBEDDED( wait_link, dlink(wait_link) )
+
+static inline void ?{}( wait_link & this, thread$ * t, void * elem ) {
+    this.t = t;
+    this.elem = elem;
+}
+
 forall( T ) {
+
 struct channel {
     size_t size;
     size_t front, back, count;
     T * buffer;
-    fast_cond_var( no_reacq_lock ) prods, cons;
-    no_reacq_lock mutex_lock;
+    dlist( wait_link ) prods, cons;
+    exp_backoff_then_block_lock mutex_lock;
 };
 
@@ -164,7 +171,7 @@
 static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
 static inline size_t get_size( channel(T) & chan ) with(chan) { return size; }
-static inline bool has_waiters( channel(T) & chan ) with(chan) { return !empty( cons ) || !empty( prods ); }
-static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !empty( cons ); }
-static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !empty( prods ); }
+static inline bool has_waiters( channel(T) & chan ) with(chan) { return !cons`isEmpty || !prods`isEmpty; }
+static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !cons`isEmpty; }
+static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !prods`isEmpty; }
 
 static inline void insert_( channel(T) & chan, T & elem ) with(chan) {
@@ -175,4 +182,15 @@
 }
 
+static inline void wake_one( dlist( wait_link ) & queue ) {
+    wait_link & popped = try_pop_front( queue ); // precondition: queue non-empty — every caller checks `isEmpty first
+    unpark( popped.t );
+}
+
+static inline void block( dlist( wait_link ) & queue, void * elem_ptr, exp_backoff_then_block_lock & lock ) {
+    wait_link w{ active_thread(), elem_ptr }; // stack-allocated: safe, this thread stays parked until the waker pops it
+    insert_last( queue, w );
+    unlock( lock );
+    park();
+}
 
 static inline void insert( channel(T) & chan, T elem ) with(chan) {
@@ -180,7 +198,7 @@
 
     // have to check for the zero size channel case
-    if ( size == 0 && !empty( cons ) ) {
-        memcpy((void *)front( cons ), (void *)&elem, sizeof(T));
-        notify_one( cons );
+    if ( size == 0 && !cons`isEmpty ) {
+        memcpy(cons`first.elem, (void *)&elem, sizeof(T));
+        wake_one( cons );
         unlock( mutex_lock );
         return;
@@ -188,15 +206,14 @@
 
     // wait if buffer is full, work will be completed by someone else
-    if ( count == size ) { 
-        wait( prods, mutex_lock, (uintptr_t)&elem );
+    if ( count == size ) {
+        block( prods, &elem, mutex_lock );
         return;
     } // if
 
-    if ( count == 0 && !empty( cons ) )
-        // do waiting consumer work
-        memcpy((void *)front( cons ), (void *)&elem, sizeof(T)); 
-    else insert_( chan, elem );
+    if ( count == 0 && !cons`isEmpty ) {
+        memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
+        wake_one( cons );
+    } else insert_( chan, elem );
     
-    notify_one( cons );
     unlock( mutex_lock );
 }
@@ -207,7 +224,7 @@
 
     // have to check for the zero size channel case
-    if ( size == 0 && !empty( prods ) ) {
-        memcpy((void *)&retval, (void *)front( prods ), sizeof(T));
-        notify_one( prods );
+    if ( size == 0 && !prods`isEmpty ) {
+        memcpy((void *)&retval, (void *)prods`first.elem, sizeof(T));
+        wake_one( prods );
         unlock( mutex_lock );
         return retval;
@@ -215,6 +232,6 @@
 
     // wait if buffer is empty, work will be completed by someone else
-    if (count == 0) { 
-        wait( cons, mutex_lock, (uintptr_t)&retval );
+    if (count == 0) {
+        block( cons, &retval, mutex_lock );
         return retval;
     }
@@ -225,12 +242,12 @@
     front = (front + 1) % size;
 
-    if (count == size - 1 && !empty( prods ) ) 
-        insert_( chan, *((T *)front( prods )) );  // do waiting producer work
-
-    notify_one( prods );
+    if (count == size - 1 && !prods`isEmpty ) {
+        insert_( chan, *(T *)prods`first.elem );  // do waiting producer work
+        wake_one( prods );
+    }
+
     unlock( mutex_lock );
     return retval;
 }
-
 } // forall( T )
 #endif
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision 1d245ea070764ba5b100c52d36b44e4fbce2b808)
+++ libcfa/src/concurrency/locks.hfa	(revision d30e3ebfc2de91ddf2b472e0097ef737bb4f3654)
@@ -253,5 +253,4 @@
 static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
 
-
 //-----------------------------------------------------------------------------
 // Exponential backoff then block lock
@@ -272,13 +271,7 @@
 	this.lock_value = 0;
 }
-static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
-// static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
-// static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
 
 static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
-	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
-		return true;
-	}
-	return false;
+	return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 }
 
@@ -286,18 +279,15 @@
 
 static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
-	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
-		return true;
-	}
-	return false;
+	return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
 }
 
 static inline bool block(exp_backoff_then_block_lock & this) with(this) {
-	lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
-	if (lock_value != 2) {
-		unlock( spinlock );
-		return true;
-	}
-	insert_last( blocked_threads, *active_thread() );
-	unlock( spinlock );
+	lock( spinlock __cfaabi_dbg_ctx2 );
+	if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
+		unlock( spinlock );
+		return true;
+	}
+	insert_last( blocked_threads, *active_thread() );
+	unlock( spinlock );
 	park( );
 	return true;
@@ -307,4 +297,5 @@
 	size_t compare_val = 0;
 	int spin = 4;
+
 	// linear backoff
 	for( ;; ) {
@@ -324,8 +315,8 @@
 static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
-	lock( spinlock __cfaabi_dbg_ctx2 );
-	thread$ * t = &try_pop_front( blocked_threads );
-	unlock( spinlock );
-	unpark( t );
+    lock( spinlock __cfaabi_dbg_ctx2 );
+    thread$ * t = &try_pop_front( blocked_threads );
+    unlock( spinlock );
+    unpark( t );
 }
 
