Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision e07187dac365a5bfe1b92d9321504237bf7f714f)
+++ libcfa/src/concurrency/invoke.h	(revision f835806612595a6682261400a9575e2a7fa995c5)
@@ -195,10 +195,4 @@
 		struct __monitor_group_t monitors;
 
-		// used to put threads on user data structures
-		struct {
-			struct thread$ * next;
-			struct thread$ * back;
-		} seqable;
-
 		// used to put threads on dlist data structure
 		__cfa_dlink(thread$);
@@ -208,4 +202,10 @@
 			struct thread$ * prev;
 		} node;
+
+		// used to store state between CLH lock/unlock
+		volatile bool * clh_prev;
+
+		// used to point to this thread's current CLH queue node
+		volatile bool * clh_node;
 
 		struct processor * last_proc;
@@ -240,20 +240,4 @@
 		}
 
-		static inline thread$ * volatile & ?`next ( thread$ * this )  __attribute__((const)) {
-			return this->seqable.next;
-		}
-
-		static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
-			return this->seqable.back;
-		}
-
-		static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
-				return this->seqable.next;
-		}
-
-		static inline bool listed( thread$ * this ) {
-			return this->seqable.next != 0p;
-		}
-
 		static inline void ?{}(__monitor_group_t & this) {
 			(this.data){0p};
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision e07187dac365a5bfe1b92d9321504237bf7f714f)
+++ libcfa/src/concurrency/locks.hfa	(revision f835806612595a6682261400a9575e2a7fa995c5)
@@ -101,4 +101,70 @@
 
 //-----------------------------------------------------------------------------
+// MCS Spin Lock
+// - No recursive acquisition
+// - Needs to be released by owner
+
+struct mcs_spin_node {
+	mcs_spin_node * volatile next;
+	bool locked:1;
+};
+
+struct mcs_spin_queue {
+	mcs_spin_node * volatile tail;
+};
+
+static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
+
+static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
+	return node->next;
+}
+
+struct mcs_spin_lock {
+	mcs_spin_queue queue;
+};
+
+static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
+	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
+	if(prev != 0p) {
+		prev->next = &n;
+		while(n.locked) Pause();
+	}
+}
+
+static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
+	mcs_spin_node * n_ptr = &n;
+	if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		while (n.next == 0p) {}
+		n.next->locked = false;
+	}
+}
+
+//-----------------------------------------------------------------------------
+// CLH Spinlock
+// - No recursive acquisition
+// - Needs to be released by owner
+
+struct clh_lock {
+	volatile bool * volatile tail;
+};
+
+static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
+static inline void ^?{}( clh_lock & this ) { free(this.tail); }
+
+static inline void lock(clh_lock & l) {
+	thread$ * curr_thd = active_thread();
+	*(curr_thd->clh_node) = false;
+	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
+	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
+	curr_thd->clh_prev = prev;
+}
+
+static inline void unlock(clh_lock & l) {
+	thread$ * curr_thd = active_thread();
+	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
+	curr_thd->clh_node = curr_thd->clh_prev;
+}
+
+//-----------------------------------------------------------------------------
 // Linear backoff Spinlock
 struct linear_backoff_then_block_lock {
@@ -205,16 +271,22 @@
 // Fast Block Lock
 
-// High efficiency minimal blocking lock
+// minimal blocking lock
 // - No reacquire for cond var
 // - No recursive acquisition
 // - No ownership
 struct fast_block_lock {
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
 	// Spin lock used for mutual exclusion
 	__spinlock_t lock;
 
-	// List of blocked threads
-	dlist( thread$ ) blocked_threads;
-
+	// flag showing if lock is held
 	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
 };
 
@@ -231,4 +303,8 @@
 static inline void lock(fast_block_lock & this) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
+
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
 	if (held) {
 		insert_last( blocked_threads, *active_thread() );
@@ -238,4 +314,7 @@
 	}
 	held = true;
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
 	unlock( lock );
 }
@@ -246,4 +325,7 @@
 	thread$ * t = &try_pop_front( blocked_threads );
 	held = ( t ? true : false );
+	#ifdef __CFA_DEBUG__
+	owner = ( t ? t : 0p );
+	#endif
 	unpark( t );
 	unlock( lock );
@@ -253,4 +335,264 @@
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
+// simple_owner_lock
+
+// pthread owner lock
+// - reacquire for cond var
+// - recursive acquisition
+// - ownership
+struct simple_owner_lock {
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
+	// Spin lock used for mutual exclusion
+	__spinlock_t lock;
+
+	// thread that currently owns the lock; 0p when the lock is not held
+	struct thread$ * owner;
+
+	size_t recursion_count;
+};
+
+static inline void  ?{}( simple_owner_lock & this ) with(this) {
+	lock{};
+	blocked_threads{};
+	owner = 0p;
+	recursion_count = 0;
+}
+static inline void ^?{}( simple_owner_lock & this ) {}
+static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
+static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
+
+static inline void lock(simple_owner_lock & this) with(this) {
+	if (owner == active_thread()) {
+		recursion_count++;
+		return;
+	}
+	lock( lock __cfaabi_dbg_ctx2 );
+
+	if (owner != 0p) {
+		insert_last( blocked_threads, *active_thread() );
+		unlock( lock );
+		park( );
+		return;
+	}
+	owner = active_thread();
+	recursion_count = 1;
+	unlock( lock );
+}
+
+void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
+	thread$ * t = &try_pop_front( blocked_threads );
+	owner = t;
+	recursion_count = ( t ? 1 : 0 );
+	unpark( t );
+}
+
+static inline void unlock(simple_owner_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
+	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
+	// if recursion count is zero release lock and set new owner if one is waiting
+	recursion_count--;
+	if ( recursion_count == 0 ) {
+		pop_and_set_new_owner( this );
+	}
+	unlock( lock );
+}
+
+static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	// lock held
+	if ( owner != 0p ) {
+		insert_last( blocked_threads, *t );
+		unlock( lock );
+	}
+	// lock not held
+	else {
+		owner = t;
+		recursion_count = 1;
+		unpark( t );
+		unlock( lock );
+	}
+}
+
+static inline size_t on_wait(simple_owner_lock & this) with(this) { 
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
+	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
+
+	size_t ret = recursion_count;
+
+	pop_and_set_new_owner( this );
+
+	unlock( lock );
+	return ret;
+}
+
+static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+
+//-----------------------------------------------------------------------------
+// Spin Queue Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - spin lock with no locking/atomics in unlock
+struct spin_queue_lock {
+	// Spin lock used for mutual exclusion
+	mcs_spin_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void  ?{}( spin_queue_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( spin_queue_lock & this ) {}
+static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
+static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(spin_queue_lock & this) with(this) {
+	mcs_spin_node node;
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock, node );
+	while(held) Pause();
+	held = true;
+	unlock( lock, node );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(spin_queue_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
+
+
+//-----------------------------------------------------------------------------
+// MCS Block Spin Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - Blocks, but the first waiting thread spins (like spin queue, except threads other than the first block instead of spinning)
+struct mcs_block_spin_lock {
+	// Spin lock used for mutual exclusion
+	mcs_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( mcs_block_spin_lock & this ) {}
+static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
+static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(mcs_block_spin_lock & this) with(this) {
+	mcs_node node;
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock, node );
+	while(held) Pause();
+	held = true;
+	unlock( lock, node );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(mcs_block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
+// Block Spin Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - Blocks, but the first waiting thread spins (like spin queue, except threads other than the first block instead of spinning)
+struct block_spin_lock {
+	// Spin lock used for mutual exclusion
+	fast_block_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void  ?{}( block_spin_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( block_spin_lock & this ) {}
+static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
+static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock );
+	while(held) Pause();
+	held = true;
+	unlock( lock );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
 
 //-----------------------------------------------------------------------------
Index: libcfa/src/concurrency/thread.cfa
===================================================================
--- libcfa/src/concurrency/thread.cfa	(revision e07187dac365a5bfe1b92d9321504237bf7f714f)
+++ libcfa/src/concurrency/thread.cfa	(revision f835806612595a6682261400a9575e2a7fa995c5)
@@ -50,11 +50,10 @@
 	#endif
 
-	seqable.next = 0p;
-	seqable.back = 0p;
-
 	node.next = 0p;
 	node.prev = 0p;
+
+	clh_node = new( false );
+
 	doregister(curr_cluster, this);
-
 	monitors{ &self_mon_p, 1, (fptr_t)0 };
 }
@@ -64,4 +63,5 @@
 		canary = 0xDEADDEADDEADDEADp;
 	#endif
+	delete(clh_node);
 	unregister(curr_cluster, this);
 	^self_cor{};
