Index: libcfa/src/collections/lockfree.hfa
===================================================================
--- libcfa/src/collections/lockfree.hfa	(revision 4f4ae60ebc9e71ae1b57987f977afa6d66313862)
+++ libcfa/src/collections/lockfree.hfa	(revision 2853d6fb02befcb65b90aa2717750f5b7e57ad95)
@@ -6,5 +6,5 @@
 #include <bits/defs.hfa>
 
-forall( T &) {
+forall( T & ) {
 	//------------------------------------------------------------
 	// Queue based on the MCS lock
@@ -200,6 +200,6 @@
 forall( T & )
 struct LinkData {
-	T * volatile top;								// pointer to stack top
-	uintptr_t count;								// count each push
+	T * volatile top;									// pointer to stack top
+	uintptr_t count;									// count each push
 };
 
@@ -215,5 +215,5 @@
 }; // Link
 
-forall( T | sized(T) | { Link(T) * ?`next( T * ); } ) {
+forall( T /*| sized(T)*/ | { Link(T) * ?`next( T * ); } ) {
 	struct StackLF {
 		Link(T) stack;
@@ -235,5 +235,5 @@
 			Link(T) t @= stack;							// atomic assignment unnecessary, or use CAA
 			for () {									// busy wait
-				if ( t.data.top == 0p ) return 0p;				// empty stack ?
+				if ( t.data.top == 0p ) return 0p;		// empty stack ?
 				Link(T) * next = ( t.data.top )`next;
 				if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ (LinkData(T))@{ next->data.top, t.data.count } }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top; // attempt to update top node
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision 4f4ae60ebc9e71ae1b57987f977afa6d66313862)
+++ libcfa/src/concurrency/locks.hfa	(revision 2853d6fb02befcb65b90aa2717750f5b7e57ad95)
@@ -10,7 +10,7 @@
 // Author           : Colby Alexander Parsons
 // Created On       : Thu Jan 21 19:46:50 2021
-// Last Modified By :
-// Last Modified On :
-// Update Count     :
+// Last Modified By : Peter A. Buhr
+// Last Modified On : Tue Dec 24 09:36:52 2024
+// Update Count     : 16
 //
 
@@ -33,7 +33,7 @@
 
 // futex headers
-#include <linux/futex.h>      /* Definition of FUTEX_* constants */
-#include <sys/syscall.h>      /* Definition of SYS_* constants */
-#include <unistd.h>           /* Definition of syscall routine */
+#include <linux/futex.h>								// Definition of FUTEX_* constants
+#include <sys/syscall.h>								// Definition of SYS_* constants
+#include <unistd.h>										// Definition of syscall routine
 
 typedef void (*__cfa_pre_park)( void * );
@@ -43,5 +43,5 @@
 //-----------------------------------------------------------------------------
 // is_blocking_lock
-forall( L & | sized(L) )
+forall( L & /*| sized( L )*/ )
 trait is_blocking_lock {
 	// For synchronization locks to use when acquiring
@@ -63,5 +63,5 @@
 
 #define DEFAULT_ON_NOTIFY( lock_type ) \
-    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
+    static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
 
 #define DEFAULT_ON_WAIT( lock_type ) \
@@ -74,9 +74,9 @@
 // on_wakeup impl if lock should be reacquired after waking up
 #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
-    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
+    static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
 
 // on_wakeup impl if lock will not be reacquired after waking up
 #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
-    static inline void on_wakeup( lock_type & this, size_t recursion ) {}
+    static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
 
 
@@ -87,13 +87,13 @@
 	__spinlock_t lock;
 	int count;
-	__queue_t(thread$) waiting;
-};
-
-void  ?{}(semaphore & this, int count = 1);
-void ^?{}(semaphore & this);
-bool   P (semaphore & this);
-bool   V (semaphore & this);
-bool   V (semaphore & this, unsigned count);
-thread$ * V (semaphore & this, bool );
+	__queue_t( thread$ ) waiting;
+};
+
+void ?{}( semaphore & this, int count = 1 );
+void ^?{}( semaphore & this );
+bool P( semaphore & this );
+bool V( semaphore & this );
+bool V( semaphore & this, unsigned count );
+thread$ * V( semaphore & this, bool );
 
 //----------
@@ -102,15 +102,15 @@
 };
 
-static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
+static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
 static inline void ^?{}( single_acquisition_lock & this ) {}
-static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
-static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
-static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
-static inline size_t on_wait  ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
-static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
-static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
-static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
-static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
-static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
+static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
+static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
+static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
+static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
+static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
 __CFA_SELECT_GET_TYPE( single_acquisition_lock );
 
@@ -120,15 +120,15 @@
 };
 
-static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
+static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
 static inline void ^?{}( owner_lock & this ) {}
-static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
-static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
-static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
-static inline size_t on_wait  ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
-static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
-static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
-static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
-static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
-static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
+static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
+static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
+static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
+static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
+static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
 __CFA_SELECT_GET_TYPE( owner_lock );
 
@@ -147,15 +147,15 @@
 
 struct mcs_lock {
-	mcs_queue(mcs_node) queue;
+	mcs_queue( mcs_node ) queue;
 };
 
 static inline void lock( mcs_lock & l, mcs_node & n ) {
-	if(push(l.queue, &n))
-		wait(n.sem);
-}
-
-static inline void unlock(mcs_lock & l, mcs_node & n) {
-	mcs_node * next = advance(l.queue, &n);
-	if(next) post(next->sem);
+	if ( push( l.queue, &n ) )
+		wait( n.sem );
+}
+
+static inline void unlock( mcs_lock & l, mcs_node & n ) {
+	mcs_node * next = advance( l.queue, &n );
+	if ( next ) post( next->sem );
 }
 
@@ -183,33 +183,33 @@
     n.locked = true;
 
-	#if defined(__ARM_ARCH)
+	#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 	#endif
 
-	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
-	if( prev == 0p ) return;
+	mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
+	if ( prev == 0p ) return;
 	prev->next = &n;
 	
-	#if defined(__ARM_ARCH)
+	#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 	#endif
 
-	while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
-
-	#if defined(__ARM_ARCH)
+	while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();
+
+	#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 	#endif
 }
 
-static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
-	#if defined(__ARM_ARCH)
+static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
+	#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 	#endif
 
 	mcs_spin_node * n_ptr = &n;
-	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
-	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
-
-	#if defined(__ARM_ARCH)
+	if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
+	while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();
+
+	#if defined( __ARM_ARCH )
 	__asm__ __volatile__ ( "DMB ISH" ::: );
 	#endif
@@ -233,46 +233,46 @@
 
 // to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
-static inline int futex(int *uaddr, int futex_op, int val) {
-    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
-}
-
-static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
-
-static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
-	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
-}
-
-static inline int internal_exchange( futex_mutex & this ) with(this) {
-	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
+static inline int futex( int *uaddr, int futex_op, int val ) {
+    return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
+}
+
+static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }
+
+static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
+	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
+}
+
+static inline int internal_exchange( futex_mutex & this ) with( this ) {
+	return __atomic_exchange_n( (int*)&val, 2, __ATOMIC_ACQUIRE );
 }
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( futex_mutex & this ) with(this) {
+static inline void lock( futex_mutex & this ) with( this ) {
 	int state;
 
-	for( int spin = 4; spin < 1024; spin += spin) {
+	for ( spin; 4 ~ 1024 ~ spin ) {
 		state = 0;
 		// if unlocked, lock and return
-		if (internal_try_lock(this, state)) return;
-		if (2 == state) break;
-		for (int i = 0; i < spin; i++) Pause();
+		if ( internal_try_lock( this, state ) ) return;
+		if ( state == 2 ) break;
+		for ( spin ) Pause();
 	}
 	
 	// if not in contended state, set to be in contended state
-	if (state != 2) state = internal_exchange(this);
+	if ( state != 2 ) state = internal_exchange( this );
 
 	// block and spin until we win the lock
-	while (state != 0) {
-		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
-		state = internal_exchange(this);
-	}
-}
-
-static inline void unlock(futex_mutex & this) with(this) {
+	while ( state != 0 ) {
+		futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
+		state = internal_exchange( this );
+	}
+}
+
+static inline void unlock( futex_mutex & this ) with( this ) {
 	// if uncontended do atomic unlock and then return
-    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+    if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
 	
 	// otherwise threads are blocked so we must wake one
-	futex((int *)&val, FUTEX_WAKE, 1);
+	futex( (int *)&val, FUTEX_WAKE, 1 );
 }
 
@@ -295,14 +295,14 @@
 	int val; 
 };
-static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
+static inline void  ?{}( go_mutex & this ) with( this ) { val = 0; }
 static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
 static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
 
-static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
-	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
-}
-
-static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
-	return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
+static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
+	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
+}
+
+static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
+	return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE );
 }
 
@@ -312,17 +312,17 @@
 
     // speculative grab
-    state = internal_exchange(this, 1);
-    if ( !state ) return; // state == 0
+    state = internal_exchange( this, 1 );
+    if ( ! state ) return;								// state == 0
     init_state = state;
-    for (;;) {
-        for( int i = 0; i < 4; i++ ) {
-            while( !val ) { // lock unlocked
+    for () {
+        for ( 4 ) {
+            while ( ! val ) {							// lock unlocked
                 state = 0;
                 if ( internal_try_lock( this, state, init_state ) ) return;
             }
-            for (int i = 0; i < 30; i++) Pause();
+            for ( 30 ) Pause();
         }
 
-        while( !val ) { // lock unlocked
+        while ( ! val ) {								// lock unlocked
             state = 0;
             if ( internal_try_lock( this, state, init_state ) ) return;
@@ -332,13 +332,13 @@
         // if not in contended state, set to be in contended state
         state = internal_exchange( this, 2 );
-        if ( !state ) return; // state == 0
+        if ( ! state ) return;							// state == 0
         init_state = 2;
-        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
+        futex( (int*)&val, FUTEX_WAIT, 2 );				// if val is not 2 this returns with EWOULDBLOCK
     }
 }
 
-static inline void unlock( go_mutex & this ) with(this) {
+static inline void unlock( go_mutex & this ) with( this ) {
 	// if uncontended do atomic unlock and then return
-    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
+    if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
 	
 	// otherwise threads are blocked so we must wake one
@@ -373,17 +373,17 @@
 static inline void  ^?{}( exp_backoff_then_block_lock & this ){}
 
-static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
-	return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
+	return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
 }
 
 static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
 
-static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
-	return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
-}
-
-static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
+static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
+	return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
+}
+
+static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
     lock( spinlock __cfaabi_dbg_ctx2 );
-    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
+    if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
         unlock( spinlock );
         return true;
@@ -395,25 +395,25 @@
 }
 
-static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
+static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
 	size_t compare_val = 0;
 	int spin = 4;
 
 	// linear backoff
-	for( ;; ) {
+	for () {
 		compare_val = 0;
-		if (internal_try_lock(this, compare_val)) return;
-		if (2 == compare_val) break;
-		for (int i = 0; i < spin; i++) Pause();
-		if (spin >= 1024) break;
+		if ( internal_try_lock( this, compare_val ) ) return;
+		if ( compare_val == 2 ) break;
+		for ( spin ) Pause();
+		if ( spin >= 1024 ) break;
 		spin += spin;
 	}
 
-	if(2 != compare_val && try_lock_contention(this)) return;
+	if ( 2 != compare_val && try_lock_contention( this ) ) return;
 	// block until signalled
-	while (block(this)) if(try_lock_contention(this)) return;
-}
-
-static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
-    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
+	while ( block( this ) ) if ( try_lock_contention( this ) ) return;
+}
+
+static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
+    if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
     lock( spinlock __cfaabi_dbg_ctx2 );
     thread$ * t = &try_pop_front( blocked_threads );
@@ -444,5 +444,5 @@
 };
 
-static inline void  ?{}( fast_block_lock & this ) with(this) {
+static inline void  ?{}( fast_block_lock & this ) with( this ) {
 	lock{};
 	blocked_threads{};
@@ -454,5 +454,5 @@
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( fast_block_lock & this ) with(this) {
+static inline void lock( fast_block_lock & this ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	if ( held ) {
@@ -466,5 +466,5 @@
 }
 
-static inline void unlock( fast_block_lock & this ) with(this) {
+static inline void unlock( fast_block_lock & this ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
@@ -475,5 +475,5 @@
 }
 
-static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
     lock( lock __cfaabi_dbg_ctx2 );
     insert_last( blocked_threads, *t );
@@ -503,5 +503,5 @@
 };
 
-static inline void  ?{}( simple_owner_lock & this ) with(this) {
+static inline void  ?{}( simple_owner_lock & this ) with( this ) {
 	lock{};
 	blocked_threads{};
@@ -513,5 +513,5 @@
 static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
 
-static inline void lock( simple_owner_lock & this ) with(this) {
+static inline void lock( simple_owner_lock & this ) with( this ) {
 	if ( owner == active_thread() ) {
 		recursion_count++;
@@ -532,5 +532,5 @@
 }
 
-static inline void pop_node( simple_owner_lock & this ) with(this) {
+static inline void pop_node( simple_owner_lock & this ) with( this ) {
     __handle_waituntil_OR( blocked_threads );
     select_node * node = &try_pop_front( blocked_threads );
@@ -538,5 +538,5 @@
         owner = node->blocked_thread;
         recursion_count = 1;
-        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+        // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
         wake_one( blocked_threads, *node );
     } else {
@@ -546,5 +546,5 @@
 }
 
-static inline void unlock( simple_owner_lock & this ) with(this) {
+static inline void unlock( simple_owner_lock & this ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -558,5 +558,5 @@
 }
 
-static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
+static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	// lock held
@@ -573,5 +573,5 @@
 }
 
-static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
+static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -591,13 +591,13 @@
 }
 
-static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }
 
 // waituntil() support
-static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
+static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
     lock( lock __cfaabi_dbg_ctx2 );
 
     // check if we can complete operation. If so race to establish winner in special OR case
-    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
-        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+    if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+        if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
            unlock( lock );
            return false;
@@ -626,5 +626,5 @@
 }
 
-static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
+static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
     lock( lock __cfaabi_dbg_ctx2 );
     if ( node`isListed ) {
@@ -644,5 +644,5 @@
 }
 
-static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
+static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
 __CFA_SELECT_GET_TYPE( simple_owner_lock );
 
@@ -662,5 +662,5 @@
 };
 
-static inline void  ?{}( spin_queue_lock & this ) with(this) {
+static inline void  ?{}( spin_queue_lock & this ) with( this ) {
 	lock{};
 	held = false;
@@ -671,14 +671,14 @@
 
 // if this is called recursively IT WILL DEADLOCK!
-static inline void lock( spin_queue_lock & this ) with(this) {
+static inline void lock( spin_queue_lock & this ) with( this ) {
 	mcs_spin_node node;
 	lock( lock, node );
-	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
 	unlock( lock, node );
 }
 
-static inline void unlock( spin_queue_lock & this ) with(this) {
-	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
+static inline void unlock( spin_queue_lock & this ) with( this ) {
+	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
 }
 
@@ -702,5 +702,5 @@
 };
 
-static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
+static inline void  ?{}( mcs_block_spin_lock & this ) with( this ) {
 	lock{};
 	held = false;
@@ -711,14 +711,14 @@
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( mcs_block_spin_lock & this ) with(this) {
+static inline void lock( mcs_block_spin_lock & this ) with( this ) {
 	mcs_node node;
 	lock( lock, node );
-	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
 	unlock( lock, node );
 }
 
-static inline void unlock(mcs_block_spin_lock & this) with(this) {
-	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
+static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
+	__atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
 }
 
@@ -742,5 +742,5 @@
 };
 
-static inline void  ?{}( block_spin_lock & this ) with(this) {
+static inline void  ?{}( block_spin_lock & this ) with( this ) {
 	lock{};
 	held = false;
@@ -751,16 +751,16 @@
 
 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( block_spin_lock & this ) with(this) {
+static inline void lock( block_spin_lock & this ) with( this ) {
 	lock( lock );
-	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
 	unlock( lock );
 }
 
-static inline void unlock( block_spin_lock & this ) with(this) {
-	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
-}
-
-static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
+static inline void unlock( block_spin_lock & this ) with( this ) {
+	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
+}
+
+static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
 	// first we acquire internal fast_block_lock
 	lock( lock __cfaabi_dbg_ctx2 );
@@ -774,11 +774,11 @@
 	unlock( lock );
 
-	unpark(t);
+	unpark( t );
 }
 DEFAULT_ON_WAIT( block_spin_lock )
-static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
+static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
 	// now we acquire the entire block_spin_lock upon waking up
-	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
+	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
+	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
 	unlock( lock ); // Now we release the internal fast_spin_lock
 }
@@ -788,5 +788,5 @@
 // // the info thread is a wrapper around a thread used
 // // to store extra data for use in the condition variable
-forall(L & | is_blocking_lock(L)) {
+forall( L & | is_blocking_lock( L ) ) {
 	struct info_thread;
 }
@@ -794,5 +794,5 @@
 //-----------------------------------------------------------------------------
 // Synchronization Locks
-forall(L & | is_blocking_lock(L)) {
+forall( L & | is_blocking_lock( L ) ) {
 
 	//-----------------------------------------------------------------------------
@@ -810,5 +810,5 @@
 
 		// List of blocked threads
-		dlist( info_thread(L) ) blocked_threads;
+		dlist( info_thread( L ) ) blocked_threads;
 
 		// Count of current blocked threads
@@ -816,25 +816,24 @@
 	};
 
-
-	void  ?{}( condition_variable(L) & this );
-	void ^?{}( condition_variable(L) & this );
-
-	bool notify_one( condition_variable(L) & this );
-	bool notify_all( condition_variable(L) & this );
-
-	uintptr_t front( condition_variable(L) & this );
-
-	bool empty  ( condition_variable(L) & this );
-	int  counter( condition_variable(L) & this );
-
-	void wait( condition_variable(L) & this );
-	void wait( condition_variable(L) & this, uintptr_t info );
-	bool wait( condition_variable(L) & this, Duration duration );
-	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
-
-	void wait( condition_variable(L) & this, L & l );
-	void wait( condition_variable(L) & this, L & l, uintptr_t info );
-	bool wait( condition_variable(L) & this, L & l, Duration duration );
-	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
+	void ?{}( condition_variable( L ) & this );
+	void ^?{}( condition_variable( L ) & this );
+
+	bool notify_one( condition_variable( L ) & this );
+	bool notify_all( condition_variable( L ) & this );
+
+	uintptr_t front( condition_variable( L ) & this );
+
+	bool empty  ( condition_variable( L ) & this );
+	int  counter( condition_variable( L ) & this );
+
+	void wait( condition_variable( L ) & this );
+	void wait( condition_variable( L ) & this, uintptr_t info );
+	bool wait( condition_variable( L ) & this, Duration duration );
+	bool wait( condition_variable( L ) & this, uintptr_t info, Duration duration );
+
+	void wait( condition_variable( L ) & this, L & l );
+	void wait( condition_variable( L ) & this, L & l, uintptr_t info );
+	bool wait( condition_variable( L ) & this, L & l, Duration duration );
+	bool wait( condition_variable( L ) & this, L & l, uintptr_t info, Duration duration );
 
 	//-----------------------------------------------------------------------------
@@ -848,5 +847,5 @@
 	struct fast_cond_var {
 		// List of blocked threads
-		dlist( info_thread(L) ) blocked_threads;
+		dlist( info_thread( L ) ) blocked_threads;
 		#ifdef __CFA_DEBUG__
 		L * lock_used;
@@ -854,15 +853,15 @@
 	};
 
-	void  ?{}( fast_cond_var(L) & this );
-	void ^?{}( fast_cond_var(L) & this );
-
-	bool notify_one( fast_cond_var(L) & this );
-	bool notify_all( fast_cond_var(L) & this );
-
-	uintptr_t front( fast_cond_var(L) & this );
-	bool empty  ( fast_cond_var(L) & this );
-
-	void wait( fast_cond_var(L) & this, L & l );
-	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
+	void ?{}( fast_cond_var( L ) & this );
+	void ^?{}( fast_cond_var( L ) & this );
+
+	bool notify_one( fast_cond_var( L ) & this );
+	bool notify_all( fast_cond_var( L ) & this );
+
+	uintptr_t front( fast_cond_var( L ) & this );
+	bool empty  ( fast_cond_var( L ) & this );
+
+	void wait( fast_cond_var( L ) & this, L & l );
+	void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );
 
 
@@ -874,20 +873,20 @@
 
 	struct pthread_cond_var {
-		dlist( info_thread(L) ) blocked_threads;
+		dlist( info_thread( L ) ) blocked_threads;
 		__spinlock_t lock;
 	};
 
-	void  ?{}( pthread_cond_var(L) & this );
-	void ^?{}( pthread_cond_var(L) & this );
-
-	bool notify_one( pthread_cond_var(L) & this );
-	bool notify_all( pthread_cond_var(L) & this );
-
-	uintptr_t front( pthread_cond_var(L) & this );
-	bool empty ( pthread_cond_var(L) & this );
-
-	void wait( pthread_cond_var(L) & this, L & l );
-	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
-	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
-	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
-}
+	void  ?{}( pthread_cond_var( L ) & this );
+	void ^?{}( pthread_cond_var( L ) & this );
+
+	bool notify_one( pthread_cond_var( L ) & this );
+	bool notify_all( pthread_cond_var( L ) & this );
+
+	uintptr_t front( pthread_cond_var( L ) & this );
+	bool empty ( pthread_cond_var( L ) & this );
+
+	void wait( pthread_cond_var( L ) & this, L & l );
+	void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
+	bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
+	bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
+}
Index: libcfa/src/concurrency/select.hfa
===================================================================
--- libcfa/src/concurrency/select.hfa	(revision 4f4ae60ebc9e71ae1b57987f977afa6d66313862)
+++ libcfa/src/concurrency/select.hfa	(revision 2853d6fb02befcb65b90aa2717750f5b7e57ad95)
@@ -184,5 +184,5 @@
 
 // wake one thread from the list
-static inline void wake_one( dlist( select_node ) & queue, select_node & popped ) {
+static inline void wake_one( dlist( select_node ) & /*queue*/, select_node & popped ) {
     if ( !popped.clause_status                              // normal case, node is not a select node
         || ( popped.clause_status && !popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available
