Index: libcfa/src/bits/collection.hfa
===================================================================
--- libcfa/src/bits/collection.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/bits/collection.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -26,11 +26,11 @@
 	// PUBLIC
 
-	void ?{}( Colable & co ) with( co ) {
-		next = 0p;
+	void ?{}( Colable & co ) {
+		co.next = 0p;
 	} // post: ! listed()
 
 	// return true iff *this is an element of a collection
-	bool listed( Colable & co ) with( co ) {			// pre: this != 0
-		return next != 0p;
+	bool listed( Colable & co ) {						// pre: this != 0
+		return co.next != 0p;
 	}
 
Index: libcfa/src/bits/queue.hfa
===================================================================
--- libcfa/src/bits/queue.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/bits/queue.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -24,11 +24,11 @@
 		Queue(T) & ?=?( const Queue(T) & ) = void;		// no assignment
 
-		void ?{}( Queue(T) & q ) with( q ) {
+		void ?{}( Queue(T) & q ) {
 			((Collection &)q){};
-			last = 0p;
+			q.last = 0p;
 		} // post: empty()
 
-		T & tail( Queue(T) & q ) with( q ) {
-			return *last;
+		T & tail( Queue(T) & q ) {
+			return *q.last;
 		}
 
@@ -46,5 +46,5 @@
 			if ( listed( &n ) ) abort( "(Queue &)%p.addHead( %p ) : Node is already on another list.", &q, &n );
 			#endif // __CFA_DEBUG__
-			if ( last ) {
+			if ( q.last ) {
 				Next( &n ) = &head( q );
 				q.root = &n;
@@ -60,5 +60,5 @@
 			if ( listed( &n ) ) abort( "(Queue &)%p.addTail( %p ) : Node is already on another list.", &q, &n );
 			#endif // __CFA_DEBUG__
-			if ( last ) Next( last ) = &n;
+		if ( q.last ) Next( q.last ) = &n;
 			else root = &n;
 			last = &n;
Index: libcfa/src/collections/list.hfa
===================================================================
--- libcfa/src/collections/list.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/collections/list.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Wed Apr 22 18:00:00 2020
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Apr 20 19:04:50 2025
-// Update Count     : 51
+// Last Modified On : Thu Apr 24 18:12:59 2025
+// Update Count     : 72
 //
 
@@ -72,6 +72,6 @@
 
 // The origin is the position encountered at the start of iteration, signifying, "need to advance to the first element,"
-// and at the end of iteration, signifying, "no more elements."  Normal comsumption of an iterator runs ?`moveNext as
-// the first step, and uses the return of ?`moveNext as a guard, before dereferencing the iterator.  So normal
+// and at the end of iteration, signifying, "no more elements."  Normal consumption of an iterator runs "advance" as
+// the first step, and uses the return of "advance" as a guard, before dereferencing the iterator.  So normal
 // consumption of an iterator does not dereference an iterator in origin position.  The value of a pointer (underlying a
 // refence) that is exposed publicly as an iteraor, and also a pointer stored internally in a link field, is tagged, to
@@ -128,4 +128,28 @@
 
 static inline forall( tE &, tLinks & | embedded( tE, tLinks, dlink( tE ) ) ) {
+	bool isListed( tE & node ) {
+		verify( &node != 0p );
+		dlink( tE ) & node_links = node`inner;
+		return (node_links.prev != 0p) || (node_links.next != 0p);
+	}
+
+	bool isEmpty( dlist( tE, tLinks ) & list ) {
+		tE * firstPtr = list.next;
+		if ( ORIGIN_TAG_QUERY(( size_t)firstPtr) ) firstPtr = 0p;
+		return firstPtr == 0p;
+	}
+
+	tE & first( dlist( tE, tLinks ) & list ) {
+		tE * firstPtr = list.next;
+		if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
+		return *firstPtr;
+	}
+
+	tE & last( dlist( tE, tLinks ) & list ) {
+		tE * lastPtr = list.prev;
+		if ( ORIGIN_TAG_QUERY( (size_t)lastPtr) ) lastPtr = 0p;
+		return *lastPtr;
+	}
+
 	tE & insert_before( tE & before, tE & node ) {
 		verify( &before != 0p );
@@ -194,37 +218,17 @@
 	}
 
-	tE & ?`first( dlist( tE, tLinks ) & list ) {
-		tE * firstPtr = list.next;
-		if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
-		return *firstPtr;
-	}
-
-	tE & ?`last( dlist( tE, tLinks ) & list ) {
-		tE * lastPtr = list.prev;
-		if ( ORIGIN_TAG_QUERY( (size_t)lastPtr) ) lastPtr = 0p;
-		return *lastPtr;
-	}
-
-	bool ?`isEmpty( dlist( tE, tLinks ) & list ) {
-		tE * firstPtr = list.next;
-		if ( ORIGIN_TAG_QUERY(( size_t)firstPtr) ) firstPtr = 0p;
-		return firstPtr == 0p;
-	}
-
-	bool ?`isListed( tE & node ) {
-		verify( &node != 0p );
-		dlink( tE ) & node_links = node`inner;
-		return (node_links.prev != 0p) || (node_links.next != 0p);
-	}
-
-	tE & ?`elems( dlist( tE, tLinks ) & list ) {
+	tE & iter( dlist( tE, tLinks ) & list ) {
 		tE * origin = $get_list_origin_addr( list );
 		return *origin;
 	}
-	tE & ?`head( dlist( tE, tLinks ) & list ) {
-		return list`elems;
-	}
-
-	bool ?`moveNext( tE && refx ) {
+
+	bool recede( tE && refx ) {
+		tE && ref_inner = refx;
+		tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
+		&ref_inner = oldReferent`inner.prev;
+		return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
+	}
+
+	bool advance( tE && refx ) {
 		tE && ref_inner = refx;
 		tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
@@ -232,46 +236,33 @@
 		return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
 	}
-	bool ?`next( tE && refx ) {							// alternate name
-		return refx`moveNext;
-	}
-
-	bool ?`movePrev( tE && refx ) {
-		tE && ref_inner = refx;
-		tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
-		&ref_inner = oldReferent`inner.prev;
-		return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
-	}
-	bool ?`prev( tE && refx ) {							// alternate name
-		return refx`movePrev;
-	}
-
-	bool ?`hasNext( tE & node ) {
-		return node`moveNext;
-	}
-
-	bool ?`hasPrev( tE & node ) {
-		return node`movePrev;
-	}
-
-	tE & ?`next( tE & node ) {
-		if ( node`moveNext ) return node;
+
+	bool isFirst( tE & node ) {
+		return recede( node );						// NOTE(review): true when a predecessor exists (and the cursor moves to it) -- name suggests the inverse; confirm intent
+	}
+
+	bool isLast( tE & node ) {
+		return advance( node );						// NOTE(review): true when a successor exists (and the cursor moves to it) -- name suggests the inverse; confirm intent
+	}
+
+	tE & prev( tE & node ) {
+		if ( recede( node ) ) return node;
 		return *0p;
 	}
 
-	tE & ?`prev( tE & node ) {
-		if ( node`movePrev ) return node;
+	tE & next( tE & node ) {
+		if ( advance( node ) ) return node;
 		return *0p;
 	}
 
 	tE & insert_first( dlist( tE, tLinks ) & list, tE & node ) {
-		insert_after( list`elems, node );
+		insert_after( iter( list ), node );
 		return node;
 	}
 
 	tE & insert_last( dlist( tE, tLinks ) & list, tE & node ) {
-		insert_before( list`elems, node );
-		return node;
-	}
-	tE &  insert( dlist( tE, tLinks ) & list, tE & node ) {	// alternate name
+		insert_before( iter( list ), node );
+		return node;
+	}
+	tE & insert( dlist( tE, tLinks ) & list, tE & node ) { // synonym for insert_last
 		insert_last( list, node );
 		return node;
@@ -279,9 +270,13 @@
 
 	tE & remove_first( dlist( tE, tLinks ) & list ) {
-		return remove( list`first );
+		tE & first_node = first( list );
+		if ( &first_node ) return remove( first_node );
+		return first_node;
 	}
 
 	tE & remove_last( dlist( tE, tLinks ) & list ) {
-		return remove( list`last );
+		tE & last_node = last( list );
+		if ( &last_node ) return remove( last_node );
+		return last_node;
 	}
 
@@ -322,47 +317,32 @@
 //	}
 
-	tE & try_pop_front( dlist( tE, tLinks ) & list ) {
-		tE & first_inlist = list`first;
-		tE & first_item = first_inlist;
-		if ( &first_item ) remove( first_inlist );
-		return first_item;
-	}
-
-	tE & try_pop_back( dlist( tE, tLinks ) & list ) {
-		tE & last_inlist = list`last;
-		tE & last_item = last_inlist;
-		if ( &last_item ) remove( last_inlist );
-		return last_item;
-	}
-
-
 	#if ! defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
 	bool $validate_fwd( dlist( tE, tLinks ) & this ) {
-		if ( ! & this`first ) return &this`last == 0p;
+		if ( ! & first( this ) ) return &last( this ) == 0p;
 
 		tE & lagElem = *0p;
-		while ( tE & it = this`elems; it`moveNext ) {
-			if ( & lagElem == 0p &&  &it != & this`first ) return false;
+		while ( tE & it = iter( this ); advance( it ) ) {
+			if ( & lagElem == 0p &&  &it != & first( this ) ) return false;
 			&lagElem = &it;
 		}
 
-		if ( &lagElem != &this`last ) return false;
-
-		// TODO: verify that it is back at this`elems;
+		if ( &lagElem != &last( this ) ) return false;
+
+		// TODO: verify that it is back at iter( this );
 		return true;
 	}
 
 	bool $validate_rev( dlist( tE, tLinks ) & this ) {
-		if ( ! & this`last ) return &this`first == 0p;
+		if ( ! & last( this ) ) return &first( this ) == 0p;
 
 		tE & lagElem = *0p;
-		while ( tE & it = this`elems; it`movePrev ) {
-			if ( &lagElem == 0p && &it != & this`last ) return false;
+		while ( tE & it = iter( this ); recede( it ) ) {
+			if ( &lagElem == 0p && &it != & last( this ) ) return false;
 			&lagElem = &it;
 		}
 
-		if ( &lagElem != &this`first ) return false;
-
-		// TODO: verify that it is back at this`elems;
+		if ( &lagElem != &first( this ) ) return false;
+
+		// TODO: verify that it is back at iter( this );
 		return true;
 	}
@@ -375,6 +355,6 @@
 
 // TEMPORARY, until foreach statement created.
-#define FOREACH( list, index ) for ( typeof((list)`head) & (index) = (list)`head; (index)`next; )
-#define FOREACH_REV( list, index ) for ( typeof((list)`head) & (index) = (list)`head; (index)`prev; )
-#define FOREACH_COND( list, index, expr ) for ( typeof((list)`head) & (index) = (list)`head; (index)`next && !(expr); )
-#define FOREACH_REV_COND( list, index, expr ) for ( typeof((list)`head) & (index) = (list)`head; (index)`prev && !(expr); )
+#define FOREACH( list, index ) for ( typeof(iter( list )) & (index) = iter( list ); advance( index ); )
+#define FOREACH_REV( list, index ) for ( typeof(iter( list )) & (index) = iter( list ); recede( index ); )
+#define FOREACH_COND( list, index, expr ) for ( typeof(iter( list )) & (index) = iter( list ); advance( index ) && !(expr); )
+#define FOREACH_REV_COND( list, index, expr ) for ( typeof(iter( list )) & (index) = iter( list ); recede( index ) && !(expr); )
Index: libcfa/src/collections/lockfree.hfa
===================================================================
--- libcfa/src/collections/lockfree.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/collections/lockfree.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -16,20 +16,19 @@
 	};
 
-	static inline void ?{}(mcs_queue(T) & this) { this.tail = 0p; }
-	static inline bool empty(const mcs_queue(T) & this) { return !this.tail; }
-
- 	static inline forall(| { T * volatile & ?`next ( T * ); })
-	{
+	static inline void ?{}( mcs_queue(T) & this ) { this.tail = 0p; }
+	static inline bool empty( const mcs_queue(T) & this ) { return ! this.tail; }
+
+ 	static inline forall( | { T * volatile & next ( T * ); }) {
 		// Adds an element to the list
 		// Multi-Thread Safe, Lock-Free
-		T * push(mcs_queue(T) & this, T * elem) __attribute__((artificial));
-		T * push(mcs_queue(T) & this, T * elem) {
-			/* paranoid */ verify(!(elem`next));
+		T * push( mcs_queue(T) & this, T * elem ) __attribute__((artificial));
+		T * push( mcs_queue(T) & this, T * elem ) {
+			/* paranoid */ verify( ! next( elem ) );
 			// Race to add to the tail
-			T * prev = __atomic_exchange_n(&this.tail, elem, __ATOMIC_SEQ_CST);
+			T * prev_val = __atomic_exchange_n(&this.tail, elem, __ATOMIC_SEQ_CST);
 			// If we aren't the first, we need to tell the person before us
 			// No need to
-			if (prev) prev`next = elem;
-			return prev;
+			if ( prev_val ) next( prev_val ) = elem;
+			return prev_val;
 		}
 
@@ -37,6 +36,6 @@
 		// Passing an element that is not the head is undefined behavior
 		// NOT Multi-Thread Safe, concurrent pushes are safe
-		T * advance(mcs_queue(T) & this, T * elem) __attribute__((artificial));
-		T * advance(mcs_queue(T) & this, T * elem) {
+		T * advance( mcs_queue(T) & this, T * elem ) __attribute__((artificial));
+		T * advance( mcs_queue(T) & this, T * elem ) {
 			T * expected = elem;
 			// Check if this is already the last item
@@ -44,11 +43,11 @@
 
 			// If not wait for next item to show-up, filled by push
-			while (!(elem`next)) Pause();
+			while ( ! next( elem ) ) Pause();
 
 			// we need to return if the next link was empty
-			T * ret = elem`next;
+			T * ret = next( elem );
 
 			// invalidate link to reset to initial state
-			elem`next = 0p;
+			next( elem ) = 0p;
 			return ret;
 		}
@@ -65,38 +64,37 @@
 	};
 
-	static inline void ?{}(mpsc_queue(T) & this) {
+	static inline void ?{}( mpsc_queue(T) & this ) {
 		((mcs_queue(T)&)this){};
 		this.head = 0p;
 	}
 
-	static inline forall(| { T * volatile & ?`next ( T * ); })
-	{
+	static inline forall( | { T * volatile & next ( T * ); }) {
 		// Added a new element to the queue
 		// Multi-Thread Safe, Lock-Free
-		T * push(mpsc_queue(T) & this, T * elem) __attribute__((artificial));
-		T * push(mpsc_queue(T) & this, T * elem) {
-			T * prev = push((mcs_queue(T)&)this, elem);
-			if (!prev) this.head = elem;
-			return prev;
+		T * push( mpsc_queue(T) & this, T * elem ) __attribute__((artificial));
+		T * push( mpsc_queue(T) & this, T * elem ) {
+			T * prev_val = push( (mcs_queue(T)&)this, elem );
+			if ( ! prev_val ) this.head = elem;
+			return prev_val;
 		}
 
 		// Pop an element from the queue
 		// return the element that was removed
-		// next is set to the new head of the queue
+		// head is set to the new head of the queue
 		// NOT Multi-Thread Safe
-		T * pop(mpsc_queue(T) & this, T *& next) __attribute__((artificial));
-		T * pop(mpsc_queue(T) & this, T *& next) {
+		T * pop( mpsc_queue(T) & this, T *& head ) __attribute__((artificial));
+		T * pop( mpsc_queue(T) & this, T *& head ) {
 			T * elem = this.head;
 			// If head is empty just return
-			if (!elem) return 0p;
+			if ( ! elem ) return 0p;
 
 			// If there is already someone in the list, then it's easy
-			if (elem`next) {
-				this.head = next = elem`next;
+			if ( next( elem ) ) {
+				this.head = head = next( elem );
 				// force memory sync
 				__atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 				// invalidate link to reset to initial state
-				elem`next = 0p;
+				next( elem ) = 0p;
 			}
 			// Otherwise, there might be a race where it only looks but someone is enqueuing
@@ -106,12 +104,11 @@
 				// after that point, it could overwrite the write in push
 				this.head = 0p;
-				next = advance((mcs_queue(T)&)this, elem);
+				head = advance( (mcs_queue(T)&)this, elem );
 
 				// Only write to the head if there is a next element
 				// it is the only way we can guarantee we are not overwriting
 				// a write made in push
-				if (next) this.head = next;
-			}
-
+				if ( head ) this.head = head;
+			}
 			// return removed element
 			return elem;
@@ -119,5 +116,5 @@
 
 		// Same as previous function
-		T * pop(mpsc_queue(T) & this) {
+		T * pop( mpsc_queue(T) & this ) {
 			T * _ = 0p;
 			return pop(this, _);
@@ -144,12 +141,12 @@
 	static inline bool is_poisoned( const poison_list(T) & this ) { return 1p == this.head; }
 
- 	static inline forall(| { T * volatile & ?`next ( T * ); })
+ 	static inline forall( | { T * volatile & next( T * ); })
 	{
 		// Adds an element to the list
 		// Multi-Thread Safe, Lock-Free
-		bool push(poison_list(T) & this, T * elem) __attribute__((artificial));
-		bool push(poison_list(T) & this, T * elem) {
-			/* paranoid */ verify(0p == (elem`next));
-			__atomic_store_n( &elem`next, (T*)1p, __ATOMIC_RELAXED );
+		bool push( poison_list(T) & this, T * elem ) __attribute__((artificial));
+		bool push( poison_list(T) & this, T * elem ) {
+			/* paranoid */ verify( 0p == next( elem ) );
+			__atomic_store_n( &next( elem ), (T *)1p, __ATOMIC_RELAXED );
 
 			// read the head up-front
@@ -164,10 +161,10 @@
 
 					// We should never succeed the CAS if it's poisonned and the elem should be 1p.
-					/* paranoid */ verify( expected  != 1p );
-					/* paranoid */ verify( elem`next == 1p );
+					/* paranoid */ verify( expected != 1p );
+					/* paranoid */ verify( next( elem ) == 1p );
 
 					// If we aren't the first, we need to tell the person before us
 					// No need to
-					elem`next = expected;
+					next( elem ) = expected;
 					return true;
 				}
@@ -178,10 +175,10 @@
 		// Passing an element that is not the head is undefined behavior
 		// NOT Multi-Thread Safe, concurrent pushes are safe
-		T * advance(T * elem) __attribute__((artificial));
-		T * advance(T * elem) {
+		T * advance( T * elem ) __attribute__((artificial));
+		T * advance( T * elem ) {
 			T * ret;
 
 			// Wait for next item to show-up, filled by push
-			while (1p == (ret = __atomic_load_n(&elem`next, __ATOMIC_RELAXED))) Pause();
+			while (1p == (ret = __atomic_load_n( &next( elem ), __ATOMIC_RELAXED ) ) ) Pause();
 
 			return ret;
@@ -189,6 +186,6 @@
 
 		// Poison the queue, preveting new pushes and returning the head
-		T * poison(poison_list(T) & this) __attribute__((artificial));
-		T * poison(poison_list(T) & this) {
+		T * poison( poison_list(T) & this ) __attribute__((artificial));
+		T * poison( poison_list(T) & this ) {
 			T * ret = __atomic_exchange_n( &this.head, (T*)1p, __ATOMIC_SEQ_CST );
 			/* paranoid */ verifyf( ret != (T*)1p, "Poison list %p poisoned more than once!", &this );
@@ -215,5 +212,5 @@
 }; // Link
 
-forall( T /*| sized(T)*/ | { Link(T) * ?`next( T * ); } ) {
+forall( T /*| sized(T)*/ | { Link(T) * next( T * ); } ) {
 	struct StackLF {
 		Link(T) stack;
@@ -226,7 +223,7 @@
 
 		void push( StackLF(T) & this, T & n ) with(this) {
-			*( &n )`next = stack;						// atomic assignment unnecessary, or use CAA
+			*next( &n ) = stack;						// atomic assignment unnecessary, or use CAA
 			for () {									// busy wait
-				if ( __atomic_compare_exchange_n( &stack.atom, &( &n )`next->atom, (Link(T))@{ (LinkData(T))@{ &n, ( &n )`next->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node
+				if ( __atomic_compare_exchange_n( &stack.atom, &next( &n )->atom, (Link(T))@{ (LinkData(T))@{ &n, next( &n )->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node
 			} // for
 		} // push
@@ -236,5 +233,5 @@
 			for () {									// busy wait
 				if ( t.data.top == 0p ) return 0p;		// empty stack ?
-				Link(T) * next = ( t.data.top )`next;
+				Link(T) * next = next( t.data.top );	// NOTE(review): local 'next' shadows the forall-bound 'next' function in its own initializer -- relies on CFA overload resolution; confirm
 				if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ (LinkData(T))@{ next->data.top, t.data.count } }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top; // attempt to update top node
 			} // for
@@ -246,11 +243,11 @@
 				// TODO: Avoiding some problems with double fields access.
 				LinkData(T) * data = &link->data;
-				T * next = (T *)&(*data).top;
-				if ( next == node ) {
-					data->top = ( node )`next->data.top;
+				T * ntop = (T *)&(*data).top;
+				if ( ntop == node ) {
+					data->top = next( node )->data.top;
 					return true;
 				}
-				if ( next == 0p ) return false;
-				link = ( next )`next;
+				if ( ntop == 0p ) return false;
+				link = next( ntop );
 			}
 		}
Index: libcfa/src/collections/vector2.hfa
===================================================================
--- libcfa/src/collections/vector2.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/collections/vector2.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Thu Jun 23 22:00:00 2021
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Mar 14 08:40:53 2023
-// Update Count     : 2
+// Last Modified On : Wed Apr 23 14:39:51 2025
+// Update Count     : 6
 //
 
@@ -254,5 +254,5 @@
         }
 
-        while ( vector_permit(T) & liveIter = this.live_iters_$`elems; liveIter`moveNext ) {
+        while ( vector_permit(T) & liveIter = iter( this.live_iters_$ ); advance( liveIter ) ) {
             liveIter.item_$ += (newItems - this.buffer_first_$);
         }
@@ -350,5 +350,5 @@
         *insertTarget = val;
 
-        while ( vector_permit(T) & liveIter = col.live_iters_$`elems; liveIter`moveNext ) {
+        while ( vector_permit(T) & liveIter = iter( col.live_iters_$ ); advance( liveIter ) ) {
             if ( inRange_$(liveIter.item_$, insertTarget, col.elems_end_$) ) {
                 liveIter.item_$ += 1;
Index: libcfa/src/concurrency/alarm.cfa
===================================================================
--- libcfa/src/concurrency/alarm.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/alarm.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Fri Jun 2 11:31:25 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Wed Jun 17 16:11:35 2020
-// Update Count     : 75
+// Last Modified On : Thu Apr 24 22:22:25 2025
+// Update Count     : 88
 //
 
@@ -84,14 +84,7 @@
 
 void insert( alarm_list_t * this, alarm_node_t * n ) {
-	alarm_node_t * it = & (*this)`first;
-	while( it && (n->deadline > it->deadline) ) {
-		it = & (*it)`next;
-	}
-	if ( it ) {
-		insert_before( *it, *n );
-	} else {
-		insert_last(*this, *n);
-	}
-
+	alarm_node_t & it = iter( *this );
+	while ( advance( it ) && it.deadline <= n->deadline );
+	insert_before( it, *n );
 	verify( validate( *this ) );
 }
@@ -99,5 +92,5 @@
 alarm_node_t * pop( alarm_list_t * this ) {
 	verify( validate( *this ) );
-	alarm_node_t * head = & (*this)`first;
+	alarm_node_t * head = &first( *this );
 	if( head ) {
 		remove(*head);
@@ -147,7 +140,7 @@
 	park();
 
-	/* paranoid */ verify( !node.set );
-	/* paranoid */ verify( & node`next == 0p );
-	/* paranoid */ verify( & node`prev == 0p );
+	/* paranoid */ verify( ! node.set );
+	/* paranoid */ verify( & next( node ) == 0p );
+	/* paranoid */ verify( & prev( node ) == 0p );
 }
 
Index: libcfa/src/concurrency/barrier.hfa
===================================================================
--- libcfa/src/concurrency/barrier.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/barrier.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -11,6 +11,6 @@
 // Created On       : Sun Nov 10 08:07:35 2024
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Wed Nov 13 12:37:04 2024
-// Update Count     : 9
+// Last Modified On : Thu Apr 24 22:41:11 2025
+// Update Count     : 12
 // 
 
@@ -31,6 +31,6 @@
 
 // Returns a value indicating the reverse order the threads arrived, i.e. last thread returns 0 (and does not block)
-// last is an optional hook that is called by the Gth thread before unblocking the other threads.
-static inline unsigned int block( barrier & mutex b, fptr_t last = (fptr_t)0 ) with( b ) {
+// hook is an optional hook that is called by the Gth thread before unblocking the other threads.
+static inline unsigned int block( barrier & mutex b, fptr_t hook = (fptr_t)0 ) with( b ) {
 	arrivals -= 1;										// prefix decrement so last is 0 not 1
 	unsigned arrived = b.arrivals;						// note arrival order
@@ -38,5 +38,5 @@
 		wait( b.c );
 	} else {											// group formed
-		if ( last ) last();								// safe to call
+		if ( hook ) hook();								// safe to call
 		signal_all( c );								// unblock group
 		arrivals = group;								// reset
Index: libcfa/src/concurrency/channel.hfa
===================================================================
--- libcfa/src/concurrency/channel.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/channel.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -57,529 +57,535 @@
 
 forall( T ) {
-
-struct __attribute__((aligned(128))) channel {
-    size_t size, front, back, count;
-    T * buffer;
-    dlist( select_node ) prods, cons; // lists of blocked threads
-    go_mutex mutex_lock;              // MX lock
-    bool closed;                      // indicates channel close/open
-    #ifdef CHAN_STATS
-    size_t p_blocks, p_ops, c_blocks, c_ops;      // counts total ops and ops resulting in a blocked thd
-    #endif
-};
-static inline void ?{}( channel(T) & this, channel(T) this2 ) = void;
-static inline void ?=?( channel(T) & this, channel(T) this2 ) = void;
-
-static inline void ?{}( channel(T) &c, size_t _size ) with(c) {
-    size = _size;
-    front = back = count = 0;
-    if ( size != 0 ) buffer = aalloc( size );
-    prods{};
-    cons{};
-    mutex_lock{};
-    closed = false;
-    #ifdef CHAN_STATS
-    p_blocks = 0;
-    p_ops = 0;
-    c_blocks = 0;
-    c_ops = 0;
-    #endif
-}
-
-static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
-static inline void ^?{}( channel(T) &c ) with(c) {
-    #ifdef CHAN_STATS
-    printf("Channel %p Blocks: %lu,\t\tOperations: %lu,\t%.2f%% of ops blocked\n", &c, p_blocks + c_blocks, p_ops + c_ops, ((double)p_blocks + c_blocks)/(p_ops + c_ops) * 100);
-    printf("Channel %p Consumer Blocks: %lu,\tConsumer Ops: %lu,\t%.2f%% of Consumer ops blocked\n", &c, p_blocks, p_ops, ((double)p_blocks)/p_ops * 100);
-    printf("Channel %p Producer Blocks: %lu,\tProducer Ops: %lu,\t%.2f%% of Producer ops blocked\n", &c, c_blocks, c_ops, ((double)c_blocks)/c_ops * 100);
-    #endif
-    verifyf( __handle_waituntil_OR( cons ) || __handle_waituntil_OR( prods ) || cons`isEmpty && prods`isEmpty, 
-        "Attempted to delete channel with waiting threads (Deadlock).\n" );
-    if ( size != 0 ) delete( buffer );
-}
-static inline size_t get_count( channel(T) & chan ) with(chan) { return __atomic_load_n( &count, __ATOMIC_RELAXED ); }
-static inline size_t get_size( channel(T) & chan ) with(chan) { return __atomic_load_n( &size, __ATOMIC_RELAXED ); }
-static inline bool has_waiters( channel(T) & chan ) with(chan) { return !cons`isEmpty || !prods`isEmpty; }
-static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !cons`isEmpty; }
-static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !prods`isEmpty; }
-
-// closes the channel and notifies all blocked threads
-static inline void close( channel(T) & chan ) with(chan) {
-    lock( mutex_lock );
-    closed = true;
-
-    // flush waiting consumers and producers
-    while ( has_waiting_consumers( chan ) ) {
-        if( !__handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race
-            break;  // if __handle_waituntil_OR returns false cons is empty so break
-        cons`first.extra = 0p;
-        wake_one( cons );
-    }
-    while ( has_waiting_producers( chan ) ) {
-        if( !__handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race
-            break;  // if __handle_waituntil_OR returns false prods is empty so break
-        prods`first.extra = 0p;
-        wake_one( prods );
-    }
-    unlock(mutex_lock);
-}
-
-static inline void is_closed( channel(T) & chan ) with(chan) { return closed; }
-
-// used to hand an element to a blocked consumer and signal it
-static inline void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
-    memcpy( cons`first.extra, (void *)&elem, sizeof(T) ); // do waiting consumer work
-    wake_one( cons );
-}
-
-// used to hand an element to a blocked producer and signal it
-static inline void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
-    memcpy( (void *)&retval, prods`first.extra, sizeof(T) );
-    wake_one( prods );
-}
-
-static inline void flush( channel(T) & chan, T elem ) with(chan) {
-    lock( mutex_lock );
-    while ( count == 0 && !cons`isEmpty ) {
-        __cons_handoff( chan, elem );
-    }
-    unlock( mutex_lock );
-}
-
-// handles buffer insert
-static inline void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
-    memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) );
-    count += 1;
-    back++;
-    if ( back == size ) back = 0;
-}
-
-// needed to avoid an extra copy in closed case
-static inline bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
-    lock( mutex_lock );
-    #ifdef CHAN_STATS
-    p_ops++;
-    #endif
-
-    ConsEmpty: if ( !cons`isEmpty ) {
-        if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
-        __cons_handoff( chan, elem );
-        unlock( mutex_lock );
-        return true;
-    }
-
-    if ( count == size ) { unlock( mutex_lock ); return false; }
-
-    __buf_insert( chan, elem );
-    unlock( mutex_lock );
-    return true;
-}
-
-// attempts a nonblocking insert
-// returns true if insert was successful, false otherwise
-static inline bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); }
-
-// handles closed case of insert routine
-static inline void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
-    channel_closed except{ &channel_closed_vt, &elem, &chan };
-    throwResume except; // throw closed resumption
-    if ( !__internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination
-}
-
-static inline void insert( channel(T) & chan, T elem ) with(chan) {
-    // check for close before acquire mx
-    if ( unlikely(closed) ) {
-        __closed_insert( chan, elem );
-        return;
-    } 
-
-    lock( mutex_lock );
-
-    #ifdef CHAN_STATS
-    if ( !closed ) p_ops++;
-    #endif
-
-    // if closed handle
-    if ( unlikely(closed) ) {
-        unlock( mutex_lock );
-        __closed_insert( chan, elem );
-        return;
-    }
-
-    // buffer count must be zero if cons are blocked (also handles zero-size case)
-    ConsEmpty: if ( !cons`isEmpty ) {
-        if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
-        __cons_handoff( chan, elem );
-        unlock( mutex_lock );
-        return;
-    }
-
-    // wait if buffer is full, work will be completed by someone else
-    if ( count == size ) {
-        #ifdef CHAN_STATS
-        p_blocks++;
-        #endif
-
-        // check for if woken due to close
-        if ( unlikely( block( prods, &elem, mutex_lock ) ) )
-            __closed_insert( chan, elem );
-        return;
-    } // if
-
-    __buf_insert( chan, elem );
-    unlock( mutex_lock );
-}
-
-// does the buffer remove and potentially does waiting producer work
-static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
-    memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) );
-    count -= 1;
-    front = (front + 1) % size;
-    if (count == size - 1 && !prods`isEmpty ) {
-        if ( !__handle_waituntil_OR( prods ) ) return;
-        __buf_insert( chan, *(T *)prods`first.extra );  // do waiting producer work
-        wake_one( prods );
-    }
-}
-
-// needed to avoid an extra copy in closed case and single return val case
-static inline bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) {
-    lock( mutex_lock );
-    #ifdef CHAN_STATS
-    c_ops++;
-    #endif
-
-    ZeroSize: if ( size == 0 && !prods`isEmpty ) {
-        if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
-        __prods_handoff( chan, retval );
-        unlock( mutex_lock );
-        return true;
-    }
-
-    if ( count == 0 ) { unlock( mutex_lock ); return false; }
-
-    __do_remove( chan, retval );
-    unlock( mutex_lock );
-    return true;
-}
-
-// attempts a nonblocking remove
-// returns [T, true] if insert was successful
-// returns [T, false] if insert was successful (T uninit)
-static inline [T, bool] try_remove( channel(T) & chan ) {
-    T retval;
-    bool success = __internal_try_remove( chan, retval );
-    return [ retval, success ];
-}
-
-static inline T try_remove( channel(T) & chan ) {
-    T retval;
-    __internal_try_remove( chan, retval );
-    return retval;
-}
-
-// handles closed case of insert routine
-static inline void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
-    channel_closed except{ &channel_closed_vt, 0p, &chan };
-    throwResume except; // throw resumption
-    if ( !__internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination
-}
-
-static inline T remove( channel(T) & chan ) with(chan) {
-    T retval;
-    if ( unlikely(closed) ) {
-        __closed_remove( chan, retval );
-        return retval;
-    } 
-    lock( mutex_lock );
-
-    #ifdef CHAN_STATS
-    if ( !closed ) c_ops++;
-    #endif
-
-    if ( unlikely(closed) ) {
-        unlock( mutex_lock );
-        __closed_remove( chan, retval );
-        return retval;
-    } 
-
-    // have to check for the zero size channel case
-    ZeroSize: if ( size == 0 && !prods`isEmpty ) {
-        if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
-        __prods_handoff( chan, retval );
-        unlock( mutex_lock );
-        return retval;
-    }
-
-    // wait if buffer is empty, work will be completed by someone else
-    if ( count == 0 ) {
-        #ifdef CHAN_STATS
-        c_blocks++;
-        #endif
-        // check for if woken due to close
-        if ( unlikely( block( cons, &retval, mutex_lock ) ) )
-            __closed_remove( chan, retval );
-        return retval;
-    }
-
-    // Remove from buffer
-    __do_remove( chan, retval );
-    unlock( mutex_lock );
-    return retval;
-}
-static inline void remove( channel(T) & chan ) { T elem = (T)remove( chan ); }
-
-
-///////////////////////////////////////////////////////////////////////////////////////////
-// The following is Go-style operator support for channels
-///////////////////////////////////////////////////////////////////////////////////////////
-
-static inline void ?<<?( channel(T) & chan, T elem ) { insert( chan, elem ); }
-static inline void ?<<?( T & ret, channel(T) & chan ) { ret = remove( chan ); }
-
-///////////////////////////////////////////////////////////////////////////////////////////
-// The following is support for waituntil (select) statements
-///////////////////////////////////////////////////////////////////////////////////////////
-static inline bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) {
-    if ( !node`isListed && !node.park_counter ) return false; // handle special OR case
-    lock( mutex_lock );
-    if ( node`isListed ) { // op wasn't performed
-        remove( node );
-        unlock( mutex_lock );
-        return false;
-    }
-    unlock( mutex_lock );
-
-    // only return true when not special OR case and status is SAT
-    return !node.park_counter ? false : *node.clause_status == __SELECT_SAT;
-}
-
-// special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case
-static inline bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) {
-    while ( !queue`isEmpty ) {
-        // if node not a special OR case or if we win the special OR case race break
-        if ( !queue`first.clause_status || queue`first.park_counter || __pending_set_other( queue`first, mine, ((unsigned long int)(&(queue`first))) ) )
-            return true;
+	 struct __attribute__((aligned(128))) channel {
+		size_t size, front, back, count;
+		T * buffer;
+		dlist( select_node ) prods, cons;				// lists of blocked threads
+		go_mutex mutex_lock;							// MX lock
+		bool closed;									// indicates channel close/open
+		#ifdef CHAN_STATS
+		size_t p_blocks, p_ops, c_blocks, c_ops;		// counts total ops and ops resulting in a blocked thd
+	    #endif
+	};
+
+	 // type used by select statement to capture a chan read as the selected operation
+	 struct chan_read {
+		 T * ret;
+		 channel(T) * chan;
+	 };
+	 __CFA_SELECT_GET_TYPE( chan_read(T) );
+
+	 // type used by select statement to capture a chan read as the selected operation that doesn't have a param to read to
+	 struct chan_read_no_ret {
+		 T retval;
+		 chan_read( T ) c_read;
+	 };
+	 __CFA_SELECT_GET_TYPE( chan_read_no_ret(T) );
+
+	 // type used by select statement to capture a chan write as the selected operation
+	 struct chan_write {
+		 T elem;
+		 channel(T) * chan;
+	 };
+	 __CFA_SELECT_GET_TYPE( chan_write(T) );
+} // distribution
+
+static inline forall( T ) {
+	void ?{}( channel(T) & this, channel(T) this2 ) = void;
+	void ?=?( channel(T) & this, channel(T) this2 ) = void;
+
+	void ?{}( channel(T) &c, size_t _size ) with(c) {
+		size = _size;
+		front = back = count = 0;
+		if ( size != 0 ) buffer = aalloc( size );
+		prods{};
+		cons{};
+		mutex_lock{};
+		closed = false;
+	    #ifdef CHAN_STATS
+		p_blocks = 0;
+		p_ops = 0;
+		c_blocks = 0;
+		c_ops = 0;
+	    #endif
+	}
+
+	void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
+	void ^?{}( channel(T) &c ) with(c) {
+	    #ifdef CHAN_STATS
+		printf("Channel %p Blocks: %lu,\t\tOperations: %lu,\t%.2f%% of ops blocked\n", &c, p_blocks + c_blocks, p_ops + c_ops, ((double)p_blocks + c_blocks)/(p_ops + c_ops) * 100);
+		printf("Channel %p Consumer Blocks: %lu,\tConsumer Ops: %lu,\t%.2f%% of Consumer ops blocked\n", &c, c_blocks, c_ops, ((double)c_blocks)/c_ops * 100);
+		printf("Channel %p Producer Blocks: %lu,\tProducer Ops: %lu,\t%.2f%% of Producer ops blocked\n", &c, p_blocks, p_ops, ((double)p_blocks)/p_ops * 100);
+	    #endif
+		verifyf( __handle_waituntil_OR( cons ) || __handle_waituntil_OR( prods ) || isEmpty( cons ) && isEmpty( prods ), 
+				 "Attempted to delete channel with waiting threads (Deadlock).\n" );
+		if ( size != 0 ) delete( buffer );
+	}
+	size_t get_count( channel(T) & chan ) with(chan) { return __atomic_load_n( &count, __ATOMIC_RELAXED ); }
+	size_t get_size( channel(T) & chan ) with(chan) { return __atomic_load_n( &size, __ATOMIC_RELAXED ); }
+	bool has_waiters( channel(T) & chan ) with(chan) { return ! isEmpty( cons ) || ! isEmpty( prods ); }
+	bool has_waiting_consumers( channel(T) & chan ) with(chan) { return ! isEmpty( cons ); }
+	bool has_waiting_producers( channel(T) & chan ) with(chan) { return ! isEmpty( prods ); }
+
+	// closes the channel and notifies all blocked threads
+	void close( channel(T) & chan ) with(chan) {
+		lock( mutex_lock );
+		closed = true;
+
+		// flush waiting consumers and producers
+		while ( has_waiting_consumers( chan ) ) {
+			if( ! __handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race
+				break;  // if __handle_waituntil_OR returns false cons is empty so break
+			first( cons ).extra = 0p;
+			wake_one( cons );
+		}
+		while ( has_waiting_producers( chan ) ) {
+			if( ! __handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race
+				break;  // if __handle_waituntil_OR returns false prods is empty so break
+			first( prods ).extra = 0p;
+			wake_one( prods );
+		}
+		unlock(mutex_lock);
+	}
+
+	bool is_closed( channel(T) & chan ) with(chan) { return closed; }
+
+	// used to hand an element to a blocked consumer and signal it
+	void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
+		memcpy( first( cons ).extra, (void *)&elem, sizeof(T) ); // do waiting consumer work
+		wake_one( cons );
+	}
+
+	// used to hand an element to a blocked producer and signal it
+	void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
+		memcpy( (void *)&retval, first( prods ).extra, sizeof(T) );
+		wake_one( prods );
+	}
+
+	void flush( channel(T) & chan, T elem ) with(chan) {
+		lock( mutex_lock );
+		while ( count == 0 && ! isEmpty( cons ) ) {
+			__cons_handoff( chan, elem );
+		}
+		unlock( mutex_lock );
+	}
+
+	// handles buffer insert
+	void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
+		memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) );
+		count += 1;
+		back++;
+		if ( back == size ) back = 0;
+	}
+
+	// needed to avoid an extra copy in closed case
+	bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
+		lock( mutex_lock );
+	    #ifdef CHAN_STATS
+		p_ops++;
+	    #endif
+
+	  ConsEmpty:
+		if ( ! isEmpty( cons ) ) {
+			if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty;
+			__cons_handoff( chan, elem );
+			unlock( mutex_lock );
+			return true;
+		}
+
+		if ( count == size ) { unlock( mutex_lock ); return false; }
+
+		__buf_insert( chan, elem );
+		unlock( mutex_lock );
+		return true;
+	}
+
+	// attempts a nonblocking insert
+	// returns true if insert was successful, false otherwise
+	bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); }
+
+	// handles closed case of insert routine
+	void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
+		channel_closed except{ &channel_closed_vt, &elem, &chan };
+		throwResume except; // throw closed resumption
+		if ( ! __internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination
+	}
+
+	void insert( channel(T) & chan, T elem ) with(chan) {
+		// check for close before acquire mx
+		if ( unlikely(closed) ) {
+			__closed_insert( chan, elem );
+			return;
+		}
+
+		lock( mutex_lock );
+
+	    #ifdef CHAN_STATS
+		if ( ! closed ) p_ops++;
+	    #endif
+
+		// if closed handle
+		if ( unlikely(closed) ) {
+			unlock( mutex_lock );
+			__closed_insert( chan, elem );
+			return;
+		}
+
+		// buffer count must be zero if cons are blocked (also handles zero-size case)
+	  ConsEmpty:
+		if ( ! isEmpty( cons ) ) {
+			if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty;
+			__cons_handoff( chan, elem );
+			unlock( mutex_lock );
+			return;
+		}
+
+		// wait if buffer is full, work will be completed by someone else
+		if ( count == size ) {
+	        #ifdef CHAN_STATS
+			p_blocks++;
+	        #endif
+
+			// check for if woken due to close
+			if ( unlikely( block( prods, &elem, mutex_lock ) ) )
+				__closed_insert( chan, elem );
+			return;
+		} // if
+
+		__buf_insert( chan, elem );
+		unlock( mutex_lock );
+	}
+
+	// does the buffer remove and potentially does waiting producer work
+	void __do_remove( channel(T) & chan, T & retval ) with(chan) {
+		memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) );
+		count -= 1;
+		front = (front + 1) % size;
+		if (count == size - 1 && ! isEmpty( prods ) ) {
+			if ( ! __handle_waituntil_OR( prods ) ) return;
+			__buf_insert( chan, *(T *)first( prods ).extra );  // do waiting producer work
+			wake_one( prods );
+		}
+	}
+
+	// needed to avoid an extra copy in closed case and single return val case
+	bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) {
+		lock( mutex_lock );
+	    #ifdef CHAN_STATS
+		c_ops++;
+	    #endif
+
+	  ZeroSize:
+		if ( size == 0 && ! isEmpty( prods ) ) {
+			if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize;
+			__prods_handoff( chan, retval );
+			unlock( mutex_lock );
+			return true;
+		}
+
+		if ( count == 0 ) { unlock( mutex_lock ); return false; }
+
+		__do_remove( chan, retval );
+		unlock( mutex_lock );
+		return true;
+	}
+
+	// attempts a nonblocking remove
+	// returns [T, true] if insert was successful
+	// returns [T, false] if insert was successful (T uninit)
+	[T, bool] try_remove( channel(T) & chan ) {
+		T retval;
+		bool success = __internal_try_remove( chan, retval );
+		return [ retval, success ];
+	}
+
+	T try_remove( channel(T) & chan ) {
+		T retval;
+		__internal_try_remove( chan, retval );
+		return retval;
+	}
+
+	// handles closed case of insert routine
+	void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
+		channel_closed except{ &channel_closed_vt, 0p, &chan };
+		throwResume except; // throw resumption
+		if ( ! __internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination
+	}
+
+	T remove( channel(T) & chan ) with(chan) {
+		T retval;
+		if ( unlikely(closed) ) {
+			__closed_remove( chan, retval );
+			return retval;
+		}
+		lock( mutex_lock );
+
+	    #ifdef CHAN_STATS
+		if ( ! closed ) c_ops++;
+		#endif
+
+		if ( unlikely(closed) ) {
+			unlock( mutex_lock );
+			__closed_remove( chan, retval );
+			return retval;
+		}
+
+		// have to check for the zero size channel case
+	  ZeroSize:
+		if ( size == 0 && ! isEmpty( prods ) ) {
+			if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize;
+			__prods_handoff( chan, retval );
+			unlock( mutex_lock );
+			return retval;
+		}
+
+		// wait if buffer is empty, work will be completed by someone else
+		if ( count == 0 ) {
+	        #ifdef CHAN_STATS
+			c_blocks++;
+	        #endif
+			// check for if woken due to close
+			if ( unlikely( block( cons, &retval, mutex_lock ) ) )
+				__closed_remove( chan, retval );
+			return retval;
+		}
+
+		// Remove from buffer
+		__do_remove( chan, retval );
+		unlock( mutex_lock );
+		return retval;
+	}
+	void remove( channel(T) & chan ) { T elem = (T)remove( chan ); }
+
+
+	///////////////////////////////////////////////////////////////////////////////////////////
+	// The following is Go-style operator support for channels
+	///////////////////////////////////////////////////////////////////////////////////////////
+
+	void ?<<?( channel(T) & chan, T elem ) { insert( chan, elem ); }
+	void ?<<?( T & ret, channel(T) & chan ) { ret = remove( chan ); }
+
+	///////////////////////////////////////////////////////////////////////////////////////////
+	// The following is support for waituntil (select) statements
+	///////////////////////////////////////////////////////////////////////////////////////////
+	bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) {
+	    if ( ! isListed( node ) && ! node.park_counter ) return false; // handle special OR case
+	    lock( mutex_lock );
+	    if ( isListed( node ) ) { // op wasn't performed
+	        remove( node );
+	        unlock( mutex_lock );
+	        return false;
+	    }
+	    unlock( mutex_lock );
+
+	    // only return true when not special OR case and status is SAT
+	    return ! node.park_counter ? false : *node.clause_status == __SELECT_SAT;
+	}
+
+	// special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case
+	bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) {
+	    while ( ! isEmpty( queue ) ) {
+	        // if node not a special OR case or if we win the special OR case race break
+	        if ( ! first( queue ).clause_status || first( queue ).park_counter || __pending_set_other( first( queue ), mine, ((unsigned long int)(&(first( queue )))) ) )
+	            return true;
         
-        // our node lost the race when toggling in __pending_set_other
-        if ( *mine.clause_status != __SELECT_PENDING ) 
-            return false;
-
-        // otherwise we lost the special OR race so discard node
-        try_pop_front( queue );
-    }
-    return false;
-}
-
-// type used by select statement to capture a chan read as the selected operation
-struct chan_read {
-    T * ret;
-    channel(T) * chan;
-};
-__CFA_SELECT_GET_TYPE( chan_read(T) );
-
-static inline void ?{}( chan_read(T) & cr, channel(T) * chan, T * ret ) {
-    cr.chan = chan;
-    cr.ret = ret;
-}
-static inline chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ &chan, &ret }; return cr; }
-
-static inline void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
-    __closed_remove( *chan, *ret );
-    // if we get here then the insert succeeded
-    __make_select_node_available( node );
-}
-
-static inline bool register_select( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
-    lock( mutex_lock );
-    node.extra = ret; // set .extra so that if it == 0p later in on_selected it is due to channel close
-
-    #ifdef CHAN_STATS
-    if ( !closed ) c_ops++;
-    #endif
-
-    if ( !node.park_counter ) {
-        // are we special case OR and front of cons is also special case OR
-        if ( !unlikely(closed) && !prods`isEmpty && prods`first.clause_status && !prods`first.park_counter ) {
-            if ( !__make_select_node_pending( node ) ) {
-                unlock( mutex_lock );
-                return false;
-            }
-
-            if ( __handle_pending( prods, node ) ) {
-                __prods_handoff( *chan, *ret );
-                __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
-                unlock( mutex_lock );
-                return true;
-            }
-            if ( *node.clause_status == __SELECT_PENDING )
-                __make_select_node_unsat( node );
-        }
-        // check if we can complete operation. If so race to establish winner in special OR case
-        if ( count != 0 || !prods`isEmpty || unlikely(closed) ) {
-            if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
-                unlock( mutex_lock );
-                return false;
-            }
-        }
-    }
-
-    if ( unlikely(closed) ) {
-        unlock( mutex_lock );
-        __handle_select_closed_read( this, node );
-        return true;
-    }
-
-    // have to check for the zero size channel case
-    ZeroSize: if ( size == 0 && !prods`isEmpty ) {
-        if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
-        __prods_handoff( *chan, *ret );
-        __set_avail_then_unlock( node, mutex_lock );
-        return true;
-    }
-
-    // wait if buffer is empty, work will be completed by someone else
-    if ( count == 0 ) {
-        #ifdef CHAN_STATS
-        c_blocks++;
-        #endif
+	        // our node lost the race when toggling in __pending_set_other
+	        if ( *mine.clause_status != __SELECT_PENDING ) 
+	            return false;
+
+	        // otherwise we lost the special OR race so discard node
+	        remove_first( queue );
+	    }
+	    return false;
+	}
+
+	void ?{}( chan_read(T) & cr, channel(T) * chan, T * ret ) {
+	    cr.chan = chan;
+	    cr.ret = ret;
+	}
+	chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ &chan, &ret }; return cr; }
+
+	void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
+	    __closed_remove( *chan, *ret );
+	    // if we get here then the insert succeeded
+	    __make_select_node_available( node );
+	}
+
+	bool register_select( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
+	    lock( mutex_lock );
+	    node.extra = ret; // set .extra so that if it == 0p later in on_selected it is due to channel close
+
+	    #ifdef CHAN_STATS
+	    if ( ! closed ) c_ops++;
+	    #endif
+
+	    if ( ! node.park_counter ) {
+	        // are we special case OR and front of cons is also special case OR
+	        if ( ! unlikely(closed) && ! isEmpty( prods ) && first( prods ).clause_status && ! first( prods ).park_counter ) {
+	            if ( ! __make_select_node_pending( node ) ) {
+	                unlock( mutex_lock );
+	                return false;
+	            }
+
+	            if ( __handle_pending( prods, node ) ) {
+	                __prods_handoff( *chan, *ret );
+	                __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
+	                unlock( mutex_lock );
+	                return true;
+	            }
+	            if ( *node.clause_status == __SELECT_PENDING )
+	                __make_select_node_unsat( node );
+	        }
+	        // check if we can complete operation. If so race to establish winner in special OR case
+	        if ( count != 0 || ! isEmpty( prods ) || unlikely(closed) ) {
+	            if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+	                unlock( mutex_lock );
+	                return false;
+	            }
+	        }
+	    }
+
+	    if ( unlikely(closed) ) {
+	        unlock( mutex_lock );
+	        __handle_select_closed_read( this, node );
+	        return true;
+	    }
+
+	    // have to check for the zero size channel case
+	    ZeroSize:
+		if ( size == 0 && ! isEmpty( prods ) ) {
+			if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize;
+			__prods_handoff( *chan, *ret );
+			__set_avail_then_unlock( node, mutex_lock );
+			return true;
+		}
+
+		// wait if buffer is empty, work will be completed by someone else
+		if ( count == 0 ) {
+	        #ifdef CHAN_STATS
+	        c_blocks++;
+	        #endif
         
-        insert_last( cons, node );
-        unlock( mutex_lock );
-        return false;
-    }
-
-    // Remove from buffer
-    __do_remove( *chan, *ret );
-    __set_avail_then_unlock( node, mutex_lock );
-    return true;
-}
-static inline bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
-static inline bool on_selected( chan_read(T) & this, select_node & node ) with(this) {
-    if ( unlikely(node.extra == 0p) ) {
-        if ( !exception_in_flight() ) __closed_remove( *chan, *ret ); // check if woken up due to closed channel
-        else return false;
-    }
-    // This is only reachable if not closed or closed exception was handled
-    return true;
-}
-
-// type used by select statement to capture a chan read as the selected operation that doesn't have a param to read to
-struct chan_read_no_ret {
-    T retval;
-    chan_read( T ) c_read;
-};
-__CFA_SELECT_GET_TYPE( chan_read_no_ret(T) );
-
-static inline void ?{}( chan_read_no_ret(T) & this, channel(T) & chan ) {
-    this.c_read{ &chan, &this.retval };
-}
-
-static inline chan_read_no_ret(T) remove( channel(T) & chan ) { chan_read_no_ret(T) c_read{ chan }; return c_read; }
-static inline bool register_select( chan_read_no_ret(T) & this, select_node & node ) { 
-    this.c_read.ret = &this.retval;
-    return register_select( this.c_read, node );
-}
-static inline bool unregister_select( chan_read_no_ret(T) & this, select_node & node ) { return unregister_select( this.c_read, node ); }
-static inline bool on_selected( chan_read_no_ret(T) & this, select_node & node ) { return on_selected( this.c_read, node ); }
-
-// type used by select statement to capture a chan write as the selected operation
-struct chan_write {
-    T elem;
-    channel(T) * chan;
-};
-__CFA_SELECT_GET_TYPE( chan_write(T) );
-
-static inline void ?{}( chan_write(T) & cw, channel(T) * chan, T elem ) {
-    cw.chan = chan;
-    memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) );
-}
-static inline chan_write(T) ?<<?( channel(T) & chan, T elem ) { chan_write(T) cw{ &chan, elem }; return cw; }
-static inline chan_write(T) insert( T elem, channel(T) & chan) { chan_write(T) cw{ &chan, elem }; return cw; }
-
-static inline void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
-    __closed_insert( *chan, elem );
-    // if we get here then the insert succeeded
-    __make_select_node_available( node );
-}
-
-static inline bool register_select( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
-    lock( mutex_lock );
-    node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close
-
-    #ifdef CHAN_STATS
-    if ( !closed ) p_ops++;
-    #endif
-
-    // special OR case handling
-    if ( !node.park_counter ) {
-        // are we special case OR and front of cons is also special case OR
-        if ( !unlikely(closed) && !cons`isEmpty && cons`first.clause_status && !cons`first.park_counter ) {
-            if ( !__make_select_node_pending( node ) ) {
-                unlock( mutex_lock );
-                return false;
-            }
-
-            if ( __handle_pending( cons, node ) ) {
-                __cons_handoff( *chan, elem );
-                __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
-                unlock( mutex_lock );
-                return true;
-            }
-            if ( *node.clause_status == __SELECT_PENDING )
-                __make_select_node_unsat( node );
-        }
-        // check if we can complete operation. If so race to establish winner in special OR case
-        if ( count != size || !cons`isEmpty || unlikely(closed) ) {
-            if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
-                unlock( mutex_lock );
-                return false;
-            }
-        }
-    }
-
-    // if closed handle
-    if ( unlikely(closed) ) {
-        unlock( mutex_lock );
-        __handle_select_closed_write( this, node );
-        return true;
-    }
-
-    // handle blocked consumer case via handoff (buffer is implicitly empty)
-    ConsEmpty: if ( !cons`isEmpty ) {
-        if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
-        __cons_handoff( *chan, elem );
-        __set_avail_then_unlock( node, mutex_lock );
-        return true;
-    }
-
-    // insert node in list if buffer is full, work will be completed by someone else
-    if ( count == size ) {
-        #ifdef CHAN_STATS
-        p_blocks++;
-        #endif
-
-        insert_last( prods, node );
-        unlock( mutex_lock );
-        return false;
-    } // if
-
-    // otherwise carry out write either via normal insert
-    __buf_insert( *chan, elem );
-    __set_avail_then_unlock( node, mutex_lock );
-    return true;
-}
-static inline bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
-
-static inline bool on_selected( chan_write(T) & this, select_node & node ) with(this) { 
-    if ( unlikely(node.extra == 0p) ) {
-        if ( !exception_in_flight() ) __closed_insert( *chan, elem ); // check if woken up due to closed channel
-        else return false;
-    }
-    // This is only reachable if not closed or closed exception was handled
-    return true;
-}
-
-} // forall( T )
-
-
+	        insert_last( cons, node );
+	        unlock( mutex_lock );
+	        return false;
+	    }
+
+	    // Remove from buffer
+	    __do_remove( *chan, *ret );
+	    __set_avail_then_unlock( node, mutex_lock );
+	    return true;
+	}
+	bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
+	bool on_selected( chan_read(T) & this, select_node & node ) with(this) {
+	    if ( unlikely(node.extra == 0p) ) {
+	        if ( ! exception_in_flight() ) __closed_remove( *chan, *ret ); // check if woken up due to closed channel
+	        else return false;
+	    }
+	    // This is only reachable if not closed or closed exception was handled
+	    return true;
+	}
+
+	void ?{}( chan_read_no_ret(T) & this, channel(T) & chan ) {
+	    this.c_read{ &chan, &this.retval };
+	}
+
+	chan_read_no_ret(T) remove( channel(T) & chan ) { chan_read_no_ret(T) c_read{ chan }; return c_read; }
+	bool register_select( chan_read_no_ret(T) & this, select_node & node ) { 
+	    this.c_read.ret = &this.retval;
+	    return register_select( this.c_read, node );
+	}
+	bool unregister_select( chan_read_no_ret(T) & this, select_node & node ) { return unregister_select( this.c_read, node ); }
+	bool on_selected( chan_read_no_ret(T) & this, select_node & node ) { return on_selected( this.c_read, node ); }
+
+	void ?{}( chan_write(T) & cw, channel(T) * chan, T elem ) {
+	    cw.chan = chan;
+	    memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) );
+	}
+	chan_write(T) ?<<?( channel(T) & chan, T elem ) { chan_write(T) cw{ &chan, elem }; return cw; }
+	chan_write(T) insert( T elem, channel(T) & chan) { chan_write(T) cw{ &chan, elem }; return cw; }
+
+	void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
+	    __closed_insert( *chan, elem );
+	    // if we get here then the insert succeeded
+	    __make_select_node_available( node );
+	}
+
+	bool register_select( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
+	    lock( mutex_lock );
+	    node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close
+	
+	    #ifdef CHAN_STATS
+	    if ( ! closed ) p_ops++;
+	    #endif
+
+	    // special OR case handling
+	    if ( ! node.park_counter ) {
+	        // are we special case OR and front of cons is also special case OR
+	        if ( ! unlikely(closed) && ! isEmpty( cons ) && first( cons ).clause_status && ! first( cons ).park_counter ) {
+	            if ( ! __make_select_node_pending( node ) ) {
+	                unlock( mutex_lock );
+	                return false;
+	            }
+	            if ( __handle_pending( cons, node ) ) {
+					__cons_handoff( *chan, elem );
+					__make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
+					unlock( mutex_lock );
+					return true;
+				}
+				if ( *node.clause_status == __SELECT_PENDING )
+					__make_select_node_unsat( node );
+			}
+			// check if we can complete operation. If so race to establish winner in special OR case
+			if ( count != size || ! isEmpty( cons ) || unlikely(closed) ) {
+				if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+					unlock( mutex_lock );
+					return false;
+				}
+			}
+		}
+
+		// if closed handle
+		if ( unlikely(closed) ) {
+			unlock( mutex_lock );
+			__handle_select_closed_write( this, node );
+			return true;
+		}
+
+		// handle blocked consumer case via handoff (buffer is implicitly empty)
+    ConsEmpty:
+		if ( ! isEmpty( cons ) ) {
+			if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty;
+			__cons_handoff( *chan, elem );
+			__set_avail_then_unlock( node, mutex_lock );
+			return true;
+		}
+
+		// insert node in list if buffer is full, work will be completed by someone else
+		if ( count == size ) {
+	        #ifdef CHAN_STATS
+			p_blocks++;
+	        #endif
+
+			insert_last( prods, node );
+			unlock( mutex_lock );
+			return false;
+		} // if
+
+		// otherwise carry out write either via normal insert
+		__buf_insert( *chan, elem );
+		__set_avail_then_unlock( node, mutex_lock );
+		return true;
+	}
+	bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
+
+	bool on_selected( chan_write(T) & this, select_node & node ) with(this) { 
+		if ( unlikely(node.extra == 0p) ) {
+			if ( ! exception_in_flight() ) __closed_insert( *chan, elem ); // check if woken up due to closed channel
+			else return false;
+		}
+		// This is only reachable if not closed or closed exception was handled
+		return true;
+	}
+} // distribution
+
+
Index: libcfa/src/concurrency/cofor.hfa
===================================================================
--- libcfa/src/concurrency/cofor.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/cofor.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -33,9 +33,9 @@
 
 void main( cofor_runner & this ) with(this) {
-    while ( !done || !items`isEmpty ) {
+    while ( ! done || ! isEmpty( items ) ) {
 		lock( mutex_lock );
-        runner_node * node = &try_pop_front( items );
+        runner_node * node = &remove_first( items );
 		unlock( mutex_lock );
-		if ( !node )
+		if ( ! node )
 			continue;
         func( node->value );
Index: libcfa/src/concurrency/coroutine.cfa
===================================================================
--- libcfa/src/concurrency/coroutine.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/coroutine.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Sep 18 21:47:12 2023
-// Update Count     : 25
+// Last Modified On : Fri Apr 25 06:48:19 2025
+// Update Count     : 31
 //
 
@@ -82,8 +82,8 @@
 // helper for popping from coroutine's ehm buffer
 static nonlocal_exception * pop_ehm_head( coroutine$ * this ) {
-    lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
-    nonlocal_exception * nl_ex = pop_head( this->ehm_state.ehm_buffer );
-    unlock( this->ehm_state.buffer_lock );
-    return nl_ex;
+	lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
+	nonlocal_exception * nl_ex = pop_head( this->ehm_state.ehm_buffer );
+	unlock( this->ehm_state.buffer_lock );
+	return nl_ex;
 }
 
@@ -97,5 +97,5 @@
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
-static void __stack_clean  ( __stack_info_t * this );
+static void __stack_clean( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
@@ -105,5 +105,5 @@
 
 	// Did we get a piece of storage ?
-	if (this.storage || storageSize != 0) {
+	if ( this.storage || storageSize != 0 ) {
 		// We either got a piece of storage or the user asked for a specific size
 		// Immediately create the stack
@@ -128,30 +128,29 @@
 	state = Start;
 	starter = 0p;
-	last = 0p;
+	this.last = 0p;
 	cancellation = 0p;
-    ehm_state.ehm_buffer{};
-    ehm_state.buffer_lock{};
-    ehm_state.ehm_enabled = false;
-}
-
-void ^?{}(coroutine$& this) libcfa_public {
-    // handle any leftover pending non-local exceptions
-    nonlocal_exception * nl_ex = pop_ehm_head( &this );
-    unsigned unhandled_ex = 0;
-    
-    // if any leftover exceptions handle 
-    while ( nl_ex != 0p ){
-        unhandled_ex++;
-        free( nl_ex->the_exception );
-        free( nl_ex );
-        nl_ex = pop_ehm_head( &this );
-    }
-
-    #ifdef __CFA_DEBUG__
-    if ( unhandled_ex > 0 )
-        printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex );
-    #endif
-
-	if(this.state != Halted && this.state != Start && this.state != Primed) {
+	ehm_state.ehm_buffer{};
+	ehm_state.buffer_lock{};
+	ehm_state.ehm_enabled = false;
+}
+
+void ^?{}( coroutine$ & this ) libcfa_public {
+	// handle any leftover pending non-local exceptions
+	nonlocal_exception * nl_ex = pop_ehm_head( &this );
+	unsigned unhandled_ex = 0;
+
+	// if any leftover exceptions handle 
+	for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( &this ) ) {
+		unhandled_ex++;
+		free( nl_ex->the_exception );
+		free( nl_ex );
+	}
+
+	#ifdef __CFA_DEBUG__
+	if ( unhandled_ex > 0 )
+		printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex );
+	#endif
+
+	if ( this.state != Halted && this.state != Start && this.state != Primed ) {
 		coroutine$ * src = active_coroutine();
 		coroutine$ * dst = &this;
@@ -174,18 +173,18 @@
 // Part of the Public API
 // Not inline since only ever called once per coroutine
-forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
-void prime(T& cor) libcfa_public {
-	coroutine$* this = get_coroutine(cor);
-	assert(this->state == Start);
+forall( T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); } )
+void prime( T & cor ) libcfa_public {
+	coroutine$ * this = get_coroutine(cor);
+	assert( this->state == Start );
 
 	this->state = Primed;
-	resume(cor);
+	resume( cor );
 }
 
 static [void *, size_t] __stack_alloc( size_t storageSize ) {
 	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
-	assert(__page_size != 0l);
+	assert( __page_size != 0l );
 	size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
-	size = ceiling(size, __page_size);
+	size = ceiling( size, __page_size );
 
 	// If we are running debug, we also need to allocate a guardpage to catch stack overflows.
@@ -193,5 +192,5 @@
 	#if CFA_COROUTINE_USE_MMAP
 		storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
-		if(storage == ((void*)-1)) {
+		if (storage == ((void*)-1)) {
 			abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
 		}
@@ -227,5 +226,5 @@
 		size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
 		storage = (void *)(((intptr_t)storage) - __page_size);
-		if(munmap(storage, size + __page_size) == -1) {
+		if (munmap(storage, size + __page_size) == -1) {
 			abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
 		}
@@ -248,5 +247,5 @@
 	void * storage;
 	size_t size;
-	if ( !this->storage ) {
+	if ( ! this->storage ) {
 		userStack = false;
 		[storage, size] = __stack_alloc( create_size );
@@ -302,5 +301,5 @@
 		athrd->corctx_flag = false;
 
-		if(cor->state == Primed) {
+		if (cor->state == Primed) {
 			__cfactx_suspend();
 		}
@@ -317,6 +316,6 @@
 
 void defaultResumeAtHandler( exception_t * except ) {
-    __cfaehm_allocate_exception( except );
-    __cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler );
+	__cfaehm_allocate_exception( except );
+	__cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler );
 }
 
@@ -328,19 +327,17 @@
 
 bool poll( coroutine$ * cor ) libcfa_public {
-    nonlocal_exception * nl_ex = pop_ehm_head( cor );
-
-    // if no exceptions return false
-    if ( nl_ex == 0p ) return false;
-    
-    // otherwise loop and throwResume all pending exceptions
-    while ( nl_ex != 0p ){
+	nonlocal_exception * nl_ex = pop_ehm_head( cor );
+
+	// if no exceptions return false
+	if ( nl_ex == 0p ) return false;
+	
+	// otherwise loop and throwResume all pending exceptions
+	for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( cor ) ) {
 		ehm_cleanup ex_holder{ nl_ex->the_exception };
-        free( nl_ex );
-        __cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler );
-        
-        nl_ex = pop_ehm_head( cor );
-    }
-    
-    return true;
+		free( nl_ex );
+		__cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler );
+	}
+	
+	return true;
 }
 
@@ -354,16 +351,16 @@
 // user facing ehm operations
 forall(T & | is_coroutine(T)) {
-    // enable/disable non-local exceptions
-    void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; }
-    void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; }
-
-    // poll for non-local exceptions
-    bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); }
-
-    // poll iff nonlocal ehm is enabled
-    bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? poll( cor ) : false; }
-
-    coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; }
-    coroutine$ * first_resumer( T & cor ) libcfa_public { return get_coroutine( cor )->starter; }
+	// enable/disable non-local exceptions
+	void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; }
+	void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; }
+
+	// poll for non-local exceptions
+	bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); }
+
+	// poll iff nonlocal ehm is enabled
+	bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? poll( cor ) : false; }
+
+	coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; }
+	coroutine$ * first_resumer( T & cor ) libcfa_public { return get_coroutine( cor )->starter; }
 }
 
@@ -371,23 +368,23 @@
 forall(exceptT *, T & | ehm_resume_at( exceptT, T ))
 void resumeAt( T & receiver, exceptT & ex ) libcfa_public {
-    coroutine$ * cor = get_coroutine( receiver );
-    nonlocal_exception * nl_ex = alloc();
-    exceptT * ex_copy = alloc();
-    memcpy( ex_copy, &ex, sizeof(exceptT) );
-    (*nl_ex){ (exception_t *)ex_copy };
-    lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
-    append( cor->ehm_state.ehm_buffer, nl_ex ); 
-    unlock( cor->ehm_state.buffer_lock );
+	coroutine$ * cor = get_coroutine( receiver );
+	nonlocal_exception * nl_ex = alloc();
+	exceptT * ex_copy = alloc();
+	memcpy( ex_copy, &ex, sizeof(exceptT) );
+	(*nl_ex){ (exception_t *)ex_copy };
+	lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
+	append( cor->ehm_state.ehm_buffer, nl_ex ); 
+	unlock( cor->ehm_state.buffer_lock );
 }
 
 forall(exceptT * | { void $throwResume(exceptT &); })
 void resumeAt( coroutine$ * receiver, exceptT & ex ) libcfa_public {
-    nonlocal_exception * nl_ex = alloc();
-    exceptT * ex_copy = alloc();
-    memcpy( ex_copy, &ex, sizeof(exceptT) );
-    (*nl_ex){ (exception_t *)ex_copy };
-    lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
-    append( receiver->ehm_state.ehm_buffer, nl_ex ); 
-    unlock( receiver->ehm_state.buffer_lock );
+	nonlocal_exception * nl_ex = alloc();
+	exceptT * ex_copy = alloc();
+	memcpy( ex_copy, &ex, sizeof(exceptT) );
+	(*nl_ex){ (exception_t *)ex_copy };
+	lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
+	append( receiver->ehm_state.ehm_buffer, nl_ex ); 
+	unlock( receiver->ehm_state.buffer_lock );
 }
 
Index: libcfa/src/concurrency/coroutine.hfa
===================================================================
--- libcfa/src/concurrency/coroutine.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/coroutine.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Feb  2 11:31:42 2023
-// Update Count     : 13
+// Last Modified On : Fri Apr 25 06:52:04 2025
+// Update Count     : 15
 //
 
@@ -26,7 +26,8 @@
     nonlocal_exception * next;
 };
-static inline void ?{} ( nonlocal_exception & this, exception_t * ex ) with(this) {
+
+static inline void ?{}( nonlocal_exception & this, exception_t * ex ) with(this) {
     the_exception = ex;
-    next = 0p;
+    this.next = 0p;
 }
 
@@ -66,5 +67,5 @@
 // void ^?{}( coStack_t & this );
 
-void  ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize );
+void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize );
 void ^?{}( coroutine$ & this );
 
Index: libcfa/src/concurrency/future.hfa
===================================================================
--- libcfa/src/concurrency/future.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/future.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Wed Jan 06 17:33:18 2021
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Mar  2 14:45:56 2025
-// Update Count     : 19
+// Last Modified On : Wed Apr 23 22:41:10 2025
+// Update Count     : 22
 //
 
@@ -63,5 +63,5 @@
 		void reset( future(T) & this ) with(this) {
 			lock( lock );
-			if ( ! waiters`isEmpty )
+			if ( ! isEmpty( waiters ) )
 				abort("Attempting to reset a future with blocked waiters");
 			state = FUTURE_EMPTY;
@@ -82,10 +82,10 @@
 
 		bool fulfil$( future(T) & this ) with(this) {	// helper
-			bool ret_val = ! waiters`isEmpty;
+			bool ret_val = ! isEmpty( waiters );
 			state = FUTURE_FULFILLED;
-			while ( ! waiters`isEmpty ) {
+			while ( ! isEmpty( waiters ) ) {
 				if ( !__handle_waituntil_OR( waiters ) ) // handle special waituntil OR case
 					break; // if handle_OR returns false then waiters is empty so break
-				select_node &s = try_pop_front( waiters );
+				select_node &s = remove_first( waiters );
 
 				if ( s.clause_status == 0p )			// poke in result so that woken threads do not need to reacquire any locks
@@ -208,7 +208,7 @@
 
 		bool unregister_select( future(T) & this, select_node & s ) with(this) {
-			if ( ! s`isListed ) return false;
-			lock( lock );
-			if ( s`isListed ) remove( s );
+			if ( ! isListed( s ) ) return false;
+			lock( lock );
+			if ( isListed( s ) ) remove( s );
 			unlock( lock );
 			return false;
Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/invoke.h	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Tue Jan 17 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Wed Aug 30 21:27:51 2023
-// Update Count     : 60
+// Last Modified On : Wed Apr 23 15:27:18 2025
+// Update Count     : 61
 //
 
@@ -259,5 +259,5 @@
         }
 
-		static inline thread$ * volatile & ?`next ( thread$ * this ) {
+		static inline thread$ * volatile & next( thread$ * this ) {
 			return this->user_link.next;
 		}
Index: libcfa/src/concurrency/io.cfa
===================================================================
--- libcfa/src/concurrency/io.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/io.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -95,5 +95,5 @@
 	static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
 		thread$ * t = post( this, false );
-		if(kernel) __kernel_unpark( t, hint );
+		if (kernel) __kernel_unpark( t, hint );
 		else unpark( t, hint );
 	}
@@ -108,5 +108,5 @@
 			// do the system call in a loop, repeat on interrupts
 			ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
-			if( ret < 0 ) {
+			if ( ret < 0 ) {
 				switch((int)errno) {
 				case EINTR:
@@ -154,9 +154,9 @@
 			const __u32 tail = *ctx->cq.tail;
 
-			if(head == tail) return false;
+			if (head == tail) return false;
 		}
 
 		// try a simple spinlock acquire, it's likely there are completions to drain
-		if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
+		if ( ! __atomic_try_acquire(&ctx->cq.try_lock)) {
 			// some other processor already has it
 			__STATS__( false, io.calls.locked++; )
@@ -214,5 +214,5 @@
 
 			// we finished draining the completions... unless the ring buffer was full and there are more secret completions in the kernel.
-			if(likely(count < num)) break;
+			if (likely(count < num)) break;
 
 			// the ring buffer was full, there could be more stuff in the kernel.
@@ -243,5 +243,5 @@
 
 		// if submitting must be submitted, do the system call
-		if(ctx.sq.to_submit != 0) {
+		if (ctx.sq.to_submit != 0) {
 			ioring_syscsll(ctx, 0, 0);
 		}
@@ -278,5 +278,5 @@
 			// only help once every other time
 			// pick a target when not helping
-			if(proc->io.target == UINT_MAX) {
+			if (proc->io.target == UINT_MAX) {
 				uint64_t chaos = __tls_rand();
 				// choose who to help and whether to accept helping far processors 
@@ -285,5 +285,5 @@
 
 				// if the processor is on the same cache line or is lucky ( 3 out of 256 odds ) help it
-				if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
+				if (ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
 					proc->io.target = other;
 				}
@@ -294,5 +294,5 @@
 				/* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
 				// make sure the target hasn't stopped existing since last time
-				HELP: if(target < ctxs_count) {
+				HELP: if (target < ctxs_count) {
 					// calculate it's age and how young it could be before we give up on helping
 					const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
@@ -300,5 +300,5 @@
 					__cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
 					// is the target older than the cutoff, recall 0 is oldest and bigger ints are younger
-					if(age <= cutoff) break HELP;
+					if (age <= cutoff) break HELP;
 
 					// attempt to help the submission side
@@ -306,8 +306,8 @@
 
 					// attempt to help the completion side
-					if(!try_acquire(io.data[target])) break HELP; // already acquire no help needed
+					if ( ! try_acquire(io.data[target])) break HELP; // already acquire no help needed
 
 					// actually help
-					if(!__cfa_do_drain( io.data[target], cltr )) break HELP;
+					if ( ! __cfa_do_drain( io.data[target], cltr )) break HELP;
 
 					// track we did help someone
@@ -322,5 +322,5 @@
 
 		// Drain the local queue
-		if(try_acquire( proc->io.ctx )) {
+		if (try_acquire( proc->io.ctx )) {
 			local = __cfa_do_drain( proc->io.ctx, cltr );
 		}
@@ -390,5 +390,5 @@
 
 		// If we don't have enough sqes, fail
-		if((ftail - fhead) < want) { return false; }
+		if ((ftail - fhead) < want) { return false; }
 
 		// copy all the indexes we want from the available list
@@ -422,5 +422,5 @@
 
 		// We can proceed to the fast path
-		if( __alloc(ctx, idxs, want) ) {
+		if ( __alloc(ctx, idxs, want) ) {
 			// Allocation was successful
 			__STATS__( true, io.alloc.fast += 1; )
@@ -456,5 +456,5 @@
 	// barebones logic to submit a group of sqes
 	static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
-		if(!lock) 
+		if ( ! lock )
 			lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );
 		// We can proceed to the fast path
@@ -478,5 +478,5 @@
 		__atomic_store_n(&ctx->proc->io.dirty  , true, __ATOMIC_RELAXED);
 
-		if(!lock) 
+		if ( ! lock )
 			unlock( ctx->ext_sq.lock );
 	}
@@ -487,9 +487,9 @@
 		__submit_only(ctx, idxs, have, false);
 
-		if(sq.to_submit > 30) {
+		if (sq.to_submit > 30) {
 			__tls_stats()->io.flush.full++;
 			__cfa_io_flush( ctx->proc );
 		}
-		if(!lazy) {
+		if ( ! lazy ) {
 			__tls_stats()->io.flush.eager++;
 			__cfa_io_flush( ctx->proc );
@@ -503,5 +503,5 @@
 
 		disable_interrupts();
-		__STATS__( true, if(!lazy) io.submit.eagr += 1; )
+		__STATS__( true, if ( ! lazy ) io.submit.eagr += 1; )
 		struct processor * proc = __cfaabi_tls.this_processor;
 		io_context$ * ctx = proc->io.ctx;
@@ -510,5 +510,5 @@
 
 		// Can we proceed to the fast path
-		if( ctx == inctx )		// We have the right instance?
+		if ( ctx == inctx )		// We have the right instance?
 		{
 			// yes! fast submit
@@ -564,5 +564,5 @@
 		__u32 count = chead - phead;
 
-		if(count == 0) {
+		if (count == 0) {
 			return 0;
 		}
@@ -594,5 +594,5 @@
 		lock( queue.lock __cfaabi_dbg_ctx2 );
 		{
-			was_empty = queue.queue`isEmpty;
+			was_empty = isEmpty( queue.queue );
 
 			// Add our request to the list
@@ -632,5 +632,5 @@
 	// notify the arbiter that new allocations are available
 	static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
-		/* paranoid */ verify( !this.pending.queue`isEmpty );
+		/* paranoid */ verify( ! isEmpty( this.pending.queue ) );
 		/* paranoid */ verify( __preemption_enabled() );
 
@@ -642,14 +642,14 @@
 			// as long as there are pending allocations try to satisfy them
 			// for simplicity do it in FIFO order
-			while( !this.pending.queue`isEmpty ) {
+			while( ! isEmpty( this.pending.queue ) ) {
 				// get first pending allocs
 				__u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
-				__pending_alloc & pa = (__pending_alloc&)(this.pending.queue`first);
+				__pending_alloc & pa = (__pending_alloc&)( first( this.pending.queue ));
 
 				// check if we have enough to satisfy the request
-				if( have > pa.want ) goto DONE;
+				if ( have > pa.want ) goto DONE;
 
 				// if there are enough allocations it means we can drop the request
-				try_pop_front( this.pending.queue );
+				remove_first( this.pending.queue );
 
 				/* paranoid */__attribute__((unused)) bool ret =
@@ -676,5 +676,5 @@
 	// short hand to avoid the mutual exclusion of the pending is empty regardless
 	static void __ioarbiter_notify( io_context$ & ctx ) {
-		if(empty( ctx.arbiter->pending )) return;
+		if (empty( ctx.arbiter->pending )) return;
 		__ioarbiter_notify( *ctx.arbiter, &ctx );
 	}
@@ -700,5 +700,5 @@
 		// if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
 		// if it's not the first enqueue, a signal is already in transit
-		if( we ) {
+		if ( we ) {
 			sigval_t value = { PREEMPT_IO };
 			__cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
@@ -716,5 +716,5 @@
 	static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
 		// if there are no external operations just return
-		if(empty( ctx.ext_sq )) return;
+		if ( empty( ctx.ext_sq ) ) return;
 
 		// stats and logs
@@ -727,7 +727,7 @@
 			// pop each operation one at a time.
 			// There is no wait morphing because of the io sq ring
-			while( !ctx.ext_sq.queue`isEmpty ) {
+			while( ! isEmpty( ctx.ext_sq.queue ) ) {
 				// drop the element from the queue
-				__external_io & ei = (__external_io&)try_pop_front( ctx.ext_sq.queue );
+				__external_io & ei = (__external_io&)remove_first( ctx.ext_sq.queue );
 
 				// submit it
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/kernel.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Tue Jan 17 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Jan  9 08:42:05 2023
-// Update Count     : 77
+// Last Modified On : Fri Apr 25 07:02:42 2025
+// Update Count     : 82
 //
 
@@ -45,5 +45,5 @@
 #pragma GCC diagnostic pop
 
-#if !defined(__CFA_NO_STATISTICS__)
+#if ! defined(__CFA_NO_STATISTICS__)
 	#define __STATS_DEF( ...) __VA_ARGS__
 #else
@@ -158,6 +158,6 @@
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
-	#if !defined(__CFA_NO_STATISTICS__)
-		if( this->print_halts ) {
+	#if ! defined(__CFA_NO_STATISTICS__)
+		if ( this->print_halts ) {
 			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
 		}
@@ -169,5 +169,5 @@
 
 		// if we need to run some special setup, now is the time to do it.
-		if(this->init.thrd) {
+		if (this->init.thrd) {
 			this->init.thrd->curr_cluster = this->cltr;
 			__run_thread(this, this->init.thrd);
@@ -185,5 +185,5 @@
 			readyThread = __next_thread( this->cltr );
 
-			if( !readyThread ) {
+			if ( ! readyThread ) {
 				// there is no point in holding submissions if we are idle
 				__IO_STATS__(true, io.flush.idle++; )
@@ -196,8 +196,8 @@
 			}
 
-			if( !readyThread ) for(5) {
+			if ( ! readyThread ) for(5) {
 				readyThread = __next_thread_slow( this->cltr );
 
-				if( readyThread ) break;
+				if ( readyThread ) break;
 
 				// It's unlikely we still I/O to submit, but the arbiter could
@@ -210,14 +210,14 @@
 
 			HALT:
-			if( !readyThread ) {
+			if ( ! readyThread ) {
 				// Don't block if we are done
-				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+				if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
 
 				// Push self to idle stack
-				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
+				if ( ! mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
 
 				// Confirm the ready-queue is empty
 				readyThread = __next_thread_search( this->cltr );
-				if( readyThread ) {
+				if ( readyThread ) {
 					// A thread was found, cancel the halt
 					mark_awake(this->cltr->procs, * this);
@@ -247,7 +247,7 @@
 
 			// Are we done?
-			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-			if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
+			if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+			if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && ! __atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
 				__IO_STATS__(true, io.flush.dirty++; )
 				__cfa_io_flush( this );
@@ -263,5 +263,5 @@
 	post( this->terminated );
 
-	if(this == mainProcessor) {
+	if (this == mainProcessor) {
 		// HACK : the coroutine context switch expects this_thread to be set
 		// and it make sense for it to be set in all other cases except here
@@ -294,5 +294,6 @@
 
 	// Actually run the thread
-	RUNNING:  while(true) {
+	RUNNING:
+	while( true ) {
 		thrd_dst->preempted = __NO_PREEMPTION;
 
@@ -339,5 +340,5 @@
 		// In case 2, we lost the race so we now own the thread.
 
-		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
+		if (unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// Reset the this_thread now that we know
 			// the state isn't active anymore
@@ -349,5 +350,5 @@
 		}
 
-		if(unlikely(thrd_dst->state == Halting)) {
+		if (unlikely(thrd_dst->state == Halting)) {
 			// Reset the this_thread now that we know
 			// the state isn't active anymore
@@ -418,7 +419,7 @@
 	}
 
-	#if !defined(__CFA_NO_STATISTICS__)
+	#if ! defined(__CFA_NO_STATISTICS__)
 		/* paranoid */ verify( thrd_src->last_proc != 0p );
-		if(thrd_src->last_proc != kernelTLS().this_processor) {
+		if (thrd_src->last_proc != kernelTLS().this_processor) {
 			__tls_stats()->ready.threads.migration++;
 		}
@@ -440,7 +441,7 @@
 	/* paranoid */ verify( thrd->curr_cluster );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+	/* paranoid */ 	if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
 					"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-	/* paranoid */ 	if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
+	/* paranoid */ 	if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
 					"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
@@ -463,8 +464,8 @@
 	__wake_one( cl );
 
-	#if !defined(__CFA_NO_STATISTICS__)
-		if( kernelTLS().this_stats ) {
+	#if ! defined(__CFA_NO_STATISTICS__)
+		if ( kernelTLS().this_stats ) {
 			__tls_stats()->ready.threads.threads++;
-			if(outside) {
+			if (outside) {
 				__tls_stats()->ready.threads.extunpark++;
 			}
@@ -542,7 +543,7 @@
 	/* paranoid */ verify( ready_schedule_islocked());
 
-	if( !thrd ) return;
-
-	if(__must_unpark(thrd)) {
+	if ( ! thrd ) return;
+
+	if (__must_unpark(thrd)) {
 		// Wake lost the race,
 		__schedule_thread( thrd, hint );
@@ -554,7 +555,7 @@
 
 void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
-	if( !thrd ) return;
-
-	if(__must_unpark(thrd)) {
+	if ( ! thrd ) return;
+
+	if (__must_unpark(thrd)) {
 		disable_interrupts();
 			// Wake lost the race,
@@ -592,7 +593,7 @@
 		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
 
-		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
-		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
-		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
+		if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
+		if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
+		if ( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
 
 		thrd->state = Halting;
@@ -618,5 +619,5 @@
 		// If that is the case, abandon the preemption.
 		bool preempted = false;
-		if(thrd->rdy_link.next == 0p) {
+		if (thrd->rdy_link.next == 0p) {
 			preempted = true;
 			thrd->preempted = reason;
@@ -641,8 +642,8 @@
 
 	// If no one is sleeping: we are done
-	if( fdp == 0p ) return;
+	if ( fdp == 0p ) return;
 
 	int fd = 1;
-	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
+	if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
 		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
 	}
@@ -652,6 +653,6 @@
 	case 0:
 		// If the processor isn't ready to sleep then the exchange will already wake it up
-		#if !defined(__CFA_NO_STATISTICS__)
-			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
+		#if ! defined(__CFA_NO_STATISTICS__)
+			if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
 			} else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
 		#endif
@@ -659,6 +660,6 @@
 	case 1:
 		// If someone else already said they will wake them: we are done
-		#if !defined(__CFA_NO_STATISTICS__)
-			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
+		#if ! defined(__CFA_NO_STATISTICS__)
+			if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
 			} else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
 		#endif
@@ -670,6 +671,6 @@
 		/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
 
-		#if !defined(__CFA_NO_STATISTICS__)
-			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
+		#if ! defined(__CFA_NO_STATISTICS__)
+			if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
 			} else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
 		#endif
@@ -710,8 +711,8 @@
 
 		// Someone already told us to wake-up! No time for a nap.
-		if(expected == 1) { return; }
+		if (expected == 1) { return; }
 
 		// Try to mark that we are going to sleep
-		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		if (__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
 			// Every one agreed, taking a nap
 			break;
@@ -720,6 +721,6 @@
 
 
-	#if !defined(__CFA_NO_STATISTICS__)
-		if(this->print_halts) {
+	#if ! defined(__CFA_NO_STATISTICS__)
+		if (this->print_halts) {
 			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
 		}
@@ -731,5 +732,5 @@
 		eventfd_t val;
 		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
-		if(ret < 0) {
+		if (ret < 0) {
 			switch((int)errno) {
 			case EAGAIN:
@@ -746,6 +747,6 @@
 	}
 
-	#if !defined(__CFA_NO_STATISTICS__)
-		if(this->print_halts) {
+	#if ! defined(__CFA_NO_STATISTICS__)
+		if (this->print_halts) {
 			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
 		}
@@ -759,5 +760,5 @@
 
 	/* paranoid */ verify( ! __preemption_enabled() );
-	if(!try_lock( this )) return false;
+	if ( ! try_lock( this )) return false;
 		this.idle++;
 		/* paranoid */ verify( this.idle <= this.total );
@@ -784,5 +785,5 @@
 			// update the pointer to the head wait context
 			struct __fd_waitctx * wctx = 0;
-			if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
+			if ( ! isEmpty( this.idles )) wctx = &first( this. idles ).idle_wctx;
 			__atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
 		}
@@ -798,5 +799,5 @@
 	thread$ * thrd = __cfaabi_tls.this_thread;
 
-	if(thrd) {
+	if (thrd) {
 		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
 		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
@@ -847,5 +848,5 @@
 //-----------------------------------------------------------------------------
 // Statistics
-#if !defined(__CFA_NO_STATISTICS__)
+#if ! defined(__CFA_NO_STATISTICS__)
 	void print_halts( processor & this ) libcfa_public {
 		this.print_halts = true;
@@ -855,5 +856,5 @@
 		/* paranoid */ verify( cltr->stats );
 
-		processor * it = &list`first;
+		processor * it = &first( list );
 		for(unsigned i = 0; i < count; i++) {
 			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
@@ -861,5 +862,5 @@
 			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
 			__tally_stats( cltr->stats, it->local_data->this_stats );
-			it = &(*it)`next;
+			it = &next( *it );
 		}
 	}
Index: libcfa/src/concurrency/kernel/cluster.cfa
===================================================================
--- libcfa/src/concurrency/kernel/cluster.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/kernel/cluster.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -234,5 +234,5 @@
 
 static void assign_list(unsigned & valrq, unsigned & valio, dlist(struct processor) & list, unsigned count) {
-	struct processor * it = &list`first;
+	struct processor * it = &first( list );
 	for(unsigned i = 0; i < count; i++) {
 		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
@@ -245,5 +245,5 @@
 			valio += __shard_factor.io;
 		#endif
-		it = &(*it)`next;
+		it = &next( *it );
 	}
 }
@@ -258,10 +258,10 @@
 #if defined(CFA_HAVE_LINUX_IO_URING_H)
 	static void assign_io(io_context$ ** data, size_t count, dlist(struct processor) & list) {
-		struct processor * it = &list`first;
+		struct processor * it = &first( list );
 		while(it) {
 			/* paranoid */ verifyf( it, "Unexpected null iterator\n");
 			/* paranoid */ verifyf( it->io.ctx->cq.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count);
 			data[it->io.ctx->cq.id] = it->io.ctx;
-			it = &(*it)`next;
+			it = &next( *it );
 		}
 	}
Index: libcfa/src/concurrency/kernel/private.hfa
===================================================================
--- libcfa/src/concurrency/kernel/private.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/kernel/private.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Mon Feb 13 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Mar  2 16:04:46 2023
-// Update Count     : 11
+// Last Modified On : Mon Apr 21 18:08:48 2025
+// Update Count     : 12
 //
 
@@ -287,6 +287,5 @@
 static inline [unsigned, uint_fast32_t] ready_mutate_register() {
 	unsigned id = register_proc_id();
-	uint_fast32_t last = ready_mutate_lock();
-	return [id, last];
+	return [id, ready_mutate_lock()];
 }
 
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -69,6 +69,6 @@
 //-----------------------------------------------------------------------------
 // Start and stop routine for the kernel, declared first to make sure they run first
-static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
-static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_shutdown(void) __attribute__(( destructor( STARTUP_PRIORITY_KERNEL ) ));
 
 //-----------------------------------------------------------------------------
@@ -78,5 +78,5 @@
 static void * __invoke_processor(void * arg);
 static void __kernel_first_resume( processor * this );
-static void __kernel_last_resume ( processor * this );
+static void __kernel_last_resume( processor * this );
 static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT);
 static void deinit(processor & this);
@@ -99,5 +99,5 @@
 extern void __kernel_alarm_shutdown(void);
 extern void __cfa_io_start( processor * );
-extern void __cfa_io_stop ( processor * );
+extern void __cfa_io_stop( processor * );
 
 //-----------------------------------------------------------------------------
@@ -110,15 +110,15 @@
 //-----------------------------------------------------------------------------
 // Kernel storage
-KERNEL_STORAGE(cluster,	             mainCluster);
-KERNEL_STORAGE(processor,            mainProcessor);
-KERNEL_STORAGE(thread$,	             mainThread);
-KERNEL_STORAGE(__stack_t,            mainThreadCtx);
+KERNEL_STORAGE(cluster, mainCluster);
+KERNEL_STORAGE(processor, mainProcessor);
+KERNEL_STORAGE(thread$, mainThread);
+KERNEL_STORAGE(__stack_t, mainThreadCtx);
 #if !defined(__CFA_NO_STATISTICS__)
 KERNEL_STORAGE(__stats_t, mainProcStats);
 #endif
 
-cluster              * mainCluster libcfa_public;
-processor            * mainProcessor;
-thread$              * mainThread;
+cluster * mainCluster libcfa_public;
+processor * mainProcessor;
+thread$ * mainThread;
 
 extern "C" {
@@ -150,8 +150,8 @@
 // Struct to steal stack
 struct current_stack_info_t {
-	__stack_t * storage;  // pointer to stack object
-	void * base;          // base of stack
-	void * limit;         // stack grows towards stack limit
-	void * context;       // address of cfa_context_t
+	__stack_t * storage;								// pointer to stack object
+	void * base;										// base of stack
+	void * limit;										// stack grows towards stack limit
+	void * context;										// address of cfa_context_t
 };
 
@@ -234,5 +234,5 @@
 	//initialize the global state variables
 	__cfaabi_tls.this_processor = mainProcessor;
-	__cfaabi_tls.this_thread    = mainThread;
+	__cfaabi_tls.this_thread = mainThread;
 
 	#if !defined( __CFA_NO_STATISTICS__ )
@@ -355,5 +355,5 @@
 	processor * proc = (processor *) arg;
 	__cfaabi_tls.this_processor = proc;
-	__cfaabi_tls.this_thread    = 0p;
+	__cfaabi_tls.this_thread = 0p;
 	__cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1];
 	proc->local_data = &__cfaabi_tls;
@@ -477,6 +477,6 @@
 	stack.storage = info->storage;
 	with(*stack.storage) {
-		limit     = info->limit;
-		base      = info->base;
+		limit = info->limit;
+		base = info->base;
 	}
 	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
@@ -485,9 +485,9 @@
 	state = Start;
 	starter = 0p;
-	last = 0p;
+	this.last = 0p;
 	cancellation = 0p;
-    ehm_state.ehm_buffer{};
-    ehm_state.buffer_lock{};
-    ehm_state.ehm_enabled = false;
+	ehm_state.ehm_buffer{};
+	ehm_state.buffer_lock{};
+	ehm_state.ehm_enabled = false;
 }
 
@@ -502,5 +502,5 @@
 	self_mon_p = &self_mon;
 	rdy_link.next = 0p;
-	rdy_link.ts   = MAX;
+	rdy_link.ts = MAX;
 	user_link.next = 0p;
 	user_link.prev = 0p;
@@ -509,5 +509,5 @@
 	preferred = ready_queue_new_preferred();
 	last_proc = 0p;
-	PRNG_SET_SEED( random_state,  __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
+	PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
 	#if defined( __CFA_WITH_VERIFY__ )
 		executing = 0p;
@@ -531,8 +531,8 @@
 	this.name = name;
 	this.cltr = &_cltr;
-    __atomic_add_fetch( &_cltr.procs.constructed, 1u, __ATOMIC_RELAXED );
+	__atomic_add_fetch( &_cltr.procs.constructed, 1u, __ATOMIC_RELAXED );
 	this.rdq.its = 0;
 	this.rdq.itr = 0;
-	this.rdq.id  = 0;
+	this.rdq.id = 0;
 	this.rdq.target = MAX;
 	this.rdq.last = MAX;
@@ -545,5 +545,5 @@
 	this.io.ctx = 0p;
 	this.io.pending = false;
-	this.io.dirty   = false;
+	this.io.dirty = false;
 
 	this.init.thrd = initT;
@@ -599,5 +599,5 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
 
-    __atomic_sub_fetch( &this.cltr->procs.constructed, 1u, __ATOMIC_RELAXED );
+	__atomic_sub_fetch( &this.cltr->procs.constructed, 1u, __ATOMIC_RELAXED );
 
 	__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
@@ -619,7 +619,7 @@
 // Cluster
 static void ?{}(__cluster_proc_list & this) {
-	this.fdw   = 0p;
-	this.idle  = 0;
-    this.constructed = 0;
+	this.fdw = 0p;
+	this.idle = 0;
+	this.constructed = 0;
 	this.total = 0;
 }
@@ -706,12 +706,12 @@
 //-----------------------------------------------------------------------------
 // Global Queues
-static void doregister( cluster     & cltr ) {
-	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
+static void doregister( cluster & cltr ) {
+	lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
 	push_front( __cfa_dbg_global_clusters.list, cltr );
-	unlock    ( __cfa_dbg_global_clusters.lock );
-}
-
-static void unregister( cluster     & cltr ) {
-	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
+	unlock( __cfa_dbg_global_clusters.lock );
+}
+
+static void unregister( cluster & cltr ) {
+	lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
 	remove( __cfa_dbg_global_clusters.list, cltr );
 	unlock( __cfa_dbg_global_clusters.lock );
@@ -719,12 +719,12 @@
 
 void doregister( cluster * cltr, thread$ & thrd ) {
-	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
+	lock(cltr->thread_list_lock __cfaabi_dbg_ctx2);
 	cltr->nthreads += 1;
 	insert_first(cltr->threads, thrd);
-	unlock    (cltr->thread_list_lock);
+	unlock(cltr->thread_list_lock);
 }
 
 void unregister( cluster * cltr, thread$ & thrd ) {
-	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
+	lock(cltr->thread_list_lock __cfaabi_dbg_ctx2);
 	{
 		tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) = void;
Index: libcfa/src/concurrency/locks.cfa
===================================================================
--- libcfa/src/concurrency/locks.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/locks.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -79,10 +79,10 @@
 	// lock is held by some other thread
 	if ( owner != 0p && owner != thrd ) {
-        select_node node;
+		select_node node;
 		insert_last( blocked_threads, node );
 		wait_count++;
 		unlock( lock );
 		park( );
-        return;
+		return;
 	} else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread
 		recursion_count++;
@@ -91,5 +91,5 @@
 		recursion_count = 1;
 	}
-    unlock( lock );
+	unlock( lock );
 }
 
@@ -115,16 +115,16 @@
 
 static inline void pop_node( blocking_lock & this ) with( this ) {
-    __handle_waituntil_OR( blocked_threads );
-    select_node * node = &try_pop_front( blocked_threads );
-    if ( node ) {
-        wait_count--;
-        owner = node->blocked_thread;
-        recursion_count = 1;
-        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
-        wake_one( blocked_threads, *node );
-    } else {
-        owner = 0p;
-        recursion_count = 0;
-    }
+	__handle_waituntil_OR( blocked_threads );
+	select_node * node = &remove_first( blocked_threads );
+	if ( node ) {
+		wait_count--;
+		owner = node->blocked_thread;
+		recursion_count = 1;
+		// if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+		wake_one( blocked_threads, *node );
+	} else {
+		owner = 0p;
+		recursion_count = 0;
+	}
 }
 
@@ -160,5 +160,5 @@
 		unpark( t );
 	}
-    unlock( lock );
+	unlock( lock );
 }
 
@@ -172,9 +172,9 @@
 	pop_node( this );
 
-    select_node node;
-    active_thread()->link_node = (void *)&node;
-	unlock( lock );
-
-    pre_park_then_park( pp_fn, pp_datum );
+	select_node node;
+	active_thread()->link_node = (void *)&node;
+	unlock( lock );
+
+	pre_park_then_park( pp_fn, pp_datum );
 
 	return ret;
@@ -187,5 +187,5 @@
 // waituntil() support
 bool register_select( blocking_lock & this, select_node & node ) with(this) {
-    lock( lock __cfaabi_dbg_ctx2 );
+	lock( lock __cfaabi_dbg_ctx2 );
 	thread$ * thrd = active_thread();
 
@@ -193,10 +193,10 @@
 	/* paranoid */ verifyf( owner != thrd || multi_acquisition, "Single acquisition lock holder (%p) attempted to reacquire the lock %p resulting in a deadlock.", owner, &this );
 
-    if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) { // OR special case
-        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
-           unlock( lock );
-           return false;
-        }
-    }
+	if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) { // OR special case
+		if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+			unlock( lock );
+			return false;
+		}
+	}
 
 	// lock is held by some other thread
@@ -205,5 +205,5 @@
 		wait_count++;
 		unlock( lock );
-        return false;
+		return false;
 	} else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread
 		recursion_count++;
@@ -213,28 +213,28 @@
 	}
 
-    if ( node.park_counter ) __make_select_node_available( node );
-    unlock( lock );
-    return true;
+	if ( node.park_counter ) __make_select_node_available( node );
+	unlock( lock );
+	return true;
 }
 
 bool unregister_select( blocking_lock & this, select_node & node ) with(this) {
-    lock( lock __cfaabi_dbg_ctx2 );
-    if ( node`isListed ) {
-        remove( node );
-        wait_count--;
-        unlock( lock );
-        return false;
-    }
-    
-    if ( owner == active_thread() ) {
-        /* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count );
-        // if recursion count is zero release lock and set new owner if one is waiting
-        recursion_count--;
-        if ( recursion_count == 0 ) {
-            pop_node( this );
-        }
-    }
-	unlock( lock );
-    return false;
+	lock( lock __cfaabi_dbg_ctx2 );
+	if ( isListed( node ) ) {
+		remove( node );
+		wait_count--;
+		unlock( lock );
+		return false;
+	}
+	
+	if ( owner == active_thread() ) {
+		/* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count );
+		// if recursion count is zero release lock and set new owner if one is waiting
+		recursion_count--;
+		if ( recursion_count == 0 ) {
+			pop_node( this );
+		}
+	}
+	unlock( lock );
+	return false;
 }
 
@@ -265,10 +265,10 @@
 		// 	may still be called after a thread has been removed from the queue but
 		// 	before the alarm is unregistered
-		if ( (*info_thd)`isListed ) {	// is thread on queue
+		if ( isListed( *info_thd ) ) {					// is thread on queue
 			info_thd->signalled = false;
 			// remove this thread O(1)
 			remove( *info_thd );
 			cond->count--;
-			if( info_thd->lock ) {
+			if ( info_thd->lock ) {
 				// call lock's on_notify if a lock was passed
 				on_notify(*info_thd->lock, info_thd->t);
@@ -304,5 +304,5 @@
 		// 	may still be called after a thread has been removed from the queue but
 		// 	before the alarm is unregistered
-		if ( (*info_thd)`isListed ) {	// is thread on queue
+		if ( isListed( *info_thd ) ) {					// is thread on queue
 			info_thd->signalled = false;
 			// remove this thread O(1)
@@ -332,5 +332,5 @@
 
 	static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
-		if(&popped != 0p) {
+		if (&popped != 0p) {
 			popped.signalled = true;
 			count--;
@@ -347,6 +347,6 @@
 	bool notify_one( condition_variable(L) & this ) with( this ) {
 		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = ! blocked_threads`isEmpty;
-		process_popped(this, try_pop_front( blocked_threads ));
+		bool ret = ! isEmpty( blocked_threads );
+		process_popped(this, remove_first( blocked_threads ));
 		unlock( lock );
 		return ret;
@@ -355,7 +355,7 @@
 	bool notify_all( condition_variable(L) & this ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = ! blocked_threads`isEmpty;
-		while( ! blocked_threads`isEmpty ) {
-			process_popped(this, try_pop_front( blocked_threads ));
+		bool ret = ! isEmpty( blocked_threads );
+		while( ! isEmpty( blocked_threads ) ) {
+			process_popped(this, remove_first( blocked_threads ));
 		}
 		unlock( lock );
@@ -364,10 +364,10 @@
 
 	uintptr_t front( condition_variable(L) & this ) with(this) {
-		return blocked_threads`isEmpty ? NULL : blocked_threads`first.info;
+		return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info;
 	}
 
 	bool empty( condition_variable(L) & this ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = blocked_threads`isEmpty;
+		bool ret = isEmpty( blocked_threads );
 		unlock( lock );
 		return ret;
@@ -382,22 +382,22 @@
 	}
 
-    static size_t block_and_get_recursion( info_thread(L) & i, __cfa_pre_park pp_fn, void * pp_datum ) {
-        size_t recursion_count = 0;
+	static size_t block_and_get_recursion( info_thread(L) & i, __cfa_pre_park pp_fn, void * pp_datum ) {
+		size_t recursion_count = 0;
 		if ( i.lock ) // if lock was passed get recursion count to reset to after waking thread
 			recursion_count = on_wait( *i.lock, pp_fn, pp_datum ); // this call blocks
 		else
-            pre_park_then_park( pp_fn, pp_datum );
-        return recursion_count;
-    }
-    static size_t block_and_get_recursion( info_thread(L) & i ) { return block_and_get_recursion( i, pre_park_noop, 0p ); }
+			pre_park_then_park( pp_fn, pp_datum );
+		return recursion_count;
+	}
+	static size_t block_and_get_recursion( info_thread(L) & i ) { return block_and_get_recursion( i, pre_park_noop, 0p ); }
 
 	// helper for wait()'s' with no timeout
 	static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
-        enqueue_thread( this, &i );
+		enqueue_thread( this, &i );
 		unlock( lock );
 
 		// blocks here
-        size_t recursion_count = block_and_get_recursion( i );
+		size_t recursion_count = block_and_get_recursion( i );
 
 		// resets recursion count here after waking
@@ -409,15 +409,15 @@
 		queue_info_thread( this, i );
 
-    static void cond_alarm_register( void * node_ptr ) { register_self( (alarm_node_t *)node_ptr ); }
+	static void cond_alarm_register( void * node_ptr ) { register_self( (alarm_node_t *)node_ptr ); }
 
 	// helper for wait()'s' with a timeout
 	static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
-        enqueue_thread( this, &info );
+		enqueue_thread( this, &info );
 		alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
 		unlock( lock );
 
 		// blocks here and registers alarm node before blocking after releasing locks to avoid deadlock
-        size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
+		size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
 		// park();
 
@@ -434,17 +434,17 @@
 		return i.signalled;
 
-	void wait( condition_variable(L) & this                        ) with(this) { WAIT( 0, 0p    ) }
-	void wait( condition_variable(L) & this, uintptr_t info        ) with(this) { WAIT( info, 0p ) }
-	void wait( condition_variable(L) & this, L & l                 ) with(this) { WAIT( 0, &l    ) }
+	void wait( condition_variable(L) & this ) with(this) { WAIT( 0, 0p ) }
+	void wait( condition_variable(L) & this, uintptr_t info ) with(this) { WAIT( info, 0p ) }
+	void wait( condition_variable(L) & this, L & l  ) with(this) { WAIT( 0, &l ) }
 	void wait( condition_variable(L) & this, L & l, uintptr_t info ) with(this) { WAIT( info, &l ) }
 
-	bool wait( condition_variable(L) & this, Duration duration                        ) with(this) { WAIT_TIME( 0   , 0p , duration ) }
-	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration        ) with(this) { WAIT_TIME( info, 0p , duration ) }
-	bool wait( condition_variable(L) & this, L & l, Duration duration                 ) with(this) { WAIT_TIME( 0   , &l , duration ) }
+	bool wait( condition_variable(L) & this, Duration duration ) with(this) { WAIT_TIME( 0 , 0p , duration ) }
+	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, 0p , duration ) }
+	bool wait( condition_variable(L) & this, L & l, Duration duration  ) with(this) { WAIT_TIME( 0 , &l , duration ) }
 	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) }
 
 	//-----------------------------------------------------------------------------
 	// fast_cond_var
-	void  ?{}( fast_cond_var(L) & this ){
+	void ?{}( fast_cond_var(L) & this ){
 		this.blocked_threads{};
 		#ifdef __CFA_DEBUG__
@@ -455,7 +455,7 @@
 
 	bool notify_one( fast_cond_var(L) & this ) with(this) {
-		bool ret = ! blocked_threads`isEmpty;
+		bool ret = ! isEmpty( blocked_threads );
 		if ( ret ) {
-			info_thread(L) & popped = try_pop_front( blocked_threads );
+			info_thread(L) & popped = remove_first( blocked_threads );
 			on_notify(*popped.lock, popped.t);
 		}
@@ -463,7 +463,7 @@
 	}
 	bool notify_all( fast_cond_var(L) & this ) with(this) {
-		bool ret = ! blocked_threads`isEmpty;
-		while( ! blocked_threads`isEmpty ) {
-			info_thread(L) & popped = try_pop_front( blocked_threads );
+		bool ret = ! isEmpty( blocked_threads );
+		while( ! isEmpty( blocked_threads ) ) {
+			info_thread(L) & popped = remove_first( blocked_threads );
 			on_notify(*popped.lock, popped.t);
 		}
@@ -471,6 +471,6 @@
 	}
 
-	uintptr_t front( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
-	bool empty ( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
+	uintptr_t front( fast_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info; }
+	bool empty ( fast_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ); }
 
 	void wait( fast_cond_var(L) & this, L & l ) {
@@ -494,5 +494,5 @@
 	// pthread_cond_var
 
-	void  ?{}( pthread_cond_var(L) & this ) with(this) {
+	void ?{}( pthread_cond_var(L) & this ) with(this) {
 		blocked_threads{};
 		lock{};
@@ -503,7 +503,7 @@
 	bool notify_one( pthread_cond_var(L) & this ) with(this) { 
 		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = ! blocked_threads`isEmpty;
+		bool ret = ! isEmpty( blocked_threads );
 		if ( ret ) {
-			info_thread(L) & popped = try_pop_front( blocked_threads );
+			info_thread(L) & popped = remove_first( blocked_threads );
 			popped.signalled = true;
 			on_notify(*popped.lock, popped.t);
@@ -515,7 +515,7 @@
 	bool notify_all( pthread_cond_var(L) & this ) with(this) { 
 		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = ! blocked_threads`isEmpty;
-		while( ! blocked_threads`isEmpty ) {
-			info_thread(L) & popped = try_pop_front( blocked_threads );
+		bool ret = ! isEmpty( blocked_threads );
+		while( ! isEmpty( blocked_threads ) ) {
+			info_thread(L) & popped = remove_first( blocked_threads );
 			popped.signalled = true;
 			on_notify(*popped.lock, popped.t);
@@ -525,15 +525,15 @@
 	}
 
-	uintptr_t front( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
-	bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
+	uintptr_t front( pthread_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info; }
+	bool empty ( pthread_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ); }
 
 	static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
-        insert_last( blocked_threads, info );
+		insert_last( blocked_threads, info );
 		pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
 		unlock( lock );
 
 		// blocks here and registers alarm node before blocking after releasing locks to avoid deadlock
-        size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
+		size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
 
 		// unregisters alarm so it doesn't go off if signal happens first
@@ -551,8 +551,8 @@
 		lock( lock __cfaabi_dbg_ctx2 );
 		info_thread( L ) i = { active_thread(), info, &l };
-        insert_last( blocked_threads, i );
-		unlock( lock );
-
-        // blocks here
+		insert_last( blocked_threads, i );
+		unlock( lock );
+
+		// blocks here
 		size_t recursion_count = block_and_get_recursion( i );
 
@@ -579,5 +579,5 @@
 	}
 	
-	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t  ) {
+	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t ) {
 		PTHREAD_WAIT_TIME( info, &l , getDuration( t ) )
 	}
@@ -585,5 +585,5 @@
 //-----------------------------------------------------------------------------
 // Semaphore
-void  ?{}( semaphore & this, int count = 1 ) {
+void ?{}( semaphore & this, int count = 1 ) {
 	(this.lock){};
 	this.count = count;
@@ -603,8 +603,7 @@
 		park();
 		return true;
-	}
-	else {
-	    unlock( lock );
-	    return false;
+	} else {
+		unlock( lock );
+		return false;
 	}
 }
@@ -622,5 +621,5 @@
 
 	// make new owner
-	if( doUnpark ) unpark( thrd );
+	if ( doUnpark ) unpark( thrd );
 
 	return thrd;
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/locks.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -11,6 +11,6 @@
 // Created On       : Thu Jan 21 19:46:50 2021
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Dec 24 09:36:52 2024
-// Update Count     : 16
+// Last Modified On : Fri Apr 25 07:14:16 2025
+// Update Count     : 22
 //
 
@@ -56,6 +56,6 @@
 
 static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
-    pp_fn( pp_datum );
-    park();
+	pp_fn( pp_datum );
+	park();
 }
 
@@ -63,20 +63,20 @@
 
 #define DEFAULT_ON_NOTIFY( lock_type ) \
-    static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
+	static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
 
 #define DEFAULT_ON_WAIT( lock_type ) \
-    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
-        unlock( this ); \
-        pre_park_then_park( pp_fn, pp_datum ); \
-        return 0; \
-    }
+	static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
+		unlock( this ); \
+		pre_park_then_park( pp_fn, pp_datum ); \
+		return 0; \
+	}
 
 // on_wakeup impl if lock should be reacquired after waking up
 #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
-    static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
+	static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
 
 // on_wakeup impl if lock will not be reacquired after waking up
 #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
-    static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
+	static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
 
 
@@ -142,5 +142,5 @@
 static inline void ?{}( mcs_node & this ) { this.next = 0p; }
 
-static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
+static inline mcs_node * volatile & next( mcs_node * node ) {
 	return node->next;
 }
@@ -156,6 +156,6 @@
 
 static inline void unlock( mcs_lock & l, mcs_node & n ) {
-	mcs_node * next = advance( l.queue, &n );
-	if ( next ) post( next->sem );
+	mcs_node * nxt = advance( l.queue, &n );
+	if ( nxt ) post( nxt->sem );
 }
 
@@ -181,5 +181,5 @@
 
 static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
-    n.locked = true;
+	n.locked = true;
 
 	#if defined( __ARM_ARCH )
@@ -187,7 +187,7 @@
 	#endif
 
-	mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
-	if ( prev == 0p ) return;
-	prev->next = &n;
+	mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
+	if ( prev_val == 0p ) return;
+	prev_val->next = &n;
 	
 	#if defined( __ARM_ARCH )
@@ -234,5 +234,5 @@
 // to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
 static inline int futex( int *uaddr, int futex_op, int val ) {
-    return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
+	return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
 }
 
@@ -271,5 +271,5 @@
 static inline void unlock( futex_mutex & this ) with( this ) {
 	// if uncontended do atomic unlock and then return
-    if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
+	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
 	
 	// otherwise threads are blocked so we must wake one
@@ -311,34 +311,34 @@
 	int state, init_state;
 
-    // speculative grab
-    state = internal_exchange( this, 1 );
-    if ( ! state ) return;								// state == 0
-    init_state = state;
-    for () {
-        for ( 4 ) {
-            while ( ! val ) {							// lock unlocked
-                state = 0;
-                if ( internal_try_lock( this, state, init_state ) ) return;
-            }
-            for ( 30 ) Pause();
-        }
-
-        while ( ! val ) {								// lock unlocked
-            state = 0;
-            if ( internal_try_lock( this, state, init_state ) ) return;
-        }
-        sched_yield();
-        
-        // if not in contended state, set to be in contended state
-        state = internal_exchange( this, 2 );
-        if ( ! state ) return;							// state == 0
-        init_state = 2;
-        futex( (int*)&val, FUTEX_WAIT, 2 );				// if val is not 2 this returns with EWOULDBLOCK
-    }
+	// speculative grab
+	state = internal_exchange( this, 1 );
+	if ( ! state ) return;								// state == 0
+	init_state = state;
+	for () {
+		for ( 4 ) {
+			while ( ! val ) {							// lock unlocked
+				state = 0;
+				if ( internal_try_lock( this, state, init_state ) ) return;
+			}
+			for ( 30 ) Pause();
+		}
+
+		while ( ! val ) {								// lock unlocked
+			state = 0;
+			if ( internal_try_lock( this, state, init_state ) ) return;
+		}
+		sched_yield();
+		
+		// if not in contended state, set to be in contended state
+		state = internal_exchange( this, 2 );
+		if ( ! state ) return;							// state == 0
+		init_state = 2;
+		futex( (int*)&val, FUTEX_WAIT, 2 );				// if val is not 2 this returns with EWOULDBLOCK
+	}
 }
 
 static inline void unlock( go_mutex & this ) with( this ) {
 	// if uncontended do atomic unlock and then return
-    if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
+	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
 	
 	// otherwise threads are blocked so we must wake one
@@ -384,11 +384,11 @@
 
 static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
-    lock( spinlock __cfaabi_dbg_ctx2 );
-    if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
-        unlock( spinlock );
-        return true;
-    }
-    insert_last( blocked_threads, *active_thread() );
-    unlock( spinlock );
+	lock( spinlock __cfaabi_dbg_ctx2 );
+	if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
+		unlock( spinlock );
+		return true;
+	}
+	insert_last( blocked_threads, *active_thread() );
+	unlock( spinlock );
 	park( );
 	return true;
@@ -415,9 +415,9 @@
 
 static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
-    if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
-    lock( spinlock __cfaabi_dbg_ctx2 );
-    thread$ * t = &try_pop_front( blocked_threads );
-    unlock( spinlock );
-    unpark( t );
+	if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
+	lock( spinlock __cfaabi_dbg_ctx2 );
+	thread$ * t = &remove_first( blocked_threads );
+	unlock( spinlock );
+	unpark( t );
 }
 
@@ -469,5 +469,5 @@
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
-	thread$ * t = &try_pop_front( blocked_threads );
+	thread$ * t = &remove_first( blocked_threads );
 	held = ( t ? true : false );
 	unpark( t );
@@ -476,7 +476,7 @@
 
 static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
-    lock( lock __cfaabi_dbg_ctx2 );
-    insert_last( blocked_threads, *t );
-    unlock( lock );
+	lock( lock __cfaabi_dbg_ctx2 );
+	insert_last( blocked_threads, *t );
+	unlock( lock );
 }
 DEFAULT_ON_WAIT( fast_block_lock )
@@ -521,5 +521,5 @@
 
 	if ( owner != 0p ) {
-        select_node node;
+		select_node node;
 		insert_last( blocked_threads, node );
 		unlock( lock );
@@ -533,15 +533,15 @@
 
 static inline void pop_node( simple_owner_lock & this ) with( this ) {
-    __handle_waituntil_OR( blocked_threads );
-    select_node * node = &try_pop_front( blocked_threads );
-    if ( node ) {
-        owner = node->blocked_thread;
-        recursion_count = 1;
-        // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
-        wake_one( blocked_threads, *node );
-    } else {
-        owner = 0p;
-        recursion_count = 0;
-    }
+	__handle_waituntil_OR( blocked_threads );
+	select_node * node = &remove_first( blocked_threads );
+	if ( node ) {
+		owner = node->blocked_thread;
+		recursion_count = 1;
+		// if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+		wake_one( blocked_threads, *node );
+	} else {
+		owner = 0p;
+		recursion_count = 0;
+	}
 }
 
@@ -582,9 +582,9 @@
 	pop_node( this );
 
-    select_node node;
-    active_thread()->link_node = (void *)&node;
-	unlock( lock );
-
-    pre_park_then_park( pp_fn, pp_datum );
+	select_node node;
+	active_thread()->link_node = (void *)&node;
+	unlock( lock );
+
+	pre_park_then_park( pp_fn, pp_datum );
 
 	return ret;
@@ -595,51 +595,51 @@
 // waituntil() support
 static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
-    lock( lock __cfaabi_dbg_ctx2 );
-
-    // check if we can complete operation. If so race to establish winner in special OR case
-    if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
-        if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
-           unlock( lock );
-           return false;
-        }
-    }
-
-    if ( owner == active_thread() ) {
+	lock( lock __cfaabi_dbg_ctx2 );
+
+	// check if we can complete operation. If so race to establish winner in special OR case
+	if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+		if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+			unlock( lock );
+			return false;
+		}
+	}
+
+	if ( owner == active_thread() ) {
 		recursion_count++;
-        if ( node.park_counter ) __make_select_node_available( node );
-        unlock( lock );
+		if ( node.park_counter ) __make_select_node_available( node );
+		unlock( lock );
 		return true;
 	}
 
-    if ( owner != 0p ) {
+	if ( owner != 0p ) {
 		insert_last( blocked_threads, node );
 		unlock( lock );
 		return false;
 	}
-    
+	
 	owner = active_thread();
 	recursion_count = 1;
 
-    if ( node.park_counter ) __make_select_node_available( node );
-    unlock( lock );
-    return true;
+	if ( node.park_counter ) __make_select_node_available( node );
+	unlock( lock );
+	return true;
 }
 
 static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
-    lock( lock __cfaabi_dbg_ctx2 );
-    if ( node`isListed ) {
-        remove( node );
-        unlock( lock );
-        return false;
-    }
-
-    if ( owner == active_thread() ) {
-        recursion_count--;
-        if ( recursion_count == 0 ) {
-            pop_node( this );
-        }
-    }
-    unlock( lock );
-    return false;
+	lock( lock __cfaabi_dbg_ctx2 );
+	if ( isListed( node ) ) {
+		remove( node );
+		unlock( lock );
+		return false;
+	}
+
+	if ( owner == active_thread() ) {
+		recursion_count--;
+		if ( recursion_count == 0 ) {
+			pop_node( this );
+		}
+	}
+	unlock( lock );
+	return false;
 }
 
Index: libcfa/src/concurrency/monitor.cfa
===================================================================
--- libcfa/src/concurrency/monitor.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/monitor.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -9,7 +9,7 @@
 // Author           : Thierry Delisle
 // Created On       : Thd Feb 23 12:27:26 2017
-// Last Modified By : Kyoung Seo
-// Last Modified On : Thd Jan 16 12:59:00 2025
-// Update Count     : 73
+// Last Modified By : Peter A. Buhr
+// Last Modified On : Fri Apr 25 07:20:22 2025
+// Update Count     : 80
 //
 
@@ -78,5 +78,5 @@
 	__spinlock_t * locks[count];							/* We need to pass-in an array of locks to BlockInternal */
 
-#define monitor_save    save   ( monitors, count, locks, recursions, masks )
+#define monitor_save save ( monitors, count, locks, recursions, masks )
 #define monitor_restore restore( monitors, count, locks, recursions, masks )
 
@@ -95,14 +95,14 @@
 	if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
 		abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
-	} else if ( !this->owner ) {
+	} else if ( ! this->owner ) {
 		// No one has the monitor, just take it
 		__set_owner( this, thrd );
 
-		__cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
+		__cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
 	} else if ( this->owner == thrd) {
 		// We already have the monitor, just note how many times we took it
 		this->recursion += 1;
 
-		__cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
+		__cfaabi_dbg_print_safe( "Kernel : mon already owned \n" );
 	} else if ( is_accepted( this, group) ) {
 		// Some one was waiting for us, enter
@@ -112,7 +112,7 @@
 		reset_mask( this );
 
-		__cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
+		__cfaabi_dbg_print_safe( "Kernel : mon accepts \n" );
 	} else {
-		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
+		__cfaabi_dbg_print_safe( "Kernel : blocking \n" );
 
 		// Some one else has the monitor, wait in line for it
@@ -124,5 +124,5 @@
 		park();
 
-		__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
+		__cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
 
 		/* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
@@ -130,5 +130,5 @@
 	}
 
-	__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
+	__cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
 
 	/* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
@@ -152,5 +152,5 @@
 
 
-	if ( !this->owner ) {
+	if ( ! this->owner ) {
 		__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
 
@@ -159,9 +159,9 @@
 
 		/* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
-		/* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled );
+		/* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled );
 
 		unlock( this->lock );
 		return;
-	} else if ( this->owner == thrd && !join) {
+	} else if ( this->owner == thrd && ! join) {
 		// We already have the monitor... but where about to destroy it so the nesting will fail
 		// Abort!
@@ -179,5 +179,5 @@
 
 		/* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
-		/* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled );
+		/* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled );
 
 		unlock( this->lock );
@@ -186,5 +186,5 @@
 
 	// The monitor is busy, if this is a thread and the thread owns itself, it better be active
-	/* paranoid */ verify( !is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) );
+	/* paranoid */ verify( ! is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) );
 
 	__lock_size_t count = 1;
@@ -192,5 +192,5 @@
 	__monitor_group_t group = { &this, 1, func };
 	if ( is_accepted( this, group) ) {
-		__cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
+		__cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" );
 
 		// Wake the thread that is waiting for this
@@ -220,5 +220,5 @@
 		return;
 	} else {
-		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
+		__cfaabi_dbg_print_safe( "Kernel : blocking \n" );
 
 		wait_ctx( thrd, 0 )
@@ -254,5 +254,5 @@
 	// it means we don't need to do anything
 	if ( this->recursion != 0) {
-		__cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
+		__cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion);
 		unlock( this->lock );
 		return;
@@ -264,5 +264,5 @@
 	// Check the new owner is consistent with who we wake-up
 	// new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
-	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+	/* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
 
 	// We can now let other threads in safely
@@ -270,5 +270,5 @@
 
 	//We need to wake-up the thread
-	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+	/* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
 	unpark( new_owner );
 }
@@ -280,5 +280,5 @@
 			abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner);
 		}
-		if ( this->recursion != 1  && !join ) {
+		if ( this->recursion != 1 && ! join ) {
 			abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
 		}
@@ -317,5 +317,5 @@
 
 	// Unpark the next owner if needed
-	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+	/* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( thrd->state == Halted );
@@ -424,15 +424,15 @@
 
 static void ?{}(__condition_criterion_t & this ) with( this ) {
-	ready  = false;
+	ready = false;
 	target = 0p;
-	owner  = 0p;
-	next   = 0p;
+	owner = 0p;
+	this.next = 0p;
 }
 
 static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
-	this.ready  = false;
+	this.ready = false;
 	this.target = target;
-	this.owner  = &owner;
-	this.next   = 0p;
+	this.owner = &owner;
+	this.next = 0p;
 }
 
@@ -525,5 +525,5 @@
 	for ( i; count ) {
 		__condition_criterion_t * crit = &node->criteria[i];
-		assert( !crit->ready );
+		assert( ! crit->ready );
 		push( crit->target->signal_stack, crit );
 	}
@@ -536,5 +536,5 @@
 
 bool signal_block( condition & this ) libcfa_public {
-	if ( !this.blocked.head ) { return false; }
+	if ( ! this.blocked.head ) { return false; }
 
 	//Check that everything is as expected
@@ -571,5 +571,5 @@
 	// WE WOKE UP
 
-	__cfaabi_dbg_print_buffer_local( "Kernel :   signal_block returned\n" );
+	__cfaabi_dbg_print_buffer_local( "Kernel : signal_block returned\n" );
 
 	//We are back, restore the masks and recursions
@@ -581,5 +581,5 @@
 // Access the user_info of the thread waiting at the front of the queue
 uintptr_t front( condition & this ) libcfa_public {
-	verifyf( !is_empty(this),
+	verifyf( ! is_empty(this),
 		"Attempt to access user data on an empty condition.\n"
 		"Possible cause is not checking if the condition is empty before reading stored data."
@@ -624,13 +624,13 @@
 	{
 		// Check if the entry queue
-		thread$ * next; int index;
-		[next, index] = search_entry_queue( mask, monitors, count );
-
-		if ( next ) {
+		thread$ * nxt; int index;
+		[nxt, index] = search_entry_queue( mask, monitors, count );
+
+		if ( nxt ) {
 			*mask.accepted = index;
 			__acceptable_t& accepted = mask[index];
 			if ( accepted.is_dtor ) {
 				__cfaabi_dbg_print_buffer_local( "Kernel : dtor already there\n" );
-				verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
+				verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );
 
 				monitor$ * mon2dtor = accepted[0];
@@ -651,5 +651,5 @@
 				monitor_save;
 
-				__cfaabi_dbg_print_buffer_local( "Kernel :  baton of %"PRIdFAST16" monitors : ", count );
+				__cfaabi_dbg_print_buffer_local( "Kernel : baton of %"PRIdFAST16" monitors : ", count );
 				#ifdef __CFA_DEBUG_PRINT__
 					for ( i; count ) {
@@ -660,5 +660,5 @@
 
 				// Set the owners to be the next thread
-				__set_owner( monitors, count, next );
+				__set_owner( monitors, count, nxt );
 
 				// unlock all the monitors
@@ -666,5 +666,5 @@
 
 				// unpark the thread we signalled
-				unpark( next );
+				unpark( nxt );
 
 				//Everything is ready to go to sleep
@@ -741,11 +741,11 @@
 	/* paranoid */ verify ( monitors[0]->lock.lock );
 	/* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
-	monitors[0]->owner        = owner;
-	monitors[0]->recursion    = 1;
+	monitors[0]->owner = owner;
+	monitors[0]->recursion = 1;
 	for ( i; 1~count ) {
 		/* paranoid */ verify ( monitors[i]->lock.lock );
 		/* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] );
-		monitors[i]->owner        = owner;
-		monitors[i]->recursion    = 0;
+		monitors[i]->owner = owner;
+		monitors[i]->recursion = 0;
 	}
 }
@@ -765,5 +765,5 @@
 static inline thread$ * next_thread( monitor$ * this ) {
 	//Check the signaller stack
-	__cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
+	__cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
 	__condition_criterion_t * urgent = pop( this->signal_stack );
 	if ( urgent ) {
@@ -771,6 +771,6 @@
 		//regardless of if we are ready to baton pass,
 		//we need to set the monitor as in use
-		/* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
-		__set_owner( this,  urgent->owner->waiting_thread );
+		/* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
+		__set_owner( this, urgent->owner->waiting_thread );
 
 		return check_condition( urgent );
@@ -780,6 +780,6 @@
 	// Get the next thread in the entry_queue
 	thread$ * new_owner = pop_head( this->entry_queue );
-	/* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
-	/* paranoid */ verify( !new_owner || new_owner->user_link.next == 0p );
+	/* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
+	/* paranoid */ verify( ! new_owner || new_owner->user_link.next == 0p );
 	__set_owner( this, new_owner );
 
@@ -792,5 +792,5 @@
 
 	// Check if there are any acceptable functions
-	if ( !it ) return false;
+	if ( ! it ) return false;
 
 	// If this isn't the first monitor to test this, there is no reason to repeat the test.
@@ -820,5 +820,5 @@
 	for ( i; count ) {
 		(criteria[i]){ monitors[i], waiter };
-		__cfaabi_dbg_print_safe( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
+		__cfaabi_dbg_print_safe( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] );
 		push( criteria[i].target->signal_stack, &criteria[i] );
 	}
@@ -902,5 +902,5 @@
 	}
 
-	__cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
+	__cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
 	return ready2run ? node->waiting_thread : 0p;
 }
@@ -908,5 +908,5 @@
 static inline void brand_condition( condition & this ) {
 	thread$ * thrd = active_thread();
-	if ( !this.monitors ) {
+	if ( ! this.monitors ) {
 		// __cfaabi_dbg_print_safe( "Branding\n" );
 		assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
@@ -928,5 +928,5 @@
 	for ( __acceptable_t * it = begin; it != end; it++, i++ ) {
 		#if defined( __CFA_WITH_VERIFY__ )
-		thread$ * last = 0p;
+		thread$ * prior = 0p;
 		#endif // __CFA_WITH_VERIFY__
 
@@ -934,7 +934,7 @@
 			thread$ * curr = *thrd_it;
 
-			/* paranoid */ verifyf( !last || last->user_link.next == curr, "search not making progress, from %p (%p) to %p",
-									last, last->user_link.next, curr );
-			/* paranoid */ verifyf( curr != last, "search not making progress, from %p to %p", last, curr );
+			/* paranoid */ verifyf( ! prior || prior->user_link.next == curr, "search not making progress, from %p (%p) to %p",
+									prior, prior->user_link.next, curr );
+			/* paranoid */ verifyf( curr != prior, "search not making progress, from %p to %p", prior, curr );
 
 			// For each thread in the entry-queue check for a match
@@ -945,5 +945,5 @@
 
 			#if defined( __CFA_WITH_VERIFY__ )
-			last = curr;
+			prior = curr;
 			#endif
 		} // for
@@ -1001,16 +1001,16 @@
 	if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
 		abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
-	} else if ( !this->owner ) {
+	} else if ( ! this->owner ) {
 		// No one has the monitor, just take it
 		__set_owner( this, thrd );
 
-		__cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
+		__cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
 	} else if ( this->owner == thrd) {
 		// We already have the monitor, just note how many times we took it
 		this->recursion += 1;
 
-		__cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
+		__cfaabi_dbg_print_safe( "Kernel : mon already owned \n" );
 	} else {
-		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
+		__cfaabi_dbg_print_safe( "Kernel : blocking \n" );
 
 		// Some one else has the monitor, wait in line for it
@@ -1022,5 +1022,5 @@
 		park();
 
-		__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
+		__cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
 
 		/* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
@@ -1028,5 +1028,5 @@
 	}
 
-	__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
+	__cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
 
 	/* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/preemption.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,6 +10,6 @@
 // Created On       : Mon Jun 5 14:20:42 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Jan  9 08:42:59 2023
-// Update Count     : 60
+// Last Modified On : Fri Apr 25 07:24:39 2025
+// Update Count     : 63
 //
 
@@ -39,5 +39,5 @@
 __attribute__((weak)) Duration default_preemption() libcfa_public {
 	const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
-	if(!preempt_rate_s) {
+	if ( !preempt_rate_s) {
 		__cfadbg_print_safe(preemption, "No CFA_DEFAULT_PREEMPTION in ENV\n");
 		return __CFA_DEFAULT_PREEMPTION__;
@@ -46,9 +46,9 @@
 	char * endptr = 0p;
 	long int preempt_rate_l = strtol(preempt_rate_s, &endptr, 10);
-	if(preempt_rate_l < 0 || preempt_rate_l > 65535) {
+	if (preempt_rate_l < 0 || preempt_rate_l > 65535) {
 		__cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION out of range : %ld\n", preempt_rate_l);
 		return __CFA_DEFAULT_PREEMPTION__;
 	}
-	if('\0' != *endptr) {
+	if ('\0' != *endptr) {
 		__cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION not a decimal number : %s\n", preempt_rate_s);
 		return __CFA_DEFAULT_PREEMPTION__;
@@ -64,9 +64,9 @@
 // FwdDeclarations : Signal handlers
 static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
-static void sigHandler_alarm    ( __CFA_SIGPARMS__ );
-static void sigHandler_segv     ( __CFA_SIGPARMS__ );
-static void sigHandler_ill      ( __CFA_SIGPARMS__ );
-static void sigHandler_fpe      ( __CFA_SIGPARMS__ );
-static void sigHandler_abort    ( __CFA_SIGPARMS__ );
+static void sigHandler_alarm( __CFA_SIGPARMS__ );
+static void sigHandler_segv( __CFA_SIGPARMS__ );
+static void sigHandler_ill( __CFA_SIGPARMS__ );
+static void sigHandler_fpe( __CFA_SIGPARMS__ );
+static void sigHandler_abort( __CFA_SIGPARMS__ );
 
 // FwdDeclarations : alarm thread main
@@ -86,8 +86,8 @@
 #endif
 
-KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
-event_kernel_t * event_kernel;                        // kernel public handle to even kernel
-static pthread_t alarm_thread;                        // pthread handle to alarm thread
-static void * alarm_stack;							  // pthread stack for alarm thread
+KERNEL_STORAGE(event_kernel_t, event_kernel);			// private storage for event kernel
+event_kernel_t * event_kernel;							// kernel public handle to even kernel
+static pthread_t alarm_thread;							// pthread handle to alarm thread
+static void * alarm_stack;								// pthread stack for alarm thread
 
 static void ?{}(event_kernel_t & this) with( this ) {
@@ -102,7 +102,7 @@
 // Get next expired node
 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
-	if( ! & (*alarms)`first ) return 0p;						// If no alarms return null
-	if( (*alarms)`first.deadline >= currtime ) return 0p;	// If alarms head not expired return null
-	return pop(alarms);									// Otherwise just pop head
+	if ( ! & first( *alarms ) ) return 0p;				  // If no alarms return null
+	if ( first( *alarms ).deadline >= currtime ) return 0p; // If alarms head not expired return null
+	return pop(alarms);									  // Otherwise just pop head
 }
 
@@ -117,6 +117,6 @@
 		__cfadbg_print_buffer_decl( preemption, " KERNEL: preemption tick %lu\n", currtime.tn);
 		Duration period = node->period;
-		if( period == 0 ) {
-			node->set = false;                  // Node is one-shot, just mark it as not pending
+		if ( period == 0 ) {
+			node->set = false;				  // Node is one-shot, just mark it as not pending
 		}
 
@@ -125,8 +125,8 @@
 
 		// Check if this is a kernel
-		if( node->type == Kernel ) {
+		if ( node->type == Kernel ) {
 			preempt( node->proc );
 		}
-		else if( node->type == User ) {
+		else if ( node->type == User ) {
 			__cfadbg_print_buffer_local( preemption, " KERNEL: alarm unparking %p.\n", node->thrd );
 			timeout( node->thrd );
@@ -137,14 +137,14 @@
 
 		// Check if this is a periodic alarm
-		if( period > 0 ) {
+		if ( period > 0 ) {
 			__cfadbg_print_buffer_local( preemption, " KERNEL: alarm period is %lu.\n", period`ns );
 			node->deadline = currtime + period;  // Alarm is periodic, add currtime to it (used cached current time)
-			insert( alarms, node );             // Reinsert the node for the next time it triggers
+			insert( alarms, node );			 // Reinsert the node for the next time it triggers
 		}
 	}
 
 	// If there are still alarms pending, reset the timer
-	if( & (*alarms)`first ) {
-		Duration delta = (*alarms)`first.deadline - currtime;
+	if ( & first( *alarms ) ) {
+		Duration delta = first( *alarms ).deadline - currtime;
 		__kernel_set_timer( delta );
 	}
@@ -283,5 +283,5 @@
 		__attribute__((unused)) unsigned short new_val = disable_count + 1;
 		disable_count = new_val;
-		verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
+		verify( new_val < 65_000u );			  // If this triggers someone is disabling interrupts without enabling them
 	}
 
@@ -301,5 +301,5 @@
 
 			// Check if we need to prempt the thread because an interrupt was missed
-			if( prev == 1 ) {
+			if ( prev == 1 ) {
 				#if GCC_VERSION > 50000
 					static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
@@ -313,5 +313,5 @@
 				// Signal the compiler that a fence is needed but only for signal handlers
 				__atomic_signal_fence(__ATOMIC_RELEASE);
-				if( poll && proc->pending_preemption ) {
+				if ( poll && proc->pending_preemption ) {
 					proc->pending_preemption = false;
 					force_yield( __POLL_PREEMPTION );
@@ -334,5 +334,5 @@
 		// Signal the compiler that a fence is needed but only for signal handlers
 		__atomic_signal_fence(__ATOMIC_RELEASE);
-		if( unlikely( proc->pending_preemption ) ) {
+		if ( unlikely( proc->pending_preemption ) ) {
 			proc->pending_preemption = false;
 			force_yield( __POLL_PREEMPTION );
@@ -347,22 +347,22 @@
 void __cfaabi_check_preemption() libcfa_public {
 	bool ready = __preemption_enabled();
-	if(!ready) { abort("Preemption should be ready"); }
+	if ( !ready) { abort("Preemption should be ready"); }
 
 	sigset_t oldset;
 	int ret;
 	ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
-	if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
+	if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
 
 	ret = sigismember(&oldset, SIGUSR1);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
 
 	ret = sigismember(&oldset, SIGALRM);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 0) { abort("ERROR SIGALRM is enabled"); }
 
 	ret = sigismember(&oldset, SIGTERM);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 1) { abort("ERROR SIGTERM is disabled"); }
 }
 
@@ -385,5 +385,5 @@
 
 	if ( __cfaabi_pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {
-	    abort( "internal error, pthread_sigmask" );
+		abort( "internal error, pthread_sigmask" );
 	}
 }
@@ -415,13 +415,13 @@
 	int ret;
 	ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
-	if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
+	if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
 
 	ret = sigismember(&oldset, SIGUSR1);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
 
 	ret = sigismember(&oldset, SIGALRM);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 0) { abort("ERROR SIGALRM is enabled"); }
 
 	signal_block( SIGUSR1 );
@@ -434,13 +434,13 @@
 	int ret;
 	ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
-	if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
+	if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
 
 	ret = sigismember(&oldset, SIGUSR1);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
 
 	ret = sigismember(&oldset, SIGALRM);
-	if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
-	if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
+	if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
+	if (ret == 0) { abort("ERROR SIGALRM is enabled"); }
 }
 
@@ -453,9 +453,9 @@
 	// Check if preemption is safe
 	bool ready = true;
-	if( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; };
-	if( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; };
-
-	if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
-	if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
+	if ( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; };
+	if ( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; };
+
+	if ( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
+	if ( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
 
 EXIT:
@@ -484,5 +484,5 @@
 	// Setup proper signal handlers
 	__cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); // __cfactx_switch handler
-	__cfaabi_sigaction( SIGALRM, sigHandler_alarm    , SA_SIGINFO ); // debug handler
+	__cfaabi_sigaction( SIGALRM, sigHandler_alarm , SA_SIGINFO ); // debug handler
 
 	signal_block( SIGALRM );
@@ -551,9 +551,9 @@
 	// before the kernel thread has even started running. When that happens, an interrupt
 	// with a null 'this_processor' will be caught, just ignore it.
-	if(! __cfaabi_tls.this_processor ) return;
+	if ( ! __cfaabi_tls.this_processor ) return;
 
 	choose(sfp->si_value.sival_int) {
-		case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
-		case PREEMPT_IO       : ;// I/O asked to stop spinning, nothing to do here
+		case PREEMPT_NORMAL: ;							// Normal case, nothing to do here
+		case PREEMPT_IO: ;								// I/O asked to stop spinning, nothing to do here
 		case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
 		default:
@@ -562,5 +562,5 @@
 
 	// Check if it is safe to preempt here
-	if( !preemption_ready( ip ) ) {
+	if ( !preemption_ready( ip ) ) {
 		#if !defined(__CFA_NO_STATISTICS__)
 			__cfaabi_tls.this_stats->ready.threads.preempt.rllfwd++;
@@ -607,5 +607,5 @@
 	sigfillset(&mask);
 	if ( __cfaabi_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
-	    abort( "internal error, pthread_sigmask" );
+		abort( "internal error, pthread_sigmask" );
 	}
 
@@ -622,5 +622,5 @@
 		__cfadbg_print_buffer_local( preemption, " KERNEL: SI_QUEUE %d, SI_TIMER %d, SI_KERNEL %d\n", SI_QUEUE, SI_TIMER, SI_KERNEL );
 
-		if( sig < 0 ) {
+		if ( sig < 0 ) {
 			//Error!
 			int err = errno;
Index: libcfa/src/concurrency/pthread.cfa
===================================================================
--- libcfa/src/concurrency/pthread.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/pthread.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -9,7 +9,7 @@
 // Author           : Zhenyan Zhu
 // Created On       : Sat Aug 6 16:29:18 2022
-// Last Modified By : Kyoung Seo
-// Last Modified On : Mon Jan 27 20:35:00 2025
-// Update Count     : 1
+// Last Modified By : Peter A. Buhr
+// Last Modified On : Fri Apr 25 07:28:01 2025
+// Update Count     : 4
 //
 
@@ -40,5 +40,5 @@
 	bool in_use;
 	void (* destructor)( void * );
-    dlist( pthread_values ) threads;
+	dlist( pthread_values ) threads;
 };
 
@@ -543,9 +543,7 @@
 		// 	p.in_use = false;
 		// }
-        pthread_values * p = &try_pop_front( cfa_pthread_keys[key].threads );
-        for ( ; p; ) {            
-            p->in_use = false;
-            p = &try_pop_front( cfa_pthread_keys[key].threads );
-        }
+		for ( pthread_values * p = &remove_first( cfa_pthread_keys[key].threads ); p; p = &remove_first( cfa_pthread_keys[key].threads ) ) {
+			p->in_use = false;
+		}
 		unlock(key_lock);
 		return 0;
@@ -603,8 +601,7 @@
 	//######################### Parallelism #########################
 	void pthread_delete_kernel_threads_() __THROW {	// see uMain::~uMain
-		Pthread_kernel_threads * p = &try_pop_front(cfa_pthreads_kernel_threads);
-		for ( ; p; ) {
-            delete(p);
-			p = &try_pop_front(cfa_pthreads_kernel_threads);
+		
+		for ( Pthread_kernel_threads * p = &remove_first(cfa_pthreads_kernel_threads); p; p = &remove_first(cfa_pthreads_kernel_threads) ) {
+			delete(p);
 		} // for
 	} // pthread_delete_kernel_threads_
@@ -626,5 +623,5 @@
 		} // for
 		for ( ; new_level < cfa_pthreads_no_kernel_threads; cfa_pthreads_no_kernel_threads -= 1 ) { // remove processors ?
-			delete(&try_pop_front(cfa_pthreads_kernel_threads));
+			delete(&remove_first(cfa_pthreads_kernel_threads));
 		} // for
 		unlock( concurrency_lock );
Index: libcfa/src/concurrency/select.hfa
===================================================================
--- libcfa/src/concurrency/select.hfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/concurrency/select.hfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -10,7 +10,7 @@
 // Author           : Colby Alexander Parsons
 // Created On       : Thu Jan 21 19:46:50 2023
-// Last Modified By : Kyoung Seo
-// Last Modified On : Wed Mar 19 12:00:00 2025
-// Update Count     : 1
+// Last Modified By : Peter A. Buhr
+// Last Modified On : Fri Apr 25 07:31:26 2025
+// Update Count     : 5
 //
 
@@ -33,39 +33,39 @@
 static inline bool __CFA_has_clause_run( unsigned long int status ) { return status == __SELECT_RUN; }
 static inline void __CFA_maybe_park( int * park_counter ) {
-    if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 )
-        park();
+	if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 )
+		park();
 }
 
 // node used for coordinating waituntil synchronization
 struct select_node {
-    int * park_counter;                 // If this is 0p then the node is in a special OR case waituntil
-    unsigned long int * clause_status;  // needs to point at ptr sized location, if this is 0p then node is not part of a waituntil
-
-    void * extra;                       // used to store arbitrary data needed by some primitives
-
-    thread$ * blocked_thread;
-    inline dlink(select_node);
+	int * park_counter;				 // If this is 0p then the node is in a special OR case waituntil
+	unsigned long int * clause_status;  // needs to point at ptr sized location, if this is 0p then node is not part of a waituntil
+
+	void * extra;					   // used to store arbitrary data needed by some primitives
+
+	thread$ * blocked_thread;
+	inline dlink(select_node);
 };
 P9_EMBEDDED( select_node, dlink(select_node) )
 
 static inline void ?{}( select_node & this ) {
-    this.blocked_thread = active_thread();
-    this.clause_status = 0p;
-    this.park_counter = 0p;
-    this.extra = 0p;
+	this.blocked_thread = active_thread();
+	this.clause_status = 0p;
+	this.park_counter = 0p;
+	this.extra = 0p;
 }
 
 static inline void ?{}( select_node & this, thread$ * blocked_thread ) {
-    this.blocked_thread = blocked_thread;
-    this.clause_status = 0p;
-    this.park_counter = 0p;
-    this.extra = 0p;
+	this.blocked_thread = blocked_thread;
+	this.clause_status = 0p;
+	this.park_counter = 0p;
+	this.extra = 0p;
 }
 
 static inline void ?{}( select_node & this, thread$ * blocked_thread, void * extra ) {
-    this.blocked_thread = blocked_thread;
-    this.clause_status = 0p;
-    this.park_counter = 0p;
-    this.extra = extra;
+	this.blocked_thread = blocked_thread;
+	this.clause_status = 0p;
+	this.park_counter = 0p;
+	this.extra = extra;
 }
 static inline void ^?{}( select_node & this ) {}
@@ -76,7 +76,7 @@
 // this is used inside the compiler to attempt to establish an else clause as a winner in the OR special case race
 static inline bool __select_node_else_race( select_node & this ) with( this ) {
-    unsigned long int cmp_status = __SELECT_UNSAT;
-    return *clause_status == 0 
-            && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
+	unsigned long int cmp_status = __SELECT_UNSAT;
+	return *clause_status == 0 
+			&& __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
 }
 
@@ -85,15 +85,15 @@
 forall(T & | sized(T))
 trait is_selectable {
-    // For registering a select stmt on a selectable concurrency primitive
-    // Returns bool that indicates if operation is already SAT
-    bool register_select( T &, select_node & );
-
-    // For unregistering a select stmt on a selectable concurrency primitive
-    // If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN)
-    bool unregister_select( T &, select_node & );
-
-    // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node
-    //    passed as an arg to this routine. If true is returned proceed as normal, if false is returned the statement is skipped
-    bool on_selected( T &, select_node & );
+	// For registering a select stmt on a selectable concurrency primitive
+	// Returns bool that indicates if operation is already SAT
+	bool register_select( T &, select_node & );
+
+	// For unregistering a select stmt on a selectable concurrency primitive
+	// If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN)
+	bool unregister_select( T &, select_node & );
+
+	// This routine is run on the selecting thread prior to executing the statement corresponding to the select_node
+	//	passed as an arg to this routine. If true is returned proceed as normal, if false is returned the statement is skipped
+	bool on_selected( T &, select_node & );
 };
 // Used inside the compiler to allow for overloading on return type for operations such as '?<<?' for channels
@@ -107,45 +107,45 @@
 
 static inline void __make_select_node_unsat( select_node & this ) with( this ) {
-    __atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST );
+	__atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST );
 }
 static inline void __make_select_node_sat( select_node & this ) with( this ) {
-    __atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST );
+	__atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST );
 }
 
 // used for the 2-stage avail needed by the special OR case
 static inline bool __mark_select_node( select_node & this, unsigned long int val ) with( this ) {
-    /* paranoid */ verify( park_counter == 0p );
-    /* paranoid */ verify( clause_status != 0p );
-
-    unsigned long int cmp_status = __SELECT_UNSAT;
-    while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
-        if ( cmp_status != __SELECT_PENDING ) return false;
-        cmp_status = __SELECT_UNSAT;
-    }
-    return true;
+	/* paranoid */ verify( park_counter == 0p );
+	/* paranoid */ verify( clause_status != 0p );
+
+	unsigned long int cmp_status = __SELECT_UNSAT;
+	while( ! __atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
+		if ( cmp_status != __SELECT_PENDING ) return false;
+		cmp_status = __SELECT_UNSAT;
+	}
+	return true;
 }
 
 // used for the 2-stage avail by the thread who owns a pending node
 static inline bool __pending_set_other( select_node & other, select_node & mine, unsigned long int val ) with( other ) {
-    /* paranoid */ verify( park_counter == 0p );
-    /* paranoid */ verify( clause_status != 0p );
-
-    unsigned long int cmp_status = __SELECT_UNSAT;
-    while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
-        if ( cmp_status != __SELECT_PENDING )
-            return false;
-
-        // toggle current status flag to avoid starvation/deadlock
-        __make_select_node_unsat( mine );
-        cmp_status = __SELECT_UNSAT;
-        if ( !__atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
-            return false;
-        cmp_status = __SELECT_UNSAT;
-    }
-    return true;
+	/* paranoid */ verify( park_counter == 0p );
+	/* paranoid */ verify( clause_status != 0p );
+
+	unsigned long int cmp_status = __SELECT_UNSAT;
+	while( ! __atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
+		if ( cmp_status != __SELECT_PENDING )
+			return false;
+
+		// toggle current status flag to avoid starvation/deadlock
+		__make_select_node_unsat( mine );
+		cmp_status = __SELECT_UNSAT;
+		if ( ! __atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
+			return false;
+		cmp_status = __SELECT_UNSAT;
+	}
+	return true;
 }
 
 static inline bool __make_select_node_pending( select_node & this ) with( this ) {
-    return __mark_select_node( this, __SELECT_PENDING );
+	return __mark_select_node( this, __SELECT_PENDING );
 }
 
@@ -153,54 +153,54 @@
 // return true if we want to unpark the thd
 static inline bool __make_select_node_available( select_node & this ) with( this ) {
-    /* paranoid */ verify( clause_status != 0p );
-    if( !park_counter )
-        return __mark_select_node( this, (unsigned long int)&this );
-
-    unsigned long int cmp_status = __SELECT_UNSAT;
-
-    return *clause_status == 0 // C_TODO might not need a cmp_xchg in non special OR case
-        && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write
-        && !__atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST);
+	/* paranoid */ verify( clause_status != 0p );
+	if ( ! park_counter )
+		return __mark_select_node( this, (unsigned long int)&this );
+
+	unsigned long int cmp_status = __SELECT_UNSAT;
+
+	return *clause_status == 0 // C_TODO might not need a cmp_xchg in non special OR case
+		&& __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write
+		&& ! __atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST);
 }
 
 // Handles the special OR case of the waituntil statement
 // Since only one select node can win in the OR case, we need to race to set the node available BEFORE
-//    performing the operation since if we lose the race the operation should not be performed as it will be lost
+// performing the operation since if we lose the race the operation should not be performed as it will be lost
 // Returns true if execution can continue normally and false if the queue has now been drained
 static inline bool __handle_waituntil_OR( dlist( select_node ) & queue ) {
-    if ( queue`isEmpty ) return false;
-    if ( queue`first.clause_status && !queue`first.park_counter ) {
-        while ( !queue`isEmpty ) {
-            // if node not a special OR case or if we win the special OR case race break
-            if ( !queue`first.clause_status || queue`first.park_counter || __make_select_node_available( queue`first ) )
-                return true;
-            // otherwise we lost the special OR race so discard node
-            try_pop_front( queue );
-        }
-        return false;
-    }
-    return true;
+	if ( isEmpty( queue ) ) return false;
+	if ( first( queue ).clause_status && ! first( queue ).park_counter ) {
+		while ( ! isEmpty( queue ) ) {
+			// if node not a special OR case or if we win the special OR case race break
+			if ( ! first( queue ).clause_status || first( queue ).park_counter || __make_select_node_available( first( queue ) ) )
+				return true;
+			// otherwise we lost the special OR race so discard node
+			remove_first( queue );
+		}
+		return false;
+	}
+	return true;
 }
 
 // wake one thread from the list
 static inline void wake_one( dlist( select_node ) & /*queue*/, select_node & popped ) {
-    if ( !popped.clause_status                              // normal case, node is not a select node
-        || ( popped.clause_status && !popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available
-        || __make_select_node_available( popped ) )         // check if popped link belongs to a selecting thread
-        unpark( popped.blocked_thread );
-}
-
-static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, try_pop_front( queue ) ); }
+	if ( ! popped.clause_status							  // normal case, node is not a select node
+		|| ( popped.clause_status && ! popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available
+		|| __make_select_node_available( popped ) )		 // check if popped link belongs to a selecting thread
+		unpark( popped.blocked_thread );
+}
+
+static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, remove_first( queue ) ); }
 
 static inline void setup_clause( select_node & this, unsigned long int * clause_status, int * park_counter ) {
-    this.blocked_thread = active_thread();
-    this.clause_status = clause_status;
-    this.park_counter = park_counter;
+	this.blocked_thread = active_thread();
+	this.clause_status = clause_status;
+	this.park_counter = park_counter;
 }
 
 // waituntil ( timeout( ... ) ) support
 struct select_timeout_node {
-    alarm_node_t a_node;
-    select_node * s_node;
+	alarm_node_t a_node;
+	select_node * s_node;
 };
 void ?{}( select_timeout_node & this, Duration duration, Alarm_Callback callback );
Index: libcfa/src/executor.cfa
===================================================================
--- libcfa/src/executor.cfa	(revision 65b0402655546e1fea596afe650e9a1b34129bd8)
+++ libcfa/src/executor.cfa	(revision 6b33e891da6ed781bdcec69c5dfdd3cb8a9d0e44)
@@ -21,6 +21,6 @@
 	T * remove( Buffer(T, TLink) & mutex buf ) with(buf) {
 		dlist( T, TLink ) * qptr = &queue;				// workaround https://cforall.uwaterloo.ca/trac/ticket/166
-		// if ( (*qptr)`isEmpty ) wait( delay );		// no request to process ? => wait
-	  if ( (*qptr)`isEmpty ) return 0p;					// no request to process ? => wait
+		// if ( isEmpty( *qptr ) ) wait( delay );		// no request to process ? => wait
+	  if ( isEmpty( *qptr ) ) return 0p;				// no request to process ? => wait
 		return &try_pop_front( *qptr );
 	} // remove
@@ -93,6 +93,6 @@
 	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
 //	for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
-    for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
-	    range = reqPerWorker + ( i < extras ? 1 : 0 );
+	for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
+		range = reqPerWorker + ( i < extras ? 1 : 0 );
 		workers[i] = new( cluster, requests, start, range );
 	} // for
