Index: libcfa/src/concurrency/monitor.cfa
===================================================================
--- libcfa/src/concurrency/monitor.cfa	(revision 1cd28391383d23e6aebd5fe961764d41dbd475bf)
+++ libcfa/src/concurrency/monitor.cfa	(revision 3e2e9b2826111caa0283009feb80b9d771e40592)
@@ -10,6 +10,6 @@
 // Created On       : Thd Feb 23 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Nov 21 08:31:55 2024
-// Update Count     : 18
+// Last Modified On : Wed Nov 27 12:13:14 2024
+// Update Count     : 72
 //
 
@@ -27,7 +27,7 @@
 //-----------------------------------------------------------------------------
 // Forward declarations
-static inline void __set_owner ( monitor$ * this, thread$ * owner );
-static inline void __set_owner ( monitor$ * storage [], __lock_size_t count, thread$ * owner );
-static inline void set_mask  ( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
+static inline void __set_owner( monitor$ * this, thread$ * owner );
+static inline void __set_owner( monitor$ * storage [], __lock_size_t count, thread$ * owner );
+static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
 static inline void reset_mask( monitor$ * this );
 
@@ -35,10 +35,10 @@
 static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors );
 
-static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
-static inline void lock_all  ( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+static inline void lock_all( __spinlock_t * locks [], __lock_size_t count );
+static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
 static inline void unlock_all( monitor$ * locks [], __lock_size_t count );
 
-static inline void save   ( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void save( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
 
@@ -47,34 +47,34 @@
 static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
 
-static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 
-static inline thread$ *        check_condition   ( __condition_criterion_t * );
-static inline void                 brand_condition   ( condition & );
+static inline thread$ * check_condition ( __condition_criterion_t * );
+static inline void brand_condition( condition & );
 static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count );
 
 forall(T & | sized( T ))
 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
-static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
-static inline __lock_size_t aggregate    ( monitor$ * storage [], const __waitfor_mask_t & mask );
+static inline __lock_size_t count_max( const __waitfor_mask_t & mask );
+static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask );
 
 //-----------------------------------------------------------------------------
 // Useful defines
-#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
-	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
-	__condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
-	init( count, monitors, waiter, criteria );                /* Link everything together                                                            */ \
-
-#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
-	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
-	__condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
-	init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack                                */ \
-
-#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
-	monitor$ ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
-	__lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
-	unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
-	__waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later                                */ \
-	__spinlock_t *   locks [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
+#define wait_ctx( thrd, user_info )							/* Create the necessary information to use the signaller stack */ \
+	__condition_node_t waiter = { thrd, count, user_info };	/* Create the node specific to this wait operation */ \
+	__condition_criterion_t criteria[count];				/* Create the criteria this wait operation needs to wake up */ \
+	init( count, monitors, waiter, criteria );				/* Link everything together */
+
+#define wait_ctx_primed( thrd, user_info )					/* Create the necessary information to use the signaller stack */ \
+	__condition_node_t waiter = { thrd, count, user_info };	/* Create the node specific to this wait operation */ \
+	__condition_criterion_t criteria[count];				/* Create the criteria this wait operation needs to wake up */ \
+	init_push( count, monitors, waiter, criteria );			/* Link everything together and push it to the AS-Stack */
+
+#define monitor_ctx( mons, cnt )							/* Define that creates the necessary structs for internal/external scheduling operations */ \
+	monitor$ ** monitors = mons;							/* Save the targeted monitors */ \
+	__lock_size_t count = cnt;								/* Save the count to a local variable */ \
+	unsigned int recursions[count];							/* Save the current recursion levels to restore them later */ \
+	__waitfor_mask_t masks[count];							/* Save the current waitfor masks to restore them later */ \
+	__spinlock_t * locks[count];							/* We need to pass-in an array of locks to BlockInternal */
 
 #define monitor_save    save   ( monitors, count, locks, recursions, masks )
@@ -93,20 +93,17 @@
 	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
 
-	if( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
+	if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
 		abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
-	}
-	else if( !this->owner ) {
+	} else if ( !this->owner ) {
 		// No one has the monitor, just take it
 		__set_owner( this, thrd );
 
 		__cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
-	}
-	else if( this->owner == thrd) {
+	} else if ( this->owner == thrd) {
 		// We already have the monitor, just note how many times we took it
 		this->recursion += 1;
 
 		__cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
-	}
-	else if( is_accepted( this, group) ) {
+	} else if ( is_accepted( this, group) ) {
 		// Some one was waiting for us, enter
 		__set_owner( this, thrd );
@@ -116,6 +113,5 @@
 
 		__cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
-	}
-	else {
+	} else {
 		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
 
@@ -156,5 +152,5 @@
 
 
-	if( !this->owner ) {
+	if ( !this->owner ) {
 		__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
 
@@ -167,6 +163,5 @@
 		unlock( this->lock );
 		return;
-	}
-	else if( this->owner == thrd && !join) {
+	} else if ( this->owner == thrd && !join) {
 		// We already have the monitor... but where about to destroy it so the nesting will fail
 		// Abort!
@@ -176,5 +171,5 @@
 	// because join will not release the monitor after it executed.
 	// to avoid that it sets the owner to the special value thrd | 1p before exiting
-	else if( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
+	else if ( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
 		// restore the owner and just return
 		__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
@@ -196,5 +191,5 @@
 	monitor$ ** monitors = &this;
 	__monitor_group_t group = { &this, 1, func };
-	if( is_accepted( this, group) ) {
+	if ( is_accepted( this, group) ) {
 		__cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
 
@@ -224,6 +219,5 @@
 		__cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
 		return;
-	}
-	else {
+	} else {
 		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
 
@@ -259,5 +253,5 @@
 	// If we haven't left the last level of recursion
 	// it means we don't need to do anything
-	if( this->recursion != 0) {
+	if ( this->recursion != 0) {
 		__cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
 		unlock( this->lock );
@@ -283,8 +277,8 @@
 static void __dtor_leave( monitor$ * this, bool join ) {
 	__cfaabi_dbg_debug_do(
-		if( active_thread() != this->owner ) {
+		if ( active_thread() != this->owner ) {
 			abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner);
 		}
-		if( this->recursion != 1  && !join ) {
+		if ( this->recursion != 1  && !join ) {
 			abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
 		}
@@ -332,5 +326,5 @@
 // relies on the monitor array being sorted
 static inline void enter( __monitor_group_t monitors ) {
-	for( __lock_size_t i = 0; i < monitors.size; i++) {
+	for ( i; monitors.size ) {
 		__enter( monitors[i], monitors );
 	}
@@ -340,5 +334,5 @@
 // relies on the monitor array being sorted
 static inline void leave(monitor$ * monitors [], __lock_size_t count) {
-	for( __lock_size_t i = count - 1; i >= 0; i--) {
+	for ( i; -~= count - 1 ) {
 		__leave( monitors[i] );
 	}
@@ -454,5 +448,5 @@
 
 	// Create storage for monitor context
-	monitor_ctx( this.monitors, this.monitor_count );
+	monitor_ctx( this.monitors, this.monitor_count );	// creates monitors, count, recursions, masks, locks
 
 	// Create the node specific to this wait operation
@@ -477,5 +471,5 @@
 
 	// Remove any duplicate threads
-	for( __lock_size_t i = 0; i < count; i++) {
+	for ( i; count ) {
 		thread$ * new_owner = next_thread( monitors[i] );
 		insert_unique( threads, thread_count, new_owner );
@@ -483,10 +477,10 @@
 
 	// Unlock the locks, we don't need them anymore
-	for(int i = 0; i < count; i++) {
+	for ( i; count ) {
 		unlock( *locks[i] );
 	}
 
 	// Wake the threads
-	for(int i = 0; i < thread_count; i++) {
+	for ( i; thread_count ) {
 		unpark( threads[i] );
 	}
@@ -500,5 +494,5 @@
 
 bool signal( condition & this ) libcfa_public {
-	if( is_empty( this ) ) { return false; }
+	if ( is_empty( this ) ) { return false; }
 
 	//Check that everything is as expected
@@ -513,5 +507,5 @@
 		}
 
-		for(int i = 0; i < this.monitor_count; i++) {
+		for ( i; this.monitor_count ) {
 			if ( this.monitors[i] != this_thrd->monitors[i] ) {
 				abort( "Signal on condition %p made with different monitor, expected %p got %p", &this, this.monitors[i], this_thrd->monitors[i] );
@@ -529,5 +523,5 @@
 
 	//Add the thread to the proper AS stack
-	for(int i = 0; i < count; i++) {
+	for ( i; count ) {
 		__condition_criterion_t * crit = &node->criteria[i];
 		assert( !crit->ready );
@@ -542,5 +536,5 @@
 
 bool signal_block( condition & this ) libcfa_public {
-	if( !this.blocked.head ) { return false; }
+	if ( !this.blocked.head ) { return false; }
 
 	//Check that everything is as expected
@@ -549,9 +543,8 @@
 
 	// Create storage for monitor context
-	monitor_ctx( this.monitors, this.monitor_count );
+	monitor_ctx( this.monitors, this.monitor_count );	// creates monitors, count, recursions, masks, locks
 
 	// Lock all monitors (aggregates the locks them as well)
 	lock_all( monitors, locks, count );
-
 
 	// Create the node specific to this wait operation
@@ -576,7 +569,5 @@
 	park();
 
-
 	// WE WOKE UP
-
 
 	__cfaabi_dbg_print_buffer_local( "Kernel :   signal_block returned\n" );
@@ -621,10 +612,10 @@
 	__cfaabi_dbg_print_buffer_decl( "Kernel : waitfor %"PRIdFAST16" (s: %"PRIdFAST16", m: %"PRIdFAST16")\n", actual_count, mask.size, (__lock_size_t)max);
 
-	if(actual_count == 0) return;
+	if (actual_count == 0) return;
 
 	__cfaabi_dbg_print_buffer_local( "Kernel : waitfor internal proceeding\n" );
 
 	// Create storage for monitor context
-	monitor_ctx( mon_storage, actual_count );
+	monitor_ctx( mon_storage, actual_count );			// creates monitors, count, recursions, masks, locks
 
 	// Lock all monitors (aggregates the locks as well)
@@ -636,8 +627,8 @@
 		[next, index] = search_entry_queue( mask, monitors, count );
 
-		if( next ) {
+		if ( next ) {
 			*mask.accepted = index;
 			__acceptable_t& accepted = mask[index];
-			if( accepted.is_dtor ) {
+			if ( accepted.is_dtor ) {
 				__cfaabi_dbg_print_buffer_local( "Kernel : dtor already there\n" );
 				verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
@@ -662,5 +653,5 @@
 				__cfaabi_dbg_print_buffer_local( "Kernel :  baton of %"PRIdFAST16" monitors : ", count );
 				#ifdef __CFA_DEBUG_PRINT__
-					for( int i = 0; i < count; i++) {
+					for ( i; count ) {
 						__cfaabi_dbg_print_buffer_local( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
 					}
@@ -692,5 +683,5 @@
 
 
-	if( duration == 0 ) {
+	if ( duration == 0 ) {
 		__cfaabi_dbg_print_buffer_local( "Kernel : non-blocking, exiting\n" );
 
@@ -712,5 +703,5 @@
 	set_mask( monitors, count, mask );
 
-	for( __lock_size_t i = 0; i < count; i++) {
+	for ( i; count ) {
 		verify( monitors[i]->owner == active_thread() );
 	}
@@ -752,5 +743,5 @@
 	monitors[0]->owner        = owner;
 	monitors[0]->recursion    = 1;
-	for( __lock_size_t i = 1; i < count; i++ ) {
+	for ( i; 1~count ) {
 		/* paranoid */ verify ( monitors[i]->lock.lock );
 		/* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] );
@@ -761,5 +752,5 @@
 
 static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
-	for( __lock_size_t i = 0; i < count; i++) {
+	for ( i; count) {
 		storage[i]->mask = mask;
 	}
@@ -776,5 +767,5 @@
 	__cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
 	__condition_criterion_t * urgent = pop( this->signal_stack );
-	if( urgent ) {
+	if ( urgent ) {
 		//The signaller stack is not empty,
 		//regardless of if we are ready to baton pass,
@@ -801,12 +792,12 @@
 
 	// Check if there are any acceptable functions
-	if( !it ) return false;
+	if ( !it ) return false;
 
 	// If this isn't the first monitor to test this, there is no reason to repeat the test.
-	if( this != group[0] ) return group[0]->mask.accepted >= 0;
+	if ( this != group[0] ) return group[0]->mask.accepted >= 0;
 
 	// For all acceptable functions check if this is the current function.
-	for( __lock_size_t i = 0; i < count; i++, it++ ) {
-		if( *it == group ) {
+	for ( __lock_size_t i = 0; i < count; i++, it++ ) {
+		if ( *it == group ) {
 			*this->mask.accepted = i;
 			return true;
@@ -819,5 +810,5 @@
 
 static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
-	for( __lock_size_t i = 0; i < count; i++) {
+	for ( i; count ) {
 		(criteria[i]){ monitors[i], waiter };
 	}
@@ -827,5 +818,5 @@
 
 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
-	for( __lock_size_t i = 0; i < count; i++) {
+	for ( i; count ) {
 		(criteria[i]){ monitors[i], waiter };
 		__cfaabi_dbg_print_safe( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
@@ -837,5 +828,5 @@
 
 static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
-	for( __lock_size_t i = 0; i < count; i++ ) {
+	for ( i; count ) {
 		lock( *locks[i] __cfaabi_dbg_ctx2 );
 	}
@@ -843,13 +834,13 @@
 
 static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
-	for( __lock_size_t i = 0; i < count; i++ ) {
+	for ( i; count ) {
 		__spinlock_t * l = &source[i]->lock;
 		lock( *l __cfaabi_dbg_ctx2 );
-		if(locks) locks[i] = l;
+		if (locks) locks[i] = l;
 	}
 }
 
 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ) {
-	for( __lock_size_t i = 0; i < count; i++ ) {
+	for ( i; count ) {
 		unlock( *locks[i] );
 	}
@@ -857,5 +848,5 @@
 
 static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) {
-	for( __lock_size_t i = 0; i < count; i++ ) {
+	for ( i; count ) {
 		unlock( locks[i]->lock );
 	}
@@ -869,7 +860,7 @@
 	__waitfor_mask_t /*out*/ masks []
 ) {
-	for( __lock_size_t i = 0; i < count; i++ ) {
+	for ( i; count ) {
 		recursions[i] = ctx[i]->recursion;
-		masks[i]      = ctx[i]->mask;
+		masks[i] = ctx[i]->mask;
 	}
 }
@@ -883,7 +874,7 @@
 ) {
 	lock_all( locks, count );
-	for( __lock_size_t i = 0; i < count; i++ ) {
+	for ( i; count ) {
 		ctx[i]->recursion = recursions[i];
-		ctx[i]->mask      = masks[i];
+		ctx[i]->mask = masks[i];
 	}
 	unlock_all( locks, count );
@@ -901,8 +892,7 @@
 	bool ready2run = true;
 
-	for(	int i = 0; i < count; i++ ) {
-
+	for ( i; count ) {
 		// __cfaabi_dbg_print_safe( "Checking %p for %p\n", &criteria[i], target );
-		if( &criteria[i] == target ) {
+		if ( &criteria[i] == target ) {
 			criteria[i].ready = true;
 			// __cfaabi_dbg_print_safe( "True\n" );
@@ -918,5 +908,5 @@
 static inline void brand_condition( condition & this ) {
 	thread$ * thrd = active_thread();
-	if( !this.monitors ) {
+	if ( !this.monitors ) {
 		// __cfaabi_dbg_print_safe( "Branding\n" );
 		assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
@@ -924,5 +914,5 @@
 
 		this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) );
-		for( int i = 0; i < this.monitor_count; i++ ) {
+		for ( i; this.monitor_count ) {
 			this.monitors[i] = thrd->monitors[i];
 		}
@@ -947,5 +937,5 @@
 		// For each acceptable check if it matches
 		int i = 0;
-		__acceptable_t * end   = end  (mask);
+		__acceptable_t * end = end(mask);
 		__acceptable_t * begin = begin(mask);
 		for ( __acceptable_t * it = begin; it != end; it++, i++ ) {
@@ -961,10 +951,8 @@
 		#endif
 	}
-#endif
-	int i = 0;
-	__acceptable_t * end   = end  (mask);
-	__acceptable_t * begin = begin(mask);
+#else
 	// For each acceptable (respect lexical priority in waitfor statement)
-	for ( __acceptable_t * it = begin; it != end; it++, i++ ) {
+	__acceptable_t * it = end(mask); it--;				// end is one past the last node, so back up
+	for ( int i = mask.size - 1; i >= 0; i -= 1, it-- ) {
 		#if defined( __CFA_WITH_VERIFY__ )
 		thread$ * last = 0p;
@@ -980,5 +968,5 @@
 			// For each thread in the entry-queue check for a match
 			if ( *it == curr->monitors ) {
-				// If match, return it after removeing from the entry queue
+				// If match, return it after removing from the entry queue
 				return [remove( entry_queue, thrd_it ), i];
 			} // if
@@ -989,23 +977,23 @@
 		} // for
 	} // for
+#endif
 	return [0, -1];
 }
 
-forall(T & | sized( T ))
+forall( T & | sized( T ) )
 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
-	if( !val ) return size;
-
-	for( __lock_size_t i = 0; i <= size; i++) {
-		if( array[i] == val ) return size;
+	if ( ! val ) return size;
+
+	for ( __lock_size_t i; ~= size ) {
+		if ( array[i] == val ) return size;
 	}
 
 	array[size] = val;
-	size = size + 1;
-	return size;
+	return size += 1;
 }
 
 static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
 	__lock_size_t max = 0;
-	for( __lock_size_t i = 0; i < mask.size; i++ ) {
+	for ( i; mask.size ) {
 		__acceptable_t & accepted = mask[i];
 		max += accepted.size;
@@ -1016,8 +1004,8 @@
 static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) {
 	__lock_size_t size = 0;
-	for( __lock_size_t i = 0; i < mask.size; i++ ) {
+	for ( i; mask.size ) {
 		__acceptable_t & accepted = mask[i];
 		__libcfa_small_sort( accepted.data, accepted.size );
-		for( __lock_size_t j = 0; j < accepted.size; j++) {
+		for ( __lock_size_t j = 0; j < accepted.size; j++) {
 			insert_unique( storage, size, accepted[j] );
 		}
@@ -1040,20 +1028,17 @@
 	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
 
-	if( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
+	if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
 		abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
-	}
-	else if( !this->owner ) {
+	} else if ( !this->owner ) {
 		// No one has the monitor, just take it
 		__set_owner( this, thrd );
 
 		__cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
-	}
-	else if( this->owner == thrd) {
+	} else if ( this->owner == thrd) {
 		// We already have the monitor, just note how many times we took it
 		this->recursion += 1;
 
 		__cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
-	}
-	else {
+	} else {
 		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
 
