Index: libcfa/src/Makefile.am
===================================================================
--- libcfa/src/Makefile.am	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/Makefile.am	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -44,5 +44,5 @@
 
 headers = common.hfa fstream.hfa heap.hfa iostream.hfa iterator.hfa limits.hfa rational.hfa \
-		time.hfa stdlib.hfa memory.hfa parseargs.hfa \
+		time.hfa stdlib.hfa parseargs.hfa \
 		containers/maybe.hfa containers/pair.hfa containers/result.hfa containers/vector.hfa
 
Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/concurrency/invoke.h	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -26,4 +26,11 @@
 #ifndef _INVOKE_H_
 #define _INVOKE_H_
+
+	struct __cfaehm_try_resume_node;
+	struct __cfaehm_base_exception_t;
+	struct exception_context_t {
+		struct __cfaehm_try_resume_node * top_resume;
+		struct __cfaehm_base_exception_t * current_exception;
+	};
 
 	struct __stack_context_t {
@@ -51,4 +58,7 @@
 		// base of stack
 		void * base;
+
+		// Information for exception handling.
+		struct exception_context_t exception_context;
 	};
 
@@ -84,5 +94,7 @@
 	};
 
-	static inline struct __stack_t * __get_stack( struct $coroutine * cor ) { return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2)); }
+	static inline struct __stack_t * __get_stack( struct $coroutine * cor ) {
+		return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
+	}
 
 	// struct which calls the monitor is accepting
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -516,5 +516,8 @@
 	( this.terminated ){ 0 };
 	( this.runner ){};
-	init( this, name, _cltr );
+
+	disable_interrupts();
+		init( this, name, _cltr );
+	enable_interrupts( __cfaabi_dbg_ctx );
 
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
@@ -540,5 +543,7 @@
 	free( this.stack );
 
-	deinit( this );
+	disable_interrupts();
+		deinit( this );
+	enable_interrupts( __cfaabi_dbg_ctx );
 }
 
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -150,4 +150,6 @@
 //  queues or removing them.
 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
 	// Step 1 : lock global lock
 	// It is needed to avoid processors that register mid Critical-Section
@@ -164,8 +166,11 @@
 	}
 
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	return s;
 }
 
 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
 	// Step 1 : release local locks
 	// This must be done while the global lock is held to avoid
@@ -182,4 +187,6 @@
 	/*paranoid*/ assert(true == lock);
 	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 }
 
@@ -419,6 +426,5 @@
 	// Actually pop the list
 	struct $thread * thrd;
-	bool emptied;
-	[thrd, emptied] = pop(lane);
+	thrd = pop(lane);
 
 	/* paranoid */ verify(thrd);
@@ -457,6 +463,5 @@
 			if(head(lane)->link.next == thrd) {
 				$thread * pthrd;
-				bool emptied;
-				[pthrd, emptied] = pop(lane);
+				pthrd = pop(lane);
 
 				/* paranoid */ verify( pthrd == thrd );
@@ -608,6 +613,5 @@
 			while(!is_empty(lanes.data[idx])) {
 				struct $thread * thrd;
-				__attribute__((unused)) bool _;
-				[thrd, _] = pop(lanes.data[idx]);
+				thrd = pop(lanes.data[idx]);
 
 				push(cltr, thrd);
Index: libcfa/src/concurrency/ready_subqueue.hfa
===================================================================
--- libcfa/src/concurrency/ready_subqueue.hfa	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/concurrency/ready_subqueue.hfa	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -144,5 +144,5 @@
 // returns popped
-// returns true of lane was empty before push, false otherwise
-[$thread *, bool] pop(__intrusive_lane_t & this) {
+// thread from the front of the lane
+$thread * pop(__intrusive_lane_t & this) {
 	/* paranoid */ verify(this.lock);
 	/* paranoid */ verify(this.before.link.ts != 0ul);
@@ -162,5 +162,6 @@
 	head->link.next = next;
 	next->link.prev = head;
-	node->link.[next, prev] = 0p;
+	node->link.next = 0p;
+	node->link.prev = 0p;
 
 	// Update head time stamp
@@ -180,5 +181,5 @@
 		/* paranoid */ verify(tail(this)->link.prev == head(this));
 		/* paranoid */ verify(head(this)->link.next == tail(this));
-		return [node, true];
+		return node;
 	}
 	else {
@@ -187,5 +188,5 @@
 		/* paranoid */ verify(head(this)->link.next != tail(this));
 		/* paranoid */ verify(this.before.link.ts != 0);
-		return [node, false];
+		return node;
 	}
 }
Index: libcfa/src/exception.c
===================================================================
--- libcfa/src/exception.c	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/exception.c	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -10,6 +10,6 @@
 // Created On       : Mon Jun 26 15:13:00 2017
 // Last Modified By : Andrew Beach
-// Last Modified On : Thr May 21 12:18:00 2020
-// Update Count     : 20
+// Last Modified On : Wed Aug 12 13:55:00 2020
+// Update Count     : 21
 //
 
@@ -28,4 +28,5 @@
 #include <unwind.h>
 #include <bits/debug.hfa>
+#include "concurrency/invoke.h"
 #include "stdhdr/assert.h"
 
@@ -59,10 +60,5 @@
 
-// Temperary global exception context. Does not work with concurency.
+// Temporary global exception context. Does not work with concurrency.
-struct exception_context_t {
-	struct __cfaehm_try_resume_node * top_resume;
-
-	exception_t * current_exception;
-	int current_handler_index;
-} static shared_stack = {NULL, NULL, 0};
+static struct exception_context_t shared_stack = {NULL, NULL};
 
 // Get the current exception context.
@@ -122,4 +118,15 @@
 
 // MEMORY MANAGEMENT =========================================================
+
+struct __cfaehm_node {
+	struct _Unwind_Exception unwind_exception;
+	struct __cfaehm_node * next;
+	int handler_index;
+};
+
+#define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node)))
+#define EXCEPT_TO_NODE(except) ((struct __cfaehm_node *)(except) - 1)
+#define UNWIND_TO_NODE(unwind) ((struct __cfaehm_node *)(unwind))
+#define NULL_MAP(map, ptr) ((ptr) ? (map(ptr)) : NULL)
 
 // How to clean up an exception in various situations.
@@ -137,15 +144,4 @@
 }
 
-// We need a piece of storage to raise the exception, for now its a single
-// piece.
-static struct _Unwind_Exception this_exception_storage;
-
-struct __cfaehm_node {
-	struct __cfaehm_node * next;
-};
-
-#define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node)))
-#define EXCEPT_TO_NODE(except) ((struct __cfaehm_node *)(except) - 1)
-
 // Creates a copy of the indicated exception and sets current_exception to it.
 static void __cfaehm_allocate_exception( exception_t * except ) {
@@ -161,14 +157,14 @@
 	}
 
+	// Initialize the node:
+	exception_t * except_store = NODE_TO_EXCEPT(store);
+	store->unwind_exception.exception_class = __cfaehm_exception_class;
+	store->unwind_exception.exception_cleanup = __cfaehm_exception_cleanup;
+	store->handler_index = 0;
+	except->virtual_table->copy( except_store, except );
+
 	// Add the node to the list:
-	store->next = EXCEPT_TO_NODE(context->current_exception);
-	context->current_exception = NODE_TO_EXCEPT(store);
-
-	// Copy the exception to storage.
-	except->virtual_table->copy( context->current_exception, except );
-
-	// Set up the exception storage.
-	this_exception_storage.exception_class = __cfaehm_exception_class;
-	this_exception_storage.exception_cleanup = __cfaehm_exception_cleanup;
+	store->next = NULL_MAP(EXCEPT_TO_NODE, context->current_exception);
+	context->current_exception = except_store;
 }
 
@@ -185,5 +181,5 @@
 	if ( context->current_exception == except ) {
 		node = to_free->next;
-		context->current_exception = (node) ? NODE_TO_EXCEPT(node) : 0;
+		context->current_exception = NULL_MAP(NODE_TO_EXCEPT, node);
 	} else {
 		node = EXCEPT_TO_NODE(context->current_exception);
@@ -213,5 +209,5 @@
 	// Verify actions follow the rules we expect.
 	verify((actions & _UA_CLEANUP_PHASE) && (actions & _UA_FORCE_UNWIND));
-	verify(!(actions & (_UA_SEARCH_PHASE | _UA_HANDER_FRAME)));
+	verify(!(actions & (_UA_SEARCH_PHASE | _UA_HANDLER_FRAME)));
 
 	if ( actions & _UA_END_OF_STACK ) {
@@ -222,9 +218,11 @@
 }
 
+static struct _Unwind_Exception cancel_exception_storage;
+
-// Cancel the current stack, prefroming approprate clean-up and messaging.
+// Cancel the current stack, performing appropriate clean-up and messaging.
 void __cfaehm_cancel_stack( exception_t * exception ) {
 	// TODO: Detect current stack and pick a particular stop-function.
 	_Unwind_Reason_Code ret;
-	ret = _Unwind_ForcedUnwind( &this_exception_storage, _Stop_Fn, (void*)0x22 );
+	ret = _Unwind_ForcedUnwind( &cancel_exception_storage, _Stop_Fn, (void*)0x22 );
 	printf("UNWIND ERROR %d after force unwind\n", ret);
 	abort();
@@ -247,9 +245,10 @@
 static void __cfaehm_begin_unwind(void(*defaultHandler)(exception_t *)) {
 	struct exception_context_t * context = this_exception_context();
-	struct _Unwind_Exception * storage = &this_exception_storage;
 	if ( NULL == context->current_exception ) {
 		printf("UNWIND ERROR missing exception in begin unwind\n");
 		abort();
 	}
+	struct _Unwind_Exception * storage =
+		&EXCEPT_TO_NODE(context->current_exception)->unwind_exception;
 
 	// Call stdlibc to raise the exception
@@ -419,5 +418,5 @@
 				_Unwind_Reason_Code ret = (0 == index)
 					? _URC_CONTINUE_UNWIND : _URC_HANDLER_FOUND;
-				context->current_handler_index = index;
+				UNWIND_TO_NODE(unwind_exception)->handler_index = index;
 
 				// Based on the return value, check if we matched the exception
@@ -425,4 +424,5 @@
 					__cfadbg_print_safe(exception, " handler found\n");
 				} else {
+					// TODO: Continue the search if there is more in the table.
 					__cfadbg_print_safe(exception, " no handler\n");
 				}
@@ -516,5 +516,5 @@
 	// Exception handler
 	// Note: Saving the exception context on the stack breaks termination exceptions.
-	catch_block( this_exception_context()->current_handler_index,
+	catch_block( EXCEPT_TO_NODE( this_exception_context()->current_exception )->handler_index,
 	             this_exception_context()->current_exception );
 }
Index: libcfa/src/iostream.cfa
===================================================================
--- libcfa/src/iostream.cfa	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/iostream.cfa	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -10,6 +10,6 @@
 // Created On       : Wed May 27 17:56:53 2015
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Aug 10 09:32:14 2020
-// Update Count     : 1126
+// Last Modified On : Tue Aug 11 22:16:33 2020
+// Update Count     : 1128
 //
 
@@ -37,22 +37,4 @@
 
 forall( dtype ostype | ostream( ostype ) ) {
-	ostype & ?|?( ostype & os, zero_t ) {
-		if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
-		fmt( os, "%d", 0n );
-		return os;
-	} // ?|?
-	void ?|?( ostype & os, zero_t z ) {
-		(ostype &)(os | z); ends( os );
-	} // ?|?
-
-	ostype & ?|?( ostype & os, one_t ) {
-		if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
-		fmt( os, "%d", 1n );
-		return os;
-	} // ?|?
-	void ?|?( ostype & os, one_t o ) {
-		(ostype &)(os | o); ends( os );
-	} // ?|?
-
 	ostype & ?|?( ostype & os, bool b ) {
 		if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
Index: libcfa/src/iostream.hfa
===================================================================
--- libcfa/src/iostream.hfa	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/iostream.hfa	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -10,6 +10,6 @@
 // Created On       : Wed May 27 17:56:53 2015
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Jul 16 07:43:32 2020
-// Update Count     : 348
+// Last Modified On : Tue Aug 11 22:16:14 2020
+// Update Count     : 350
 //
 
@@ -67,9 +67,4 @@
 
 forall( dtype ostype | ostream( ostype ) ) {
-	ostype & ?|?( ostype &, zero_t );
-	void ?|?( ostype &, zero_t );
-	ostype & ?|?( ostype &, one_t );
-	void ?|?( ostype &, one_t );
-
 	ostype & ?|?( ostype &, bool );
 	void ?|?( ostype &, bool );
Index: libcfa/src/stdlib.hfa
===================================================================
--- libcfa/src/stdlib.hfa	(revision 309d8141747e741a9d445f6a58c51aa1b19eead4)
+++ libcfa/src/stdlib.hfa	(revision 4c925cd31e0a0d5ba86584befde68164c77404b8)
@@ -10,6 +10,6 @@
 // Created On       : Thu Jan 28 17:12:35 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Jul 30 16:14:58 2020
-// Update Count     : 490
+// Last Modified On : Tue Aug 11 21:11:46 2020
+// Update Count     : 495
 //
 
@@ -136,23 +136,23 @@
 	T * alloc_set( char fill ) {
 		return (T *)memset( (T *)alloc(), (int)fill, sizeof(T) ); // initialize with fill value
-	} // alloc
-
-	T * alloc_set( T fill ) {
+	} // alloc_set
+
+	T * alloc_set( const T & fill ) {
 		return (T *)memcpy( (T *)alloc(), &fill, sizeof(T) ); // initialize with fill value
-	} // alloc
+	} // alloc_set
 
 	T * alloc_set( size_t dim, char fill ) {
 		return (T *)memset( (T *)alloc( dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value
-	} // alloc
-
-	T * alloc_set( size_t dim, T fill ) {
+	} // alloc_set
+
+	T * alloc_set( size_t dim, const T & fill ) {
 		T * r = (T *)alloc( dim );
 		for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value
 		return r;
-	} // alloc
+	} // alloc_set
 
 	T * alloc_set( size_t dim, const T fill[] ) {
 		return (T *)memcpy( (T *)alloc( dim ), fill, dim * sizeof(T) ); // initialize with fill value
-	} // alloc
+	} // alloc_set
 
 	T * alloc_set( T ptr[], size_t dim, char fill ) {	// realloc array with fill
@@ -166,5 +166,5 @@
 	} // alloc_set
 
-	T * alloc_set( T ptr[], size_t dim, T & fill ) {	// realloc array with fill
+	T * alloc_set( T ptr[], size_t dim, const T & fill ) {	// realloc array with fill
 		size_t odim = malloc_size( ptr ) / sizeof(T);	// current dimension
 		size_t nsize = dim * sizeof(T);					// new allocation
@@ -177,5 +177,5 @@
 		} // if
 		return nptr;
-	} // alloc_align_set
+	} // alloc_set
 } // distribution
 
@@ -204,23 +204,23 @@
 	T * alloc_align_set( size_t align, char fill ) {
 		return (T *)memset( (T *)alloc_align( align ), (int)fill, sizeof(T) ); // initialize with fill value
-	} // alloc_align
-
-	T * alloc_align_set( size_t align, T fill ) {
+	} // alloc_align_set
+
+	T * alloc_align_set( size_t align, const T & fill ) {
 		return (T *)memcpy( (T *)alloc_align( align ), &fill, sizeof(T) ); // initialize with fill value
-	} // alloc_align
+	} // alloc_align_set
 
 	T * alloc_align_set( size_t align, size_t dim, char fill ) {
 		return (T *)memset( (T *)alloc_align( align, dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value
-	} // alloc_align
-
-	T * alloc_align_set( size_t align, size_t dim, T fill ) {
+	} // alloc_align_set
+
+	T * alloc_align_set( size_t align, size_t dim, const T & fill ) {
 		T * r = (T *)alloc_align( align, dim );
 		for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value
 		return r;
-	} // alloc_align
+	} // alloc_align_set
 
 	T * alloc_align_set( size_t align, size_t dim, const T fill[] ) {
 		return (T *)memcpy( (T *)alloc_align( align, dim ), fill, dim * sizeof(T) );
-	} // alloc_align
+	} // alloc_align_set
 
 	T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill ) {
@@ -234,5 +234,5 @@
 	} // alloc_align_set
 
-	T * alloc_align_set( T ptr[], size_t align, size_t dim, T & fill ) {
+	T * alloc_align_set( T ptr[], size_t align, size_t dim, const T & fill ) {
 		size_t odim = malloc_size( ptr ) / sizeof(T);	// current dimension
 		size_t nsize = dim * sizeof(T);					// new allocation
