Index: libcfa/src/concurrency/clib/cfathread.cfa
===================================================================
--- libcfa/src/concurrency/clib/cfathread.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/clib/cfathread.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -59,5 +59,5 @@
 	void cfathread_setproccnt( int ncnt ) {
 		assert( ncnt >= 1 );
-		adelete(proc_cnt, procs);
+		adelete( procs );
 
 		proc_cnt = ncnt - 1;
Index: libcfa/src/concurrency/coroutine.cfa
===================================================================
--- libcfa/src/concurrency/coroutine.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/coroutine.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -10,6 +10,6 @@
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue May 26 22:06:09 2020
-// Update Count     : 21
+// Last Modified On : Fri Oct 23 23:05:24 2020
+// Update Count     : 22
 //
 
@@ -24,12 +24,8 @@
 #include <unistd.h>
 #include <sys/mman.h>									// mprotect
-extern "C" {
-// use this define to make unwind.h play nice, definitely a hack
-#define HIDE_EXPORTS
 #include <unwind.h>
-#undef HIDE_EXPORTS
-}
 
 #include "kernel_private.hfa"
+#include "exception.hfa"
 
 #define __CFA_INVOKE_PRIVATE__
@@ -49,10 +45,4 @@
 FORALL_DATA_INSTANCE(CoroutineCancelled, (dtype coroutine_t), (coroutine_t))
 
-struct __cfaehm_node {
-	struct _Unwind_Exception unwind_exception;
-	struct __cfaehm_node * next;
-	int handler_index;
-};
-
 forall(dtype T)
 void mark_exception(CoroutineCancelled(T) *) {}
@@ -60,4 +50,5 @@
 forall(dtype T)
 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
+	dst->virtual_table = src->virtual_table;
 	dst->the_coroutine = src->the_coroutine;
 	dst->the_exception = src->the_exception;
@@ -74,5 +65,5 @@
 	verify( desc->cancellation );
 	desc->state = Cancelled;
-	exception_t * except = (exception_t *)(1 + (__cfaehm_node *)desc->cancellation);
+	exception_t * except = __cfaehm_cancellation_exception( desc->cancellation );
 
 	// TODO: Remove explitate vtable set once trac#186 is fixed.
@@ -92,5 +83,5 @@
 
 // minimum feasible stack size in bytes
-#define MinStackSize 1000
+static const size_t MinStackSize = 1000;
 extern size_t __page_size;				// architecture pagesize HACK, should go in proper runtime singleton
 
@@ -217,5 +208,5 @@
 		size = libFloor(create_size - stack_data_size - diff, libAlign());
 	} // if
-	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
+	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );
 
 	this->storage = (__stack_t *)((intptr_t)storage + size);
Index: libcfa/src/concurrency/exception.cfa
===================================================================
--- libcfa/src/concurrency/exception.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/exception.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -10,20 +10,18 @@
 // Created On       : Mon Aug 17 10:41:00 2020
 // Last Modified By : Andrew Beach
-// Last Modified On : Tue Aug 25 14:41:00 2020
-// Update Count     : 0
+// Last Modified On : Wed Oct 28 14:34:00 2020
+// Update Count     : 1
 //
 
-extern "C" {
-// use this define to make unwind.h play nice, definitely a hack
-#define HIDE_EXPORTS
-#include <unwind.h>
-#undef HIDE_EXPORTS
-}
+#define __cforall_thread__
 
-#include "invoke.h"
 #include "exception.hfa"
+
 #include "coroutine.hfa"
 
 extern struct $thread * mainThread;
+extern "C" {
+extern void __cfactx_thrd_leave();
+}
 
 // Common pattern for all the stop functions, wait until the end then act.
@@ -52,6 +50,6 @@
 
 STOP_AT_END_FUNCTION(thread_cancelstop,
-	// TODO: Instead pass information to the joiner.
-	abort();
+	__cfactx_thrd_leave();
+	__cabi_abort( "Resumed cancelled thread" );
 )
 
@@ -85,4 +83,6 @@
 		stop_param = (void *)0x22;
 	} else {
+		this_thread->self_cor.cancellation = unwind_exception;
+
 		stop_func = thread_cancelstop;
 		stop_param = this_thread;
Index: libcfa/src/concurrency/exception.hfa
===================================================================
--- libcfa/src/concurrency/exception.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/exception.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -16,13 +16,14 @@
 #pragma once
 
+// This is an internal bridge between the two modes and must be C compatible.
+
+#include <unwind.h>
 #include "bits/defs.hfa"
 #include "invoke.h"
+#include "exception.h"
 
 #ifdef __cforall
 extern "C" {
-
-#define HIDE_EXPORTS
 #endif
-#include "unwind.h"
 
 struct exception_context_t * this_exception_context(void) OPTIONAL_THREAD;
@@ -32,5 +33,4 @@
 
 #ifdef __cforall
-#undef HIDE_EXPORTS
 }
 #endif
Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/invoke.h	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -157,4 +157,8 @@
 
 		// current execution status for coroutine
+		// Possible values are:
+		//    - TICKET_BLOCKED (-1) thread is blocked
+		//    - TICKET_RUNNING ( 0) thread is running
+		//    - TICKET_UNBLOCK ( 1) thread should ignore next block
 		volatile int ticket;
 		enum __Coroutine_State state:8;
Index: libcfa/src/concurrency/io/call.cfa.in
===================================================================
--- libcfa/src/concurrency/io/call.cfa.in	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/io/call.cfa.in	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -47,37 +47,30 @@
 	#include "kernel/fwd.hfa"
 
-	#if defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_IO_DRAIN) && defined(CFA_HAVE_IOSQE_ASYNC)
-		#define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_ASYNC)
-	#elif defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_ASYNC)
-		#define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_ASYNC)
-	#elif defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_IO_DRAIN)
-		#define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN)
-	#elif defined(CFA_HAVE_IOSQE_IO_DRAIN) && defined(CFA_HAVE_IOSQE_ASYNC)
-		#define REGULAR_FLAGS (IOSQE_IO_DRAIN | IOSQE_ASYNC)
-	#elif defined(CFA_HAVE_IOSQE_FIXED_FILE)
-		#define REGULAR_FLAGS (IOSQE_FIXED_FILE)
-	#elif defined(CFA_HAVE_IOSQE_IO_DRAIN)
-		#define REGULAR_FLAGS (IOSQE_IO_DRAIN)
-	#elif defined(CFA_HAVE_IOSQE_ASYNC)
-		#define REGULAR_FLAGS (IOSQE_ASYNC)
-	#else
-		#define REGULAR_FLAGS (0)
-	#endif
-
-	#if defined(CFA_HAVE_IOSQE_IO_LINK) && defined(CFA_HAVE_IOSQE_IO_HARDLINK)
-		#define LINK_FLAGS (IOSQE_IO_LINK | IOSQE_IO_HARDLINK)
-	#elif defined(CFA_HAVE_IOSQE_IO_LINK)
-		#define LINK_FLAGS (IOSQE_IO_LINK)
-	#elif defined(CFA_HAVE_IOSQE_IO_HARDLINK)
-		#define LINK_FLAGS (IOSQE_IO_HARDLINK)
-	#else
-		#define LINK_FLAGS (0)
-	#endif
-
-	#if defined(CFA_HAVE_SPLICE_F_FD_IN_FIXED)
-		#define SPLICE_FLAGS (SPLICE_F_FD_IN_FIXED)
-	#else
-		#define SPLICE_FLAGS (0)
-	#endif
+	static const __u8 REGULAR_FLAGS = 0
+		#if defined(CFA_HAVE_IOSQE_FIXED_FILE)
+			| IOSQE_FIXED_FILE
+		#endif
+		#if defined(CFA_HAVE_IOSQE_IO_DRAIN)
+			| IOSQE_IO_DRAIN
+		#endif
+		#if defined(CFA_HAVE_IOSQE_ASYNC)
+			| IOSQE_ASYNC
+		#endif
+	;
+
+	static const __u32 LINK_FLAGS = 0
+		#if defined(CFA_HAVE_IOSQE_IO_LINK)
+			| IOSQE_IO_LINK
+		#endif
+		#if defined(CFA_HAVE_IOSQE_IO_HARDLINK)
+			| IOSQE_IO_HARDLINK
+		#endif
+	;
+
+	static const __u32 SPLICE_FLAGS = 0
+		#if defined(CFA_HAVE_SPLICE_F_FD_IN_FIXED)
+			| SPLICE_F_FD_IN_FIXED
+		#endif
+	;
 
 	extern [* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data );
Index: libcfa/src/concurrency/io/setup.cfa
===================================================================
--- libcfa/src/concurrency/io/setup.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/io/setup.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -149,4 +149,5 @@
 		id.full_proc = false;
 		id.id = doregister(&id);
+		kernelTLS.this_proc_id = &id;
 		__cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
 
@@ -180,5 +181,5 @@
 					kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
 				#endif
-				__post( io_ctx->sem, &id );
+				post( io_ctx->sem );
 			}
 		}
@@ -235,5 +236,5 @@
 			if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {
 
-				ready_schedule_lock( (struct __processor_id_t *)active_processor() );
+				ready_schedule_lock();
 
 					// This is the tricky case
@@ -250,8 +251,8 @@
 					// Fixup the thread state
 					thrd.state = Blocked;
-					thrd.ticket = 0;
+					thrd.ticket = TICKET_BLOCKED;
 					thrd.preempted = __NO_PREEMPTION;
 
-				ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
+				ready_schedule_unlock();
 
 				// Pretend like the thread was blocked all along
@@ -275,5 +276,5 @@
 			}
 		} else {
-			unpark( &thrd );
+			post( this.thrd.sem );
 		}
 
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/kernel.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -108,5 +108,5 @@
 static $thread * __next_thread_slow(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static void __wake_one(struct __processor_id_t * id, cluster * cltr);
+static void __wake_one(cluster * cltr);
 
 static void push  (__cluster_idles & idles, processor & proc);
@@ -252,4 +252,5 @@
 		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
 		/* paranoid */ verify( thrd_dst->context.SP );
+		/* paranoid */ verify( thrd_dst->state != Halted );
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
@@ -281,5 +282,5 @@
 		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// The thread was preempted, reschedule it and reset the flag
-			__schedule_thread( (__processor_id_t*)this, thrd_dst );
+			__schedule_thread( thrd_dst );
 			break RUNNING;
 		}
@@ -287,7 +288,6 @@
 		if(unlikely(thrd_dst->state == Halted)) {
 			// The thread has halted, it should never be scheduled/run again
-			// We may need to wake someone up here since
-			unpark( this->destroyer );
-			this->destroyer = 0p;
+			// finish the thread
+			__thread_finish( thrd_dst );
 			break RUNNING;
 		}
@@ -299,8 +299,8 @@
 		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
 		switch(old_ticket) {
-			case 1:
+			case TICKET_RUNNING:
 				// This is case 1, the regular case, nothing more is needed
 				break RUNNING;
-			case 2:
+			case TICKET_UNBLOCK:
 				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
 				// In this case, just run it again.
@@ -358,8 +358,9 @@
 // Scheduler routines
 // KERNEL ONLY
-void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
+void __schedule_thread( $thread * thrd ) {
 	/* paranoid */ verify( thrd );
 	/* paranoid */ verify( thrd->state != Halted );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
 	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
@@ -374,8 +375,8 @@
 	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
 
-	ready_schedule_lock  ( id );
+	ready_schedule_lock();
 		push( thrd->curr_cluster, thrd );
-		__wake_one(id, thrd->curr_cluster);
-	ready_schedule_unlock( id );
+		__wake_one(thrd->curr_cluster);
+	ready_schedule_unlock();
 
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -384,11 +385,13 @@
 // KERNEL ONLY
 static inline $thread * __next_thread(cluster * this) with( *this ) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	ready_schedule_lock();
 		$thread * thrd = pop( this );
-	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	return thrd;
 }
@@ -396,40 +399,44 @@
 // KERNEL ONLY
 static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	ready_schedule_lock();
 		$thread * thrd = pop_slow( this );
-	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	return thrd;
 }
 
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark(  struct __processor_id_t * id, $thread * thrd ) {
+void unpark( $thread * thrd ) {
+	if( !thrd ) return;
+
+	/* paranoid */ verify( kernelTLS.this_proc_id );
+	bool full = kernelTLS.this_proc_id->full_proc;
+	if(full) disable_interrupts();
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
 	switch(old_ticket) {
-		case 1:
+		case TICKET_RUNNING:
 			// Wake won the race, the thread will reschedule/rerun itself
 			break;
-		case 0:
+		case TICKET_BLOCKED:
 			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
 			/* paranoid */ verify( thrd->state == Blocked );
 
 			// Wake lost the race,
-			__schedule_thread( id, thrd );
+			__schedule_thread( thrd );
 			break;
 		default:
 			// This makes no sense, something is wrong abort
-			abort();
-	}
-}
-
-void unpark( $thread * thrd ) {
-	if( !thrd ) return;
-
-	disable_interrupts();
-	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
-	enable_interrupts( __cfaabi_dbg_ctx );
+			abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
+	}
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	if(full) enable_interrupts( __cfaabi_dbg_ctx );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 }
 
@@ -448,9 +455,28 @@
 }
 
-// KERNEL ONLY
-void __leave_thread() {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	abort();
+extern "C" {
+	// Leave the thread monitor
+	// last routine called by a thread.
+	// Should never return
+	void __cfactx_thrd_leave() {
+		$thread * thrd = TL_GET( this_thread );
+		$monitor * this = &thrd->self_mon;
+
+		// Lock the monitor now
+		lock( this->lock __cfaabi_dbg_ctx2 );
+
+		disable_interrupts();
+
+		thrd->state = Halted;
+		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
+		if( thrd != this->owner || this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
+
+		// Leave the thread
+		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+		returnToKernel();
+		abort();
+
+		// Control flow should never reach here!
+	}
 }
 
@@ -486,7 +512,7 @@
 //=============================================================================================
 // Wake a thread from the front if there are any
-static void __wake_one(struct __processor_id_t * id, cluster * this) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	/* paranoid */ verify( ready_schedule_islocked( id ) );
+static void __wake_one(cluster * this) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ready_schedule_islocked() );
 
 	// Check if there is a sleeping processor
@@ -506,5 +532,5 @@
 	#endif
 
-	/* paranoid */ verify( ready_schedule_islocked( id ) );
+	/* paranoid */ verify( ready_schedule_islocked() );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
@@ -709,4 +735,8 @@
 		this.print_halts = true;
 	}
+
+	void print_stats_now( cluster & this, int flags ) {
+		__print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
+	}
 #endif
 // Local Variables: //
Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/kernel.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -79,8 +79,4 @@
 	// Handle to pthreads
 	pthread_t kernel_thread;
-
-	// RunThread data
-	// Action to do after a thread is ran
-	$thread * destroyer;
 
 	// Preemption data
Index: libcfa/src/concurrency/kernel/fwd.hfa
===================================================================
--- libcfa/src/concurrency/kernel/fwd.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/kernel/fwd.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -35,7 +35,8 @@
 	extern "Cforall" {
 		extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
-			struct $thread    * volatile this_thread;
-			struct processor  * volatile this_processor;
-			struct __stats_t  * volatile this_stats;
+			struct $thread          * volatile this_thread;
+			struct processor        * volatile this_processor;
+			struct __processor_id_t * volatile this_proc_id;
+			struct __stats_t        * volatile this_stats;
 
 			struct {
Index: libcfa/src/concurrency/kernel/startup.cfa
===================================================================
--- libcfa/src/concurrency/kernel/startup.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/kernel/startup.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -122,4 +122,5 @@
 	NULL,
 	NULL,
+	NULL,
 	{ 1, false, false },
 };
@@ -212,4 +213,5 @@
 	//initialize the global state variables
 	kernelTLS.this_processor = mainProcessor;
+	kernelTLS.this_proc_id   = (__processor_id_t*)mainProcessor;
 	kernelTLS.this_thread    = mainThread;
 
@@ -227,5 +229,5 @@
 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread((__processor_id_t *)mainProcessor, mainThread);
+	__schedule_thread(mainThread);
 
 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
@@ -324,4 +326,5 @@
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;
+	kernelTLS.this_proc_id   = (__processor_id_t*)proc;
 	kernelTLS.this_thread    = 0p;
 	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
@@ -441,5 +444,5 @@
 
 static void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
-	ticket = 1;
+	ticket = TICKET_RUNNING;
 	state = Start;
 	self_cor{ info };
@@ -474,5 +477,4 @@
 	this.cltr = &_cltr;
 	full_proc = true;
-	destroyer = 0p;
 	do_terminate = false;
 	preemption_alarm = 0p;
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -33,12 +33,12 @@
 }
 
-void __schedule_thread( struct __processor_id_t *, $thread * )
+void __schedule_thread( $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (2)))
+	__attribute__((nonnull (1)))
 #endif
 ;
 
-//Block current thread and release/wake-up the following resources
-void __leave_thread() __attribute__((noreturn));
+//release/wake-up the following resources
+void __thread_finish( $thread * thrd );
 
 //-----------------------------------------------------------------------------
@@ -63,24 +63,7 @@
 )
 
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd );
-
-static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-	for() {
-		struct $thread * expected = this.ptr;
-		if(expected == 1p) return false;
-		if(expected == 0p) {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				return false;
-			}
-		}
-		else {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				__unpark( id, expected );
-				return true;
-			}
-		}
-	}
-}
+#define TICKET_BLOCKED (-1) // thread is blocked
+#define TICKET_RUNNING ( 0) // thread is running
+#define TICKET_UNBLOCK ( 1) // thread should ignore next block
 
 //-----------------------------------------------------------------------------
@@ -197,7 +180,9 @@
 // Reader side : acquire when using the ready queue to schedule but not
 //  creating/destroying queues
-static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+
+	unsigned iproc = kernelTLS.this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 
@@ -221,7 +206,9 @@
 }
 
-static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+
+	unsigned iproc = kernelTLS.this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 	/*paranoid*/ verify(data[iproc].lock);
@@ -235,5 +222,7 @@
 
 #ifdef __CFA_WITH_VERIFY__
-	static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+	static inline bool ready_schedule_islocked(void) {
+		/*paranoid*/ verify( kernelTLS.this_proc_id );
+		__processor_id_t * proc = kernelTLS.this_proc_id;
 		return __scheduler_lock->data[proc->id].owned;
 	}
Index: libcfa/src/concurrency/monitor.cfa
===================================================================
--- libcfa/src/concurrency/monitor.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/monitor.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -281,58 +281,29 @@
 }
 
-extern "C" {
-	// Leave the thread monitor
-	// last routine called by a thread.
-	// Should never return
-	void __cfactx_thrd_leave() {
-		$thread * thrd = TL_GET( this_thread );
-		$monitor * this = &thrd->self_mon;
-
-		// Lock the monitor now
-		lock( this->lock __cfaabi_dbg_ctx2 );
-
-		disable_interrupts();
-
-		thrd->state = Halted;
-
-		/* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
-
-		// Leaving a recursion level, decrement the counter
-		this->recursion -= 1;
-
-		// If we haven't left the last level of recursion
-		// it must mean there is an error
-		if( this->recursion != 0) { abort( "Thread internal monitor has unbalanced recursion" ); }
-
-		// Fetch the next thread, can be null
-		$thread * new_owner = next_thread( this );
-
-		// Release the monitor lock
-		unlock( this->lock );
-
-		// Unpark the next owner if needed
-		/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-		/* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
-		/* paranoid */ verify( thrd->state == Halted );
-
-		kernelTLS.this_processor->destroyer = new_owner;
-
-		// Leave the thread
-		__leave_thread();
-
-		// Control flow should never reach here!
-	}
-}
-
-// Join a thread
-forall( dtype T | is_thread(T) )
-T & join( T & this ) {
-	$monitor *    m = get_monitor(this);
-	void (*dtor)(T& mutex this) = ^?{};
-	monitor_dtor_guard_t __guard = { &m, (fptr_t)dtor, true };
-	{
-		return this;
-	}
+void __thread_finish( $thread * thrd ) {
+	$monitor * this = &thrd->self_mon;
+
+	// Lock the monitor now
+	/* paranoid */ verify( this->lock.lock );
+	/* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( thrd->state == Halted );
+	/* paranoid */ verify( this->recursion == 1 );
+
+	// Leaving a recursion level, decrement the counter
+	this->recursion -= 1;
+	this->owner = 0p;
+
+	// Fetch the next thread, can be null
+	$thread * new_owner = next_thread( this );
+
+	// Release the monitor lock
+	unlock( this->lock );
+
+	// Unpark the next owner if needed
+	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( thrd->state == Halted );
+	unpark( new_owner );
 }
 
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/preemption.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -38,5 +38,5 @@
 // FwdDeclarations : timeout handlers
 static void preempt( processor   * this );
-static void timeout( struct __processor_id_t * id, $thread * this );
+static void timeout( $thread * this );
 
 // FwdDeclarations : Signal handlers
@@ -91,5 +91,5 @@
 
 // Tick one frame of the Discrete Event Simulation for alarms
-static void tick_preemption( struct __processor_id_t * id ) {
+static void tick_preemption(void) {
 	alarm_node_t * node = 0p;							// Used in the while loop but cannot be declared in the while condition
 	alarm_list_t * alarms = &event_kernel->alarms;		// Local copy for ease of reading
@@ -109,9 +109,9 @@
 		}
 		else if( node->type == User ) {
-			timeout( id, node->thrd );
+			timeout( node->thrd );
 		}
 		else {
 			bool unpark_thd = node->callback(*node);
-			if (unpark_thd) timeout( id, node->thrd );
+			if (unpark_thd) timeout( node->thrd );
 		}
 
@@ -274,9 +274,9 @@
 
 // reserved for future use
-static void timeout( struct __processor_id_t * id, $thread * this ) {
+static void timeout( $thread * this ) {
 	#if !defined( __CFA_NO_STATISTICS__ )
 		kernelTLS.this_stats = this->curr_cluster->stats;
 	#endif
-	__unpark( id, this );
+	unpark( this );
 }
 
@@ -417,4 +417,5 @@
 	id.full_proc = false;
 	id.id = doregister(&id);
+	kernelTLS.this_proc_id = &id;
 
 	// Block sigalrms to control when they arrive
@@ -462,5 +463,5 @@
 			// __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
 			lock( event_kernel->lock __cfaabi_dbg_ctx2 );
-			tick_preemption( &id );
+			tick_preemption();
 			unlock( event_kernel->lock );
 			break;
Index: libcfa/src/concurrency/snzi.hfa
===================================================================
--- libcfa/src/concurrency/snzi.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/snzi.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -36,5 +36,5 @@
 static inline void depart( __snzi_node_t & );
 
-#define __snzi_half -1
+static const int __snzi_half = -1;
 
 //--------------------------------------------------
Index: libcfa/src/concurrency/thread.cfa
===================================================================
--- libcfa/src/concurrency/thread.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/thread.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -19,4 +19,5 @@
 
 #include "kernel_private.hfa"
+#include "exception.hfa"
 
 #define __CFA_INVOKE_PRIVATE__
@@ -28,5 +29,5 @@
 	context{ 0p, 0p };
 	self_cor{ name, storage, storageSize };
-	ticket = 1;
+	ticket = TICKET_RUNNING;
 	state = Start;
 	preempted = __NO_PREEMPTION;
@@ -58,4 +59,60 @@
 }
 
+FORALL_DATA_INSTANCE(ThreadCancelled, (dtype thread_t), (thread_t))
+
+forall(dtype T)
+void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
+	dst->virtual_table = src->virtual_table;
+	dst->the_thread = src->the_thread;
+	dst->the_exception = src->the_exception;
+}
+
+forall(dtype T)
+const char * msg(ThreadCancelled(T) *) {
+	return "ThreadCancelled";
+}
+
+forall(dtype T)
+static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
+	abort( "Unhandled thread cancellation.\n" );
+}
+
+forall(dtype T | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T)))
+void ?{}( thread_dtor_guard_t & this,
+		T & thrd, void(*defaultResumptionHandler)(ThreadCancelled(T) &)) {
+	$monitor * m = get_monitor(thrd);
+	void (*dtor)(T& mutex this) = ^?{};
+	bool join = defaultResumptionHandler != (void(*)(ThreadCancelled(T)&))0;
+	(this.mg){&m, (void(*)())dtor, join};
+
+	// After the guard set-up and any wait, check for cancellation.
+	$thread * desc = get_thread(thrd);
+	struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
+	if ( likely( 0p == cancellation ) ) {
+		return;
+	} else if ( Cancelled == desc->state ) {
+		return;
+	}
+	desc->state = Cancelled;
+	if (!join) {
+		defaultResumptionHandler = default_thread_cancel_handler;
+	}
+
+	ThreadCancelled(T) except;
+	// TODO: Remove explicit vtable set once trac#186 is fixed.
+	except.virtual_table = &get_exception_vtable(&except);
+	except.the_thread = &thrd;
+	except.the_exception = __cfaehm_cancellation_exception( cancellation );
+	throwResume except;
+
+	except.the_exception->virtual_table->free( except.the_exception );
+	free( cancellation );
+	desc->self_cor.cancellation = 0p;
+}
+
+void ^?{}( thread_dtor_guard_t & this ) {
+	^(this.mg){};
+}
+
 //-----------------------------------------------------------------------------
 // Starting and stopping threads
@@ -70,5 +127,5 @@
 	verify( this_thrd->context.SP );
 
-	__schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd);
+	__schedule_thread( this_thrd );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
@@ -93,4 +150,11 @@
 }
 
+//-----------------------------------------------------------------------------
+forall(dtype T | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T)))
+T & join( T & this ) {
+	thread_dtor_guard_t guard = { this, defaultResumptionHandler };
+	return this;
+}
+
 // Local Variables: //
 // mode: c //
Index: libcfa/src/concurrency/thread.hfa
===================================================================
--- libcfa/src/concurrency/thread.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/concurrency/thread.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -22,12 +22,24 @@
 #include "kernel.hfa"
 #include "monitor.hfa"
+#include "exception.hfa"
 
 //-----------------------------------------------------------------------------
 // thread trait
 trait is_thread(dtype T) {
-      void ^?{}(T& mutex this);
-      void main(T& this);
-      $thread* get_thread(T& this);
+	void ^?{}(T& mutex this);
+	void main(T& this);
+	$thread* get_thread(T& this);
 };
+
+FORALL_DATA_EXCEPTION(ThreadCancelled, (dtype thread_t), (thread_t)) (
+	thread_t * the_thread;
+	exception_t * the_exception;
+);
+
+forall(dtype T)
+void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src);
+
+forall(dtype T)
+const char * msg(ThreadCancelled(T) *);
 
 // define that satisfies the trait without using the thread keyword
@@ -65,4 +77,12 @@
 static inline void ?{}($thread & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
 static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
+
+struct thread_dtor_guard_t {
+	monitor_dtor_guard_t mg;
+};
+
+forall( dtype T | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T)) )
+void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
+void ^?{}( thread_dtor_guard_t & this );
 
 //-----------------------------------------------------------------------------
@@ -108,5 +128,5 @@
 //----------
 // join
-forall( dtype T | is_thread(T) )
+forall( dtype T | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T)) )
 T & join( T & this );
 
Index: libcfa/src/exception.c
===================================================================
--- libcfa/src/exception.c	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/exception.c	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -9,7 +9,7 @@
 // Author           : Andrew Beach
 // Created On       : Mon Jun 26 15:13:00 2017
-// Last Modified By : Peter A. Buhr
-// Last Modified On : Sat Aug 29 15:52:22 2020
-// Update Count     : 34
+// Last Modified By : Andrew Beach
+// Last Modified On : Tue Oct 27 16:27:00 2020
+// Update Count     : 35
 //
 
@@ -17,9 +17,10 @@
 #include <stddef.h> // for size_t
 
+#include <unwind.h> // for struct _Unwind_Exception {...};
+
 #include "exception.h"
 
 #include <stdlib.h>
 #include <stdio.h>
-#include <unwind.h>
 #include <bits/debug.hfa>
 #include "concurrency/invoke.h"
@@ -113,10 +114,4 @@
 
 // MEMORY MANAGEMENT =========================================================
-
-struct __cfaehm_node {
-	struct _Unwind_Exception unwind_exception;
-	struct __cfaehm_node * next;
-	int handler_index;
-};
 
 #define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node)))
Index: libcfa/src/exception.h
===================================================================
--- libcfa/src/exception.h	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/exception.h	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -5,19 +5,25 @@
 // file "LICENCE" distributed with Cforall.
 //
-// exception.h -- Builtins for exception handling.
+// exception.h -- Internal exception handling definitions.
 //
 // Author           : Andrew Beach
 // Created On       : Mon Jun 26 15:11:00 2017
 // Last Modified By : Andrew Beach
-// Last Modified On : Tue May 19 14:17:00 2020
-// Update Count     : 10
+// Last Modified On : Tue Oct 27 14:45:00 2020
+// Update Count     : 11
 //
 
 #pragma once
 
+// This could be considered several headers. All are internal to the exception
+// system but are needed depending on whether they are C/Cforall code and
+// whether or not they are part of the builtins.
 
 #ifdef __cforall
 extern "C" {
 #endif
+
+// Included in C code or the built-ins.
+#if !defined(__cforall) || defined(__cforall_builtins__)
 
 struct __cfaehm_base_exception_t;
@@ -47,7 +53,7 @@
 // Function catches termination exceptions.
 void __cfaehm_try_terminate(
-    void (*try_block)(),
-    void (*catch_block)(int index, exception_t * except),
-    int (*match_block)(exception_t * except));
+	void (*try_block)(),
+	void (*catch_block)(int index, exception_t * except),
+	int (*match_block)(exception_t * except));
 
 // Clean-up the exception in catch blocks.
@@ -56,20 +62,39 @@
 // Data structure creates a list of resume handlers.
 struct __cfaehm_try_resume_node {
-    struct __cfaehm_try_resume_node * next;
-    _Bool (*handler)(exception_t * except);
+	struct __cfaehm_try_resume_node * next;
+	_Bool (*handler)(exception_t * except);
 };
 
 // These act as constructor and destructor for the resume node.
 void __cfaehm_try_resume_setup(
-    struct __cfaehm_try_resume_node * node,
-    _Bool (*handler)(exception_t * except));
+	struct __cfaehm_try_resume_node * node,
+	_Bool (*handler)(exception_t * except));
 void __cfaehm_try_resume_cleanup(
-    struct __cfaehm_try_resume_node * node);
+	struct __cfaehm_try_resume_node * node);
 
 // Check for a standard way to call fake deconstructors.
 struct __cfaehm_cleanup_hook {};
 
+#endif
+
+// Included in C code and the library.
+#if !defined(__cforall) || !defined(__cforall_builtins__)
+struct __cfaehm_node {
+	struct _Unwind_Exception unwind_exception;
+	struct __cfaehm_node * next;
+	int handler_index;
+};
+
+static inline exception_t * __cfaehm_cancellation_exception(
+		struct _Unwind_Exception * unwind_exception ) {
+	return (exception_t *)(1 + (struct __cfaehm_node *)unwind_exception);
+}
+#endif
+
 #ifdef __cforall
 }
+
+// Built-ins not visible in C.
+#if defined(__cforall_builtins__)
 
 // Not all the built-ins can be expressed in C. These can't be
@@ -124,2 +149,4 @@
 
 #endif
+
+#endif
Index: libcfa/src/stdhdr/unwind.h
===================================================================
--- libcfa/src/stdhdr/unwind.h	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
+++ libcfa/src/stdhdr/unwind.h	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -0,0 +1,31 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// unwind.h -- Safely include unwind.h from Cforall.
+//
+// Author           : Andrew Beach
+// Created On       : Wed Oct 28 11:25:00 2020
+// Last Modified By : Andrew Beach
+// Last Modified On : Wed Oct 28 14:11:00 2020
+// Update Count     : 0
+//
+
+#pragma once
+
+extern "C" {
+// This prevents some GCC pragmas that CFA can't handle from being generated.
+#define HIDE_EXPORTS
+
+// Always include the header and use its header guard.
+#include_next <unwind.h>
+
+#undef HIDE_EXPORTS
+}
+
+// Local Variables: //
+// mode: c //
+// tab-width: 4 //
+// End: //
Index: libcfa/src/stdlib.cfa
===================================================================
--- libcfa/src/stdlib.cfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/stdlib.cfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -58,6 +58,7 @@
 
 forall( dtype T | sized(T) | { void ^?{}( T & ); } )
-void adelete( size_t dim, T arr[] ) {
+void adelete( T arr[] ) {
 	if ( arr ) {										// ignore null
+		size_t dim = malloc_size( arr ) / sizeof( T );
 		for ( int i = dim - 1; i >= 0; i -= 1 ) {		// reverse allocation order, must be unsigned
 			^(arr[i]){};								// run destructor
@@ -68,6 +69,7 @@
 
 forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } )
-void adelete( size_t dim, T arr[], Params rest ) {
+void adelete( T arr[], Params rest ) {
 	if ( arr ) {										// ignore null
+		size_t dim = malloc_size( arr ) / sizeof( T );
 		for ( int i = dim - 1; i >= 0; i -= 1 ) {		// reverse allocation order, must be unsigned
 			^(arr[i]){};								// run destructor
Index: libcfa/src/stdlib.hfa
===================================================================
--- libcfa/src/stdlib.hfa	(revision 4b30e8ccaaf654cc01c74faafd80728628e173c5)
+++ libcfa/src/stdlib.hfa	(revision c28ea4e2bf0768f205a4ffa626565b1cbfedfb95)
@@ -263,6 +263,6 @@
 // Cforall allocation/deallocation and constructor/destructor, array types
 forall( dtype T | sized(T), ttype Params | { void ?{}( T &, Params ); } ) T * anew( size_t dim, Params p );
-forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( size_t dim, T arr[] );
-forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } ) void adelete( size_t dim, T arr[], Params rest );
+forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( T arr[] );
+forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } ) void adelete( T arr[], Params rest );
 
 //---------------------------------------
