Index: libcfa/src/concurrency/CtxSwitch-arm.S
===================================================================
--- libcfa/src/concurrency/CtxSwitch-arm.S	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/CtxSwitch-arm.S	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -13,8 +13,8 @@
 	.text
 	.align  2
-	.global CtxSwitch
-	.type   CtxSwitch, %function
+	.global __cfactx_switch
+	.type   __cfactx_switch, %function
 
-CtxSwitch:
+__cfactx_switch:
 	@ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification)
 	@ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved
@@ -52,11 +52,11 @@
 	mov r15, r14
 	#endif // R9_SPECIAL
-	
+
 	.text
 	.align  2
-	.global CtxInvokeStub
-	.type   CtxInvokeStub, %function
+	.global __cfactx_invoke_stub
+	.type   __cfactx_invoke_stub, %function
 
-CtxInvokeStub:
+__cfactx_invoke_stub:
         ldmfd r13!, {r0-r1}
 	mov r15, r1
Index: libcfa/src/concurrency/CtxSwitch-i386.S
===================================================================
--- libcfa/src/concurrency/CtxSwitch-i386.S	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/CtxSwitch-i386.S	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -43,7 +43,7 @@
 	.text
 	.align 2
-	.globl CtxSwitch
-	.type  CtxSwitch, @function
-CtxSwitch:
+	.globl __cfactx_switch
+	.type  __cfactx_switch, @function
+__cfactx_switch:
 
 	// Copy the "from" context argument from the stack to register eax
@@ -83,5 +83,5 @@
 
 	ret
-	.size  CtxSwitch, .-CtxSwitch
+	.size  __cfactx_switch, .-__cfactx_switch
 
 // Local Variables: //
Index: libcfa/src/concurrency/CtxSwitch-x86_64.S
===================================================================
--- libcfa/src/concurrency/CtxSwitch-x86_64.S	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/CtxSwitch-x86_64.S	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -44,7 +44,7 @@
 	.text
 	.align 2
-	.globl CtxSwitch
-	.type  CtxSwitch, @function
-CtxSwitch:
+	.globl __cfactx_switch
+	.type  __cfactx_switch, @function
+__cfactx_switch:
 
 	// Save volatile registers on the stack.
@@ -77,5 +77,5 @@
 
 	ret
-	.size  CtxSwitch, .-CtxSwitch
+	.size  __cfactx_switch, .-__cfactx_switch
 
 //-----------------------------------------------------------------------------
@@ -83,11 +83,11 @@
 	.text
 	.align 2
-	.globl CtxInvokeStub
-	.type	 CtxInvokeStub, @function
-CtxInvokeStub:
+	.globl __cfactx_invoke_stub
+	.type	 __cfactx_invoke_stub, @function
+__cfactx_invoke_stub:
 	movq %rbx, %rdi
 	movq %r12, %rsi
 	jmp *%r13
-	.size  CtxInvokeStub, .-CtxInvokeStub
+	.size  __cfactx_invoke_stub, .-__cfactx_invoke_stub
 
 // Local Variables: //
Index: libcfa/src/concurrency/coroutine.cfa
===================================================================
--- libcfa/src/concurrency/coroutine.cfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/coroutine.cfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -187,5 +187,5 @@
 // is not inline (We can't inline Cforall in C)
 extern "C" {
-	void __leave_coroutine( struct coroutine_desc * src ) {
+	void __cfactx_cor_leave( struct coroutine_desc * src ) {
 		coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
 
@@ -204,5 +204,5 @@
 	}
 
-	struct coroutine_desc * __finish_coroutine(void) {
+	struct coroutine_desc * __cfactx_cor_finish(void) {
 		struct coroutine_desc * cor = kernelTLS.this_thread->curr_cor;
 
Index: libcfa/src/concurrency/coroutine.hfa
===================================================================
--- libcfa/src/concurrency/coroutine.hfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/coroutine.hfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -61,12 +61,12 @@
 // Start coroutine routines
 extern "C" {
-	void CtxInvokeCoroutine(void (*main)(void *), void * this);
+	void __cfactx_invoke_coroutine(void (*main)(void *), void * this);
 
 	forall(dtype T)
-	void CtxStart(void (*main)(T &), struct coroutine_desc * cor, T & this, void (*invoke)(void (*main)(void *), void *));
+	void __cfactx_start(void (*main)(T &), struct coroutine_desc * cor, T & this, void (*invoke)(void (*main)(void *), void *));
 
-	extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
+	extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
 
-	extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
+	extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
 }
 
@@ -82,6 +82,6 @@
 	// context switch to specified coroutine
 	verify( dst->context.SP );
-	CtxSwitch( &src->context, &dst->context );
-	// when CtxSwitch returns we are back in the src coroutine
+	__cfactx_switch( &src->context, &dst->context );
+	// when __cfactx_switch returns we are back in the src coroutine
 
 	// set state of new coroutine to active
@@ -89,5 +89,5 @@
 
 	if( unlikely(src->cancellation != 0p) ) {
-		_CtxCoroutine_Unwind(src->cancellation, src);
+		__cfactx_coroutine_unwind(src->cancellation, src);
 	}
 }
@@ -130,5 +130,5 @@
 		TL_GET( this_thread )->curr_cor = dst;
 		__stack_prepare(&dst->stack, 65000);
-		CtxStart(main, dst, cor, CtxInvokeCoroutine);
+		__cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
 		TL_GET( this_thread )->curr_cor = src;
 	}
Index: libcfa/src/concurrency/invoke.c
===================================================================
--- libcfa/src/concurrency/invoke.c	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/invoke.c	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -29,16 +29,17 @@
 // Called from the kernel when starting a coroutine or task so must switch back to user mode.
 
-extern void __leave_coroutine ( struct coroutine_desc * );
-extern struct coroutine_desc * __finish_coroutine(void);
-extern void __leave_thread_monitor();
+extern struct coroutine_desc * __cfactx_cor_finish(void);
+extern void __cfactx_cor_leave ( struct coroutine_desc * );
+extern void __cfactx_thrd_leave();
+
 extern void disable_interrupts() OPTIONAL_THREAD;
 extern void enable_interrupts( __cfaabi_dbg_ctx_param );
 
-void CtxInvokeCoroutine(
+void __cfactx_invoke_coroutine(
 	void (*main)(void *),
 	void *this
 ) {
 	// Finish setting up the coroutine by setting its state
-	struct coroutine_desc * cor = __finish_coroutine();
+	struct coroutine_desc * cor = __cfactx_cor_finish();
 
 	// Call the main of the coroutine
@@ -46,9 +47,9 @@
 
 	//Final suspend, should never return
-	__leave_coroutine( cor );
+	__cfactx_cor_leave( cor );
 	__cabi_abort( "Resumed dead coroutine" );
 }
 
-static _Unwind_Reason_Code _CtxCoroutine_UnwindStop(
+static _Unwind_Reason_Code __cfactx_coroutine_unwindstop(
 	__attribute((__unused__)) int version,
 	_Unwind_Action actions,
@@ -61,5 +62,5 @@
 		// We finished unwinding the coroutine,
 		// leave it
-		__leave_coroutine( param );
+		__cfactx_cor_leave( param );
 		__cabi_abort( "Resumed dead coroutine" );
 	}
@@ -69,12 +70,12 @@
 }
 
-void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
-void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
-	_Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
+void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
+void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
+	_Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
 	printf("UNWIND ERROR %d after force unwind\n", ret);
 	abort();
 }
 
-void CtxInvokeThread(
+void __cfactx_invoke_thread(
 	void (*main)(void *),
 	void *this
@@ -93,9 +94,9 @@
 	// The order of these 4 operations is very important
 	//Final suspend, should never return
-	__leave_thread_monitor();
+	__cfactx_thrd_leave();
 	__cabi_abort( "Resumed dead thread" );
 }
 
-void CtxStart(
+void __cfactx_start(
 	void (*main)(void *),
 	struct coroutine_desc * cor,
@@ -139,5 +140,5 @@
 
 	fs->dummyReturn = NULL;
-	fs->rturn = CtxInvokeStub;
+	fs->rturn = __cfactx_invoke_stub;
 	fs->fixedRegisters[0] = main;
 	fs->fixedRegisters[1] = this;
@@ -157,5 +158,5 @@
 	struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
 
-	fs->intRegs[8] = CtxInvokeStub;
+	fs->intRegs[8] = __cfactx_invoke_stub;
 	fs->arg[0] = this;
 	fs->arg[1] = invoke;
Index: libcfa/src/concurrency/invoke.h
===================================================================
--- libcfa/src/concurrency/invoke.h	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/invoke.h	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -96,5 +96,5 @@
 
 	struct coroutine_desc {
-		// context that is switch during a CtxSwitch
+		// context that is switched during a __cfactx_switch
 		struct __stack_context_t context;
 
@@ -161,5 +161,5 @@
 	struct thread_desc {
 		// Core threading fields
-		// context that is switch during a CtxSwitch
+		// context that is switched during a __cfactx_switch
 		struct __stack_context_t context;
 
@@ -204,5 +204,5 @@
 		}
 
-		static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) /*__attribute__((const))*/ {
+		static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) __attribute__((const)) {
 			return this.node.[next, prev];
 		}
@@ -254,6 +254,6 @@
 
 	// assembler routines that performs the context switch
-	extern void CtxInvokeStub( void );
-	extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
+	extern void __cfactx_invoke_stub( void );
+	extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
 	// void CtxStore ( void * this ) asm ("CtxStore");
 	// void CtxRet   ( void * dst  ) asm ("CtxRet");
Index: libcfa/src/concurrency/kernel.cfa
===================================================================
--- libcfa/src/concurrency/kernel.cfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/kernel.cfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -208,5 +208,5 @@
 }
 
-static void * CtxInvokeProcessor(void * arg);
+static void * __invoke_processor(void * arg);
 
 void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
@@ -224,5 +224,5 @@
 	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
 
-	this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this );
+	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
 
 	__cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
@@ -347,6 +347,6 @@
 		// set context switch to the thread that the processor is executing
 		verify( thrd_dst->context.SP );
-		CtxSwitch( &proc_cor->context, &thrd_dst->context );
-		// when CtxSwitch returns we are back in the processor coroutine
+		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
+		// when __cfactx_switch returns we are back in the processor coroutine
 
 		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -410,5 +410,5 @@
 		#endif
 		verify( proc_cor->context.SP );
-		CtxSwitch( &thrd_src->context, &proc_cor->context );
+		__cfactx_switch( &thrd_src->context, &proc_cor->context );
 		#if defined( __i386 ) || defined( __x86_64 )
 			__x87_load;
@@ -424,5 +424,5 @@
 // This is the entry point for processors (kernel threads)
 // It effectively constructs a coroutine by stealing the pthread stack
-static void * CtxInvokeProcessor(void * arg) {
+static void * __invoke_processor(void * arg) {
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;
@@ -502,5 +502,5 @@
 	kernelTLS.this_thread->curr_cor = dst;
 	__stack_prepare( &dst->stack, 65000 );
-	CtxStart(main, dst, this->runner, CtxInvokeCoroutine);
+	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);
 
 	verify( ! kernelTLS.preemption_state.enabled );
@@ -514,6 +514,6 @@
 	// context switch to specified coroutine
 	verify( dst->context.SP );
-	CtxSwitch( &src->context, &dst->context );
-	// when CtxSwitch returns we are back in the src coroutine
+	__cfactx_switch( &src->context, &dst->context );
+	// when __cfactx_switch returns we are back in the src coroutine
 
 	mainThread->curr_cor = &mainThread->self_cor;
@@ -535,5 +535,5 @@
 
 	// context switch to the processor
-	CtxSwitch( &src->context, &dst->context );
+	__cfactx_switch( &src->context, &dst->context );
 }
 
@@ -729,5 +729,5 @@
 
 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
-	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
+	// context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
 	// mainThread is on the ready queue when this call is made.
 	__kernel_first_resume( kernelTLS.this_processor );
Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/kernel.hfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -108,5 +108,5 @@
 static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }
 
-static inline [processor *&, processor *& ] __get( processor & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }
+static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }
 
 //-----------------------------------------------------------------------------
@@ -151,5 +151,5 @@
 static inline void ?{} (cluster & this, const char name[])        { this{name, default_preemption()}; }
 
-static inline [cluster *&, cluster *& ] __get( cluster & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }
+static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }
 
 static inline struct processor * active_processor() __attribute__((const)) { return TL_GET( this_processor ); } // UNSAFE
Index: libcfa/src/concurrency/kernel_private.hfa
===================================================================
--- libcfa/src/concurrency/kernel_private.hfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/kernel_private.hfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -71,5 +71,5 @@
 // Threads
 extern "C" {
-      void CtxInvokeThread(void (*main)(void *), void * this);
+      void __cfactx_invoke_thread(void (*main)(void *), void * this);
 }
 
Index: libcfa/src/concurrency/monitor.cfa
===================================================================
--- libcfa/src/concurrency/monitor.cfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/monitor.cfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -27,6 +27,6 @@
 //-----------------------------------------------------------------------------
 // Forward declarations
-static inline void set_owner ( monitor_desc * this, thread_desc * owner );
-static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
+static inline void __set_owner ( monitor_desc * this, thread_desc * owner );
+static inline void __set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
 static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
 static inline void reset_mask( monitor_desc * this );
@@ -80,193 +80,191 @@
 //-----------------------------------------------------------------------------
 // Enter/Leave routines
-
-
-extern "C" {
-	// Enter single monitor
-	static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
-		// Lock the monitor spinlock
-		lock( this->lock __cfaabi_dbg_ctx2 );
-		// Interrupts disable inside critical section
-		thread_desc * thrd = kernelTLS.this_thread;
-
-		__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
-
-		if( !this->owner ) {
-			// No one has the monitor, just take it
-			set_owner( this, thrd );
-
-			__cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
-		}
-		else if( this->owner == thrd) {
-			// We already have the monitor, just note how many times we took it
-			this->recursion += 1;
-
-			__cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
-		}
-		else if( is_accepted( this, group) ) {
-			// Some one was waiting for us, enter
-			set_owner( this, thrd );
-
-			// Reset mask
-			reset_mask( this );
-
-			__cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
-		}
-		else {
-			__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
-
-			// Some one else has the monitor, wait in line for it
-			/* paranoid */ verify( thrd->next == 0p );
-			append( this->entry_queue, thrd );
-			/* paranoid */ verify( thrd->next == 1p );
-
-			unlock( this->lock );
-			park();
-
-			__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
-
-			/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-			return;
-		}
+// Enter single monitor
+static void __enter( monitor_desc * this, const __monitor_group_t & group ) {
+	// Lock the monitor spinlock
+	lock( this->lock __cfaabi_dbg_ctx2 );
+	// Interrupts disabled inside critical section
+	thread_desc * thrd = kernelTLS.this_thread;
+
+	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
+
+	if( !this->owner ) {
+		// No one has the monitor, just take it
+		__set_owner( this, thrd );
+
+		__cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
+	}
+	else if( this->owner == thrd) {
+		// We already have the monitor, just note how many times we took it
+		this->recursion += 1;
+
+		__cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
+	}
+	else if( is_accepted( this, group) ) {
+		// Some one was waiting for us, enter
+		__set_owner( this, thrd );
+
+		// Reset mask
+		reset_mask( this );
+
+		__cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
+	}
+	else {
+		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
+
+		// Some one else has the monitor, wait in line for it
+		/* paranoid */ verify( thrd->next == 0p );
+		append( this->entry_queue, thrd );
+		/* paranoid */ verify( thrd->next == 1p );
+
+		unlock( this->lock );
+		park();
 
 		__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
 
 		/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-		/* paranoid */ verify( this->lock.lock );
-
-		// Release the lock and leave
+		return;
+	}
+
+	__cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
+
+	/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+	/* paranoid */ verify( this->lock.lock );
+
+	// Release the lock and leave
+	unlock( this->lock );
+	return;
+}
+
+static void __dtor_enter( monitor_desc * this, fptr_t func ) {
+	// Lock the monitor spinlock
+	lock( this->lock __cfaabi_dbg_ctx2 );
+	// Interrupts disable inside critical section
+	thread_desc * thrd = kernelTLS.this_thread;
+
+	__cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
+
+
+	if( !this->owner ) {
+		__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
+
+		// No one has the monitor, just take it
+		__set_owner( this, thrd );
+
+		verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+
 		unlock( this->lock );
 		return;
 	}
-
-	static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
-		// Lock the monitor spinlock
-		lock( this->lock __cfaabi_dbg_ctx2 );
-		// Interrupts disable inside critical section
-		thread_desc * thrd = kernelTLS.this_thread;
-
-		__cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
-
-
-		if( !this->owner ) {
-			__cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
-
-			// No one has the monitor, just take it
-			set_owner( this, thrd );
-
-			verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-
-			unlock( this->lock );
-			return;
+	else if( this->owner == thrd) {
+	// We already have the monitor... but we're about to destroy it so the nesting will fail
+		// Abort!
+		abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
+	}
+
+	__lock_size_t count = 1;
+	monitor_desc ** monitors = &this;
+	__monitor_group_t group = { &this, 1, func };
+	if( is_accepted( this, group) ) {
+		__cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
+
+		// Wake the thread that is waiting for this
+		__condition_criterion_t * urgent = pop( this->signal_stack );
+		/* paranoid */ verify( urgent );
+
+		// Reset mask
+		reset_mask( this );
+
+		// Create the node specific to this wait operation
+		wait_ctx_primed( thrd, 0 )
+
+		// Some one else has the monitor, wait for him to finish and then run
+		unlock( this->lock );
+
+		// Release the next thread
+		/* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+		unpark( urgent->owner->waiting_thread );
+
+		// Park current thread waiting
+		park();
+
+		// Some one was waiting for us, enter
+		/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+	}
+	else {
+		__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
+
+		wait_ctx( thrd, 0 )
+		this->dtor_node = &waiter;
+
+		// Some one else has the monitor, wait in line for it
+		/* paranoid */ verify( thrd->next == 0p );
+		append( this->entry_queue, thrd );
+		/* paranoid */ verify( thrd->next == 1p );
+		unlock( this->lock );
+
+		// Park current thread waiting
+		park();
+
+		/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+		return;
+	}
+
+	__cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
+
+}
+
+// Leave single monitor
+void __leave( monitor_desc * this ) {
+	// Lock the monitor spinlock
+	lock( this->lock __cfaabi_dbg_ctx2 );
+
+	__cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
+
+	/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+
+	// Leaving a recursion level, decrement the counter
+	this->recursion -= 1;
+
+	// If we haven't left the last level of recursion
+	// it means we don't need to do anything
+	if( this->recursion != 0) {
+		__cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
+		unlock( this->lock );
+		return;
+	}
+
+	// Get the next thread, will be null on low contention monitor
+	thread_desc * new_owner = next_thread( this );
+
+	// Check the new owner is consistent with who we wake-up
+	// new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
+	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+
+	// We can now let other threads in safely
+	unlock( this->lock );
+
+	//We need to wake-up the thread
+	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+	unpark( new_owner );
+}
+
+// Leave single monitor for the last time
+void __dtor_leave( monitor_desc * this ) {
+	__cfaabi_dbg_debug_do(
+		if( TL_GET( this_thread ) != this->owner ) {
+			abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
 		}
-		else if( this->owner == thrd) {
-			// We already have the monitor... but where about to destroy it so the nesting will fail
-			// Abort!
-			abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
+		if( this->recursion != 1 ) {
+			abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
 		}
-
-		__lock_size_t count = 1;
-		monitor_desc ** monitors = &this;
-		__monitor_group_t group = { &this, 1, func };
-		if( is_accepted( this, group) ) {
-			__cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
-
-			// Wake the thread that is waiting for this
-			__condition_criterion_t * urgent = pop( this->signal_stack );
-			/* paranoid */ verify( urgent );
-
-			// Reset mask
-			reset_mask( this );
-
-			// Create the node specific to this wait operation
-			wait_ctx_primed( thrd, 0 )
-
-			// Some one else has the monitor, wait for him to finish and then run
-			unlock( this->lock );
-
-			// Release the next thread
-			/* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-			unpark( urgent->owner->waiting_thread );
-
-			// Park current thread waiting
-			park();
-
-			// Some one was waiting for us, enter
-			/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-		}
-		else {
-			__cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
-
-			wait_ctx( thrd, 0 )
-			this->dtor_node = &waiter;
-
-			// Some one else has the monitor, wait in line for it
-			/* paranoid */ verify( thrd->next == 0p );
-			append( this->entry_queue, thrd );
-			/* paranoid */ verify( thrd->next == 1p );
-			unlock( this->lock );
-
-			// Park current thread waiting
-			park();
-
-			/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-			return;
-		}
-
-		__cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
-
-	}
-
-	// Leave single monitor
-	void __leave_monitor_desc( monitor_desc * this ) {
-		// Lock the monitor spinlock
-		lock( this->lock __cfaabi_dbg_ctx2 );
-
-		__cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
-
-		/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-
-		// Leaving a recursion level, decrement the counter
-		this->recursion -= 1;
-
-		// If we haven't left the last level of recursion
-		// it means we don't need to do anything
-		if( this->recursion != 0) {
-			__cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
-			unlock( this->lock );
-			return;
-		}
-
-		// Get the next thread, will be null on low contention monitor
-		thread_desc * new_owner = next_thread( this );
-
-		// Check the new owner is consistent with who we wake-up
-		// new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
-		/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-
-		// We can now let other threads in safely
-		unlock( this->lock );
-
-		//We need to wake-up the thread
-		/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-		unpark( new_owner );
-	}
-
-	// Leave single monitor for the last time
-	void __leave_dtor_monitor_desc( monitor_desc * this ) {
-		__cfaabi_dbg_debug_do(
-			if( TL_GET( this_thread ) != this->owner ) {
-				abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
-			}
-			if( this->recursion != 1 ) {
-				abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
-			}
-		)
-	}
-
+	)
+}
+
+extern "C" {
 	// Leave the thread monitor
 	// last routine called by a thread.
 	// Should never return
-	void __leave_thread_monitor() {
+	void __cfactx_thrd_leave() {
 		thread_desc * thrd = TL_GET( this_thread );
 		monitor_desc * this = &thrd->self_mon;
@@ -313,5 +311,5 @@
 static inline void enter( __monitor_group_t monitors ) {
 	for( __lock_size_t i = 0; i < monitors.size; i++) {
-		__enter_monitor_desc( monitors[i], monitors );
+		__enter( monitors[i], monitors );
 	}
 }
@@ -321,5 +319,5 @@
 static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
 	for( __lock_size_t i = count - 1; i >= 0; i--) {
-		__leave_monitor_desc( monitors[i] );
+		__leave( monitors[i] );
 	}
 }
@@ -381,5 +379,5 @@
 	(thrd->monitors){m, 1, func};
 
-	__enter_monitor_dtor( this.m, func );
+	__dtor_enter( this.m, func );
 }
 
@@ -387,5 +385,5 @@
 void ^?{}( monitor_dtor_guard_t & this ) {
 	// Leave the monitors in order
-	__leave_dtor_monitor_desc( this.m );
+	__dtor_leave( this.m );
 
 	// Restore thread context
@@ -537,5 +535,5 @@
 	thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
 	/* paranoid */ verify( signallee->next == 0p );
-	set_owner( monitors, count, signallee );
+	__set_owner( monitors, count, signallee );
 
 	__cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
@@ -643,5 +641,5 @@
 
 				// Set the owners to be the next thread
-				set_owner( monitors, count, next );
+				__set_owner( monitors, count, next );
 
 				// unlock all the monitors
@@ -711,5 +709,5 @@
 // Utilities
 
-static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
+static inline void __set_owner( monitor_desc * this, thread_desc * owner ) {
 	/* paranoid */ verify( this->lock.lock );
 
@@ -721,5 +719,5 @@
 }
 
-static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
+static inline void __set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
 	/* paranoid */ verify ( monitors[0]->lock.lock );
 	/* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
@@ -755,5 +753,5 @@
 		//we need to set the monitor as in use
 		/* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-		set_owner( this,  urgent->owner->waiting_thread );
+		__set_owner( this,  urgent->owner->waiting_thread );
 
 		return check_condition( urgent );
@@ -765,5 +763,5 @@
 	/* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
 	/* paranoid */ verify( !new_owner || new_owner->next == 0p );
-	set_owner( this, new_owner );
+	__set_owner( this, new_owner );
 
 	return new_owner;
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/preemption.cfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -184,5 +184,5 @@
 
 	// Enable interrupts by decrementing the counter
-	// If counter reaches 0, execute any pending CtxSwitch
+	// If counter reaches 0, execute any pending __cfactx_switch
 	void enable_interrupts( __cfaabi_dbg_ctx_param ) {
 		processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
@@ -218,5 +218,5 @@
 
 	// Disable interrupts by incrementint the counter
-	// Don't execute any pending CtxSwitch even if counter reaches 0
+	// Don't execute any pending __cfactx_switch even if counter reaches 0
 	void enable_interrupts_noPoll() {
 		unsigned short prev = kernelTLS.preemption_state.disable_count;
@@ -272,5 +272,5 @@
 
 // KERNEL ONLY
-// Check if a CtxSwitch signal handler shoud defer
+// Check if a __cfactx_switch signal handler should defer
 // If true  : preemption is safe
 // If false : preemption is unsafe and marked as pending
@@ -302,5 +302,5 @@
 
 	// Setup proper signal handlers
-	__cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler
+	__cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler
 
 	signal_block( SIGALRM );
@@ -393,5 +393,5 @@
 	// Preemption can occur here
 
-	force_yield( __ALARM_PREEMPTION ); // Do the actual CtxSwitch
+	force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch
 }
 
Index: libcfa/src/concurrency/thread.cfa
===================================================================
--- libcfa/src/concurrency/thread.cfa	(revision a50502128f8eabae54ded84a8ab9885411aad583)
+++ libcfa/src/concurrency/thread.cfa	(revision c7a900a12cc0fbe737f58157787bd96980702780)
@@ -56,5 +56,5 @@
 
 	disable_interrupts();
-	CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
+	__cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);
 
 	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
