Index: libcfa/src/concurrency/kernel/fwd.hfa
===================================================================
--- libcfa/src/concurrency/kernel/fwd.hfa	(revision 6a8882c5d95c78ede2d214a855c752147cb22cd9)
+++ libcfa/src/concurrency/kernel/fwd.hfa	(revision 21255675538be8803fefa8e055131729c60a99ab)
@@ -65,8 +65,5 @@
 
 		extern uintptr_t __cfatls_get( unsigned long int member );
-		// #define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))
-		#define publicTLS_get( member ) (__cfaabi_tls.member)
-		// extern forall(otype T) T __cfatls_get( T * member, T value );
-		// #define publicTLS_set( member, value ) __cfatls_set( (typeof(member)*)__builtin_offsetof(KernelThreadData, member), value );
+		#define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))
 
 		static inline uint64_t __tls_rand() {
Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision 6a8882c5d95c78ede2d214a855c752147cb22cd9)
+++ libcfa/src/concurrency/preemption.cfa	(revision 21255675538be8803fefa8e055131729c60a99ab)
@@ -231,8 +231,21 @@
 }
 
+struct asm_region {
+	void * before;
+	void * after;
+};
+
+static inline bool __cfaasm_in( void * ip, struct asm_region & region ) {
+	return ip >= region.before && ip <= region.after;
+}
+
+
 //----------
 // Get data from the TLS block
+// struct asm_region __cfaasm_get;
 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
 uintptr_t __cfatls_get( unsigned long int offset ) {
+	// __cfaasm_get.before = ({ void * value; asm("movq $__cfaasm_get_before, %[v]\n\t" : [v]"=r"(value) ); value; });
+	// __cfaasm_get.after  = ({ void * value; asm("movq $__cfaasm_get_after , %[v]\n\t" : [v]"=r"(value) ); value; });
 	// create a assembler label before
 	// marked as clobber all to avoid movement
@@ -253,5 +266,5 @@
 		// create a assembler label before
 		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_disable_before:":::"memory");
+		asm volatile("__cfaasm_dsable_before:":::"memory");
 
 		with( __cfaabi_tls.preemption_state ) {
@@ -275,5 +288,5 @@
 		// create a assembler label after
 		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_disable_after:":::"memory");
+		asm volatile("__cfaasm_dsable_after:":::"memory");
 	}
 
@@ -283,5 +296,5 @@
 		// create a assembler label before
 		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_enable_before:":::"memory");
+		asm volatile("__cfaasm_enble_before:":::"memory");
 
 		processor   * proc = __cfaabi_tls.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
@@ -318,5 +331,5 @@
 		// create a assembler label after
 		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_enable_after:":::"memory");
+		asm volatile("__cfaasm_enble_after:":::"memory");
 	}
 
@@ -386,12 +399,92 @@
 }
 
+//-----------------------------------------------------------------------------
+// Some assembly required
+#if defined( __i386 )
+	#ifdef __PIC__
+		#define RELOC_PRELUDE( label ) \
+			"calll   .Lcfaasm_prelude_" #label "$pb\n\t" \
+			".Lcfaasm_prelude_" #label "$pb:\n\t" \
+			"popl    %%eax\n\t" \
+			".Lcfaasm_prelude_" #label "_end:\n\t" \
+			"addl    $_GLOBAL_OFFSET_TABLE_+(.Lcfaasm_prelude_" #label "_end-.Lcfaasm_prelude_" #label "$pb), %%eax\n\t"
+		#define RELOC_PREFIX ""
+		#define RELOC_SUFFIX "@GOT(%%eax)"
+	#else
+		#define RELOC_PREFIX "$"
+		#define RELOC_SUFFIX ""
+	#endif
+	#define __cfaasm_label( label ) static struct asm_region label = \
+		({ \
+			struct asm_region region; \
+			asm( \
+				RELOC_PRELUDE( label ) \
+				"movl " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \
+				"movl " RELOC_PREFIX "__cfaasm_" #label "_after"  RELOC_SUFFIX ", %[va]\n\t" \
+				 : [vb]"=r"(region.before), [va]"=r"(region.after) \
+			); \
+			region; \
+		});
+#elif defined( __x86_64 )
+	#ifdef __PIC__
+		#define RELOC_PREFIX ""
+		#define RELOC_SUFFIX "@GOTPCREL(%%rip)"
+	#else
+		#define RELOC_PREFIX "$"
+		#define RELOC_SUFFIX ""
+	#endif
+	#define __cfaasm_label( label ) static struct asm_region label = \
+		({ \
+			struct asm_region region; \
+			asm( \
+				"movq " RELOC_PREFIX "__cfaasm_" #label "_before" RELOC_SUFFIX ", %[vb]\n\t" \
+				"movq " RELOC_PREFIX "__cfaasm_" #label "_after"  RELOC_SUFFIX ", %[va]\n\t" \
+				 : [vb]"=r"(region.before), [va]"=r"(region.after) \
+			); \
+			region; \
+		});
+#elif defined( __aarch64__ )
+	#ifdef __PIC__
+		#define RELOC_TAG "@PLT"
+	#else
+		#define RELOC_TAG ""
+	#endif
+	#define __cfaasm_label( label ) static struct asm_region label = \
+		({ \
+			struct asm_region region; \
+			asm( \
+				"ldr %[vb], =__cfaasm_" #label "_before"  "\n\t" \
+				"ldr %[va], =__cfaasm_" #label "_after"   "\n\t" \
+				 : [vb]"=r"(region.before), [va]"=r"(region.after) \
+			); \
+			region; \
+		});
+#else
+	#error unknown hardware architecture
+#endif
+
 // KERNEL ONLY
 // Check if a __cfactx_switch signal handler shoud defer
 // If true  : preemption is safe
 // If false : preemption is unsafe and marked as pending
-static inline bool preemption_ready() {
+static inline bool preemption_ready( void * ip ) {
+	// Get all the regions in which it is not safe to preempt
+	__cfaasm_label( get    );
+	__cfaasm_label( check  );
+	__cfaasm_label( dsable );
+	__cfaasm_label( enble  );
+	__cfaasm_label( nopoll );
+
 	// Check if preemption is safe
-	bool ready = __cfaabi_tls.preemption_state.enabled && ! __cfaabi_tls.preemption_state.in_progress;
-
+	bool ready = true;
+	if( __cfaasm_in( ip, get    ) ) { ready = false; goto EXIT; };
+	if( __cfaasm_in( ip, check  ) ) { ready = false; goto EXIT; };
+	if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; };
+	if( __cfaasm_in( ip, enble  ) ) { ready = false; goto EXIT; };
+	if( __cfaasm_in( ip, nopoll ) ) { ready = false; goto EXIT; };
+	if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
+	if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
+
+EXIT:
 	// Adjust the pending flag accordingly
 	__cfaabi_tls.this_processor->pending_preemption = !ready;
@@ -468,44 +561,4 @@
 // Kernel Signal Handlers
 //=============================================================================================
-struct asm_region {
-	void * before;
-	void * after;
-};
-
-//-----------------------------------------------------------------------------
-// Some assembly required
-#if defined( __i386 )
-	#define __cfaasm_label( label ) \
-		({ \
-			struct asm_region region; \
-			asm( \
-				"movl $__cfaasm_" #label "_before, %[vb]\n\t" \
-				"movl $__cfaasm_" #label "_after , %[va]\n\t" \
-				 : [vb]"=r"(region.before), [vb]"=r"(region.before) \
-			); \
-			region; \
-		});
-#elif defined( __x86_64 )
-	#ifdef __PIC__
-		#define PLT "@PLT"
-	#else
-		#define PLT ""
-	#endif
-	#define __cfaasm_label( label ) \
-		({ \
-			struct asm_region region; \
-			asm( \
-				"movq $__cfaasm_" #label "_before" PLT ", %[vb]\n\t" \
-				"movq $__cfaasm_" #label "_after"  PLT ", %[va]\n\t" \
-				 : [vb]"=r"(region.before), [va]"=r"(region.after) \
-			); \
-			region; \
-		});
-#elif defined( __aarch64__ )
-	#error __cfaasm_label undefined for arm
-#else
-	#error unknown hardware architecture
-#endif
-
 __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )
 
@@ -530,12 +583,5 @@
 
 	// Check if it is safe to preempt here
-	if( !preemption_ready() ) { return; }
-
-	struct asm_region region;
-	region = __cfaasm_label( get     ); if( ip >= region.before && ip <= region.after ) return;
-	region = __cfaasm_label( check   ); if( ip >= region.before && ip <= region.after ) return;
-	region = __cfaasm_label( disable ); if( ip >= region.before && ip <= region.after ) return;
-	region = __cfaasm_label( enable  ); if( ip >= region.before && ip <= region.after ) return;
-	region = __cfaasm_label( nopoll  ); if( ip >= region.before && ip <= region.after ) return;
+	if( !preemption_ready( ip ) ) { return; }
 
 	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
Index: libcfa/src/stdlib.hfa
===================================================================
--- libcfa/src/stdlib.hfa	(revision 6a8882c5d95c78ede2d214a855c752147cb22cd9)
+++ libcfa/src/stdlib.hfa	(revision 21255675538be8803fefa8e055131729c60a99ab)
@@ -101,4 +101,23 @@
 		return (T *)pvalloc( sizeof(T) );				// C pvalloc
 	} // pvalloc
+
+	void free( T * addr ) {
+		free( (void *) addr ); 							// C free
+	} // free
+} // distribution
+
+static inline forall( ttype TT | { void free( TT ); } ) {
+	// T* does not take void* and vice-versa
+
+	void free( void * addr, TT rest ) {
+		free( addr );
+		free( rest );
+	} // free
+
+	forall( dtype T | sized(T) )
+	void free( T * addr, TT rest ) {
+		free( addr );
+		free( rest );
+	} // free
 } // distribution
 
@@ -110,5 +129,5 @@
 		forall( dtype T | sized(T) ) {
 			union  U_fill 		{ char c; T * a; T t; };
-			struct S_fill 		{ char tag; char c; size_t size; T * at; char t[50]; };
+			struct S_fill 		{ char tag; U_fill(T) fill; };
 			struct S_realloc	{ inline T *; };
 		}
