Index: libcfa/src/concurrency/preemption.cfa
===================================================================
--- libcfa/src/concurrency/preemption.cfa	(revision f2ccbfd8293b9c7214def7757379a89add64de2e)
+++ libcfa/src/concurrency/preemption.cfa	(revision 91395401fb324e78f0e310fceebf37440cba51a2)
@@ -215,4 +215,8 @@
 // available.
 
+//-----------------------------------------------------------------------------
+// Some assembly required
+#define __cfaasm_label(label, when) when: asm volatile goto(".global __cfaasm_" #label "_" #when "\n" "__cfaasm_" #label "_" #when ":":::"memory":when)
+
 //----------
 // special case for preemption since used often
@@ -220,5 +224,5 @@
 	// create a assembler label before
 	// marked as clobber all to avoid movement
-	asm volatile("__cfaasm_check_before:":::"memory");
+	__cfaasm_label(check, before);
 
 	// access tls as normal
@@ -227,5 +231,5 @@
 	// create a assembler label after
 	// marked as clobber all to avoid movement
-	asm volatile("__cfaasm_check_after:":::"memory");
+	__cfaasm_label(check, after);
 	return enabled;
 }
@@ -246,9 +250,7 @@
 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
 uintptr_t __cfatls_get( unsigned long int offset ) {
-	// __cfaasm_get.before = ({ void * value; asm("movq $__cfaasm_get_before, %[v]\n\t" : [v]"=r"(value) ); value; });
-	// __cfaasm_get.after  = ({ void * value; asm("movq $__cfaasm_get_after , %[v]\n\t" : [v]"=r"(value) ); value; });
 	// create a assembler label before
 	// marked as clobber all to avoid movement
-	asm volatile("__cfaasm_get_before:":::"memory");
+	__cfaasm_label(get, before);
 
 	// access tls as normal (except for pointer arithmetic)
@@ -257,5 +259,5 @@
 	// create a assembler label after
 	// marked as clobber all to avoid movement
-	asm volatile("__cfaasm_get_after:":::"memory");
+	__cfaasm_label(get, after);
 	return val;
 }
@@ -266,5 +268,5 @@
 		// create a assembler label before
 		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_dsable_before:":::"memory");
+		__cfaasm_label(dsable, before);
 
 		with( __cfaabi_tls.preemption_state ) {
@@ -288,5 +290,6 @@
 		// create a assembler label after
 		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_dsable_after:":::"memory");
+		__cfaasm_label(dsable, after);
+
 	}
 
@@ -294,9 +297,6 @@
 	// If counter reaches 0, execute any pending __cfactx_switch
 	void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-		// create a assembler label before
-		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_enble_before:":::"memory");
-
-		processor   * proc = __cfaabi_tls.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
+		// Cache the processor now since interrupts can start happening after the atomic store
+		processor   * proc = __cfaabi_tls.this_processor;
 		/* paranoid */ verify( proc );
 
@@ -304,10 +304,12 @@
 			unsigned short prev = disable_count;
 			disable_count -= 1;
-			verify( prev != 0u );                     // If this triggers someone is enabled already enabled interruptsverify( prev != 0u );
+
+			// If this triggers, someone has already enabled interrupts (the count was never incremented)
+			/* paranoid */ verify( prev != 0u );
 
 			// Check if we need to prempt the thread because an interrupt was missed
 			if( prev == 1 ) {
 				#if GCC_VERSION > 50000
-				static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
+					static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
 				#endif
 
@@ -328,8 +330,4 @@
 		// For debugging purposes : keep track of the last person to enable the interrupts
 		__cfaabi_dbg_debug_do( proc->last_enable = caller; )
-
-		// create a assembler label after
-		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_enble_after:":::"memory");
 	}
 
@@ -337,14 +335,11 @@
 	// Don't execute any pending __cfactx_switch even if counter reaches 0
 	void enable_interrupts_noPoll() {
-		// create a assembler label before
-		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_nopoll_before:":::"memory");
-
 		unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
 		__cfaabi_tls.preemption_state.disable_count -= 1;
-		verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers someone is enabled already enabled interrupts
+		// If this triggers, someone has already enabled interrupts (the count was never incremented)
+		/* paranoid */ verifyf( prev != 0u, "Incremented from %u\n", prev );
 		if( prev == 1 ) {
 			#if GCC_VERSION > 50000
-			static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
+				static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
 			#endif
 			// Set enabled flag to true
@@ -356,10 +351,8 @@
 			__atomic_signal_fence(__ATOMIC_RELEASE);
 		}
-
-		// create a assembler label after
-		// marked as clobber all to avoid movement
-		asm volatile("__cfaasm_nopoll_after:":::"memory");
-	}
-}
+	}
+}
+
+#undef __cfaasm_label
 
 // sigprocmask wrapper : unblock a single signal
@@ -446,18 +439,34 @@
 #elif defined( __aarch64__ )
 	#ifdef __PIC__
-		#define RELOC_TAG "@PLT"
-	#else
-		#define RELOC_TAG ""
-	#endif
-	#define __cfaasm_label( label ) \
+		// Note that this works only for gcc
+		#define __cfaasm_label( label ) static struct asm_region label = \
 		({ \
 			struct asm_region region; \
 			asm( \
-				"mov %[vb], __cfaasm_" #label "_before@GOTPCREL(%%rip)"  "\n\t" \
-				"mov %[va], __cfaasm_" #label "_after@GOTPCREL(%%rip)"   "\n\t" \
+				"adrp %[vb], _GLOBAL_OFFSET_TABLE_"                              "\n\t" \
+        			"ldr  %[vb], [%[vb], #:gotpage_lo15:__cfaasm_" #label "_before]" "\n\t" \
+				"adrp %[va], _GLOBAL_OFFSET_TABLE_"                              "\n\t" \
+        			"ldr  %[va], [%[va], #:gotpage_lo15:__cfaasm_" #label "_after]"  "\n\t" \
 				 : [vb]"=r"(region.before), [va]"=r"(region.after) \
 			); \
 			region; \
 		});
+	#else
+		#error this is not the right thing to do
+		/*
+		#define __cfaasm_label( label ) static struct asm_region label = \
+		({ \
+			struct asm_region region; \
+			asm( \
+				"adrp %[vb], __cfaasm_" #label "_before"              "\n\t" \
+        			"add  %[vb], %[vb], :lo12:__cfaasm_" #label "_before" "\n\t" \
+				"adrp %[va], :got:__cfaasm_" #label "_after"          "\n\t" \
+        			"add  %[va], %[va], :lo12:__cfaasm_" #label "_after"  "\n\t" \
+				 : [vb]"=r"(region.before), [va]"=r"(region.after) \
+			); \
+			region; \
+		});
+		*/
+	#endif
 #else
 	#error unknown hardware architecture
@@ -473,6 +482,4 @@
 	__cfaasm_label( check  );
 	__cfaasm_label( dsable );
-	__cfaasm_label( enble );
-	__cfaasm_label( nopoll );
 
 	// Check if preemption is safe
@@ -481,6 +488,4 @@
 	if( __cfaasm_in( ip, check  ) ) { ready = false; goto EXIT; };
 	if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; };
-	if( __cfaasm_in( ip, enble  ) ) { ready = false; goto EXIT; };
-	if( __cfaasm_in( ip, nopoll ) ) { ready = false; goto EXIT; };
 	if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
 	if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
