Index: libcfa/src/concurrency/channel.hfa
===================================================================
--- libcfa/src/concurrency/channel.hfa	(revision cb94e41db9025ba582ed110d8213c1c52b493aec)
+++ libcfa/src/concurrency/channel.hfa	(revision edc4813be30ce885c1e9eff8c8b0634036512a50)
@@ -28,5 +28,7 @@
     unlock( lock );
     park();
+    #if defined(__ARM_ARCH)
     __atomic_thread_fence( __ATOMIC_SEQ_CST );
+    #endif // __ARM_ARCH
     return sn.extra == 0p;
 }
@@ -131,5 +133,7 @@
 static inline void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
     memcpy( cons`first.extra, (void *)&elem, sizeof(T) ); // do waiting consumer work
+    #if defined(__ARM_ARCH)
     __atomic_thread_fence( __ATOMIC_SEQ_CST );
+    #endif // __ARM_ARCH
     wake_one( cons );
 }
@@ -138,5 +142,7 @@
 static inline void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
     memcpy( (void *)&retval, prods`first.extra, sizeof(T) );
+    #if defined(__ARM_ARCH)
     __atomic_thread_fence( __ATOMIC_SEQ_CST );
+    #endif // __ARM_ARCH
     wake_one( prods );
 }
Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision cb94e41db9025ba582ed110d8213c1c52b493aec)
+++ libcfa/src/concurrency/locks.hfa	(revision edc4813be30ce885c1e9eff8c8b0634036512a50)
@@ -182,14 +182,37 @@
 static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
     n.locked = true;
+
+	#if defined(__ARM_ARCH)
+	__asm__ __volatile__ ( "DMB ISH" ::: "memory" ); // "memory" clobber: compiler barrier too, else the n.locked store may sink past the DMB
+	#endif
+
 	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
 	if( prev == 0p ) return;
 	prev->next = &n;
+	
+	#if defined(__ARM_ARCH)
+	__asm__ __volatile__ ( "DMB ISH" ::: "memory" ); // "memory" clobber forces the prev->next publish to be emitted before the barrier
+	#endif
+
 	while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
+
+	#if defined(__ARM_ARCH)
+	__asm__ __volatile__ ( "DMB ISH" ::: "memory" ); // "memory" clobber: acquire-style barrier must also stop compiler reordering into the critical section
+	#endif
 }
 
 static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
+	#if defined(__ARM_ARCH)
+	__asm__ __volatile__ ( "DMB ISH" ::: "memory" ); // "memory" clobber: critical-section stores must not be sunk past the DMB by the compiler
+	#endif
+
 	mcs_spin_node * n_ptr = &n;
 	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
 	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
+
+	#if defined(__ARM_ARCH)
+	__asm__ __volatile__ ( "DMB ISH" ::: "memory" ); // "memory" clobber: handoff store below must stay after the barrier at the compiler level as well
+	#endif
+
 	n.next->locked = false;
 }
