Index: libcfa/src/concurrency/locks.hfa
===================================================================
--- libcfa/src/concurrency/locks.hfa	(revision 5908fb4223d724c7d6e5bbedbfcf0f72d9a7612a)
+++ libcfa/src/concurrency/locks.hfa	(revision 5ece8ce4c66ecfa9393578df09da0a3d91384f7b)
@@ -176,8 +176,4 @@
 static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
 
-static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
-	return node->next;
-}
-
 struct mcs_spin_lock {
 	mcs_spin_queue queue;
@@ -185,9 +181,9 @@
 
 static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
+	n.locked = true;
 	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
-	n.locked = true;
-	if(prev == 0p) return;
+	if( prev == 0p ) return;
 	prev->next = &n;
-	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
+	while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
 }
 
@@ -195,5 +191,5 @@
 	mcs_spin_node * n_ptr = &n;
 	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
-	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
+	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
 	n.next->locked = false;
 }
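
The patch moves n.locked = true ahead of the __atomic_exchange_n in lock(). In the old ordering, the plain stores n.locked = true and prev->next = &n were not ordered with respect to each other, so a predecessor could follow prev->next, hand off the lock by clearing n.locked, and then have the late n.locked = true store overwrite the hand-off, leaving the acquirer spinning forever. Setting the flag before the sequentially consistent exchange orders it ahead of any point where another thread can reach the node. The unlock() hunk also adds Pause() to the spin that waits for the successor, matching the loop in lock(), and the first hunk drops the ?`next accessor for mcs_spin_node.

A minimal usage sketch of the fixed interface follows; it is not part of the patch, and the include path and the shared counter are illustrative assumptions:

#include <locks.hfa>          // assumed install path for libcfa's locks.hfa

mcs_spin_lock l;              // shared lock; queue tail starts at 0p
volatile int counter = 0;     // hypothetical state guarded by l

void increment() {
	mcs_spin_node n;          // per-acquisition node: next = 0p, locked = true
	lock( l, n );             // swap &n into the tail; spin on n.locked if queued
	counter += 1;             // critical section
	unlock( l, n );           // CAS tail back to 0p, or release the successor
}

Each acquisition needs its own mcs_spin_node, and the node must stay live (e.g. on the acquiring thread's stack) until unlock() returns, since a successor may still store into n.next while unlock() is waiting.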
