Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision 25337e082faf7ec7364ea6d7d9c16c8e76e0bf5e)
+++ libcfa/src/concurrency/kernel.hfa	(revision 0fb3ee5ae0058e62c4771ec2f34a753e3b6c8747)
@@ -68,5 +68,4 @@
 		unsigned last;
 		signed   cpu;
-		// unsigned long long int cutoff;
 	} rdq;
 
@@ -154,5 +153,5 @@
 };
 
-struct __attribute__((aligned(128))) __cache_id_t {
+struct __attribute__((aligned(16))) __cache_id_t {
 	volatile unsigned id;
 };
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision 25337e082faf7ec7364ea6d7d9c16c8e76e0bf5e)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision 0fb3ee5ae0058e62c4771ec2f34a753e3b6c8747)
@@ -399,5 +399,9 @@
 		/* paranoid */ verify(cpu < cpu_info.hthrd_count);
 		unsigned this_cache = cpu_info.llc_map[cpu].cache;
-		__atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED);
+
+		// Super important: don't write the same value over and over again
+		// We want to maximise our chances that this particular value stays in cache
+		if(lanes.caches[this / READYQ_SHARD_FACTOR].id != this_cache)
+			__atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED);
 
 		const unsigned long long ctsc = rdtscl();
