Index: libcfa/src/concurrency/kernel.hfa
===================================================================
--- libcfa/src/concurrency/kernel.hfa	(revision c90db2db871a3e57c7c5f98cc53831bb1ca50d84)
+++ libcfa/src/concurrency/kernel.hfa	(revision a2a45661d68494461a3f2023e6bb1f882c424ac1)
@@ -67,6 +67,6 @@
 		unsigned target;
 		unsigned last;
-		unsigned cnt;
-		unsigned long long int cutoff;
+		signed   cpu;
+		// unsigned long long int cutoff;
 	} rdq;
 
@@ -152,4 +152,8 @@
 	volatile unsigned long long tv;
 	volatile unsigned long long ma;
+};
+
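+// Aligned to avoid false sharing; records which last-level cache most recently
+// served a group of ready-queue lanes.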
+struct __attribute__((aligned(128))) __cache_id_t {
+	volatile unsigned id;
 };
 
@@ -164,6 +168,10 @@
 static inline void ^?{}(__timestamp_t & this) {}
 
+struct __attribute__((aligned(128))) __ready_queue_caches_t;
+void  ?{}(__ready_queue_caches_t & this);
+void ^?{}(__ready_queue_caches_t & this);
+
 //TODO adjust cache size to ARCHITECTURE
-// Structure holding the relaxed ready queue
+// Structure holding the ready queue
 struct __ready_queue_t {
 	// Data tracking the actual lanes
@@ -177,4 +185,6 @@
 		// Array of times
 		__timestamp_t * volatile tscs;
+
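+		// Array of cache ids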
+		__cache_id_t * volatile caches;
 
 		// Array of stats
Index: libcfa/src/concurrency/ready_queue.cfa
===================================================================
--- libcfa/src/concurrency/ready_queue.cfa	(revision c90db2db871a3e57c7c5f98cc53831bb1ca50d84)
+++ libcfa/src/concurrency/ready_queue.cfa	(revision a2a45661d68494461a3f2023e6bb1f882c424ac1)
@@ -20,7 +20,8 @@
 
 
-#define USE_RELAXED_FIFO
+// #define USE_RELAXED_FIFO
 // #define USE_WORK_STEALING
 // #define USE_CPU_WORK_STEALING
+#define USE_AWARE_STEALING
 
 #include "bits/defs.hfa"
@@ -29,4 +30,5 @@
 
 #include "stdlib.hfa"
+#include "limits.hfa"
 #include "math.hfa"
 
@@ -54,5 +56,8 @@
 #endif
 
-#if   defined(USE_CPU_WORK_STEALING)
+#if   defined(USE_AWARE_STEALING)
+	#define READYQ_SHARD_FACTOR 2
+	#define SEQUENTIAL_SHARD 2
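+	// Each processor owns READYQ_SHARD_FACTOR lanes; SEQUENTIAL_SHARD is presumably
+	// the lane count kept when a cluster has no processors registered.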
+#elif defined(USE_CPU_WORK_STEALING)
 	#define READYQ_SHARD_FACTOR 2
 #elif defined(USE_RELAXED_FIFO)
@@ -138,5 +143,4 @@
 	__kernel_rseq_register();
 
-	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
 	bool * handle = (bool *)&kernelTLS().sched_lock;
 
@@ -174,6 +178,4 @@
 	}
 
-	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);
-
 	// Return new spot.
 	/* paranoid */ verify(n < ready);
@@ -190,6 +192,4 @@
 
 	__atomic_store_n(cell, 0p, __ATOMIC_RELEASE);
-
-	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
 
 	__kernel_rseq_unregister();
@@ -244,11 +244,45 @@
 
 //=======================================================================
+// caches handling
+
+struct __attribute__((aligned(128))) __ready_queue_caches_t {
+	// Count States:
+	// - 0  : No one is looking after this cache
+	// - 1  : No one is looking after this cache, BUT it's not empty
+	// - 2+ : At least one processor is looking after this cache
+	volatile unsigned count;
+};
+
+void  ?{}(__ready_queue_caches_t & this) { this.count = 0; }
+void ^?{}(__ready_queue_caches_t & this) {}
+
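+// A processor has migrated away from a cpu under this cache: one fewer processor
+// is now looking after it.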
+static inline void depart(__ready_queue_caches_t & cache) {
+	/* paranoid */ verify( cache.count > 1);
+	__atomic_fetch_add(&cache.count, -1, __ATOMIC_SEQ_CST);
+	/* paranoid */ verify( cache.count != 0);
+	/* paranoid */ verify( cache.count < 65536 ); // Assumes no cluster maps 65536 or more kernel threads to a single cache; possible, but highly unusual.
+}
+
+static inline void arrive(__ready_queue_caches_t & cache) {
+	// Intentionally a no-op for now.  The intended behaviour (a sketch, not yet
+	// enabled) is a CAS loop that bumps an empty cache (count 0) straight to 2,
+	// marking "at least one processor is looking after this cache".
+	// for() {
+	// 	unsigned expected = cache.count;
+	// 	unsigned desired  = 0 == expected ? 2 : expected + 1;
+	// 	if(__atomic_compare_exchange_n(&cache.count, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) break;
+	// }
+}
+
+//=======================================================================
 // Cforall Ready Queue used for scheduling
 //=======================================================================
-unsigned long long moving_average(unsigned long long nval, unsigned long long oval) {
-	const unsigned long long tw = 16;
-	const unsigned long long nw = 4;
-	const unsigned long long ow = tw - nw;
-	return ((nw * nval) + (ow * oval)) / tw;
+unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
+	/* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
+	/* paranoid */ verifyf( instsc  < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
+	/* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
+
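+	// Weighted average: the new sample (time since insertion, clamped at 0) gets
+	// 4/16 of the weight, the previous average keeps the remaining 12/16.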
+	const unsigned long long new_val = currtsc > instsc ? currtsc - instsc : 0;
+	const unsigned long long total_weight = 16;
+	const unsigned long long new_weight   = 4;
+	const unsigned long long old_weight = total_weight - new_weight;
+	const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight;
+	return ret;
 }
 
@@ -270,9 +304,15 @@
 			lanes.help[idx].tri = 0;
 		}
+
+		caches = alloc( cpu_info.llc_count );
+		for( idx; (size_t)cpu_info.llc_count ) {
+			(caches[idx]){};
+		}
 	#else
-		lanes.data  = 0p;
-		lanes.tscs  = 0p;
-		lanes.help  = 0p;
-		lanes.count = 0;
+		lanes.data   = 0p;
+		lanes.tscs   = 0p;
+		lanes.caches = 0p;
+		lanes.help   = 0p;
+		lanes.count  = 0;
 	#endif
 }
@@ -285,8 +325,129 @@
 	free(lanes.data);
 	free(lanes.tscs);
+	free(lanes.caches);
 	free(lanes.help);
 }
 
 //-----------------------------------------------------------------------
+#if defined(USE_AWARE_STEALING)
+	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
+		processor * const proc = kernelTLS().this_processor;
+		const bool external = (!proc) || (cltr != proc->cltr);
+		const bool remote   = hint == UNPARK_REMOTE;
+
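+		// External and remote pushes go to the thread's preferred lanes when they are
+		// still valid (falling back to a random lane); local pushes cycle through this
+		// processor's own lanes.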
+		unsigned i;
+		if( external || remote ) {
+			// Figure out where the thread was last time and make sure that place is still valid
+			/* paranoid */ verify(thrd->preferred >= 0);
+			if(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count) {
+				/* paranoid */ verify(thrd->preferred * READYQ_SHARD_FACTOR < lanes.count);
+				unsigned start = thrd->preferred * READYQ_SHARD_FACTOR;
+				do {
+					unsigned r = __tls_rand();
+					i = start + (r % READYQ_SHARD_FACTOR);
+					/* paranoid */ verify( i < lanes.count );
+					// If we can't lock it retry
+				} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
+			} else {
+				do {
+					i = __tls_rand() % lanes.count;
+				} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
+			}
+		} else {
+			do {
+				unsigned r = proc->rdq.its++;
+				i = proc->rdq.id + (r % READYQ_SHARD_FACTOR);
+				/* paranoid */ verify( i < lanes.count );
+				// If we can't lock it retry
+			} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
+		}
+
+		// Actually push it
+		push(lanes.data[i], thrd);
+
+		// Unlock and return
+		__atomic_unlock( &lanes.data[i].lock );
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
+			else __tls_stats()->ready.push.local.success++;
+		#endif
+	}
+
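+	// Helping cutoff: the largest projected wait (moving average) over this
+	// processor's own lanes, plus 50% slack; empty lanes (ts == -1) are skipped.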
+	static inline unsigned long long calc_cutoff(const unsigned long long ctsc, const processor * proc, __ready_queue_t & rdq) {
+		unsigned start = proc->rdq.id;
+		unsigned long long max = 0;
+		for(i; READYQ_SHARD_FACTOR) {
+			unsigned long long ptsc = ts(rdq.lanes.data[start + i]);
+			if(ptsc != -1ull) {
+				/* paranoid */ verify( start + i < rdq.lanes.count );
+				unsigned long long tsc = moving_average(ctsc, ptsc, rdq.lanes.tscs[start + i].ma);
+				if(tsc > max) max = tsc;
+			}
+		}
+		return (max + 2 * max) / 2;
+	}
+
+	__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
+		/* paranoid */ verify( lanes.count > 0 );
+		/* paranoid */ verify( kernelTLS().this_processor );
+		/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count );
+
+		processor * const proc = kernelTLS().this_processor;
+		unsigned this = proc->rdq.id;
+		/* paranoid */ verify( this < lanes.count );
+		__cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
+
+		// Figure out the current cpu and make sure it is valid
+		const int cpu = __kernel_getcpu();
+		/* paranoid */ verify(cpu >= 0);
+		/* paranoid */ verify(cpu < cpu_info.hthrd_count);
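+		// Publish which last-level cache currently serves this processor's lane group,
+		// so other processors can bias their helping toward lanes sharing their cache.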
+		unsigned this_cache = cpu_info.llc_map[cpu].cache;
+		__atomic_store_n(&lanes.caches[this / READYQ_SHARD_FACTOR].id, this_cache, __ATOMIC_RELAXED);
+
+		const unsigned long long ctsc = rdtscl();
+
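+		// No help target yet: pick a random lane, and accept it if its lane group was
+		// last served from the same LLC as this processor, or unconditionally with a
+		// small (3/256) probability.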
+		if(proc->rdq.target == MAX) {
+			uint64_t chaos = __tls_rand();
+			unsigned ext = chaos & 0xff;
+			unsigned other  = (chaos >> 8) % (lanes.count);
+
+			if(ext < 3 || __atomic_load_n(&lanes.caches[other / READYQ_SHARD_FACTOR].id, __ATOMIC_RELAXED) == this_cache) {
+				proc->rdq.target = other;
+			}
+		}
+		else {
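+			// Only help the chosen lane if its head thread's projected wait (moving
+			// average) exceeds the cutoff computed from this processor's own lanes.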
+			const unsigned target = proc->rdq.target;
+			__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, lanes.tscs[target].tv);
+			/* paranoid */ verify( lanes.tscs[target].tv != MAX );
+			if(target < lanes.count) {
+				const unsigned long long cutoff = calc_cutoff(ctsc, proc, cltr->ready_queue);
+				const unsigned long long age = moving_average(ctsc, lanes.tscs[target].tv, lanes.tscs[target].ma);
+				__cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
+				if(age > cutoff) {
+					thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
+					if(t) return t;
+				}
+			}
+			proc->rdq.target = MAX;
+		}
+
+		for(READYQ_SHARD_FACTOR) {
+			unsigned i = this + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
+			if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
+		}
+
+		// All lanes were empty, return 0p
+		return 0p;
+	}
+	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
+		unsigned i = __tls_rand() % lanes.count;
+		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
+	}
+	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
+		return search(cltr);
+	}
+#endif
 #if defined(USE_CPU_WORK_STEALING)
 	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->ready_queue) {
@@ -345,4 +506,22 @@
 	}
 
+	static inline int pop_getcpu(processor * proc, __ready_queue_caches_t * caches) {
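+		// Re-read the cpu this processor is running on; if it migrated to a different
+		// cpu since the last pop, move its presence from the old LLC's cache counter
+		// to the new one.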
+		const int prv = proc->rdq.cpu;
+		const int cpu = __kernel_getcpu();
+		if( prv != cpu ) {
+			unsigned pidx = cpu_info.llc_map[prv].cache;
+			/* paranoid */ verify(pidx < cpu_info.llc_count);
+
+			unsigned nidx = cpu_info.llc_map[cpu].cache;
+			/* paranoid */ verify(nidx < cpu_info.llc_count);
+
+			depart(caches[pidx]);
+			arrive(caches[nidx]);
+
+			__STATS( /* cpu migs++ */ )
+		}
+		return proc->rdq.cpu = cpu;
+	}
+
 	// Pop from the ready queue from a given cluster
 	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
@@ -350,5 +529,7 @@
 		/* paranoid */ verify( kernelTLS().this_processor );
 
-		const int cpu = __kernel_getcpu();
+		processor * const proc = kernelTLS().this_processor;
+		const int cpu = pop_getcpu( proc, caches );
+		// const int cpu = __kernel_getcpu();
 		/* paranoid */ verify(cpu >= 0);
 		/* paranoid */ verify(cpu < cpu_info.hthrd_count);
@@ -360,10 +541,9 @@
 		/* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
 
-		processor * const proc = kernelTLS().this_processor;
 		const int start = map.self * READYQ_SHARD_FACTOR;
 		const unsigned long long ctsc = rdtscl();
 
 		// Did we already have a help target
-		if(proc->rdq.target == -1u) {
+		if(proc->rdq.target == MAX) {
 			unsigned long long max = 0;
 			for(i; READYQ_SHARD_FACTOR) {
@@ -371,5 +551,5 @@
 				if(tsc > max) max = tsc;
 			}
-			 proc->rdq.cutoff = (max + 2 * max) / 2;
+			//  proc->rdq.cutoff = (max + 2 * max) / 2;
 			/* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores.
 			/* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores.
@@ -384,5 +564,5 @@
 			}
 
-			/* paranoid */ verify(proc->rdq.target != -1u);
+			/* paranoid */ verify(proc->rdq.target != MAX);
 		}
 		else {
@@ -395,22 +575,23 @@
 			{
 				unsigned target = proc->rdq.target;
-				proc->rdq.target = -1u;
+				proc->rdq.target = MAX;
 				lanes.help[target / READYQ_SHARD_FACTOR].tri++;
 				if(moving_average(ctsc - lanes.tscs[target].tv, lanes.tscs[target].ma) > cutoff) {
+					__STATS( __tls_stats()->ready.pop.helped[target]++; )
 					thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
 					proc->rdq.last = target;
 					if(t) return t;
-					else proc->rdq.target = -1u;
 				}
-				else proc->rdq.target = -1u;
+				proc->rdq.target = MAX;
 			}
 
 			unsigned last = proc->rdq.last;
-			if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {
+			if(last != MAX && moving_average(ctsc, lanes.tscs[last].tv, lanes.tscs[last].ma) > cutoff) {
+				__STATS( __tls_stats()->ready.pop.helped[last]++; )
 				thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
 				if(t) return t;
 			}
 			else {
-				proc->rdq.last = -1u;
+				proc->rdq.last = MAX;
 			}
 		}
@@ -428,8 +609,8 @@
 		processor * const proc = kernelTLS().this_processor;
 		unsigned last = proc->rdq.last;
-		if(last != -1u) {
+		if(last != MAX) {
 			struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
 			if(t) return t;
-			proc->rdq.last = -1u;
+			proc->rdq.last = MAX;
 		}
 
@@ -560,5 +741,5 @@
 		#else
 			unsigned preferred = thrd->preferred;
-			const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == -1u || thrd->curr_cluster != cltr;
+			const bool external = (hint != UNPARK_LOCAL) || (!kernelTLS().this_processor) || preferred == MAX || thrd->curr_cluster != cltr;
 			/* paranoid */ verifyf(external || preferred < lanes.count, "Invalid preferred queue %u for %u lanes", preferred, lanes.count );
 
@@ -612,5 +793,5 @@
 		processor * proc = kernelTLS().this_processor;
 
-		if(proc->rdq.target == -1u) {
+		if(proc->rdq.target == MAX) {
 			unsigned long long min = ts(lanes.data[proc->rdq.id]);
 			for(int i = 0; i < READYQ_SHARD_FACTOR; i++) {
@@ -623,5 +804,5 @@
 		else {
 			unsigned target = proc->rdq.target;
-			proc->rdq.target = -1u;
+			proc->rdq.target = MAX;
 			const unsigned long long bias = 0; //2_500_000_000;
 			const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
@@ -658,4 +839,5 @@
 // try to pop from a lane given by index w
 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
+	/* paranoid */ verify( w < lanes.count );
 	__STATS( stats.attempt++; )
 
@@ -681,5 +863,5 @@
 	// Actually pop the list
 	struct thread$ * thrd;
-	#if defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
+	#if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
 		unsigned long long tsc_before = ts(lane);
 	#endif
@@ -697,11 +879,14 @@
 	__STATS( stats.success++; )
 
-	#if defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
-		unsigned long long now = rdtscl();
-		lanes.tscs[w].tv = tsv;
-		lanes.tscs[w].ma = moving_average(now > tsc_before ? now - tsc_before : 0, lanes.tscs[w].ma);
+	#if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING) || defined(USE_CPU_WORK_STEALING)
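+		// Only fold this pop into the lane's timestamp and moving average when there
+		// is a valid timestamp to record; MAX means there is nothing meaningful to use.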
+		if (tsv != MAX) {
+			unsigned long long now = rdtscl();
+			unsigned long long pma = __atomic_load_n(&lanes.tscs[w].ma, __ATOMIC_RELAXED);
+			__atomic_store_n(&lanes.tscs[w].tv, tsv, __ATOMIC_RELAXED);
+			__atomic_store_n(&lanes.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
+		}
 	#endif
 
-	#if defined(USE_CPU_WORK_STEALING)
+	#if defined(USE_AWARE_STEALING) || defined(USE_CPU_WORK_STEALING)
 		thrd->preferred = w / READYQ_SHARD_FACTOR;
 	#else
@@ -802,5 +987,5 @@
 		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
 		it->rdq.id = value;
-		it->rdq.target = -1u;
+		it->rdq.target = MAX;
 		value += READYQ_SHARD_FACTOR;
 		it = &(*it)`next;
@@ -815,10 +1000,9 @@
 
 static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) {
-	#if defined(USE_WORK_STEALING)
+	#if defined(USE_AWARE_STEALING) || defined(USE_WORK_STEALING)
 		lanes.tscs = alloc(lanes.count, lanes.tscs`realloc);
 		for(i; lanes.count) {
-			unsigned long long tsc1 = ts(lanes.data[i]);
-			unsigned long long tsc2 = rdtscl();
-			lanes.tscs[i].tv = min(tsc1, tsc2);
+			lanes.tscs[i].tv = rdtscl();
+			lanes.tscs[i].ma = 0;
 		}
 	#endif
@@ -866,4 +1050,6 @@
 			// Update original
 			lanes.count = ncount;
+
+			lanes.caches = alloc( target, lanes.caches`realloc );
 		}
 
@@ -942,7 +1128,10 @@
 				fix(lanes.data[idx]);
 			}
+
+			lanes.caches = alloc( target, lanes.caches`realloc );
 		}
 
 		fix_times(cltr);
+
 
 		reassign_cltr_id(cltr);
