Index: libcfa/src/device/cpu.cfa
===================================================================
--- libcfa/src/device/cpu.cfa	(revision 45fde9f4fa966516f7ec2ec41a39a93b864e08d2)
+++ libcfa/src/device/cpu.cfa	(revision dcbfcbc5b3b9231d2f0ddc5dc07e9986e2843cd7)
@@ -256,26 +256,36 @@
 }
 
+struct llc_map_t {
+	raw_cache_instance * raw;
+	unsigned count;
+	unsigned start;
+};
+
 // returns an allocate list of all the different distinct last level caches
-static [*idx_range_t, size_t cnt] distinct_llcs(unsigned cpus, unsigned llc_idx, raw_cache_instance ** raw) {
+static [*llc_map_t, size_t cnt] distinct_llcs(unsigned cpus, unsigned llc_idx, raw_cache_instance ** raw) {
 	// Allocate at least one element
-	idx_range_t * ranges = alloc();
+	llc_map_t* ranges = alloc();
 	size_t range_cnt = 1;
 
 	// Initialize with element 0
-	*ranges = raw[0][llc_idx].range;
+	ranges->raw = &raw[0][llc_idx];
+	ranges->count = 0;
+	ranges->start = -1u;
 
 	// Go over all other cpus
 	CPU_LOOP: for(i; 1~cpus) {
 		// Check if the range is already there
-		idx_range_t candidate = raw[i][llc_idx].range;
+		raw_cache_instance * candidate = &raw[i][llc_idx];
 		for(j; range_cnt) {
-			idx_range_t exist = ranges[j];
+			llc_map_t & exist = ranges[j];
 			// If the range is already there just jump to the next cpu
-			if(0 == strcmp(candidate, exist)) continue CPU_LOOP;
+			if(0 == strcmp(candidate->range, exist.raw->range)) continue CPU_LOOP;
 		}
 
 		// The range wasn't there, added to the list
 		ranges = alloc(range_cnt + 1, ranges`realloc);
-		ranges[range_cnt] = candidate;
+		ranges[range_cnt].raw = candidate;
+		ranges[range_cnt].count = 0;
+		ranges[range_cnt].start = -1u;
 		range_cnt++;
 	}
@@ -287,12 +297,12 @@
 struct cpu_pairing_t {
 	unsigned cpu;
-	unsigned llc_id;
+	unsigned id;
 };
 
 int ?<?( cpu_pairing_t lhs, cpu_pairing_t rhs ) {
-	return lhs.llc_id < rhs.llc_id;
-}
-
-static [[]cpu_pairing_t] get_cpu_pairings(unsigned cpus, raw_cache_instance ** raw, idx_range_t * maps, size_t map_cnt) {
+	return lhs.id < rhs.id;
+}
+
+static [[]cpu_pairing_t] get_cpu_pairings(unsigned cpus, raw_cache_instance ** raw, llc_map_t * maps, size_t map_cnt) {
 	cpu_pairing_t * pairings = alloc(cpus);
 
@@ -301,7 +311,7 @@
 		idx_range_t want = raw[i][0].range;
 		MAP_LOOP: for(j; map_cnt) {
-			if(0 != strcmp(want, maps[j])) continue MAP_LOOP;
-
-			pairings[i].llc_id = j;
+			if(0 != strcmp(want, maps[j].raw->range)) continue MAP_LOOP;
+
+			pairings[i].id = j;
 			continue CPU_LOOP;
 		}
@@ -312,4 +322,6 @@
 	return pairings;
 }
+
+#include <fstream.hfa>
 
 extern "C" {
@@ -336,16 +348,20 @@
 
 		// Find number of distinct cache instances
-		idx_range_t * maps;
+		llc_map_t * maps;
 		size_t map_cnt;
 		[maps, map_cnt] =  distinct_llcs(cpus, cache_levels - llc, raw);
 
 		#if defined(__CFA_WITH_VERIFY__)
+		// Verify that the caches cover all the cpus
 		{
-			unsigned width = 0;
+			unsigned width1 = 0;
+			unsigned width2 = 0;
 			for(i; map_cnt) {
 				const char * _;
-				width += read_width(maps[i], strlen(maps[i]), &_);
+				width1 += read_width(maps[i].raw->range, strlen(maps[i].raw->range), &_);
+				width2 += maps[i].raw->width;
 			}
-			verify(width == cpus);
+			verify(width1 == cpus);
+			verify(width2 == cpus);
 		}
 		#endif
@@ -357,13 +373,31 @@
 		qsort(pairings, cpus);
 
-		unsigned llc_width = raw[0][cache_levels - llc].width;
-
-		// From the mappins build the actual cpu map we want
+		{
+			unsigned it = 0;
+			for(i; cpus) {
+				unsigned llc_id = pairings[i].id;
+				if(maps[llc_id].start == -1u) {
+					maps[llc_id].start = it;
+					it += maps[llc_id].raw->width;
+					/* paranoid */ verify(maps[llc_id].start < it);
+					/* paranoid */ verify(it != -1u);
+				}
+			}
+			/* paranoid */ verify(it == cpus);
+		}
+
+		// From the mappings build the actual cpu map we want
 		struct cpu_map_entry_t * entries = alloc(cpus);
 		for(i; cpus) { entries[i].count = 0; }
 		for(i; cpus) {
+			/* paranoid */ verify(pairings[i].id < map_cnt);
 			unsigned c = pairings[i].cpu;
-			entries[c].start = pairings[i].llc_id * llc_width;
-			entries[c].count = llc_width;
+			unsigned llc_id = pairings[i].id;
+			unsigned width = maps[llc_id].raw->width;
+			unsigned start = maps[llc_id].start;
+			unsigned self  = start + (maps[llc_id].count++);
+			entries[c].count = width;
+			entries[c].start = start;
+			entries[c].self  = self;
 		}
 
Index: libcfa/src/device/cpu.hfa
===================================================================
--- libcfa/src/device/cpu.hfa	(revision 45fde9f4fa966516f7ec2ec41a39a93b864e08d2)
+++ libcfa/src/device/cpu.hfa	(revision dcbfcbc5b3b9231d2f0ddc5dc07e9986e2843cd7)
@@ -17,4 +17,5 @@
 
 struct cpu_map_entry_t {
+	unsigned self;
 	unsigned start;
 	unsigned count;
