Index: libcfa/src/device/cpu.cfa
===================================================================
--- libcfa/src/device/cpu.cfa	(revision ddd2ec9d646ba47067feec6e4ae77e23aeecd234)
+++ libcfa/src/device/cpu.cfa	(revision 8e6582413cf8b4afa976cac5c741ea047e2fbe90)
@@ -135,5 +135,6 @@
 		count++;
 	}
-	iterate_dir(path, lambda);
+	int ret = iterate_dir(path, lambda);
+	if(ret == ENOTDIR) return 0;
 
 	/* paranoid */ verifyf(count == max + 1, "Inconsistent %s count, counted %d, but max %s was %d", prefix, count, prefix, (int)max);
@@ -224,7 +225,7 @@
 
 struct raw_cache_instance {
-	idx_range_t range;
-	unsigned width;
-	unsigned char level;
+	idx_range_t range;	// A text description of the cpus covered
+	unsigned width;		// The number of cpus covered
+	unsigned char level;	// The cache level
 	// FIXME add at least size and type
 };
@@ -233,4 +234,6 @@
 static void ^?{}(raw_cache_instance & this) { free(this.range);}
 
+// Returns a 2D array of instances of size [cpu count][cache levels]
+// where cache level doesn't include instruction caches
 raw_cache_instance ** build_raw_cache_table(unsigned cpus, unsigned idxs, unsigned cache_levels)
 {
@@ -239,16 +242,26 @@
 	// TODO: this loop is broken, it only works if the present cpu start at 0 and are contiguous which is not guaranteed.
 	for(i; cpus) {
-		raw[i] = alloc(cache_levels);
-		void addcache(unsigned fidx, unsigned char level, idx_range_t range, size_t len) {
-			/* paranoid */ verifyf(level <= cache_levels, "Unexpected cache level %d on cpu %u index %u", (int)level, i, fidx);
-
-			unsigned idx = cache_levels - level;
-			raw_cache_instance & r = raw[i][idx];
-			r.range = strndup(range, len);
-			r.level = level;
-			const char * end;
-			r.width = read_width(range, len, &end);
-		}
-		foreach_cacheidx(i, idxs, addcache);
+		if (cache_levels > 0) {
+			raw[i] = alloc(cache_levels);
+			void addcache(unsigned fidx, unsigned char level, idx_range_t range, size_t len) {
+				/* paranoid */ verifyf(level <= cache_levels, "Unexpected cache level %d on cpu %u index %u", (int)level, i, fidx);
+
+				unsigned idx = cache_levels - level;
+				raw_cache_instance & r = raw[i][idx];
+				r.range = strndup(range, len);
+				r.level = level;
+				const char * end;
+				r.width = read_width(range, len, &end);
+			}
+			foreach_cacheidx(i, idxs, addcache);
+		}
+		else {
+			char buf[128];
+			snprintf(buf, 128, "0-%u", cpus - 1);
+			raw[i] = alloc();
+			raw[i]->range = strndup(buf, 128);
+			raw[i]->level = 0;
+			raw[i]->width = cpus;
+		}
 	}
 
@@ -333,5 +346,5 @@
 		unsigned cache_levels = 0;
 		unsigned llc = 0;
-		{
+		if (idxs != 0) {
 			unsigned char prev = -1u;
 			void first(unsigned idx, unsigned char level, const char * map, size_t len) {
@@ -416,4 +429,5 @@
 		cpu_info.llc_map = entries;
 		cpu_info.hthrd_count = cpus;
+		cpu_info.llc_count = map_cnt;
 	}
 
Index: libcfa/src/device/cpu.hfa
===================================================================
--- libcfa/src/device/cpu.hfa	(revision ddd2ec9d646ba47067feec6e4ae77e23aeecd234)
+++ libcfa/src/device/cpu.hfa	(revision 8e6582413cf8b4afa976cac5c741ea047e2fbe90)
@@ -23,9 +23,12 @@
 
 struct cpu_info_t {
-	 // array of size [hthrd_count]
+	// Array of size [hthrd_count]
 	const cpu_map_entry_t * llc_map;
 
-	 // Number of _hardware_ threads present in the system
+	// Number of _hardware_ threads present in the system
 	size_t hthrd_count;
+
+	// Number of distinct last level caches
+	size_t llc_count;
 };
 
