Changes in / [572a02f:dcbfcbc]

Files: 7 edited

Legend:
- Unmodified (no marker)
- Added (+)
- Removed (-)
libcfa/src/concurrency/kernel.cfa
r572a02f  rdcbfcbc

 280  280
 281  281       // Spin a little on I/O, just in case
 282  282       for(5) {
 283  283           __maybe_io_drain( this );
 284  284           readyThread = pop_fast( this->cltr );
   …    …
 287  287
 288  288       // no luck, try stealing a few times
 289  289       for(5) {
 290  290           if( __maybe_io_drain( this ) ) {
 291  291               readyThread = pop_fast( this->cltr );

libcfa/src/concurrency/kernel.hfa
r572a02f  rdcbfcbc

  66   66           unsigned id;
  67   67           unsigned target;
       68 +         unsigned last;
  68   69           unsigned long long int cutoff;
  69   70       } rdq;

libcfa/src/concurrency/kernel/startup.cfa
r572a02f  rdcbfcbc

 541  541       this.rdq.id = -1u;
 542  542       this.rdq.target = -1u;
      543 +     this.rdq.last = -1u;
 543  544       this.rdq.cutoff = 0ull;
 544  545       do_terminate = false;

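Taken together, the kernel.hfa and startup.cfa hunks above extend the per-processor ready-queue state with a `last` field next to `id`, `target` and `cutoff`, all initialised to sentinel values. A minimal plain-C sketch of that state follows; the struct name, the helper and the field comments are illustrative (meanings inferred from how pop_fast uses the fields in ready_queue.cfa further down), not the actual C∀ declarations.

    /* Illustrative stand-in for the processor's rdq sub-struct from kernel.hfa.
     * Field meanings as inferred from pop_fast in ready_queue.cfa:
     *   target - lane currently selected for "helping", -1u when none
     *   last   - lane that was helped most recently (new in this changeset)
     *   cutoff - timestamp cutoff deciding whether helping is worthwhile     */
    struct rdq_state {
        unsigned id;
        unsigned target;
        unsigned last;
        unsigned long long cutoff;
    };

    /* Mirrors the initialisation added in startup.cfa: sentinels everywhere. */
    static void rdq_state_init(struct rdq_state * s) {
        s->id     = -1u;
        s->target = -1u;
        s->last   = -1u;
        s->cutoff = 0ull;
    }
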
libcfa/src/concurrency/ready_queue.cfa
r572a02f  rdcbfcbc

  24   24
  25   25   #include "bits/defs.hfa"
       26 + #include "device/cpu.hfa"
  26   27   #include "kernel_private.hfa"
  27   28
   …    …
  47   48   #endif
  48   49
  49      - #if defined(USE_RELAXED_FIFO)
       50 + #if defined(USE_CPU_WORK_STEALING)
       51 + #define READYQ_SHARD_FACTOR 2
       52 + #elif defined(USE_RELAXED_FIFO)
  50   53   #define BIAS 4
  51   54   #define READYQ_SHARD_FACTOR 4
   …    …
 215  218   //=======================================================================
 216  219   void ?{}(__ready_queue_t & this) with (this) {
 217      -     lanes.data = 0p;
 218      -     lanes.tscs = 0p;
 219      -     lanes.count = 0;
      220 +     #if defined(USE_CPU_WORK_STEALING)
      221 +         lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR;
      222 +         lanes.data = alloc( lanes.count );
      223 +         lanes.tscs = alloc( lanes.count );
      224 +
      225 +         for( idx; (size_t)lanes.count ) {
      226 +             (lanes.data[idx]){};
      227 +             lanes.tscs[idx].tv = rdtscl();
      228 +         }
      229 +     #else
      230 +         lanes.data = 0p;
      231 +         lanes.tscs = 0p;
      232 +         lanes.count = 0;
      233 +     #endif
 220  234   }
 221  235
 222  236   void ^?{}(__ready_queue_t & this) with (this) {
 223      -     verify( SEQUENTIAL_SHARD == lanes.count );
      237 +     #if !defined(USE_CPU_WORK_STEALING)
      238 +         verify( SEQUENTIAL_SHARD == lanes.count );
      239 +     #endif
      240 +
 224  241       free(lanes.data);
 225  242       free(lanes.tscs);
   …    …
 227  244
 228  245   //-----------------------------------------------------------------------
      246 + #if defined(USE_CPU_WORK_STEALING)
      247 +     __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
      248 +         __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
      249 +
      250 +         processor * const proc = kernelTLS().this_processor;
      251 +         const bool external = !push_local || (!proc) || (cltr != proc->cltr);
      252 +
      253 +         const int cpu = __kernel_getcpu();
      254 +         /* paranoid */ verify(cpu >= 0);
      255 +         /* paranoid */ verify(cpu < cpu_info.hthrd_count);
      256 +         /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
      257 +
      258 +         const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
      259 +         /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
      260 +         /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
      261 +         /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %u lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
      262 +
      263 +         const int start = map.self * READYQ_SHARD_FACTOR;
      264 +         unsigned i;
      265 +         do {
      266 +             unsigned r;
      267 +             if(unlikely(external)) { r = __tls_rand(); }
      268 +             else { r = proc->rdq.its++; }
      269 +             i = start + (r % READYQ_SHARD_FACTOR);
      270 +             // If we can't lock it retry
      271 +         } while( !__atomic_try_acquire( &lanes.data[i].lock ) );
      272 +
      273 +         // Actually push it
      274 +         push(lanes.data[i], thrd);
      275 +
      276 +         // Unlock and return
      277 +         __atomic_unlock( &lanes.data[i].lock );
      278 +
      279 +         #if !defined(__CFA_NO_STATISTICS__)
      280 +             if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
      281 +             else __tls_stats()->ready.push.local.success++;
      282 +         #endif
      283 +
      284 +         __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);
      285 +
      286 +     }
      287 +
      288 +     // Pop from the ready queue from a given cluster
      289 +     __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
      290 +         /* paranoid */ verify( lanes.count > 0 );
      291 +         /* paranoid */ verify( kernelTLS().this_processor );
      292 +
      293 +         const int cpu = __kernel_getcpu();
      294 +         /* paranoid */ verify(cpu >= 0);
      295 +         /* paranoid */ verify(cpu < cpu_info.hthrd_count);
      296 +         /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count);
      297 +
      298 +         const cpu_map_entry_t & map = cpu_info.llc_map[cpu];
      299 +         /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count);
      300 +         /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count);
      301 +         /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %u lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR);
      302 +
      303 +         processor * const proc = kernelTLS().this_processor;
      304 +         const int start = map.self * READYQ_SHARD_FACTOR;
      305 +
      306 +         // Did we already have a help target
      307 +         if(proc->rdq.target == -1u) {
      308 +             // if We don't have a
      309 +             unsigned long long min = ts(lanes.data[start]);
      310 +             for(i; READYQ_SHARD_FACTOR) {
      311 +                 unsigned long long tsc = ts(lanes.data[start + i]);
      312 +                 if(tsc < min) min = tsc;
      313 +             }
      314 +             proc->rdq.cutoff = min;
      315 +             proc->rdq.target = (map.start * READYQ_SHARD_FACTOR) + (__tls_rand() % (map.count * READYQ_SHARD_FACTOR));
      316 +         }
      317 +         else {
      318 +             const unsigned long long bias = 0; //2_500_000_000;
      319 +             const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
      320 +             {
      321 +                 unsigned target = proc->rdq.target;
      322 +                 proc->rdq.target = -1u;
      323 +                 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
      324 +                     $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
      325 +                     proc->rdq.last = target;
      326 +                     if(t) return t;
      327 +                 }
      328 +             }
      329 +
      330 +             unsigned last = proc->rdq.last;
      331 +             if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {
      332 +                 $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
      333 +                 if(t) return t;
      334 +             }
      335 +             else {
      336 +                 proc->rdq.last = -1u;
      337 +             }
      338 +         }
      339 +
      340 +         for(READYQ_SHARD_FACTOR) {
      341 +             unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
      342 +             if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
      343 +         }
      344 +
      345 +         // All lanes where empty return 0p
      346 +         return 0p;
      347 +     }
      348 +
      349 +     __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
      350 +         processor * const proc = kernelTLS().this_processor;
      351 +         unsigned last = proc->rdq.last;
      352 +
      353 +         unsigned i = __tls_rand() % lanes.count;
      354 +         return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
      355 +     }
      356 +     __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
      357 +         return search(cltr);
      358 +     }
      359 + #endif
 229  360   #if defined(USE_RELAXED_FIFO)
 230  361   //-----------------------------------------------------------------------
   …    …
 580  711   }
 581  712
 582      - // Grow the ready queue
 583      - void ready_queue_grow(struct cluster * cltr) {
 584      -     size_t ncount;
 585      -     int target = cltr->procs.total;
 586      -
 587      -     /* paranoid */ verify( ready_mutate_islocked() );
 588      -     __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
 589      -
 590      -     // Make sure that everything is consistent
 591      -     /* paranoid */ check( cltr->ready_queue );
 592      -
 593      -     // grow the ready queue
 594      -     with( cltr->ready_queue ) {
 595      -         // Find new count
 596      -         // Make sure we always have atleast 1 list
 597      -         if(target >= 2) {
 598      -             ncount = target * READYQ_SHARD_FACTOR;
 599      -         } else {
 600      -             ncount = SEQUENTIAL_SHARD;
 601      -         }
 602      -
 603      -         // Allocate new array (uses realloc and memcpies the data)
 604      -         lanes.data = alloc( ncount, lanes.data`realloc );
 605      -
 606      -         // Fix the moved data
 607      -         for( idx; (size_t)lanes.count ) {
 608      -             fix(lanes.data[idx]);
 609      -         }
 610      -
 611      -         // Construct new data
 612      -         for( idx; (size_t)lanes.count ~ ncount) {
 613      -             (lanes.data[idx]){};
 614      -         }
 615      -
 616      -         // Update original
 617      -         lanes.count = ncount;
 618      -     }
 619      -
 620      -     fix_times(cltr);
 621      -
 622      -     reassign_cltr_id(cltr);
 623      -
 624      -     // Make sure that everything is consistent
 625      -     /* paranoid */ check( cltr->ready_queue );
 626      -
 627      -     __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
 628      -
 629      -     /* paranoid */ verify( ready_mutate_islocked() );
 630      - }
 631      -
 632      - // Shrink the ready queue
 633      - void ready_queue_shrink(struct cluster * cltr) {
 634      -     /* paranoid */ verify( ready_mutate_islocked() );
 635      -     __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
 636      -
 637      -     // Make sure that everything is consistent
 638      -     /* paranoid */ check( cltr->ready_queue );
 639      -
 640      -     int target = cltr->procs.total;
 641      -
 642      -     with( cltr->ready_queue ) {
 643      -         // Remember old count
 644      -         size_t ocount = lanes.count;
 645      -
 646      -         // Find new count
 647      -         // Make sure we always have atleast 1 list
 648      -         lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
 649      -         /* paranoid */ verify( ocount >= lanes.count );
 650      -         /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
 651      -
 652      -         // for printing count the number of displaced threads
 653      -         #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
 654      -             __attribute__((unused)) size_t displaced = 0;
 655      -         #endif
 656      -
 657      -         // redistribute old data
 658      -         for( idx; (size_t)lanes.count ~ ocount) {
 659      -             // Lock is not strictly needed but makes checking invariants much easier
 660      -             __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
 661      -             verify(locked);
 662      -
 663      -             // As long as we can pop from this lane to push the threads somewhere else in the queue
 664      -             while(!is_empty(lanes.data[idx])) {
 665      -                 struct $thread * thrd;
 666      -                 unsigned long long _;
 667      -                 [thrd, _] = pop(lanes.data[idx]);
 668      -
 669      -                 push(cltr, thrd, true);
 670      -
 671      -                 // for printing count the number of displaced threads
 672      -                 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
 673      -                     displaced++;
 674      -                 #endif
 675      -             }
 676      -
 677      -             // Unlock the lane
 678      -             __atomic_unlock(&lanes.data[idx].lock);
 679      -
 680      -             // TODO print the queue statistics here
 681      -
 682      -             ^(lanes.data[idx]){};
 683      -         }
 684      -
 685      -         __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
 686      -
 687      -         // Allocate new array (uses realloc and memcpies the data)
 688      -         lanes.data = alloc( lanes.count, lanes.data`realloc );
 689      -
 690      -         // Fix the moved data
 691      -         for( idx; (size_t)lanes.count ) {
 692      -             fix(lanes.data[idx]);
 693      -         }
 694      -     }
 695      -
 696      -     fix_times(cltr);
 697      -
 698      -     reassign_cltr_id(cltr);
 699      -
 700      -     // Make sure that everything is consistent
 701      -     /* paranoid */ check( cltr->ready_queue );
 702      -
 703      -     __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
 704      -     /* paranoid */ verify( ready_mutate_islocked() );
 705      - }
      713 + #if defined(USE_CPU_WORK_STEALING)
      714 +     // ready_queue size is fixed in this case
      715 +     void ready_queue_grow(struct cluster * cltr) {}
      716 +     void ready_queue_shrink(struct cluster * cltr) {}
      717 + #else
      718 +     // Grow the ready queue
      719 +     void ready_queue_grow(struct cluster * cltr) {
      720 +         size_t ncount;
      721 +         int target = cltr->procs.total;
      722 +
      723 +         /* paranoid */ verify( ready_mutate_islocked() );
      724 +         __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");
      725 +
      726 +         // Make sure that everything is consistent
      727 +         /* paranoid */ check( cltr->ready_queue );
      728 +
      729 +         // grow the ready queue
      730 +         with( cltr->ready_queue ) {
      731 +             // Find new count
      732 +             // Make sure we always have atleast 1 list
      733 +             if(target >= 2) {
      734 +                 ncount = target * READYQ_SHARD_FACTOR;
      735 +             } else {
      736 +                 ncount = SEQUENTIAL_SHARD;
      737 +             }
      738 +
      739 +             // Allocate new array (uses realloc and memcpies the data)
      740 +             lanes.data = alloc( ncount, lanes.data`realloc );
      741 +
      742 +             // Fix the moved data
      743 +             for( idx; (size_t)lanes.count ) {
      744 +                 fix(lanes.data[idx]);
      745 +             }
      746 +
      747 +             // Construct new data
      748 +             for( idx; (size_t)lanes.count ~ ncount) {
      749 +                 (lanes.data[idx]){};
      750 +             }
      751 +
      752 +             // Update original
      753 +             lanes.count = ncount;
      754 +         }
      755 +
      756 +         fix_times(cltr);
      757 +
      758 +         reassign_cltr_id(cltr);
      759 +
      760 +         // Make sure that everything is consistent
      761 +         /* paranoid */ check( cltr->ready_queue );
      762 +
      763 +         __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");
      764 +
      765 +         /* paranoid */ verify( ready_mutate_islocked() );
      766 +     }
      767 +
      768 +     // Shrink the ready queue
      769 +     void ready_queue_shrink(struct cluster * cltr) {
      770 +         /* paranoid */ verify( ready_mutate_islocked() );
      771 +         __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");
      772 +
      773 +         // Make sure that everything is consistent
      774 +         /* paranoid */ check( cltr->ready_queue );
      775 +
      776 +         int target = cltr->procs.total;
      777 +
      778 +         with( cltr->ready_queue ) {
      779 +             // Remember old count
      780 +             size_t ocount = lanes.count;
      781 +
      782 +             // Find new count
      783 +             // Make sure we always have atleast 1 list
      784 +             lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD;
      785 +             /* paranoid */ verify( ocount >= lanes.count );
      786 +             /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 );
      787 +
      788 +             // for printing count the number of displaced threads
      789 +             #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
      790 +                 __attribute__((unused)) size_t displaced = 0;
      791 +             #endif
      792 +
      793 +             // redistribute old data
      794 +             for( idx; (size_t)lanes.count ~ ocount) {
      795 +                 // Lock is not strictly needed but makes checking invariants much easier
      796 +                 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
      797 +                 verify(locked);
      798 +
      799 +                 // As long as we can pop from this lane to push the threads somewhere else in the queue
      800 +                 while(!is_empty(lanes.data[idx])) {
      801 +                     struct $thread * thrd;
      802 +                     unsigned long long _;
      803 +                     [thrd, _] = pop(lanes.data[idx]);
      804 +
      805 +                     push(cltr, thrd, true);
      806 +
      807 +                     // for printing count the number of displaced threads
      808 +                     #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
      809 +                         displaced++;
      810 +                     #endif
      811 +                 }
      812 +
      813 +                 // Unlock the lane
      814 +                 __atomic_unlock(&lanes.data[idx].lock);
      815 +
      816 +                 // TODO print the queue statistics here
      817 +
      818 +                 ^(lanes.data[idx]){};
      819 +             }
      820 +
      821 +             __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);
      822 +
      823 +             // Allocate new array (uses realloc and memcpies the data)
      824 +             lanes.data = alloc( lanes.count, lanes.data`realloc );
      825 +
      826 +             // Fix the moved data
      827 +             for( idx; (size_t)lanes.count ) {
      828 +                 fix(lanes.data[idx]);
      829 +             }
      830 +         }
      831 +
      832 +         fix_times(cltr);
      833 +
      834 +         reassign_cltr_id(cltr);
      835 +
      836 +         // Make sure that everything is consistent
      837 +         /* paranoid */ check( cltr->ready_queue );
      838 +
      839 +         __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
      840 +         /* paranoid */ verify( ready_mutate_islocked() );
      841 +     }
      842 + #endif
 706  843
 707  844   #if !defined(__CFA_NO_STATISTICS__)

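The core idea in the USE_CPU_WORK_STEALING code added above is that the ready queue is sharded per hardware thread (READYQ_SHARD_FACTOR lanes each): push() stays inside the current CPU's shard and retries a random lane of that shard until a try-lock succeeds, and pop_fast() scans only that shard before any helping or stealing kicks in. The following self-contained plain-C sketch shows just that lane-selection pattern under simplifying assumptions: shards are indexed by a raw cpu id rather than the llc_map self slot, each lane is a toy integer ring buffer, and every name and constant (SHARD_FACTOR, push_thread, pop_thread, ...) is invented for the demo rather than taken from the C∀ runtime.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SHARD_FACTOR 2                 /* lanes per hardware thread, as in the diff */
    #define NCPUS        4                 /* pretend machine size for the demo         */
    #define NLANES       (NCPUS * SHARD_FACTOR)
    #define LANE_CAP     64

    /* One lane: a spinlock word plus a tiny FIFO of integer "thread ids". */
    struct lane {
        atomic_int lock;                   /* 0 = free, 1 = held */
        int items[LANE_CAP];
        int head, tail;
    };

    static struct lane lanes[NLANES];      /* zero-initialised: all lanes free and empty */

    static int try_acquire(struct lane * l) {
        return atomic_exchange_explicit(&l->lock, 1, memory_order_acquire) == 0;
    }
    static void release_lane(struct lane * l) {
        atomic_store_explicit(&l->lock, 0, memory_order_release);
    }

    /* Push: stay inside the shard owned by 'cpu', retrying a different random
     * lane of that shard whenever the chosen one is momentarily locked. */
    static void push_thread(int cpu, int thrd) {
        int start = cpu * SHARD_FACTOR;
        int i;
        do {
            i = start + (rand() % SHARD_FACTOR);
        } while (!try_acquire(&lanes[i]));
        lanes[i].items[lanes[i].tail++ % LANE_CAP] = thrd;
        release_lane(&lanes[i]);
    }

    /* Pop: scan only the local shard; return -1 if every local lane is empty. */
    static int pop_thread(int cpu) {
        int start = cpu * SHARD_FACTOR;
        for (int k = 0; k < SHARD_FACTOR; k++) {
            struct lane * l = &lanes[start + k];
            if (!try_acquire(l)) continue;
            if (l->head != l->tail) {
                int thrd = l->items[l->head++ % LANE_CAP];
                release_lane(l);
                return thrd;
            }
            release_lane(l);
        }
        return -1;
    }

    int main(void) {
        push_thread(1, 42);
        push_thread(1, 43);
        printf("cpu 1 pops %d and %d; cpu 0 pops %d (its shard is empty)\n",
               pop_thread(1), pop_thread(1), pop_thread(0));
        return 0;
    }

Running the sketch prints the two ids pushed into cpu 1's shard (in whichever lane order they landed) and -1 for cpu 0, whose shard was never touched, which is the locality property the per-CPU sharding is after.
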
libcfa/src/device/cpu.cfa
r572a02f  rdcbfcbc

 256  256   }
 257  257
      258 + struct llc_map_t {
      259 +     raw_cache_instance * raw;
      260 +     unsigned count;
      261 +     unsigned start;
      262 + };
      263 +
 258  264   // returns an allocate list of all the different distinct last level caches
 259      - static [* idx_range_t, size_t cnt] distinct_llcs(unsigned cpus, unsigned llc_idx, raw_cache_instance ** raw) {
      265 + static [* llc_map_t, size_t cnt] distinct_llcs(unsigned cpus, unsigned llc_idx, raw_cache_instance ** raw) {
 260  266       // Allocate at least one element
 261      -     idx_range_t * ranges = alloc();
      267 +     llc_map_t * ranges = alloc();
 262  268       size_t range_cnt = 1;
 263  269
 264  270       // Initialize with element 0
 265      -     *ranges = raw[0][llc_idx].range;
      271 +     ranges->raw = &raw[0][llc_idx];
      272 +     ranges->count = 0;
      273 +     ranges->start = -1u;
 266  274
 267  275       // Go over all other cpus
 268  276       CPU_LOOP: for(i; 1~cpus) {
 269  277           // Check if the range is already there
 270      -         idx_range_t candidate = raw[i][llc_idx].range;
      278 +         raw_cache_instance * candidate = &raw[i][llc_idx];
 271  279           for(j; range_cnt) {
 272      -             idx_range_t exist = ranges[j];
      280 +             llc_map_t & exist = ranges[j];
 273  281               // If the range is already there just jump to the next cpu
 274      -             if(0 == strcmp(candidate, exist)) continue CPU_LOOP;
      282 +             if(0 == strcmp(candidate->range, exist.raw->range)) continue CPU_LOOP;
 275  283           }
 276  284
 277  285           // The range wasn't there, added to the list
 278  286           ranges = alloc(range_cnt + 1, ranges`realloc);
 279      -         ranges[range_cnt] = candidate;
      287 +         ranges[range_cnt].raw = candidate;
      288 +         ranges[range_cnt].count = 0;
      289 +         ranges[range_cnt].start = -1u;
 280  290           range_cnt++;
 281  291
   …    …
 287  297   struct cpu_pairing_t {
 288  298       unsigned cpu;
 289      -     unsigned llc_id;
      299 +     unsigned id;
 290  300   };
 291  301
 292  302   int ?<?( cpu_pairing_t lhs, cpu_pairing_t rhs ) {
 293      -     return lhs.llc_id < rhs.llc_id;
 294      - }
 295      -
 296      - static [[]cpu_pairing_t] get_cpu_pairings(unsigned cpus, raw_cache_instance ** raw, idx_range_t * maps, size_t map_cnt) {
      303 +     return lhs.id < rhs.id;
      304 + }
      305 +
      306 + static [[]cpu_pairing_t] get_cpu_pairings(unsigned cpus, raw_cache_instance ** raw, llc_map_t * maps, size_t map_cnt) {
 297  307       cpu_pairing_t * pairings = alloc(cpus);
 298  308
   …    …
 301  311           idx_range_t want = raw[i][0].range;
 302  312           MAP_LOOP: for(j; map_cnt) {
 303      -             if(0 != strcmp(want, maps[j])) continue MAP_LOOP;
 304      -
 305      -             pairings[i].llc_id = j;
      313 +             if(0 != strcmp(want, maps[j].raw->range)) continue MAP_LOOP;
      314 +
      315 +             pairings[i].id = j;
 306  316               continue CPU_LOOP;
 307  317           }
   …    …
 312  322       return pairings;
 313  323   }
      324 +
      325 + #include <fstream.hfa>
 314  326
 315  327   extern "C" {
   …    …
 336  348
 337  349       // Find number of distinct cache instances
 338      -     idx_range_t * maps;
      350 +     llc_map_t * maps;
 339  351       size_t map_cnt;
 340  352       [maps, map_cnt] = distinct_llcs(cpus, cache_levels - llc, raw);
 341  353
 342  354       #if defined(__CFA_WITH_VERIFY__)
      355 +     // Verify that the caches cover the all the cpus
 343  356       {
 344      -         unsigned width = 0;
      357 +         unsigned width1 = 0;
      358 +         unsigned width2 = 0;
 345  359           for(i; map_cnt) {
 346  360               const char * _;
 347      -             width += read_width(maps[i], strlen(maps[i]), &_);
      361 +             width1 += read_width(maps[i].raw->range, strlen(maps[i].raw->range), &_);
      362 +             width2 += maps[i].raw->width;
 348  363           }
 349      -         verify(width == cpus);
      364 +         verify(width1 == cpus);
      365 +         verify(width2 == cpus);
 350  366       }
 351  367       #endif
   …    …
 357  373       qsort(pairings, cpus);
 358  374
 359      -     unsigned llc_width = raw[0][cache_levels - llc].width;
 360      -
 361      -     // From the mappins build the actual cpu map we want
      375 +     {
      376 +         unsigned it = 0;
      377 +         for(i; cpus) {
      378 +             unsigned llc_id = pairings[i].id;
      379 +             if(maps[llc_id].start == -1u) {
      380 +                 maps[llc_id].start = it;
      381 +                 it += maps[llc_id].raw->width;
      382 +                 /* paranoid */ verify(maps[llc_id].start < it);
      383 +                 /* paranoid */ verify(it != -1u);
      384 +             }
      385 +         }
      386 +         /* paranoid */ verify(it == cpus);
      387 +     }
      388 +
      389 +     // From the mappings build the actual cpu map we want
 362  390       struct cpu_map_entry_t * entries = alloc(cpus);
 363  391       for(i; cpus) { entries[i].count = 0; }
 364  392       for(i; cpus) {
      393 +         /* paranoid */ verify(pairings[i].id < map_cnt);
 365  394           unsigned c = pairings[i].cpu;
 366      -         entries[c].start = pairings[i].llc_id * llc_width;
 367      -         entries[c].count = llc_width;
      395 +         unsigned llc_id = pairings[i].id;
      396 +         unsigned width = maps[llc_id].raw->width;
      397 +         unsigned start = maps[llc_id].start;
      398 +         unsigned self = start + (maps[llc_id].count++);
      399 +         entries[c].count = width;
      400 +         entries[c].start = start;
      401 +         entries[c].self = self;
 368  402

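What the llc_map_t changes above buy is that each CPU ends up with a start/count describing its last-level-cache group plus a unique self slot inside that group (the new self field of cpu_map_entry_t appears in cpu.hfa just below). Here is a stand-alone plain-C sketch of that two-pass assignment, with a hard-coded example topology in place of the /sys discovery the real code performs; the struct and variable names are illustrative only.

    #include <stdio.h>

    #define NCPUS 8

    /* Per-CPU result, mirroring cpu_map_entry_t: the [start, start+count) range
     * of the CPU's LLC group and a unique 'self' slot inside that range. */
    struct map_entry { unsigned self, start, count; };

    int main(void) {
        /* Example topology: cpus 0-3 share LLC 0, cpus 4-7 share LLC 1.
         * (The real code derives this from shared_cpu_list under /sys.) */
        unsigned llc_of_cpu[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
        unsigned llc_width[2]      = { 4, 4 };

        /* Pass 1: give each LLC group a starting offset, first-come
         * first-served, similar to the loop added in cpu.cfa. */
        unsigned llc_start[2] = { -1u, -1u };
        unsigned it = 0;
        for (unsigned cpu = 0; cpu < NCPUS; cpu++) {
            unsigned llc = llc_of_cpu[cpu];
            if (llc_start[llc] == -1u) {
                llc_start[llc] = it;
                it += llc_width[llc];
            }
        }
        /* here it == NCPUS, matching the paranoid check in the diff */

        /* Pass 2: hand each CPU its group's start/count plus a unique self slot. */
        unsigned next_slot[2] = { 0, 0 };
        struct map_entry entries[NCPUS];
        for (unsigned cpu = 0; cpu < NCPUS; cpu++) {
            unsigned llc = llc_of_cpu[cpu];
            entries[cpu].count = llc_width[llc];
            entries[cpu].start = llc_start[llc];
            entries[cpu].self  = llc_start[llc] + next_slot[llc]++;
        }

        for (unsigned cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu %u: start %u count %u self %u\n",
                   cpu, entries[cpu].start, entries[cpu].count, entries[cpu].self);
        return 0;
    }

With this layout every CPU in the same LLC group shares start/count while holding a distinct self, which is exactly the property the ready-queue sharding in ready_queue.cfa and the new test below rely on.
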
libcfa/src/device/cpu.hfa
r572a02f  rdcbfcbc

  17   17
  18   18   struct cpu_map_entry_t {
       19 +     unsigned self;
  19   20       unsigned start;
  20   21       unsigned count;

tests/device/cpu.cfa
r572a02f  rdcbfcbc

  17   17   #include <fstream.hfa>
  18   18   #include <device/cpu.hfa>
       19 + #include <stdlib.hfa>
       20 +
       21 + #include <errno.h>
       22 + #include <stdio.h>
       23 + #include <string.h>
       24 + #include <unistd.h>
       25 +
  19   26   extern "C" {
       27 +     #include <dirent.h>
       28 +     #include <sys/types.h>
       29 +     #include <sys/stat.h>
  20   30       #include <sys/sysinfo.h>
       31 +     #include <fcntl.h>
       32 + }
       33 +
       34 + // go through a directory calling fn on each file
       35 + static int iterate_dir( const char * path, void (*fn)(struct dirent * ent) ) {
       36 +     // open the directory
       37 +     DIR *dir = opendir(path);
       38 +     if(dir == 0p) { return ENOTDIR; }
       39 +
       40 +     // call fn for each
       41 +     struct dirent * ent;
       42 +     while ((ent = readdir(dir)) != 0p) {
       43 +         fn( ent );
       44 +     }
       45 +
       46 +     // no longer need this
       47 +     closedir(dir);
       48 +     return 0;
       49 + }
       50 +
       51 + // count the number of directories with the specified prefix
       52 + // the directories counted have the form '[prefix]N' where prefix is the parameter
       53 + // and N is an base 10 integer.
       54 + static int count_prefix_dirs(const char * path, const char * prefix) {
       55 +     // read the directory and find the cpu count
       56 +     // and make sure everything is as expected
       57 +     int max = -1;
       58 +     int count = 0;
       59 +     void lambda(struct dirent * ent) {
       60 +         // were are looking for prefixX, where X is a number
       61 +         // check that it starts with 'cpu
       62 +         char * s = strstr(ent->d_name, prefix);
       63 +         if(s == 0p) { return; }
       64 +         if(s != ent->d_name) { return; }
       65 +
       66 +         // check that the next part is a number
       67 +         s += strlen(prefix);
       68 +         char * end;
       69 +         long int val = strtol(s, &end, 10);
       70 +         if(*end != '\0' || val < 0) { return; }
       71 +
       72 +         // check that it's a directory
       73 +         if(ent->d_type != DT_DIR) { return; }
       74 +
       75 +         // it's a match!
       76 +         max = max(val, max);
       77 +         count++;
       78 +     }
       79 +     iterate_dir(path, lambda);
       80 +
       81 +     /* paranoid */ verifyf(count == max + 1, "Inconsistent %s count, counted %d, but max %s was %d", prefix, count, prefix, (int)max);
       82 +
       83 +     return count;
       84 + }
       85 +
       86 + // Count number of cache *indexes* in the system
       87 + // cache indexes are distinct from cache level as Data or Instruction cache
       88 + // can share a level but not an index
       89 + // PITFALL: assumes all cpus have the same indexes as cpu0
       90 + static int count_cache_indexes(void) {
       91 +     return count_prefix_dirs("/sys/devices/system/cpu/cpu0/cache", "index");
       92 + }
       93 +
       94 + // read information about a spcficic cache index/cpu file into the output buffer
       95 + static size_t read_cpuidxinfo_into(unsigned cpu, unsigned idx, const char * file, char * out, size_t out_len) {
       96 +     // Pick the file we want and read it
       97 +     char buf[128];
       98 +     /* paranoid */ __attribute__((unused)) int len =
       99 +         snprintf(buf, 128, "/sys/devices/system/cpu/cpu%u/cache/index%u/%s", cpu, idx, file);
      100 +     /* paranoid */ verifyf(len > 0, "Could not generate '%s' filename for cpu %u, index %u", file, cpu, idx);
      101 +
      102 +     int fd = open(buf, 0, O_RDONLY);
      103 +     /* paranoid */ verifyf(fd > 0, "Could not open file '%s'", buf);
      104 +
      105 +     ssize_t r = read(fd, out, out_len);
      106 +     /* paranoid */ verifyf(r > 0, "Could not read file '%s'", buf);
      107 +
      108 +     /* paranoid */ __attribute__((unused)) int ret =
      109 +         close(fd);
      110 +     /* paranoid */ verifyf(ret == 0, "Could not close file '%s'", buf);
      111 +
      112 +     out[r-1] = '\0';
      113 +     return r-1;
      114 + }
      115 +
      116 + unsigned find_idx() {
      117 +     int idxs = count_cache_indexes();
      118 +
      119 +     unsigned found_level = 0;
      120 +     unsigned found = -1u;
      121 +     for(i; idxs) {
      122 +         unsigned idx = idxs - 1 - i;
      123 +         char buf[32];
      124 +
      125 +         // Level is the cache level: higher means bigger and slower
      126 +         read_cpuidxinfo_into(0, idx, "level", buf, 32);
      127 +         char * end;
      128 +         unsigned long level = strtoul(buf, &end, 10);
      129 +         /* paranoid */ verifyf(level <= 250, "Cpu %u has more than 250 levels of cache, that doesn't sound right", 0);
      130 +         /* paranoid */ verify(*end == '\0');
      131 +
      132 +         if(found_level < level) {
      133 +             found_level = level;
      134 +             found = idx;
      135 +         }
      136 +     }
      137 +
      138 +     /* paranoid */ verify(found != -1u);
      139 +     return found;
  21  140   }
  22  141
  23  142   int main() {
      143 +     //-----------------------------------------------------------------------
  24  144       int ret1 = get_nprocs();
  25  145       int ret2 = cpu_info.hthrd_count;
   …    …
  31  151       }
  32  152
      153 +     //-----------------------------------------------------------------------
      154 +     // Make sure no one has the same self
      155 +     for(ime; cpu_info.hthrd_count) {
      156 +         unsigned me = cpu_info.llc_map[ime].self;
      157 +         {
      158 +             unsigned s = cpu_info.llc_map[ime].start;
      159 +             unsigned e = s + cpu_info.llc_map[ime].count;
      160 +             if(me < s || me >= e) {
      161 +                 sout | "CPU" | ime | "outside of it's own map: " | s | "<=" | me | "<" | e;
      162 +             }
      163 +         }
      164 +
      165 +
      166 +         for(ithem; cpu_info.hthrd_count) {
      167 +             if(ime == ithem) continue;
      168 +
      169 +             unsigned them = cpu_info.llc_map[ithem].self;
      170 +             if(me == them) {
      171 +                 sout | "CPU" | ime | "has conflicting self id with" | ithem | "(" | me | ")";
      172 +             }
      173 +         }
      174 +     }
      175 +
      176 +
      177 +     //-----------------------------------------------------------------------
      178 +     unsigned idx = find_idx();
      179 +     // For all procs check mapping is consistent
      180 +     for(cpu_me; cpu_info.hthrd_count) {
      181 +         char buf_me[32];
      182 +         size_t len_me = read_cpuidxinfo_into(cpu_me, idx, "shared_cpu_list", buf_me, 32);
      183 +         for(cpu_them; cpu_info.hthrd_count) {
      184 +             if(cpu_me == cpu_them) continue;
      185 +             char buf_them[32];
      186 +             size_t len_them = read_cpuidxinfo_into(cpu_them, idx, "shared_cpu_list", buf_them, 32);
      187 +
      188 +             bool match_file = len_them == len_me && 0 == strncmp(buf_them, buf_me, len_me);
      189 +             bool match_info = cpu_info.llc_map[cpu_me].start == cpu_info.llc_map[cpu_them].start && cpu_info.llc_map[cpu_me].count == cpu_info.llc_map[cpu_them].count;
      190 +
      191 +             if(match_file != match_info) {
      192 +                 sout | "CPU" | cpu_me | "and" | cpu_them | "have inconsitent file and cpu_info";
      193 +                 sout | cpu_me | ": <" | cpu_info.llc_map[cpu_me ].start | "," | cpu_info.llc_map[cpu_me ].count | "> '" | buf_me | "'";
      194 +                 sout | cpu_me | ": <" | cpu_info.llc_map[cpu_them].start | "," | cpu_info.llc_map[cpu_them].count | "> '" | buf_them | "'";
      195 +             }
      196 +         }
      197 +     }
  33  198   }

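The helpers added to the test above all revolve around one sysfs pattern: count the directories named '<prefix>N' under a path (cpuN under /sys/devices/system/cpu, indexN under .../cpu0/cache) and read small per-cpu cache files. Below is a stand-alone plain-C version of the counting part, using only standard dirent/string calls and skipping the C∀-specific nested function, d_type check and verify machinery; it sketches the same idea and is not the test's actual code.

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Count directory entries named "<prefix><N>" (N a base-10 integer) under
     * 'path'; same idea as the test's count_prefix_dirs. Returns -1 if the
     * directory cannot be opened. */
    static int count_prefix_dirs(const char * path, const char * prefix) {
        DIR * dir = opendir(path);
        if (!dir) return -1;

        int count = 0;
        size_t plen = strlen(prefix);
        struct dirent * ent;
        while ((ent = readdir(dir)) != NULL) {
            if (strncmp(ent->d_name, prefix, plen) != 0) continue;   /* wrong prefix     */
            char * end;
            long val = strtol(ent->d_name + plen, &end, 10);
            if (*end != '\0' || val < 0) continue;                   /* not "<prefix>N"  */
            count++;
        }
        closedir(dir);
        return count;
    }

    int main(void) {
        /* e.g. hardware-thread count and cache-index count, as the test derives them */
        int cpus  = count_prefix_dirs("/sys/devices/system/cpu", "cpu");
        int index = count_prefix_dirs("/sys/devices/system/cpu/cpu0/cache", "index");
        printf("cpus: %d, cache indexes on cpu0: %d\n", cpus, index);
        return 0;
    }

On a Linux machine the first call should agree with get_nprocs(), which is exactly the first consistency check the test's main() performs.
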