Changeset e319fc5
- Timestamp: Jun 19, 2021, 3:53:18 PM (4 years ago)
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 15f769c
- Parents: 6992f95 (diff), c7d8696a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files: 3 added, 32 edited
libcfa/src/concurrency/kernel.cfa
r6992f95 re319fc5 280 280 281 281 // Spin a little on I/O, just in case 282 282 for(5) { 283 283 __maybe_io_drain( this ); 284 284 readyThread = pop_fast( this->cltr ); … … 287 287 288 288 // no luck, try stealing a few times 289 289 for(5) { 290 290 if( __maybe_io_drain( this ) ) { 291 291 readyThread = pop_fast( this->cltr ); -
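For readers not used to CFA's counted loop, for(5) { ... } simply runs its body five times; the hunk above uses it to bound both the I/O-drain spin and the subsequent stealing attempts. An annotated outline of the first loop (names as in the diff; the enclosing function and the success check are truncated there, so treat this as a sketch only):

    // Spin a little on I/O before giving up on the local ready queue.
    for(5) {                                   // CFA counted loop: five quick attempts
        __maybe_io_drain( this );              // complete any pending I/O first
        readyThread = pop_fast( this->cltr );  // then retry the local ready queue
        if( readyThread ) break;               // assumed: the elided code exits on success
    }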
libcfa/src/concurrency/kernel.hfa
r6992f95	re319fc5
  66    66  		unsigned id;
  67    67  		unsigned target;
        68  		unsigned last;
  68    69  		unsigned long long int cutoff;
  69    70  	} rdq;
-
libcfa/src/concurrency/kernel/startup.cfa
r6992f95	re319fc5
 541   541  	this.rdq.id = -1u;
 542   542  	this.rdq.target = -1u;
       543  	this.rdq.last = -1u;
 543   544  	this.rdq.cutoff = 0ull;
 544   545  	do_terminate = false;
-
libcfa/src/concurrency/ready_queue.cfa
r6992f95 re319fc5 24 24 25 25 #include "bits/defs.hfa" 26 #include "device/cpu.hfa" 26 27 #include "kernel_private.hfa" 27 28 … … 47 48 #endif 48 49 49 #if defined(USE_RELAXED_FIFO) 50 #if defined(USE_CPU_WORK_STEALING) 51 #define READYQ_SHARD_FACTOR 2 52 #elif defined(USE_RELAXED_FIFO) 50 53 #define BIAS 4 51 54 #define READYQ_SHARD_FACTOR 4 … … 215 218 //======================================================================= 216 219 void ?{}(__ready_queue_t & this) with (this) { 217 lanes.data = 0p; 218 lanes.tscs = 0p; 219 lanes.count = 0; 220 #if defined(USE_CPU_WORK_STEALING) 221 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR; 222 lanes.data = alloc( lanes.count ); 223 lanes.tscs = alloc( lanes.count ); 224 225 for( idx; (size_t)lanes.count ) { 226 (lanes.data[idx]){}; 227 lanes.tscs[idx].tv = rdtscl(); 228 } 229 #else 230 lanes.data = 0p; 231 lanes.tscs = 0p; 232 lanes.count = 0; 233 #endif 220 234 } 221 235 222 236 void ^?{}(__ready_queue_t & this) with (this) { 223 verify( SEQUENTIAL_SHARD == lanes.count ); 237 #if !defined(USE_CPU_WORK_STEALING) 238 verify( SEQUENTIAL_SHARD == lanes.count ); 239 #endif 240 224 241 free(lanes.data); 225 242 free(lanes.tscs); … … 227 244 228 245 //----------------------------------------------------------------------- 246 #if defined(USE_CPU_WORK_STEALING) 247 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) { 248 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 249 250 processor * const proc = kernelTLS().this_processor; 251 const bool external = !push_local || (!proc) || (cltr != proc->cltr); 252 253 const int cpu = __kernel_getcpu(); 254 /* paranoid */ verify(cpu >= 0); 255 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 256 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 257 258 const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; 259 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 260 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 261 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %u lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 262 263 const int start = map.self * READYQ_SHARD_FACTOR; 264 unsigned i; 265 do { 266 unsigned r; 267 if(unlikely(external)) { r = __tls_rand(); } 268 else { r = proc->rdq.its++; } 269 i = start + (r % READYQ_SHARD_FACTOR); 270 // If we can't lock it retry 271 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 272 273 // Actually push it 274 push(lanes.data[i], thrd); 275 276 // Unlock and return 277 __atomic_unlock( &lanes.data[i].lock ); 278 279 #if !defined(__CFA_NO_STATISTICS__) 280 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 281 else __tls_stats()->ready.push.local.success++; 282 #endif 283 284 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 285 286 } 287 288 // Pop from the ready queue from a given cluster 289 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 290 /* paranoid */ verify( lanes.count > 0 ); 291 /* paranoid */ verify( kernelTLS().this_processor ); 292 293 const int cpu = __kernel_getcpu(); 294 /* paranoid */ verify(cpu >= 0); 295 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 296 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < 
lanes.count); 297 298 const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; 299 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 300 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 301 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %u lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 302 303 processor * const proc = kernelTLS().this_processor; 304 const int start = map.self * READYQ_SHARD_FACTOR; 305 306 // Did we already have a help target 307 if(proc->rdq.target == -1u) { 308 // if We don't have a 309 unsigned long long min = ts(lanes.data[start]); 310 for(i; READYQ_SHARD_FACTOR) { 311 unsigned long long tsc = ts(lanes.data[start + i]); 312 if(tsc < min) min = tsc; 313 } 314 proc->rdq.cutoff = min; 315 proc->rdq.target = (map.start * READYQ_SHARD_FACTOR) + (__tls_rand() % (map.count* READYQ_SHARD_FACTOR)); 316 } 317 else { 318 const unsigned long long bias = 0; //2_500_000_000; 319 const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff; 320 { 321 unsigned target = proc->rdq.target; 322 proc->rdq.target = -1u; 323 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { 324 $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 325 proc->rdq.last = target; 326 if(t) return t; 327 } 328 } 329 330 unsigned last = proc->rdq.last; 331 if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) { 332 $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help)); 333 if(t) return t; 334 } 335 else { 336 proc->rdq.last = -1u; 337 } 338 } 339 340 for(READYQ_SHARD_FACTOR) { 341 unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 342 if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 343 } 344 345 // All lanes where empty return 0p 346 return 0p; 347 } 348 349 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 350 processor * const proc = kernelTLS().this_processor; 351 unsigned last = proc->rdq.last; 352 353 unsigned i = __tls_rand() % lanes.count; 354 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 355 } 356 __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) { 357 return search(cltr); 358 } 359 #endif 229 360 #if defined(USE_RELAXED_FIFO) 230 361 //----------------------------------------------------------------------- … … 580 711 } 581 712 582 // Grow the ready queue 583 void ready_queue_grow(struct cluster * cltr) { 584 size_t ncount; 585 int target = cltr->procs.total; 586 587 /* paranoid */ verify( ready_mutate_islocked() ); 588 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n"); 589 590 // Make sure that everything is consistent 591 /* paranoid */ check( cltr->ready_queue ); 592 593 // grow the ready queue 594 with( cltr->ready_queue ) { 595 // Find new count 596 // Make sure we always have atleast 1 list 597 if(target >= 2) { 598 ncount = target * READYQ_SHARD_FACTOR; 599 } else { 600 ncount = SEQUENTIAL_SHARD; 601 } 602 603 // Allocate new array (uses realloc and memcpies the data) 604 lanes.data = alloc( ncount, lanes.data`realloc ); 605 606 // Fix the moved data 607 for( idx; (size_t)lanes.count ) { 608 fix(lanes.data[idx]); 609 } 610 611 // Construct new data 612 for( idx; (size_t)lanes.count ~ ncount) { 613 (lanes.data[idx]){}; 614 } 615 616 // Update original 617 lanes.count = ncount; 618 } 619 
620 fix_times(cltr); 621 622 reassign_cltr_id(cltr); 623 624 // Make sure that everything is consistent 625 /* paranoid */ check( cltr->ready_queue ); 626 627 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n"); 628 629 /* paranoid */ verify( ready_mutate_islocked() ); 630 } 631 632 // Shrink the ready queue 633 void ready_queue_shrink(struct cluster * cltr) { 634 /* paranoid */ verify( ready_mutate_islocked() ); 635 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); 636 637 // Make sure that everything is consistent 638 /* paranoid */ check( cltr->ready_queue ); 639 640 int target = cltr->procs.total; 641 642 with( cltr->ready_queue ) { 643 // Remember old count 644 size_t ocount = lanes.count; 645 646 // Find new count 647 // Make sure we always have atleast 1 list 648 lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; 649 /* paranoid */ verify( ocount >= lanes.count ); 650 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); 651 652 // for printing count the number of displaced threads 653 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 654 __attribute__((unused)) size_t displaced = 0; 655 #endif 656 657 // redistribute old data 658 for( idx; (size_t)lanes.count ~ ocount) { 659 // Lock is not strictly needed but makes checking invariants much easier 660 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); 661 verify(locked); 662 663 // As long as we can pop from this lane to push the threads somewhere else in the queue 664 while(!is_empty(lanes.data[idx])) { 665 struct $thread * thrd; 666 unsigned long long _; 667 [thrd, _] = pop(lanes.data[idx]); 668 669 push(cltr, thrd, true); 670 671 // for printing count the number of displaced threads 672 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 673 displaced++; 674 #endif 675 } 676 677 // Unlock the lane 678 __atomic_unlock(&lanes.data[idx].lock); 679 680 // TODO print the queue statistics here 681 682 ^(lanes.data[idx]){}; 683 } 684 685 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced); 686 687 // Allocate new array (uses realloc and memcpies the data) 688 lanes.data = alloc( lanes.count, lanes.data`realloc ); 689 690 // Fix the moved data 691 for( idx; (size_t)lanes.count ) { 692 fix(lanes.data[idx]); 693 } 694 } 695 696 fix_times(cltr); 697 698 reassign_cltr_id(cltr); 699 700 // Make sure that everything is consistent 701 /* paranoid */ check( cltr->ready_queue ); 702 703 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n"); 704 /* paranoid */ verify( ready_mutate_islocked() ); 705 } 713 #if defined(USE_CPU_WORK_STEALING) 714 // ready_queue size is fixed in this case 715 void ready_queue_grow(struct cluster * cltr) {} 716 void ready_queue_shrink(struct cluster * cltr) {} 717 #else 718 // Grow the ready queue 719 void ready_queue_grow(struct cluster * cltr) { 720 size_t ncount; 721 int target = cltr->procs.total; 722 723 /* paranoid */ verify( ready_mutate_islocked() ); 724 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n"); 725 726 // Make sure that everything is consistent 727 /* paranoid */ check( cltr->ready_queue ); 728 729 // grow the ready queue 730 with( cltr->ready_queue ) { 731 // Find new count 732 // Make sure we always have atleast 1 list 733 if(target >= 2) { 734 ncount = target * READYQ_SHARD_FACTOR; 735 } else { 736 ncount = SEQUENTIAL_SHARD; 737 } 738 739 
// Allocate new array (uses realloc and memcpies the data) 740 lanes.data = alloc( ncount, lanes.data`realloc ); 741 742 // Fix the moved data 743 for( idx; (size_t)lanes.count ) { 744 fix(lanes.data[idx]); 745 } 746 747 // Construct new data 748 for( idx; (size_t)lanes.count ~ ncount) { 749 (lanes.data[idx]){}; 750 } 751 752 // Update original 753 lanes.count = ncount; 754 } 755 756 fix_times(cltr); 757 758 reassign_cltr_id(cltr); 759 760 // Make sure that everything is consistent 761 /* paranoid */ check( cltr->ready_queue ); 762 763 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n"); 764 765 /* paranoid */ verify( ready_mutate_islocked() ); 766 } 767 768 // Shrink the ready queue 769 void ready_queue_shrink(struct cluster * cltr) { 770 /* paranoid */ verify( ready_mutate_islocked() ); 771 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); 772 773 // Make sure that everything is consistent 774 /* paranoid */ check( cltr->ready_queue ); 775 776 int target = cltr->procs.total; 777 778 with( cltr->ready_queue ) { 779 // Remember old count 780 size_t ocount = lanes.count; 781 782 // Find new count 783 // Make sure we always have atleast 1 list 784 lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; 785 /* paranoid */ verify( ocount >= lanes.count ); 786 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); 787 788 // for printing count the number of displaced threads 789 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 790 __attribute__((unused)) size_t displaced = 0; 791 #endif 792 793 // redistribute old data 794 for( idx; (size_t)lanes.count ~ ocount) { 795 // Lock is not strictly needed but makes checking invariants much easier 796 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); 797 verify(locked); 798 799 // As long as we can pop from this lane to push the threads somewhere else in the queue 800 while(!is_empty(lanes.data[idx])) { 801 struct $thread * thrd; 802 unsigned long long _; 803 [thrd, _] = pop(lanes.data[idx]); 804 805 push(cltr, thrd, true); 806 807 // for printing count the number of displaced threads 808 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 809 displaced++; 810 #endif 811 } 812 813 // Unlock the lane 814 __atomic_unlock(&lanes.data[idx].lock); 815 816 // TODO print the queue statistics here 817 818 ^(lanes.data[idx]){}; 819 } 820 821 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced); 822 823 // Allocate new array (uses realloc and memcpies the data) 824 lanes.data = alloc( lanes.count, lanes.data`realloc ); 825 826 // Fix the moved data 827 for( idx; (size_t)lanes.count ) { 828 fix(lanes.data[idx]); 829 } 830 } 831 832 fix_times(cltr); 833 834 reassign_cltr_id(cltr); 835 836 // Make sure that everything is consistent 837 /* paranoid */ check( cltr->ready_queue ); 838 839 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n"); 840 /* paranoid */ verify( ready_mutate_islocked() ); 841 } 842 #endif 706 843 707 844 #if !defined(__CFA_NO_STATISTICS__) -
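In the USE_CPU_WORK_STEALING configuration introduced above, the ready queue is sharded per hardware thread: each cpu owns READYQ_SHARD_FACTOR lanes starting at the "self" slot of its llc_map entry, push spins on a try-lock over that small group, and pop_fast may additionally "help" one randomly chosen lane of the whole last-level-cache group when that lane's timestamp is older than the processor's own cutoff, remembering it in rdq.last. The lane-index arithmetic reduces to the following C-style sketch (rand() stands in for __tls_rand(); the field names mirror the diff, the functions themselves are illustrative only):

    #include <stdlib.h>

    #define READYQ_SHARD_FACTOR 2    // value chosen by the cpu-work-stealing build above

    // Lane a push targets: lanes [self*F, self*F + F) belong to this cpu.
    // External pushes pick a random lane in the group; local pushes round-robin.
    static unsigned push_lane( unsigned self_slot, unsigned * its, int external ) {
        unsigned start = self_slot * READYQ_SHARD_FACTOR;
        unsigned r = external ? (unsigned)rand()    // stand-in for __tls_rand()
                              : (*its)++;           // proc->rdq.its in the real code
        return start + ( r % READYQ_SHARD_FACTOR );
    }

    // Lane pop_fast may "help": any lane of the cpu's LLC group,
    // i.e. [map.start*F, (map.start + map.count)*F).
    static unsigned help_lane( unsigned llc_start, unsigned llc_count ) {
        return llc_start * READYQ_SHARD_FACTOR
             + (unsigned)rand() % ( llc_count * READYQ_SHARD_FACTOR );
    }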
libcfa/src/containers/array.hfa
r6992f95 re319fc5 1 1 2 2 3 // a type whose size is n 4 #define Z(n) char[n] 5 6 // the inverse of Z(-) 7 #define z(N) sizeof(N) 8 9 forall( T & ) struct tag {}; 3 forall( __CFA_tysys_id_only_X & ) struct tag {}; 10 4 #define ttag(T) ((tag(T)){}) 11 #define ztag(n) ttag( Z(n))5 #define ztag(n) ttag(n) 12 6 13 7 … … 18 12 forall( [N], S & | sized(S), Timmed &, Tbase & ) { 19 13 struct arpk { 20 S strides[ z(N)];14 S strides[N]; 21 15 }; 22 16 … … 56 50 57 51 static inline size_t ?`len( arpk(N, S, Timmed, Tbase) & a ) { 58 return z(N);52 return N; 59 53 } 60 54 61 55 // workaround #226 (and array relevance thereof demonstrated in mike102/otype-slow-ndims.cfa) 62 56 static inline void ?{}( arpk(N, S, Timmed, Tbase) & this ) { 63 void ?{}( S (&inner)[ z(N)] ) {}57 void ?{}( S (&inner)[N] ) {} 64 58 ?{}(this.strides); 65 59 } 66 60 static inline void ^?{}( arpk(N, S, Timmed, Tbase) & this ) { 67 void ^?{}( S (&inner)[ z(N)] ) {}61 void ^?{}( S (&inner)[N] ) {} 68 62 ^?{}(this.strides); 69 63 } -
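With the Z(n)/z(N) tunnelling macros gone, a dimension parameter is written and consumed directly, both inside the container and in user code. A small usage sketch in the style of the updated tests (total1d is a hypothetical name here; the test's own version is total1d_low):

    forall( [N] )
    float total1d( array( float, N ) & a ) {
        float total = 0.0f;
        for ( i; N )            // N used directly as a loop bound, no z(N) wrapper
            total += a[i];
        return total;
    }

    array( float, 7 ) data;     // 7 is bound to the dimension parameter N at the call
    // ... fill data ...
    float sum = total1d( data );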
libcfa/src/device/cpu.cfa
r6992f95 re319fc5 256 256 } 257 257 258 struct llc_map_t { 259 raw_cache_instance * raw; 260 unsigned count; 261 unsigned start; 262 }; 263 258 264 // returns an allocate list of all the different distinct last level caches 259 static [* idx_range_t, size_t cnt] distinct_llcs(unsigned cpus, unsigned llc_idx, raw_cache_instance ** raw) {265 static [*llc_map_t, size_t cnt] distinct_llcs(unsigned cpus, unsigned llc_idx, raw_cache_instance ** raw) { 260 266 // Allocate at least one element 261 idx_range_t* ranges = alloc();267 llc_map_t* ranges = alloc(); 262 268 size_t range_cnt = 1; 263 269 264 270 // Initialize with element 0 265 *ranges = raw[0][llc_idx].range; 271 ranges->raw = &raw[0][llc_idx]; 272 ranges->count = 0; 273 ranges->start = -1u; 266 274 267 275 // Go over all other cpus 268 276 CPU_LOOP: for(i; 1~cpus) { 269 277 // Check if the range is already there 270 idx_range_t candidate = raw[i][llc_idx].range;278 raw_cache_instance * candidate = &raw[i][llc_idx]; 271 279 for(j; range_cnt) { 272 idx_range_texist = ranges[j];280 llc_map_t & exist = ranges[j]; 273 281 // If the range is already there just jump to the next cpu 274 if(0 == strcmp(candidate , exist)) continue CPU_LOOP;282 if(0 == strcmp(candidate->range, exist.raw->range)) continue CPU_LOOP; 275 283 } 276 284 277 285 // The range wasn't there, added to the list 278 286 ranges = alloc(range_cnt + 1, ranges`realloc); 279 ranges[range_cnt] = candidate; 287 ranges[range_cnt].raw = candidate; 288 ranges[range_cnt].count = 0; 289 ranges[range_cnt].start = -1u; 280 290 range_cnt++; 281 291 } … … 287 297 struct cpu_pairing_t { 288 298 unsigned cpu; 289 unsigned llc_id;299 unsigned id; 290 300 }; 291 301 292 302 int ?<?( cpu_pairing_t lhs, cpu_pairing_t rhs ) { 293 return lhs. llc_id < rhs.llc_id;294 } 295 296 static [[]cpu_pairing_t] get_cpu_pairings(unsigned cpus, raw_cache_instance ** raw, idx_range_t * maps, size_t map_cnt) {303 return lhs.id < rhs.id; 304 } 305 306 static [[]cpu_pairing_t] get_cpu_pairings(unsigned cpus, raw_cache_instance ** raw, llc_map_t * maps, size_t map_cnt) { 297 307 cpu_pairing_t * pairings = alloc(cpus); 298 308 … … 301 311 idx_range_t want = raw[i][0].range; 302 312 MAP_LOOP: for(j; map_cnt) { 303 if(0 != strcmp(want, maps[j] )) continue MAP_LOOP;304 305 pairings[i]. 
llc_id = j;313 if(0 != strcmp(want, maps[j].raw->range)) continue MAP_LOOP; 314 315 pairings[i].id = j; 306 316 continue CPU_LOOP; 307 317 } … … 312 322 return pairings; 313 323 } 324 325 #include <fstream.hfa> 314 326 315 327 extern "C" { … … 336 348 337 349 // Find number of distinct cache instances 338 idx_range_t * maps;350 llc_map_t * maps; 339 351 size_t map_cnt; 340 352 [maps, map_cnt] = distinct_llcs(cpus, cache_levels - llc, raw); 341 353 342 354 #if defined(__CFA_WITH_VERIFY__) 355 // Verify that the caches cover the all the cpus 343 356 { 344 unsigned width = 0; 357 unsigned width1 = 0; 358 unsigned width2 = 0; 345 359 for(i; map_cnt) { 346 360 const char * _; 347 width += read_width(maps[i], strlen(maps[i]), &_); 361 width1 += read_width(maps[i].raw->range, strlen(maps[i].raw->range), &_); 362 width2 += maps[i].raw->width; 348 363 } 349 verify(width == cpus); 364 verify(width1 == cpus); 365 verify(width2 == cpus); 350 366 } 351 367 #endif … … 357 373 qsort(pairings, cpus); 358 374 359 unsigned llc_width = raw[0][cache_levels - llc].width; 360 361 // From the mappins build the actual cpu map we want 375 { 376 unsigned it = 0; 377 for(i; cpus) { 378 unsigned llc_id = pairings[i].id; 379 if(maps[llc_id].start == -1u) { 380 maps[llc_id].start = it; 381 it += maps[llc_id].raw->width; 382 /* paranoid */ verify(maps[llc_id].start < it); 383 /* paranoid */ verify(it != -1u); 384 } 385 } 386 /* paranoid */ verify(it == cpus); 387 } 388 389 // From the mappings build the actual cpu map we want 362 390 struct cpu_map_entry_t * entries = alloc(cpus); 363 391 for(i; cpus) { entries[i].count = 0; } 364 392 for(i; cpus) { 393 /* paranoid */ verify(pairings[i].id < map_cnt); 365 394 unsigned c = pairings[i].cpu; 366 entries[c].start = pairings[i].llc_id * llc_width; 367 entries[c].count = llc_width; 395 unsigned llc_id = pairings[i].id; 396 unsigned width = maps[llc_id].raw->width; 397 unsigned start = maps[llc_id].start; 398 unsigned self = start + (maps[llc_id].count++); 399 entries[c].count = width; 400 entries[c].start = start; 401 entries[c].self = self; 368 402 } 369 403 -
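The heart of the hunk above is the pass that turns the sorted cpu/LLC pairings into per-cpu map entries: the first cpu seen for an LLC fixes that cache's start offset, and every cpu of the same LLC then takes the next self slot inside that window. A self-contained C-style sketch of the assignment with four cpus split over two last-level caches (the array contents are made-up illustration data, not real topology):

    // pairings[] is already sorted by LLC id, as after the qsort in the real code.
    struct { unsigned cpu, id; } pairings[] = { {0,0}, {2,0}, {1,1}, {3,1} };
    unsigned width[] = { 2, 2 };            // cpus per LLC (raw->width)
    unsigned start[] = { -1u, -1u };        // start offset per LLC, filled below
    unsigned seen[]  = {  0,  0 };          // running "self" counter per LLC

    unsigned it = 0;
    for ( unsigned i = 0; i < 4; i++ ) {    // pass 1: give each LLC a contiguous window
        unsigned id = pairings[i].id;
        if ( start[id] == -1u ) { start[id] = it; it += width[id]; }
    }

    struct { unsigned start, count, self; } entries[4];
    for ( unsigned i = 0; i < 4; i++ ) {    // pass 2: one entry per cpu
        unsigned c  = pairings[i].cpu;
        unsigned id = pairings[i].id;
        entries[c].count = width[id];
        entries[c].start = start[id];
        entries[c].self  = start[id] + seen[id]++;  // unique slot inside the LLC window
    }
    // cpu 0 -> { start 0, self 0 },  cpu 2 -> { start 0, self 1 },
    // cpu 1 -> { start 2, self 2 },  cpu 3 -> { start 2, self 3 }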
libcfa/src/device/cpu.hfa
r6992f95	re319fc5
  17    17
  18    18  struct cpu_map_entry_t {
        19  	unsigned self;
  19    20  	unsigned start;
  20    21  	unsigned count;
-
src/AST/Convert.cpp
r6992f95	re319fc5
 2415  2415  	}
 2416  2416
       2417  	virtual void visit( const DimensionExpr * old ) override final {
       2418  		// DimensionExpr gets desugared away in Validate.
       2419  		// As long as new-AST passes don't use it, this cheap-cheerful error
       2420  		// detection helps ensure that these occurrences have been compiled
       2421  		// away, as expected. To move the DimensionExpr boundary downstream
       2422  		// or move the new-AST translation boundary upstream, implement
       2423  		// DimensionExpr in the new AST and implement a conversion.
       2424  		(void) old;
       2425  		assert(false && "DimensionExpr should not be present at new-AST boundary");
       2426  	}
       2427
 2417  2428  	virtual void visit( const AsmExpr * old ) override final {
 2418  2429  		this->node = visitBaseExpr( old,
-
src/AST/Decl.cpp
r6992f95	re319fc5
  78    78
  79    79  const char * TypeDecl::typeString() const {
  80         	static const char * kindNames[] = { "sized data type", "sized data type", "sized object type", "sized function type", "sized tuple type", "sized array length type" };
        80  	static const char * kindNames[] = { "sized data type", "sized data type", "sized object type", "sized function type", "sized tuple type", "sized length value" };
  81    81  	static_assert( sizeof(kindNames) / sizeof(kindNames[0]) == TypeDecl::NUMBER_OF_KINDS, "typeString: kindNames is out of sync." );
  82    82  	assertf( kind < TypeDecl::NUMBER_OF_KINDS, "TypeDecl kind is out of bounds." );
-
src/AST/Decl.hpp
r6992f95	re319fc5
 175   175  class TypeDecl final : public NamedTypeDecl {
 176   176    public:
 177         	enum Kind { Dtype, DStype, Otype, Ftype, Ttype, ALtype, NUMBER_OF_KINDS };
       177  	enum Kind { Dtype, DStype, Otype, Ftype, Ttype, Dimension, NUMBER_OF_KINDS };
 178   178
 179   179  	Kind kind;
-
src/CodeGen/CodeGenerator.cc
r6992f95	re319fc5
 589   589  		output << nameExpr->get_name();
 590   590  	} // if
       591  }
       592
       593  void CodeGenerator::postvisit( DimensionExpr * dimensionExpr ) {
       594  	extension( dimensionExpr );
       595  	output << "/*non-type*/" << dimensionExpr->get_name();
 591   596  }
 592   597
-
src/CodeGen/CodeGenerator.h
r6992f95	re319fc5
  92    92  		void postvisit( TupleIndexExpr * tupleExpr );
  93    93  		void postvisit( TypeExpr *typeExpr );
        94  		void postvisit( DimensionExpr *dimensionExpr );
  94    95  		void postvisit( AsmExpr * );
  95    96  		void postvisit( StmtExpr * );
-
src/Common/PassVisitor.h
r6992f95 re319fc5 167 167 virtual void visit( TypeExpr * typeExpr ) override final; 168 168 virtual void visit( const TypeExpr * typeExpr ) override final; 169 virtual void visit( DimensionExpr * dimensionExpr ) override final; 170 virtual void visit( const DimensionExpr * dimensionExpr ) override final; 169 171 virtual void visit( AsmExpr * asmExpr ) override final; 170 172 virtual void visit( const AsmExpr * asmExpr ) override final; … … 309 311 virtual Expression * mutate( CommaExpr * commaExpr ) override final; 310 312 virtual Expression * mutate( TypeExpr * typeExpr ) override final; 313 virtual Expression * mutate( DimensionExpr * dimensionExpr ) override final; 311 314 virtual Expression * mutate( AsmExpr * asmExpr ) override final; 312 315 virtual Expression * mutate( ImplicitCopyCtorExpr * impCpCtorExpr ) override final; … … 542 545 class WithIndexer { 543 546 protected: 544 WithIndexer( ) {}547 WithIndexer( bool trackIdentifiers = true ) : indexer(trackIdentifiers) {} 545 548 ~WithIndexer() {} 546 549 -
src/Common/PassVisitor.impl.h
r6992f95 re319fc5 2519 2519 2520 2520 //-------------------------------------------------------------------------- 2521 // DimensionExpr 2522 template< typename pass_type > 2523 void PassVisitor< pass_type >::visit( DimensionExpr * node ) { 2524 VISIT_START( node ); 2525 2526 indexerScopedAccept( node->result, *this ); 2527 2528 VISIT_END( node ); 2529 } 2530 2531 template< typename pass_type > 2532 void PassVisitor< pass_type >::visit( const DimensionExpr * node ) { 2533 VISIT_START( node ); 2534 2535 indexerScopedAccept( node->result, *this ); 2536 2537 VISIT_END( node ); 2538 } 2539 2540 template< typename pass_type > 2541 Expression * PassVisitor< pass_type >::mutate( DimensionExpr * node ) { 2542 MUTATE_START( node ); 2543 2544 indexerScopedMutate( node->env , *this ); 2545 indexerScopedMutate( node->result, *this ); 2546 2547 MUTATE_END( Expression, node ); 2548 } 2549 2550 //-------------------------------------------------------------------------- 2521 2551 // AsmExpr 2522 2552 template< typename pass_type > … … 3157 3187 3158 3188 maybeAccept_impl( node->forall, *this ); 3159 // xxx - should PointerType visit/mutate dimension?3189 maybeAccept_impl( node->dimension, *this ); 3160 3190 maybeAccept_impl( node->base, *this ); 3161 3191 … … 3168 3198 3169 3199 maybeAccept_impl( node->forall, *this ); 3170 // xxx - should PointerType visit/mutate dimension?3200 maybeAccept_impl( node->dimension, *this ); 3171 3201 maybeAccept_impl( node->base, *this ); 3172 3202 … … 3179 3209 3180 3210 maybeMutate_impl( node->forall, *this ); 3181 // xxx - should PointerType visit/mutate dimension?3211 maybeMutate_impl( node->dimension, *this ); 3182 3212 maybeMutate_impl( node->base, *this ); 3183 3213 -
src/Parser/DeclarationNode.cc
r6992f95	re319fc5
 1076  1076  	if ( variable.tyClass != TypeDecl::NUMBER_OF_KINDS ) {
 1077  1077  		// otype is internally converted to dtype + otype parameters
 1078        		static const TypeDecl::Kind kindMap[] = { TypeDecl::Dtype, TypeDecl::DStype, TypeDecl::Dtype, TypeDecl::Ftype, TypeDecl::Ttype, TypeDecl::Dtype };
       1078  		static const TypeDecl::Kind kindMap[] = { TypeDecl::Dtype, TypeDecl::DStype, TypeDecl::Dtype, TypeDecl::Ftype, TypeDecl::Ttype, TypeDecl::Dimension };
 1079  1079  		static_assert( sizeof(kindMap) / sizeof(kindMap[0]) == TypeDecl::NUMBER_OF_KINDS, "DeclarationNode::build: kindMap is out of sync." );
 1080  1080  		assertf( variable.tyClass < sizeof(kindMap)/sizeof(kindMap[0]), "Variable's tyClass is out of bounds." );
 1081        		TypeDecl * ret = new TypeDecl( *name, Type::StorageClasses(), nullptr, kindMap[ variable.tyClass ], variable.tyClass == TypeDecl::Otype || variable.tyClass == TypeDecl::ALtype, variable.initializer ? variable.initializer->buildType() : nullptr );
       1081  		TypeDecl * ret = new TypeDecl( *name, Type::StorageClasses(), nullptr, kindMap[ variable.tyClass ], variable.tyClass == TypeDecl::Otype, variable.initializer ? variable.initializer->buildType() : nullptr );
 1082  1082  		buildList( variable.assertions, ret->get_assertions() );
 1083  1083  		return ret;
-
src/Parser/ExpressionNode.cc
r6992f95	re319fc5
 509   509  } // build_varref
 510   510
       511  DimensionExpr * build_dimensionref( const string * name ) {
       512  	DimensionExpr * expr = new DimensionExpr( *name );
       513  	delete name;
       514  	return expr;
       515  } // build_varref
 511   516  // TODO: get rid of this and OperKinds and reuse code from OperatorTable
 512   517  static const char * OperName[] = { // must harmonize with OperKinds
-
src/Parser/ParseNode.h
r6992f95	re319fc5
 183   183
 184   184  NameExpr * build_varref( const std::string * name );
       185  DimensionExpr * build_dimensionref( const std::string * name );
 185   186
 186   187  Expression * build_cast( DeclarationNode * decl_node, ExpressionNode * expr_node );
-
src/Parser/TypedefTable.cc
r6992f95	re319fc5
  10    10  // Created On : Sat May 16 15:20:13 2015
  11    11  // Last Modified By : Peter A. Buhr
  12        // Last Modified On : Mon Mar 15 20:56:47 2021
  13        // Update Count : 260
        12  // Last Modified On : Wed May 19 08:30:14 2021
        13  // Update Count : 262
  14    14  //
  15    15
…
  31    31  	switch ( kind ) {
  32    32  	  case IDENTIFIER: return "identifier";
        33  	  case TYPEDIMname: return "typedim";
  33    34  	  case TYPEDEFname: return "typedef";
  34    35  	  case TYPEGENname: return "typegen";
-
src/Parser/parser.yy
r6992f95 re319fc5 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 26 18:41:54202113 // Update Count : 499012 // Last Modified On : Wed May 19 14:20:36 2021 13 // Update Count : 5022 14 14 // 15 15 … … 287 287 288 288 // names and constants: lexer differentiates between identifier and typedef names 289 %token<tok> IDENTIFIER QUOTED_IDENTIFIER TYPED EFname TYPEGENname289 %token<tok> IDENTIFIER QUOTED_IDENTIFIER TYPEDIMname TYPEDEFname TYPEGENname 290 290 %token<tok> TIMEOUT WOR CATCH RECOVER CATCHRESUME FIXUP FINALLY // CFA 291 291 %token<tok> INTEGERconstant CHARACTERconstant STRINGliteral … … 586 586 | quasi_keyword 587 587 { $$ = new ExpressionNode( build_varref( $1 ) ); } 588 | TYPEDIMname // CFA, generic length argument 589 // { $$ = new ExpressionNode( new TypeExpr( maybeMoveBuildType( DeclarationNode::newFromTypedef( $1 ) ) ) ); } 590 // { $$ = new ExpressionNode( build_varref( $1 ) ); } 591 { $$ = new ExpressionNode( build_dimensionref( $1 ) ); } 588 592 | tuple 589 593 | '(' comma_expression ')' … … 2535 2539 | '[' identifier_or_type_name ']' 2536 2540 { 2537 typedefTable.addToScope( *$2, TYPED EFname, "9" );2538 $$ = DeclarationNode::newTypeParam( TypeDecl:: ALtype, $2 );2541 typedefTable.addToScope( *$2, TYPEDIMname, "9" ); 2542 $$ = DeclarationNode::newTypeParam( TypeDecl::Dimension, $2 ); 2539 2543 } 2540 2544 // | type_specifier identifier_parameter_declarator … … 2590 2594 { $$ = new ExpressionNode( new TypeExpr( maybeMoveBuildType( $1 ) ) ); } 2591 2595 | assignment_expression 2592 { SemanticError( yylloc, toString("Expression generic parameters are currently unimplemented: ", $1->build()) ); $$ = nullptr; }2593 2596 | type_list ',' type 2594 2597 { $$ = (ExpressionNode *)($1->set_last( new ExpressionNode( new TypeExpr( maybeMoveBuildType( $3 ) ) ) )); } 2595 2598 | type_list ',' assignment_expression 2596 { SemanticError( yylloc, toString("Expression generic parameters are currently unimplemented: ", $3->build()) ); $$ = nullptr; } 2597 // { $$ = (ExpressionNode *)( $1->set_last( $3 )); } 2599 { $$ = (ExpressionNode *)( $1->set_last( $3 )); } 2598 2600 ; 2599 2601 -
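At the source level, the new TYPEDIMname token and build_dimensionref let a dimension parameter declared as [N] appear both as a generic argument and inside ordinary expressions, and the old "Expression generic parameters are currently unimplemented" error is lifted. A rough sketch of what now parses, based on the tests and pass comments elsewhere in this changeset rather than an exhaustive grammar demonstration:

    forall( [N] )                     // [N] registers N as a TYPEDIMname in this scope
    void f( array( float, N ) & a ) {
        size_t last = N - 1;          // N in an expression becomes a DimensionExpr
        a[last] = 0.0f;
    }

    array( float, 7 ) a;              // a plain expression is now accepted as a generic argument
    f( a );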
src/SymTab/Indexer.cc
r6992f95 re319fc5 74 74 } 75 75 76 Indexer::Indexer( )76 Indexer::Indexer( bool trackIdentifiers ) 77 77 : idTable(), typeTable(), structTable(), enumTable(), unionTable(), traitTable(), 78 prevScope(), scope( 0 ), repScope( 0 ) { ++* stats().count; }78 prevScope(), scope( 0 ), repScope( 0 ), trackIdentifiers( trackIdentifiers ) { ++* stats().count; } 79 79 80 80 Indexer::~Indexer() { … … 110 110 111 111 void Indexer::lookupId( const std::string & id, std::list< IdData > &out ) const { 112 assert( trackIdentifiers ); 113 112 114 ++* stats().lookup_calls; 113 115 if ( ! idTable ) return; … … 434 436 const Declaration * deleteStmt ) { 435 437 ++* stats().add_calls; 438 if ( ! trackIdentifiers ) return; 436 439 const std::string &name = decl->name; 437 440 if ( name == "" ) return; -
src/SymTab/Indexer.h
r6992f95 re319fc5 31 31 class Indexer : public std::enable_shared_from_this<SymTab::Indexer> { 32 32 public: 33 explicit Indexer( );33 explicit Indexer( bool trackIdentifiers = true ); 34 34 virtual ~Indexer(); 35 35 … … 180 180 /// returns true if there exists a declaration with C linkage and the given name with a different mangled name 181 181 bool hasIncompatibleCDecl( const std::string & id, const std::string & mangleName ) const; 182 183 bool trackIdentifiers; 182 184 }; 183 185 } // namespace SymTab -
src/SymTab/Validate.cc
r6992f95 re319fc5 105 105 106 106 struct FixQualifiedTypes final : public WithIndexer { 107 FixQualifiedTypes() : WithIndexer(false) {} 107 108 Type * postmutate( QualifiedType * ); 108 109 }; … … 174 175 }; 175 176 177 /// Does early resolution on the expressions that give enumeration constants their values 178 struct ResolveEnumInitializers final : public WithIndexer, public WithGuards, public WithVisitorRef<ResolveEnumInitializers>, public WithShortCircuiting { 179 ResolveEnumInitializers( const Indexer * indexer ); 180 void postvisit( EnumDecl * enumDecl ); 181 182 private: 183 const Indexer * local_indexer; 184 185 }; 186 176 187 /// Replaces array and function types in forall lists by appropriate pointer type and assigns each Object and Function declaration a unique ID. 177 188 struct ForallPointerDecay_old final { … … 260 271 void previsit( StructInstType * inst ); 261 272 void previsit( UnionInstType * inst ); 273 }; 274 275 /// desugar declarations and uses of dimension paramaters like [N], 276 /// from type-system managed values, to tunnneling via ordinary types, 277 /// as char[-] in and sizeof(-) out 278 struct TranslateDimensionGenericParameters : public WithIndexer, public WithGuards { 279 static void translateDimensions( std::list< Declaration * > &translationUnit ); 280 TranslateDimensionGenericParameters(); 281 282 bool nextVisitedNodeIsChildOfSUIT = false; // SUIT = Struct or Union -Inst Type 283 bool visitingChildOfSUIT = false; 284 void changeState_ChildOfSUIT( bool newVal ); 285 void premutate( StructInstType * sit ); 286 void premutate( UnionInstType * uit ); 287 void premutate( BaseSyntaxNode * node ); 288 289 TypeDecl * postmutate( TypeDecl * td ); 290 Expression * postmutate( DimensionExpr * de ); 291 Expression * postmutate( Expression * e ); 262 292 }; 263 293 … … 307 337 PassVisitor<EnumAndPointerDecay_old> epc; 308 338 PassVisitor<LinkReferenceToTypes_old> lrt( nullptr ); 339 PassVisitor<ResolveEnumInitializers> rei( nullptr ); 309 340 PassVisitor<ForallPointerDecay_old> fpd; 310 341 PassVisitor<CompoundLiteral> compoundliteral; … … 326 357 Stats::Heap::newPass("validate-B"); 327 358 Stats::Time::BlockGuard guard("validate-B"); 328 Stats::Time::TimeBlock("Link Reference To Types", [&]() { 329 acceptAll( translationUnit, lrt ); // must happen before autogen, because sized flag needs to propagate to generated functions 330 }); 331 Stats::Time::TimeBlock("Fix Qualified Types", [&]() { 332 mutateAll( translationUnit, fixQual ); // must happen after LinkReferenceToTypes_old, because aggregate members are accessed 333 }); 334 Stats::Time::TimeBlock("Hoist Structs", [&]() { 335 HoistStruct::hoistStruct( translationUnit ); // must happen after EliminateTypedef, so that aggregate typedefs occur in the correct order 336 }); 337 Stats::Time::TimeBlock("Eliminate Typedefs", [&]() { 338 EliminateTypedef::eliminateTypedef( translationUnit ); // 339 }); 359 acceptAll( translationUnit, lrt ); // must happen before autogen, because sized flag needs to propagate to generated functions 360 mutateAll( translationUnit, fixQual ); // must happen after LinkReferenceToTypes_old, because aggregate members are accessed 361 HoistStruct::hoistStruct( translationUnit ); 362 EliminateTypedef::eliminateTypedef( translationUnit ); 340 363 } 341 364 { 342 365 Stats::Heap::newPass("validate-C"); 343 366 Stats::Time::BlockGuard guard("validate-C"); 344 acceptAll( translationUnit, genericParams ); // check as early as possible - can't happen before LinkReferenceToTypes_old 345 
ReturnChecker::checkFunctionReturns( translationUnit ); 346 InitTweak::fixReturnStatements( translationUnit ); // must happen before autogen 367 Stats::Time::TimeBlock("Validate Generic Parameters", [&]() { 368 acceptAll( translationUnit, genericParams ); // check as early as possible - can't happen before LinkReferenceToTypes_old; observed failing when attempted before eliminateTypedef 369 }); 370 Stats::Time::TimeBlock("Translate Dimensions", [&]() { 371 TranslateDimensionGenericParameters::translateDimensions( translationUnit ); 372 }); 373 Stats::Time::TimeBlock("Resolve Enum Initializers", [&]() { 374 acceptAll( translationUnit, rei ); // must happen after translateDimensions because rei needs identifier lookup, which needs name mangling 375 }); 376 Stats::Time::TimeBlock("Check Function Returns", [&]() { 377 ReturnChecker::checkFunctionReturns( translationUnit ); 378 }); 379 Stats::Time::TimeBlock("Fix Return Statements", [&]() { 380 InitTweak::fixReturnStatements( translationUnit ); // must happen before autogen 381 }); 347 382 } 348 383 { … … 644 679 } 645 680 646 LinkReferenceToTypes_old::LinkReferenceToTypes_old( const Indexer * other_indexer ) {681 LinkReferenceToTypes_old::LinkReferenceToTypes_old( const Indexer * other_indexer ) : WithIndexer( false ) { 647 682 if ( other_indexer ) { 648 683 local_indexer = other_indexer; … … 664 699 } 665 700 666 void checkGenericParameters( ReferenceToType * inst ) {667 for ( Expression * param : inst->parameters ) {668 if ( ! dynamic_cast< TypeExpr * >( param ) ) {669 SemanticError( inst, "Expression parameters for generic types are currently unsupported: " );670 }671 }672 }673 674 701 void LinkReferenceToTypes_old::postvisit( StructInstType * structInst ) { 675 702 const StructDecl * st = local_indexer->lookupStruct( structInst->name ); … … 682 709 forwardStructs[ structInst->name ].push_back( structInst ); 683 710 } // if 684 checkGenericParameters( structInst );685 711 } 686 712 … … 695 721 forwardUnions[ unionInst->name ].push_back( unionInst ); 696 722 } // if 697 checkGenericParameters( unionInst );698 723 } 699 724 … … 807 832 forwardEnums.erase( fwds ); 808 833 } // if 809 810 for ( Declaration * member : enumDecl->members ) {811 ObjectDecl * field = strict_dynamic_cast<ObjectDecl *>( member );812 if ( field->init ) {813 // need to resolve enumerator initializers early so that other passes that determine if an expression is constexpr have the appropriate information.814 SingleInit * init = strict_dynamic_cast<SingleInit *>( field->init );815 ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer );816 }817 }818 834 } // if 819 835 } … … 878 894 typeInst->set_isFtype( typeDecl->kind == TypeDecl::Ftype ); 879 895 } // if 896 } // if 897 } 898 899 ResolveEnumInitializers::ResolveEnumInitializers( const Indexer * other_indexer ) : WithIndexer( true ) { 900 if ( other_indexer ) { 901 local_indexer = other_indexer; 902 } else { 903 local_indexer = &indexer; 904 } // if 905 } 906 907 void ResolveEnumInitializers::postvisit( EnumDecl * enumDecl ) { 908 if ( enumDecl->body ) { 909 for ( Declaration * member : enumDecl->members ) { 910 ObjectDecl * field = strict_dynamic_cast<ObjectDecl *>( member ); 911 if ( field->init ) { 912 // need to resolve enumerator initializers early so that other passes that determine if an expression is constexpr have the appropriate information. 
913 SingleInit * init = strict_dynamic_cast<SingleInit *>( field->init ); 914 ResolvExpr::findSingleExpression( init->value, new BasicType( Type::Qualifiers(), BasicType::SignedInt ), indexer ); 915 } 916 } 880 917 } // if 881 918 } … … 1223 1260 } 1224 1261 1262 // Test for special name on a generic parameter. Special treatment for the 1263 // special name is a bootstrapping hack. In most cases, the worlds of T's 1264 // and of N's don't overlap (normal treamtemt). The foundations in 1265 // array.hfa use tagging for both types and dimensions. Tagging treats 1266 // its subject parameter even more opaquely than T&, which assumes it is 1267 // possible to have a pointer/reference to such an object. Tagging only 1268 // seeks to identify the type-system resident at compile time. Both N's 1269 // and T's can make tags. The tag definition uses the special name, which 1270 // is treated as "an N or a T." This feature is not inteded to be used 1271 // outside of the definition and immediate uses of a tag. 1272 static inline bool isReservedTysysIdOnlyName( const std::string & name ) { 1273 // name's prefix was __CFA_tysys_id_only, before it got wrapped in __..._generic 1274 int foundAt = name.find("__CFA_tysys_id_only"); 1275 if (foundAt == 0) return true; 1276 if (foundAt == 2 && name[0] == '_' && name[1] == '_') return true; 1277 return false; 1278 } 1279 1225 1280 template< typename Aggr > 1226 1281 void validateGeneric( Aggr * inst ) { … … 1239 1294 TypeSubstitution sub; 1240 1295 auto paramIter = params->begin(); 1241 for ( size_t i = 0; paramIter != params->end(); ++paramIter, ++i ) { 1242 if ( i < args.size() ) { 1243 TypeExpr * expr = strict_dynamic_cast< TypeExpr * >( * std::next( args.begin(), i ) ); 1244 sub.add( (* paramIter)->get_name(), expr->get_type()->clone() ); 1245 } else if ( i == args.size() ) { 1296 auto argIter = args.begin(); 1297 for ( ; paramIter != params->end(); ++paramIter, ++argIter ) { 1298 if ( argIter != args.end() ) { 1299 TypeExpr * expr = dynamic_cast< TypeExpr * >( * argIter ); 1300 if ( expr ) { 1301 sub.add( (* paramIter)->get_name(), expr->get_type()->clone() ); 1302 } 1303 } else { 1246 1304 Type * defaultType = (* paramIter)->get_init(); 1247 1305 if ( defaultType ) { 1248 1306 args.push_back( new TypeExpr( defaultType->clone() ) ); 1249 1307 sub.add( (* paramIter)->get_name(), defaultType->clone() ); 1308 argIter = std::prev(args.end()); 1309 } else { 1310 SemanticError( inst, "Too few type arguments in generic type " ); 1250 1311 } 1251 1312 } 1313 assert( argIter != args.end() ); 1314 bool typeParamDeclared = (*paramIter)->kind != TypeDecl::Kind::Dimension; 1315 bool typeArgGiven; 1316 if ( isReservedTysysIdOnlyName( (*paramIter)->name ) ) { 1317 // coerce a match when declaration is reserved name, which means "either" 1318 typeArgGiven = typeParamDeclared; 1319 } else { 1320 typeArgGiven = dynamic_cast< TypeExpr * >( * argIter ); 1321 } 1322 if ( ! typeParamDeclared && typeArgGiven ) SemanticError( inst, "Type argument given for value parameter: " ); 1323 if ( typeParamDeclared && ! 
typeArgGiven ) SemanticError( inst, "Expression argument given for type parameter: " ); 1252 1324 } 1253 1325 1254 1326 sub.apply( inst ); 1255 if ( args.size() < params->size() ) SemanticError( inst, "Too few type arguments in generic type " );1256 1327 if ( args.size() > params->size() ) SemanticError( inst, "Too many type arguments in generic type " ); 1257 1328 } … … 1264 1335 void ValidateGenericParameters::previsit( UnionInstType * inst ) { 1265 1336 validateGeneric( inst ); 1337 } 1338 1339 void TranslateDimensionGenericParameters::translateDimensions( std::list< Declaration * > &translationUnit ) { 1340 PassVisitor<TranslateDimensionGenericParameters> translator; 1341 mutateAll( translationUnit, translator ); 1342 } 1343 1344 TranslateDimensionGenericParameters::TranslateDimensionGenericParameters() : WithIndexer( false ) {} 1345 1346 // Declaration of type variable: forall( [N] ) -> forall( N & | sized( N ) ) 1347 TypeDecl * TranslateDimensionGenericParameters::postmutate( TypeDecl * td ) { 1348 if ( td->kind == TypeDecl::Dimension ) { 1349 td->kind = TypeDecl::Dtype; 1350 if ( ! isReservedTysysIdOnlyName( td->name ) ) { 1351 td->sized = true; 1352 } 1353 } 1354 return td; 1355 } 1356 1357 // Situational awareness: 1358 // array( float, [[currentExpr]] ) has visitingChildOfSUIT == true 1359 // array( float, [[currentExpr]] - 1 ) has visitingChildOfSUIT == false 1360 // size_t x = [[currentExpr]] has visitingChildOfSUIT == false 1361 void TranslateDimensionGenericParameters::changeState_ChildOfSUIT( bool newVal ) { 1362 GuardValue( nextVisitedNodeIsChildOfSUIT ); 1363 GuardValue( visitingChildOfSUIT ); 1364 visitingChildOfSUIT = nextVisitedNodeIsChildOfSUIT; 1365 nextVisitedNodeIsChildOfSUIT = newVal; 1366 } 1367 void TranslateDimensionGenericParameters::premutate( StructInstType * sit ) { 1368 (void) sit; 1369 changeState_ChildOfSUIT(true); 1370 } 1371 void TranslateDimensionGenericParameters::premutate( UnionInstType * uit ) { 1372 (void) uit; 1373 changeState_ChildOfSUIT(true); 1374 } 1375 void TranslateDimensionGenericParameters::premutate( BaseSyntaxNode * node ) { 1376 (void) node; 1377 changeState_ChildOfSUIT(false); 1378 } 1379 1380 // Passing values as dimension arguments: array( float, 7 ) -> array( float, char[ 7 ] ) 1381 // Consuming dimension parameters: size_t x = N - 1 ; -> size_t x = sizeof(N) - 1 ; 1382 // Intertwined reality: array( float, N ) -> array( float, N ) 1383 // array( float, N - 1 ) -> array( float, char[ sizeof(N) - 1 ] ) 1384 // Intertwined case 1 is not just an optimization. 1385 // Avoiding char[sizeof(-)] is necessary to enable the call of f to bind the value of N, in: 1386 // forall([N]) void f( array(float, N) & ); 1387 // array(float, 7) a; 1388 // f(a); 1389 1390 Expression * TranslateDimensionGenericParameters::postmutate( DimensionExpr * de ) { 1391 // Expression de is an occurrence of N in LHS of above examples. 1392 // Look up the name that de references. 1393 // If we are in a struct body, then this reference can be to an entry of the stuct's forall list. 1394 // Whether or not we are in a struct body, this reference can be to an entry of a containing function's forall list. 1395 // If we are in a struct body, then the stuct's forall declarations are innermost (functions don't occur in structs). 1396 // Thus, a potential struct's declaration is highest priority. 1397 // A struct's forall declarations are already renamed with _generic_ suffix. Try that name variant first. 
1398 1399 std::string useName = "__" + de->name + "_generic_"; 1400 TypeDecl * namedParamDecl = const_cast<TypeDecl *>( strict_dynamic_cast<const TypeDecl *, nullptr >( indexer.lookupType( useName ) ) ); 1401 1402 if ( ! namedParamDecl ) { 1403 useName = de->name; 1404 namedParamDecl = const_cast<TypeDecl *>( strict_dynamic_cast<const TypeDecl *, nullptr >( indexer.lookupType( useName ) ) ); 1405 } 1406 1407 // Expect to find it always. A misspelled name would have been parsed as an identifier. 1408 assert( namedParamDecl && "Type-system-managed value name not found in symbol table" ); 1409 1410 delete de; 1411 1412 TypeInstType * refToDecl = new TypeInstType( 0, useName, namedParamDecl ); 1413 1414 if ( visitingChildOfSUIT ) { 1415 // As in postmutate( Expression * ), topmost expression needs a TypeExpr wrapper 1416 // But avoid ArrayType-Sizeof 1417 return new TypeExpr( refToDecl ); 1418 } else { 1419 // the N occurrence is being used directly as a runtime value, 1420 // if we are in a type instantiation, then the N is within a bigger value computation 1421 return new SizeofExpr( refToDecl ); 1422 } 1423 } 1424 1425 Expression * TranslateDimensionGenericParameters::postmutate( Expression * e ) { 1426 if ( visitingChildOfSUIT ) { 1427 // e is an expression used as an argument to instantiate a type 1428 if (! dynamic_cast< TypeExpr * >( e ) ) { 1429 // e is a value expression 1430 // but not a DimensionExpr, which has a distinct postmutate 1431 Type * typeExprContent = new ArrayType( 0, new BasicType( 0, BasicType::Char ), e, true, false ); 1432 TypeExpr * result = new TypeExpr( typeExprContent ); 1433 return result; 1434 } 1435 } 1436 return e; 1266 1437 } 1267 1438 -
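TranslateDimensionGenericParameters is where dimension parameters stop being a distinct kind: declarations become sized dtype parameters, value arguments are tunnelled in as character-array types, and uses of the parameter as a value come back out through sizeof. The pass's own comments give the mapping; laid out as before/after pairs it is roughly:

    // declaring the parameter
    forall( [N] ) ...                      // becomes   forall( N & | sized( N ) ) ...

    // passing a value as a dimension argument
    array( float, 7 )                      // becomes   array( float, char[ 7 ] )

    // consuming the dimension as a runtime value
    size_t x = N - 1;                      // becomes   size_t x = sizeof( N ) - 1;

    // an argument that is exactly N stays a type, so a call such as
    //     forall( [N] ) void f( array( float, N ) & );
    //     array( float, 7 ) a;  f( a );
    // can still bind N = char[ 7 ] directly, without a sizeof in the way.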
src/SynTree/Declaration.h
r6992f95	re319fc5
 201   201  	typedef NamedTypeDecl Parent;
 202   202    public:
 203         	enum Kind { Dtype, DStype, Otype, Ftype, Ttype, ALtype, NUMBER_OF_KINDS };
       203  	enum Kind { Dtype, DStype, Otype, Ftype, Ttype, Dimension, NUMBER_OF_KINDS };
 204   204
 205   205  	Kind kind;
-
src/SynTree/Expression.h
r6992f95 re319fc5 587 587 }; 588 588 589 /// DimensionExpr represents a type-system provided value used in an expression ( forrall([N]) ... N + 1 ) 590 class DimensionExpr : public Expression { 591 public: 592 std::string name; 593 594 DimensionExpr( std::string name ); 595 DimensionExpr( const DimensionExpr & other ); 596 virtual ~DimensionExpr(); 597 598 const std::string & get_name() const { return name; } 599 void set_name( std::string newValue ) { name = newValue; } 600 601 virtual DimensionExpr * clone() const override { return new DimensionExpr( * this ); } 602 virtual void accept( Visitor & v ) override { v.visit( this ); } 603 virtual void accept( Visitor & v ) const override { v.visit( this ); } 604 virtual Expression * acceptMutator( Mutator & m ) override { return m.mutate( this ); } 605 virtual void print( std::ostream & os, Indenter indent = {} ) const override; 606 }; 607 589 608 /// AsmExpr represents a GCC 'asm constraint operand' used in an asm statement: [output] "=f" (result) 590 609 class AsmExpr : public Expression { -
src/SynTree/Mutator.h
r6992f95	re319fc5
  80    80  	virtual Expression * mutate( CommaExpr * commaExpr ) = 0;
  81    81  	virtual Expression * mutate( TypeExpr * typeExpr ) = 0;
        82  	virtual Expression * mutate( DimensionExpr * dimensionExpr ) = 0;
  82    83  	virtual Expression * mutate( AsmExpr * asmExpr ) = 0;
  83    84  	virtual Expression * mutate( ImplicitCopyCtorExpr * impCpCtorExpr ) = 0;
-
src/SynTree/SynTree.h
r6992f95	re319fc5
  85    85  class CommaExpr;
  86    86  class TypeExpr;
        87  class DimensionExpr;
  87    88  class AsmExpr;
  88    89  class ImplicitCopyCtorExpr;
-
src/SynTree/TypeDecl.cc
r6992f95	re319fc5
  33    33
  34    34  const char * TypeDecl::typeString() const {
  35         	static const char * kindNames[] = { "sized data type", "sized data type", "sized object type", "sized function type", "sized tuple type", "sized array length type" };
        35  	static const char * kindNames[] = { "sized data type", "sized data type", "sized object type", "sized function type", "sized tuple type", "sized length value" };
  36    36  	static_assert( sizeof(kindNames) / sizeof(kindNames[0]) == TypeDecl::NUMBER_OF_KINDS, "typeString: kindNames is out of sync." );
  37    37  	assertf( kind < TypeDecl::NUMBER_OF_KINDS, "TypeDecl kind is out of bounds." );
-
src/SynTree/TypeExpr.cc
r6992f95 re319fc5 35 35 } 36 36 37 DimensionExpr::DimensionExpr( std::string name ) : Expression(), name(name) { 38 assertf(name != "0", "Zero is not a valid name"); 39 assertf(name != "1", "One is not a valid name"); 40 } 41 42 DimensionExpr::DimensionExpr( const DimensionExpr & other ) : Expression( other ), name( other.name ) { 43 } 44 45 DimensionExpr::~DimensionExpr() {} 46 47 void DimensionExpr::print( std::ostream & os, Indenter indent ) const { 48 os << "Type-Sys Value: " << get_name(); 49 Expression::print( os, indent ); 50 } 37 51 // Local Variables: // 38 52 // tab-width: 4 // -
src/SynTree/Visitor.h
r6992f95	re319fc5
 135   135  	virtual void visit( TypeExpr * node ) { visit( const_cast<const TypeExpr *>(node) ); }
 136   136  	virtual void visit( const TypeExpr * typeExpr ) = 0;
       137  	virtual void visit( DimensionExpr * node ) { visit( const_cast<const DimensionExpr *>(node) ); }
       138  	virtual void visit( const DimensionExpr * typeExpr ) = 0;
 137   139  	virtual void visit( AsmExpr * node ) { visit( const_cast<const AsmExpr *>(node) ); }
 138   140  	virtual void visit( const AsmExpr * asmExpr ) = 0;
-
tests/array-container/array-basic.cfa
r6992f95 re319fc5 61 61 forall( [Nw], [Nx], [Ny], [Nz] ) 62 62 void fillHelloData( array( float, Nw, Nx, Ny, Nz ) & wxyz ) { 63 for (w; z(Nw))64 for (x; z(Nx))65 for (y; z(Ny))66 for (z; z(Nz))63 for (w; Nw) 64 for (x; Nx) 65 for (y; Ny) 66 for (z; Nz) 67 67 wxyz[w][x][y][z] = getMagicNumber(w, x, y, z); 68 68 } 69 69 70 forall( [ Zn]70 forall( [N] 71 71 , S & | sized(S) 72 72 ) 73 float total1d_low( arpk( Zn, S, float, float ) & a ) {73 float total1d_low( arpk(N, S, float, float ) & a ) { 74 74 float total = 0.0f; 75 for (i; z(Zn))75 for (i; N) 76 76 total += a[i]; 77 77 return total; … … 98 98 99 99 expect = 0; 100 for (i; z(Nw))100 for (i; Nw) 101 101 expect += getMagicNumber( i, slice_ix, slice_ix, slice_ix ); 102 102 printf("expect Ws = %f\n", expect); … … 117 117 118 118 expect = 0; 119 for (i; z(Nx))119 for (i; Nx) 120 120 expect += getMagicNumber( slice_ix, i, slice_ix, slice_ix ); 121 121 printf("expect Xs = %f\n", expect); -
tests/array-container/array-md-sbscr-cases.cfa
r6992f95 re319fc5 20 20 forall( [Nw], [Nx], [Ny], [Nz] ) 21 21 void fillHelloData( array( float, Nw, Nx, Ny, Nz ) & wxyz ) { 22 for (w; z(Nw))23 for (x; z(Nx))24 for (y; z(Ny))25 for (z; z(Nz))22 for (w; Nw) 23 for (x; Nx) 24 for (y; Ny) 25 for (z; Nz) 26 26 wxyz[w][x][y][z] = getMagicNumber(w, x, y, z); 27 27 } … … 246 246 assert(( wxyz[[2, 3, 4, 5]] == valExpected )); 247 247 248 for ( i; z(Nw)) {248 for ( i; Nw ) { 249 249 assert(( wxyz[[ i, 3, 4, 5 ]] == getMagicNumber(i, 3, 4, 5) )); 250 250 } 251 251 252 for ( i; z(Nx)) {252 for ( i; Nx ) { 253 253 assert(( wxyz[[ 2, i, 4, 5 ]] == getMagicNumber(2, i, 4, 5) )); 254 254 } 255 255 256 for ( i; z(Ny)) {256 for ( i; Ny ) { 257 257 assert(( wxyz[[ 2, 3, i, 5 ]] == getMagicNumber(2, 3, i, 5) )); 258 258 } 259 259 260 for ( i; z(Nz)) {260 for ( i; Nz ) { 261 261 assert(( wxyz[[ 2, 3, 4, i ]] == getMagicNumber(2, 3, 4, i) )); 262 262 } 263 263 264 for ( i; z(Nw)) {264 for ( i; Nw ) { 265 265 assert(( wxyz[[ i, all, 4, 5 ]][3] == getMagicNumber(i, 3, 4, 5) )); 266 266 } 267 267 268 for ( i; z(Nw)) {268 for ( i; Nw ) { 269 269 assert(( wxyz[[ all, 3, 4, 5 ]][i] == getMagicNumber(i, 3, 4, 5) )); 270 270 } -
tests/device/cpu.cfa
r6992f95 re319fc5 17 17 #include <fstream.hfa> 18 18 #include <device/cpu.hfa> 19 #include <stdlib.hfa> 20 21 #include <errno.h> 22 #include <stdio.h> 23 #include <string.h> 24 #include <unistd.h> 25 19 26 extern "C" { 27 #include <dirent.h> 28 #include <sys/types.h> 29 #include <sys/stat.h> 20 30 #include <sys/sysinfo.h> 31 #include <fcntl.h> 32 } 33 34 // go through a directory calling fn on each file 35 static int iterate_dir( const char * path, void (*fn)(struct dirent * ent) ) { 36 // open the directory 37 DIR *dir = opendir(path); 38 if(dir == 0p) { return ENOTDIR; } 39 40 // call fn for each 41 struct dirent * ent; 42 while ((ent = readdir(dir)) != 0p) { 43 fn( ent ); 44 } 45 46 // no longer need this 47 closedir(dir); 48 return 0; 49 } 50 51 // count the number of directories with the specified prefix 52 // the directories counted have the form '[prefix]N' where prefix is the parameter 53 // and N is an base 10 integer. 54 static int count_prefix_dirs(const char * path, const char * prefix) { 55 // read the directory and find the cpu count 56 // and make sure everything is as expected 57 int max = -1; 58 int count = 0; 59 void lambda(struct dirent * ent) { 60 // were are looking for prefixX, where X is a number 61 // check that it starts with 'cpu 62 char * s = strstr(ent->d_name, prefix); 63 if(s == 0p) { return; } 64 if(s != ent->d_name) { return; } 65 66 // check that the next part is a number 67 s += strlen(prefix); 68 char * end; 69 long int val = strtol(s, &end, 10); 70 if(*end != '\0' || val < 0) { return; } 71 72 // check that it's a directory 73 if(ent->d_type != DT_DIR) { return; } 74 75 // it's a match! 76 max = max(val, max); 77 count++; 78 } 79 iterate_dir(path, lambda); 80 81 /* paranoid */ verifyf(count == max + 1, "Inconsistent %s count, counted %d, but max %s was %d", prefix, count, prefix, (int)max); 82 83 return count; 84 } 85 86 // Count number of cache *indexes* in the system 87 // cache indexes are distinct from cache level as Data or Instruction cache 88 // can share a level but not an index 89 // PITFALL: assumes all cpus have the same indexes as cpu0 90 static int count_cache_indexes(void) { 91 return count_prefix_dirs("/sys/devices/system/cpu/cpu0/cache", "index"); 92 } 93 94 // read information about a spcficic cache index/cpu file into the output buffer 95 static size_t read_cpuidxinfo_into(unsigned cpu, unsigned idx, const char * file, char * out, size_t out_len) { 96 // Pick the file we want and read it 97 char buf[128]; 98 /* paranoid */ __attribute__((unused)) int len = 99 snprintf(buf, 128, "/sys/devices/system/cpu/cpu%u/cache/index%u/%s", cpu, idx, file); 100 /* paranoid */ verifyf(len > 0, "Could not generate '%s' filename for cpu %u, index %u", file, cpu, idx); 101 102 int fd = open(buf, 0, O_RDONLY); 103 /* paranoid */ verifyf(fd > 0, "Could not open file '%s'", buf); 104 105 ssize_t r = read(fd, out, out_len); 106 /* paranoid */ verifyf(r > 0, "Could not read file '%s'", buf); 107 108 /* paranoid */ __attribute__((unused)) int ret = 109 close(fd); 110 /* paranoid */ verifyf(ret == 0, "Could not close file '%s'", buf); 111 112 out[r-1] = '\0'; 113 return r-1; 114 } 115 116 unsigned find_idx() { 117 int idxs = count_cache_indexes(); 118 119 unsigned found_level = 0; 120 unsigned found = -1u; 121 for(i; idxs) { 122 unsigned idx = idxs - 1 - i; 123 char buf[32]; 124 125 // Level is the cache level: higher means bigger and slower 126 read_cpuidxinfo_into(0, idx, "level", buf, 32); 127 char * end; 128 unsigned long level = strtoul(buf, &end, 10); 129 
/* paranoid */ verifyf(level <= 250, "Cpu %u has more than 250 levels of cache, that doesn't sound right", 0); 130 /* paranoid */ verify(*end == '\0'); 131 132 if(found_level < level) { 133 found_level = level; 134 found = idx; 135 } 136 } 137 138 /* paranoid */ verify(found != -1u); 139 return found; 21 140 } 22 141 23 142 int main() { 143 //----------------------------------------------------------------------- 24 144 int ret1 = get_nprocs(); 25 145 int ret2 = cpu_info.hthrd_count; … … 31 151 } 32 152 153 //----------------------------------------------------------------------- 154 // Make sure no one has the same self 155 for(ime; cpu_info.hthrd_count) { 156 unsigned me = cpu_info.llc_map[ime].self; 157 { 158 unsigned s = cpu_info.llc_map[ime].start; 159 unsigned e = s + cpu_info.llc_map[ime].count; 160 if(me < s || me >= e) { 161 sout | "CPU" | ime | "outside of it's own map: " | s | "<=" | me | "<" | e; 162 } 163 } 164 165 166 for(ithem; cpu_info.hthrd_count) { 167 if(ime == ithem) continue; 168 169 unsigned them = cpu_info.llc_map[ithem].self; 170 if(me == them) { 171 sout | "CPU" | ime | "has conflicting self id with" | ithem | "(" | me | ")"; 172 } 173 } 174 } 175 176 177 //----------------------------------------------------------------------- 178 unsigned idx = find_idx(); 179 // For all procs check mapping is consistent 180 for(cpu_me; cpu_info.hthrd_count) { 181 char buf_me[32]; 182 size_t len_me = read_cpuidxinfo_into(cpu_me, idx, "shared_cpu_list", buf_me, 32); 183 for(cpu_them; cpu_info.hthrd_count) { 184 if(cpu_me == cpu_them) continue; 185 char buf_them[32]; 186 size_t len_them = read_cpuidxinfo_into(cpu_them, idx, "shared_cpu_list", buf_them, 32); 187 188 bool match_file = len_them == len_me && 0 == strncmp(buf_them, buf_me, len_me); 189 bool match_info = cpu_info.llc_map[cpu_me].start == cpu_info.llc_map[cpu_them].start && cpu_info.llc_map[cpu_me].count == cpu_info.llc_map[cpu_them].count; 190 191 if(match_file != match_info) { 192 sout | "CPU" | cpu_me | "and" | cpu_them | "have inconsitent file and cpu_info"; 193 sout | cpu_me | ": <" | cpu_info.llc_map[cpu_me ].start | "," | cpu_info.llc_map[cpu_me ].count | "> '" | buf_me | "'"; 194 sout | cpu_me | ": <" | cpu_info.llc_map[cpu_them].start | "," | cpu_info.llc_map[cpu_them].count | "> '" | buf_them | "'"; 195 } 196 } 197 } 33 198 }
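The new checks compare cpu_info.llc_map against what the kernel itself reports under sysfs: two cpus are expected to share a map entry exactly when their last-level cache index exposes the same shared_cpu_list. The file access the test keeps repeating reduces to a few lines of plain C (standard sysfs paths as used by the test; error handling trimmed, and the function name is illustrative):

    #include <stdio.h>
    #include <string.h>

    // Read one attribute ("level", "shared_cpu_list", ...) of cache index `idx`
    // on cpu `cpu`; mirrors read_cpuidxinfo_into in the test above.
    static int read_cache_attr( unsigned cpu, unsigned idx, const char * attr,
                                char * buf, size_t len ) {
        char path[128];
        snprintf( path, sizeof(path),
                  "/sys/devices/system/cpu/cpu%u/cache/index%u/%s", cpu, idx, attr );
        FILE * f = fopen( path, "r" );
        if ( ! f ) return -1;
        if ( ! fgets( buf, (int)len, f ) ) { fclose( f ); return -1; }
        buf[ strcspn( buf, "\n" ) ] = '\0';   // strip the trailing newline, like the test does
        fclose( f );
        return 0;
    }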