Changeset c8a0210
- Timestamp: Apr 16, 2021, 2:28:09 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 665edf40
- Parents: 857a1c6 (diff), 5f6a172 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
  - 5 added
  - 114 edited
  - benchmark/basic/ttst_lock.c (modified) (1 diff)
  - benchmark/benchcltr.hfa (modified) (3 diffs)
  - benchmark/io/http/protocol.cfa (modified) (1 diff)
  - benchmark/io/readv-posix.c (modified) (2 diffs)
  - benchmark/io/readv.cfa (modified) (2 diffs)
  - benchmark/readyQ/cycle.cc (modified) (2 diffs)
  - benchmark/readyQ/cycle.cfa (modified) (2 diffs)
  - benchmark/readyQ/cycle.cpp (modified) (2 diffs)
  - benchmark/readyQ/locality.cc (modified) (2 diffs)
  - benchmark/readyQ/locality.cfa (modified) (2 diffs)
  - benchmark/readyQ/locality.cpp (modified) (2 diffs)
  - benchmark/readyQ/rq_bench.hfa (modified) (1 diff)
  - benchmark/readyQ/rq_bench.hpp (modified) (2 diffs)
  - benchmark/readyQ/yield.cfa (modified) (2 diffs)
  - doc/bibliography/pl.bib (modified) (1 diff)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/dynamic_entropy.hpp (added)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/links.hpp (modified) (1 diff)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/links2.hpp (added)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/ntmove.cpp (added)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list.hpp (modified) (5 diffs)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_good.cpp (modified) (1 diff)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/randbit.cpp (modified) (1 diff)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.cpp (modified) (11 diffs)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi-packed.hpp (modified) (2 diffs)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi.hpp (modified) (1 diff)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/utils.hpp (modified) (4 diffs)
  - doc/theses/thierry_delisle_PhD/code/readyQ_proto/work_stealing.hpp (modified) (7 diffs)
  - libcfa/prelude/builtins.c (modified) (5 diffs)
  - libcfa/src/bits/weakso_locks.cfa (modified) (1 diff)
  - libcfa/src/clock.hfa (modified) (2 diffs)
  - libcfa/src/concurrency/coroutine.cfa (modified) (2 diffs)
  - libcfa/src/concurrency/coroutine.hfa (modified) (2 diffs)
  - libcfa/src/concurrency/invoke.h (modified) (1 diff)
  - libcfa/src/concurrency/io/call.cfa.in (modified) (18 diffs)
  - libcfa/src/concurrency/kernel.cfa (modified) (12 diffs)
  - libcfa/src/concurrency/kernel.hfa (modified) (6 diffs)
  - libcfa/src/concurrency/kernel/startup.cfa (modified) (8 diffs)
  - libcfa/src/concurrency/kernel_private.hfa (modified) (5 diffs)
  - libcfa/src/concurrency/preemption.cfa (modified) (2 diffs)
  - libcfa/src/concurrency/ready_queue.cfa (modified) (18 diffs)
  - libcfa/src/concurrency/ready_subqueue.hfa (modified) (1 diff)
  - libcfa/src/concurrency/stats.cfa (modified) (3 diffs)
  - libcfa/src/concurrency/stats.hfa (modified) (4 diffs)
  - libcfa/src/concurrency/thread.cfa (modified) (5 diffs)
  - libcfa/src/concurrency/thread.hfa (modified) (3 diffs)
  - libcfa/src/exception.c (modified) (3 diffs)
  - libcfa/src/exception.h (modified) (4 diffs)
  - libcfa/src/exception.hfa (modified) (3 diffs)
  - libcfa/src/fstream.cfa (modified) (1 diff)
  - libcfa/src/fstream.hfa (modified) (1 diff)
  - libcfa/src/iostream.cfa (modified) (7 diffs)
  - libcfa/src/iostream.hfa (modified) (4 diffs)
  - libcfa/src/math.hfa (modified) (2 diffs)
  - libcfa/src/time.hfa (modified) (2 diffs)
  - libcfa/src/virtual.c (modified) (2 diffs)
  - src/AST/Expr.cpp (modified) (1 diff)
  - src/AST/Expr.hpp (modified) (1 diff)
  - src/Concurrency/Keywords.cc (modified) (8 diffs)
  - src/Parser/parser.yy (modified) (2 diffs)
  - src/SynTree/Constant.cc (modified) (1 diff)
  - src/SynTree/Constant.h (modified) (1 diff)
  - src/Virtual/ExpandCasts.cc (modified) (7 diffs)
  - src/Virtual/Tables.cc (modified) (4 diffs)
  - src/Virtual/Tables.h (modified) (3 diffs)
  - tests/.expect/KRfunctions.nast.arm64.txt (modified) (1 diff)
  - tests/.expect/KRfunctions.nast.x64.txt (modified) (1 diff)
  - tests/.expect/KRfunctions.nast.x86.txt (modified) (1 diff)
  - tests/.expect/KRfunctions.oast.x64.txt (modified) (1 diff)
  - tests/.expect/declarationSpecifier.arm64.txt (modified) (1 diff)
  - tests/.expect/declarationSpecifier.x64.txt (modified) (1 diff)
  - tests/.expect/declarationSpecifier.x86.txt (modified) (1 diff)
  - tests/.expect/extension.arm64.txt (modified) (1 diff)
  - tests/.expect/extension.x64.txt (modified) (1 diff)
  - tests/.expect/extension.x86.txt (modified) (1 diff)
  - tests/.expect/gccExtensions.arm64.txt (modified) (1 diff)
  - tests/.expect/gccExtensions.x64.txt (modified) (1 diff)
  - tests/.expect/gccExtensions.x86.txt (modified) (1 diff)
  - tests/.expect/math.nast.arm64.txt (modified) (1 diff)
  - tests/.expect/math.nast.x64.txt (modified) (1 diff)
  - tests/.expect/math.nast.x86.txt (modified) (1 diff)
  - tests/concurrent/futures/multi.cfa (modified) (4 diffs)
  - tests/concurrent/spinaphore.cfa (modified) (2 diffs)
  - tests/errors/.expect/completeType.nast.arm64.txt (modified) (9 diffs)
  - tests/exceptions/.expect/resume-threads.txt (modified) (1 diff)
  - tests/exceptions/.expect/resume.txt (modified) (1 diff)
  - tests/exceptions/.expect/terminate-threads.txt (modified) (1 diff)
  - tests/exceptions/.expect/terminate.txt (modified) (1 diff)
  - tests/exceptions/cancel/coroutine.cfa (modified) (3 diffs)
  - tests/exceptions/cancel/thread.cfa (modified) (4 diffs)
  - tests/exceptions/conditional.cfa (modified) (3 diffs)
  - tests/exceptions/data-except.cfa (modified) (1 diff)
  - tests/exceptions/defaults.cfa (modified) (5 diffs)
  - tests/exceptions/finally.cfa (modified) (1 diff)
  - tests/exceptions/interact.cfa (modified) (8 diffs)
  - tests/exceptions/polymorphic.cfa (modified) (5 diffs)
  - tests/exceptions/resume.cfa (modified) (8 diffs)
  - tests/exceptions/terminate.cfa (modified) (8 diffs)
  - tests/exceptions/trash.cfa (modified) (1 diff)
  - tests/exceptions/type-check.cfa (modified) (1 diff)
  - tests/exceptions/virtual-cast.cfa (modified) (4 diffs)
  - tests/exceptions/virtual-poly.cfa (modified) (7 diffs)
  - tests/io/.expect/manipulatorsOutput1.arm64.txt (modified) (1 diff)
  - tests/io/.expect/manipulatorsOutput1.x64.txt (modified) (1 diff)
  - tests/io/.expect/manipulatorsOutput1.x86.txt (modified) (1 diff)
  - tests/io/.expect/manipulatorsOutput2.arm64.txt (modified) (2 diffs)
  - tests/io/.expect/manipulatorsOutput2.x64.txt (modified) (2 diffs)
  - tests/io/.expect/manipulatorsOutput2.x86.txt (modified) (2 diffs)
  - tests/io/.expect/manipulatorsOutput4.x64.txt (added)
  - tests/io/manipulatorsOutput1.cfa (modified) (3 diffs)
  - tests/io/manipulatorsOutput2.cfa (modified) (2 diffs)
  - tests/io/manipulatorsOutput3.cfa (modified) (1 diff)
  - tests/io/manipulatorsOutput4.cfa (added)
  - tests/linking/exception-nothreads.cfa (modified) (1 diff)
  - tests/linking/exception-withthreads.cfa (modified) (1 diff)
  - tests/math.cfa (modified) (2 diffs)
  - tests/meta/.expect/archVast.nast.arm64.txt (modified) (2 diffs)
  - tests/quasiKeyword.cfa (modified) (1 diff)
  - tests/vector_math/.expect/vec4_float.txt (modified) (1 diff)
  - tools/gdb/utils-gdb.py (modified) (6 diffs)
Legend:
- Unmodified
- Added
- Removed
benchmark/basic/ttst_lock.c
r857a1c6 rc8a0210
  #define CALIGN __attribute__(( aligned (CACHE_ALIGN) ))
  #define CACHE_ALIGN 128
- #define Pause() __asm__ __volatile__ ( "pause" : : : )
+ #if defined( __i386 ) || defined( __x86_64 )
+ #define Pause() __asm__ __volatile__ ( "pause" : : : )
+ #elif defined( __ARM_ARCH )
+ #define Pause() __asm__ __volatile__ ( "YIELD" : : : )
+ #else
+ #error unsupported architecture
+ #endif

  typedef uintptr_t TYPE; // addressable word-size
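For context, a minimal standalone sketch (not part of the changeset) showing how an architecture-dispatched Pause() of this kind is typically used inside a test-and-test-and-set spin loop; the lock variable and function names here are illustrative:

    #include <atomic>

    #if defined( __i386 ) || defined( __x86_64 )
        #define Pause() __asm__ __volatile__ ( "pause" : : : )
    #elif defined( __ARM_ARCH )
        #define Pause() __asm__ __volatile__ ( "YIELD" : : : )
    #else
        #error unsupported architecture
    #endif

    static std::atomic<bool> locked{false};

    // Test-and-test-and-set acquire: spin on a cheap relaxed load, pausing between
    // probes, and only retry the atomic exchange once the lock looks free.
    static void spin_acquire() {
        while (locked.exchange(true, std::memory_order_acquire)) {
            while (locked.load(std::memory_order_relaxed))
                Pause();
        }
    }

    static void spin_release() {
        locked.store(false, std::memory_order_release);
    }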
benchmark/benchcltr.hfa
r857a1c6 rc8a0210
  for() {
  sleep(100`ms);
- end = getTimeNsec();
+ end = timeNsec();
  Duration delta = end - start;
  /*if(is_tty)*/ {
…
  }
  #else
- uint64_t getTimeNsec() {
+ uint64_t timeNsec() {
  timespec curr;
  clock_gettime( CLOCK_REALTIME, &curr );
…
  for(;;) {
  usleep(100000);
- end = getTimeNsec();
+ end = timeNsec();
  uint64_t delta = end - start;
  /*if(is_tty)*/ {
benchmark/io/http/protocol.cfa
r857a1c6 rc8a0210

  char buff[100];
- Time now = getTimeNsec();
+ Time now = timeNsec();
  strftime( buff, 100, "%a, %d %b %Y %H:%M:%S %Z", now );
  sout | "Updated date to '" | buff | "'";
benchmark/io/readv-posix.c
r857a1c6 rc8a0210
  printf("Starting\n");
  bool is_tty = isatty(STDOUT_FILENO);
- start = getTimeNsec();
+ start = timeNsec();
  run = true;

…

  run = false;
- end = getTimeNsec();
+ end = timeNsec();
  printf("\nDone\n");
benchmark/io/readv.cfa
r857a1c6 rc8a0210
  printf("Starting\n");
  bool is_tty = isatty(STDOUT_FILENO);
- start = getTimeNsec();
+ start = timeNsec();
  run = true;

…

  run = false;
- end = getTimeNsec();
+ end = timeNsec();
  printf("\nDone\n");
  }
benchmark/readyQ/cycle.cc
r857a1c6 rc8a0210 89 89 90 90 bool is_tty = isatty(STDOUT_FILENO); 91 start = getTimeNsec();91 start = timeNsec(); 92 92 93 93 for(int i = 0; i < nthreads; i++) { … … 97 97 98 98 stop = true; 99 end = getTimeNsec();99 end = timeNsec(); 100 100 printf("\nDone\n"); 101 101 -
benchmark/readyQ/cycle.cfa
r857a1c6 rc8a0210 65 65 66 66 bool is_tty = isatty(STDOUT_FILENO); 67 start = getTimeNsec();67 start = timeNsec(); 68 68 69 69 for(i; nthreads) { … … 73 73 74 74 stop = true; 75 end = getTimeNsec();75 end = timeNsec(); 76 76 printf("\nDone\n"); 77 77 -
benchmark/readyQ/cycle.cpp
r857a1c6 rc8a0210 93 93 94 94 bool is_tty = isatty(STDOUT_FILENO); 95 start = getTimeNsec();95 start = timeNsec(); 96 96 97 97 for(int i = 0; i < nthreads; i++) { … … 101 101 102 102 stop = true; 103 end = getTimeNsec();103 end = timeNsec(); 104 104 printf("\nDone\n"); 105 105 -
benchmark/readyQ/locality.cc
r857a1c6 rc8a0210 281 281 282 282 bool is_tty = isatty(STDOUT_FILENO); 283 start = getTimeNsec();283 start = timeNsec(); 284 284 285 285 for(size_t i = 0; i < nthreads; i++) { … … 289 289 290 290 stop = true; 291 end = getTimeNsec();291 end = timeNsec(); 292 292 printf("\nDone\n"); 293 293 -
benchmark/readyQ/locality.cfa
r857a1c6 rc8a0210 232 232 233 233 bool is_tty = isatty(STDOUT_FILENO); 234 start = getTimeNsec();234 start = timeNsec(); 235 235 236 236 for(i; nthreads) { … … 240 240 241 241 stop = true; 242 end = getTimeNsec();242 end = timeNsec(); 243 243 printf("\nDone\n"); 244 244 -
benchmark/readyQ/locality.cpp
r857a1c6 rc8a0210 287 287 288 288 bool is_tty = isatty(STDOUT_FILENO); 289 start = getTimeNsec();289 start = timeNsec(); 290 290 291 291 for(size_t i = 0; i < nthreads; i++) { … … 295 295 296 296 stop = true; 297 end = getTimeNsec();297 end = timeNsec(); 298 298 printf("\nDone\n"); 299 299 -
benchmark/readyQ/rq_bench.hfa
r857a1c6 rc8a0210
  for() {
  sleep(100`ms);
- Time end = getTimeNsec();
+ Time end = timeNsec();
  Duration delta = end - start;
  if(is_tty) {
benchmark/readyQ/rq_bench.hpp
r857a1c6 rc8a0210
  }

- uint64_t getTimeNsec() {
+ uint64_t timeNsec() {
  timespec curr;
  clock_gettime( CLOCK_REALTIME, &curr );
…
  for(;;) {
  Sleeper::usleep(100000);
- uint64_t end = getTimeNsec();
+ uint64_t end = timeNsec();
  uint64_t delta = end - start;
  if(is_tty) {
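For reference, a small self-contained sketch of the nanosecond timestamp helper these benchmark harnesses rely on (same clock_gettime call as the diff; packing into a single value assumes 64 bits are sufficient):

    #include <cstdint>
    #include <ctime>

    // Wall-clock time in nanoseconds, sampled at the start and end of a
    // benchmark run to compute the elapsed delta.
    static uint64_t timeNsec() {
        timespec curr;
        clock_gettime( CLOCK_REALTIME, &curr );
        return uint64_t(curr.tv_sec) * 1000000000ull + uint64_t(curr.tv_nsec);
    }

    // Usage: uint64_t start = timeNsec(); /* work */ uint64_t delta = timeNsec() - start;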
benchmark/readyQ/yield.cfa
r857a1c6 rc8a0210 66 66 67 67 bool is_tty = isatty(STDOUT_FILENO); 68 start = getTimeNsec();68 start = timeNsec(); 69 69 run = true; 70 70 … … 75 75 76 76 run = false; 77 end = getTimeNsec();77 end = timeNsec(); 78 78 printf("\nDone\n"); 79 79 } -
doc/bibliography/pl.bib
r857a1c6 rc8a0210
  }

- @article{Delisle20,
+ @article{Delisle21,
  keywords = {concurrency, Cforall},
  contributer = {pabuhr@plg},
  author = {Thierry Delisle and Peter A. Buhr},
  title = {Advanced Control-flow and Concurrency in \textsf{C}$\mathbf{\forall}$},
- year = 2020,
  journal = spe,
- pages = {1-38},
- note = {\href{https://doi-org.proxy.lib.uwaterloo.ca/10.1002/spe.2925}{https://\-doi-org.proxy.lib.uwaterloo.ca/\-10.1002/\-spe.2925}},
- note = {},
+ month = may,
+ year = 2021,
+ volume = 51,
+ number = 5,
+ pages = {1005-1042},
+ note = {\href{https://onlinelibrary.wiley.com/doi/10.1002/spe.2925}{https://\-onlinelibrary.wiley.com/\-doi/\-10.1002/\-spe.2925}},
  }
doc/theses/thierry_delisle_PhD/code/readyQ_proto/links.hpp
r857a1c6 rc8a0210
  }

- long long ts() const {
+ unsigned long long ts() const {
  return before._links.ts;
  }
doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list.hpp
r857a1c6 rc8a0210
  while( __builtin_expect(ll.exchange(true),false) ) {
  while(ll.load(std::memory_order_relaxed))
- asm volatile("pause");
+ Pause();
  }
  /* paranoid */ assert(ll);
…
  && ready.compare_exchange_weak(copy, n + 1) )
  break;
- asm volatile("pause");
+ Pause();
  }

…
  // Step 1 : make sure no writer are in the middle of the critical section
  while(lock.load(std::memory_order_relaxed))
- asm volatile("pause");
+ Pause();

  // Fence needed because we don't want to start trying to acquire the lock
…
  // to simply lock their own lock and enter.
  while(lock.load(std::memory_order_relaxed))
- asm volatile("pause");
+ Pause();

  // Step 2 : lock per-proc lock
…
  for(uint_fast32_t i = 0; i < s; i++) {
  while(data[i].lock.load(std::memory_order_relaxed))
- asm volatile("pause");
+ Pause();
  }
doc/theses/thierry_delisle_PhD/code/readyQ_proto/processor_list_good.cpp
r857a1c6 rc8a0210
  target = (target - (target % total)) + total;
  while(waiting < target)
- asm volatile("pause");
+ Pause();

  assert(waiting < (1ul << 60));
doc/theses/thierry_delisle_PhD/code/readyQ_proto/randbit.cpp
r857a1c6 rc8a0210
  target = (target - (target % total)) + total;
  while(waiting < target)
- asm volatile("pause");
+ Pause();

  assert(waiting < (1ul << 60));
doc/theses/thierry_delisle_PhD/code/readyQ_proto/relaxed_list.cpp
r857a1c6 rc8a0210 206 206 std::cout << "Total ops : " << ops << "(" << global.in << "i, " << global.out << "o, " << global.empty << "e)\n"; 207 207 #ifndef NO_STATS 208 LIST_VARIANT<Node>::stats_print(std::cout );208 LIST_VARIANT<Node>::stats_print(std::cout, duration); 209 209 #endif 210 210 } … … 368 368 369 369 for(Node * & node : nodes) { 370 node = list.pop(); 371 assert(node); 370 node = nullptr; 371 while(!node) { 372 node = list.pop(); 373 } 372 374 local.crc_out += node->value; 373 375 local.out++; … … 691 693 692 694 for(const auto & n : nodes) { 693 local.valmax = max(local.valmax, size_t(n.value));694 local.valmin = min(local.valmin, size_t(n.value));695 local.valmax = std::max(local.valmax, size_t(n.value)); 696 local.valmin = std::min(local.valmin, size_t(n.value)); 695 697 } 696 698 … … 773 775 try { 774 776 arg = optarg = argv[optind]; 775 nnodes = st oul(optarg, &len);777 nnodes = std::stoul(optarg, &len); 776 778 if(len != arg.size()) { throw std::invalid_argument(""); } 777 779 } catch(std::invalid_argument &) { … … 792 794 try { 793 795 arg = optarg = argv[optind]; 794 nnodes = st oul(optarg, &len);796 nnodes = std::stoul(optarg, &len); 795 797 if(len != arg.size()) { throw std::invalid_argument(""); } 796 798 } catch(std::invalid_argument &) { … … 812 814 try { 813 815 arg = optarg = argv[optind]; 814 nnodes = st oul(optarg, &len);816 nnodes = std::stoul(optarg, &len); 815 817 if(len != arg.size()) { throw std::invalid_argument(""); } 816 818 nslots = nnodes; … … 823 825 try { 824 826 arg = optarg = argv[optind]; 825 nnodes = st oul(optarg, &len);827 nnodes = std::stoul(optarg, &len); 826 828 if(len != arg.size()) { throw std::invalid_argument(""); } 827 829 } catch(std::invalid_argument &) { … … 831 833 try { 832 834 arg = optarg = argv[optind + 1]; 833 nslots = st oul(optarg, &len);835 nslots = std::stoul(optarg, &len); 834 836 if(len != arg.size()) { throw std::invalid_argument(""); } 835 837 } catch(std::invalid_argument &) { … … 884 886 case 'd': 885 887 try { 886 duration = st od(optarg, &len);888 duration = std::stod(optarg, &len); 887 889 if(len != arg.size()) { throw std::invalid_argument(""); } 888 890 } catch(std::invalid_argument &) { … … 893 895 case 't': 894 896 try { 895 nthreads = st oul(optarg, &len);897 nthreads = std::stoul(optarg, &len); 896 898 if(len != arg.size()) { throw std::invalid_argument(""); } 897 899 } catch(std::invalid_argument &) { … … 902 904 case 'q': 903 905 try { 904 nqueues = st oul(optarg, &len);906 nqueues = std::stoul(optarg, &len); 905 907 if(len != arg.size()) { throw std::invalid_argument(""); } 906 908 } catch(std::invalid_argument &) { -
doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi-packed.hpp
r857a1c6 rc8a0210
  for(int i = 0; i < width; i++) {
  int idx = i % hwdith;
- std::cout << i << " -> " << idx + width << std::endl;
  leafs[i].parent = &nodes[ idx ];
  }
…
  for(int i = 0; i < root; i++) {
  int idx = (i / 2) + hwdith;
- std::cout << i + width << " -> " << idx + width << std::endl;
  nodes[i].parent = &nodes[ idx ];
  }
doc/theses/thierry_delisle_PhD/code/readyQ_proto/snzi.hpp
r857a1c6 rc8a0210
  std::cout << "SNZI: " << depth << "x" << width << "(" << mask - 1 << ") " << (sizeof(snzi_t::node) * (root + 1)) << " bytes" << std::endl;
  for(int i = 0; i < root; i++) {
- std::cout << i << " -> " << (i / base) + width << std::endl;
  nodes[i].parent = &nodes[(i / base) + width];
  }
doc/theses/thierry_delisle_PhD/code/readyQ_proto/utils.hpp
r857a1c6 rc8a0210 11 11 #include <sys/sysinfo.h> 12 12 13 #include <x86intrin.h> 14 15 // Barrier from 16 class barrier_t { 17 public: 18 barrier_t(size_t total) 19 : waiting(0) 20 , total(total) 21 {} 22 23 void wait(unsigned) { 24 size_t target = waiting++; 25 target = (target - (target % total)) + total; 26 while(waiting < target) 27 asm volatile("pause"); 28 29 assert(waiting < (1ul << 60)); 30 } 31 32 private: 33 std::atomic<size_t> waiting; 34 size_t total; 35 }; 13 // #include <x86intrin.h> 36 14 37 15 // class Random { … … 102 80 }; 103 81 104 static inline long long rdtscl(void) { 105 unsigned int lo, hi; 106 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); 107 return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 ); 108 } 82 static inline long long int rdtscl(void) { 83 #if defined( __i386 ) || defined( __x86_64 ) 84 unsigned int lo, hi; 85 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); 86 return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 ); 87 #elif defined( __aarch64__ ) || defined( __arm__ ) 88 // https://github.com/google/benchmark/blob/v1.1.0/src/cycleclock.h#L116 89 long long int virtual_timer_value; 90 asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); 91 return virtual_timer_value; 92 #else 93 #error unsupported hardware architecture 94 #endif 95 } 96 97 #if defined( __i386 ) || defined( __x86_64 ) 98 #define Pause() __asm__ __volatile__ ( "pause" : : : ) 99 #elif defined( __ARM_ARCH ) 100 #define Pause() __asm__ __volatile__ ( "YIELD" : : : ) 101 #else 102 #error unsupported architecture 103 #endif 109 104 110 105 static inline void affinity(int tid) { … … 195 190 } 196 191 192 // Barrier from 193 class barrier_t { 194 public: 195 barrier_t(size_t total) 196 : waiting(0) 197 , total(total) 198 {} 199 200 void wait(unsigned) { 201 size_t target = waiting++; 202 target = (target - (target % total)) + total; 203 while(waiting < target) 204 Pause(); 205 206 assert(waiting < (1ul << 60)); 207 } 208 209 private: 210 std::atomic<size_t> waiting; 211 size_t total; 212 }; 213 197 214 struct spinlock_t { 198 215 std::atomic_bool ll = { false }; … … 201 218 while( __builtin_expect(ll.exchange(true),false) ) { 202 219 while(ll.load(std::memory_order_relaxed)) 203 asm volatile("pause");220 Pause(); 204 221 } 205 222 } -
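Since the utils.hpp diff above is hard to read in this flattened form, here is a condensed sketch of the cycle/timer read it introduces (x86 rdtsc versus the AArch64 virtual counter); note the two counters tick at different rates, so values are only comparable within one architecture:

    static inline long long int rdtscl(void) {
    #if defined( __i386 ) || defined( __x86_64 )
        // Time-stamp counter: cycles since reset, read via rdtsc.
        unsigned int lo, hi;
        __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
        return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
    #elif defined( __aarch64__ )
        // AArch64 generic-timer virtual count register (fixed frequency, not CPU cycles).
        long long int virtual_timer_value;
        asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
        return virtual_timer_value;
    #else
        #error unsupported hardware architecture
    #endif
    }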
doc/theses/thierry_delisle_PhD/code/readyQ_proto/work_stealing.hpp
r857a1c6 rc8a0210 6 6 #include <memory> 7 7 #include <mutex> 8 #include <thread> 8 9 #include <type_traits> 9 10 … … 11 12 #include "utils.hpp" 12 13 #include "links.hpp" 14 #include "links2.hpp" 13 15 #include "snzi.hpp" 14 16 17 // #include <x86intrin.h> 18 15 19 using namespace std; 20 21 static const long long lim = 2000; 22 static const unsigned nqueues = 2; 23 24 struct __attribute__((aligned(128))) timestamp_t { 25 volatile unsigned long long val = 0; 26 }; 27 28 template<typename node_t> 29 struct __attribute__((aligned(128))) localQ_t { 30 #ifdef NO_MPSC 31 intrusive_queue_t<node_t> list; 32 33 inline auto ts() { return list.ts(); } 34 inline auto lock() { return list.lock.lock(); } 35 inline auto try_lock() { return list.lock.try_lock(); } 36 inline auto unlock() { return list.lock.unlock(); } 37 38 inline auto push( node_t * node ) { return list.push( node ); } 39 inline auto pop() { return list.pop(); } 40 #else 41 mpsc_queue<node_t> queue = {}; 42 spinlock_t _lock = {}; 43 44 inline auto ts() { auto h = queue.head(); return h ? h->_links.ts : 0ull; } 45 inline auto lock() { return _lock.lock(); } 46 inline auto try_lock() { return _lock.try_lock(); } 47 inline auto unlock() { return _lock.unlock(); } 48 49 inline auto push( node_t * node ) { return queue.push( node ); } 50 inline auto pop() { return queue.pop(); } 51 #endif 52 53 54 }; 16 55 17 56 template<typename node_t> … … 25 64 26 65 work_stealing(unsigned _numThreads, unsigned) 27 : numThreads(_numThreads) 28 , lists(new intrusive_queue_t<node_t>[numThreads]) 29 , snzi( std::log2( numThreads / 2 ), 2 ) 66 : numThreads(_numThreads * nqueues) 67 , lists(new localQ_t<node_t>[numThreads]) 68 // , lists(new intrusive_queue_t<node_t>[numThreads]) 69 , times(new timestamp_t[numThreads]) 70 // , snzi( std::log2( numThreads / 2 ), 2 ) 30 71 31 72 { … … 40 81 __attribute__((noinline, hot)) void push(node_t * node) { 41 82 node->_links.ts = rdtscl(); 42 if( node->_links.hint > numThreads ) { 43 node->_links.hint = tls.rng.next() % numThreads; 44 tls.stat.push.nhint++; 83 // node->_links.ts = 1; 84 85 auto & list = *({ 86 unsigned i; 87 #ifdef NO_MPSC 88 do { 89 #endif 90 tls.stats.push.attempt++; 91 // unsigned r = tls.rng1.next(); 92 unsigned r = tls.it++; 93 if(tls.my_queue == outside) { 94 i = r % numThreads; 95 } else { 96 i = tls.my_queue + (r % nqueues); 97 } 98 #ifdef NO_MPSC 99 } while(!lists[i].try_lock()); 100 #endif 101 &lists[i]; 102 }); 103 104 list.push( node ); 105 #ifdef NO_MPSC 106 list.unlock(); 107 #endif 108 // tls.rng2.set_raw_state( tls.rng1.get_raw_state()); 109 // count++; 110 tls.stats.push.success++; 111 } 112 113 __attribute__((noinline, hot)) node_t * pop() { 114 if(tls.my_queue != outside) { 115 // if( tls.myfriend == outside ) { 116 // auto r = tls.rng1.next(); 117 // tls.myfriend = r % numThreads; 118 // // assert(lists[(tls.it % nqueues) + tls.my_queue].ts() >= lists[((tls.it + 1) % nqueues) + tls.my_queue].ts()); 119 // tls.mytime = std::min(lists[(tls.it % nqueues) + tls.my_queue].ts(), lists[((tls.it + 1) % nqueues) + tls.my_queue].ts()); 120 // // times[tls.myfriend].val = 0; 121 // // lists[tls.myfriend].val = 0; 122 // } 123 // // else if(times[tls.myfriend].val == 0) { 124 // // else if(lists[tls.myfriend].val == 0) { 125 // else if(times[tls.myfriend].val < tls.mytime) { 126 // // else if(times[tls.myfriend].val < lists[(tls.it % nqueues) + tls.my_queue].ts()) { 127 // node_t * n = try_pop(tls.myfriend, tls.stats.pop.help); 128 // tls.stats.help++; 129 // tls.myfriend = outside; 130 // if(n) 
return n; 131 // } 132 // if( tls.myfriend == outside ) { 133 // auto r = tls.rng1.next(); 134 // tls.myfriend = r % numThreads; 135 // tls.mytime = lists[((tls.it + 1) % nqueues) + tls.my_queue].ts(); 136 // } 137 // else { 138 // if(times[tls.myfriend].val + 1000 < tls.mytime) { 139 // node_t * n = try_pop(tls.myfriend, tls.stats.pop.help); 140 // tls.stats.help++; 141 // if(n) return n; 142 // } 143 // tls.myfriend = outside; 144 // } 145 146 node_t * n = local(); 147 if(n) return n; 45 148 } 46 149 47 unsigned i = node->_links.hint; 48 auto & list = lists[i]; 49 list.lock.lock(); 50 51 if(list.push( node )) { 52 snzi.arrive(i); 150 // try steal 151 for(int i = 0; i < 25; i++) { 152 node_t * n = steal(); 153 if(n) return n; 53 154 } 54 155 55 list.lock.unlock(); 56 } 57 58 __attribute__((noinline, hot)) node_t * pop() { 59 node_t * node; 60 while(true) { 61 if(!snzi.query()) { 62 return nullptr; 63 } 64 65 { 66 unsigned i = tls.my_queue; 67 auto & list = lists[i]; 68 if( list.ts() != 0 ) { 69 list.lock.lock(); 70 if((node = try_pop(i))) { 71 tls.stat.pop.local.success++; 72 break; 73 } 74 else { 75 tls.stat.pop.local.elock++; 76 } 77 } 78 else { 79 tls.stat.pop.local.espec++; 80 } 81 } 82 83 tls.stat.pop.steal.tried++; 84 85 int i = tls.rng.next() % numThreads; 86 auto & list = lists[i]; 87 if( list.ts() == 0 ) { 88 tls.stat.pop.steal.empty++; 89 continue; 90 } 91 92 if( !list.lock.try_lock() ) { 93 tls.stat.pop.steal.locked++; 94 continue; 95 } 96 97 if((node = try_pop(i))) { 98 tls.stat.pop.steal.success++; 99 break; 156 return search(); 157 } 158 159 private: 160 inline node_t * local() { 161 unsigned i = (--tls.it % nqueues) + tls.my_queue; 162 node_t * n = try_pop(i, tls.stats.pop.local); 163 if(n) return n; 164 i = (--tls.it % nqueues) + tls.my_queue; 165 return try_pop(i, tls.stats.pop.local); 166 } 167 168 inline node_t * steal() { 169 unsigned i = tls.rng2.prev() % numThreads; 170 return try_pop(i, tls.stats.pop.steal); 171 } 172 173 inline node_t * search() { 174 unsigned offset = tls.rng2.prev(); 175 for(unsigned i = 0; i < numThreads; i++) { 176 unsigned idx = (offset + i) % numThreads; 177 node_t * thrd = try_pop(idx, tls.stats.pop.search); 178 if(thrd) { 179 return thrd; 100 180 } 101 181 } 102 182 103 #if defined(READ) 104 const unsigned f = READ; 105 if(0 == (tls.it % f)) { 106 unsigned i = tls.it / f; 107 lists[i % numThreads].ts(); 108 } 109 // lists[tls.it].ts(); 110 tls.it++; 111 #endif 112 113 114 return node; 115 } 116 117 private: 118 node_t * try_pop(unsigned i) { 183 return nullptr; 184 } 185 186 private: 187 struct attempt_stat_t { 188 std::size_t attempt = { 0 }; 189 std::size_t elock = { 0 }; 190 std::size_t eempty = { 0 }; 191 std::size_t espec = { 0 }; 192 std::size_t success = { 0 }; 193 }; 194 195 node_t * try_pop(unsigned i, attempt_stat_t & stat) { 196 assert(i < numThreads); 119 197 auto & list = lists[i]; 198 stat.attempt++; 199 200 // If the list is empty, don't try 201 if(list.ts() == 0) { stat.espec++; return nullptr; } 202 203 // If we can't get the lock, move on 204 if( !list.try_lock() ) { stat.elock++; return nullptr; } 120 205 121 206 // If list is empty, unlock and retry 122 207 if( list.ts() == 0 ) { 123 list.lock.unlock(); 208 list.unlock(); 209 stat.eempty++; 124 210 return nullptr; 125 211 } 126 212 127 // Actually pop the list 128 node_t * node; 129 bool emptied; 130 std::tie(node, emptied) = list.pop(); 131 assert(node); 132 133 if(emptied) { 134 snzi.depart(i); 135 } 136 137 // Unlock and return 138 list.lock.unlock(); 139 return node; 
213 auto node = list.pop(); 214 list.unlock(); 215 stat.success++; 216 #ifdef NO_MPSC 217 // times[i].val = 1; 218 times[i].val = node.first->_links.ts; 219 // lists[i].val = node.first->_links.ts; 220 return node.first; 221 #else 222 times[i].val = node->_links.ts; 223 return node; 224 #endif 140 225 } 141 226 … … 144 229 145 230 static std::atomic_uint32_t ticket; 231 static const unsigned outside = 0xFFFFFFFF; 232 233 static inline unsigned calc_preferred() { 234 unsigned t = ticket++; 235 if(t == 0) return outside; 236 unsigned i = (t - 1) * nqueues; 237 return i; 238 } 239 146 240 static __attribute__((aligned(128))) thread_local struct TLS { 147 Random rng = { int(rdtscl()) }; 148 unsigned my_queue = ticket++; 241 Random rng1 = { unsigned(std::hash<std::thread::id>{}(std::this_thread::get_id()) ^ rdtscl()) }; 242 Random rng2 = { unsigned(std::hash<std::thread::id>{}(std::this_thread::get_id()) ^ rdtscl()) }; 243 unsigned it = 0; 244 unsigned my_queue = calc_preferred(); 245 unsigned myfriend = outside; 246 unsigned long long int mytime = 0; 149 247 #if defined(READ) 150 248 unsigned it = 0; … … 152 250 struct { 153 251 struct { 154 std::size_t nhint = { 0 }; 252 std::size_t attempt = { 0 }; 253 std::size_t success = { 0 }; 155 254 } push; 156 255 struct { 157 struct { 158 std::size_t success = { 0 }; 159 std::size_t espec = { 0 }; 160 std::size_t elock = { 0 }; 161 } local; 162 struct { 163 std::size_t tried = { 0 }; 164 std::size_t locked = { 0 }; 165 std::size_t empty = { 0 }; 166 std::size_t success = { 0 }; 167 } steal; 256 attempt_stat_t help; 257 attempt_stat_t local; 258 attempt_stat_t steal; 259 attempt_stat_t search; 168 260 } pop; 169 } stat; 261 std::size_t help = { 0 }; 262 } stats; 170 263 } tls; 171 264 172 265 private: 173 266 const unsigned numThreads; 174 std::unique_ptr<intrusive_queue_t<node_t> []> lists; 175 __attribute__((aligned(64))) snzi_t snzi; 267 std::unique_ptr<localQ_t<node_t> []> lists; 268 // std::unique_ptr<intrusive_queue_t<node_t> []> lists; 269 std::unique_ptr<timestamp_t []> times; 270 __attribute__((aligned(128))) std::atomic_size_t count; 176 271 177 272 #ifndef NO_STATS … … 179 274 static struct GlobalStats { 180 275 struct { 181 std::atomic_size_t nhint = { 0 }; 276 std::atomic_size_t attempt = { 0 }; 277 std::atomic_size_t success = { 0 }; 182 278 } push; 183 279 struct { 184 280 struct { 281 std::atomic_size_t attempt = { 0 }; 282 std::atomic_size_t elock = { 0 }; 283 std::atomic_size_t eempty = { 0 }; 284 std::atomic_size_t espec = { 0 }; 185 285 std::atomic_size_t success = { 0 }; 186 std::atomic_size_t espec = { 0 }; 187 std::atomic_size_t elock = { 0 }; 286 } help; 287 struct { 288 std::atomic_size_t attempt = { 0 }; 289 std::atomic_size_t elock = { 0 }; 290 std::atomic_size_t eempty = { 0 }; 291 std::atomic_size_t espec = { 0 }; 292 std::atomic_size_t success = { 0 }; 188 293 } local; 189 294 struct { 190 std::atomic_size_t tried = { 0 }; 191 std::atomic_size_t locked = { 0 }; 192 std::atomic_size_t empty = { 0 }; 295 std::atomic_size_t attempt = { 0 }; 296 std::atomic_size_t elock = { 0 }; 297 std::atomic_size_t eempty = { 0 }; 298 std::atomic_size_t espec = { 0 }; 193 299 std::atomic_size_t success = { 0 }; 194 300 } steal; 301 struct { 302 std::atomic_size_t attempt = { 0 }; 303 std::atomic_size_t elock = { 0 }; 304 std::atomic_size_t eempty = { 0 }; 305 std::atomic_size_t espec = { 0 }; 306 std::atomic_size_t success = { 0 }; 307 } search; 195 308 } pop; 309 std::atomic_size_t help = { 0 }; 196 310 } global_stats; 197 311 198 312 
public: 199 313 static void stats_tls_tally() { 200 global_stats.push.nhint += tls.stat.push.nhint; 201 global_stats.pop.local.success += tls.stat.pop.local.success; 202 global_stats.pop.local.espec += tls.stat.pop.local.espec ; 203 global_stats.pop.local.elock += tls.stat.pop.local.elock ; 204 global_stats.pop.steal.tried += tls.stat.pop.steal.tried ; 205 global_stats.pop.steal.locked += tls.stat.pop.steal.locked ; 206 global_stats.pop.steal.empty += tls.stat.pop.steal.empty ; 207 global_stats.pop.steal.success += tls.stat.pop.steal.success; 208 } 209 210 static void stats_print(std::ostream & os ) { 314 global_stats.push.attempt += tls.stats.push.attempt; 315 global_stats.push.success += tls.stats.push.success; 316 global_stats.pop.help .attempt += tls.stats.pop.help .attempt; 317 global_stats.pop.help .elock += tls.stats.pop.help .elock ; 318 global_stats.pop.help .eempty += tls.stats.pop.help .eempty ; 319 global_stats.pop.help .espec += tls.stats.pop.help .espec ; 320 global_stats.pop.help .success += tls.stats.pop.help .success; 321 global_stats.pop.local .attempt += tls.stats.pop.local .attempt; 322 global_stats.pop.local .elock += tls.stats.pop.local .elock ; 323 global_stats.pop.local .eempty += tls.stats.pop.local .eempty ; 324 global_stats.pop.local .espec += tls.stats.pop.local .espec ; 325 global_stats.pop.local .success += tls.stats.pop.local .success; 326 global_stats.pop.steal .attempt += tls.stats.pop.steal .attempt; 327 global_stats.pop.steal .elock += tls.stats.pop.steal .elock ; 328 global_stats.pop.steal .eempty += tls.stats.pop.steal .eempty ; 329 global_stats.pop.steal .espec += tls.stats.pop.steal .espec ; 330 global_stats.pop.steal .success += tls.stats.pop.steal .success; 331 global_stats.pop.search.attempt += tls.stats.pop.search.attempt; 332 global_stats.pop.search.elock += tls.stats.pop.search.elock ; 333 global_stats.pop.search.eempty += tls.stats.pop.search.eempty ; 334 global_stats.pop.search.espec += tls.stats.pop.search.espec ; 335 global_stats.pop.search.success += tls.stats.pop.search.success; 336 global_stats.help += tls.stats.help; 337 } 338 339 static void stats_print(std::ostream & os, double duration ) { 211 340 std::cout << "----- Work Stealing Stats -----" << std::endl; 212 341 213 double stealSucc = double(global_stats.pop.steal.success) / global_stats.pop.steal.tried; 214 os << "Push to new Q : " << std::setw(15) << global_stats.push.nhint << "\n"; 215 os << "Local Pop : " << std::setw(15) << global_stats.pop.local.success << "\n"; 216 os << "Steal Pop : " << std::setw(15) << global_stats.pop.steal.success << "(" << global_stats.pop.local.espec << "s, " << global_stats.pop.local.elock << "l)\n"; 217 os << "Steal Success : " << std::setw(15) << stealSucc << "(" << global_stats.pop.steal.tried << " tries)\n"; 218 os << "Steal Fails : " << std::setw(15) << global_stats.pop.steal.empty << "e, " << global_stats.pop.steal.locked << "l\n"; 342 double push_suc = (100.0 * double(global_stats.push.success) / global_stats.push.attempt); 343 double push_len = double(global_stats.push.attempt ) / global_stats.push.success; 344 os << "Push Pick : " << push_suc << " %, len " << push_len << " (" << global_stats.push.attempt << " / " << global_stats.push.success << ")\n"; 345 346 double hlp_suc = (100.0 * double(global_stats.pop.help.success) / global_stats.pop.help.attempt); 347 double hlp_len = double(global_stats.pop.help.attempt ) / global_stats.pop.help.success; 348 os << "Help : " << hlp_suc << " %, len " << hlp_len << " (" << global_stats.pop.help.attempt 
<< " / " << global_stats.pop.help.success << ")\n"; 349 os << "Help Fail : " << global_stats.pop.help.espec << "s, " << global_stats.pop.help.eempty << "e, " << global_stats.pop.help.elock << "l\n"; 350 351 double pop_suc = (100.0 * double(global_stats.pop.local.success) / global_stats.pop.local.attempt); 352 double pop_len = double(global_stats.pop.local.attempt ) / global_stats.pop.local.success; 353 os << "Local : " << pop_suc << " %, len " << pop_len << " (" << global_stats.pop.local.attempt << " / " << global_stats.pop.local.success << ")\n"; 354 os << "Local Fail : " << global_stats.pop.local.espec << "s, " << global_stats.pop.local.eempty << "e, " << global_stats.pop.local.elock << "l\n"; 355 356 double stl_suc = (100.0 * double(global_stats.pop.steal.success) / global_stats.pop.steal.attempt); 357 double stl_len = double(global_stats.pop.steal.attempt ) / global_stats.pop.steal.success; 358 os << "Steal : " << stl_suc << " %, len " << stl_len << " (" << global_stats.pop.steal.attempt << " / " << global_stats.pop.steal.success << ")\n"; 359 os << "Steal Fail : " << global_stats.pop.steal.espec << "s, " << global_stats.pop.steal.eempty << "e, " << global_stats.pop.steal.elock << "l\n"; 360 361 double srh_suc = (100.0 * double(global_stats.pop.search.success) / global_stats.pop.search.attempt); 362 double srh_len = double(global_stats.pop.search.attempt ) / global_stats.pop.search.success; 363 os << "Search : " << srh_suc << " %, len " << srh_len << " (" << global_stats.pop.search.attempt << " / " << global_stats.pop.search.success << ")\n"; 364 os << "Search Fail : " << global_stats.pop.search.espec << "s, " << global_stats.pop.search.eempty << "e, " << global_stats.pop.search.elock << "l\n"; 365 os << "Helps : " << std::setw(15) << std::scientific << global_stats.help / duration << "/sec (" << global_stats.help << ")\n"; 219 366 } 220 367 private: -
libcfa/prelude/builtins.c
r857a1c6 rc8a0210 9 9 // Author : Peter A. Buhr 10 10 // Created On : Fri Jul 21 16:21:03 2017 11 // Last Modified By : Andrew Beach12 // Last Modified On : Tue Oct 27 14:42:00 202013 // Update Count : 11 111 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Apr 13 17:26:32 2021 13 // Update Count : 117 14 14 // 15 15 … … 125 125 } // distribution 126 126 127 #define __CFA_BASE_COMP_1__() if ( ep== 1 ) return 1128 #define __CFA_BASE_COMP_2__() if ( ep == 2 ) return ep<< (y - 1)127 #define __CFA_BASE_COMP_1__() if ( x == 1 ) return 1 128 #define __CFA_BASE_COMP_2__() if ( x == 2 ) return x << (y - 1) 129 129 #define __CFA_EXP_OVERFLOW__() if ( y >= sizeof(y) * CHAR_BIT ) return 0 130 130 … … 134 134 __CFA_BASE_COMP_2__(); /* special case, positive shifting for integral types */ \ 135 135 __CFA_EXP_OVERFLOW__(); /* immediate overflow, negative exponent > 2^size-1 */ \ 136 typeof( ep) op = 1; /* accumulate odd product */ \136 typeof(x) op = 1; /* accumulate odd product */ \ 137 137 for ( ; y > 1; y >>= 1 ) { /* squaring exponentiation, O(log2 y) */ \ 138 if ( (y & 1) == 1 ) op = op * ep; /* odd ? */ \139 ep = ep * ep; \138 if ( (y & 1) == 1 ) op = op * x; /* odd ? */ \ 139 x = x * x; \ 140 140 } \ 141 return ep* op141 return x * op 142 142 143 143 static inline { 144 long int ?\?( int ep, unsigned int y ) { __CFA_EXP__(); } 145 long int ?\?( long int ep, unsigned long int y ) { __CFA_EXP__(); } 144 long int ?\?( int x, unsigned int y ) { __CFA_EXP__(); } 145 long int ?\?( long int x, unsigned long int y ) { __CFA_EXP__(); } 146 long long int ?\?( long long int x, unsigned long long int y ) { __CFA_EXP__(); } 146 147 // unsigned computation may be faster and larger 147 unsigned long int ?\?( unsigned int ep, unsigned int y ) { __CFA_EXP__(); } 148 unsigned long int ?\?( unsigned long int ep, unsigned long int y ) { __CFA_EXP__(); } 148 unsigned long int ?\?( unsigned int x, unsigned int y ) { __CFA_EXP__(); } 149 unsigned long int ?\?( unsigned long int x, unsigned long int y ) { __CFA_EXP__(); } 150 unsigned long long int ?\?( unsigned long long int x, unsigned long long int y ) { __CFA_EXP__(); } 149 151 } // distribution 150 152 … … 157 159 158 160 static inline forall( OT | { void ?{}( OT & this, one_t ); OT ?*?( OT, OT ); } ) { 159 OT ?\?( OT ep, unsigned int y ) { __CFA_EXP__(); } 160 OT ?\?( OT ep, unsigned long int y ) { __CFA_EXP__(); } 161 OT ?\?( OT x, unsigned int y ) { __CFA_EXP__(); } 162 OT ?\?( OT x, unsigned long int y ) { __CFA_EXP__(); } 163 OT ?\?( OT x, unsigned long long int y ) { __CFA_EXP__(); } 161 164 } // distribution 162 165 … … 166 169 167 170 static inline { 171 long int ?\=?( int & x, unsigned int y ) { x = x \ y; return x; } 168 172 long int ?\=?( long int & x, unsigned long int y ) { x = x \ y; return x; } 173 long long int ?\=?( long long int & x, unsigned long long int y ) { x = x \ y; return x; } 174 unsigned long int ?\=?( unsigned int & x, unsigned int y ) { x = x \ y; return x; } 169 175 unsigned long int ?\=?( unsigned long int & x, unsigned long int y ) { x = x \ y; return x; } 170 int ?\=?( int & x, unsigned long int y ) { x = x \ y; return x; } 171 unsigned int ?\=?( unsigned int & x, unsigned long int y ) { x = x \ y; return x; } 176 unsigned long long int ?\=?( unsigned long long int & x, unsigned long long int y ) { x = x \ y; return x; } 172 177 } // distribution 173 178 -
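The `?\?` and `?\=?` operators touched above all expand to the same squaring-exponentiation loop (__CFA_EXP__). A standalone rendering of that algorithm as a plain function, without the CFA-specific special cases, is roughly:

    #include <cstdint>

    // Exponentiation by squaring, O(log2 y) multiplications: 'op' accumulates the
    // factors for the odd exponent bits while the base is repeatedly squared.
    static uint64_t ipow(uint64_t x, uint64_t y) {
        uint64_t op = 1;                      // accumulate odd product
        for ( ; y > 1; y >>= 1 ) {
            if ( (y & 1) == 1 ) op *= x;      // odd exponent bit?
            x *= x;
        }
        return y == 0 ? op : x * op;          // y == 0 never entered the loop
    }

    // e.g. ipow(3, 10) == 59049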
libcfa/src/bits/weakso_locks.cfa
r857a1c6 rc8a0210
  void unlock( blocking_lock & ) {}
  void on_notify( blocking_lock &, struct $thread * ) {}
- size_t on_wait( blocking_lock & ) { }
+ size_t on_wait( blocking_lock & ) { return 0; }
  void on_wakeup( blocking_lock &, size_t ) {}
  size_t wait_count( blocking_lock & ) { return 0; }
libcfa/src/clock.hfa
r857a1c6 rc8a0210 10 10 // Created On : Thu Apr 12 14:36:06 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 6 12:49:58 202013 // Update Count : 912 // Last Modified On : Wed Apr 14 17:48:25 2021 13 // Update Count : 20 14 14 // 15 15 … … 32 32 33 33 static inline { 34 void reset Clock( Clock & clk, Duration adj ) with( clk ) {34 void reset( Clock & clk, Duration adj ) with( clk ) { 35 35 offset = adj + __timezone`s; // timezone (global) is (UTC - local time) in seconds 36 } // reset Clock36 } // reset 37 37 38 void ?{}( Clock & clk, Duration adj ) { resetClock( clk, adj ); } 38 void ?{}( Clock & clk ) { reset( clk, (Duration){ 0 } ); } 39 void ?{}( Clock & clk, Duration adj ) { reset( clk, adj ); } 39 40 40 Duration getResNsec() { 41 // System-wide clock that measures real, i.e., wall-clock) time. This clock is affected by discontinuous jumps in 42 // the system time. For example, manual changes of the clock, and incremental adjustments performed by adjtime(3) 43 // and NTP (daylight saving (Fall back). 44 Duration resolutionNsec() { 41 45 struct timespec res; 42 46 clock_getres( CLOCK_REALTIME, &res ); 43 47 return ((int64_t)res.tv_sec * TIMEGRAN + res.tv_nsec)`ns; 44 } // getRes48 } // resolutionNsec 45 49 46 Duration getRes() {50 Duration resolution() { 47 51 struct timespec res; 48 52 clock_getres( CLOCK_REALTIME_COARSE, &res ); 49 53 return ((int64_t)res.tv_sec * TIMEGRAN + res.tv_nsec)`ns; 50 } // getRes54 } // resolution 51 55 52 Time getTimeNsec() {// with nanoseconds56 Time timeNsec() { // with nanoseconds 53 57 timespec curr; 54 58 clock_gettime( CLOCK_REALTIME, &curr ); 55 59 return (Time){ curr }; 56 } // getTimeNsec60 } // timeNsec 57 61 58 Time getTime() {// without nanoseconds62 Time time() { // without nanoseconds 59 63 timespec curr; 60 64 clock_gettime( CLOCK_REALTIME_COARSE, &curr ); 61 65 curr.tv_nsec = 0; 62 66 return (Time){ curr }; 63 } // getTime67 } // time 64 68 65 Time getTime( Clock & clk ) with( clk ) {66 return getTime() + offset;67 } // getTime69 Time time( Clock & clk ) with( clk ) { 70 return time() + offset; 71 } // time 68 72 69 73 Time ?()( Clock & clk ) with( clk ) { // alternative syntax 70 return getTime() + offset;71 } // getTime74 return time() + offset; 75 } // ?() 72 76 73 timeval getTime( Clock & clk ) {77 timeval time( Clock & clk ) { 74 78 return (timeval){ clk() }; 75 } // getTime79 } // time 76 80 77 tm getTime( Clock & clk ) with( clk ) {81 tm time( Clock & clk ) with( clk ) { 78 82 tm ret; 79 localtime_r( getTime( clk ).tv_sec, &ret );83 localtime_r( time( clk ).tv_sec, &ret ); 80 84 return ret; 81 } // getTime85 } // time 82 86 83 Time getCPUTime() { 87 // CFA processor CPU-time watch that ticks when the processor (kernel thread) is running. This watch is affected by 88 // discontinuous jumps when the OS is not running the kernal thread. A duration is returned because the value is 89 // relative and cannot be converted to real-time (wall-clock) time. 90 Duration processor() { 84 91 timespec ts; 85 92 clock_gettime( CLOCK_THREAD_CPUTIME_ID, &ts ); 86 return (Time){ ts }; 87 } // getCPUTime 93 return (Duration){ ts }; 94 } // processor 95 96 // Program CPU-time watch measures CPU time consumed by all processors (kernel threads) in the UNIX process. This 97 // watch is affected by discontinuous jumps when the OS is not running the kernel threads. A duration is returned 98 // because the value is relative and cannot be converted to real-time (wall-clock) time. 
99 Duration program() { 100 timespec ts; 101 clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts ); 102 return (Duration){ ts }; 103 } // program 104 105 // Monotonic stopwatch starting at machine boot and includes system suspension. This watch is unaffected by 106 // discontinuous jumps resulting from manual changes of the clock, and incremental adjustments performed by 107 // adjtime(3) and NTP (Fall back). A duration is returned because the value is relative and cannot be converted to 108 // real-time (wall-clock) time. 109 Duration boot() { 110 timespec ts; 111 clock_gettime( CLOCK_BOOTTIME, &ts ); 112 return (Duration){ ts }; 113 } // boot 88 114 } // distribution 89 115 -
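The new clock.hfa watches map directly onto POSIX/Linux clock IDs; a quick sketch of the same mapping, returning raw nanoseconds instead of CFA Duration values (CLOCK_BOOTTIME is Linux-specific):

    #include <cstdint>
    #include <ctime>

    static uint64_t read_clock(clockid_t id) {
        timespec ts;
        clock_gettime(id, &ts);
        return uint64_t(ts.tv_sec) * 1000000000ull + uint64_t(ts.tv_nsec);
    }

    // processor(): CPU time consumed by this kernel thread only.
    static uint64_t processor_ns() { return read_clock(CLOCK_THREAD_CPUTIME_ID); }
    // program(): CPU time consumed by all kernel threads in the process.
    static uint64_t program_ns()   { return read_clock(CLOCK_PROCESS_CPUTIME_ID); }
    // boot(): monotonic time since boot, including system suspension.
    static uint64_t boot_ns()      { return read_clock(CLOCK_BOOTTIME); }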
libcfa/src/concurrency/coroutine.cfa
r857a1c6 rc8a0210 46 46 47 47 //----------------------------------------------------------------------------- 48 FORALL_DATA_INSTANCE(CoroutineCancelled, (coroutine_t &), (coroutine_t)) 49 50 forall(T &) 51 void mark_exception(CoroutineCancelled(T) *) {} 48 EHM_VIRTUAL_TABLE(SomeCoroutineCancelled, std_coroutine_cancelled); 52 49 53 50 forall(T &) … … 71 68 72 69 // TODO: Remove explitate vtable set once trac#186 is fixed. 73 CoroutineCancelled(T)except;74 except.virtual_table = & get_exception_vtable(&except);70 SomeCoroutineCancelled except; 71 except.virtual_table = &std_coroutine_cancelled; 75 72 except.the_coroutine = &cor; 76 73 except.the_exception = except; 77 throwResume except; 74 // Why does this need a cast? 75 throwResume (SomeCoroutineCancelled &)except; 78 76 79 77 except->virtual_table->free( except ); -
libcfa/src/concurrency/coroutine.hfa
r857a1c6 rc8a0210 22 22 //----------------------------------------------------------------------------- 23 23 // Exception thrown from resume when a coroutine stack is cancelled. 24 FORALL_DATA_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) ( 24 EHM_EXCEPTION(SomeCoroutineCancelled)( 25 void * the_coroutine; 26 exception_t * the_exception; 27 ); 28 29 EHM_EXTERN_VTABLE(SomeCoroutineCancelled, std_coroutine_cancelled); 30 31 EHM_FORALL_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) ( 25 32 coroutine_t * the_coroutine; 26 33 exception_t * the_exception; … … 37 44 // Anything that implements this trait can be resumed. 38 45 // Anything that is resumed is a coroutine. 39 trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION( CoroutineCancelled, (T))) {46 trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(SomeCoroutineCancelled)) { 40 47 void main(T & this); 41 48 $coroutine * get_coroutine(T & this); -
libcfa/src/concurrency/invoke.h
r857a1c6 rc8a0210
  struct $thread * prev;
  volatile unsigned long long ts;
- int preferred;
  };
libcfa/src/concurrency/io/call.cfa.in
r857a1c6 rc8a0210 201 201 202 202 sqe->opcode = IORING_OP_{op}; 203 sqe->user_data = ( __u64)(uintptr_t)&future;203 sqe->user_data = (uintptr_t)&future; 204 204 sqe->flags = sflags; 205 205 sqe->ioprio = 0; … … 215 215 asm volatile("": : :"memory"); 216 216 217 verify( sqe->user_data == ( __u64)(uintptr_t)&future );217 verify( sqe->user_data == (uintptr_t)&future ); 218 218 cfa_io_submit( ctx, &idx, 1, 0 != (submit_flags & CFA_IO_LAZY) ); 219 219 #endif … … 238 238 'fd' : 'fd', 239 239 'off' : 'offset', 240 'addr': '( __u64)iov',240 'addr': '(uintptr_t)iov', 241 241 'len' : 'iovcnt', 242 242 }, define = 'CFA_HAVE_PREADV2'), … … 245 245 'fd' : 'fd', 246 246 'off' : 'offset', 247 'addr': '( __u64)iov',247 'addr': '(uintptr_t)iov', 248 248 'len' : 'iovcnt' 249 249 }, define = 'CFA_HAVE_PWRITEV2'), … … 257 257 'addr': 'fd', 258 258 'len': 'op', 259 'off': '( __u64)event'259 'off': '(uintptr_t)event' 260 260 }), 261 261 # CFA_HAVE_IORING_OP_SYNC_FILE_RANGE … … 269 269 Call('SENDMSG', 'ssize_t sendmsg(int sockfd, const struct msghdr *msg, int flags)', { 270 270 'fd': 'sockfd', 271 'addr': '( __u64)(struct msghdr *)msg',271 'addr': '(uintptr_t)(struct msghdr *)msg', 272 272 'len': '1', 273 273 'msg_flags': 'flags' … … 276 276 Call('RECVMSG', 'ssize_t recvmsg(int sockfd, struct msghdr *msg, int flags)', { 277 277 'fd': 'sockfd', 278 'addr': '( __u64)(struct msghdr *)msg',278 'addr': '(uintptr_t)(struct msghdr *)msg', 279 279 'len': '1', 280 280 'msg_flags': 'flags' … … 283 283 Call('SEND', 'ssize_t send(int sockfd, const void *buf, size_t len, int flags)', { 284 284 'fd': 'sockfd', 285 'addr': '( __u64)buf',285 'addr': '(uintptr_t)buf', 286 286 'len': 'len', 287 287 'msg_flags': 'flags' … … 290 290 Call('RECV', 'ssize_t recv(int sockfd, void *buf, size_t len, int flags)', { 291 291 'fd': 'sockfd', 292 'addr': '( __u64)buf',292 'addr': '(uintptr_t)buf', 293 293 'len': 'len', 294 294 'msg_flags': 'flags' … … 297 297 Call('ACCEPT', 'int accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags)', { 298 298 'fd': 'sockfd', 299 'addr': '( __u64)addr',300 'addr2': '( __u64)addrlen',299 'addr': '(uintptr_t)addr', 300 'addr2': '(uintptr_t)addrlen', 301 301 'accept_flags': 'flags' 302 302 }), … … 304 304 Call('CONNECT', 'int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen)', { 305 305 'fd': 'sockfd', 306 'addr': '( __u64)addr',306 'addr': '(uintptr_t)addr', 307 307 'off': 'addrlen' 308 308 }), … … 310 310 Call('FALLOCATE', 'int fallocate(int fd, int mode, off_t offset, off_t len)', { 311 311 'fd': 'fd', 312 'addr': '( __u64)len',312 'addr': '(uintptr_t)len', 313 313 'len': 'mode', 314 314 'off': 'offset' … … 323 323 # CFA_HAVE_IORING_OP_MADVISE 324 324 Call('MADVISE', 'int madvise(void *addr, size_t length, int advice)', { 325 'addr': '( __u64)addr',325 'addr': '(uintptr_t)addr', 326 326 'len': 'length', 327 327 'fadvise_advice': 'advice' … … 330 330 Call('OPENAT', 'int openat(int dirfd, const char *pathname, int flags, mode_t mode)', { 331 331 'fd': 'dirfd', 332 'addr': '( __u64)pathname',332 'addr': '(uintptr_t)pathname', 333 333 'len': 'mode', 334 334 'open_flags': 'flags;' … … 339 339 'addr': 'pathname', 340 340 'len': 'sizeof(*how)', 341 'off': '( __u64)how',341 'off': '(uintptr_t)how', 342 342 }, define = 'CFA_HAVE_OPENAT2'), 343 343 # CFA_HAVE_IORING_OP_CLOSE … … 348 348 Call('STATX', 'int statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf)', { 349 349 'fd': 'dirfd', 350 'off': '( __u64)statxbuf',350 'off': 
'(uintptr_t)statxbuf', 351 351 'addr': 'pathname', 352 352 'len': 'mask', … … 356 356 Call('READ', 'ssize_t read(int fd, void * buf, size_t count)', { 357 357 'fd': 'fd', 358 'addr': '( __u64)buf',358 'addr': '(uintptr_t)buf', 359 359 'len': 'count' 360 360 }), … … 362 362 Call('WRITE', 'ssize_t write(int fd, void * buf, size_t count)', { 363 363 'fd': 'fd', 364 'addr': '( __u64)buf',364 'addr': '(uintptr_t)buf', 365 365 'len': 'count' 366 366 }), -
libcfa/src/concurrency/kernel.cfa
r857a1c6 rc8a0210 113 113 static void __wake_one(cluster * cltr); 114 114 115 static void push (__cluster_idles& idles, processor & proc);116 static void remove(__cluster_idles& idles, processor & proc);117 static [unsigned idle, unsigned total, * processor] query ( & __cluster_idlesidles );115 static void mark_idle (__cluster_proc_list & idles, processor & proc); 116 static void mark_awake(__cluster_proc_list & idles, processor & proc); 117 static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles ); 118 118 119 119 extern void __cfa_io_start( processor * ); … … 189 189 190 190 // Push self to idle stack 191 push(this->cltr->idles, * this);191 mark_idle(this->cltr->procs, * this); 192 192 193 193 // Confirm the ready-queue is empty … … 195 195 if( readyThread ) { 196 196 // A thread was found, cancel the halt 197 remove(this->cltr->idles, * this);197 mark_awake(this->cltr->procs, * this); 198 198 199 199 #if !defined(__CFA_NO_STATISTICS__) … … 225 225 226 226 // We were woken up, remove self from idle 227 remove(this->cltr->idles, * this);227 mark_awake(this->cltr->procs, * this); 228 228 229 229 // DON'T just proceed, start looking again … … 359 359 #if !defined(__CFA_NO_STATISTICS__) 360 360 __tls_stats()->ready.threads.threads++; 361 __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this ); 361 362 #endif 362 363 // This is case 2, the racy case, someone tried to run this thread before it finished blocking … … 376 377 #if !defined(__CFA_NO_STATISTICS__) 377 378 __tls_stats()->ready.threads.threads--; 379 __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this ); 378 380 #endif 379 381 … … 455 457 if( kernelTLS().this_stats ) { 456 458 __tls_stats()->ready.threads.threads++; 459 __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", kernelTLS().this_processor ); 457 460 } 458 461 else { 459 462 __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED); 463 __push_stat( cl->stats, cl->stats->ready.threads.threads, true, "Cluster", cl ); 460 464 } 461 465 #endif … … 470 474 471 475 ready_schedule_lock(); 472 $thread * thrd = pop ( this );476 $thread * thrd = pop_fast( this ); 473 477 ready_schedule_unlock(); 474 478 … … 613 617 unsigned idle; 614 618 unsigned total; 615 [idle, total, p] = query (this->idles);619 [idle, total, p] = query_idles(this->procs); 616 620 617 621 // If no one is sleeping, we are done … … 650 654 } 651 655 652 static void push (__cluster_idles& this, processor & proc) {656 static void mark_idle(__cluster_proc_list & this, processor & proc) { 653 657 /* paranoid */ verify( ! __preemption_enabled() ); 654 658 lock( this ); 655 659 this.idle++; 656 660 /* paranoid */ verify( this.idle <= this.total ); 657 658 insert_first(this. list, proc);661 remove(proc); 662 insert_first(this.idles, proc); 659 663 unlock( this ); 660 664 /* paranoid */ verify( ! __preemption_enabled() ); 661 665 } 662 666 663 static void remove(__cluster_idles& this, processor & proc) {667 static void mark_awake(__cluster_proc_list & this, processor & proc) { 664 668 /* paranoid */ verify( ! __preemption_enabled() ); 665 669 lock( this ); 666 670 this.idle--; 667 671 /* paranoid */ verify( this.idle >= 0 ); 668 669 672 remove(proc); 673 insert_last(this.actives, proc); 670 674 unlock( this ); 671 675 /* paranoid */ verify( ! 
__preemption_enabled() ); 672 676 } 673 677 674 static [unsigned idle, unsigned total, * processor] query( & __cluster_idles this ) { 678 static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) { 679 /* paranoid */ verify( ! __preemption_enabled() ); 680 /* paranoid */ verify( ready_schedule_islocked() ); 681 675 682 for() { 676 683 uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST); … … 678 685 unsigned idle = this.idle; 679 686 unsigned total = this.total; 680 processor * proc = &this. list`first;687 processor * proc = &this.idles`first; 681 688 // Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it 682 689 asm volatile("": : :"memory"); … … 684 691 return [idle, total, proc]; 685 692 } 693 694 /* paranoid */ verify( ready_schedule_islocked() ); 695 /* paranoid */ verify( ! __preemption_enabled() ); 686 696 } 687 697 -
libcfa/src/concurrency/kernel.hfa
r857a1c6 rc8a0210 69 69 struct cluster * cltr; 70 70 71 // Id within the cluster 72 unsigned cltr_id; 71 // Ready Queue state per processor 72 struct { 73 unsigned short its; 74 unsigned short itr; 75 unsigned id; 76 unsigned target; 77 unsigned long long int cutoff; 78 } rdq; 73 79 74 80 // Set to true to notify the processor should terminate … … 140 146 // Cluster Tools 141 147 142 // Intrusives lanes which are used by the re laxed ready queue148 // Intrusives lanes which are used by the ready queue 143 149 struct __attribute__((aligned(128))) __intrusive_lane_t; 144 150 void ?{}(__intrusive_lane_t & this); 145 151 void ^?{}(__intrusive_lane_t & this); 146 152 147 // Counter used for wether or not the lanes are all empty 148 struct __attribute__((aligned(128))) __snzi_node_t; 149 struct __snzi_t { 150 unsigned mask; 151 int root; 152 __snzi_node_t * nodes; 153 }; 154 155 void ?{}( __snzi_t & this, unsigned depth ); 156 void ^?{}( __snzi_t & this ); 153 // Aligned timestamps which are used by the relaxed ready queue 154 struct __attribute__((aligned(128))) __timestamp_t; 155 void ?{}(__timestamp_t & this); 156 void ^?{}(__timestamp_t & this); 157 157 158 158 //TODO adjust cache size to ARCHITECTURE 159 159 // Structure holding the relaxed ready queue 160 160 struct __ready_queue_t { 161 // Data tracking how many/which lanes are used162 // Aligned to 128 for cache locality163 __snzi_t snzi;164 165 161 // Data tracking the actual lanes 166 162 // On a seperate cacheline from the used struct since … … 171 167 __intrusive_lane_t * volatile data; 172 168 169 // Array of times 170 __timestamp_t * volatile tscs; 171 173 172 // Number of lanes (empty or not) 174 173 volatile size_t count; … … 180 179 181 180 // Idle Sleep 182 struct __cluster_ idles{181 struct __cluster_proc_list { 183 182 // Spin lock protecting the queue 184 183 volatile uint64_t lock; … … 191 190 192 191 // List of idle processors 193 dlist(processor, processor) list; 192 dlist(processor, processor) idles; 193 194 // List of active processors 195 dlist(processor, processor) actives; 194 196 }; 195 197 … … 207 209 208 210 // List of idle processors 209 __cluster_ idles idles;211 __cluster_proc_list procs; 210 212 211 213 // List of threads -
libcfa/src/concurrency/kernel/startup.cfa
r857a1c6 rc8a0210 268 268 __print_stats( st, mainProcessor->print_stats, "Processor ", mainProcessor->name, (void*)mainProcessor ); 269 269 } 270 #if defined(CFA_STATS_ARRAY) 271 __flush_stat( st, "Processor", mainProcessor ); 272 #endif 270 273 #endif 271 274 … … 348 351 __print_stats( &local_stats, proc->print_stats, "Processor ", proc->name, (void*)proc ); 349 352 } 353 #if defined(CFA_STATS_ARRAY) 354 __flush_stat( &local_stats, "Processor", proc ); 355 #endif 350 356 #endif 351 357 … … 463 469 this.name = name; 464 470 this.cltr = &_cltr; 471 this.rdq.its = 0; 472 this.rdq.itr = 0; 473 this.rdq.id = -1u; 474 this.rdq.target = -1u; 475 this.rdq.cutoff = -1ull; 465 476 do_terminate = false; 466 477 preemption_alarm = 0p; … … 483 494 #endif 484 495 485 lock( this.cltr->idles ); 486 int target = this.cltr->idles.total += 1u; 487 unlock( this.cltr->idles ); 488 489 id = doregister((__processor_id_t*)&this); 490 496 // Register and Lock the RWlock so no-one pushes/pops while we are changing the queue 497 uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this); 498 this.cltr->procs.total += 1u; 499 insert_last(this.cltr->procs.actives, this); 500 501 // Adjust the ready queue size 502 ready_queue_grow( cltr ); 503 504 // Unlock the RWlock 505 ready_mutate_unlock( last_size ); 506 507 __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this); 508 } 509 510 // Not a ctor, it just preps the destruction but should not destroy members 511 static void deinit(processor & this) { 491 512 // Lock the RWlock so no-one pushes/pops while we are changing the queue 492 513 uint_fast32_t last_size = ready_mutate_lock(); 514 this.cltr->procs.total -= 1u; 515 remove(this); 493 516 494 517 // Adjust the ready queue size 495 this.cltr_id = ready_queue_grow( cltr, target ); 496 497 // Unlock the RWlock 498 ready_mutate_unlock( last_size ); 499 500 __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this); 501 } 502 503 // Not a ctor, it just preps the destruction but should not destroy members 504 static void deinit(processor & this) { 505 lock( this.cltr->idles ); 506 int target = this.cltr->idles.total -= 1u; 507 unlock( this.cltr->idles ); 508 509 // Lock the RWlock so no-one pushes/pops while we are changing the queue 510 uint_fast32_t last_size = ready_mutate_lock(); 511 512 // Adjust the ready queue size 513 ready_queue_shrink( this.cltr, target ); 514 515 // Unlock the RWlock 516 ready_mutate_unlock( last_size ); 517 518 // Finally we don't need the read_lock any more 519 unregister((__processor_id_t*)&this); 518 ready_queue_shrink( this.cltr ); 519 520 // Unlock the RWlock and unregister: we don't need the read_lock any more 521 ready_mutate_unregister((__processor_id_t*)&this, last_size ); 520 522 521 523 close(this.idle); … … 560 562 //----------------------------------------------------------------------------- 561 563 // Cluster 562 static void ?{}(__cluster_ idles& this) {564 static void ?{}(__cluster_proc_list & this) { 563 565 this.lock = 0; 564 566 this.idle = 0; 565 567 this.total = 0; 566 (this.list){};567 568 } 568 569 … … 590 591 591 592 // Adjust the ready queue size 592 ready_queue_grow( &this , 0);593 ready_queue_grow( &this ); 593 594 594 595 // Unlock the RWlock … … 605 606 606 607 // Adjust the ready queue size 607 ready_queue_shrink( &this , 0);608 ready_queue_shrink( &this ); 608 609 609 610 // Unlock the RWlock … … 615 616 __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this ); 616 617 } 618 #if 
defined(CFA_STATS_ARRAY) 619 __flush_stat( this.stats, "Cluster", &this ); 620 #endif 617 621 free( this.stats ); 618 622 #endif -
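Processor startup now registers its id and takes the ready-queue writer lock in one step (ready_mutate_register), bumps procs.total, inserts itself into the actives list and grows the ready queue before unlocking; teardown shrinks under the same lock and only unregisters after releasing it. A minimal runnable sketch of that ordering, assuming a pthread mutex as a stand-in for the runtime's reader-writer lock and simple counters in place of ready_queue_grow/shrink:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned total_procs = 0;
static unsigned lane_count  = 1;
#define READYQ_SHARD_FACTOR 2   // 4 for the relaxed FIFO, 2 for work stealing

static void processor_init_sketch(void) {
	// register_proc_id() would run here, before taking the lock
	pthread_mutex_lock(&ready_lock);        // ready_mutate_register(...)
	total_procs += 1;
	lane_count = total_procs >= 2 ? total_procs * READYQ_SHARD_FACTOR
	                              : 1;      // SEQUENTIAL_SHARD in the real code
	pthread_mutex_unlock(&ready_lock);      // ready_mutate_unlock(...)
}

static void processor_deinit_sketch(void) {
	pthread_mutex_lock(&ready_lock);        // ready_mutate_lock()
	total_procs -= 1;
	lane_count = total_procs >= 2 ? total_procs * READYQ_SHARD_FACTOR : 1;
	pthread_mutex_unlock(&ready_lock);
	// unregister_proc_id() only runs after the lock is released
}

int main(void) {
	processor_init_sketch();
	processor_init_sketch();
	printf("procs=%u lanes=%u\n", total_procs, lane_count);   // procs=2 lanes=4
	processor_deinit_sketch();
	processor_deinit_sketch();
	printf("procs=%u lanes=%u\n", total_procs, lane_count);   // procs=0 lanes=1
	return 0;
}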
libcfa/src/concurrency/kernel_private.hfa
r857a1c6 rc8a0210 83 83 // Cluster lock API 84 84 //======================================================================= 85 // Cells use by the reader writer lock86 // while not generic it only relies on a opaque pointer87 struct __attribute__((aligned(128))) __scheduler_lock_id_t {88 // Spin lock used as the underlying lock89 volatile bool lock;90 91 // Handle pointing to the proc owning this cell92 // Used for allocating cells and debugging93 __processor_id_t * volatile handle;94 95 #ifdef __CFA_WITH_VERIFY__96 // Debug, check if this is owned for reading97 bool owned;98 #endif99 };100 101 static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));102 103 85 // Lock-Free registering/unregistering of threads 104 86 // Register a processor to a given cluster and get its unique id in return 105 unsigned doregister( struct __processor_id_t * proc);87 void register_proc_id( struct __processor_id_t * ); 106 88 107 89 // Unregister a processor from a given cluster using its id, getting back the original pointer 108 void unregister( struct __processor_id_t * proc ); 109 110 //----------------------------------------------------------------------- 111 // Cluster idle lock/unlock 112 static inline void lock(__cluster_idles & this) { 113 for() { 114 uint64_t l = this.lock; 115 if( 116 (0 == (l % 2)) 117 && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) 118 ) return; 119 Pause(); 120 } 121 } 122 123 static inline void unlock(__cluster_idles & this) { 124 /* paranoid */ verify( 1 == (this.lock % 2) ); 125 __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST ); 126 } 90 void unregister_proc_id( struct __processor_id_t * proc ); 127 91 128 92 //======================================================================= … … 152 116 __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE); 153 117 } 118 119 // Cells use by the reader writer lock 120 // while not generic it only relies on a opaque pointer 121 struct __attribute__((aligned(128))) __scheduler_lock_id_t { 122 // Spin lock used as the underlying lock 123 volatile bool lock; 124 125 // Handle pointing to the proc owning this cell 126 // Used for allocating cells and debugging 127 __processor_id_t * volatile handle; 128 129 #ifdef __CFA_WITH_VERIFY__ 130 // Debug, check if this is owned for reading 131 bool owned; 132 #endif 133 }; 134 135 static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t)); 154 136 155 137 //----------------------------------------------------------------------- … … 247 229 void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ ); 248 230 231 //----------------------------------------------------------------------- 232 // Lock-Free registering/unregistering of threads 233 // Register a processor to a given cluster and get its unique id in return 234 // For convenience, also acquires the lock 235 static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) { 236 register_proc_id( proc ); 237 return ready_mutate_lock(); 238 } 239 240 // Unregister a processor from a given cluster using its id, getting back the original pointer 241 // assumes the lock is acquired 242 static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) { 243 ready_mutate_unlock( last_s ); 244 unregister_proc_id( proc ); 245 } 246 247 //----------------------------------------------------------------------- 248 // Cluster idle lock/unlock 249 static inline void 
lock(__cluster_proc_list & this) { 250 /* paranoid */ verify( ! __preemption_enabled() ); 251 252 // Start by locking the global RWlock so that we know no-one is 253 // adding/removing processors while we mess with the idle lock 254 ready_schedule_lock(); 255 256 // Simple counting lock, acquired, acquired by incrementing the counter 257 // to an odd number 258 for() { 259 uint64_t l = this.lock; 260 if( 261 (0 == (l % 2)) 262 && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) 263 ) return; 264 Pause(); 265 } 266 267 /* paranoid */ verify( ! __preemption_enabled() ); 268 } 269 270 static inline void unlock(__cluster_proc_list & this) { 271 /* paranoid */ verify( ! __preemption_enabled() ); 272 273 /* paranoid */ verify( 1 == (this.lock % 2) ); 274 // Simple couting lock, release by incrementing to an even number 275 __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST ); 276 277 // Release the global lock, which we acquired when locking 278 ready_schedule_unlock(); 279 280 /* paranoid */ verify( ! __preemption_enabled() ); 281 } 282 249 283 //======================================================================= 250 284 // Ready-Queue API 251 285 //----------------------------------------------------------------------- 252 // pop thread from the ready queue of a cluster253 // returns 0p if empty254 __attribute__((hot)) bool query(struct cluster * cltr);255 256 //-----------------------------------------------------------------------257 286 // push thread onto a ready queue for a cluster 258 287 // returns true if the list was previously empty, false otherwise 259 __attribute__((hot)) boolpush(struct cluster * cltr, struct $thread * thrd);288 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd); 260 289 261 290 //----------------------------------------------------------------------- … … 263 292 // returns 0p if empty 264 293 // May return 0p spuriously 265 __attribute__((hot)) struct $thread * pop (struct cluster * cltr);294 __attribute__((hot)) struct $thread * pop_fast(struct cluster * cltr); 266 295 267 296 //----------------------------------------------------------------------- … … 272 301 273 302 //----------------------------------------------------------------------- 274 // remove thread from the ready queue of a cluster275 // returns bool if it wasn't found276 bool remove_head(struct cluster * cltr, struct $thread * thrd);277 278 //-----------------------------------------------------------------------279 303 // Increase the width of the ready queue (number of lanes) by 4 280 unsigned ready_queue_grow (struct cluster * cltr, int target);304 void ready_queue_grow (struct cluster * cltr); 281 305 282 306 //----------------------------------------------------------------------- 283 307 // Decrease the width of the ready queue (number of lanes) by 4 284 void ready_queue_shrink(struct cluster * cltr , int target);308 void ready_queue_shrink(struct cluster * cltr); 285 309 286 310 -
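lock()/unlock() on the proc list now first take the global ready-schedule read lock and then acquire a simple counting lock: a CAS moves the counter from an even to the next odd value to acquire, and a fetch-add moves it back to even to release. A minimal sketch of just the counting-lock part, using the same GCC atomics as the diff; the surrounding ready_schedule_lock/unlock calls are omitted and the names are illustrative.

#include <stdint.h>
#include <stdbool.h>

static volatile uint64_t idle_lock = 0;

static inline void pause_hint(void) {
	__asm__ volatile("" ::: "memory");   // stand-in for the runtime's Pause()
}

static void idle_lock_acquire(void) {
	for (;;) {
		uint64_t l = idle_lock;
		// acquire by moving an even value to the next odd value
		if ((l % 2) == 0 &&
		    __atomic_compare_exchange_n(&idle_lock, &l, l + 1, false,
		                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return;
		pause_hint();
	}
}

static void idle_lock_release(void) {
	// release by moving the odd value to the next even value
	__atomic_fetch_add(&idle_lock, 1, __ATOMIC_SEQ_CST);
}

int main(void) {
	idle_lock_acquire();
	idle_lock_release();
	return 0;
}

Because readers such as query_idles only trust snapshots taken while the counter is even and unchanged, the writer never has to block them explicitly.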
libcfa/src/concurrency/preemption.cfa
r857a1c6 rc8a0210 712 712 static void * alarm_loop( __attribute__((unused)) void * args ) { 713 713 __processor_id_t id; 714 id.id = doregister(&id);714 register_proc_id(&id); 715 715 __cfaabi_tls.this_proc_id = &id; 716 716 … … 773 773 EXIT: 774 774 __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" ); 775 unregister(&id);775 register_proc_id(&id); 776 776 777 777 return 0p; -
libcfa/src/concurrency/ready_queue.cfa
r857a1c6 rc8a0210 17 17 // #define __CFA_DEBUG_PRINT_READY_QUEUE__ 18 18 19 // #define USE_SNZI20 19 // #define USE_MPSC 20 21 #define USE_RELAXED_FIFO 22 // #define USE_WORK_STEALING 21 23 22 24 #include "bits/defs.hfa" … … 29 31 #include <unistd.h> 30 32 31 #include "snzi.hfa"32 33 #include "ready_subqueue.hfa" 33 34 … … 40 41 #endif 41 42 42 #define BIAS 4 43 #if defined(USE_RELAXED_FIFO) 44 #define BIAS 4 45 #define READYQ_SHARD_FACTOR 4 46 #define SEQUENTIAL_SHARD 1 47 #elif defined(USE_WORK_STEALING) 48 #define READYQ_SHARD_FACTOR 2 49 #define SEQUENTIAL_SHARD 2 50 #else 51 #error no scheduling strategy selected 52 #endif 53 54 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred); 55 static inline struct $thread * try_pop(struct cluster * cltr, unsigned w); 56 static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j); 57 static inline struct $thread * search(struct cluster * cltr); 58 43 59 44 60 // returns the maximum number of processors the RWLock support … … 94 110 //======================================================================= 95 111 // Lock-Free registering/unregistering of threads 96 unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {112 void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) { 97 113 __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc); 98 114 … … 108 124 /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size)); 109 125 /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0); 110 returni;126 proc->id = i; 111 127 } 112 128 } … … 135 151 /*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size)); 136 152 /*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0); 137 returnn;138 } 139 140 void unregister ( struct __processor_id_t * proc ) with(*__scheduler_lock) {153 proc->id = n; 154 } 155 156 void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) { 141 157 unsigned id = proc->id; 142 158 /*paranoid*/ verify(id < ready); … … 193 209 194 210 //======================================================================= 195 // Cforall Re qdy Queue used for scheduling211 // Cforall Ready Queue used for scheduling 196 212 //======================================================================= 197 213 void ?{}(__ready_queue_t & this) with (this) { 198 214 lanes.data = 0p; 215 lanes.tscs = 0p; 199 216 lanes.count = 0; 200 217 } 201 218 202 219 void ^?{}(__ready_queue_t & this) with (this) { 203 verify( 1 == lanes.count ); 204 #ifdef USE_SNZI 205 verify( !query( snzi ) ); 206 #endif 220 verify( SEQUENTIAL_SHARD == lanes.count ); 207 221 free(lanes.data); 222 free(lanes.tscs); 208 223 } 209 224 210 225 //----------------------------------------------------------------------- 211 __attribute__((hot)) bool query(struct cluster * cltr) { 212 #ifdef USE_SNZI 213 return query(cltr->ready_queue.snzi); 214 #endif 215 return true; 216 } 217 218 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) { 219 unsigned i; 220 bool local; 221 #if defined(BIAS) 226 #if defined(USE_RELAXED_FIFO) 227 //----------------------------------------------------------------------- 228 // get index from random number with or without bias towards queues 229 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) { 230 unsigned i; 231 bool local; 222 232 unsigned rlow = r % BIAS; 223 233 unsigned rhigh = r / BIAS; … … 225 235 // (BIAS - 1) out of BIAS 
chances 226 236 // Use perferred queues 227 i = preferred + (rhigh % 4);237 i = preferred + (rhigh % READYQ_SHARD_FACTOR); 228 238 local = true; 229 239 } … … 234 244 local = false; 235 245 } 236 #else 237 i = r; 238 local = false; 239 #endif 240 return [i, local]; 241 } 242 243 //----------------------------------------------------------------------- 244 __attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) { 245 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 246 247 const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 248 249 // write timestamp 250 thrd->link.ts = rdtscl(); 251 252 bool first = false; 253 __attribute__((unused)) bool local; 254 __attribute__((unused)) int preferred; 255 #if defined(BIAS) 256 preferred = 257 //* 258 external ? -1 : kernelTLS().this_processor->cltr_id; 259 /*/ 260 thrd->link.preferred * 4; 261 //*/ 262 #endif 263 264 // Try to pick a lane and lock it 265 unsigned i; 266 do { 267 // Pick the index of a lane 268 // unsigned r = __tls_rand(); 269 unsigned r = __tls_rand_fwd(); 270 [i, local] = idx_from_r(r, preferred); 271 272 i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 273 246 return [i, local]; 247 } 248 249 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) { 250 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 251 252 const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 253 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count ); 254 255 // write timestamp 256 thrd->link.ts = rdtscl(); 257 258 bool local; 259 int preferred = external ? -1 : kernelTLS().this_processor->rdq.id; 260 261 // Try to pick a lane and lock it 262 unsigned i; 263 do { 264 // Pick the index of a lane 265 unsigned r = __tls_rand_fwd(); 266 [i, local] = idx_from_r(r, preferred); 267 268 i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 269 270 #if !defined(__CFA_NO_STATISTICS__) 271 if(external) { 272 if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED); 273 __atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED); 274 } 275 else { 276 if(local) __tls_stats()->ready.pick.push.local++; 277 __tls_stats()->ready.pick.push.attempt++; 278 } 279 #endif 280 281 #if defined(USE_MPSC) 282 // mpsc always succeeds 283 } while( false ); 284 #else 285 // If we can't lock it retry 286 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 287 #endif 288 289 // Actually push it 290 push(lanes.data[i], thrd); 291 292 #if !defined(USE_MPSC) 293 // Unlock and return 294 __atomic_unlock( &lanes.data[i].lock ); 295 #endif 296 297 // Mark the current index in the tls rng instance as having an item 298 __tls_rand_advance_bck(); 299 300 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 301 302 // Update statistics 274 303 #if !defined(__CFA_NO_STATISTICS__) 275 304 if(external) { 276 if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.l ocal, 1, __ATOMIC_RELAXED);277 __atomic_fetch_add(&cltr->stats->ready.pick.ext. 
attempt, 1, __ATOMIC_RELAXED);305 if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED); 306 __atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED); 278 307 } 279 308 else { 280 if(local) __tls_stats()->ready.pick.push.l ocal++;281 __tls_stats()->ready.pick.push. attempt++;309 if(local) __tls_stats()->ready.pick.push.lsuccess++; 310 __tls_stats()->ready.pick.push.success++; 282 311 } 283 312 #endif 284 285 #if defined(USE_MPSC) 286 // mpsc always succeeds 287 } while( false ); 288 #else 289 // If we can't lock it retry 290 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 291 #endif 292 293 // Actually push it 294 #ifdef USE_SNZI 295 bool lane_first = 296 #endif 297 298 push(lanes.data[i], thrd); 299 300 #ifdef USE_SNZI 301 // If this lane used to be empty we need to do more 302 if(lane_first) { 303 // Check if the entire queue used to be empty 304 first = !query(snzi); 305 306 // Update the snzi 307 arrive( snzi, i ); 308 } 309 #endif 310 311 #if !defined(USE_MPSC) 312 // Unlock and return 313 __atomic_unlock( &lanes.data[i].lock ); 314 #endif 315 316 // Mark the current index in the tls rng instance as having an item 317 __tls_rand_advance_bck(); 318 319 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 313 } 314 315 // Pop from the ready queue from a given cluster 316 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 317 /* paranoid */ verify( lanes.count > 0 ); 318 /* paranoid */ verify( kernelTLS().this_processor ); 319 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 320 321 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 322 int preferred = kernelTLS().this_processor->rdq.id; 323 324 325 // As long as the list is not empty, try finding a lane that isn't empty and pop from it 326 for(25) { 327 // Pick two lists at random 328 unsigned ri = __tls_rand_bck(); 329 unsigned rj = __tls_rand_bck(); 330 331 unsigned i, j; 332 __attribute__((unused)) bool locali, localj; 333 [i, locali] = idx_from_r(ri, preferred); 334 [j, localj] = idx_from_r(rj, preferred); 335 336 #if !defined(__CFA_NO_STATISTICS__) 337 if(locali && localj) { 338 __tls_stats()->ready.pick.pop.local++; 339 } 340 #endif 341 342 i %= count; 343 j %= count; 344 345 // try popping from the 2 picked lists 346 struct $thread * thrd = try_pop(cltr, i, j); 347 if(thrd) { 348 #if !defined(__CFA_NO_STATISTICS__) 349 if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++; 350 #endif 351 return thrd; 352 } 353 } 354 355 // All lanes where empty return 0p 356 return 0p; 357 } 358 359 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { 360 return search(cltr); 361 } 362 #endif 363 #if defined(USE_WORK_STEALING) 364 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) { 365 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 366 367 const bool external = (!kernelTLS().this_processor) || (cltr != kernelTLS().this_processor->cltr); 368 /* paranoid */ verify(external || kernelTLS().this_processor->rdq.id < lanes.count ); 369 370 // write timestamp 371 thrd->link.ts = rdtscl(); 372 373 // Try to pick a lane and lock it 374 unsigned i; 375 do { 376 if(unlikely(external)) { 377 i = __tls_rand() % lanes.count; 378 } 379 else { 380 processor * proc = kernelTLS().this_processor; 381 unsigned r = 
proc->rdq.its++; 382 i = proc->rdq.id + (r % READYQ_SHARD_FACTOR); 383 } 384 385 386 #if defined(USE_MPSC) 387 // mpsc always succeeds 388 } while( false ); 389 #else 390 // If we can't lock it retry 391 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 392 #endif 393 394 // Actually push it 395 push(lanes.data[i], thrd); 396 397 #if !defined(USE_MPSC) 398 // Unlock and return 399 __atomic_unlock( &lanes.data[i].lock ); 400 #endif 401 402 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 403 } 404 405 // Pop from the ready queue from a given cluster 406 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 407 /* paranoid */ verify( lanes.count > 0 ); 408 /* paranoid */ verify( kernelTLS().this_processor ); 409 /* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes.count ); 410 411 processor * proc = kernelTLS().this_processor; 412 413 if(proc->rdq.target == -1u) { 414 proc->rdq.target = __tls_rand() % lanes.count; 415 unsigned it1 = proc->rdq.itr; 416 unsigned it2 = proc->rdq.itr + 1; 417 unsigned idx1 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR); 418 unsigned idx2 = proc->rdq.id + (it1 % READYQ_SHARD_FACTOR); 419 unsigned long long tsc1 = ts(lanes.data[idx1]); 420 unsigned long long tsc2 = ts(lanes.data[idx2]); 421 proc->rdq.cutoff = min(tsc1, tsc2); 422 } 423 else if(lanes.tscs[proc->rdq.target].tv < proc->rdq.cutoff) { 424 $thread * t = try_pop(cltr, proc->rdq.target); 425 proc->rdq.target = -1u; 426 if(t) return t; 427 } 428 429 for(READYQ_SHARD_FACTOR) { 430 unsigned i = proc->rdq.id + (--proc->rdq.itr % READYQ_SHARD_FACTOR); 431 if($thread * t = try_pop(cltr, i)) return t; 432 } 433 return 0p; 434 } 435 436 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 437 for(25) { 438 unsigned i = __tls_rand() % lanes.count; 439 $thread * t = try_pop(cltr, i); 440 if(t) return t; 441 } 442 443 return search(cltr); 444 } 445 #endif 446 447 //======================================================================= 448 // Various Ready Queue utilities 449 //======================================================================= 450 // these function work the same or almost the same 451 // whether they are using work-stealing or relaxed fifo scheduling 452 453 //----------------------------------------------------------------------- 454 // try to pop from a lane given by index w 455 static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) { 456 // Get relevant elements locally 457 __intrusive_lane_t & lane = lanes.data[w]; 458 459 // If list looks empty retry 460 if( is_empty(lane) ) return 0p; 461 462 // If we can't get the lock retry 463 if( !__atomic_try_acquire(&lane.lock) ) return 0p; 464 465 // If list is empty, unlock and retry 466 if( is_empty(lane) ) { 467 __atomic_unlock(&lane.lock); 468 return 0p; 469 } 470 471 // Actually pop the list 472 struct $thread * thrd; 473 thrd = pop(lane); 474 475 /* paranoid */ verify(thrd); 476 /* paranoid */ verify(lane.lock); 477 478 // Unlock and return 479 __atomic_unlock(&lane.lock); 320 480 321 481 // Update statistics 322 482 #if !defined(__CFA_NO_STATISTICS__) 323 if(external) { 324 if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED); 325 __atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED); 326 } 327 else { 328 if(local) 
__tls_stats()->ready.pick.push.lsuccess++; 329 __tls_stats()->ready.pick.push.success++; 330 } 483 __tls_stats()->ready.pick.pop.success++; 331 484 #endif 332 485 333 // return whether or not the list was empty before this push 334 return first; 335 } 336 337 static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j); 338 static struct $thread * try_pop(struct cluster * cltr, unsigned i); 339 340 // Pop from the ready queue from a given cluster 341 __attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) { 342 /* paranoid */ verify( lanes.count > 0 ); 343 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); 344 int preferred; 345 #if defined(BIAS) 346 // Don't bother trying locally too much 347 preferred = kernelTLS().this_processor->cltr_id; 486 #if defined(USE_WORK_STEALING) 487 lanes.tscs[w].tv = thrd->link.ts; 348 488 #endif 349 489 350 351 // As long as the list is not empty, try finding a lane that isn't empty and pop from it 352 #ifdef USE_SNZI 353 while( query(snzi) ) { 354 #else 355 for(25) { 356 #endif 357 // Pick two lists at random 358 // unsigned ri = __tls_rand(); 359 // unsigned rj = __tls_rand(); 360 unsigned ri = __tls_rand_bck(); 361 unsigned rj = __tls_rand_bck(); 362 363 unsigned i, j; 364 __attribute__((unused)) bool locali, localj; 365 [i, locali] = idx_from_r(ri, preferred); 366 [j, localj] = idx_from_r(rj, preferred); 367 368 #if !defined(__CFA_NO_STATISTICS__) 369 if(locali && localj) { 370 __tls_stats()->ready.pick.pop.local++; 371 } 372 #endif 373 374 i %= count; 375 j %= count; 376 377 // try popping from the 2 picked lists 378 struct $thread * thrd = try_pop(cltr, i, j); 379 if(thrd) { 380 #if defined(BIAS) && !defined(__CFA_NO_STATISTICS__) 381 if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++; 382 #endif 383 return thrd; 384 } 385 } 386 387 // All lanes where empty return 0p 388 return 0p; 389 } 390 391 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 490 // return the popped thread 491 return thrd; 492 } 493 494 //----------------------------------------------------------------------- 495 // try to pop from any lanes making sure you don't miss any threads push 496 // before the start of the function 497 static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) { 392 498 /* paranoid */ verify( lanes.count > 0 ); 393 499 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); … … 405 511 } 406 512 407 408 513 //----------------------------------------------------------------------- 409 // Given 2 indexes, pick the list with the oldest push an try to pop from it 410 static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) { 411 #if !defined(__CFA_NO_STATISTICS__) 412 __tls_stats()->ready.pick.pop.attempt++; 413 #endif 414 415 // Pick the bet list 416 int w = i; 417 if( __builtin_expect(!is_empty(lanes.data[j]), true) ) { 418 w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? 
i : j; 419 } 420 421 return try_pop(cltr, w); 422 } 423 424 static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) { 425 // Get relevant elements locally 426 __intrusive_lane_t & lane = lanes.data[w]; 427 428 // If list looks empty retry 429 if( is_empty(lane) ) return 0p; 430 431 // If we can't get the lock retry 432 if( !__atomic_try_acquire(&lane.lock) ) return 0p; 433 434 435 // If list is empty, unlock and retry 436 if( is_empty(lane) ) { 437 __atomic_unlock(&lane.lock); 438 return 0p; 439 } 440 441 // Actually pop the list 442 struct $thread * thrd; 443 thrd = pop(lane); 444 445 /* paranoid */ verify(thrd); 446 /* paranoid */ verify(lane.lock); 447 448 #ifdef USE_SNZI 449 // If this was the last element in the lane 450 if(emptied) { 451 depart( snzi, w ); 452 } 453 #endif 454 455 // Unlock and return 456 __atomic_unlock(&lane.lock); 457 458 // Update statistics 459 #if !defined(__CFA_NO_STATISTICS__) 460 __tls_stats()->ready.pick.pop.success++; 461 #endif 462 463 // Update the thread bias 464 thrd->link.preferred = w / 4; 465 466 // return the popped thread 467 return thrd; 468 } 469 //----------------------------------------------------------------------- 470 471 bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) { 472 for(i; lanes.count) { 473 __intrusive_lane_t & lane = lanes.data[i]; 474 475 bool removed = false; 476 477 __atomic_acquire(&lane.lock); 478 if(head(lane)->link.next == thrd) { 479 $thread * pthrd; 480 pthrd = pop(lane); 481 482 /* paranoid */ verify( pthrd == thrd ); 483 484 removed = true; 485 #ifdef USE_SNZI 486 if(emptied) { 487 depart( snzi, i ); 488 } 489 #endif 490 } 491 __atomic_unlock(&lane.lock); 492 493 if( removed ) return true; 494 } 495 return false; 496 } 497 498 //----------------------------------------------------------------------- 499 514 // Check that all the intrusive queues in the data structure are still consistent 500 515 static void check( __ready_queue_t & q ) with (q) { 501 516 #if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC) … … 522 537 } 523 538 539 //----------------------------------------------------------------------- 540 // Given 2 indexes, pick the list with the oldest push an try to pop from it 541 static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) { 542 #if !defined(__CFA_NO_STATISTICS__) 543 __tls_stats()->ready.pick.pop.attempt++; 544 #endif 545 546 // Pick the bet list 547 int w = i; 548 if( __builtin_expect(!is_empty(lanes.data[j]), true) ) { 549 w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? 
i : j; 550 } 551 552 return try_pop(cltr, w); 553 } 554 524 555 // Call this function of the intrusive list was moved using memcpy 525 556 // fixes the list so that the pointers back to anchors aren't left dangling … … 541 572 } 542 573 574 static void assign_list(unsigned & value, dlist(processor, processor) & list, unsigned count) { 575 processor * it = &list`first; 576 for(unsigned i = 0; i < count; i++) { 577 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count); 578 it->rdq.id = value; 579 it->rdq.target = -1u; 580 value += READYQ_SHARD_FACTOR; 581 it = &(*it)`next; 582 } 583 } 584 585 static void reassign_cltr_id(struct cluster * cltr) { 586 unsigned preferred = 0; 587 assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle); 588 assign_list(preferred, cltr->procs.idles , cltr->procs.idle ); 589 } 590 591 static void fix_times( struct cluster * cltr ) with( cltr->ready_queue ) { 592 #if defined(USE_WORK_STEALING) 593 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc); 594 for(i; lanes.count) { 595 lanes.tscs[i].tv = ts(lanes.data[i]); 596 } 597 #endif 598 } 599 543 600 // Grow the ready queue 544 unsigned ready_queue_grow(struct cluster * cltr, int target) { 545 unsigned preferred; 601 void ready_queue_grow(struct cluster * cltr) { 546 602 size_t ncount; 603 int target = cltr->procs.total; 547 604 548 605 /* paranoid */ verify( ready_mutate_islocked() ); … … 554 611 // grow the ready queue 555 612 with( cltr->ready_queue ) { 556 #ifdef USE_SNZI557 ^(snzi){};558 #endif559 560 613 // Find new count 561 614 // Make sure we always have atleast 1 list 562 615 if(target >= 2) { 563 ncount = target * 4; 564 preferred = ncount - 4; 616 ncount = target * READYQ_SHARD_FACTOR; 565 617 } else { 566 ncount = 1; 567 preferred = 0; 618 ncount = SEQUENTIAL_SHARD; 568 619 } 569 620 … … 583 634 // Update original 584 635 lanes.count = ncount; 585 586 #ifdef USE_SNZI 587 // Re-create the snzi 588 snzi{ log2( lanes.count / 8 ) }; 589 for( idx; (size_t)lanes.count ) { 590 if( !is_empty(lanes.data[idx]) ) { 591 arrive(snzi, idx); 592 } 593 } 594 #endif 595 } 636 } 637 638 fix_times(cltr); 639 640 reassign_cltr_id(cltr); 596 641 597 642 // Make sure that everything is consistent … … 601 646 602 647 /* paranoid */ verify( ready_mutate_islocked() ); 603 return preferred;604 648 } 605 649 606 650 // Shrink the ready queue 607 void ready_queue_shrink(struct cluster * cltr , int target) {651 void ready_queue_shrink(struct cluster * cltr) { 608 652 /* paranoid */ verify( ready_mutate_islocked() ); 609 653 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); … … 612 656 /* paranoid */ check( cltr->ready_queue ); 613 657 658 int target = cltr->procs.total; 659 614 660 with( cltr->ready_queue ) { 615 #ifdef USE_SNZI616 ^(snzi){};617 #endif618 619 661 // Remember old count 620 662 size_t ocount = lanes.count; … … 622 664 // Find new count 623 665 // Make sure we always have atleast 1 list 624 lanes.count = target >= 2 ? target * 4: 1;666 lanes.count = target >= 2 ? 
target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; 625 667 /* paranoid */ verify( ocount >= lanes.count ); 626 /* paranoid */ verify( lanes.count == target * 4|| target < 2 );668 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); 627 669 628 670 // for printing count the number of displaced threads … … 667 709 fix(lanes.data[idx]); 668 710 } 669 670 #ifdef USE_SNZI 671 // Re-create the snzi 672 snzi{ log2( lanes.count / 8 ) }; 673 for( idx; (size_t)lanes.count ) { 674 if( !is_empty(lanes.data[idx]) ) { 675 arrive(snzi, idx); 676 } 677 } 678 #endif 679 } 711 } 712 713 fix_times(cltr); 714 715 reassign_cltr_id(cltr); 680 716 681 717 // Make sure that everything is consistent -
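With the SNZI gone, ready_queue.cfa selects lanes directly. Under USE_RELAXED_FIFO, pop_fast picks two random lanes (biased towards the processor's own READYQ_SHARD_FACTOR lanes) and pops from the one whose head carries the older timestamp; under USE_WORK_STEALING, a processor mostly pops its own shard and occasionally steals from a random lane whose timestamp is older than its cutoff. The sketch below shows only the "pick two, take the older" heuristic; a lane is reduced to a single head timestamp and try_pop_sketch/pop_two_choices are simplified stand-ins, not the runtime's functions.

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>

#define NLANES 8

struct lane_sketch {
	unsigned long long head_ts;  // timestamp of the oldest queued thread, 0 = empty
};

static struct lane_sketch lanes[NLANES];

static bool lane_empty(unsigned i) { return lanes[i].head_ts == 0; }

// Pop from lane w; the real code locks the lane first and returns a thread.
static unsigned long long try_pop_sketch(unsigned w) {
	unsigned long long ts = lanes[w].head_ts;
	lanes[w].head_ts = 0;
	return ts;
}

static unsigned long long pop_two_choices(void) {
	unsigned i = rand() % NLANES;
	unsigned j = rand() % NLANES;
	// prefer the lane with the older head, mirroring try_pop(cltr, i, j)
	unsigned w = i;
	if (!lane_empty(j) && (lane_empty(i) || lanes[j].head_ts < lanes[i].head_ts))
		w = j;
	return try_pop_sketch(w);
}

int main(void) {
	lanes[2].head_ts = 100;
	lanes[5].head_ts = 42;
	for (int k = 0; k < 4; k++)
		printf("popped ts %llu\n", pop_two_choices());
	return 0;
}

reassign_cltr_id keeps the sharding consistent with this scheme: whenever the queue grows or shrinks it walks the actives and then the idles lists, handing out lane ids in steps of READYQ_SHARD_FACTOR.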
libcfa/src/concurrency/ready_subqueue.hfa
r857a1c6 rc8a0210 246 246 #endif 247 247 } 248 249 // Aligned timestamps which are used by the relaxed ready queue 250 struct __attribute__((aligned(128))) __timestamp_t { 251 volatile unsigned long long tv; 252 }; 253 254 void ?{}(__timestamp_t & this) { this.tv = 0; } 255 void ^?{}(__timestamp_t & this) {} -
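The new __timestamp_t is just a volatile counter forced onto its own cache lines with aligned(128), so the per-lane "last push" times can be written by one processor and scanned by another without false sharing. A tiny C illustration of the same padding trick (padded_ts is a made-up name):

#include <stdio.h>
#include <stddef.h>

struct __attribute__((aligned(128))) padded_ts {
	volatile unsigned long long tv;   // only 8 bytes of payload
};

static struct padded_ts tscs[4];

int main(void) {
	printf("sizeof(struct padded_ts) = %zu\n", sizeof(struct padded_ts));             // 128
	printf("stride between entries   = %td\n", (char *)&tscs[1] - (char *)&tscs[0]);  // 128
	return 0;
}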
libcfa/src/concurrency/stats.cfa
r857a1c6 rc8a0210 5 5 #include <inttypes.h> 6 6 #include "bits/debug.hfa" 7 #include "bits/locks.hfa" 7 8 #include "stats.hfa" 8 9 … … 44 45 stats->io.calls.errors.busy = 0; 45 46 stats->io.poller.sleeps = 0; 47 #endif 48 49 #if defined(CFA_STATS_ARRAY) 50 stats->array.values = alloc(CFA_STATS_ARRAY); 51 stats->array.cnt = 0; 46 52 #endif 47 53 } … … 151 157 #endif 152 158 } 159 160 #if defined(CFA_STATS_ARRAY) 161 extern "C" { 162 #include <stdio.h> 163 #include <errno.h> 164 #include <sys/stat.h> 165 #include <fcntl.h> 166 } 167 168 void __flush_stat( struct __stats_t * this, const char * name, void * handle) { 169 int ret = mkdir(".cfadata", 0755); 170 if(ret < 0 && errno != EEXIST) abort("Failed to create directory .cfadata: %d\n", errno); 171 172 char filename[100]; 173 snprintf(filename, 100, ".cfadata/%s%p.data", name, handle); 174 175 int fd = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0644); 176 if(fd < 0) abort("Failed to create file %s: %d\n", filename, errno); 177 178 for(i; this->array.cnt) { 179 char line[100]; 180 size_t n = snprintf(line, 100, "%llu, %lld\n", this->array.values[i].ts, this->array.values[i].value); 181 write(fd, line, n); 182 } 183 184 this->array.cnt = 0; 185 close(fd); 186 } 187 188 static __spinlock_t stats_lock; 189 190 void __push_stat( struct __stats_t * this, int64_t value, bool external, const char * name, void * handle ) { 191 if(external) lock(stats_lock __cfaabi_dbg_ctx2); 192 193 if( this->array.cnt >= CFA_STATS_ARRAY ) __flush_stat( this, name, handle ); 194 195 size_t idx = this->array.cnt; 196 this->array.cnt++; 197 198 if(external) unlock(stats_lock); 199 200 this->array.values[idx].ts = rdtscl(); 201 this->array.values[idx].value = value; 202 } 203 #endif 153 204 #endif -
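When CFA_STATS_ARRAY is defined, every recorded sample is an (rdtsc timestamp, value) pair buffered in stats->array; __flush_stat appends the buffer as "ts, value" lines to .cfadata/<name><handle>.data and resets the count, and __push_stat flushes automatically when the buffer fills. A plain-C sketch of the same append-to-file path, with a deliberately tiny buffer, clock() in place of rdtscl() and made-up helper names:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <time.h>
#include <sys/stat.h>

#define STATS_ARRAY 4   // tiny buffer so this example actually flushes

struct stat_elem { unsigned long long ts; int64_t value; };
static struct stat_elem values[STATS_ARRAY];
static size_t cnt = 0;

static void flush_stat(const char * name, const void * handle) {
	if (mkdir(".cfadata", 0755) < 0 && errno != EEXIST) { perror("mkdir"); exit(1); }

	char filename[100];
	snprintf(filename, sizeof filename, ".cfadata/%s%p.data", name, handle);

	int fd = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0644);
	if (fd < 0) { perror("open"); exit(1); }

	for (size_t i = 0; i < cnt; i++) {
		char line[100];
		int n = snprintf(line, sizeof line, "%llu, %lld\n",
		                 values[i].ts, (long long)values[i].value);
		if (write(fd, line, (size_t)n) < 0) perror("write");
	}
	cnt = 0;                                   // buffer is empty again
	close(fd);
}

static void push_stat(int64_t value, const char * name, const void * handle) {
	if (cnt >= STATS_ARRAY) flush_stat(name, handle);   // flush when full
	values[cnt].ts = (unsigned long long)clock();       // rdtscl() in the runtime
	values[cnt].value = value;
	cnt++;
}

int main(void) {
	static int handle;   // stand-in for the processor/cluster pointer used as the file suffix
	for (int i = 0; i < 10; i++) push_stat(i, "Example", &handle);
	flush_stat("Example", &handle);
	return 0;
}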
libcfa/src/concurrency/stats.hfa
r857a1c6 rc8a0210 1 1 #pragma once 2 3 // #define CFA_STATS_ARRAY 10000 2 4 3 5 #include <stdint.h> … … 109 111 #endif 110 112 113 #if defined(CFA_STATS_ARRAY) 114 struct __stats_elem_t { 115 long long int ts; 116 int64_t value; 117 }; 118 #endif 119 111 120 struct __attribute__((aligned(128))) __stats_t { 112 121 __stats_readQ_t ready; … … 114 123 __stats_io_t io; 115 124 #endif 125 126 #if defined(CFA_STATS_ARRAY) 127 struct { 128 __stats_elem_t * values; 129 volatile size_t cnt; 130 } array; 131 #endif 132 116 133 }; 117 134 … … 119 136 void __tally_stats( struct __stats_t *, struct __stats_t * ); 120 137 void __print_stats( struct __stats_t *, int, const char *, const char *, void * ); 138 #if defined(CFA_STATS_ARRAY) 139 void __push_stat ( struct __stats_t *, int64_t value, bool external, const char * name, void * handle); 140 void __flush_stat( struct __stats_t *, const char *, void * ); 141 #else 142 static inline void __push_stat ( struct __stats_t *, int64_t, bool, const char *, void * ) {} 143 static inline void __flush_stat( struct __stats_t *, const char *, void * ) {} 144 #endif 121 145 #endif 122 146 -
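The header keeps this sampling optional: with CFA_STATS_ARRAY defined it declares the element buffer and the real __push_stat/__flush_stat, otherwise it supplies inline no-op stubs so call sites need no #ifdefs. A short sketch of that pattern, assuming a hypothetical STATS_ARRAY feature macro and push_stat/flush_stat names:

#include <stdio.h>
#include <stdint.h>

// #define STATS_ARRAY 10000   // uncomment to enable sampling

#if defined(STATS_ARRAY)
void push_stat(int64_t value)  { printf("sample %lld\n", (long long)value); }
void flush_stat(void)          { printf("flush\n"); }
#else
static inline void push_stat(int64_t value) { (void)value; }  // compiles to nothing
static inline void flush_stat(void)         {}
#endif

int main(void) {
	push_stat(42);   // call sites look identical either way
	flush_stat();
	return 0;
}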
libcfa/src/concurrency/thread.cfa
r857a1c6 rc8a0210 39 39 link.next = 0p; 40 40 link.prev = 0p; 41 link.preferred = -1;42 41 #if defined( __CFA_WITH_VERIFY__ ) 43 42 canary = 0x0D15EA5E0D15EA5Ep; … … 62 61 } 63 62 64 FORALL_DATA_INSTANCE(ThreadCancelled, (thread_t &), (thread_t)) 63 EHM_VIRTUAL_TABLE(SomeThreadCancelled, std_thread_cancelled); 65 64 66 65 forall(T &) … … 73 72 forall(T &) 74 73 const char * msg(ThreadCancelled(T) *) { 75 return "ThreadCancelled ";74 return "ThreadCancelled(...)"; 76 75 } 77 76 78 77 forall(T &) 79 78 static void default_thread_cancel_handler(ThreadCancelled(T) & ) { 79 // Improve this error message, can I do formatting? 80 80 abort( "Unhandled thread cancellation.\n" ); 81 81 } 82 82 83 forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))) 83 static void default_thread_cancel_handler(SomeThreadCancelled & ) { 84 // Improve this error message, can I do formatting? 85 abort( "Unhandled thread cancellation.\n" ); 86 } 87 88 forall(T & | is_thread(T) | IS_EXCEPTION(SomeThreadCancelled)) 84 89 void ?{}( thread_dtor_guard_t & this, 85 T & thrd, void(*cancelHandler)( ThreadCancelled(T)&)) {86 $monitor * m = get_monitor(thrd);90 T & thrd, void(*cancelHandler)(SomeThreadCancelled &)) { 91 $monitor * m = get_monitor(thrd); 87 92 $thread * desc = get_thread(thrd); 88 93 89 94 // Setup the monitor guard 90 95 void (*dtor)(T& mutex this) = ^?{}; 91 bool join = cancelHandler != (void(*)( ThreadCancelled(T)&))0;96 bool join = cancelHandler != (void(*)(SomeThreadCancelled&))0; 92 97 (this.mg){&m, (void(*)())dtor, join}; 93 98 … … 103 108 } 104 109 desc->state = Cancelled; 105 void(*defaultResumptionHandler)( ThreadCancelled(T) &) =110 void(*defaultResumptionHandler)(SomeThreadCancelled &) = 106 111 join ? cancelHandler : default_thread_cancel_handler; 107 112 108 ThreadCancelled(T) except;109 113 // TODO: Remove explitate vtable set once trac#186 is fixed. 110 except.virtual_table = &get_exception_vtable(&except); 114 SomeThreadCancelled except; 115 except.virtual_table = &std_thread_cancelled; 111 116 except.the_thread = &thrd; 112 117 except.the_exception = __cfaehm_cancellation_exception( cancellation ); 113 throwResume except; 118 // Why is this cast required? 119 throwResume (SomeThreadCancelled &)except; 114 120 115 121 except.the_exception->virtual_table->free( except.the_exception ); … … 158 164 159 165 //----------------------------------------------------------------------------- 160 forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION( ThreadCancelled, (T)))166 forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(SomeThreadCancelled)) 161 167 T & join( T & this ) { 162 168 thread_dtor_guard_t guard = { this, defaultResumptionHandler }; -
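join() and the thread destructor guard now funnel cancellation through a single SomeThreadCancelled exception with a shared vtable (std_thread_cancelled): the guard stores either the caller's cancel handler or a default that aborts, and on a cancelled thread it fills in the thread and the original exception and resumes the handler. The C approximation below replaces CFA's resumption exception with a plain struct and a function pointer; the struct and helper names are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct thread_cancelled {
	void * the_thread;      // which thread was cancelled
	void * the_exception;   // the exception that caused the cancellation
};

typedef void (*cancel_handler_t)(struct thread_cancelled *);

static void default_cancel_handler(struct thread_cancelled * e) {
	(void)e;
	fprintf(stderr, "Unhandled thread cancellation.\n");
	abort();
}

struct dtor_guard {
	cancel_handler_t handler;   // caller's handler when joining, default otherwise
};

static void guard_init(struct dtor_guard * g, cancel_handler_t h) {
	g->handler = h ? h : default_cancel_handler;
}

static void guard_on_cancellation(struct dtor_guard * g,
                                  void * thread, void * cause) {
	struct thread_cancelled except = { thread, cause };
	g->handler(&except);        // throwResume in the CFA runtime
}

static void my_handler(struct thread_cancelled * e) {
	printf("thread %p cancelled by %p\n", e->the_thread, e->the_exception);
}

int main(void) {
	struct dtor_guard g;
	guard_init(&g, my_handler);
	int t, cause;
	guard_on_cancellation(&g, &t, &cause);
	return 0;
}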
libcfa/src/concurrency/thread.hfa
r857a1c6 rc8a0210 32 32 }; 33 33 34 FORALL_DATA_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) ( 34 EHM_EXCEPTION(SomeThreadCancelled) ( 35 void * the_thread; 36 exception_t * the_exception; 37 ); 38 39 EHM_EXTERN_VTABLE(SomeThreadCancelled, std_thread_cancelled); 40 41 EHM_FORALL_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) ( 35 42 thread_t * the_thread; 36 43 exception_t * the_exception; … … 79 86 }; 80 87 81 forall( T & | is_thread(T) | IS_EXCEPTION( ThreadCancelled, (T)) )82 void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)( ThreadCancelled(T)&) );88 forall( T & | is_thread(T) | IS_EXCEPTION(SomeThreadCancelled) ) 89 void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(SomeThreadCancelled &) ); 83 90 void ^?{}( thread_dtor_guard_t & this ); 84 91 … … 125 132 //---------- 126 133 // join 127 forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION( ThreadCancelled, (T)) )134 forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(SomeThreadCancelled) ) 128 135 T & join( T & this ); 129 136 -
libcfa/src/exception.c
r857a1c6 rc8a0210 10 10 // Created On : Mon Jun 26 15:13:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Oct 27 16:27:00 202013 // Update Count : 3 512 // Last Modified On : Wed Feb 24 13:40:00 2021 13 // Update Count : 36 14 14 // 15 15 … … 26 26 #include "concurrency/invoke.h" 27 27 #include "stdhdr/assert.h" 28 #include "virtual.h" 28 29 29 30 #if defined( __ARM_ARCH ) … … 46 47 const _Unwind_Exception_Class __cfaehm_exception_class = 0x4c50575500414643; 47 48 48 // Base exception vtable is abstract, you should not have base exceptions. 49 struct __cfaehm_base_exception_t_vtable 50 ___cfaehm_base_exception_t_vtable_instance = { 51 .parent = NULL, 52 .size = 0, 53 .copy = NULL, 54 .free = NULL, 55 .msg = NULL 49 // Base Exception type id: 50 struct __cfa__parent_vtable __cfatid_exception_t = { 51 NULL, 56 52 }; 57 53 -
libcfa/src/exception.h
r857a1c6 rc8a0210 10 10 // Created On : Mon Jun 26 15:11:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : T ue Oct 27 14:45:00 202013 // Update Count : 1 112 // Last Modified On : Thr Apr 8 15:20:00 2021 13 // Update Count : 12 14 14 // 15 15 … … 29 29 struct __cfaehm_base_exception_t; 30 30 typedef struct __cfaehm_base_exception_t exception_t; 31 struct __cfa__parent_vtable; 31 32 struct __cfaehm_base_exception_t_vtable { 32 const struct __cfa ehm_base_exception_t_vtable * parent;33 const struct __cfa__parent_vtable * __cfavir_typeid; 33 34 size_t size; 34 35 void (*copy)(struct __cfaehm_base_exception_t *this, … … 40 41 struct __cfaehm_base_exception_t_vtable const * virtual_table; 41 42 }; 42 extern struct __cfaehm_base_exception_t_vtable 43 ___cfaehm_base_exception_t_vtable_instance; 43 extern struct __cfa__parent_vtable __cfatid_exception_t; 44 44 45 45 … … 104 104 /* The first field must be a pointer to a virtual table. 105 105 * That virtual table must be a decendent of the base exception virtual table. 106 * The virtual table must point at the prober type-id. 107 * None of these can be enforced in an assertion. 106 108 */ 107 virtualT const & get_exception_vtable(exceptT *);108 // Always returns the virtual table for this type (associated types hack).109 109 }; 110 110 -
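The vtable's first field changes from a pointer to the parent vtable to a pointer to a type-id (__cfa__parent_vtable), and the base exception now exports only the type-id __cfatid_exception_t, whose parent is NULL. Matching an exception against a handler can then walk the type-id parent chain rather than the vtable chain. A hedged C sketch of that walk follows; note that in this changeset every generated type-id points directly at the base, so the deeper tid_io_* chain below is purely illustrative.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct type_id { const struct type_id * parent; };

// root of the hierarchy (parent == NULL), like __cfatid_exception_t
static const struct type_id tid_exception   = { NULL };
static const struct type_id tid_io_error    = { &tid_exception };
static const struct type_id tid_io_eof      = { &tid_io_error };
static const struct type_id tid_arith_error = { &tid_exception };

// A handler matches if the thrown exception's type-id chain passes through it.
static bool matches(const struct type_id * thrown, const struct type_id * handler) {
	for (const struct type_id * t = thrown; t != NULL; t = t->parent)
		if (t == handler) return true;
	return false;
}

int main(void) {
	printf("eof vs io_error : %d\n", matches(&tid_io_eof, &tid_io_error));    // 1
	printf("eof vs arith    : %d\n", matches(&tid_io_eof, &tid_arith_error)); // 0
	printf("eof vs exception: %d\n", matches(&tid_io_eof, &tid_exception));   // 1
	return 0;
}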
libcfa/src/exception.hfa
r857a1c6 rc8a0210 10 10 // Created On : Thu Apr 7 10:25:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : T ue Aug 4 16:22:00 202013 // Update Count : 312 // Last Modified On : Thr Apr 8 15:16:00 2021 13 // Update Count : 4 14 14 // 15 15 … … 18 18 // ----------------------------------------------------------------------------------------------- 19 19 20 // TRIVIAL_EXCEPTION_DECLARATION(exception_name); 21 // Declare a trivial exception, one that adds no fields or features. 22 // This will make the exception visible and may go in a .hfa or .cfa file. 23 #define TRIVIAL_EXCEPTION_DECLARATION(...) \ 24 _EXC_DISPATCH(_TRIVIAL_EXCEPTION_DECLARATION, __VA_ARGS__) 20 // EHM_EXCEPTION(exception_name)(fields...); 21 // Create an exception (a virtual structure that inherits from exception_t) 22 // with the given name and fields. 23 #define EHM_EXCEPTION(exception_name) \ 24 _EHM_TYPE_ID_STRUCT(exception_name, ); \ 25 _EHM_TYPE_ID_VALUE(exception_name, ); \ 26 _EHM_VIRTUAL_TABLE_STRUCT(exception_name, , ); \ 27 _EHM_EXCEPTION_STRUCT(exception_name, , ) 25 28 26 // TRIVIAL_EXCEPTION_INSTANCE(exception_name);27 // Create the trival exception. This must be used exactly once and should be used in a .cfa file,28 // as it creates the unique instance of the virtual table. 29 #define TRIVIAL_EXCEPTION_INSTANCE(...) _EXC_DISPATCH(_TRIVIAL_EXCEPTION_INSTANCE, __VA_ARGS__)29 // EHM_EXTERN_VTABLE(exception_name, table_name); 30 // Forward declare a virtual table called table_name for exception_name type. 31 #define EHM_EXTERN_VTABLE(exception_name, table_name) \ 32 _EHM_EXTERN_VTABLE(exception_name, , table_name) 30 33 31 // TRIVIAL_EXCEPTION(exception_name[, parent_name]); 32 // Does both of the above, a short hand if the exception is only used in one .cfa file. 33 // For legacy reasons this is the only one that official supports having a parent other than the 34 // base exception. This feature may be removed or changed. 35 #define TRIVIAL_EXCEPTION(...) \ 36 _EXC_DISPATCH(_TRIVIAL_EXCEPTION_DECLARATION, __VA_ARGS__); \ 37 _EXC_DISPATCH(_TRIVIAL_EXCEPTION_INSTANCE, __VA_ARGS__) 34 // EHM_VIRTUAL_TABLE(exception_name, table_name); 35 // Define a virtual table called table_name for exception_name type. 36 #define EHM_VIRTUAL_TABLE(exception_name, table_name) \ 37 _EHM_DEFINE_COPY(exception_name, ) \ 38 _EHM_DEFINE_MSG(exception_name, ) \ 39 _EHM_VIRTUAL_TABLE(exception_name, , table_name) 38 40 39 // FORALL_TRIVIAL_EXCEPTION(exception_name, (assertions...), (parameters...)); 40 // Forward declare a polymorphic but otherwise trivial exception type. You must provide the entire 41 // assertion list (exactly what would go in the forall clause) and parameters list (only the 42 // parameter names from the assertion list, same order and comma seperated). This should be 43 // visible where ever use the exception. This just generates the polymorphic framework, see 44 // POLY_VTABLE_DECLARATION to allow instantiations. 45 #define FORALL_TRIVIAL_EXCEPTION(exception_name, assertions, parameters) \ 46 _FORALL_TRIVIAL_EXCEPTION(exception_name, __cfaehm_base_exception_t, assertions, parameters, ) 41 // EHM_FORALL_EXCEPTION(exception_name, (assertions), (parameters))(fields...); 42 // As EHM_EXCEPTION but for polymorphic types instead of monomorphic ones. 43 // The assertions list should include all polymorphic parameters and 44 // assertions inside a parentisized list. Parameters should include all the 45 // polymorphic parameter names inside a parentisized list (same order). 
46 #define EHM_FORALL_EXCEPTION(exception_name, assertions, parameters) \ 47 _EHM_TYPE_ID_STRUCT(exception_name, forall assertions); \ 48 _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall assertions, parameters); \ 49 _EHM_EXCEPTION_STRUCT(exception_name, forall assertions, parameters) 47 50 48 // FORALL_TRIVIAL_INSTANCE(exception_name, (assertions...), (parameters...)) 49 // Create the forall trivial exception. The assertion list and parameters must match. 50 // There must be exactly one use of this in a program for each exception type. This just 51 // generates the polymorphic framework, see POLY_VTABLE_INSTANCE to allow instantiations. 52 #define FORALL_TRIVIAL_INSTANCE(exception_name, assertions, parameters) \ 53 _FORALL_CTOR0_INSTANCE(exception_name, assertions, parameters) 51 // EHM_FORALL_EXTERN_VTABLE(exception_name, (arguments), table_name); 52 // As EHM_EXTERN_VTABLE but for polymorphic types instead of monomorphic ones. 53 // Arguments should be the parentisized list of polymorphic arguments. 54 #define EHM_FORALL_EXTERN_VTABLE(exception_name, arguments, table_name) \ 55 _EHM_EXTERN_VTABLE(exception_name, arguments, table_name) 54 56 55 // DATA_EXCEPTION(exception_name)(fields...); 56 // Forward declare an exception that adds fields but no features. The added fields go in the 57 // second argument list. The virtual table instance must be provided later (see VTABLE_INSTANCE). 58 #define DATA_EXCEPTION(...) _EXC_DISPATCH(_DATA_EXCEPTION, __VA_ARGS__) 57 // EHM_FORALL_VIRTUAL_TABLE(exception_name, (arguments), table_name); 58 // As EHM_VIRTUAL_TABLE but for polymorphic types instead of monomorphic ones. 59 // Arguments should be the parentisized list of polymorphic arguments. 60 #define EHM_FORALL_VIRTUAL_TABLE(exception_name, arguments, table_name) \ 61 _EHM_TYPE_ID_VALUE(exception_name, arguments); \ 62 _EHM_DEFINE_COPY(exception_name, arguments) \ 63 _EHM_DEFINE_MSG(exception_name, arguments) \ 64 _EHM_VIRTUAL_TABLE(exception_name, arguments, table_name) 59 65 60 // FORALL_DATA_EXCEPTION(exception_name, (assertions...), (parameters...))(fields...); 61 // Define a polymorphic exception that adds fields but no additional features. The assertion list 62 // and matching parameters must match. Then you can give the list of fields. This should be 63 // visible where ever you use the exception. This just generates the polymorphic framework, see 64 // POLY_VTABLE_DECLARATION to allow instantiations. 65 #define FORALL_DATA_EXCEPTION(exception_name, assertions, parameters) \ 66 _FORALL_DATA_EXCEPTION(exception_name, __cfaehm_base_exception_t, assertions, parameters, ) 66 #define EHM_TYPE_ID(exception_name) _EHM_TYPE_ID_TYPE(exception_name) 67 67 68 // FORALL_DATA_INSTANCE(exception_name, (assertions...), (parameters...)) 69 // Create a polymorphic data exception. The assertion list and parameters must match. This should 70 // appear once in each program. This just generates the polymorphic framework, see 71 // POLY_VTABLE_INSTANCE to allow instantiations. 72 #define FORALL_DATA_INSTANCE(exception_name, assertions, parameters) \ 73 _FORALL_CTOR0_INSTANCE(exception_name, assertions, parameters) 74 75 // VTABLE_DECLARATION(exception_name)([new_features...]); 76 // Declare a virtual table type for an exception with exception_name. You may also add features 77 // (fields on the virtual table) by including them in the second list. 78 #define VTABLE_DECLARATION(...) 
_EXC_DISPATCH(_VTABLE_DECLARATION, __VA_ARGS__) 79 80 // VTABLE_INSTANCE(exception_name)(msg [, others...]); 81 // Create the instance of the virtual table. There must be exactly one instance of a virtual table 82 // for each exception type. This fills in most of the fields of the virtual table (uses ?=? and 83 // ^?{}) but you must provide the message function and any other fields added in the declaration. 84 #define VTABLE_INSTANCE(...) _EXC_DISPATCH(_VTABLE_INSTANCE, __VA_ARGS__) 85 86 // FORALL_VTABLE_DECLARATION(exception_name, (assertions...), (parameters...))([new_features...]); 87 // Declare a polymorphic virtual table type for an exception with exception_name, the given 88 // assertions and parameters. You may also add features (fields on the virtual table). This just 89 // generates the polymorphic framework, see POLY_VTABLE_DECLARATION to allow instantiations. 90 #define FORALL_VTABLE_DECLARATION(exception_name, assertions, parameters) \ 91 _FORALL_VTABLE_DECLARATION(exception_name, __cfaehm_base_exception_t, assertions, parameters, ) 92 93 // POLY_VTABLE_DECLARATION(exception_name, types...); 94 // Declares that an instantiation for this exception exists for the given types. This should be 95 // visible anywhere you use the instantiation of the exception is used. 96 #define POLY_VTABLE_DECLARATION(exception_name, ...) \ 97 VTABLE_TYPE(exception_name)(__VA_ARGS__) const & get_exception_vtable(exception_name(__VA_ARGS__) *); \ 98 extern VTABLE_TYPE(exception_name)(__VA_ARGS__) VTABLE_NAME(exception_name) 99 100 // POLY_VTABLE_INSTANCE(exception_name, types...)(msg [, others...]); 101 // Creates an instantiation for the given exception for the given types. This should occur only 102 // once in the entire program. You must fill in all features, message and any others given in the 103 // initial declaration. 104 #define POLY_VTABLE_INSTANCE(exception_name, ...) \ 105 _POLY_VTABLE_INSTANCE(exception_name, __cfaehm_base_exception_t, __VA_ARGS__) 106 107 // VTABLE_TYPE(exception_name) | VTABLE_NAME(exception_name) 108 // Get the name of the vtable type or the name of the vtable instance for an exception type. 109 #define VTABLE_TYPE(exception_name) struct _GLUE2(exception_name,_vtable) 110 #define VTABLE_NAME(exception_name) _GLUE3(_,exception_name,_vtable_instance) 111 112 // VTABLE_FIELD(exception_name); 113 // FORALL_VTABLE_FIELD(exception_name, (parameters-or-types)); 114 // The declaration of the virtual table field. Should be the first declaration in a virtual type. 115 #define VTABLE_FIELD(exception_name) VTABLE_TYPE(exception_name) const * virtual_table 116 #define FORALL_VTABLE_FIELD(exception_name, parameters) \ 117 VTABLE_TYPE(exception_name) parameters const * virtual_table 118 119 // VTABLE_INIT(object_reference, exception_name); 120 // Sets a virtual table field on an object to the virtual table instance for the type. 121 #define VTABLE_INIT(this, exception_name) (this).virtual_table = &VTABLE_NAME(exception_name) 122 123 // VTABLE_ASSERTION(exception_name, (parameters...)) 124 // The assertion that there is an instantiation of the vtable for the exception and types. 125 #define VTABLE_ASSERTION(exception_name, parameters) \ 126 { VTABLE_TYPE(exception_name) parameters VTABLE_NAME(exception_name); } 68 #define EHM_MATCH_ALL __cfa__parent_vtable 127 69 128 70 // IS_EXCEPTION(exception_name [, (...parameters)]) … … 135 77 #define IS_TERMINATION_EXCEPTION(...) 
_IS_EXCEPTION(is_termination_exception, __VA_ARGS__, , ~) 136 78 137 // All internal helper macros begin with an underscore. 138 #define _CLOSE(...) __VA_ARGS__ } 139 #define _GLUE2(left, right) left##right 140 #define _GLUE3(left, middle, right) left##middle##right 141 #define _EXC_DISPATCH(to, ...) to(__VA_ARGS__,__cfaehm_base_exception_t,) 142 #define _UNPACK(...) __VA_ARGS__ 79 // Macros starting with a leading underscore are internal. 143 80 144 #define _TRIVIAL_EXCEPTION_DECLARATION(exception_name, parent_name, ...) \ 145 _VTABLE_DECLARATION(exception_name, parent_name)(); \ 146 struct exception_name { \ 147 VTABLE_FIELD(exception_name); \ 148 }; \ 149 void ?{}(exception_name & this); \ 150 const char * _GLUE2(exception_name,_msg)(exception_name * this) 81 // Create an exception type definition. must be tailing, can be polymorphic. 82 #define _EHM_EXCEPTION_STRUCT(exception_name, forall_clause, parameters) \ 83 forall_clause struct exception_name { \ 84 _EHM_VTABLE_TYPE(exception_name) parameters const * virtual_table; \ 85 _CLOSE 151 86 152 #define _TRIVIAL_EXCEPTION_INSTANCE(exception_name, parent_name, ...) \ 153 void ?{}(exception_name & this) { \ 154 VTABLE_INIT(this, exception_name); \ 155 } \ 156 const char * _GLUE2(exception_name,_msg)(exception_name * this) { \ 157 return #exception_name; \ 158 } \ 159 _VTABLE_INSTANCE(exception_name, parent_name,)(_GLUE2(exception_name,_msg)) 87 // Create a (possibly polymorphic) virtual table forward declaration. 88 #define _EHM_EXTERN_VTABLE(exception_name, arguments, table_name) \ 89 extern const _EHM_VTABLE_TYPE(exception_name) arguments table_name 160 90 161 #define _FORALL_TRIVIAL_EXCEPTION(exception_name, parent_name, assertions, \ 162 parameters, parent_parameters) \ 163 _FORALL_VTABLE_DECLARATION(exception_name, parent_name, assertions, \ 164 parameters, parent_parameters)(); \ 165 forall assertions struct exception_name { \ 166 FORALL_VTABLE_FIELD(exception_name, parameters); \ 167 }; \ 168 _FORALL_CTOR0_DECLARATION(exception_name, assertions, parameters) 169 170 #define _FORALL_CTOR0_DECLARATION(exception_name, assertions, parameters) \ 171 forall(_UNPACK assertions | \ 172 is_exception(exception_name parameters, VTABLE_TYPE(exception_name) parameters)) \ 173 void ?{}(exception_name parameters & this) 174 175 #define _FORALL_CTOR0_INSTANCE(exception_name, assertions, parameters) \ 176 _FORALL_CTOR0_DECLARATION(exception_name, assertions, parameters) { \ 177 (this).virtual_table = &get_exception_vtable(&this); \ 91 // Create a (possibly polymorphic) virtual table definition. 92 #define _EHM_VIRTUAL_TABLE(exception_type, arguments, table_name) \ 93 const _EHM_VTABLE_TYPE(exception_type) arguments table_name @= { \ 94 .__cfavir_typeid : &_EHM_TYPE_ID_NAME(exception_type), \ 95 .size : sizeof(struct exception_type arguments), \ 96 .copy : copy, \ 97 .^?{} : ^?{}, \ 98 .msg : msg, \ 178 99 } 179 100 180 #define _DATA_EXCEPTION(exception_name, parent_name, ...) \ 181 _VTABLE_DECLARATION(exception_name, parent_name)(); \ 182 struct exception_name { \ 183 VTABLE_FIELD(exception_name); \ 184 _CLOSE 101 // Create a (possibly polymorphic) copy function from an assignment operator. 
102 #define _EHM_DEFINE_FORALL_COPY(exception_name, forall_clause, parameters) \ 103 forall_clause void copy(exception_name parameters * this, \ 104 exception_name parameters * that) { \ 105 *this = *that; \ 106 } 185 107 186 #define _FORALL_DATA_EXCEPTION(exception_name, parent_name, \ 187 assertions, parameters, parent_parameters) \ 188 _FORALL_VTABLE_DECLARATION(exception_name, parent_name, \ 189 assertions, parameters, parent_parameters)(); \ 190 _FORALL_CTOR0_DECLARATION(exception_name, assertions, parameters); \ 191 forall assertions struct exception_name { \ 192 FORALL_VTABLE_FIELD(exception_name, parameters); \ 193 _CLOSE 108 #define _EHM_DEFINE_COPY(exception_name, arguments) \ 109 void copy(exception_name arguments * this, exception_name arguments * that) { \ 110 *this = *that; \ 111 } 194 112 195 #define _VTABLE_DECLARATION(exception_name, parent_name, ...) \ 196 struct exception_name; \ 197 VTABLE_TYPE(exception_name); \ 198 VTABLE_TYPE(exception_name) const & get_exception_vtable(exception_name *); \ 199 extern VTABLE_TYPE(exception_name) VTABLE_NAME(exception_name); \ 200 VTABLE_TYPE(exception_name) { \ 201 VTABLE_TYPE(parent_name) const * parent; \ 202 size_t size; \ 203 void (*copy)(exception_name * this, exception_name * other); \ 204 void (*^?{})(exception_name & this); \ 205 const char * (*msg)(exception_name * this); \ 206 _CLOSE 113 // Create a (possibly polymorphic) msg function 114 #define _EHM_DEFINE_FORALL_MSG(exception_name, forall_clause, parameters) \ 115 forall_clause const char * msg(exception_name parameters * this) { \ 116 return #exception_name #parameters; \ 117 } 207 118 208 #define _VTABLE_INSTANCE(exception_name, parent_name, ...) \ 209 VTABLE_TYPE(exception_name) const & get_exception_vtable(exception_name *) { \ 210 return VTABLE_NAME(exception_name); \ 211 } \ 212 void _GLUE2(exception_name,_copy)(exception_name * this, exception_name * other) { \ 213 *this = *other; \ 214 } \ 215 VTABLE_TYPE(exception_name) VTABLE_NAME(exception_name) @= { \ 216 &VTABLE_NAME(parent_name), sizeof(exception_name), \ 217 _GLUE2(exception_name,_copy), ^?{}, \ 218 _CLOSE 119 #define _EHM_DEFINE_MSG(exception_name, arguments) \ 120 const char * msg(exception_name arguments * this) { \ 121 return #exception_name #arguments; \ 122 } 219 123 220 #define _FORALL_VTABLE_DECLARATION(exception_name, parent_name, assertions, \ 221 parameters, parent_parameters) \ 222 forall assertions struct exception_name; \ 223 forall assertions VTABLE_TYPE(exception_name) { \ 224 VTABLE_TYPE(parent_name) parent_parameters const * parent; \ 124 // Produces the C compatable name of the virtual table type for a virtual type. 125 #define _EHM_VTABLE_TYPE(type_name) struct _GLUE2(type_name,_vtable) 126 127 // Create the vtable type for exception name. 128 #define _EHM_VIRTUAL_TABLE_STRUCT(exception_name, forall_clause, parameters) \ 129 forall_clause struct exception_name; \ 130 forall_clause _EHM_VTABLE_TYPE(exception_name) { \ 131 _EHM_TYPE_ID_TYPE(exception_name) parameters const * __cfavir_typeid; \ 225 132 size_t size; \ 226 133 void (*copy)(exception_name parameters * this, exception_name parameters * other); \ 227 134 void (*^?{})(exception_name parameters & this); \ 228 135 const char * (*msg)(exception_name parameters * this); \ 229 _CLOSE136 } 230 137 231 #define _POLY_VTABLE_INSTANCE(exception_name, parent_name, ...) 
\ 232 extern VTABLE_TYPE(exception_name)(__VA_ARGS__) VTABLE_NAME(exception_name); \ 233 VTABLE_TYPE(exception_name)(__VA_ARGS__) const & get_exception_vtable( \ 234 exception_name(__VA_ARGS__) *) { \ 235 return VTABLE_NAME(exception_name); \ 236 } \ 237 void _GLUE2(exception_name,_copy)( \ 238 exception_name(__VA_ARGS__) * this, exception_name(__VA_ARGS__) * other) { \ 239 *this = *other; \ 240 } \ 241 VTABLE_TYPE(exception_name)(__VA_ARGS__) VTABLE_NAME(exception_name) @= { \ 242 &VTABLE_NAME(parent_name), sizeof(exception_name(__VA_ARGS__)), \ 243 _GLUE2(exception_name,_copy), ^?{}, \ 244 _CLOSE 138 // Define the function required to satify the trait for exceptions. 139 #define _EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \ 140 forall_clause inline void mark_exception( \ 141 exception_name parameters const &, \ 142 _EHM_VTABLE_TYPE(exception_name) parameters const &) {} \ 143 144 #define _EHM_TRAIT_FUNCTION2(exception_name, forall_clause, parameters) \ 145 forall_clause _EHM_VTABLE_TYPE(exception_name) parameters const & \ 146 get_exception_vtable(exception_name parameters const & this) 147 148 #define __EHM_TRAIT_FUNCTION(exception_name, forall_clause, parameters) \ 149 forall_clause inline _EHM_VTABLE_TYPE(exception_name) parameters const & \ 150 get_exception_vtable(exception_name parameters const & this) { \ 151 /* This comes before the structure definition, but we know the offset. */ \ 152 /* return (_EHM_VTABLE_TYPE(exception_name) parameters const &)this; */ \ 153 assert(false); \ 154 } 155 156 // Generates a new type-id structure. This is used to mangle the name of the 157 // type-id instance so it also includes polymorphic information. Must be the 158 // direct decendent of exception_t. 159 // The second field is used to recover type information about the exception. 160 #define _EHM_TYPE_ID_STRUCT(exception_name, forall_clause) \ 161 forall_clause _EHM_TYPE_ID_TYPE(exception_name) { \ 162 __cfa__parent_vtable const * parent; \ 163 } 164 165 // Generate a new type-id value. 166 #define _EHM_TYPE_ID_VALUE(exception_name, arguments) \ 167 __attribute__(( section(".gnu.linkonce." "__cfatid_" #exception_name) )) \ 168 _EHM_TYPE_ID_TYPE(exception_name) arguments const \ 169 _EHM_TYPE_ID_NAME(exception_name) = { \ 170 &__cfatid_exception_t, \ 171 } 172 173 // _EHM_TYPE_ID_STRUCT and _EHM_TYPE_ID_VALUE are the two that would need to 174 // be updated to extend the hierarchy if we are still using macros when that 175 // is added. 176 177 // Produce the C compatable name of the type-id type for an exception type. 178 #define _EHM_TYPE_ID_TYPE(exception_name) \ 179 struct _GLUE2(__cfatid_struct_, exception_name) 180 181 // Produce the name of the instance of the type-id for an exception type. 182 #define _EHM_TYPE_ID_NAME(exception_name) _GLUE2(__cfatid_,exception_name) 245 183 246 184 #define _IS_EXCEPTION(kind, exception_name, parameters, ...) \ 247 kind(exception_name parameters, VTABLE_TYPE(exception_name) parameters) 185 kind(exception_name parameters, _EHM_VTABLE_TYPE(exception_name) parameters) 186 187 // Internal helper macros: 188 #define _CLOSE(...) __VA_ARGS__ } 189 #define _GLUE2(left, right) left##right -
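Editor's note: the new _EHM_ macros replace the old parent-vtable chain with a separate per-type type-id object: the exception starts with a pointer to its vtable, the vtable starts with a pointer to the type-id, and the type-id points at the parent's type-id (currently always __cfatid_exception_t). A minimal C sketch of that layering, with illustrative names and the CFA destructor ^?{} rendered as a plain function pointer, might look like this; it is not part of the changeset.

    /* Hedged sketch of the data layout the _EHM_ macros generate; names are illustrative. */
    #include <stddef.h>

    struct __cfa__parent_vtable {                      /* root of the type-id chain */
        const struct __cfa__parent_vtable * parent;
    };

    /* _EHM_TYPE_ID_STRUCT: one type-id per exception type */
    struct __cfatid_struct_my_exc {
        const struct __cfa__parent_vtable * parent;    /* -> parent type's type-id */
    };

    /* _EHM_VIRTUAL_TABLE_STRUCT: the vtable's first field is the type-id */
    struct my_exc;
    struct my_exc_vtable {
        const struct __cfatid_struct_my_exc * __cfavir_typeid;
        size_t size;
        void (*copy)(struct my_exc * this, struct my_exc * other);
        void (*dtor)(struct my_exc * this);            /* ^?{} in CFA */
        const char * (*msg)(struct my_exc * this);
    };

    /* _EHM_EXCEPTION_STRUCT: the exception's first field is the vtable pointer */
    struct my_exc {
        const struct my_exc_vtable * virtual_table;
        int code;                                      /* user fields follow */
    };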
libcfa/src/fstream.cfa
r857a1c6 rc8a0210 321 321 322 322 323 EHM_VIRTUAL_TABLE(Open_Failure, Open_Failure_main_table); 323 324 void ?{}( Open_Failure & this, ofstream & ostream ) { 324 VTABLE_INIT(this, Open_Failure);325 this.virtual_table = &Open_Failure_main_table; 325 326 this.ostream = &ostream; 326 327 this.tag = 1; 327 328 } 328 329 void ?{}( Open_Failure & this, ifstream & istream ) { 329 VTABLE_INIT(this, Open_Failure);330 this.virtual_table = &Open_Failure_main_table; 330 331 this.istream = &istream; 331 332 this.tag = 0; 332 333 } 333 const char * Open_Failure_msg(Open_Failure * this) {334 return "Open_Failure";335 }336 VTABLE_INSTANCE(Open_Failure)(Open_Failure_msg);337 334 void throwOpen_Failure( ofstream & ostream ) { 338 335 Open_Failure exc = { ostream }; -
libcfa/src/fstream.hfa
r857a1c6 rc8a0210 133 133 134 134 135 DATA_EXCEPTION(Open_Failure)(135 EHM_EXCEPTION(Open_Failure)( 136 136 union { 137 137 ofstream * ostream; -
libcfa/src/iostream.cfa
r857a1c6 rc8a0210 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Mar 2 14:51:30202113 // Update Count : 1 15112 // Last Modified On : Tue Apr 13 13:05:24 2021 13 // Update Count : 1324 14 14 // 15 15 … … 195 195 int len = snprintf( buf, size, format, ##__VA_ARGS__, val ); \ 196 196 fmt( os, "%s", buf ); \ 197 if ( isfinite( val ) ) { /* if number, always print decimal point */ \197 if ( isfinite( val ) ) { /* if number, print decimal point when no fraction or exponent */ \ 198 198 for ( int i = 0;; i += 1 ) { \ 199 199 if ( i == len ) { fmt( os, "." ); break; } \ 200 if ( buf[i] == '.' ) break;\200 if ( buf[i] == '.' || buf[i] == 'e' || buf[i] == 'E' ) break; /* decimal point or scientific ? */ \ 201 201 } /* for */ \ 202 202 } /* if */ \ … … 525 525 } // distribution 526 526 527 IntegralFMTImpl( signed char, "% *hh ", "% *.*hh " ) 528 IntegralFMTImpl( unsigned char, "% *hh ", "% *.*hh " ) 529 IntegralFMTImpl( signed short int, "% *h ", "% *.*h " ) 530 IntegralFMTImpl( unsigned short int, "% *h ", "% *.*h " ) 531 IntegralFMTImpl( signed int, "% * ", "% *.* " ) 532 IntegralFMTImpl( unsigned int, "% * ", "% *.* " ) 533 IntegralFMTImpl( signed long int, "% *l ", "% *.*l " ) 534 IntegralFMTImpl( unsigned long int, "% *l ", "% *.*l " ) 535 IntegralFMTImpl( signed long long int, "% *ll ", "% *.*ll " ) 536 IntegralFMTImpl( unsigned long long int, "% *ll ", "% *.*ll " ) 537 538 #if 0 539 #if defined( __SIZEOF_INT128__ ) 540 // Default prefix for non-decimal prints is 0b, 0, 0x. 541 #define IntegralFMTImpl128( T, SIGNED, CODE, IFMTNP, IFMTP ) \ 542 forall( ostype & | ostream( ostype ) ) \ 543 static void base10_128( ostype & os, _Ostream_Manip(T) f ) { \ 544 if ( f.val > UINT64_MAX ) { \ 545 unsigned long long int lsig = f.val % P10_UINT64; \ 546 f.val /= P10_UINT64; /* msig */ \ 547 base10_128( os, f ); /* recursion */ \ 548 _Ostream_Manip(unsigned long long int) fmt @= { lsig, 0, 19, 'u', { .all : 0 } }; \ 549 fmt.flags.nobsdp = true; \ 550 /* printf( "fmt1 %c %lld %d\n", fmt.base, fmt.val, fmt.all ); */ \ 551 sepOff( os ); \ 552 (ostype &)(os | fmt); \ 553 } else { \ 554 /* printf( "fmt2 %c %lld %d\n", f.base, (unsigned long long int)f.val, f.all ); */ \ 555 _Ostream_Manip(SIGNED long long int) fmt @= { (SIGNED long long int)f.val, f.wd, f.pc, f.base, { .all : f.all } }; \ 556 (ostype &)(os | fmt); \ 557 } /* if */ \ 558 } /* base10_128 */ \ 559 forall( ostype & | ostream( ostype ) ) { \ 560 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 561 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \ 562 \ 563 if ( f.base == 'b' | f.base == 'B' | f.base == 'o' | f.base == 'x' | f.base == 'X' ) { \ 564 unsigned long long int msig = (unsigned long long int)(f.val >> 64); \ 565 unsigned long long int lsig = (unsigned long long int)(f.val); \ 566 _Ostream_Manip(SIGNED long long int) fmt @= { msig, f.wd, f.pc, f.base, { .all : f.all } }; \ 567 _Ostream_Manip(unsigned long long int) fmt2 @= { lsig, 0, 0, f.base, { .all : 0 } }; \ 568 if ( msig == 0 ) { \ 569 fmt.val = lsig; \ 570 (ostype &)(os | fmt); \ 571 } else { \ 572 fmt2.flags.pad0 = fmt2.flags.nobsdp = true; \ 573 if ( f.base == 'b' | f.base == 'B' ) { \ 574 if ( fmt.flags.pc && fmt.pc > 64 ) fmt.pc -= 64; else { fmt.flags.pc = false; fmt.pc = 0; } \ 575 if ( fmt.flags.left ) { \ 576 fmt.flags.left = false; \ 577 fmt.wd = 0; \ 578 /* printf( "L %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 579 fmt2.flags.left = true; \ 
580 int msigd = high1( msig ); \ 581 fmt2.wd = f.wd - (fmt.pc > msigd ? fmt.pc : msigd); \ 582 if ( ! fmt.flags.nobsdp ) fmt2.wd -= 2; /* compensate for 0b base specifier */ \ 583 if ( (int)fmt2.wd < 64 ) fmt2.wd = 64; /* cast deals with negative value */ \ 584 fmt2.flags.pc = true; fmt2.pc = 64; \ 585 } else { \ 586 if ( fmt.wd > 64 ) fmt.wd -= 64; \ 587 else fmt.wd = 1; \ 588 /* printf( "R %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 589 fmt2.wd = 64; \ 590 } /* if */ \ 591 /* printf( "C %llo %d %d '%c' %x\n", fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 592 (ostype &)(os | fmt | "" | fmt2); \ 593 } else if ( f.base == 'o' ) { \ 594 if ( fmt.flags.pc && fmt.pc > 22 ) fmt.pc -= 22; else { fmt.flags.pc = false; fmt.pc = 0; } \ 595 fmt.val = (unsigned long long int)fmt.val >> 2; \ 596 fmt2.val = ((msig & 0x3) << 1) + ((lsig & 0x8000000000000000U) != 0); \ 597 if ( fmt.flags.left ) { \ 598 fmt.flags.left = false; \ 599 fmt.wd = 0; \ 600 /* printf( "L %llo %llo %llo %d %d '%c' %x %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all, fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 601 (ostype &)(os | fmt | "" | fmt2); \ 602 sepOff( os ); \ 603 fmt2.flags.left = true; \ 604 int msigd = ceiling_div( high1( fmt.val ), 3 ); \ 605 fmt2.wd = f.wd - (fmt.pc > msigd ? fmt.pc : msigd); \ 606 if ( ! fmt.flags.nobsdp ) fmt2.wd -= 1; /* compensate for 0 base specifier */ \ 607 if ( (int)fmt2.wd < 21 ) fmt2.wd = 21; /* cast deals with negative value */ \ 608 fmt2.flags.pc = true; fmt2.pc = 21; \ 609 } else { \ 610 if ( fmt.wd > 22 ) fmt.wd -= 22; \ 611 else fmt.wd = 1; \ 612 /* printf( "R %llo %llo %llo %d %d '%c' %x %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all, fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 613 (ostype &)(os | fmt | "" | fmt2); \ 614 sepOff( os ); \ 615 fmt2.wd = 21; \ 616 } /* if */ \ 617 fmt2.val = lsig & 0x7fffffffffffffffU; \ 618 /* printf( "\nC %llo %d %d '%c' %x\n", fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 619 (ostype &)(os | fmt2); \ 620 } else { /* f.base == 'x' | f.base == 'X' */ \ 621 if ( fmt.flags.pc && fmt.pc > 16 ) fmt.pc -= 16; else { fmt.flags.pc = false; fmt.pc = 0; } \ 622 if ( fmt.flags.left ) { \ 623 fmt.flags.left = false; \ 624 fmt.wd = 0; \ 625 /* printf( "L %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 626 fmt2.flags.left = true; \ 627 int msigd = high1( msig ); \ 628 fmt2.wd = f.wd - (fmt.pc > msigd ? fmt.pc : msigd); \ 629 if ( ! fmt.flags.nobsdp ) fmt2.wd -= 2; /* compensate for 0x base specifier */ \ 630 if ( (int)fmt2.wd < 16 ) fmt2.wd = 16; /* cast deals with negative value */ \ 631 fmt2.flags.pc = true; fmt2.pc = 16; \ 632 } else { \ 633 if ( fmt.wd > 16 ) fmt.wd -= 16; \ 634 else fmt.wd = 1; \ 635 /* printf( "R %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 636 fmt2.wd = 16; \ 637 } /* if */ \ 638 /* printf( "C %llo %d %d '%c' %x\n", fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 639 (ostype &)(os | fmt | "" | fmt2); \ 640 } /* if */ \ 641 } /* if */ \ 642 } else { \ 643 if ( CODE == 'd' ) { \ 644 if ( f.val < 0 ) { fmt( os, "-" ); sepOff( os ); f.val = -f.val; f.flags.sign = false; } \ 645 } /* if */ \ 646 base10_128( os, f ); \ 647 } /* if */ \ 648 return os; \ 649 } /* ?|? 
*/ \ 650 void ?|?( ostype & os, _Ostream_Manip(T) f ) { (ostype &)(os | f); ends( os ); } \ 651 } // distribution 652 653 IntegralFMTImpl128( int128, signed, 'd', "% *ll ", "% *.*ll " ) 654 IntegralFMTImpl128( unsigned int128, unsigned, 'u', "% *ll ", "% *.*ll " ) 655 #endif // __SIZEOF_INT128__ 656 #endif // 0 657 658 #if 1 527 IntegralFMTImpl( signed char, " *hh ", " *.*hh " ) 528 IntegralFMTImpl( unsigned char, " *hh ", " *.*hh " ) 529 IntegralFMTImpl( signed short int, " *h ", " *.*h " ) 530 IntegralFMTImpl( unsigned short int, " *h ", " *.*h " ) 531 IntegralFMTImpl( signed int, " * ", " *.* " ) 532 IntegralFMTImpl( unsigned int, " * ", " *.* " ) 533 IntegralFMTImpl( signed long int, " *l ", " *.*l " ) 534 IntegralFMTImpl( unsigned long int, " *l ", " *.*l " ) 535 IntegralFMTImpl( signed long long int, " *ll ", " *.*ll " ) 536 IntegralFMTImpl( unsigned long long int, " *ll ", " *.*ll " ) 537 538 659 539 #if defined( __SIZEOF_INT128__ ) 660 540 // Default prefix for non-decimal prints is 0b, 0, 0x. … … 746 626 IntegralFMTImpl128( unsigned int128 ) 747 627 #endif // __SIZEOF_INT128__ 748 #endif // 0749 628 750 629 // *********************************** floating point *********************************** 751 630 752 #define PrintWithDP2( os, format, val, ... ) \ 631 static const char *suffixes[] = { 632 "y", "z", "a", "f", "p", "n", "u", "m", "", 633 "K", "M", "G", "T", "P", "E", "Z", "Y" 634 }; 635 #define SUFFIXES_START (-24) /* Smallest power for which there is a suffix defined. */ 636 #define SUFFIXES_END (SUFFIXES_START + (int)((sizeof(suffixes) / sizeof(char *) - 1) * 3)) 637 638 #define PrintWithDP2( os, format, ... ) \ 753 639 { \ 754 enum { size = 48 };\755 char buf[size]; \756 int bufbeg = 0, i, len = snprintf( buf, size, format, ##__VA_ARGS__, val );\757 if ( isfinite( val ) && (f.base != 'g' || f.pc != 0) ) { /* if number, print decimal point*/ \758 for ( i = 0; i < len && buf[i] != '.' && buf[i] != 'e' && buf[i] != 'E'; i += 1 ); /* decimal point or scientific ? */\759 if ( i == len && ! f.flags.nobsdp) { \760 if ( ! f.flags.left ) {\761 buf[i] = '.'; buf[i + 1] = '\0';\762 if ( buf[0] == ' ' ) bufbeg = 1; /* decimal point within width */\763 } else {\764 for ( i = 0; i < len && buf[i] != ' '; i += 1 ); /* trailing blank ? */\765 buf[i] = '.'; \766 if ( i == len ) buf[i + 1] = '\0';\640 if ( ! f.flags.eng ) { \ 641 len = snprintf( buf, size, format, ##__VA_ARGS__ ); \ 642 if ( isfinite( f.val ) && ( f.pc != 0 || ! f.flags.nobsdp ) ) { /* if number, print decimal point when no fraction or exponent */ \ 643 for ( i = 0; i < len && buf[i] != '.' && buf[i] != 'e' && buf[i] != 'E'; i += 1 ); /* decimal point or scientific ? */ \ 644 if ( i == len ) { \ 645 if ( ! f.flags.left ) { \ 646 buf[i] = '.'; buf[i + 1] = '\0'; \ 647 if ( buf[0] == ' ' ) bufbeg = 1; /* decimal point within width */ \ 648 } else { \ 649 for ( i = 0; i < len && buf[i] != ' '; i += 1 ); /* trailing blank ? */ \ 650 buf[i] = '.'; \ 651 if ( i == len ) buf[i + 1] = '\0'; \ 652 } /* if */ \ 767 653 } /* if */ \ 768 654 } /* if */ \ 655 } else { \ 656 int exp10, len2; \ 657 eng( f.val, f.pc, exp10 ); /* changes arguments */ \ 658 if ( ! f.flags.left && f.wd > 1 ) { \ 659 /* Exponent size (number of digits, 'e', optional minus sign) */ \ 660 f.wd -= lrint( floor( log10( abs( exp10 ) ) ) ) + 1 + 1 + (exp10 < 0 ? 
1 : 0); \ 661 if ( f.wd < 1 ) f.wd = 1; \ 662 } /* if */ \ 663 len = snprintf( buf, size, format, ##__VA_ARGS__ ); \ 664 if ( f.flags.left ) { \ 665 for ( len -= 1; len > 0 && buf[len] == ' '; len -= 1 ); \ 666 len += 1; \ 667 } /* if */ \ 668 if ( ! f.flags.nobsdp || (exp10 < SUFFIXES_START) || (exp10 > SUFFIXES_END) ) { \ 669 len2 = snprintf( &buf[len], size - len, "e%d", exp10 ); \ 670 } else { \ 671 len2 = snprintf( &buf[len], size - len, "%s", suffixes[(exp10 - SUFFIXES_START) / 3] ); \ 672 } /* if */ \ 673 if ( f.flags.left && len + len2 < f.wd ) buf[len + len2] = ' '; \ 769 674 } /* if */ \ 770 675 fmt( os, "%s", &buf[bufbeg] ); \ … … 773 678 #define FloatingPointFMTImpl( T, DFMTNP, DFMTP ) \ 774 679 forall( ostype & | ostream( ostype ) ) { \ 680 static void eng( T &value, int & pc, int & exp10 ) { \ 681 exp10 = lrint( floor( log10( abs( value ) ) ) ); /* round to desired precision */ \ 682 if ( exp10 < 0 ) exp10 -= 2; \ 683 exp10 = floor( exp10, 3 ); \ 684 value *= pow( 10.0, -exp10 ); \ 685 if ( pc <= 3 ) pc = 3; \ 686 } /* eng */ \ 687 \ 775 688 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 689 enum { size = 48 }; \ 690 char buf[size]; \ 691 int bufbeg = 0, i, len; \ 692 \ 776 693 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \ 777 char fmtstr[sizeof(DFMTP) ];/* sizeof includes '\0' */ \694 char fmtstr[sizeof(DFMTP) + 8]; /* sizeof includes '\0' */ \ 778 695 if ( ! f.flags.pc ) memcpy( &fmtstr, DFMTNP, sizeof(DFMTNP) ); \ 779 696 else memcpy( &fmtstr, DFMTP, sizeof(DFMTP) ); \ … … 789 706 fmtstr[sizeof(DFMTNP)-2] = f.base; /* sizeof includes '\0' */ \ 790 707 /* printf( "%g %d %s\n", f.val, f.wd, &fmtstr[star]); */ \ 791 PrintWithDP2( os, &fmtstr[star], f. val, f.wd) \708 PrintWithDP2( os, &fmtstr[star], f.wd, f.val ) \ 792 709 } else { /* precision */ \ 793 710 fmtstr[sizeof(DFMTP)-2] = f.base; /* sizeof includes '\0' */ \ 794 711 /* printf( "%g %d %d %s\n", f.val, f.wd, f.pc, &fmtstr[star] ); */ \ 795 PrintWithDP2( os, &fmtstr[star], f. val, f.wd, f.pc) \712 PrintWithDP2( os, &fmtstr[star], f.wd, f.pc, f.val ) \ 796 713 } /* if */ \ 797 714 return os; \ … … 801 718 } // distribution 802 719 803 FloatingPointFMTImpl( double, " % * ", "%*.* " )804 FloatingPointFMTImpl( long double, " % *L ", "%*.*L " )720 FloatingPointFMTImpl( double, " * ", " *.* " ) 721 FloatingPointFMTImpl( long double, " *L ", " *.*L " ) 805 722 806 723 // *********************************** character *********************************** -
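Editor's note: the new eng() helper rounds the decimal exponent down to a multiple of three and rescales the mantissa, which drives both the engineering-notation output and the SI-suffix ("unit") output. The following standalone C sketch mirrors that arithmetic under two assumptions: a finite non-zero value, and the same "subtract 2 then truncate" trick the diff uses so truncating division behaves like a floor for negative exponents. Build with -lm.

    /* Hedged sketch of engineering-notation scaling, mirroring eng() in the diff. */
    #include <math.h>
    #include <stdio.h>

    static const char * suffixes[] = {        /* valid for exponents -24 .. 24 */
        "y","z","a","f","p","n","u","m","","K","M","G","T","P","E","Z","Y" };

    static double eng_scale( double value, int * exp10 ) {
        int e = (int)lrint( floor( log10( fabs( value ) ) ) );
        if ( e < 0 ) e -= 2;                  /* make truncating division round toward -inf */
        e = (e / 3) * 3;                      /* round down to a multiple of 3 */
        *exp10 = e;
        return value * pow( 10.0, -e );
    }

    int main( void ) {
        int e;
        double m = eng_scale( 0.00042, &e );          /* 420e-6 */
        printf( "%ge%d  (%g%s)\n", m, e, m, suffixes[(e + 24) / 3] );
        m = eng_scale( 1234567.0, &e );               /* 1.234567e6 */
        printf( "%ge%d  (%g%s)\n", m, e, m, suffixes[(e + 24) / 3] );
        return 0;
    }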
libcfa/src/iostream.hfa
r857a1c6 rc8a0210 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Mar 2 14:05:08202113 // Update Count : 3 6912 // Last Modified On : Tue Apr 13 13:05:11 2021 13 // Update Count : 384 14 14 // 15 15 … … 158 158 struct _Ostream_Manip { 159 159 T val; // polymorphic base-type 160 unsigned int wd, pc;// width, precision160 int wd, pc; // width, precision 161 161 char base; // numeric base / floating-point style 162 162 union { 163 163 unsigned char all; 164 164 struct { 165 unsigned char eng:1; // engineering notation 165 166 unsigned char neg:1; // val is negative 166 167 unsigned char pc:1; // precision specified … … 222 223 _Ostream_Manip(T) hex( T val ) { return (_Ostream_Manip(T))@{ val, 1, 0, 'a', { .all : 0 } }; } \ 223 224 _Ostream_Manip(T) sci( T val ) { return (_Ostream_Manip(T))@{ val, 1, 0, 'e', { .all : 0 } }; } \ 224 _Ostream_Manip(T) wd( unsigned int w, T val ) { return (_Ostream_Manip(T))@{ val, w, 0, 'f', { .all : 0 } }; } \ 225 _Ostream_Manip(T) eng( T val ) { return (_Ostream_Manip(T))@{ val, 1, 0, 'g', { .flags.eng : true } }; } \ 226 _Ostream_Manip(T) wd( unsigned int w, T val ) { return (_Ostream_Manip(T))@{ val, w, 0, 'g', { .all : 0 } }; } \ 225 227 _Ostream_Manip(T) wd( unsigned int w, unsigned char pc, T val ) { return (_Ostream_Manip(T))@{ val, w, pc, 'f', { .flags.pc : true } }; } \ 226 228 _Ostream_Manip(T) ws( unsigned int w, unsigned char pc, T val ) { return (_Ostream_Manip(T))@{ val, w, pc, 'g', { .flags.pc : true } }; } \ 227 _Ostream_Manip(T) & wd( unsigned int w, _Ostream_Manip(T) & fmt ) { fmt.wd = w; return fmt; } \ 228 _Ostream_Manip(T) & wd( unsigned int w, unsigned char pc, _Ostream_Manip(T) & fmt ) { fmt.wd = w; fmt.pc = pc; fmt.flags.pc = true; return fmt; } \ 229 _Ostream_Manip(T) & wd( unsigned int w, _Ostream_Manip(T) & fmt ) { if ( fmt.flags.eng ) fmt.base = 'f'; fmt.wd = w; return fmt; } \ 230 _Ostream_Manip(T) & wd( unsigned int w, unsigned char pc, _Ostream_Manip(T) & fmt ) { if ( fmt.flags.eng ) fmt.base = 'f'; fmt.wd = w; fmt.pc = pc; fmt.flags.pc = true; return fmt; } \ 231 _Ostream_Manip(T) & ws( unsigned int w, unsigned char pc, _Ostream_Manip(T) & fmt ) { fmt.wd = w; fmt.pc = pc; fmt.flags.pc = true; return fmt; } \ 229 232 _Ostream_Manip(T) & left( _Ostream_Manip(T) & fmt ) { fmt.flags.left = true; return fmt; } \ 230 233 _Ostream_Manip(T) upcase( T val ) { return (_Ostream_Manip(T))@{ val, 1, 0, 'G', { .all : 0 } }; } \ … … 235 238 _Ostream_Manip(T) nodp( T val ) { return (_Ostream_Manip(T))@{ val, 1, 0, 'g', { .flags.nobsdp : true } }; } \ 236 239 _Ostream_Manip(T) & nodp( _Ostream_Manip(T) & fmt ) { fmt.flags.nobsdp = true; return fmt; } \ 240 _Ostream_Manip(T) unit( T val ) { return (_Ostream_Manip(T))@{ val, 1, 0, 'g', { .flags.nobsdp : true } }; } \ 241 _Ostream_Manip(T) & unit( _Ostream_Manip(T) & fmt ) { fmt.flags.nobsdp = true; return fmt; } \ 237 242 } /* distribution */ \ 238 243 forall( ostype & | ostream( ostype ) ) { \ -
libcfa/src/math.hfa
r857a1c6 rc8a0210 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // math --7 // math.hfa -- 8 8 // 9 9 // Author : Peter A. Buhr 10 10 // Created On : Mon Apr 18 23:37:04 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Aug 24 08:56:20 202013 // Update Count : 1 2612 // Last Modified On : Thu Apr 15 11:47:56 2021 13 // Update Count : 132 14 14 // 15 15 … … 100 100 long double _Complex log( long double _Complex x ) { return clogl( x ); } 101 101 102 // O(1) polymorphic integer log2, using clz, which returns the number of leading 0-bits, starting at the most 103 // significant bit (single instruction on x86) 104 int log2( unsigned int n ) { return n == 0 ? -1 : sizeof(n) * __CHAR_BIT__ - 1 - __builtin_clz( n ); } 105 long int log2( unsigned long int n ) { return n == 0 ? -1 : sizeof(n) * __CHAR_BIT__ - 1 - __builtin_clzl( n ); } 106 long long int log2( unsigned long long int n ) { return n == 0 ? -1 : sizeof(n) * __CHAR_BIT__ - 1 - __builtin_clzll( n ); } 102 107 float log2( float x ) { return log2f( x ); } 103 108 // extern "C" { double log2( double ); } -
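Editor's note: the new integer log2 overloads use the GCC/Clang builtin __builtin_clz to locate the highest set bit, giving a constant-time floor(log2 n) with -1 as the sentinel for zero (where the builtin itself is undefined). A plain C equivalent with a few spot checks:

    /* Hedged C equivalent of the integer log2 added to math.hfa (GCC/Clang builtin). */
    #include <assert.h>
    #include <limits.h>

    static int ilog2( unsigned int n ) {
        return n == 0 ? -1 : (int)sizeof(n) * CHAR_BIT - 1 - __builtin_clz( n );
    }

    int main( void ) {
        assert( ilog2( 1u ) == 0 );
        assert( ilog2( 1024u ) == 10 );            /* exact power of two */
        assert( ilog2( (1u << 17) | 5u ) == 17 );  /* floor for non-powers */
        assert( ilog2( 0u ) == -1 );               /* sentinel, matching the diff */
        return 0;
    }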
libcfa/src/time.hfa
r857a1c6 rc8a0210 10 10 // Created On : Wed Mar 14 23:18:57 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jun 17 16:13:00 202013 // Update Count : 66 312 // Last Modified On : Wed Apr 14 09:30:30 2021 13 // Update Count : 664 14 14 // 15 15 … … 29 29 static inline { 30 30 Duration ?=?( Duration & dur, __attribute__((unused)) zero_t ) { return dur{ 0 }; } 31 32 void ?{}( Duration & dur, timeval t ) with( dur ) { tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; } 33 Duration ?=?( Duration & dur, timeval t ) with( dur ) { 34 tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * (TIMEGRAN / 1_000_000LL); 35 return dur; 36 } // ?=? 37 38 void ?{}( Duration & dur, timespec t ) with( dur ) { tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec; } 39 Duration ?=?( Duration & dur, timespec t ) with( dur ) { 40 tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec; 41 return dur; 42 } // ?=? 31 43 32 44 Duration +?( Duration rhs ) with( rhs ) { return (Duration)@{ +tn }; } -
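Editor's note: the new Duration constructors and assignments convert POSIX timeval (microsecond) and timespec (nanosecond) values into the library's tick count. Assuming TIMEGRAN is nanoseconds per second, the arithmetic reduces to the following C, shown with a quick consistency check:

    /* Hedged sketch of the timeval/timespec -> nanosecond conversions in time.hfa. */
    #include <assert.h>
    #include <stdint.h>
    #include <sys/time.h>
    #include <time.h>

    #define TIMEGRAN 1000000000LL   /* assumption: ticks are nanoseconds */

    static int64_t from_timeval( struct timeval t ) {
        return (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * (TIMEGRAN / 1000000LL);
    }
    static int64_t from_timespec( struct timespec t ) {
        return (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec;
    }

    int main( void ) {
        struct timeval  tv = { .tv_sec = 2, .tv_usec = 500000 };     /* 2.5 s */
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 500000000 };  /* 2.5 s */
        assert( from_timeval( tv )  == 2500000000LL );
        assert( from_timespec( ts ) == 2500000000LL );
        return 0;
    }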
libcfa/src/virtual.c
r857a1c6 rc8a0210 15 15 16 16 #include "virtual.h" 17 #include "assert.h" 17 18 18 19 int __cfa__is_parent( struct __cfa__parent_vtable const * parent, 19 20 struct __cfa__parent_vtable const * child ) { 21 assert( child ); 20 22 do { 21 23 if ( parent == child ) … … 28 30 void * __cfa__virtual_cast( struct __cfa__parent_vtable const * parent, 29 31 struct __cfa__parent_vtable const * const * child ) { 32 assert( child ); 30 33 return (__cfa__is_parent(parent, *child)) ? (void *)child : (void *)0; 31 34 } -
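Editor's note: __cfa__virtual_cast succeeds only when the target type-id appears somewhere on the argument's parent chain; the added asserts simply reject a null child early instead of dereferencing it. A hedged C sketch of that parent-chain walk (the struct layout is assumed from the type-id macros above, and the function is renamed to make clear it is an illustration, not the library symbol):

    /* Hedged sketch of the virtual-cast runtime check. */
    #include <assert.h>
    #include <stddef.h>

    struct parent_vtable { const struct parent_vtable * parent; };

    /* Walk the parent chain of 'child' looking for 'parent'. */
    static int is_parent( const struct parent_vtable * parent,
                          const struct parent_vtable * child ) {
        assert( child );
        do {
            if ( parent == child ) return 1;
            child = child->parent;
        } while ( child );
        return 0;
    }

    int main( void ) {
        struct parent_vtable root  = { NULL };   /* e.g. the exception_t type-id   */
        struct parent_vtable leaf  = { &root };  /* e.g. a concrete exception      */
        struct parent_vtable other = { &root };

        assert(  is_parent( &root, &leaf ) );    /* cast up the chain succeeds     */
        assert(  is_parent( &leaf, &leaf ) );    /* cast to the same type succeeds */
        assert( !is_parent( &other, &leaf ) );   /* unrelated target -> null cast  */
        return 0;
    }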
src/AST/Expr.cpp
r857a1c6 rc8a0210 260 260 } 261 261 262 ConstantExpr * ConstantExpr::from_string( const CodeLocation & loc, const std::string & str ) { 263 const Type * charType = new BasicType( BasicType::Char ); 264 // Adjust the length of the string for the terminator. 265 const Expr * strSize = from_ulong( loc, str.size() + 1 ); 266 const Type * strType = new ArrayType( charType, strSize, FixedLen, StaticDim ); 267 const std::string strValue = "\"" + str + "\""; 268 return new ConstantExpr( loc, strType, strValue, std::nullopt ); 269 } 270 262 271 ConstantExpr * ConstantExpr::null( const CodeLocation & loc, const Type * ptrType ) { 263 272 return new ConstantExpr{ -
src/AST/Expr.hpp
r857a1c6 rc8a0210 438 438 long long int intValue() const; 439 439 440 /// generates a boolean constant of the given bool440 /// Generates a boolean constant of the given bool. 441 441 static ConstantExpr * from_bool( const CodeLocation & loc, bool b ); 442 /// generates an integer constant of the given int442 /// Generates an integer constant of the given int. 443 443 static ConstantExpr * from_int( const CodeLocation & loc, int i ); 444 /// generates an integer constant of the given unsigned long int444 /// Generates an integer constant of the given unsigned long int. 445 445 static ConstantExpr * from_ulong( const CodeLocation & loc, unsigned long i ); 446 /// generates a null pointer value for the given type. void * if omitted. 446 /// Generates a string constant from the given string (char type, unquoted string). 447 static ConstantExpr * from_string( const CodeLocation & loc, const std::string & string ); 448 /// Generates a null pointer value for the given type. void * if omitted. 447 449 static ConstantExpr * null( const CodeLocation & loc, const Type * ptrType = nullptr ); 448 450 -
src/Concurrency/Keywords.cc
r857a1c6 rc8a0210 42 42 43 43 namespace Concurrency { 44 inline static std::string getTypeIdName( std::string const & exception_name ) { 45 return exception_name.empty() ? std::string() : Virtual::typeIdType( exception_name ); 46 } 44 47 inline static std::string getVTableName( std::string const & exception_name ) { 45 return exception_name.empty() ? std::string() : Virtual::vtableTypeName( exception_name);48 return exception_name.empty() ? std::string() : Virtual::vtableTypeName( exception_name ); 46 49 } 47 50 … … 75 78 type_name( type_name ), field_name( field_name ), getter_name( getter_name ), 76 79 context_error( context_error ), exception_name( exception_name ), 80 typeid_name( getTypeIdName( exception_name ) ), 77 81 vtable_name( getVTableName( exception_name ) ), 78 82 needs_main( needs_main ), cast_target( cast_target ) {} … … 84 88 85 89 void handle( StructDecl * ); 90 void addTypeId( StructDecl * ); 86 91 void addVtableForward( StructDecl * ); 87 92 FunctionDecl * forwardDeclare( StructDecl * ); … … 99 104 const std::string context_error; 100 105 const std::string exception_name; 106 const std::string typeid_name; 101 107 const std::string vtable_name; 102 108 bool needs_main; … … 106 112 FunctionDecl * dtor_decl = nullptr; 107 113 StructDecl * except_decl = nullptr; 114 StructDecl * typeid_decl = nullptr; 108 115 StructDecl * vtable_decl = nullptr; 109 116 }; … … 392 399 else if ( !except_decl && exception_name == decl->name && decl->body ) { 393 400 except_decl = decl; 401 } 402 else if ( !typeid_decl && typeid_name == decl->name && decl->body ) { 403 typeid_decl = decl; 394 404 } 395 405 else if ( !vtable_decl && vtable_name == decl->name && decl->body ) { … … 448 458 if( !dtor_decl ) SemanticError( decl, context_error ); 449 459 450 addVtableForward( decl ); 460 if ( !exception_name.empty() ) { 461 if( !typeid_decl ) SemanticError( decl, context_error ); 462 if( !vtable_decl ) SemanticError( decl, context_error ); 463 464 addTypeId( decl ); 465 addVtableForward( decl ); 466 } 451 467 FunctionDecl * func = forwardDeclare( decl ); 452 468 ObjectDecl * field = addField( decl ); … … 454 470 } 455 471 472 void ConcurrentSueKeyword::addTypeId( StructDecl * decl ) { 473 assert( typeid_decl ); 474 StructInstType typeid_type( Type::Const, typeid_decl ); 475 typeid_type.parameters.push_back( new TypeExpr( 476 new StructInstType( noQualifiers, decl ) 477 ) ); 478 declsToAddBefore.push_back( Virtual::makeTypeIdInstance( &typeid_type ) ); 479 } 480 456 481 void ConcurrentSueKeyword::addVtableForward( StructDecl * decl ) { 457 if ( vtable_decl ) { 458 std::list< Expression * > poly_args = { 459 new TypeExpr( new StructInstType( noQualifiers, decl ) ), 460 }; 461 declsToAddBefore.push_back( Virtual::makeGetExceptionForward( 462 vtable_decl->makeInst( poly_args ), 463 except_decl->makeInst( poly_args ) 464 ) ); 465 declsToAddBefore.push_back( Virtual::makeVtableForward( 466 vtable_decl->makeInst( move( poly_args ) ) ) ); 467 // Its only an error if we want a vtable and don't have one. 468 } else if ( ! vtable_name.empty() ) { 469 SemanticError( decl, context_error ); 470 } 482 assert( vtable_decl ); 483 std::list< Expression * > poly_args = { 484 new TypeExpr( new StructInstType( noQualifiers, decl ) ), 485 }; 486 declsToAddBefore.push_back( Virtual::makeGetExceptionForward( 487 vtable_decl->makeInst( poly_args ), 488 except_decl->makeInst( poly_args ) 489 ) ); 490 declsToAddBefore.push_back( Virtual::makeVtableForward( 491 vtable_decl->makeInst( move( poly_args ) ) ) ); 471 492 } 472 493 -
src/Parser/parser.yy
r857a1c6 rc8a0210 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Apr 1 14:43:24 202113 // Update Count : 49 7812 // Last Modified On : Wed Apr 14 18:13:44 2021 13 // Update Count : 4983 14 14 // 15 15 … … 281 281 %token ATTRIBUTE EXTENSION // GCC 282 282 %token IF ELSE SWITCH CASE DEFAULT DO WHILE FOR BREAK CONTINUE GOTO RETURN 283 %token CHOOSE DISABLE ENABLE FALLTHRU FALLTHROUGH TRY THROW THROWRESUME AT WITH WHEN WAITFOR // CFA 283 %token CHOOSE FALLTHRU FALLTHROUGH WITH WHEN WAITFOR // CFA 284 %token DISABLE ENABLE TRY THROW THROWRESUME AT // CFA 284 285 %token ASM // C99, extension ISO/IEC 9899:1999 Section J.5.10(1) 285 286 %token ALIGNAS ALIGNOF GENERIC STATICASSERT // C11 -
src/SynTree/Constant.cc
r857a1c6 rc8a0210 42 42 } 43 43 44 Constant Constant::from_string( const std::string & str ) { 45 Type * charType = new BasicType( noQualifiers, BasicType::Char ); 46 // Adjust the length of the string for the terminator. 47 Expression * strSize = new ConstantExpr( Constant::from_ulong( str.size() + 1 ) ); 48 Type * strType = new ArrayType( noQualifiers, charType, strSize, false, false ); 49 const std::string strValue = "\"" + str + "\""; 50 return Constant( strType, strValue, std::nullopt ); 51 } 52 44 53 Constant Constant::null( Type * ptrtype ) { 45 54 if ( nullptr == ptrtype ) { -
src/SynTree/Constant.h
r857a1c6 rc8a0210 47 47 /// generates an integer constant of the given unsigned long int 48 48 static Constant from_ulong( unsigned long i ); 49 /// generates a string constant from the given string (char type, unquoted string) 50 static Constant from_string( const std::string & string ); 49 51 50 52 /// generates a null pointer value for the given type. void * if omitted. -
src/Virtual/ExpandCasts.cc
r857a1c6 rc8a0210 32 32 namespace Virtual { 33 33 34 static bool is_prefix( const std::string & prefix, const std::string& entire ) { 35 size_t const p_size = prefix.size(); 36 return (p_size < entire.size() && prefix == entire.substr(0, p_size)); 37 } 38 39 static bool is_type_id_object( const ObjectDecl * objectDecl ) { 40 const std::string & objectName = objectDecl->name; 41 return is_prefix( "__cfatid_", objectName ); 42 } 43 34 44 // Indented until the new ast code gets added. 35 45 … … 66 76 }; 67 77 68 /* Currently virtual depends on the rather brittle name matching between69 * a (strict/explicate) virtual type, its vtable type and the vtable70 * instance.71 * A stronger implementation, would probably keep track of those triads72 * and use that information to create better error messages.73 */74 75 namespace {76 77 std::string get_vtable_name( std::string const & name ) {78 return name + "_vtable";79 }80 81 std::string get_vtable_inst_name( std::string const & name ) {82 return std::string("_") + get_vtable_name( name ) + "_instance";83 }84 85 std::string get_vtable_name_root( std::string const & name ) {86 return name.substr(0, name.size() - 7 );87 }88 89 std::string get_vtable_inst_name_root( std::string const & name ) {90 return get_vtable_name_root( name.substr(1, name.size() - 10 ) );91 }92 93 bool is_vtable_inst_name( std::string const & name ) {94 return 17 < name.size() &&95 name == get_vtable_inst_name( get_vtable_inst_name_root( name ) );96 }97 98 } // namespace99 100 78 class VirtualCastCore { 101 Type * pointer_to_pvt(int level_of_indirection) {79 CastExpr * cast_to_type_id( Expression * expr, int level_of_indirection ) { 102 80 Type * type = new StructInstType( 103 81 Type::Qualifiers( Type::Const ), pvt_decl ); … … 105 83 type = new PointerType( noQualifiers, type ); 106 84 } 107 return type;85 return new CastExpr( expr, type ); 108 86 } 109 87 … … 141 119 142 120 void VirtualCastCore::premutate( ObjectDecl * objectDecl ) { 143 if ( is_vtable_inst_name( objectDecl->get_name() ) ) { 144 if ( ObjectDecl * existing = indexer.insert( objectDecl ) ) { 145 std::string msg = "Repeated instance of virtual table, original found at: "; 146 msg += existing->location.filename; 147 msg += ":" + toString( existing->location.first_line ); 148 SemanticError( objectDecl->location, msg ); 149 } 121 if ( is_type_id_object( objectDecl ) ) { 122 // Multiple definitions should be fine because of linkonce. 123 indexer.insert( objectDecl ); 150 124 } 151 125 } … … 170 144 } 171 145 172 /// Get the virtual table type used in a virtual cast. 173 Type * getVirtualTableType( const VirtualCastExpr * castExpr ) { 174 const Type * objectType; 175 if ( auto target = dynamic_cast<const PointerType *>( castExpr->result ) ) { 176 objectType = target->base; 177 } else if ( auto target = dynamic_cast<const ReferenceType *>( castExpr->result ) ) { 178 objectType = target->base; 146 /// Get the base type from a pointer or reference. 147 const Type * getBaseType( const Type * type ) { 148 if ( auto target = dynamic_cast<const PointerType *>( type ) ) { 149 return target->base; 150 } else if ( auto target = dynamic_cast<const ReferenceType *>( type ) ) { 151 return target->base; 179 152 } else { 180 castError( castExpr, "Virtual cast type must be a pointer or reference type." 
); 181 } 182 assert( objectType ); 183 184 const StructInstType * structType = dynamic_cast<const StructInstType *>( objectType ); 185 if ( nullptr == structType ) { 186 castError( castExpr, "Virtual cast type must refer to a structure type." ); 187 } 188 const StructDecl * structDecl = structType->baseStruct; 189 assert( structDecl ); 190 191 const ObjectDecl * fieldDecl = nullptr; 192 if ( 0 < structDecl->members.size() ) { 193 const Declaration * memberDecl = structDecl->members.front(); 153 return nullptr; 154 } 155 } 156 157 /* Attempt to follow the "head" field of the structure to get the... 158 * Returns nullptr on error, otherwise owner must free returned node. 159 */ 160 StructInstType * followHeadPointerType( 161 const StructInstType * oldType, 162 const std::string& fieldName, 163 const CodeLocation& errorLocation ) { 164 165 // First section of the function is all about trying to fill this variable in. 166 StructInstType * newType = nullptr; 167 { 168 const StructDecl * oldDecl = oldType->baseStruct; 169 assert( oldDecl ); 170 171 // Helper function for throwing semantic errors. 172 auto throwError = [&fieldName, &errorLocation, &oldDecl](const std::string& message) { 173 const std::string& context = "While following head pointer of " + 174 oldDecl->name + " named '" + fieldName + "': "; 175 SemanticError( errorLocation, context + message ); 176 }; 177 178 if ( oldDecl->members.empty() ) { 179 throwError( "Type has no fields." ); 180 } 181 const Declaration * memberDecl = oldDecl->members.front(); 194 182 assert( memberDecl ); 195 fieldDecl = dynamic_cast<const ObjectDecl *>( memberDecl ); 196 if ( fieldDecl && fieldDecl->name != "virtual_table" ) { 197 fieldDecl = nullptr; 198 } 199 } 200 if ( nullptr == fieldDecl ) { 201 castError( castExpr, "Virtual cast type must have a leading virtual_table field." ); 202 } 203 const PointerType * fieldType = dynamic_cast<const PointerType *>( fieldDecl->type ); 204 if ( nullptr == fieldType ) { 205 castError( castExpr, "Virtual cast type virtual_table field is not a pointer." ); 206 } 207 assert( fieldType->base ); 208 auto virtualStructType = dynamic_cast<const StructInstType *>( fieldType->base ); 209 assert( virtualStructType ); 210 211 // Here is the type, but if it is polymorphic it will have lost information. 212 // (Always a clone so that it may always be deleted.) 213 StructInstType * virtualType = virtualStructType->clone(); 214 if ( ! structType->parameters.empty() ) { 215 deleteAll( virtualType->parameters ); 216 virtualType->parameters.clear(); 217 cloneAll( structType->parameters, virtualType->parameters ); 218 } 219 return virtualType; 183 const ObjectDecl * fieldDecl = dynamic_cast<const ObjectDecl *>( memberDecl ); 184 assert( fieldDecl ); 185 if ( fieldName != fieldDecl->name ) { 186 throwError( "Head field did not have expected name." ); 187 } 188 189 const Type * fieldType = fieldDecl->type; 190 if ( nullptr == fieldType ) { 191 throwError( "Could not get head field." ); 192 } 193 const PointerType * ptrType = dynamic_cast<const PointerType *>( fieldType ); 194 if ( nullptr == ptrType ) { 195 throwError( "First field is not a pointer type." ); 196 } 197 assert( ptrType->base ); 198 newType = dynamic_cast<StructInstType *>( ptrType->base ); 199 if ( nullptr == newType ) { 200 throwError( "First field does not point to a structure type." ); 201 } 202 } 203 204 // Now we can look into copying it. 205 newType = newType->clone(); 206 if ( ! 
oldType->parameters.empty() ) { 207 deleteAll( newType->parameters ); 208 newType->parameters.clear(); 209 cloneAll( oldType->parameters, newType->parameters ); 210 } 211 return newType; 212 } 213 214 /// Get the type-id type from a virtual type. 215 StructInstType * getTypeIdType( const Type * type, const CodeLocation& errorLocation ) { 216 const StructInstType * typeInst = dynamic_cast<const StructInstType *>( type ); 217 if ( nullptr == typeInst ) { 218 return nullptr; 219 } 220 StructInstType * tableInst = 221 followHeadPointerType( typeInst, "virtual_table", errorLocation ); 222 if ( nullptr == tableInst ) { 223 return nullptr; 224 } 225 StructInstType * typeIdInst = 226 followHeadPointerType( tableInst, "__cfavir_typeid", errorLocation ); 227 delete tableInst; 228 return typeIdInst; 220 229 } 221 230 … … 228 237 assert( pvt_decl ); 229 238 230 const Type * vtable_type = getVirtualTableType( castExpr ); 231 ObjectDecl * table = indexer.lookup( vtable_type ); 232 if ( nullptr == table ) { 233 SemanticError( castLocation( castExpr ), 234 "Could not find virtual table instance." ); 239 const Type * base_type = getBaseType( castExpr->result ); 240 if ( nullptr == base_type ) { 241 castError( castExpr, "Virtual cast target must be a pointer or reference type." ); 242 } 243 const Type * type_id_type = getTypeIdType( base_type, castLocation( castExpr ) ); 244 if ( nullptr == type_id_type ) { 245 castError( castExpr, "Ill formed virtual cast target type." ); 246 } 247 ObjectDecl * type_id = indexer.lookup( type_id_type ); 248 delete type_id_type; 249 if ( nullptr == type_id ) { 250 castError( castExpr, "Virtual cast does not target a virtual type." ); 235 251 } 236 252 237 253 Expression * result = new CastExpr( 238 254 new ApplicationExpr( VariableExpr::functionPointer( vcast_decl ), { 239 new CastExpr( 240 new AddressExpr( new VariableExpr( table ) ), 241 pointer_to_pvt(1) 242 ), 243 new CastExpr( 244 castExpr->get_arg(), 245 pointer_to_pvt(2) 246 ) 255 cast_to_type_id( new AddressExpr( new VariableExpr( type_id ) ), 1 ), 256 cast_to_type_id( castExpr->get_arg(), 2 ), 247 257 } ), 248 258 castExpr->get_result()->clone() … … 252 262 castExpr->set_result( nullptr ); 253 263 delete castExpr; 254 delete vtable_type;255 264 return result; 256 265 } -
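Editor's note: after this change the virtual-cast pass no longer looks up a vtable instance by mangled name; it follows two "head" pointers (exception -> virtual_table -> __cfavir_typeid) to recover the type-id type, then looks up the matching __cfatid_ object. A hedged, compile-only C sketch of the call the cast lowers to (names such as my_exc and __cfatid_my_exc are illustrative; the real symbol is supplied by the CFA runtime):

    /* Hedged sketch of the lowered form of a virtual cast (illustrative names). */
    struct __cfa__parent_vtable;
    extern void * __cfa__virtual_cast(
        const struct __cfa__parent_vtable * parent,           /* target's type-id     */
        const struct __cfa__parent_vtable * const * child );  /* object's vtable slot */

    struct __cfatid_struct_my_exc;                             /* target's type-id type     */
    extern const struct __cfatid_struct_my_exc __cfatid_my_exc; /* target's type-id instance */
    struct my_exc;

    struct my_exc * cast_to_my_exc( void * obj ) {
        /* (virtual my_exc *)obj becomes roughly: */
        return (struct my_exc *)__cfa__virtual_cast(
            (const struct __cfa__parent_vtable *)&__cfatid_my_exc,
            (const struct __cfa__parent_vtable * const *)obj );
    }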
src/Virtual/Tables.cc
r857a1c6 rc8a0210 10 10 // Created On : Mon Aug 31 11:11:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : T ue Sep 3 14:56:00 202013 // Update Count : 012 // Last Modified On : Thr Apr 8 15:51:00 2021 13 // Update Count : 1 14 14 // 15 15 … … 22 22 namespace Virtual { 23 23 24 std::string typeIdType( std::string const & type_name ) { 25 return "__cfatid_struct_" + type_name; 26 } 27 28 std::string typeIdName( std::string const & type_name ) { 29 return "__cfatid_" + type_name; 30 } 31 32 static std::string typeIdTypeToInstance( std::string const & type_name ) { 33 return typeIdName(type_name.substr(16)); 34 } 35 24 36 std::string vtableTypeName( std::string const & name ) { 25 37 return name + "_vtable"; 38 } 39 40 std::string baseTypeName( std::string const & vtable_type_name ) { 41 return vtable_type_name.substr(0, vtable_type_name.size() - 7); 26 42 } 27 43 … … 81 97 inits.push_back( 82 98 new SingleInit( new AddressExpr( new NameExpr( parentInstance ) ) ) ); 99 } else if ( std::string( "__cfavir_typeid" ) == field->name ) { 100 std::string const & baseType = baseTypeName( vtableType->name ); 101 std::string const & typeId = typeIdName( baseType ); 102 inits.push_back( new SingleInit( new AddressExpr( new NameExpr( typeId ) ) ) ); 83 103 } else if ( std::string( "size" ) == field->name ) { 84 104 inits.push_back( new SingleInit( new SizeofExpr( objectType->clone() ) ) ); … … 147 167 } 148 168 169 ObjectDecl * makeTypeIdForward() { 170 return nullptr; 149 171 } 172 173 Attribute * linkonce( const std::string & subsection ) { 174 const std::string section = ".gnu.linkonce." + subsection; 175 return new Attribute( "section", { 176 new ConstantExpr( Constant::from_string( section ) ), 177 } ); 178 } 179 180 ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ) { 181 assert( typeIdType ); 182 StructInstType * type = typeIdType->clone(); 183 type->tq.is_const = true; 184 std::string const & typeid_name = typeIdTypeToInstance( typeIdType->name ); 185 return new ObjectDecl( 186 typeid_name, 187 noStorageClasses, 188 LinkageSpec::Cforall, 189 /* bitfieldWidth */ nullptr, 190 type, 191 new ListInit( { new SingleInit( 192 new AddressExpr( new NameExpr( "__cfatid_exception_t" ) ) 193 ) } ), 194 { linkonce( typeid_name ) }, 195 noFuncSpecifiers 196 ); 197 } 198 199 } -
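Editor's note: makeTypeIdInstance places each generated type-id in a .gnu.linkonce.* section, so several translation units may define the same type-id and the linker keeps a single copy; this is why the cast pass above now tolerates repeated definitions instead of reporting them. The emitted C looks roughly like the following (illustrative names; the root type-id is declared with the parent-vtable type here only to keep the sketch self-contained):

    /* Hedged sketch of the C emitted for a type-id instance (illustrative names). */
    struct __cfa__parent_vtable;
    extern const struct __cfa__parent_vtable __cfatid_exception_t;   /* root type-id */

    struct __cfatid_struct_my_exc {
        const struct __cfa__parent_vtable * parent;
    };

    /* Duplicate definitions in other units are merged by the linker. */
    __attribute__(( section(".gnu.linkonce.__cfatid_my_exc") ))
    const struct __cfatid_struct_my_exc __cfatid_my_exc = {
        &__cfatid_exception_t,
    };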
src/Virtual/Tables.h
r857a1c6 rc8a0210 10 10 // Created On : Mon Aug 31 11:07:00 2020 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : T ue Sep 1 14:29:00 202013 // Update Count : 012 // Last Modified On : Thr Apr 8 15:55:00 2021 13 // Update Count : 1 14 14 // 15 15 … … 22 22 namespace Virtual { 23 23 24 std::string typeIdType( std::string const & type_name ); 25 std::string typeIdName( std::string const & type_name ); 24 26 std::string vtableTypeName( std::string const & type_name ); 25 27 std::string instanceName( std::string const & vtable_name ); … … 50 52 */ 51 53 54 ObjectDecl * makeTypeIdInstance( StructInstType const * typeIdType ); 55 /* Build an instance of the type-id from the type of the type-id. 56 * TODO: Should take the parent type. Currently locked to the exception_t. 57 */ 58 52 59 } -
tests/.expect/KRfunctions.nast.arm64.txt
r857a1c6 rc8a0210 104 104 signed int _X1bi_2; 105 105 { 106 signed int *(*_tmp_cp_ret 4)(signed int __param_0, signed int __param_1);107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret 4=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret4)));106 signed int *(*_tmp_cp_ret6)(signed int __param_0, signed int __param_1); 107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret6=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret6))); 108 108 } 109 109 -
tests/.expect/KRfunctions.nast.x64.txt
r857a1c6 rc8a0210 104 104 signed int _X1bi_2; 105 105 { 106 signed int *(*_tmp_cp_ret 4)(signed int __param_0, signed int __param_1);107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret 4=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret4)));106 signed int *(*_tmp_cp_ret6)(signed int __param_0, signed int __param_1); 107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret6=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret6))); 108 108 } 109 109 -
tests/.expect/KRfunctions.nast.x86.txt
r857a1c6 rc8a0210 104 104 signed int _X1bi_2; 105 105 { 106 signed int *(*_tmp_cp_ret 4)(signed int __param_0, signed int __param_1);107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret 4=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret4)));106 signed int *(*_tmp_cp_ret6)(signed int __param_0, signed int __param_1); 107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret6=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret6))); 108 108 } 109 109 -
tests/.expect/KRfunctions.oast.x64.txt
r857a1c6 rc8a0210 104 104 signed int _X1bi_2; 105 105 { 106 signed int *(*_tmp_cp_ret 4)(signed int _X1xi_1, signed int _X1yi_1);107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret 4=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret4)));106 signed int *(*_tmp_cp_ret6)(signed int _X1xi_1, signed int _X1yi_1); 107 ((void)(_X1xFPi_ii__2=(((void)(_tmp_cp_ret6=_X3f10FFPi_ii__iPiPid__1(3, (&_X1ai_2), (&_X1bi_2), 3.5))) , _tmp_cp_ret6))); 108 108 } 109 109 -
tests/.expect/declarationSpecifier.arm64.txt
r857a1c6 rc8a0210 1147 1147 1148 1148 { 1149 signed int _tmp_cp_ret 4;1150 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret 4=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret4)) /* ?{} */);1149 signed int _tmp_cp_ret6; 1150 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6)) /* ?{} */); 1151 1151 } 1152 1152 -
tests/.expect/declarationSpecifier.x64.txt
r857a1c6 rc8a0210 1147 1147 1148 1148 { 1149 signed int _tmp_cp_ret 4;1150 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret 4=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret4)) /* ?{} */);1149 signed int _tmp_cp_ret6; 1150 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6)) /* ?{} */); 1151 1151 } 1152 1152 -
tests/.expect/declarationSpecifier.x86.txt
r857a1c6 rc8a0210 1147 1147 1148 1148 { 1149 signed int _tmp_cp_ret 4;1150 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret 4=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret4)) /* ?{} */);1149 signed int _tmp_cp_ret6; 1150 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6)) /* ?{} */); 1151 1151 } 1152 1152 -
tests/.expect/extension.arm64.txt
r857a1c6 rc8a0210 457 457 458 458 { 459 signed int _tmp_cp_ret 4;460 ((void)(((void)(_tmp_cp_ret 4=__extension__ _X4fredFi_i__1(3))) , _tmp_cp_ret4));459 signed int _tmp_cp_ret6; 460 ((void)(((void)(_tmp_cp_ret6=__extension__ _X4fredFi_i__1(3))) , _tmp_cp_ret6)); 461 461 } 462 462 -
tests/.expect/extension.x64.txt
r857a1c6 rc8a0210 457 457 458 458 { 459 signed int _tmp_cp_ret 4;460 ((void)(((void)(_tmp_cp_ret 4=__extension__ _X4fredFi_i__1(3))) , _tmp_cp_ret4));459 signed int _tmp_cp_ret6; 460 ((void)(((void)(_tmp_cp_ret6=__extension__ _X4fredFi_i__1(3))) , _tmp_cp_ret6)); 461 461 } 462 462 -
tests/.expect/extension.x86.txt
r857a1c6 rc8a0210 457 457 458 458 { 459 signed int _tmp_cp_ret 4;460 ((void)(((void)(_tmp_cp_ret 4=__extension__ _X4fredFi_i__1(3))) , _tmp_cp_ret4));459 signed int _tmp_cp_ret6; 460 ((void)(((void)(_tmp_cp_ret6=__extension__ _X4fredFi_i__1(3))) , _tmp_cp_ret6)); 461 461 } 462 462 -
tests/.expect/gccExtensions.arm64.txt
r857a1c6 rc8a0210 339 339 340 340 { 341 signed int _tmp_cp_ret 4;342 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret 4=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret4)) /* ?{} */);341 signed int _tmp_cp_ret6; 342 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6)) /* ?{} */); 343 343 } 344 344 -
tests/.expect/gccExtensions.x64.txt
r857a1c6 rc8a0210 339 339 340 340 { 341 signed int _tmp_cp_ret 4;342 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret 4=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret4)) /* ?{} */);341 signed int _tmp_cp_ret6; 342 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6)) /* ?{} */); 343 343 } 344 344 -
tests/.expect/gccExtensions.x86.txt
r857a1c6 rc8a0210 317 317 318 318 { 319 signed int _tmp_cp_ret 4;320 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret 4=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret4)) /* ?{} */);319 signed int _tmp_cp_ret6; 320 ((void)(_X12_retval_maini_1=(((void)(_tmp_cp_ret6=invoke_main(_X4argci_1, _X4argvPPc_1, _X4envpPPc_1))) , _tmp_cp_ret6)) /* ?{} */); 321 321 } 322 322 -
tests/.expect/math.nast.arm64.txt
r857a1c6 rc8a0210 17 17 4 16 18 18 log:0. 0. 0. 0.346574+0.785398i 0.346573590279973+0.785398163397448i 0.346573590279972654708616060729088+0.785398163397448309615660845819876i 19 log2:10 17 23 20 log2:10 17 23 21 log2:10 17 23 22 log2:10. 17. 23. 19 23 log2:3. 3. 3. 20 24 log10:2. 2. 2. -
tests/.expect/math.nast.x64.txt
r857a1c6 rc8a0210 17 17 4 16 18 18 log:0. 0. 0. 0.346574+0.785398i 0.346573590279973+0.785398163397448i 0.346573590279972655+0.78539816339744831i 19 log2:10 17 23 20 log2:10 17 23 21 log2:10 17 23 22 log2:10. 17. 23. 19 23 log2:3. 3. 3. 20 24 log10:2. 2. 2. -
tests/.expect/math.nast.x86.txt
r857a1c6 rc8a0210 17 17 4 16 18 18 log:0. 0. 0. 0.346574+0.785398i 0.346573590279973+0.785398163397448i 0.346573590279972655+0.78539816339744831i 19 log2:10 17 23 20 log2:10 17 23 21 log2:10 17 23 19 22 log2:3. 3. 3. 20 23 log10:2. 2. 2. -
tests/concurrent/futures/multi.cfa
r857a1c6 rc8a0210 5 5 6 6 thread Server { 7 int cnt, iteration;7 int pending, done, iteration; 8 8 multi_future(int) * request; 9 9 }; 10 10 11 11 void ?{}( Server & this ) { 12 this.cnt = 0; 12 ((thread&)this){"Server Thread"}; 13 this.pending = 0; 14 this.done = 0; 13 15 this.iteration = 0; 14 16 this.request = 0p; … … 16 18 17 19 void ^?{}( Server & mutex this ) { 18 assert(this. cnt== 0);19 this.request = 0p;20 assert(this.pending == 0); 21 this.request = 0p; 20 22 } 21 23 … … 24 26 } 25 27 26 void process( Server & mutex this ) { 27 fulfil( *this.request, this.iteration ); 28 this.iteration++; 28 void call( Server & mutex this ) { 29 this.pending++; 29 30 } 30 31 31 void call( Server & mutex this ) {32 this. cnt++;32 void finish( Server & mutex this ) { 33 this.done++; 33 34 } 34 35 35 void finish( Server & mutex this ) { }36 37 36 void main( Server & this ) { 37 MAIN_LOOP: 38 38 for() { 39 39 waitfor( ^?{} : this ) { 40 40 break; 41 41 } 42 or when( this.cnt < NFUTURES ) waitfor( call: this ) { 43 if (this.cnt == NFUTURES) { 44 process(this); 42 or waitfor( call: this ) { 43 if (this.pending != NFUTURES) { continue MAIN_LOOP; } 44 45 this.pending = 0; 46 fulfil( *this.request, this.iteration ); 47 this.iteration++; 48 49 for(NFUTURES) { 50 waitfor( finish: this ); 45 51 } 46 } 47 or waitfor( finish: this ) { 48 if (this.cnt == NFUTURES) { 49 reset( *this.request ); 50 this.cnt = 0; 51 } 52 53 reset( *this.request ); 54 this.done = 0; 52 55 } 53 56 } … … 57 60 Server * the_server; 58 61 thread Worker {}; 62 void ?{}(Worker & this) { 63 ((thread&)this){"Worker Thread"}; 64 } 65 59 66 multi_future(int) * shared_future; 60 67 -
tests/concurrent/spinaphore.cfa
r857a1c6 rc8a0210 49 49 void main(Unblocker & this) { 50 50 this.sum = 0; 51 unsigned me = (unsigned) &this;51 unsigned me = (unsigned)(uintptr_t)&this; 52 52 for(num_unblocks) { 53 53 $thread * t = V(sem, false); 54 54 Blocker * b = from_thread(t); 55 55 b->sum += me; 56 this.sum += (unsigned) b;56 this.sum += (unsigned)(uintptr_t)b; 57 57 unpark(t); 58 58 yield(random(10)); … … 73 73 for(i;num_blockers) { 74 74 for(num_blocks) 75 usum += (unsigned) &blockers[i];75 usum += (unsigned)(uintptr_t)&blockers[i]; 76 76 } 77 77 78 78 for(i;num_unblockers) { 79 79 for(num_unblocks) 80 bsum += (unsigned) &unblockers[i];80 bsum += (unsigned)(uintptr_t)&unblockers[i]; 81 81 } 82 82 -
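Editor's note: the test now narrows pointers through uintptr_t before truncating to unsigned. On LP64 targets a direct (unsigned) cast of a 64-bit pointer draws a pointer-to-int-cast warning; converting to uintptr_t first and then truncating makes the loss of the high bits explicit. A minimal C illustration:

    /* Hedged illustration of the pointer-to-unsigned cast fix in spinaphore.cfa. */
    #include <stdint.h>
    #include <stdio.h>

    int main( void ) {
        int x = 0;
        /* unsigned bad = (unsigned)&x;        // warns on LP64: pointer narrowed to int */
        unsigned ok = (unsigned)(uintptr_t)&x; /* explicit: integer conversion, then truncation */
        printf( "low 32 bits of &x: %#x\n", ok );
        return 0;
    }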
tests/errors/.expect/completeType.nast.arm64.txt
r857a1c6 rc8a0210 12 12 Application of 13 13 Variable Expression: *?: forall 14 DT: data type14 instance of type DT (not function type) 15 15 function 16 16 ... with parameters … … 21 21 ... with resolved type: 22 22 pointer to forall 23 [unbound]:data type 23 instance of type [unbound] (not function type) 24 function 25 ... with parameters 26 pointer to instance of type [unbound] (not function type) 27 ... returning 28 reference to instance of type [unbound] (not function type) 29 30 ... to arguments 31 Variable Expression: x: pointer to instance of struct A without body 32 ... with resolved type: 33 pointer to instance of struct A without body 34 35 ... with resolved type: 36 reference to instance of struct A without body 37 ... to: nothing 38 ... with resolved type: 39 void 40 (types: 41 void 42 ) 43 Environment:([unbound]DT) -> instance of struct A without body (no widening) 44 45 46 Cost ( 0, 1, 2, 0, 1, -1, 0 ): Generated Cast of: 47 Application of 48 Variable Expression: *?: forall 49 instance of type DT (not function type) 50 function 51 ... with parameters 52 pointer to instance of type DT (not function type) 53 ... returning 54 reference to instance of type DT (not function type) 55 56 ... with resolved type: 57 pointer to forall 58 instance of type [unbound] (not function type) 24 59 function 25 60 ... with parameters … … 41 76 void 42 77 ) 43 Environment:([unbound]) -> instance of struct B with body (no widening) 44 45 46 Cost ( 0, 1, 2, 0, 1, -1, 0 ): Generated Cast of: 47 Application of 48 Variable Expression: *?: forall 49 DT: data type 50 function 51 ... with parameters 52 pointer to instance of type DT (not function type) 53 ... returning 54 reference to instance of type DT (not function type) 55 56 ... with resolved type: 57 pointer to forall 58 [unbound]:data type 59 function 60 ... with parameters 61 pointer to instance of type [unbound] (not function type) 62 ... returning 63 reference to instance of type [unbound] (not function type) 64 65 ... to arguments 66 Variable Expression: x: pointer to instance of struct A without body 67 ... with resolved type: 68 pointer to instance of struct A without body 69 70 ... with resolved type: 71 reference to instance of struct A without body 72 ... to: nothing 73 ... with resolved type: 74 void 75 (types: 76 void 77 ) 78 Environment:([unbound]) -> instance of struct A without body (no widening) 78 Environment:([unbound]DT) -> instance of struct B with body (no widening) 79 79 80 80 … … 113 113 Cost ( 0, 1, 0, 0, 1, -5, 0 ): Application of 114 114 Variable Expression: baz: forall 115 T: sized data type 116 ... with assertions 117 ?=?: pointer to function 115 instance of type T (not function type) 116 with assertions 117 Variable Expression: ?=?: pointer to function 118 ... with parameters 119 reference to instance of type T (not function type) 120 instance of type T (not function type) 121 ... returning 122 instance of type T (not function type) 123 124 ... with resolved type: 125 pointer to function 118 126 ... with parameters 119 127 reference to instance of type T (not function type) … … 122 130 instance of type T (not function type) 123 131 124 ?{}: pointer to function 125 ... with parameters 126 reference to instance of type T (not function type) 127 ... returning nothing 128 129 ?{}: pointer to function 130 ... with parameters 131 reference to instance of type T (not function type) 132 instance of type T (not function type) 133 ... returning nothing 134 135 ^?{}: pointer to function 136 ... 
with parameters 137 reference to instance of type T (not function type) 138 ... returning nothing 139 132 Variable Expression: ?{}: pointer to function 133 ... with parameters 134 reference to instance of type T (not function type) 135 ... returning nothing 136 137 ... with resolved type: 138 pointer to function 139 ... with parameters 140 reference to instance of type T (not function type) 141 ... returning nothing 142 143 Variable Expression: ?{}: pointer to function 144 ... with parameters 145 reference to instance of type T (not function type) 146 instance of type T (not function type) 147 ... returning nothing 148 149 ... with resolved type: 150 pointer to function 151 ... with parameters 152 reference to instance of type T (not function type) 153 instance of type T (not function type) 154 ... returning nothing 155 156 Variable Expression: ^?{}: pointer to function 157 ... with parameters 158 reference to instance of type T (not function type) 159 ... returning nothing 160 161 ... with resolved type: 162 pointer to function 163 ... with parameters 164 reference to instance of type T (not function type) 165 ... returning nothing 140 166 141 167 function … … 146 172 ... with resolved type: 147 173 pointer to forall 148 [unbound]:sized data type 149 ... with assertions 150 ?=?: pointer to function 174 instance of type [unbound] (not function type) 175 with assertions 176 Variable Expression: ?=?: pointer to function 177 ... with parameters 178 reference to instance of type T (not function type) 179 instance of type T (not function type) 180 ... returning 181 instance of type T (not function type) 182 183 ... with resolved type: 184 pointer to function 151 185 ... with parameters 152 186 reference to instance of type [unbound] (not function type) … … 155 189 instance of type [unbound] (not function type) 156 190 157 ?{}: pointer to function 191 Variable Expression: ?{}: pointer to function 192 ... with parameters 193 reference to instance of type T (not function type) 194 ... returning nothing 195 196 ... with resolved type: 197 pointer to function 158 198 ... with parameters 159 199 reference to instance of type [unbound] (not function type) 160 200 ... returning nothing 161 201 162 ?{}: pointer to function 202 Variable Expression: ?{}: pointer to function 203 ... with parameters 204 reference to instance of type T (not function type) 205 instance of type T (not function type) 206 ... returning nothing 207 208 ... with resolved type: 209 pointer to function 163 210 ... with parameters 164 211 reference to instance of type [unbound] (not function type) … … 166 213 ... returning nothing 167 214 168 ^?{}: pointer to function 215 Variable Expression: ^?{}: pointer to function 216 ... with parameters 217 reference to instance of type T (not function type) 218 ... returning nothing 219 220 ... with resolved type: 221 pointer to function 169 222 ... with parameters 170 223 reference to instance of type [unbound] (not function type) 171 224 ... returning nothing 172 173 225 174 226 function … … 188 240 void 189 241 ) 190 Environment:([unbound] ) -> instance of type T (not function type) (no widening)242 Environment:([unbound]T) -> instance of type T (not function type) (no widening) 191 243 192 244 Could not satisfy assertion: 193 ?=?: pointer to function245 Variable Expression: ?=?: pointer to function 194 246 ... 
with parameters 195 reference to instance of type [unbound](not function type)196 instance of type [unbound](not function type)247 reference to instance of type T (not function type) 248 instance of type T (not function type) 197 249 ... returning 198 instance of type [unbound] (not function type) 199 250 instance of type T (not function type) 251 252 ... with resolved type: 253 pointer to function 254 ... with parameters 255 reference to instance of type [unbound] (not function type) 256 instance of type [unbound] (not function type) 257 ... returning 258 instance of type [unbound] (not function type) 259 -
tests/exceptions/.expect/resume-threads.txt
r857a1c6 rc8a0210 6 6 7 7 catch-all 8 9 throwing child exception10 inner parent match11 8 12 9 caught yin as yin -
tests/exceptions/.expect/resume.txt
r857a1c6 rc8a0210 6 6 7 7 catch-all 8 9 throwing child exception10 inner parent match11 8 12 9 caught yin as yin -
tests/exceptions/.expect/terminate-threads.txt
r857a1c6 rc8a0210 5 5 6 6 catch-all 7 8 throwing child exception9 inner parent match10 7 11 8 caught yin as yin -
tests/exceptions/.expect/terminate.txt
r857a1c6 rc8a0210 5 5 6 6 catch-all 7 8 throwing child exception9 inner parent match10 7 11 8 caught yin as yin -
tests/exceptions/cancel/coroutine.cfa
r857a1c6 rc8a0210 4 4 #include <exception.hfa> 5 5 6 TRIVIAL_EXCEPTION(internal_error); 6 EHM_EXCEPTION(internal_error)(); 7 EHM_VIRTUAL_TABLE(internal_error, internal_vt); 7 8 8 9 coroutine WillCancel {}; … … 14 15 void main(WillCancel & wc) { 15 16 printf("1"); 16 cancel_stack((internal_error){ });17 cancel_stack((internal_error){&internal_vt}); 17 18 printf("!"); 18 19 } … … 24 25 resume(cancel); 25 26 printf("4"); 26 } catchResume ( CoroutineCancelled(WillCancel)* error) {27 } catchResume (SomeCoroutineCancelled * error) { 27 28 printf("2"); 28 29 if ((virtual internal_error *)error->the_exception) { -
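The cancellation tests above illustrate the new exception declaration style: an exception type is introduced with EHM_EXCEPTION, paired with an EHM_VIRTUAL_TABLE instance, and every exception value is initialized with a pointer to that table before being thrown or used to cancel a stack. A minimal sketch of the coroutine case, assuming the usual coroutine.hfa and stdio.h includes (not shown in the hunk above):

    #include <stdio.h>
    #include <coroutine.hfa>
    #include <exception.hfa>

    EHM_EXCEPTION(internal_error)();                 // exception type with no extra fields
    EHM_VIRTUAL_TABLE(internal_error, internal_vt);  // its virtual-table instance

    coroutine WillCancel {};

    void main(WillCancel & wc) {
        // Cancelling the coroutine stack takes an exception carrying its virtual table.
        cancel_stack((internal_error){&internal_vt});
    }

    int main(void) {
        WillCancel cancel;
        try {
            resume(cancel);
        } catchResume (SomeCoroutineCancelled * error) {
            // The cancelling exception is reachable through the wrapper's the_exception field.
            if ((virtual internal_error *)error->the_exception) {
                printf("cancelled by internal_error\n");
            }
        }
    }

As in the test, the handler now names the monomorphic SomeCoroutineCancelled type rather than CoroutineCancelled(WillCancel).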
tests/exceptions/cancel/thread.cfa
r857a1c6 rc8a0210 4 4 #include <exception.hfa> 5 5 6 TRIVIAL_EXCEPTION(internal_error); 6 EHM_EXCEPTION(internal_error)(); 7 EHM_VIRTUAL_TABLE(internal_error, internal_vt); 7 8 8 9 thread WillCancel {}; … … 14 15 void main(WillCancel &) { 15 16 printf("1"); 16 cancel_stack((internal_error){ });17 cancel_stack((internal_error){&internal_vt}); 17 18 printf("!"); 18 19 } … … 25 26 join(cancel); 26 27 printf("4"); 27 } catchResume ( ThreadCancelled(WillCancel)* error) {28 } catchResume (SomeThreadCancelled * error) { 28 29 printf("2"); 29 30 if ((virtual internal_error *)error->the_exception) { … … 42 43 } 43 44 printf("4"); 44 } catchResume ( ThreadCancelled(WillCancel)* error) {45 } catchResume (SomeThreadCancelled * error) { 45 46 printf("2"); 46 47 if ((virtual internal_error *)error->the_exception) { -
tests/exceptions/conditional.cfa
r857a1c6 rc8a0210 6 6 #include <exception.hfa> 7 7 8 VTABLE_DECLARATION(num_error)(9 int (*code)(num_error *this);8 EHM_EXCEPTION(num_error)( 9 int num; 10 10 ); 11 11 12 struct num_error { 13 VTABLE_FIELD(num_error); 14 char * msg; 15 int num; 16 }; 17 18 const char * num_error_msg(num_error * this) { 19 if ( ! this->msg ) { 20 static const char * base = "Num Error with code: X"; 21 this->msg = (char *)malloc(22); 22 for (int i = 0 ; (this->msg[i] = base[i]) ; ++i); 23 } 24 this->msg[21] = '0' + this->num; 25 return this->msg; 26 } 27 void ?{}(num_error & this, int num) { 28 VTABLE_INIT(this, num_error); 29 this.msg = 0; 30 this.num = num; 31 } 32 void ?{}(num_error & this, num_error & other) { 33 this.virtual_table = other.virtual_table; 34 this.msg = 0; 35 this.num = other.num; 36 } 37 void ^?{}(num_error & this) { 38 if( this.msg ) free( this.msg ); 39 } 40 int num_error_code( num_error * this ) { 41 return this->num; 42 } 43 44 VTABLE_INSTANCE(num_error)( 45 num_error_msg, 46 num_error_code, 47 ); 12 EHM_VIRTUAL_TABLE(num_error, num_error_vt); 48 13 49 14 void caught_num_error(int expect, num_error * actual) { … … 52 17 53 18 int main(int argc, char * argv[]) { 54 num_error exc = 2;19 num_error exc = {&num_error_vt, 2}; 55 20 56 21 try { 57 22 throw exc; 58 } catch (num_error * error ; 3 == error-> virtual_table->code( error )) {23 } catch (num_error * error ; 3 == error->num ) { 59 24 caught_num_error(3, error); 60 } catch (num_error * error ; 2 == error-> virtual_table->code( error )) {25 } catch (num_error * error ; 2 == error->num ) { 61 26 caught_num_error(2, error); 62 27 } … … 64 29 try { 65 30 throwResume exc; 66 } catchResume (num_error * error ; 3 == error-> virtual_table->code( error )) {31 } catchResume (num_error * error ; 3 == error->num ) { 67 32 caught_num_error(3, error); 68 } catchResume (num_error * error ; 2 == error-> virtual_table->code( error )) {33 } catchResume (num_error * error ; 2 == error->num ) { 69 34 caught_num_error(2, error); 70 35 } -
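conditional.cfa also shows that conditional catch clauses now test the exception's own fields directly (error->num) instead of going through a hand-written code function on the virtual table. A reduced sketch of the conditional-catch idiom; the stdio.h include and the report helper name are mine:

    #include <stdio.h>
    #include <exception.hfa>

    EHM_EXCEPTION(num_error)(
        int num;                                    // payload used by the conditions below
    );
    EHM_VIRTUAL_TABLE(num_error, num_error_vt);

    void report(int expect, num_error * actual) {
        printf("expected %d, caught %d\n", expect, actual->num);
    }

    int main(void) {
        num_error exc = {&num_error_vt, 2};         // virtual-table pointer first, then fields
        try {
            throw exc;
        } catch (num_error * error ; 3 == error->num) {  // condition false: clause skipped
            report(3, error);
        } catch (num_error * error ; 2 == error->num) {  // condition true: clause selected
            report(2, error);
        }
    }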
tests/exceptions/data-except.cfa
r857a1c6 rc8a0210 3 3 #include <exception.hfa> 4 4 5 DATA_EXCEPTION(paired)(5 EHM_EXCEPTION(paired)( 6 6 int first; 7 7 int second; 8 8 ); 9 9 10 void ?{}(paired & this, int first, int second) { 11 VTABLE_INIT(this, paired); 12 this.first = first; 13 this.second = second; 14 } 10 EHM_VIRTUAL_TABLE(paired, paired_vt); 15 11 16 const char * paired_msg(paired * this) { 17 return "paired"; 18 } 19 20 VTABLE_INSTANCE(paired)(paired_msg); 21 22 void throwPaired(int first, int second) { 23 paired exc = {first, second}; 12 const char * virtual_msg(paired * this) { 13 return this->virtual_table->msg(this); 24 14 } 25 15 26 16 int main(int argc, char * argv[]) { 27 paired except = { 3, 13};17 paired except = {&paired_vt, 3, 13}; 28 18 29 19 try { 30 20 throw except; 31 21 } catch (paired * exc) { 32 printf("%s(%d, %d)\n", paired_msg(exc), exc->first, exc->second);22 printf("%s(%d, %d)\n", virtual_msg(exc), exc->first, exc->second); 33 23 ++exc->first; 34 24 } 35 25 36 printf("%s(%d, %d)\n", paired_msg(&except), except.first, except.second);26 printf("%s(%d, %d)\n", virtual_msg(&except), except.first, except.second); 37 27 38 28 try { 39 29 throwResume except; 40 30 } catchResume (paired * exc) { 41 printf("%s(%d, %d)\n", paired_msg(exc), exc->first, exc->second);31 printf("%s(%d, %d)\n", virtual_msg(exc), exc->first, exc->second); 42 32 ++exc->first; 43 33 } 44 34 45 printf("%s(%d, %d)\n", paired_msg(&except), except.first, except.second);35 printf("%s(%d, %d)\n", virtual_msg(&except), except.first, except.second); 46 36 } -
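data-except.cfa keeps a per-type message, but it is now reached through the exception's virtual table (this->virtual_table->msg(this)) rather than a free function, and mutations made through the caught pointer remain visible in the original object. A condensed sketch of the resumption half, with stdio.h assumed:

    #include <stdio.h>
    #include <exception.hfa>

    EHM_EXCEPTION(paired)(
        int first;
        int second;
    );
    EHM_VIRTUAL_TABLE(paired, paired_vt);

    int main(void) {
        paired except = {&paired_vt, 3, 13};
        try {
            throwResume except;                      // resumption: control returns after the handler
        } catchResume (paired * exc) {
            // msg is dispatched through the virtual table of the dynamic type.
            printf("%s(%d, %d)\n", exc->virtual_table->msg(exc), exc->first, exc->second);
            ++exc->first;                            // visible in `except` below
        }
        printf("first is now %d\n", except.first);
    }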
tests/exceptions/defaults.cfa
r857a1c6 rc8a0210 4 4 #include <exception.hfa> 5 5 6 DATA_EXCEPTION(log_message)(6 EHM_EXCEPTION(log_message)( 7 7 char * msg; 8 8 ); 9 9 10 void ?{}(log_message & this, char * msg) { 11 VTABLE_INIT(this, log_message); 12 this.msg = msg; 13 } 14 15 const char * get_log_message(log_message * this) { 10 _EHM_DEFINE_COPY(log_message, ) 11 const char * msg(log_message * this) { 16 12 return this->msg; 17 13 } 18 19 VTABLE_INSTANCE(log_message)(get_log_message); 14 _EHM_VIRTUAL_TABLE(log_message, , log_vt); 20 15 21 16 // Logging messages don't have to be handled. … … 28 23 // We can catch log: 29 24 try { 30 throwResume (log_message){ "Should be printed.\n"};25 throwResume (log_message){&log_vt, "Should be printed.\n"}; 31 26 } catchResume (log_message * this) { 32 27 printf("%s", this->virtual_table->msg(this)); 33 28 } 34 29 // But we don't have to: 35 throwResume (log_message){ "Should not be printed.\n"};30 throwResume (log_message){&log_vt, "Should not be printed.\n"}; 36 31 } 37 32 38 33 // I don't have a good use case for doing the same with termination. 39 TRIVIAL_EXCEPTION(jump);34 EHM_EXCEPTION(jump)(); 40 35 void defaultTerminationHandler(jump &) { 41 36 printf("jump default handler.\n"); 42 37 } 43 38 39 EHM_VIRTUAL_TABLE(jump, jump_vt); 40 44 41 void jump_test(void) { 45 42 try { 46 throw (jump){ };43 throw (jump){&jump_vt}; 47 44 } catch (jump * this) { 48 45 printf("jump catch handler.\n"); 49 46 } 50 throw (jump){ };47 throw (jump){&jump_vt}; 51 48 } 52 49 53 TRIVIAL_EXCEPTION(first); 54 TRIVIAL_EXCEPTION(unhandled_exception); 50 EHM_EXCEPTION(first)(); 51 EHM_VIRTUAL_TABLE(first, first_vt); 52 53 EHM_EXCEPTION(unhandled_exception)(); 54 EHM_VIRTUAL_TABLE(unhandled_exception, unhandled_vt); 55 55 56 56 void unhandled_test(void) { 57 57 forall(T &, V & | is_exception(T, V)) 58 58 void defaultTerminationHandler(T &) { 59 throw (unhandled_exception){ };59 throw (unhandled_exception){&unhandled_vt}; 60 60 } 61 61 void defaultTerminationHandler(unhandled_exception &) { … … 63 63 } 64 64 try { 65 throw (first){ };65 throw (first){&first_vt}; 66 66 } catch (unhandled_exception * t) { 67 67 printf("Catch unhandled_exception.\n"); … … 69 69 } 70 70 71 TRIVIAL_EXCEPTION(second); 71 EHM_EXCEPTION(second)(); 72 EHM_VIRTUAL_TABLE(second, second_vt); 72 73 73 74 void cross_test(void) { 74 75 void defaultTerminationHandler(first &) { 75 76 printf("cross terminate default\n"); 76 throw (second){ };77 throw (second){&second_vt}; 77 78 } 78 79 void defaultResumptionHandler(first &) { 79 80 printf("cross resume default\n"); 80 throwResume (second){ };81 throwResume (second){&second_vt}; 81 82 } 82 83 try { 83 84 printf("cross terminate throw\n"); 84 throw (first){ };85 throw (first){&first_vt}; 85 86 } catch (second *) { 86 87 printf("cross terminate catch\n"); … … 88 89 try { 89 90 printf("cross resume throw\n"); 90 throwResume (first){ };91 throwResume (first){&first_vt}; 91 92 } catchResume (second *) { 92 93 printf("cross resume catch\n"); -
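defaults.cfa exercises the fallback handlers: a resumption nobody catches goes to defaultResumptionHandler (so log messages may simply be dropped), and an uncaught termination goes to defaultTerminationHandler, which can be overridden for a specific exception type. A trimmed sketch of the termination side, using only names from the diff plus an assumed stdio.h include:

    #include <stdio.h>
    #include <exception.hfa>

    EHM_EXCEPTION(jump)();
    EHM_VIRTUAL_TABLE(jump, jump_vt);

    // Override chosen when a jump terminates without a matching handler.
    void defaultTerminationHandler(jump &) {
        printf("jump default handler.\n");
    }

    int main(void) {
        try {
            throw (jump){&jump_vt};     // caught by the clause below
        } catch (jump * this) {
            printf("jump catch handler.\n");
        }
        throw (jump){&jump_vt};         // uncaught: routed to the default handler above
    }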
tests/exceptions/finally.cfa
r857a1c6 rc8a0210 4 4 #include "except-io.hfa" 5 5 6 TRIVIAL_EXCEPTION(myth); 6 EHM_EXCEPTION(myth)(); 7 8 EHM_VIRTUAL_TABLE(myth, myth_vt); 7 9 8 10 int main(int argc, char * argv[]) { 9 myth exc ;11 myth exc = {&myth_vt}; 10 12 11 13 try { -
tests/exceptions/interact.cfa
r857a1c6 rc8a0210 4 4 #include "except-io.hfa" 5 5 6 TRIVIAL_EXCEPTION(star); 7 TRIVIAL_EXCEPTION(moon); 6 EHM_EXCEPTION(star)(); 7 EHM_EXCEPTION(moon)(); 8 9 EHM_VIRTUAL_TABLE(star, star_vt); 10 EHM_VIRTUAL_TABLE(moon, moon_vt); 8 11 9 12 int main(int argc, char * argv[]) { 10 13 // Resume falls back to terminate. 11 14 try { 12 throwResume (star){ };15 throwResume (star){&star_vt}; 13 16 } catch (star *) { 14 17 printf("caught as termination\n"); … … 17 20 try { 18 21 loud_region a = "try block with resume throw"; 19 throwResume (star){ };22 throwResume (star){&star_vt}; 20 23 } catch (star *) { 21 24 printf("caught as termination\n"); … … 29 32 try { 30 33 try { 31 throw (star){ };34 throw (star){&star_vt}; 32 35 } catchResume (star *) { 33 36 printf("resume catch on terminate\n"); … … 43 46 try { 44 47 try { 45 throwResume (star){ };48 throwResume (star){&star_vt}; 46 49 } catch (star *) { 47 50 printf("terminate catch on resume\n"); … … 58 61 try { 59 62 try { 60 throw (star){ };63 throw (star){&star_vt}; 61 64 } catchResume (star *) { 62 65 printf("inner resume catch (error)\n"); … … 75 78 try { 76 79 try { 77 throwResume (star){ };80 throwResume (star){&star_vt}; 78 81 } catch (star *) { 79 82 printf("inner termination catch\n"); … … 94 97 try { 95 98 printf("throwing resume moon\n"); 96 throwResume (moon){ };99 throwResume (moon){&moon_vt}; 97 100 } catch (star *) { 98 101 printf("termination catch\n"); 99 102 } 100 103 printf("throwing resume star\n"); 101 throwResume (star){ };104 throwResume (star){&star_vt}; 102 105 } catchResume (star *) { 103 106 printf("resumption star catch\n"); … … 105 108 } catchResume (moon *) { 106 109 printf("resumption moon catch, will terminate\n"); 107 throw (star){ };110 throw (star){&star_vt}; 108 111 } 109 112 } catchResume (star *) { -
tests/exceptions/polymorphic.cfa
r857a1c6 rc8a0210 3 3 #include <exception.hfa> 4 4 5 FORALL_TRIVIAL_EXCEPTION(proxy, (T), (T)); 6 FORALL_TRIVIAL_INSTANCE(proxy, (U), (U)) 5 EHM_FORALL_EXCEPTION(proxy, (T&), (T))(); 7 6 8 const char * msg(proxy(int) * this) { return "proxy(int)"; } 9 const char * msg(proxy(char) * this) { return "proxy(char)"; } 10 POLY_VTABLE_INSTANCE(proxy, int)(msg); 11 POLY_VTABLE_INSTANCE(proxy, char)(msg); 7 EHM_FORALL_VIRTUAL_TABLE(proxy, (int), proxy_int); 8 EHM_FORALL_VIRTUAL_TABLE(proxy, (char), proxy_char); 12 9 13 10 void proxy_test(void) { 11 proxy(int) an_int = {&proxy_int}; 12 proxy(char) a_char = {&proxy_char}; 13 14 14 try { 15 throw (proxy(int)){};15 throw an_int; 16 16 } catch (proxy(int) *) { 17 17 printf("terminate catch\n"); … … 19 19 20 20 try { 21 throwResume (proxy(char)){};21 throwResume a_char; 22 22 } catchResume (proxy(char) *) { 23 23 printf("resume catch\n"); … … 25 25 26 26 try { 27 throw (proxy(char)){};27 throw a_char; 28 28 } catch (proxy(int) *) { 29 29 printf("caught proxy(int)\n"); … … 33 33 } 34 34 35 FORALL_DATA_EXCEPTION(cell, (T), (T))(35 EHM_FORALL_EXCEPTION(cell, (T), (T))( 36 36 T data; 37 37 ); 38 38 39 FORALL_DATA_INSTANCE(cell, (T), (T)) 40 41 const char * msg(cell(int) * this) { return "cell(int)"; } 42 const char * msg(cell(char) * this) { return "cell(char)"; } 43 const char * msg(cell(bool) * this) { return "cell(bool)"; } 44 POLY_VTABLE_INSTANCE(cell, int)(msg); 45 POLY_VTABLE_INSTANCE(cell, char)(msg); 46 POLY_VTABLE_INSTANCE(cell, bool)(msg); 39 EHM_FORALL_VIRTUAL_TABLE(cell, (int), int_cell); 40 EHM_FORALL_VIRTUAL_TABLE(cell, (char), char_cell); 41 EHM_FORALL_VIRTUAL_TABLE(cell, (bool), bool_cell); 47 42 48 43 void cell_test(void) { 49 44 try { 50 cell(int) except; 51 except.data = -7; 45 cell(int) except = {&int_cell, -7}; 52 46 throw except; 53 47 } catch (cell(int) * error) { … … 56 50 57 51 try { 58 cell(bool) ball; 59 ball.data = false; 52 cell(bool) ball = {&bool_cell, false}; 60 53 throwResume ball; 61 54 printf("%i\n", ball.data); -
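polymorphic.cfa covers the forall variants: EHM_FORALL_EXCEPTION declares a generic exception and EHM_FORALL_VIRTUAL_TABLE instantiates one table per concrete type argument, so cell(int) and cell(char) are distinct types at catch time. A trimmed sketch reusing the names from the diff (stdio.h assumed):

    #include <stdio.h>
    #include <exception.hfa>

    EHM_FORALL_EXCEPTION(cell, (T), (T))(
        T data;                                     // generic payload
    );

    EHM_FORALL_VIRTUAL_TABLE(cell, (int), int_cell);
    EHM_FORALL_VIRTUAL_TABLE(cell, (char), char_cell);

    int main(void) {
        try {
            cell(int) except = {&int_cell, -7};
            throw except;
        } catch (cell(char) *) {                    // different instantiation: not matched
            printf("caught cell(char)\n");
        } catch (cell(int) * error) {               // matched
            printf("caught cell(int) holding %d\n", error->data);
        }
    }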
tests/exceptions/resume.cfa
r857a1c6 rc8a0210 4 4 #include "except-io.hfa" 5 5 6 TRIVIAL_EXCEPTION(yin); 7 TRIVIAL_EXCEPTION(yang); 8 TRIVIAL_EXCEPTION(zen); 9 TRIVIAL_EXCEPTION(moment_of, zen); 6 EHM_EXCEPTION(yin)(); 7 EHM_EXCEPTION(yang)(); 8 EHM_EXCEPTION(zen)(); 9 10 EHM_VIRTUAL_TABLE(yin, yin_vt); 11 EHM_VIRTUAL_TABLE(yang, yang_vt); 12 EHM_VIRTUAL_TABLE(zen, zen_vt); 10 13 11 14 void in_void(void); 12 15 13 16 int main(int argc, char * argv[]) { 17 yin a_yin = {&yin_vt}; 18 yang a_yang = {&yang_vt}; 19 zen a_zen = {&zen_vt}; 20 14 21 // The simple throw catchResume test. 15 22 try { 16 23 loud_exit a = "simple try clause"; 17 24 printf("simple throw\n"); 18 throwResume (zen){};25 throwResume a_zen; 19 26 printf("end of try clause\n"); 20 27 } catchResume (zen * error) { … … 26 33 // Throw catch-all test. 27 34 try { 28 throwResume (zen){};35 throwResume a_zen; 29 36 } catchResume (exception_t * error) { 30 37 printf("catch-all\n"); 31 }32 printf("\n");33 34 // Catch a parent of the given exception.35 try {36 printf("throwing child exception\n");37 throwResume (moment_of){};38 } catchResume (zen *) {39 printf("inner parent match\n");40 } catchResume (moment_of *) {41 printf("outer exact match\n");42 38 } 43 39 printf("\n"); … … 46 42 try { 47 43 try { 48 throwResume (yin){};44 throwResume a_yin; 49 45 } catchResume (zen *) { 50 46 printf("caught yin as zen\n"); … … 62 58 loud_exit a = "rethrow inner try"; 63 59 printf("rethrow inner try\n"); 64 throwResume (zen){};60 throwResume a_zen; 65 61 } catchResume (zen *) { 66 62 loud_exit a = "rethrowing catch clause"; … … 77 73 try { 78 74 try { 79 throwResume (yin){};75 throwResume a_yin; 80 76 } catchResume (yin *) { 81 77 printf("caught yin, will throw yang\n"); 82 throwResume (yang){};78 throwResume a_yang; 83 79 } catchResume (yang *) { 84 80 printf("caught exception from same try\n"); … … 93 89 try { 94 90 printf("throwing first exception\n"); 95 throwResume (yin){};91 throwResume a_yin; 96 92 } catchResume (yin *) { 97 93 printf("caught first exception\n"); 98 94 try { 99 95 printf("throwing second exception\n"); 100 throwResume (yang){};96 throwResume a_yang; 101 97 } catchResume (yang *) { 102 98 printf("caught second exception\n"); … … 114 110 try { 115 111 try { 116 throwResume (zen){};117 throwResume (zen){};112 throwResume a_zen; 113 throwResume a_zen; 118 114 } catchResume (zen *) { 119 115 printf("inner catch\n"); 120 116 } 121 throwResume (zen){};117 throwResume a_zen; 122 118 } catchResume (zen *) { 123 119 printf("outer catch\n"); … … 130 126 // Do a throw and rethrow in a void function. 131 127 void in_void(void) { 128 zen a_zen = {&zen_vt}; 132 129 try { 133 130 try { 134 131 printf("throw\n"); 135 throwResume (zen){};132 throwResume a_zen; 136 133 } catchResume (zen *) { 137 134 printf("rethrow\n"); -
tests/exceptions/terminate.cfa
r857a1c6 rc8a0210 4 4 #include "except-io.hfa" 5 5 6 TRIVIAL_EXCEPTION(yin); 7 TRIVIAL_EXCEPTION(yang); 8 TRIVIAL_EXCEPTION(zen); 9 TRIVIAL_EXCEPTION(moment_of, zen); 6 EHM_EXCEPTION(yin)(); 7 EHM_EXCEPTION(yang)(); 8 EHM_EXCEPTION(zen)(); 9 10 EHM_VIRTUAL_TABLE(yin, yin_vt); 11 EHM_VIRTUAL_TABLE(yang, yang_vt); 12 EHM_VIRTUAL_TABLE(zen, zen_vt); 10 13 11 14 void in_void(void); 12 15 13 16 int main(int argc, char * argv[]) { 17 yin a_yin = {&yin_vt}; 18 yang a_yang = {&yang_vt}; 19 zen a_zen = {&zen_vt}; 20 14 21 // The simple throw catch test. 15 22 try { 16 23 loud_exit a = "simple try clause"; 17 24 printf("simple throw\n"); 18 throw (zen){};25 throw a_zen; 19 26 printf("end of try clause\n"); 20 27 } catch (zen * error) { … … 26 33 // Throw catch-all test. 27 34 try { 28 throw (zen){};35 throw a_zen; 29 36 } catch (exception_t * error) { 30 37 printf("catch-all\n"); 31 }32 printf("\n");33 34 // Catch a parent of the given exception.35 try {36 printf("throwing child exception\n");37 throw (moment_of){};38 } catch (zen *) {39 printf("inner parent match\n");40 } catch (moment_of *) {41 printf("outer exact match\n");42 38 } 43 39 printf("\n"); … … 46 42 try { 47 43 try { 48 throw (yin){};44 throw a_yin; 49 45 } catch (zen *) { 50 46 printf("caught yin as zen\n"); … … 62 58 loud_exit a = "rethrow inner try"; 63 59 printf("rethrow inner try\n"); 64 throw (zen){};60 throw a_zen; 65 61 } catch (zen *) { 66 62 loud_exit a = "rethrowing catch clause"; … … 77 73 try { 78 74 try { 79 throw (yin){};75 throw a_yin; 80 76 } catch (yin *) { 81 77 printf("caught yin, will throw yang\n"); 82 throw (yang){};78 throw a_yang; 83 79 } catch (yang *) { 84 80 printf("caught exception from same try\n"); … … 93 89 try { 94 90 printf("throwing first exception\n"); 95 throw (yin){};91 throw a_yin; 96 92 } catch (yin *) { 97 93 printf("caught first exception\n"); 98 94 try { 99 95 printf("throwing second exception\n"); 100 throw (yang){};96 throw a_yang; 101 97 } catch (yang *) { 102 98 printf("caught second exception\n"); … … 114 110 try { 115 111 try { 116 throw (zen){};117 throw (zen){};112 throw a_zen; 113 throw a_zen; 118 114 } catch (zen *) { 119 115 printf("inner catch\n"); 120 116 } 121 throw (zen){};117 throw a_zen; 122 118 } catch (zen *) { 123 119 printf("outer catch\n"); … … 130 126 // Do a throw and rethrow in a void function. 131 127 void in_void(void) { 128 zen a_zen = {&zen_vt}; 132 129 try { 133 130 try { 134 131 printf("throw\n"); 135 throw (zen){};132 throw a_zen; 136 133 } catch (zen *) { 137 134 printf("rethrow\n"); -
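terminate.cfa (and its resume counterpart above) shows the core shape after this change: every exception value names its virtual table explicitly, whether it is bound to a variable first or thrown as a compound literal, and exception_t still works as a catch-all. A minimal sketch of both spellings, with stdio.h assumed:

    #include <stdio.h>
    #include <exception.hfa>

    EHM_EXCEPTION(zen)();
    EHM_VIRTUAL_TABLE(zen, zen_vt);

    int main(void) {
        zen a_zen = {&zen_vt};          // named exception value, as in the updated tests
        try {
            throw a_zen;
        } catch (zen * error) {
            printf("caught zen\n");
        }

        try {
            throw (zen){&zen_vt};       // compound-literal form, as in trash.cfa
        } catch (exception_t * error) {
            printf("catch-all\n");      // exception_t matches any exception
        }
    }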
tests/exceptions/trash.cfa
r857a1c6 rc8a0210 3 3 #include <exception.hfa> 4 4 5 TRIVIAL_EXCEPTION(yin); 6 TRIVIAL_EXCEPTION(yang); 5 EHM_EXCEPTION(yin)(); 6 EHM_EXCEPTION(yang)(); 7 8 EHM_VIRTUAL_TABLE(yin, yin_vt); 9 EHM_VIRTUAL_TABLE(yang, yang_vt); 7 10 8 11 int main(int argc, char * argv[]) { 9 12 try { 10 13 try { 11 throw (yin){ };14 throw (yin){&yin_vt}; 12 15 } finally { 13 16 try { 14 throw (yang){ };17 throw (yang){&yang_vt}; 15 18 } catch (yin *) { 16 19 printf("inner yin\n"); -
tests/exceptions/type-check.cfa
r857a1c6 rc8a0210 3 3 #include <exception.hfa> 4 4 5 TRIVIAL_EXCEPTION(truth);5 EHM_EXCEPTION(truth)(); 6 6 7 7 int main(int argc, char * argv[]) { -
tests/exceptions/virtual-cast.cfa
r857a1c6 rc8a0210 12 12 #include <assert.h> 13 13 14 15 16 // Hand defined alpha virtual type: 17 struct __cfatid_struct_alpha { 18 __cfa__parent_vtable const * parent; 19 }; 20 21 __attribute__(( section(".gnu.linkonce.__cfatid_alpha") )) 22 struct __cfatid_struct_alpha __cfatid_alpha = { 23 (__cfa__parent_vtable *)0, 24 }; 25 14 26 struct alpha_vtable { 15 alpha_vtable const * const parent;27 struct __cfatid_struct_alpha const * const __cfavir_typeid; 16 28 char (*code)(void); 17 29 }; … … 27 39 28 40 41 // Hand defined beta virtual type: 42 struct __cfatid_struct_beta { 43 __cfatid_struct_alpha const * parent; 44 }; 45 46 __attribute__(( section(".gnu.linkonce.__cfatid_beta") )) 47 struct __cfatid_struct_beta __cfatid_beta = { 48 &__cfatid_alpha, 49 }; 50 29 51 struct beta_vtable { 30 alpha_vtable const * const parent;52 struct __cfatid_struct_beta const * const __cfavir_typeid; 31 53 char (*code)(void); 32 54 }; … … 42 64 43 65 66 // Hand defined gamma virtual type: 67 struct __cfatid_struct_gamma { 68 __cfatid_struct_beta const * parent; 69 }; 70 71 __attribute__(( section(".gnu.linkonce.__cfatid_gamma") )) 72 struct __cfatid_struct_gamma __cfatid_gamma = { 73 &__cfatid_beta, 74 }; 75 44 76 struct gamma_vtable { 45 beta_vtable const * const parent;77 struct __cfatid_struct_gamma const * const __cfavir_typeid; 46 78 char (*code)(void); 47 79 }; … … 57 89 58 90 extern "C" { 59 alpha_vtable _alpha_vtable_instance = { 0, ret_a };60 beta_vtable _beta_vtable_instance = { &_ alpha_vtable_instance, ret_b };61 gamma_vtable _gamma_vtable_instance = { &_ beta_vtable_instance, ret_g };91 alpha_vtable _alpha_vtable_instance = { &__cfatid_alpha, ret_a }; 92 beta_vtable _beta_vtable_instance = { &__cfatid_beta, ret_b }; 93 gamma_vtable _gamma_vtable_instance = { &__cfatid_gamma, ret_g }; 62 94 } 63 95 -
tests/exceptions/virtual-poly.cfa
r857a1c6 rc8a0210 8 8 #include <assert.h> 9 9 10 11 struct __cfatid_struct_mono_base { 12 __cfa__parent_vtable const * parent; 13 }; 14 15 __attribute__(( section(".gnu.linkonce.__cfatid_mono_base") )) 16 struct __cfatid_struct_mono_base __cfatid_mono_base = { 17 (__cfa__parent_vtable *)0, 18 }; 19 10 20 struct mono_base_vtable { 11 mono_base_vtable const * const parent;21 __cfatid_struct_mono_base const * const __cfavir_typeid; 12 22 }; 13 23 … … 17 27 18 28 forall(T) 29 struct __cfatid_struct_mono_child { 30 __cfatid_struct_mono_base const * parent; 31 }; 32 33 forall(T) 19 34 struct mono_child_vtable { 20 mono_base_vtable const * const parent;35 __cfatid_struct_mono_child(T) const * const __cfavir_typeid; 21 36 }; 22 37 … … 26 41 }; 27 42 28 mono_base_vtable _mono_base_vtable_instance @= { 0 }; 43 __cfatid_struct_mono_child(int) __cfatid_mono_child @= { 44 &__cfatid_mono_base, 45 }; 46 29 47 mono_child_vtable(int) _mono_child_vtable_instance @= { 30 &_ mono_base_vtable_instance48 &__cfatid_mono_child, 31 49 }; 32 50 … … 37 55 } 38 56 57 58 forall(U) 59 struct __cfatid_struct_poly_base { 60 __cfa__parent_vtable const * parent; 61 }; 62 39 63 forall(U) 40 64 struct poly_base_vtable { 41 poly_base_vtable(U) const * const parent;65 __cfatid_struct_poly_base(U) const * const __cfavir_typeid; 42 66 }; 43 67 … … 48 72 49 73 forall(V) 74 struct __cfatid_struct_poly_child { 75 __cfatid_struct_poly_base(V) const * parent; 76 }; 77 78 forall(V) 50 79 struct poly_child_vtable { 51 poly_base_vtable(V) const * const parent;80 __cfatid_struct_poly_child(V) const * const __cfavir_typeid; 52 81 }; 53 82 … … 57 86 }; 58 87 59 poly_base_vtable(int) _poly_base_vtable_instance @= { 0 }; 88 __cfatid_struct_poly_base(int) __cfatid_poly_base @= { 89 (__cfa__parent_vtable *)0, 90 }; 91 __cfatid_struct_poly_child(int) __cfatid_poly_child = { 92 &__cfatid_poly_base, 93 }; 60 94 poly_child_vtable(int) _poly_child_vtable_instance @= { 61 &_ poly_base_vtable_instance95 &__cfatid_poly_child, 62 96 }; 63 /* Resolver bug keeps me from adding these.64 poly_base_vtable(char) _poly_base_vtable_instance @= { 0 };65 poly_child_vtable(char) _poly_child_vtable_instance @= {66 &_poly_base_vtable_instance67 };68 */69 97 70 98 void poly_poly_test() { … … 77 105 mono_poly_test(); 78 106 poly_poly_test(); 79 printf( "done\n" ); // non-empty .expect file107 printf( "done\n" ); 80 108 } -
tests/io/.expect/manipulatorsOutput1.arm64.txt
r857a1c6 rc8a0210 29 29 float 30 30 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 31 0. 3.000000 3.000000 3.537 3.537000 44 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0031 0. 3. 3 3.537 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 32 32 double 33 0 3.000000 3.537 3.537000 4. 4 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0034 0. 3.000000 3.537 3.537000 4 4. 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0033 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 34 0. 3. 3 3.537 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 35 35 long double 36 0 3.000000 3.537 3.537000 4. 4 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0037 0. 3.000000 3.53699999999999992184029906638898 3.537000 4 4. 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0036 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 37 0. 3. 3 3.53699999999999992184029906638898 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 38 38 39 39 char -
tests/io/.expect/manipulatorsOutput1.x64.txt
r857a1c6 rc8a0210 29 29 float 30 30 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 31 0. 3.000000 3.000000 3.537 3.537000 44 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0031 0. 3. 3 3.537 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 32 32 double 33 0 3.000000 3.537 3.537000 4. 4 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0034 0. 3.000000 3.537 3.537000 4 4. 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0033 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 34 0. 3. 3 3.537 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 35 35 long double 36 0 3.000000 3.537 3.537000 4. 4 3.54 3.54 +3.54 00003.543.54E+00 0xe.26p-2 0XE.26P-2 3.54e+0037 0. 3.000000 3.53699999999999992 3.537000 4 4. 3.54 3.54 +3.54 00003.543.54E+00 0xe.26p-2 0XE.26P-2 3.54e+0036 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0xe.26p-2 0XE.26P-2 3.54e+00 37 0. 3. 3 3.53699999999999992 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0xe.26p-2 0XE.26P-2 3.54e+00 38 38 39 39 char -
tests/io/.expect/manipulatorsOutput1.x86.txt
r857a1c6 rc8a0210 29 29 float 30 30 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 31 0. 3.000000 3.000000 3.537 3.537000 44 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0031 0. 3. 3 3.537 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 32 32 double 33 0 3.000000 3.537 3.537000 4. 4 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0034 0. 3.000000 3.537 3.537000 4 4. 3.54 3.54 +3.54 00003.543.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+0033 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 34 0. 3. 3 3.537 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0x1.c5p+1 0X1.C5P+1 3.54e+00 35 35 long double 36 0 3.000000 3.537 3.537000 4. 4 3.54 3.54 +3.54 00003.543.54E+00 0xe.26p-2 0XE.26P-2 3.54e+0037 0. 3.000000 3.53699999999999992 3.537000 4 4. 3.54 3.54 +3.54 00003.543.54E+00 0xe.26p-2 0XE.26P-2 3.54e+0036 0 3 3.00000 3.537 3.537 4 4. 3.5 3.5 3 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0xe.26p-2 0XE.26P-2 3.54e+00 37 0. 3. 3 3.53699999999999992 3.537 4. 4 3.5 3.5 3. 3.5 3.5 +3.5 +3.5 000003.5 3.54E+00 0xe.26p-2 0XE.26P-2 3.54e+00 38 38 39 39 char -
tests/io/.expect/manipulatorsOutput2.arm64.txt
r857a1c6 rc8a0210 9 9 -0x1.b8p+4 -0x1.b8p+4 -0x1.b8p+4 10 10 0.000000e+00 2.750000e+01 -2.750000e+01 11 0e0 27.5e0 -27.5e0 11 12 0B11011 0X1B 2.75E-09 0X1.B8P+4 12 13 11011 33 1b 13 14 0. 0 27. 27 27.5 14 +27 -27 +27 -27+27.5 -27.515 +27 -27 +27. -27. +27.5 -27.5 15 16 34 34 34 16 4.000000 4.000000 4.00000017 4. 4. 4. 17 18 ab ab ab 18 19 34567 34567 34567 19 3456. 000000 3456.000000 3456.00000020 3456. 3456. 3456. 20 21 abcde abcde abcde 21 22 034 0034 0000000034 … … 24 25 27.500 27.5 28. 27.50000000 25 26 27.000 27.500 27.5 28. 27.50000000 26 27 27. 000000 27.500000027 27.50027 27 27. 27.5 027 27.500 27 28 234.567 234.57 234.6 235. 28 29 234567. 2.3457e+05 2.346e+05 2.35e+05 -
tests/io/.expect/manipulatorsOutput2.x64.txt
r857a1c6 rc8a0210 9 9 -0x1.b8p+4 -0x1.b8p+4 -0xd.cp+1 10 10 0.000000e+00 2.750000e+01 -2.750000e+01 11 0e0 27.5e0 -27.5e0 11 12 0B11011 0X1B 2.75E-09 0X1.B8P+4 12 13 11011 33 1b 13 14 0. 0 27. 27 27.5 14 +27 -27 +27 -27+27.5 -27.515 +27 -27 +27. -27. +27.5 -27.5 15 16 34 34 34 16 4.000000 4.000000 4.00000017 4. 4. 4. 17 18 ab ab ab 18 19 34567 34567 34567 19 3456. 000000 3456.000000 3456.00000020 3456. 3456. 3456. 20 21 abcde abcde abcde 21 22 034 0034 0000000034 … … 24 25 27.500 27.5 28. 27.50000000 25 26 27.000 27.500 27.5 28. 27.50000000 26 27 27. 000000 27.500000027 27.50027 27 27. 27.5 027 27.500 27 28 234.567 234.57 234.6 235. 28 29 234567. 2.3457e+05 2.346e+05 2.35e+05 -
tests/io/.expect/manipulatorsOutput2.x86.txt
r857a1c6 rc8a0210 9 9 -0x1.b8p+4 -0x1.b8p+4 -0xd.cp+1 10 10 0.000000e+00 2.750000e+01 -2.750000e+01 11 0e2147483646 27.5e0 -27.5e0 11 12 0B11011 0X1B 2.75E-09 0X1.B8P+4 12 13 11011 33 1b 13 14 0. 0 27. 27 27.5 14 +27 -27 +27 -27+27.5 -27.515 +27 -27 +27. -27. +27.5 -27.5 15 16 34 34 34 16 4.000000 4.000000 4.00000017 4. 4. 4. 17 18 ab ab ab 18 19 34567 34567 34567 19 3456. 000000 3456.000000 3456.00000020 3456. 3456. 3456. 20 21 abcde abcde abcde 21 22 034 0034 0000000034 … … 24 25 27.500 27.5 28. 27.50000000 25 26 27.000 27.500 27.5 28. 27.50000000 26 27 27. 000000 27.500000027 27.50027 27 27. 27.5 027 27.500 27 28 234.567 234.57 234.6 235. 28 29 234567. 2.3457e+05 2.346e+05 2.35e+05 -
tests/io/manipulatorsOutput1.cfa
r857a1c6 rc8a0210 7 7 // Created On : Sat Jun 8 18:04:11 2019 8 8 // Last Modified By : Peter A. Buhr 9 // Last Modified On : Fri May 1 11:51:44 202010 // Update Count : 99 // Last Modified On : Sat Apr 10 08:42:15 2021 10 // Update Count : 18 11 11 // 12 12 … … 85 85 sout | "double"; 86 86 double d = 3.537; 87 printf( "%g %#8f %g %8f %#8.0f %8.0f %8.2f %-8.2f %-+#8.2f %08.2F %8.2E %8.2a %8.2A %8.2e\n", 88 0.0, 3.0, d, d, d, d, d, d, d, d, d, d, d, d ); 89 sout | 0.0 | wd(8, 3.0) | d | wd(8, d) | nodp(wd(8,0, d)) | wd(8,0, d) | wd(8,2, d) | nonl; 90 sout | left(wd(8,2, d)) | left(sign(wd(8,2, d))) | pad0(upcase(wd(8,2, d))) | upcase(wd(8,2, sci(d))) | wd(8,2, hex(d)) | upcase(wd(8,2, hex(d))) | wd(8,2, sci(d)); 87 printf( "%g %8g %#8g %g %8g %8.0g %#8.0g %8.2g %#8.2g %-8.2g %-8.2g %-#8.2g %-+8.2g %-+#8.2g %08.2g %8.2E %8.2a %#8.2A %#8.2e\n", 88 0.0, 3.0, 3.0, d, d, d, d, d, d, 3.0, d, d, d, d, d, d, d, d, d ); 89 sout | 0.0 | wd(8, 3.0) | nodp(wd(8, 3.0)) | d | wd(8, d) | ws(8,0, d) | nodp(ws(8,0, d)) | ws(8,2, d) | nodp(ws(8,2, d)) | nonl; 90 sout | left(ws(8,2, 3.0)) | left(ws(8,2, d)) | left(nodp(ws(8,2, d))) | left(sign(ws(8,2, d))) | left(sign(nodp(ws(8,2, d)))) | nonl; 91 sout | pad0(ws(8,2, d)) | upcase(wd(8,2, sci(d))) | wd(8,2, hex(d)) | upcase(wd(8,2, hex(d))) | nodp(wd(8,2, sci(d))); 91 92 92 93 sout | "long double"; 93 94 long double ld = 3.537; 94 printf( "%Lg % #8Lf %Lg %8Lf %#8.0Lf %8.0Lf %8.2Lf %-8.2Lf %-+#8.2Lf %08.2LF %8.2LE %8.2La %8.2LA %8.2Le\n",95 0.0L, 3.0L, ld, ld, ld, ld, ld, ld, ld, ld, ld, ld, ld,ld );96 sout | 0.0L | wd(8, 3.0L) | ld | wd(8, ld) | nodp(wd(8,0, ld)) | wd(8,0, ld) | wd(8,2, ld) | nonl;97 sout | left(w d(8,2, ld)) | left(sign(wd(8,2, ld))) | pad0(upcase(wd(8,2, ld))) | upcase(wd(8,2, sci(ld))) | wd(8,2, hex(ld)) | upcase(wd(8,2, hex(ld))) | wd(8,2, sci(ld));98 95 printf( "%Lg %8Lg %#8Lg %Lg %8Lg %8.0Lg %#8.0Lg %8.2Lg %#8.2Lg %-8.2Lg %-8.2Lg %-#8.2Lg %-+8.2Lg %-+#8.2Lg %08.2Lg %8.2LE %8.2La %#8.2LA %#8.2Le\n", 96 0.0L, 3.0L, 3.0L, ld, ld, ld, ld, ld, ld, 3.0L, ld, ld, ld, ld, ld, ld, ld, ld, ld ); 97 sout | 0.0L | wd(8, 3.0L) | nodp(wd(8, 3.0L)) | ld | wd(8, ld) | ws(8,0, ld) | nodp(ws(8,0, ld)) | ws(8,2, ld) | nodp(ws(8,2, ld)) | nonl; 98 sout | left(ws(8,2, 3.0L)) | left(ws(8,2, ld)) | left(nodp(ws(8,2, ld))) | left(sign(ws(8,2, ld))) | left(sign(nodp(ws(8,2, ld)))) | nonl; 99 sout | pad0(ws(8,2, ld)) | upcase(wd(8,2, sci(ld))) | wd(8,2, hex(ld)) | upcase(wd(8,2, hex(ld))) | nodp(wd(8,2, sci(ld))); 99 100 100 101 sout | nl | "char"; … … 117 118 // Local Variables: // 118 119 // tab-width: 4 // 119 // compile-command: "cfa -Wall -Wextra amanipulatorsOutput1.cfa" //120 // compile-command: "cfa -Wall -Wextra manipulatorsOutput1.cfa" // 120 121 // End: // -
tests/io/manipulatorsOutput2.cfa
r857a1c6 rc8a0210 7 7 // Created On : Sat Jun 8 18:04:11 2019 8 8 // Last Modified By : Peter A. Buhr 9 // Last Modified On : S un Nov 15 08:11:53 202010 // Update Count : 99 // Last Modified On : Sat Apr 10 09:16:09 2021 10 // Update Count : 11 11 11 // 12 12 … … 24 24 sout | hex(-27.5F) | hex(-27.5) | hex(-27.5L); 25 25 sout | sci(0.0) | sci(27.5) | sci(-27.5); 26 sout | eng(0.0) | eng(27.5) | eng(-27.5); 26 27 sout | upcase(bin(27)) | upcase(hex(27)) | upcase(27.5e-10) | upcase(hex(27.5)); 27 28 sout | nobase(bin(27)) | nobase(oct(27)) | nobase(hex(27)); -
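The manipulator changes add an eng() output manipulator next to the existing sci(), hex() and upcase() forms; the updated .expect files above show it printing engineering notation such as 27.5e0. A short sketch limited to the calls that appear in this diff:

    #include <fstream.hfa>

    int main(void) {
        sout | sci(0.0) | sci(27.5) | sci(-27.5);        // scientific notation
        sout | eng(0.0) | eng(27.5) | eng(-27.5);        // new: engineering notation
        sout | upcase(bin(27)) | upcase(hex(27)) | upcase(hex(27.5));
        sout | nobase(bin(27)) | nobase(oct(27)) | nobase(hex(27));
    }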
tests/io/manipulatorsOutput3.cfa
r857a1c6 rc8a0210 1 // 2 // Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo 3 // 4 // manipulatorsOutput3.cfa -- 5 // 6 // Author : Peter A. Buhr 7 // Created On : Tue Apr 13 17:54:23 2021 8 // Last Modified By : Peter A. Buhr 9 // Last Modified On : Tue Apr 13 17:54:48 2021 10 // Update Count : 1 11 // 12 1 13 #include <fstream.hfa> 2 14 -
tests/linking/exception-nothreads.cfa
r857a1c6 rc8a0210 17 17 #include <exception.hfa> 18 18 19 TRIVIAL_EXCEPTION(ping); 19 EHM_EXCEPTION(ping)(); 20 EHM_VIRTUAL_TABLE(ping, ping_vt); 20 21 21 22 int main(void) { 22 23 try { 23 throwResume (ping){ };24 throwResume (ping){&ping_vt}; 24 25 } catchResume (ping *) { 25 26 printf("%s threads\n", threading_enabled() ? "with" : "no"); -
tests/linking/exception-withthreads.cfa
r857a1c6 rc8a0210 18 18 #include "../exceptions/with-threads.hfa" 19 19 20 TRIVIAL_EXCEPTION(ping); 20 EHM_EXCEPTION(ping)(); 21 EHM_VIRTUAL_TABLE(ping, ping_vt); 21 22 22 23 int main(void) { 23 24 try { 24 throwResume (ping){ };25 throwResume (ping){&ping_vt}; 25 26 } catchResume (ping *) { 26 27 printf("%s threads\n", threading_enabled() ? "with" : "no"); -
tests/math.cfa
r857a1c6 rc8a0210 10 10 // Created On : Fri Apr 22 14:59:21 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Feb 20 18:00:48 202113 // Update Count : 1 1512 // Last Modified On : Tue Apr 13 21:04:48 2021 13 // Update Count : 123 14 14 // 15 15 … … 69 69 sout | "log:" | log( 1.0F ) | log( 1.0D ) | log( 1.0L ) | nonl; 70 70 sout | log( 1.0F+1.0FI ) | log( 1.0D+1.0DI ) | log( 1.0DL+1.0LI ); 71 sout | "log2:" | log2( 1024 ) | log2( 2 \ 17u ) | log2( 2 \ 23u ); 72 sout | "log2:" | log2( 1024l ) | log2( 2l \ 17u ) | log2( 2l \ 23u ); 73 sout | "log2:" | log2( 1024ll ) | log2( 2ll \ 17u ) | log2( 2ll \ 23u ); 74 #if defined( __SIZEOF_INT128__ ) 75 sout | "log2:" | log2( 1024l128 ) | log2( 2l128 \ 17u ) | log2( 2l128 \ 23u ); 76 #endif // __SIZEOF_INT128__ 71 77 sout | "log2:" | log2( 8.0F ) | log2( 8.0D ) | log2( 8.0L ); 72 78 sout | "log10:" | log10( 100.0F ) | log10( 100.0D ) | log10( 100.0L ); -
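The math.cfa additions exercise log2 over the integral types (int, long, long long, and __int128 when available) alongside the existing floating-point overloads, with \ being Cforall's exponentiation operator in the constructed arguments. A minimal sketch of the calls added there; the fstream.hfa and math.hfa includes are assumed from the surrounding test:

    #include <fstream.hfa>
    #include <math.hfa>

    int main(void) {
        sout | "log2:" | log2( 1024 )   | log2( 2 \ 17u );    // int
        sout | "log2:" | log2( 1024l )  | log2( 2l \ 17u );   // long
        sout | "log2:" | log2( 1024ll ) | log2( 2ll \ 17u );  // long long
        sout | "log2:" | log2( 8.0F ) | log2( 8.0D ) | log2( 8.0L );  // floating point
    }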
tests/meta/.expect/archVast.nast.arm64.txt
r857a1c6 rc8a0210 7 7 char Alternatives are: 8 8 Cost ( 1, 0, 0, 0, 0, 0, 0 ): Explicit Cast of: 9 Variable Expression: FA64: signed int9 Variable Expression: FA64: double 10 10 ... with resolved type: 11 signed int11 double 12 12 ... to: 13 13 char … … 39 39 40 40 Cost ( 1, 0, 0, 0, 0, 0, 0 ): Explicit Cast of: 41 Variable Expression: FA64: double41 Variable Expression: FA64: signed int 42 42 ... with resolved type: 43 double43 signed int 44 44 ... to: 45 45 char -
tests/quasiKeyword.cfa
r857a1c6 rc8a0210 14 14 #include <exception.hfa> 15 15 16 TRIVIAL_EXCEPTION( E);16 EHM_EXCEPTION( E )(); 17 17 18 18 void catch( int i ) {} -
tests/vector_math/.expect/vec4_float.txt
r857a1c6 rc8a0210 6 6 zero-assign:<0.,0.,0.,0.> 7 7 fill-ctor:<1.23,1.23,1.23,1.23> 8 ?-?:<0.02,0.43,-0.999998,-1e-06 .>9 ?-=?:<0.02,0.43,-0.999998,-1e-06 .>10 -?:<-0.02,-0.43,0.999998,1e-06 .>8 ?-?:<0.02,0.43,-0.999998,-1e-06> 9 ?-=?:<0.02,0.43,-0.999998,-1e-06> 10 -?:<-0.02,-0.43,0.999998,1e-06> 11 11 ?+?:<2.3,2.45,-9.2,-12.5> 12 12 ?+=?:<2.3,2.45,-9.2,-12.5> -
tools/gdb/utils-gdb.py
r857a1c6 rc8a0210 23 23 gdb.execute('handle SIGUSR1 nostop noprint pass') 24 24 25 CfaTypes = collections.namedtuple('CfaTypes', 'cluster_ptr processor_ptr thread_ptr int_ptr thread_state ')25 CfaTypes = collections.namedtuple('CfaTypes', 'cluster_ptr processor_ptr thread_ptr int_ptr thread_state yield_state') 26 26 27 27 class ThreadInfo: … … 52 52 # GDB types for various structures/types in CFA 53 53 return CfaTypes(cluster_ptr = gdb.lookup_type('struct cluster').pointer(), 54 processor_ptr = gdb.lookup_type('struct processor').pointer(), 55 thread_ptr = gdb.lookup_type('struct $thread').pointer(), 56 int_ptr = gdb.lookup_type('int').pointer(), 57 thread_state = gdb.lookup_type('enum __Coroutine_State')) 54 processor_ptr = gdb.lookup_type('struct processor').pointer(), 55 thread_ptr = gdb.lookup_type('struct $thread').pointer(), 56 int_ptr = gdb.lookup_type('int').pointer(), 57 thread_state = gdb.lookup_type('enum __Coroutine_State'), 58 yield_state = gdb.lookup_type('enum __Preemption_Reason')) 58 59 59 60 def get_addr(addr): … … 371 372 def print_thread(self, thread, tid, marked): 372 373 cfa_t = get_cfa_types() 373 self.print_formatted(marked, tid, thread['self_cor']['name'].string(), str(thread['state'].cast(cfa_t.thread_state)), str(thread)) 374 ys = str(thread['preempted'].cast(cfa_t.yield_state)) 375 if ys == '_X15__NO_PREEMPTIONKM19__Preemption_Reason_1': 376 state = str(thread['state'].cast(cfa_t.thread_state)) 377 elif ys == '_X18__ALARM_PREEMPTIONKM19__Preemption_Reason_1': 378 state = 'preempted' 379 elif ys == '_X19__MANUAL_PREEMPTIONKM19__Preemption_Reason_1': 380 state = 'yield' 381 elif ys == '_X17__POLL_PREEMPTIONKM19__Preemption_Reason_1': 382 state = 'poll' 383 else: 384 print("error: thread {} in undefined preemption state {}".format(thread, ys)) 385 state = 'error' 386 self.print_formatted(marked, tid, thread['self_cor']['name'].string(), state, str(thread)) 374 387 375 388 def print_threads_by_cluster(self, cluster, print_system = False): … … 480 493 context = thread['context'] 481 494 495 496 497 # must be at frame 0 to set pc register 498 gdb.execute('select-frame 0') 499 if gdb.selected_frame().architecture().name() != 'i386:x86-64': 500 print('gdb debugging only supported for i386:x86-64 for now') 501 return 502 503 # gdb seems to handle things much better if we pretend we just entered the context switch 504 # pretend the pc is __cfactx_switch and adjust the sp, base pointer doesn't need to change 482 505 # lookup for sp,fp and uSwitch 483 xsp = context['SP'] + 4 8506 xsp = context['SP'] + 40 # 40 = 5 64bit registers : %r15, %r14, %r13, %r12, %rbx WARNING: x64 specific 484 507 xfp = context['FP'] 485 508 486 509 # convert string so we can strip out the address 487 510 try: 488 xpc = get_addr(gdb.parse_and_eval('__cfactx_switch').address + 28)511 xpc = get_addr(gdb.parse_and_eval('__cfactx_switch').address) 489 512 except: 490 513 print("here") 491 514 return 492 493 # must be at frame 0 to set pc register494 gdb.execute('select-frame 0')495 515 496 516 # push sp, fp, pc into a global stack … … 503 523 504 524 # update registers for new task 505 print('switching to ') 525 # print('switching to {} ({}) : [{}, {}, {}]'.format(thread['self_cor']['name'].string(), str(thread), str(xsp), str(xfp), str(xpc))) 526 print('switching to thread {} ({})'.format(str(thread), thread['self_cor']['name'].string())) 506 527 gdb.execute('set $rsp={}'.format(xsp)) 507 528 gdb.execute('set $rbp={}'.format(xfp)) … … 552 573 553 574 argv = parse(arg) 554 print(argv)555 575 if 
argv[0].isdigit(): 556 576 cname = " ".join(argv[1:]) if len(argv) > 1 else None