Changeset 2fa5bd2
- Timestamp:
- Dec 12, 2019, 10:04:15 AM (3 years ago)
- Branches:
- arm-eh, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- e752e4e
- Parents:
- aca6a54c (diff), 2cd949b (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 2 added
- 73 edited
Legend:
- Unmodified
- Added
- Removed
-
Jenkinsfile
raca6a54c r2fa5bd2 102 102 103 103 echo GitLogMessage() 104 105 // This is a complete hack but it solves problems with automake thinking it needs to regenerate makefiles 106 // We fudged automake/missing to handle that but automake stills bakes prints inside the makefiles 107 // and these cause more problems. 108 sh 'find . -name Makefile.in -exec touch {} +' 104 109 } 105 110 } -
automake/missing
-
Property
mode
changed from
100644
to 120000
raca6a54c r2fa5bd2 1 #! /bin/sh 2 # Tdelisle : having the Makefiles.in automatically regenerated causes problems 3 # when using multiple versions of automake, even if only on end user machines 4 # therefore I am disabling that feature by commenting this script 5 exit 0 1 /usr/share/automake-1.15/missing -
Property
mode
changed from
-
benchmark/Makefile.in
raca6a54c r2fa5bd2 352 352 LTCFACOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ 353 353 $(LIBTOOLFLAGS) --mode=compile $(CFACC) $(DEFS) \ 354 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(CFAFLAGS) \ 355 $(AM_CFLAGS) $(CFLAGS) 354 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(AM_CFLAGS) $(CFAFLAGS) $(CFLAGS) 356 355 357 356 AM_V_CFA = $(am__v_CFA_@AM_V@) -
driver/cc1.cc
raca6a54c r2fa5bd2 335 335 #endif // __DEBUG_H__ 336 336 337 enum { 338 Color_Auto = 0, 339 Color_Always = 1, 340 Color_Never = 2, 341 } color_arg = Color_Auto; 342 343 const char * color_names[3] = { "--colors=auto", "--colors=always", "--colors=never" }; 344 337 345 // process all the arguments 338 346 … … 341 349 if ( prefix( arg, "-" ) ) { 342 350 // strip inappropriate flags 351 352 if ( prefix( arg, "-fdiagnostics-color=" ) ) { 353 string choice = arg.substr(20); 354 if(choice == "always") color_arg = Color_Always; 355 else if(choice == "never" ) color_arg = Color_Never; 356 else if(choice == "auto" ) color_arg = Color_Auto; 357 } else if ( arg == "-fno-diagnostics-color" ) { 358 color_arg = Color_Auto; 359 } 343 360 344 361 if ( arg == "-quiet" || arg == "-version" || arg == "-fpreprocessed" || … … 440 457 cargs[ncargs++] = cfa_cpp_out.c_str(); 441 458 } // if 459 460 cargs[ncargs++] = color_names[color_arg]; 461 442 462 cargs[ncargs] = nullptr; // terminate argument list 443 463 -
driver/cfa.cc
raca6a54c r2fa5bd2 401 401 args[nargs++] = "-Xlinker"; 402 402 args[nargs++] = "--undefined=__cfaabi_appready_startup"; 403 args[nargs++] = "-z"; 404 args[nargs++] = "execstack"; 403 405 404 406 // include the cfa library in case it is needed … … 409 411 args[nargs++] = "-Wl,--pop-state"; 410 412 args[nargs++] = "-lcfa"; 411 args[nargs++] = "- lpthread";413 args[nargs++] = "-pthread"; 412 414 args[nargs++] = "-ldl"; 413 415 args[nargs++] = "-lrt"; -
libcfa/configure
raca6a54c r2fa5bd2 3000 3000 case $CONFIGURATION in 3001 3001 "debug" ) 3002 CONFIG_CFLAGS="-O g-g"3002 CONFIG_CFLAGS="-O0 -g" 3003 3003 CONFIG_CFAFLAGS="-debug" 3004 3004 CONFIG_BUILDLIB="yes" -
libcfa/configure.ac
raca6a54c r2fa5bd2 68 68 case $CONFIGURATION in 69 69 "debug" ) 70 CONFIG_CFLAGS="-O g-g"70 CONFIG_CFLAGS="-O0 -g" 71 71 CONFIG_CFAFLAGS="-debug" 72 72 CONFIG_BUILDLIB="yes" -
libcfa/prelude/builtins.c
raca6a54c r2fa5bd2 10 10 // Created On : Fri Jul 21 16:21:03 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jun 25 18:06:52201913 // Update Count : 9712 // Last Modified On : Thu Nov 21 16:31:39 2019 13 // Update Count : 101 14 14 // 15 15 … … 69 69 70 70 // universal typed pointer constant 71 // Compiler issue: there is a problem with anonymous types that do not have a size. 72 static inline forall( dtype DT | sized(DT) ) DT * intptr( uintptr_t addr ) { return (DT *)addr; } 71 static inline forall( dtype DT ) DT * intptr( uintptr_t addr ) { return (DT *)addr; } 73 72 74 73 // exponentiation operator implementation -
libcfa/prelude/sync-builtins.cf
raca6a54c r2fa5bd2 1 1 char __sync_fetch_and_add(volatile char *, char,...); 2 char __sync_fetch_and_add_1(volatile char *, char,...);3 2 signed char __sync_fetch_and_add(volatile signed char *, signed char,...); 4 signed char __sync_fetch_and_add_1(volatile signed char *, signed char,...);5 3 unsigned char __sync_fetch_and_add(volatile unsigned char *, unsigned char,...); 6 unsigned char __sync_fetch_and_add_1(volatile unsigned char *, unsigned char,...);7 4 signed short __sync_fetch_and_add(volatile signed short *, signed short,...); 8 signed short __sync_fetch_and_add_2(volatile signed short *, signed short,...);9 5 unsigned short __sync_fetch_and_add(volatile unsigned short *, unsigned short,...); 10 unsigned short __sync_fetch_and_add_2(volatile unsigned short *, unsigned short,...);11 6 signed int __sync_fetch_and_add(volatile signed int *, signed int,...); 12 signed int __sync_fetch_and_add_4(volatile signed int *, signed int,...);13 7 unsigned int __sync_fetch_and_add(volatile unsigned int *, unsigned int,...); 14 unsigned int __sync_fetch_and_add_4(volatile unsigned int *, unsigned int,...); 8 signed long int __sync_fetch_and_add(volatile signed long int *, signed long int,...); 9 unsigned long int __sync_fetch_and_add(volatile unsigned long int *, unsigned long int,...); 15 10 signed long long int __sync_fetch_and_add(volatile signed long long int *, signed long long int,...); 16 signed long long int __sync_fetch_and_add_8(volatile signed long long int *, signed long long int,...);17 11 unsigned long long int __sync_fetch_and_add(volatile unsigned long long int *, unsigned long long int,...); 18 unsigned long long int __sync_fetch_and_add_8(volatile unsigned long long int *, unsigned long long int,...);19 12 #if defined(__SIZEOF_INT128__) 20 13 signed __int128 __sync_fetch_and_add(volatile signed __int128 *, signed __int128,...); 21 signed __int128 __sync_fetch_and_add_16(volatile signed __int128 *, signed __int128,...);22 14 unsigned __int128 
__sync_fetch_and_add(volatile unsigned __int128 *, unsigned __int128,...); 23 unsigned __int128 __sync_fetch_and_add_16(volatile unsigned __int128 *, unsigned __int128,...);24 15 #endif 25 16 26 17 char __sync_fetch_and_sub(volatile char *, char,...); 27 char __sync_fetch_and_sub_1(volatile char *, char,...);28 18 signed char __sync_fetch_and_sub(volatile signed char *, signed char,...); 29 signed char __sync_fetch_and_sub_1(volatile signed char *, signed char,...);30 19 unsigned char __sync_fetch_and_sub(volatile unsigned char *, unsigned char,...); 31 unsigned char __sync_fetch_and_sub_1(volatile unsigned char *, unsigned char,...);32 20 signed short __sync_fetch_and_sub(volatile signed short *, signed short,...); 33 signed short __sync_fetch_and_sub_2(volatile signed short *, signed short,...);34 21 unsigned short __sync_fetch_and_sub(volatile unsigned short *, unsigned short,...); 35 unsigned short __sync_fetch_and_sub_2(volatile unsigned short *, unsigned short,...);36 22 signed int __sync_fetch_and_sub(volatile signed int *, signed int,...); 37 signed int __sync_fetch_and_sub_4(volatile signed int *, signed int,...);38 23 unsigned int __sync_fetch_and_sub(volatile unsigned int *, unsigned int,...); 39 unsigned int __sync_fetch_and_sub_4(volatile unsigned int *, unsigned int,...); 24 signed long int __sync_fetch_and_sub(volatile signed long int *, signed long int,...); 25 unsigned long int __sync_fetch_and_sub(volatile unsigned long int *, unsigned long int,...); 40 26 signed long long int __sync_fetch_and_sub(volatile signed long long int *, signed long long int,...); 41 signed long long int __sync_fetch_and_sub_8(volatile signed long long int *, signed long long int,...);42 27 unsigned long long int __sync_fetch_and_sub(volatile unsigned long long int *, unsigned long long int,...); 43 unsigned long long int __sync_fetch_and_sub_8(volatile unsigned long long int *, unsigned long long int,...);44 28 #if defined(__SIZEOF_INT128__) 45 29 signed __int128 
__sync_fetch_and_sub(volatile signed __int128 *, signed __int128,...); 46 signed __int128 __sync_fetch_and_sub_16(volatile signed __int128 *, signed __int128,...);47 30 unsigned __int128 __sync_fetch_and_sub(volatile unsigned __int128 *, unsigned __int128,...); 48 unsigned __int128 __sync_fetch_and_sub_16(volatile unsigned __int128 *, unsigned __int128,...);49 31 #endif 50 32 51 33 char __sync_fetch_and_or(volatile char *, char,...); 52 char __sync_fetch_and_or_1(volatile char *, char,...);53 34 signed char __sync_fetch_and_or(volatile signed char *, signed char,...); 54 signed char __sync_fetch_and_or_1(volatile signed char *, signed char,...);55 35 unsigned char __sync_fetch_and_or(volatile unsigned char *, unsigned char,...); 56 unsigned char __sync_fetch_and_or_1(volatile unsigned char *, unsigned char,...);57 36 signed short __sync_fetch_and_or(volatile signed short *, signed short,...); 58 signed short __sync_fetch_and_or_2(volatile signed short *, signed short,...);59 37 unsigned short __sync_fetch_and_or(volatile unsigned short *, unsigned short,...); 60 unsigned short __sync_fetch_and_or_2(volatile unsigned short *, unsigned short,...);61 38 signed int __sync_fetch_and_or(volatile signed int *, signed int,...); 62 signed int __sync_fetch_and_or_4(volatile signed int *, signed int,...);63 39 unsigned int __sync_fetch_and_or(volatile unsigned int *, unsigned int,...); 64 unsigned int __sync_fetch_and_or_4(volatile unsigned int *, unsigned int,...); 40 signed long int __sync_fetch_and_or(volatile signed long int *, signed long int,...); 41 unsigned long int __sync_fetch_and_or(volatile unsigned long int *, unsigned long int,...); 65 42 signed long long int __sync_fetch_and_or(volatile signed long long int *, signed long long int,...); 66 signed long long int __sync_fetch_and_or_8(volatile signed long long int *, signed long long int,...);67 43 unsigned long long int __sync_fetch_and_or(volatile unsigned long long int *, unsigned long long int,...); 68 
unsigned long long int __sync_fetch_and_or_8(volatile unsigned long long int *, unsigned long long int,...);69 44 #if defined(__SIZEOF_INT128__) 70 45 signed __int128 __sync_fetch_and_or(volatile signed __int128 *, signed __int128,...); 71 signed __int128 __sync_fetch_and_or_16(volatile signed __int128 *, signed __int128,...);72 46 unsigned __int128 __sync_fetch_and_or(volatile unsigned __int128 *, unsigned __int128,...); 73 unsigned __int128 __sync_fetch_and_or_16(volatile unsigned __int128 *, unsigned __int128,...);74 47 #endif 75 48 76 49 char __sync_fetch_and_and(volatile char *, char,...); 77 char __sync_fetch_and_and_1(volatile char *, char,...);78 50 signed char __sync_fetch_and_and(volatile signed char *, signed char,...); 79 signed char __sync_fetch_and_and_1(volatile signed char *, signed char,...);80 51 unsigned char __sync_fetch_and_and(volatile unsigned char *, unsigned char,...); 81 unsigned char __sync_fetch_and_and_1(volatile unsigned char *, unsigned char,...);82 52 signed short __sync_fetch_and_and(volatile signed short *, signed short,...); 83 signed short __sync_fetch_and_and_2(volatile signed short *, signed short,...);84 53 unsigned short __sync_fetch_and_and(volatile unsigned short *, unsigned short,...); 85 unsigned short __sync_fetch_and_and_2(volatile unsigned short *, unsigned short,...);86 54 signed int __sync_fetch_and_and(volatile signed int *, signed int,...); 87 signed int __sync_fetch_and_and_4(volatile signed int *, signed int,...);88 55 unsigned int __sync_fetch_and_and(volatile unsigned int *, unsigned int,...); 89 unsigned int __sync_fetch_and_and_4(volatile unsigned int *, unsigned int,...); 56 signed long int __sync_fetch_and_and(volatile signed long int *, signed long int,...); 57 unsigned long int __sync_fetch_and_and(volatile unsigned long int *, unsigned long int,...); 90 58 signed long long int __sync_fetch_and_and(volatile signed long long int *, signed long long int,...); 91 signed long long int 
__sync_fetch_and_and_8(volatile signed long long int *, signed long long int,...);92 59 unsigned long long int __sync_fetch_and_and(volatile unsigned long long int *, unsigned long long int,...); 93 unsigned long long int __sync_fetch_and_and_8(volatile unsigned long long int *, unsigned long long int,...);94 60 #if defined(__SIZEOF_INT128__) 95 61 signed __int128 __sync_fetch_and_and(volatile signed __int128 *, signed __int128,...); 96 signed __int128 __sync_fetch_and_and_16(volatile signed __int128 *, signed __int128,...);97 62 unsigned __int128 __sync_fetch_and_and(volatile unsigned __int128 *, unsigned __int128,...); 98 unsigned __int128 __sync_fetch_and_and_16(volatile unsigned __int128 *, unsigned __int128,...);99 63 #endif 100 64 101 65 char __sync_fetch_and_xor(volatile char *, char,...); 102 char __sync_fetch_and_xor_1(volatile char *, char,...);103 66 signed char __sync_fetch_and_xor(volatile signed char *, signed char,...); 104 signed char __sync_fetch_and_xor_1(volatile signed char *, signed char,...);105 67 unsigned char __sync_fetch_and_xor(volatile unsigned char *, unsigned char,...); 106 unsigned char __sync_fetch_and_xor_1(volatile unsigned char *, unsigned char,...);107 68 signed short __sync_fetch_and_xor(volatile signed short *, signed short,...); 108 signed short __sync_fetch_and_xor_2(volatile signed short *, signed short,...);109 69 unsigned short __sync_fetch_and_xor(volatile unsigned short *, unsigned short,...); 110 unsigned short __sync_fetch_and_xor_2(volatile unsigned short *, unsigned short,...);111 70 signed int __sync_fetch_and_xor(volatile signed int *, signed int,...); 112 signed int __sync_fetch_and_xor_4(volatile signed int *, signed int,...);113 71 unsigned int __sync_fetch_and_xor(volatile unsigned int *, unsigned int,...); 114 unsigned int __sync_fetch_and_xor_4(volatile unsigned int *, unsigned int,...); 72 signed long int __sync_fetch_and_xor(volatile signed long int *, signed long int,...); 73 unsigned long int 
__sync_fetch_and_xor(volatile unsigned long int *, unsigned long int,...); 115 74 signed long long int __sync_fetch_and_xor(volatile signed long long int *, signed long long int,...); 116 signed long long int __sync_fetch_and_xor_8(volatile signed long long int *, signed long long int,...);117 75 unsigned long long int __sync_fetch_and_xor(volatile unsigned long long int *, unsigned long long int,...); 118 unsigned long long int __sync_fetch_and_xor_8(volatile unsigned long long int *, unsigned long long int,...);119 76 #if defined(__SIZEOF_INT128__) 120 77 signed __int128 __sync_fetch_and_xor(volatile signed __int128 *, signed __int128,...); 121 signed __int128 __sync_fetch_and_xor_16(volatile signed __int128 *, signed __int128,...);122 78 unsigned __int128 __sync_fetch_and_xor(volatile unsigned __int128 *, unsigned __int128,...); 123 unsigned __int128 __sync_fetch_and_xor_16(volatile unsigned __int128 *, unsigned __int128,...);124 79 #endif 125 80 126 81 char __sync_fetch_and_nand(volatile char *, char,...); 127 char __sync_fetch_and_nand_1(volatile char *, char,...);128 82 signed char __sync_fetch_and_nand(volatile signed char *, signed char,...); 129 signed char __sync_fetch_and_nand_1(volatile signed char *, signed char,...);130 83 unsigned char __sync_fetch_and_nand(volatile unsigned char *, unsigned char,...); 131 unsigned char __sync_fetch_and_nand_1(volatile unsigned char *, unsigned char,...);132 84 signed short __sync_fetch_and_nand(volatile signed short *, signed short,...); 133 signed short __sync_fetch_and_nand_2(volatile signed short *, signed short,...);134 85 unsigned short __sync_fetch_and_nand(volatile unsigned short *, unsigned short,...); 135 unsigned short __sync_fetch_and_nand_2(volatile unsigned short *, unsigned short,...);136 86 signed int __sync_fetch_and_nand(volatile signed int *, signed int,...); 137 signed int __sync_fetch_and_nand_4(volatile signed int *, signed int,...);138 87 unsigned int __sync_fetch_and_nand(volatile unsigned int 
*, unsigned int,...); 139 unsigned int __sync_fetch_and_nand_4(volatile unsigned int *, unsigned int,...); 88 signed long int __sync_fetch_and_nand(volatile signed long int *, signed long int,...); 89 unsigned long int __sync_fetch_and_nand(volatile unsigned long int *, unsigned long int,...); 140 90 signed long long int __sync_fetch_and_nand(volatile signed long long int *, signed long long int,...); 141 signed long long int __sync_fetch_and_nand_8(volatile signed long long int *, signed long long int,...);142 91 unsigned long long int __sync_fetch_and_nand(volatile unsigned long long int *, unsigned long long int,...); 143 unsigned long long int __sync_fetch_and_nand_8(volatile unsigned long long int *, unsigned long long int,...);144 92 #if defined(__SIZEOF_INT128__) 145 93 signed __int128 __sync_fetch_and_nand(volatile signed __int128 *, signed __int128,...); 146 signed __int128 __sync_fetch_and_nand_16(volatile signed __int128 *, signed __int128,...);147 94 unsigned __int128 __sync_fetch_and_nand(volatile unsigned __int128 *, unsigned __int128,...); 148 unsigned __int128 __sync_fetch_and_nand_16(volatile unsigned __int128 *, unsigned __int128,...);149 95 #endif 150 96 151 97 char __sync_add_and_fetch(volatile char *, char,...); 152 char __sync_add_and_fetch_1(volatile char *, char,...);153 98 signed char __sync_add_and_fetch(volatile signed char *, signed char,...); 154 signed char __sync_add_and_fetch_1(volatile signed char *, signed char,...);155 99 unsigned char __sync_add_and_fetch(volatile unsigned char *, unsigned char,...); 156 unsigned char __sync_add_and_fetch_1(volatile unsigned char *, unsigned char,...);157 100 signed short __sync_add_and_fetch(volatile signed short *, signed short,...); 158 signed short __sync_add_and_fetch_2(volatile signed short *, signed short,...);159 101 unsigned short __sync_add_and_fetch(volatile unsigned short *, unsigned short,...); 160 unsigned short __sync_add_and_fetch_2(volatile unsigned short *, unsigned 
short,...);161 102 signed int __sync_add_and_fetch(volatile signed int *, signed int,...); 162 signed int __sync_add_and_fetch_4(volatile signed int *, signed int,...);163 103 signed int __sync_add_and_fetch(volatile signed int *, signed int,...); 164 signed int __sync_add_and_fetch_4(volatile signed int *, signed int,...); 104 signed long int __sync_add_and_fetch(volatile signed long int *, signed long int,...); 105 unsigned long int __sync_add_and_fetch(volatile unsigned long int *, unsigned long int,...); 165 106 signed long long int __sync_add_and_fetch(volatile signed long long int *, signed long long int,...); 166 signed long long int __sync_add_and_fetch_8(volatile signed long long int *, signed long long int,...);167 107 unsigned long long int __sync_add_and_fetch(volatile unsigned long long int *, unsigned long long int,...); 168 unsigned long long int __sync_add_and_fetch_8(volatile unsigned long long int *, unsigned long long int,...);169 108 #if defined(__SIZEOF_INT128__) 170 109 signed __int128 __sync_add_and_fetch(volatile signed __int128 *, signed __int128,...); 171 signed __int128 __sync_add_and_fetch_16(volatile signed __int128 *, signed __int128,...);172 110 unsigned __int128 __sync_add_and_fetch(volatile unsigned __int128 *, unsigned __int128,...); 173 unsigned __int128 __sync_add_and_fetch_16(volatile unsigned __int128 *, unsigned __int128,...);174 111 #endif 175 112 176 113 char __sync_sub_and_fetch(volatile char *, char,...); 177 char __sync_sub_and_fetch_1(volatile char *, char,...);178 114 signed char __sync_sub_and_fetch(volatile signed char *, signed char,...); 179 signed char __sync_sub_and_fetch_1(volatile signed char *, signed char,...);180 115 unsigned char __sync_sub_and_fetch(volatile unsigned char *, unsigned char,...); 181 unsigned char __sync_sub_and_fetch_1(volatile unsigned char *, unsigned char,...);182 116 signed short __sync_sub_and_fetch(volatile signed short *, signed short,...); 183 signed short 
__sync_sub_and_fetch_2(volatile signed short *, signed short,...);184 117 unsigned short __sync_sub_and_fetch(volatile unsigned short *, unsigned short,...); 185 unsigned short __sync_sub_and_fetch_2(volatile unsigned short *, unsigned short,...);186 118 signed int __sync_sub_and_fetch(volatile signed int *, signed int,...); 187 signed int __sync_sub_and_fetch_4(volatile signed int *, signed int,...);188 119 unsigned int __sync_sub_and_fetch(volatile unsigned int *, unsigned int,...); 189 unsigned int __sync_sub_and_fetch_4(volatile unsigned int *, unsigned int,...); 120 signed long int __sync_sub_and_fetch(volatile signed long int *, signed long int,...); 121 unsigned long int __sync_sub_and_fetch(volatile unsigned long int *, unsigned long int,...); 190 122 signed long long int __sync_sub_and_fetch(volatile signed long long int *, signed long long int,...); 191 signed long long int __sync_sub_and_fetch_8(volatile signed long long int *, signed long long int,...);192 123 unsigned long long int __sync_sub_and_fetch(volatile unsigned long long int *, unsigned long long int,...); 193 unsigned long long int __sync_sub_and_fetch_8(volatile unsigned long long int *, unsigned long long int,...);194 124 #if defined(__SIZEOF_INT128__) 195 125 signed __int128 __sync_sub_and_fetch(volatile signed __int128 *, signed __int128,...); 196 signed __int128 __sync_sub_and_fetch_16(volatile signed __int128 *, signed __int128,...);197 126 unsigned __int128 __sync_sub_and_fetch(volatile unsigned __int128 *, unsigned __int128,...); 198 unsigned __int128 __sync_sub_and_fetch_16(volatile unsigned __int128 *, unsigned __int128,...);199 127 #endif 200 128 201 129 char __sync_or_and_fetch(volatile char *, char,...); 202 char __sync_or_and_fetch_1(volatile char *, char,...);203 130 signed char __sync_or_and_fetch(volatile signed char *, signed char,...); 204 signed char __sync_or_and_fetch_1(volatile signed char *, signed char,...);205 131 unsigned char __sync_or_and_fetch(volatile unsigned 
char *, unsigned char,...); 206 unsigned char __sync_or_and_fetch_1(volatile unsigned char *, unsigned char,...);207 132 signed short __sync_or_and_fetch(volatile signed short *, signed short,...); 208 signed short __sync_or_and_fetch_2(volatile signed short *, signed short,...);209 133 unsigned short __sync_or_and_fetch(volatile unsigned short *, unsigned short,...); 210 unsigned short __sync_or_and_fetch_2(volatile unsigned short *, unsigned short,...);211 134 signed int __sync_or_and_fetch(volatile signed int *, signed int,...); 212 signed int __sync_or_and_fetch_4(volatile signed int *, signed int,...);213 135 unsigned int __sync_or_and_fetch(volatile unsigned int *, unsigned int,...); 214 unsigned int __sync_or_and_fetch_4(volatile unsigned int *, unsigned int,...); 136 signed long int __sync_or_and_fetch(volatile signed long int *, signed long int,...); 137 unsigned long int __sync_or_and_fetch(volatile unsigned long int *, unsigned long int,...); 215 138 signed long long int __sync_or_and_fetch(volatile signed long long int *, signed long long int,...); 216 signed long long int __sync_or_and_fetch_8(volatile signed long long int *, signed long long int,...);217 139 unsigned long long int __sync_or_and_fetch(volatile unsigned long long int *, unsigned long long int,...); 218 unsigned long long int __sync_or_and_fetch_8(volatile unsigned long long int *, unsigned long long int,...);219 140 #if defined(__SIZEOF_INT128__) 220 141 signed __int128 __sync_or_and_fetch(volatile signed __int128 *, signed __int128,...); 221 signed __int128 __sync_or_and_fetch_16(volatile signed __int128 *, signed __int128,...);222 142 unsigned __int128 __sync_or_and_fetch(volatile unsigned __int128 *, unsigned __int128,...); 223 unsigned __int128 __sync_or_and_fetch_16(volatile unsigned __int128 *, unsigned __int128,...);224 143 #endif 225 144 226 145 char __sync_and_and_fetch(volatile char *, char,...); 227 char __sync_and_and_fetch_1(volatile char *, char,...);228 146 signed char 
__sync_and_and_fetch(volatile signed char *, signed char,...); 229 signed char __sync_and_and_fetch_1(volatile signed char *, signed char,...);230 147 unsigned char __sync_and_and_fetch(volatile unsigned char *, unsigned char,...); 231 unsigned char __sync_and_and_fetch_1(volatile unsigned char *, unsigned char,...);232 148 signed short __sync_and_and_fetch(volatile signed short *, signed short,...); 233 signed short __sync_and_and_fetch_2(volatile signed short *, signed short,...);234 149 unsigned short __sync_and_and_fetch(volatile unsigned short *, unsigned short,...); 235 unsigned short __sync_and_and_fetch_2(volatile unsigned short *, unsigned short,...);236 150 signed int __sync_and_and_fetch(volatile signed int *, signed int,...); 237 signed int __sync_and_and_fetch_4(volatile signed int *, signed int,...);238 151 unsigned int __sync_and_and_fetch(volatile unsigned int *, unsigned int,...); 239 unsigned int __sync_and_and_fetch_4(volatile unsigned int *, unsigned int,...); 152 signed long int __sync_and_and_fetch(volatile signed long int *, signed long int,...); 153 unsigned long int __sync_and_and_fetch(volatile unsigned long int *, unsigned long int,...); 240 154 signed long long int __sync_and_and_fetch(volatile signed long long int *, signed long long int,...); 241 signed long long int __sync_and_and_fetch_8(volatile signed long long int *, signed long long int,...);242 155 unsigned long long int __sync_and_and_fetch(volatile unsigned long long int *, unsigned long long int,...); 243 unsigned long long int __sync_and_and_fetch_8(volatile unsigned long long int *, unsigned long long int,...);244 156 #if defined(__SIZEOF_INT128__) 245 157 signed __int128 __sync_and_and_fetch(volatile signed __int128 *, signed __int128,...); 246 signed __int128 __sync_and_and_fetch_16(volatile signed __int128 *, signed __int128,...);247 158 unsigned __int128 __sync_and_and_fetch(volatile unsigned __int128 *, unsigned __int128,...); 248 unsigned __int128 
__sync_and_and_fetch_16(volatile unsigned __int128 *, unsigned __int128,...);249 159 #endif 250 160 251 161 char __sync_xor_and_fetch(volatile char *, char,...); 252 char __sync_xor_and_fetch_1(volatile char *, char,...);253 162 signed char __sync_xor_and_fetch(volatile signed char *, signed char,...); 254 signed char __sync_xor_and_fetch_1(volatile signed char *, signed char,...);255 163 unsigned char __sync_xor_and_fetch(volatile unsigned char *, unsigned char,...); 256 unsigned char __sync_xor_and_fetch_1(volatile unsigned char *, unsigned char,...);257 164 signed short __sync_xor_and_fetch(volatile signed short *, signed short,...); 258 signed short __sync_xor_and_fetch_2(volatile signed short *, signed short,...);259 165 unsigned short __sync_xor_and_fetch(volatile unsigned short *, unsigned short,...); 260 unsigned short __sync_xor_and_fetch_2(volatile unsigned short *, unsigned short,...);261 166 signed int __sync_xor_and_fetch(volatile signed int *, signed int,...); 262 signed int __sync_xor_and_fetch_4(volatile signed int *, signed int,...);263 167 unsigned int __sync_xor_and_fetch(volatile unsigned int *, unsigned int,...); 264 unsigned int __sync_xor_and_fetch_4(volatile unsigned int *, unsigned int,...); 168 signed long int __sync_xor_and_fetch(volatile signed long int *, signed long int,...); 169 unsigned long int __sync_xor_and_fetch(volatile unsigned long int *, unsigned long int,...); 265 170 signed long long int __sync_xor_and_fetch(volatile signed long long int *, signed long long int,...); 266 signed long long int __sync_xor_and_fetch_8(volatile signed long long int *, signed long long int,...);267 171 unsigned long long int __sync_xor_and_fetch(volatile unsigned long long int *, unsigned long long int,...); 268 unsigned long long int __sync_xor_and_fetch_8(volatile unsigned long long int *, unsigned long long int,...);269 172 #if defined(__SIZEOF_INT128__) 270 173 signed __int128 __sync_xor_and_fetch(volatile signed __int128 *, signed 
__int128,...); 271 signed __int128 __sync_xor_and_fetch_16(volatile signed __int128 *, signed __int128,...);272 174 unsigned __int128 __sync_xor_and_fetch(volatile unsigned __int128 *, unsigned __int128,...); 273 unsigned __int128 __sync_xor_and_fetch_16(volatile unsigned __int128 *, unsigned __int128,...);274 175 #endif 275 176 276 177 char __sync_nand_and_fetch(volatile char *, char,...); 277 char __sync_nand_and_fetch_1(volatile char *, char,...);278 178 signed char __sync_nand_and_fetch(volatile signed char *, signed char,...); 279 signed char __sync_nand_and_fetch_1(volatile signed char *, signed char,...);280 179 unsigned char __sync_nand_and_fetch(volatile unsigned char *, unsigned char,...); 281 unsigned char __sync_nand_and_fetch_1(volatile unsigned char *, unsigned char,...);282 180 signed short __sync_nand_and_fetch(volatile signed short *, signed short,...); 283 signed short __sync_nand_and_fetch_2(volatile signed short *, signed short,...);284 181 unsigned short __sync_nand_and_fetch(volatile unsigned short *, unsigned short,...); 285 unsigned short __sync_nand_and_fetch_2(volatile unsigned short *, unsigned short,...);286 182 signed int __sync_nand_and_fetch(volatile signed int *, signed int,...); 287 signed int __sync_nand_and_fetch_4(volatile signed int *, signed int,...);288 183 unsigned int __sync_nand_and_fetch(volatile unsigned int *, unsigned int,...); 289 unsigned int __sync_nand_and_fetch_4(volatile unsigned int *, unsigned int,...); 184 signed long int __sync_nand_and_fetch(volatile signed long int *, signed long int,...); 185 unsigned long int __sync_nand_and_fetch(volatile unsigned long int *, unsigned long int,...); 290 186 signed long long int __sync_nand_and_fetch(volatile signed long long int *, signed long long int,...); 291 signed long long int __sync_nand_and_fetch_8(volatile signed long long int *, signed long long int,...);292 187 unsigned long long int __sync_nand_and_fetch(volatile unsigned long long int *, unsigned long long 
int,...); 293 unsigned long long int __sync_nand_and_fetch_8(volatile unsigned long long int *, unsigned long long int,...);294 188 #if defined(__SIZEOF_INT128__) 295 189 signed __int128 __sync_nand_and_fetch(volatile signed __int128 *, signed __int128,...); 296 signed __int128 __sync_nand_and_fetch_16(volatile signed __int128 *, signed __int128,...);297 190 unsigned __int128 __sync_nand_and_fetch(volatile unsigned __int128 *, unsigned __int128,...); 298 unsigned __int128 __sync_nand_and_fetch_16(volatile unsigned __int128 *, unsigned __int128,...);299 191 #endif 300 192 301 193 _Bool __sync_bool_compare_and_swap(volatile char *, char, char,...); 302 _Bool __sync_bool_compare_and_swap_1(volatile char *, char, char,...);303 194 _Bool __sync_bool_compare_and_swap(volatile signed char *, signed char, signed char,...); 304 _Bool __sync_bool_compare_and_swap_1(volatile signed char *, signed char, signed char,...);305 195 _Bool __sync_bool_compare_and_swap(volatile unsigned char *, unsigned char, unsigned char,...); 306 _Bool __sync_bool_compare_and_swap_1(volatile unsigned char *, unsigned char, unsigned char,...);307 196 _Bool __sync_bool_compare_and_swap(volatile short *, signed short, signed short,...); 308 _Bool __sync_bool_compare_and_swap_2(volatile short *, signed short, signed short,...);309 197 _Bool __sync_bool_compare_and_swap(volatile short *, unsigned short, unsigned short,...); 310 _Bool __sync_bool_compare_and_swap_2(volatile short *, unsigned short, unsigned short,...);311 198 _Bool __sync_bool_compare_and_swap(volatile signed int *, signed int, signed int,...); 312 _Bool __sync_bool_compare_and_swap_4(volatile signed int *, signed int, signed int,...);313 199 _Bool __sync_bool_compare_and_swap(volatile unsigned int *, unsigned int, unsigned int,...); 314 _Bool __sync_bool_compare_and_swap_4(volatile unsigned int *, unsigned int, unsigned int,...); 200 _Bool __sync_bool_compare_and_swap(volatile signed long int *, signed long int, signed long int,...); 
201 _Bool __sync_bool_compare_and_swap(volatile unsigned long int *, unsigned long int, unsigned long int,...); 315 202 _Bool __sync_bool_compare_and_swap(volatile signed long long int *, signed long long int, signed long long int,...); 316 _Bool __sync_bool_compare_and_swap_8(volatile signed long long int *, signed long long int, signed long long int,...);317 203 _Bool __sync_bool_compare_and_swap(volatile unsigned long long int *, unsigned long long int, unsigned long long int,...); 318 _Bool __sync_bool_compare_and_swap_8(volatile unsigned long long int *, unsigned long long int, unsigned long long int,...);319 204 #if defined(__SIZEOF_INT128__) 320 205 _Bool __sync_bool_compare_and_swap(volatile signed __int128 *, signed __int128, signed __int128,...); 321 _Bool __sync_bool_compare_and_swap_16(volatile signed __int128 *, signed __int128, signed __int128,...);322 206 _Bool __sync_bool_compare_and_swap(volatile unsigned __int128 *, unsigned __int128, unsigned __int128,...); 323 _Bool __sync_bool_compare_and_swap_16(volatile unsigned __int128 *, unsigned __int128, unsigned __int128,...);324 207 #endif 325 208 forall(dtype T) _Bool __sync_bool_compare_and_swap(T * volatile *, T *, T*, ...); 326 209 327 210 char __sync_val_compare_and_swap(volatile char *, char, char,...); 328 char __sync_val_compare_and_swap_1(volatile char *, char, char,...);329 211 signed char __sync_val_compare_and_swap(volatile signed char *, signed char, signed char,...); 330 signed char __sync_val_compare_and_swap_1(volatile signed char *, signed char, signed char,...);331 212 unsigned char __sync_val_compare_and_swap(volatile unsigned char *, unsigned char, unsigned char,...); 332 unsigned char __sync_val_compare_and_swap_1(volatile unsigned char *, unsigned char, unsigned char,...);333 213 signed short __sync_val_compare_and_swap(volatile signed short *, signed short, signed short,...); 334 signed short __sync_val_compare_and_swap_2(volatile signed short *, signed short, signed 
short,...);335 214 unsigned short __sync_val_compare_and_swap(volatile unsigned short *, unsigned short, unsigned short,...); 336 unsigned short __sync_val_compare_and_swap_2(volatile unsigned short *, unsigned short, unsigned short,...);337 215 signed int __sync_val_compare_and_swap(volatile signed int *, signed int, signed int,...); 338 signed int __sync_val_compare_and_swap_4(volatile signed int *, signed int, signed int,...);339 216 unsigned int __sync_val_compare_and_swap(volatile unsigned int *, unsigned int, unsigned int,...); 340 unsigned int __sync_val_compare_and_swap_4(volatile unsigned int *, unsigned int, unsigned int,...); 217 signed long int __sync_val_compare_and_swap(volatile signed long int *, signed long int, signed long int,...); 218 unsigned long int __sync_val_compare_and_swap(volatile unsigned long int *, unsigned long int, unsigned long int,...); 341 219 signed long long int __sync_val_compare_and_swap(volatile signed long long int *, signed long long int, signed long long int,...); 342 signed long long int __sync_val_compare_and_swap_8(volatile signed long long int *, signed long long int, signed long long int,...);343 220 unsigned long long int __sync_val_compare_and_swap(volatile unsigned long long int *, unsigned long long int, unsigned long long int,...); 344 unsigned long long int __sync_val_compare_and_swap_8(volatile unsigned long long int *, unsigned long long int, unsigned long long int,...);345 221 #if defined(__SIZEOF_INT128__) 346 222 signed __int128 __sync_val_compare_and_swap(volatile signed __int128 *, signed __int128, signed __int128,...); 347 signed __int128 __sync_val_compare_and_swap_16(volatile signed __int128 *, signed __int128, signed __int128,...);348 223 unsigned __int128 __sync_val_compare_and_swap(volatile unsigned __int128 *, unsigned __int128, unsigned __int128,...); 349 unsigned __int128 __sync_val_compare_and_swap_16(volatile unsigned __int128 *, unsigned __int128, unsigned __int128,...);350 224 #endif 351 225 
forall(dtype T) T * __sync_val_compare_and_swap(T * volatile *, T *, T*,...); 352 226 353 227 char __sync_lock_test_and_set(volatile char *, char,...); 354 char __sync_lock_test_and_set_1(volatile char *, char,...);355 228 signed char __sync_lock_test_and_set(volatile signed char *, signed char,...); 356 signed char __sync_lock_test_and_set_1(volatile signed char *, signed char,...);357 229 unsigned char __sync_lock_test_and_set(volatile unsigned char *, unsigned char,...); 358 unsigned char __sync_lock_test_and_set_1(volatile unsigned char *, unsigned char,...);359 230 signed short __sync_lock_test_and_set(volatile signed short *, signed short,...); 360 signed short __sync_lock_test_and_set_2(volatile signed short *, signed short,...);361 231 unsigned short __sync_lock_test_and_set(volatile unsigned short *, unsigned short,...); 362 unsigned short __sync_lock_test_and_set_2(volatile unsigned short *, unsigned short,...);363 232 signed int __sync_lock_test_and_set(volatile signed int *, signed int,...); 364 signed int __sync_lock_test_and_set_4(volatile signed int *, signed int,...);365 233 unsigned int __sync_lock_test_and_set(volatile unsigned int *, unsigned int,...); 366 unsigned int __sync_lock_test_and_set_4(volatile unsigned int *, unsigned int,...); 234 signed long int __sync_lock_test_and_set(volatile signed long int *, signed long int,...); 235 unsigned long int __sync_lock_test_and_set(volatile unsigned long int *, unsigned long int,...); 367 236 signed long long int __sync_lock_test_and_set(volatile signed long long int *, signed long long int,...); 368 signed long long int __sync_lock_test_and_set_8(volatile signed long long int *, signed long long int,...);369 237 unsigned long long int __sync_lock_test_and_set(volatile unsigned long long int *, unsigned long long int,...); 370 unsigned long long int __sync_lock_test_and_set_8(volatile unsigned long long int *, unsigned long long int,...);371 238 #if defined(__SIZEOF_INT128__) 372 239 signed __int128 
__sync_lock_test_and_set(volatile signed __int128 *, signed __int128,...); 373 signed __int128 __sync_lock_test_and_set_16(volatile signed __int128 *, signed __int128,...);374 240 unsigned __int128 __sync_lock_test_and_set(volatile unsigned __int128 *, unsigned __int128,...); 375 unsigned __int128 __sync_lock_test_and_set_16(volatile unsigned __int128 *, unsigned __int128,...);376 241 #endif 377 242 378 243 void __sync_lock_release(volatile char *,...); 379 void __sync_lock_release_1(volatile char *,...);380 244 void __sync_lock_release(volatile signed char *,...); 381 void __sync_lock_release_1(volatile signed char *,...);382 245 void __sync_lock_release(volatile unsigned char *,...); 383 void __sync_lock_release_1(volatile unsigned char *,...);384 246 void __sync_lock_release(volatile signed short *,...); 385 void __sync_lock_release_2(volatile signed short *,...);386 247 void __sync_lock_release(volatile unsigned short *,...); 387 void __sync_lock_release_2(volatile unsigned short *,...);388 248 void __sync_lock_release(volatile signed int *,...); 389 void __sync_lock_release_4(volatile signed int *,...);390 249 void __sync_lock_release(volatile unsigned int *,...); 391 void __sync_lock_release_4(volatile unsigned int *,...); 250 void __sync_lock_release(volatile signed long int *,...); 251 void __sync_lock_release(volatile unsigned long int *,...); 392 252 void __sync_lock_release(volatile signed long long int *,...); 393 void __sync_lock_release_8(volatile signed long long int *,...);394 253 void __sync_lock_release(volatile unsigned long long int *,...); 395 void __sync_lock_release_8(volatile unsigned long long int *,...);396 254 #if defined(__SIZEOF_INT128__) 397 255 void __sync_lock_release(volatile signed __int128 *,...); 398 void __sync_lock_release_16(volatile signed __int128 *,...);399 256 void __sync_lock_release(volatile unsigned __int128 *,...); 400 void __sync_lock_release_16(volatile unsigned __int128 *,...);401 257 #endif 402 258 … … 414 270 
_Bool __atomic_test_and_set(volatile signed int *, int); 415 271 _Bool __atomic_test_and_set(volatile unsigned int *, int); 272 _Bool __atomic_test_and_set(volatile signed long int *, int); 273 _Bool __atomic_test_and_set(volatile unsigned long int *, int); 416 274 _Bool __atomic_test_and_set(volatile signed long long int *, int); 417 275 _Bool __atomic_test_and_set(volatile unsigned long long int *, int); … … 429 287 void __atomic_clear(volatile signed int *, int); 430 288 void __atomic_clear(volatile unsigned int *, int); 289 void __atomic_clear(volatile signed long int *, int); 290 void __atomic_clear(volatile unsigned long int *, int); 431 291 void __atomic_clear(volatile signed long long int *, int); 432 292 void __atomic_clear(volatile unsigned long long int *, int); … … 436 296 #endif 437 297 298 _Bool __atomic_exchange_n(volatile _Bool *, _Bool, int); 299 void __atomic_exchange(volatile _Bool *, volatile _Bool *, volatile _Bool *, int); 438 300 char __atomic_exchange_n(volatile char *, char, int); 439 char __atomic_exchange_1(volatile char *, char, int);440 301 void __atomic_exchange(volatile char *, volatile char *, volatile char *, int); 441 302 signed char __atomic_exchange_n(volatile signed char *, signed char, int); 442 signed char __atomic_exchange_1(volatile signed char *, signed char, int);443 303 void __atomic_exchange(volatile signed char *, volatile signed char *, volatile signed char *, int); 444 304 unsigned char __atomic_exchange_n(volatile unsigned char *, unsigned char, int); 445 unsigned char __atomic_exchange_1(volatile unsigned char *, unsigned char, int);446 305 void __atomic_exchange(volatile unsigned char *, volatile unsigned char *, volatile unsigned char *, int); 447 306 signed short __atomic_exchange_n(volatile signed short *, signed short, int); 448 signed short __atomic_exchange_2(volatile signed short *, signed short, int);449 307 void __atomic_exchange(volatile signed short *, volatile signed short *, volatile signed short *, 
int); 450 308 unsigned short __atomic_exchange_n(volatile unsigned short *, unsigned short, int); 451 unsigned short __atomic_exchange_2(volatile unsigned short *, unsigned short, int);452 309 void __atomic_exchange(volatile unsigned short *, volatile unsigned short *, volatile unsigned short *, int); 453 310 signed int __atomic_exchange_n(volatile signed int *, signed int, int); 454 signed int __atomic_exchange_4(volatile signed int *, signed int, int);455 311 void __atomic_exchange(volatile signed int *, volatile signed int *, volatile signed int *, int); 456 312 unsigned int __atomic_exchange_n(volatile unsigned int *, unsigned int, int); 457 unsigned int __atomic_exchange_4(volatile unsigned int *, unsigned int, int);458 313 void __atomic_exchange(volatile unsigned int *, volatile unsigned int *, volatile unsigned int *, int); 314 signed long int __atomic_exchange_n(volatile signed long int *, signed long int, int); 315 void __atomic_exchange(volatile signed long int *, volatile signed long int *, volatile signed long int *, int); 316 unsigned long int __atomic_exchange_n(volatile unsigned long int *, unsigned long int, int); 317 void __atomic_exchange(volatile unsigned long int *, volatile unsigned long int *, volatile unsigned long int *, int); 459 318 signed long long int __atomic_exchange_n(volatile signed long long int *, signed long long int, int); 460 signed long long int __atomic_exchange_8(volatile signed long long int *, signed long long int, int);461 319 void __atomic_exchange(volatile signed long long int *, volatile signed long long int *, volatile signed long long int *, int); 462 320 unsigned long long int __atomic_exchange_n(volatile unsigned long long int *, unsigned long long int, int); 463 unsigned long long int __atomic_exchange_8(volatile unsigned long long int *, unsigned long long int, int);464 321 void __atomic_exchange(volatile unsigned long long int *, volatile unsigned long long int *, volatile unsigned long long int *, int); 465 322 
#if defined(__SIZEOF_INT128__) 466 323 signed __int128 __atomic_exchange_n(volatile signed __int128 *, signed __int128, int); 467 signed __int128 __atomic_exchange_16(volatile signed __int128 *, signed __int128, int);468 324 void __atomic_exchange(volatile signed __int128 *, volatile signed __int128 *, volatile signed __int128 *, int); 469 325 unsigned __int128 __atomic_exchange_n(volatile unsigned __int128 *, unsigned __int128, int); 470 unsigned __int128 __atomic_exchange_16(volatile unsigned __int128 *, unsigned __int128, int);471 326 void __atomic_exchange(volatile unsigned __int128 *, volatile unsigned __int128 *, volatile unsigned __int128 *, int); 472 327 #endif … … 477 332 void __atomic_load(const volatile _Bool *, volatile _Bool *, int); 478 333 char __atomic_load_n(const volatile char *, int); 479 char __atomic_load_1(const volatile char *, int);480 334 void __atomic_load(const volatile char *, volatile char *, int); 481 335 signed char __atomic_load_n(const volatile signed char *, int); 482 signed char __atomic_load_1(const volatile signed char *, int);483 336 void __atomic_load(const volatile signed char *, volatile signed char *, int); 484 337 unsigned char __atomic_load_n(const volatile unsigned char *, int); 485 unsigned char __atomic_load_1(const volatile unsigned char *, int);486 338 void __atomic_load(const volatile unsigned char *, volatile unsigned char *, int); 487 339 signed short __atomic_load_n(const volatile signed short *, int); 488 signed short __atomic_load_2(const volatile signed short *, int);489 340 void __atomic_load(const volatile signed short *, volatile signed short *, int); 490 341 unsigned short __atomic_load_n(const volatile unsigned short *, int); 491 unsigned short __atomic_load_2(const volatile unsigned short *, int);492 342 void __atomic_load(const volatile unsigned short *, volatile unsigned short *, int); 493 343 signed int __atomic_load_n(const volatile signed int *, int); 494 signed int __atomic_load_4(const volatile 
signed int *, int);495 344 void __atomic_load(const volatile signed int *, volatile signed int *, int); 496 345 unsigned int __atomic_load_n(const volatile unsigned int *, int); 497 unsigned int __atomic_load_4(const volatile unsigned int *, int);498 346 void __atomic_load(const volatile unsigned int *, volatile unsigned int *, int); 347 signed long int __atomic_load_n(const volatile signed long int *, int); 348 void __atomic_load(const volatile signed long int *, volatile signed long int *, int); 349 unsigned long int __atomic_load_n(const volatile unsigned long int *, int); 350 void __atomic_load(const volatile unsigned long int *, volatile unsigned long int *, int); 499 351 signed long long int __atomic_load_n(const volatile signed long long int *, int); 500 signed long long int __atomic_load_8(const volatile signed long long int *, int);501 352 void __atomic_load(const volatile signed long long int *, volatile signed long long int *, int); 502 353 unsigned long long int __atomic_load_n(const volatile unsigned long long int *, int); 503 unsigned long long int __atomic_load_8(const volatile unsigned long long int *, int);504 354 void __atomic_load(const volatile unsigned long long int *, volatile unsigned long long int *, int); 505 355 #if defined(__SIZEOF_INT128__) 506 356 signed __int128 __atomic_load_n(const volatile signed __int128 *, int); 507 signed __int128 __atomic_load_16(const volatile signed __int128 *, int);508 357 void __atomic_load(const volatile signed __int128 *, volatile signed __int128 *, int); 509 358 unsigned __int128 __atomic_load_n(const volatile unsigned __int128 *, int); 510 unsigned __int128 __atomic_load_16(const volatile unsigned __int128 *, int);511 359 void __atomic_load(const volatile unsigned __int128 *, volatile unsigned __int128 *, int); 512 360 #endif … … 515 363 516 364 _Bool __atomic_compare_exchange_n(volatile char *, char *, char, _Bool, int, int); 517 _Bool __atomic_compare_exchange_1(volatile char *, char *, char, _Bool, 
int, int);518 365 _Bool __atomic_compare_exchange (volatile char *, char *, char *, _Bool, int, int); 519 366 _Bool __atomic_compare_exchange_n(volatile signed char *, signed char *, signed char, _Bool, int, int); 520 _Bool __atomic_compare_exchange_1(volatile signed char *, signed char *, signed char, _Bool, int, int);521 367 _Bool __atomic_compare_exchange (volatile signed char *, signed char *, signed char *, _Bool, int, int); 522 368 _Bool __atomic_compare_exchange_n(volatile unsigned char *, unsigned char *, unsigned char, _Bool, int, int); 523 _Bool __atomic_compare_exchange_1(volatile unsigned char *, unsigned char *, unsigned char, _Bool, int, int);524 369 _Bool __atomic_compare_exchange (volatile unsigned char *, unsigned char *, unsigned char *, _Bool, int, int); 525 370 _Bool __atomic_compare_exchange_n(volatile signed short *, signed short *, signed short, _Bool, int, int); 526 _Bool __atomic_compare_exchange_2(volatile signed short *, signed short *, signed short, _Bool, int, int);527 371 _Bool __atomic_compare_exchange (volatile signed short *, signed short *, signed short *, _Bool, int, int); 528 372 _Bool __atomic_compare_exchange_n(volatile unsigned short *, unsigned short *, unsigned short, _Bool, int, int); 529 _Bool __atomic_compare_exchange_2(volatile unsigned short *, unsigned short *, unsigned short, _Bool, int, int);530 373 _Bool __atomic_compare_exchange (volatile unsigned short *, unsigned short *, unsigned short *, _Bool, int, int); 531 374 _Bool __atomic_compare_exchange_n(volatile signed int *, signed int *, signed int, _Bool, int, int); 532 _Bool __atomic_compare_exchange_4(volatile signed int *, signed int *, signed int, _Bool, int, int);533 375 _Bool __atomic_compare_exchange (volatile signed int *, signed int *, signed int *, _Bool, int, int); 534 376 _Bool __atomic_compare_exchange_n(volatile unsigned int *, unsigned int *, unsigned int, _Bool, int, int); 535 _Bool __atomic_compare_exchange_4(volatile unsigned int *, unsigned int 
*, unsigned int, _Bool, int, int);536 377 _Bool __atomic_compare_exchange (volatile unsigned int *, unsigned int *, unsigned int *, _Bool, int, int); 378 _Bool __atomic_compare_exchange_n(volatile signed long int *, signed long int *, signed long int, _Bool, int, int); 379 _Bool __atomic_compare_exchange (volatile signed long int *, signed long int *, signed long int *, _Bool, int, int); 380 _Bool __atomic_compare_exchange_n(volatile unsigned long int *, unsigned long int *, unsigned long int, _Bool, int, int); 381 _Bool __atomic_compare_exchange (volatile unsigned long int *, unsigned long int *, unsigned long int *, _Bool, int, int); 537 382 _Bool __atomic_compare_exchange_n(volatile signed long long int *, signed long long int *, signed long long int, _Bool, int, int); 538 _Bool __atomic_compare_exchange_8(volatile signed long long int *, signed long long int *, signed long long int, _Bool, int, int);539 383 _Bool __atomic_compare_exchange (volatile signed long long int *, signed long long int *, signed long long int *, _Bool, int, int); 540 384 _Bool __atomic_compare_exchange_n(volatile unsigned long long int *, unsigned long long int *, unsigned long long int, _Bool, int, int); 541 _Bool __atomic_compare_exchange_8(volatile unsigned long long int *, unsigned long long int *, unsigned long long int, _Bool, int, int);542 385 _Bool __atomic_compare_exchange (volatile unsigned long long int *, unsigned long long int *, unsigned long long int *, _Bool, int, int); 543 386 #if defined(__SIZEOF_INT128__) 544 387 _Bool __atomic_compare_exchange_n (volatile signed __int128 *, signed __int128 *, signed __int128, _Bool, int, int); 545 _Bool __atomic_compare_exchange_16(volatile signed __int128 *, signed __int128 *, signed __int128, _Bool, int, int);546 388 _Bool __atomic_compare_exchange (volatile signed __int128 *, signed __int128 *, signed __int128 *, _Bool, int, int); 547 389 _Bool __atomic_compare_exchange_n (volatile unsigned __int128 *, unsigned __int128 *, unsigned 
__int128, _Bool, int, int); 548 _Bool __atomic_compare_exchange_16(volatile unsigned __int128 *, unsigned __int128 *, unsigned __int128, _Bool, int, int);549 390 _Bool __atomic_compare_exchange (volatile unsigned __int128 *, unsigned __int128 *, unsigned __int128 *, _Bool, int, int); 550 391 #endif … … 555 396 void __atomic_store(volatile _Bool *, _Bool *, int); 556 397 void __atomic_store_n(volatile char *, char, int); 557 void __atomic_store_1(volatile char *, char, int);558 398 void __atomic_store(volatile char *, char *, int); 559 399 void __atomic_store_n(volatile signed char *, signed char, int); 560 void __atomic_store_1(volatile signed char *, signed char, int);561 400 void __atomic_store(volatile signed char *, signed char *, int); 562 401 void __atomic_store_n(volatile unsigned char *, unsigned char, int); 563 void __atomic_store_1(volatile unsigned char *, unsigned char, int);564 402 void __atomic_store(volatile unsigned char *, unsigned char *, int); 565 403 void __atomic_store_n(volatile signed short *, signed short, int); 566 void __atomic_store_2(volatile signed short *, signed short, int);567 404 void __atomic_store(volatile signed short *, signed short *, int); 568 405 void __atomic_store_n(volatile unsigned short *, unsigned short, int); 569 void __atomic_store_2(volatile unsigned short *, unsigned short, int);570 406 void __atomic_store(volatile unsigned short *, unsigned short *, int); 571 407 void __atomic_store_n(volatile signed int *, signed int, int); 572 void __atomic_store_4(volatile signed int *, signed int, int);573 408 void __atomic_store(volatile signed int *, signed int *, int); 574 409 void __atomic_store_n(volatile unsigned int *, unsigned int, int); 575 void __atomic_store_4(volatile unsigned int *, unsigned int, int);576 410 void __atomic_store(volatile unsigned int *, unsigned int *, int); 411 void __atomic_store_n(volatile signed long int *, signed long int, int); 412 void __atomic_store(volatile signed long int *, signed long 
int *, int); 413 void __atomic_store_n(volatile unsigned long int *, unsigned long int, int); 414 void __atomic_store(volatile unsigned long int *, unsigned long int *, int); 577 415 void __atomic_store_n(volatile signed long long int *, signed long long int, int); 578 void __atomic_store_8(volatile signed long long int *, signed long long int, int);579 416 void __atomic_store(volatile signed long long int *, signed long long int *, int); 580 417 void __atomic_store_n(volatile unsigned long long int *, unsigned long long int, int); 581 void __atomic_store_8(volatile unsigned long long int *, unsigned long long int, int);582 418 void __atomic_store(volatile unsigned long long int *, unsigned long long int *, int); 583 419 #if defined(__SIZEOF_INT128__) 584 420 void __atomic_store_n(volatile signed __int128 *, signed __int128, int); 585 void __atomic_store_16(volatile signed __int128 *, signed __int128, int);586 421 void __atomic_store(volatile signed __int128 *, signed __int128 *, int); 587 422 void __atomic_store_n(volatile unsigned __int128 *, unsigned __int128, int); 588 void __atomic_store_16(volatile unsigned __int128 *, unsigned __int128, int);589 423 void __atomic_store(volatile unsigned __int128 *, unsigned __int128 *, int); 590 424 #endif … … 593 427 594 428 char __atomic_add_fetch (volatile char *, char, int); 595 char __atomic_add_fetch_1(volatile char *, char, int);596 429 signed char __atomic_add_fetch (volatile signed char *, signed char, int); 597 signed char __atomic_add_fetch_1(volatile signed char *, signed char, int);598 430 unsigned char __atomic_add_fetch (volatile unsigned char *, unsigned char, int); 599 unsigned char __atomic_add_fetch_1(volatile unsigned char *, unsigned char, int);600 431 signed short __atomic_add_fetch (volatile signed short *, signed short, int); 601 signed short __atomic_add_fetch_2(volatile signed short *, signed short, int);602 432 unsigned short __atomic_add_fetch (volatile unsigned short *, unsigned short, int); 603 
unsigned short __atomic_add_fetch_2(volatile unsigned short *, unsigned short, int);604 433 signed int __atomic_add_fetch (volatile signed int *, signed int, int); 605 signed int __atomic_add_fetch_4(volatile signed int *, signed int, int);606 434 unsigned int __atomic_add_fetch (volatile unsigned int *, unsigned int, int); 607 unsigned int __atomic_add_fetch_4(volatile unsigned int *, unsigned int, int); 435 signed long int __atomic_add_fetch (volatile signed long int *, signed long int, int); 436 unsigned long int __atomic_add_fetch (volatile unsigned long int *, unsigned long int, int); 608 437 signed long long int __atomic_add_fetch (volatile signed long long int *, signed long long int, int); 609 signed long long int __atomic_add_fetch_8(volatile signed long long int *, signed long long int, int);610 438 unsigned long long int __atomic_add_fetch (volatile unsigned long long int *, unsigned long long int, int); 611 unsigned long long int __atomic_add_fetch_8(volatile unsigned long long int *, unsigned long long int, int);612 439 #if defined(__SIZEOF_INT128__) 613 440 signed __int128 __atomic_add_fetch (volatile signed __int128 *, signed __int128, int); 614 signed __int128 __atomic_add_fetch_16(volatile signed __int128 *, signed __int128, int);615 441 unsigned __int128 __atomic_add_fetch (volatile unsigned __int128 *, unsigned __int128, int); 616 unsigned __int128 __atomic_add_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);617 442 #endif 618 443 619 444 char __atomic_sub_fetch (volatile char *, char, int); 620 char __atomic_sub_fetch_1(volatile char *, char, int);621 445 signed char __atomic_sub_fetch (volatile signed char *, signed char, int); 622 signed char __atomic_sub_fetch_1(volatile signed char *, signed char, int);623 446 unsigned char __atomic_sub_fetch (volatile unsigned char *, unsigned char, int); 624 unsigned char __atomic_sub_fetch_1(volatile unsigned char *, unsigned char, int);625 447 signed short __atomic_sub_fetch (volatile 
signed short *, signed short, int); 626 signed short __atomic_sub_fetch_2(volatile signed short *, signed short, int);627 448 unsigned short __atomic_sub_fetch (volatile unsigned short *, unsigned short, int); 628 unsigned short __atomic_sub_fetch_2(volatile unsigned short *, unsigned short, int);629 449 signed int __atomic_sub_fetch (volatile signed int *, signed int, int); 630 signed int __atomic_sub_fetch_4(volatile signed int *, signed int, int);631 450 unsigned int __atomic_sub_fetch (volatile unsigned int *, unsigned int, int); 632 unsigned int __atomic_sub_fetch_4(volatile unsigned int *, unsigned int, int); 451 signed long long int __atomic_sub_fetch (volatile signed long int *, signed long int, int); 452 unsigned long long int __atomic_sub_fetch (volatile unsigned long int *, unsigned long int, int); 633 453 signed long long int __atomic_sub_fetch (volatile signed long long int *, signed long long int, int); 634 signed long long int __atomic_sub_fetch_8(volatile signed long long int *, signed long long int, int);635 454 unsigned long long int __atomic_sub_fetch (volatile unsigned long long int *, unsigned long long int, int); 636 unsigned long long int __atomic_sub_fetch_8(volatile unsigned long long int *, unsigned long long int, int);637 455 #if defined(__SIZEOF_INT128__) 638 456 signed __int128 __atomic_sub_fetch (volatile signed __int128 *, signed __int128, int); 639 signed __int128 __atomic_sub_fetch_16(volatile signed __int128 *, signed __int128, int);640 457 unsigned __int128 __atomic_sub_fetch (volatile unsigned __int128 *, unsigned __int128, int); 641 unsigned __int128 __atomic_sub_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);642 458 #endif 643 459 644 460 char __atomic_and_fetch (volatile char *, char, int); 645 char __atomic_and_fetch_1(volatile char *, char, int);646 461 signed char __atomic_and_fetch (volatile signed char *, signed char, int); 647 signed char __atomic_and_fetch_1(volatile signed char *, signed char, int);648 
462 unsigned char __atomic_and_fetch (volatile unsigned char *, unsigned char, int); 649 unsigned char __atomic_and_fetch_1(volatile unsigned char *, unsigned char, int);650 463 signed short __atomic_and_fetch (volatile signed short *, signed short, int); 651 signed short __atomic_and_fetch_2(volatile signed short *, signed short, int);652 464 unsigned short __atomic_and_fetch (volatile unsigned short *, unsigned short, int); 653 unsigned short __atomic_and_fetch_2(volatile unsigned short *, unsigned short, int);654 465 signed int __atomic_and_fetch (volatile signed int *, signed int, int); 655 signed int __atomic_and_fetch_4(volatile signed int *, signed int, int);656 466 unsigned int __atomic_and_fetch (volatile unsigned int *, unsigned int, int); 657 unsigned int __atomic_and_fetch_4(volatile unsigned int *, unsigned int, int); 467 signed long int __atomic_and_fetch (volatile signed long int *, signed long int, int); 468 unsigned long int __atomic_and_fetch (volatile unsigned long int *, unsigned long int, int); 658 469 signed long long int __atomic_and_fetch (volatile signed long long int *, signed long long int, int); 659 signed long long int __atomic_and_fetch_8(volatile signed long long int *, signed long long int, int);660 470 unsigned long long int __atomic_and_fetch (volatile unsigned long long int *, unsigned long long int, int); 661 unsigned long long int __atomic_and_fetch_8(volatile unsigned long long int *, unsigned long long int, int);662 471 #if defined(__SIZEOF_INT128__) 663 472 signed __int128 __atomic_and_fetch (volatile signed __int128 *, signed __int128, int); 664 signed __int128 __atomic_and_fetch_16(volatile signed __int128 *, signed __int128, int);665 473 unsigned __int128 __atomic_and_fetch (volatile unsigned __int128 *, unsigned __int128, int); 666 unsigned __int128 __atomic_and_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);667 474 #endif 668 475 669 476 char __atomic_nand_fetch (volatile char *, char, int); 670 char 
__atomic_nand_fetch_1(volatile char *, char, int);671 477 signed char __atomic_nand_fetch (volatile signed char *, signed char, int); 672 signed char __atomic_nand_fetch_1(volatile signed char *, signed char, int);673 478 unsigned char __atomic_nand_fetch (volatile unsigned char *, unsigned char, int); 674 unsigned char __atomic_nand_fetch_1(volatile unsigned char *, unsigned char, int);675 479 signed short __atomic_nand_fetch (volatile signed short *, signed short, int); 676 signed short __atomic_nand_fetch_2(volatile signed short *, signed short, int);677 480 unsigned short __atomic_nand_fetch (volatile unsigned short *, unsigned short, int); 678 unsigned short __atomic_nand_fetch_2(volatile unsigned short *, unsigned short, int);679 481 signed int __atomic_nand_fetch (volatile signed int *, signed int, int); 680 signed int __atomic_nand_fetch_4(volatile signed int *, signed int, int);681 482 unsigned int __atomic_nand_fetch (volatile unsigned int *, unsigned int, int); 682 unsigned int __atomic_nand_fetch_4(volatile unsigned int *, unsigned int, int); 483 signed long int __atomic_nand_fetch (volatile signed long int *, signed long int, int); 484 unsigned long int __atomic_nand_fetch (volatile unsigned long int *, unsigned long int, int); 683 485 signed long long int __atomic_nand_fetch (volatile signed long long int *, signed long long int, int); 684 signed long long int __atomic_nand_fetch_8(volatile signed long long int *, signed long long int, int);685 486 unsigned long long int __atomic_nand_fetch (volatile unsigned long long int *, unsigned long long int, int); 686 unsigned long long int __atomic_nand_fetch_8(volatile unsigned long long int *, unsigned long long int, int);687 487 #if defined(__SIZEOF_INT128__) 688 488 signed __int128 __atomic_nand_fetch (volatile signed __int128 *, signed __int128, int); 689 signed __int128 __atomic_nand_fetch_16(volatile signed __int128 *, signed __int128, int);690 489 unsigned __int128 __atomic_nand_fetch (volatile 
unsigned __int128 *, unsigned __int128, int); 691 unsigned __int128 __atomic_nand_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);692 490 #endif 693 491 694 492 char __atomic_xor_fetch (volatile char *, char, int); 695 char __atomic_xor_fetch_1(volatile char *, char, int);696 493 signed char __atomic_xor_fetch (volatile signed char *, signed char, int); 697 signed char __atomic_xor_fetch_1(volatile signed char *, signed char, int);698 494 unsigned char __atomic_xor_fetch (volatile unsigned char *, unsigned char, int); 699 unsigned char __atomic_xor_fetch_1(volatile unsigned char *, unsigned char, int);700 495 signed short __atomic_xor_fetch (volatile signed short *, signed short, int); 701 signed short __atomic_xor_fetch_2(volatile signed short *, signed short, int);702 496 unsigned short __atomic_xor_fetch (volatile unsigned short *, unsigned short, int); 703 unsigned short __atomic_xor_fetch_2(volatile unsigned short *, unsigned short, int);704 497 signed int __atomic_xor_fetch (volatile signed int *, signed int, int); 705 signed int __atomic_xor_fetch_4(volatile signed int *, signed int, int);706 498 unsigned int __atomic_xor_fetch (volatile unsigned int *, unsigned int, int); 707 unsigned int __atomic_xor_fetch_4(volatile unsigned int *, unsigned int, int); 499 signed long int __atomic_xor_fetch (volatile signed long int *, signed long int, int); 500 unsigned long int __atomic_xor_fetch (volatile unsigned long int *, unsigned long int, int); 708 501 signed long long int __atomic_xor_fetch (volatile signed long long int *, signed long long int, int); 709 signed long long int __atomic_xor_fetch_8(volatile signed long long int *, signed long long int, int);710 502 unsigned long long int __atomic_xor_fetch (volatile unsigned long long int *, unsigned long long int, int); 711 unsigned long long int __atomic_xor_fetch_8(volatile unsigned long long int *, unsigned long long int, int);712 503 #if defined(__SIZEOF_INT128__) 713 504 signed __int128 
__atomic_xor_fetch (volatile signed __int128 *, signed __int128, int); 714 signed __int128 __atomic_xor_fetch_16(volatile signed __int128 *, signed __int128, int);715 505 unsigned __int128 __atomic_xor_fetch (volatile unsigned __int128 *, unsigned __int128, int); 716 unsigned __int128 __atomic_xor_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);717 506 #endif 718 507 719 508 char __atomic_or_fetch (volatile char *, char, int); 720 char __atomic_or_fetch_1(volatile char *, char, int);721 509 signed char __atomic_or_fetch (volatile signed char *, signed char, int); 722 signed char __atomic_or_fetch_1(volatile signed char *, signed char, int);723 510 unsigned char __atomic_or_fetch (volatile unsigned char *, unsigned char, int); 724 unsigned char __atomic_or_fetch_1(volatile unsigned char *, unsigned char, int);725 511 signed short __atomic_or_fetch (volatile signed short *, signed short, int); 726 signed short __atomic_or_fetch_2(volatile signed short *, signed short, int);727 512 unsigned short __atomic_or_fetch (volatile unsigned short *, unsigned short, int); 728 unsigned short __atomic_or_fetch_2(volatile unsigned short *, unsigned short, int);729 513 signed int __atomic_or_fetch (volatile signed int *, signed int, int); 730 signed int __atomic_or_fetch_4(volatile signed int *, signed int, int);731 514 unsigned int __atomic_or_fetch (volatile unsigned int *, unsigned int, int); 732 unsigned int __atomic_or_fetch_4(volatile unsigned int *, unsigned int, int); 515 signed long int __atomic_or_fetch (volatile signed long int *, signed long int, int); 516 unsigned long int __atomic_or_fetch (volatile unsigned long int *, unsigned long int, int); 733 517 signed long long int __atomic_or_fetch (volatile signed long long int *, signed long long int, int); 734 signed long long int __atomic_or_fetch_8(volatile signed long long int *, signed long long int, int);735 518 unsigned long long int __atomic_or_fetch (volatile unsigned long long int *, unsigned long 
long int, int); 736 unsigned long long int __atomic_or_fetch_8(volatile unsigned long long int *, unsigned long long int, int);737 519 #if defined(__SIZEOF_INT128__) 738 520 signed __int128 __atomic_or_fetch (volatile signed __int128 *, signed __int128, int); 739 signed __int128 __atomic_or_fetch_16(volatile signed __int128 *, signed __int128, int);740 521 unsigned __int128 __atomic_or_fetch (volatile unsigned __int128 *, unsigned __int128, int); 741 unsigned __int128 __atomic_or_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);742 522 #endif 743 523 744 524 char __atomic_fetch_add (volatile char *, char, int); 745 char __atomic_fetch_add_1(volatile char *, char, int);746 525 signed char __atomic_fetch_add (volatile signed char *, signed char, int); 747 signed char __atomic_fetch_add_1(volatile signed char *, signed char, int);748 526 unsigned char __atomic_fetch_add (volatile unsigned char *, unsigned char, int); 749 unsigned char __atomic_fetch_add_1(volatile unsigned char *, unsigned char, int);750 527 signed short __atomic_fetch_add (volatile signed short *, signed short, int); 751 signed short __atomic_fetch_add_2(volatile signed short *, signed short, int);752 528 unsigned short __atomic_fetch_add (volatile unsigned short *, unsigned short, int); 753 unsigned short __atomic_fetch_add_2(volatile unsigned short *, unsigned short, int);754 529 signed int __atomic_fetch_add (volatile signed int *, signed int, int); 755 signed int __atomic_fetch_add_4(volatile signed int *, signed int, int);756 530 unsigned int __atomic_fetch_add (volatile unsigned int *, unsigned int, int); 757 unsigned int __atomic_fetch_add_4(volatile unsigned int *, unsigned int, int); 531 signed long int __atomic_fetch_add (volatile signed long int *, signed long int, int); 532 unsigned long int __atomic_fetch_add (volatile unsigned long int *, unsigned long int, int); 758 533 signed long long int __atomic_fetch_add (volatile signed long long int *, signed long long int, int); 
759 signed long long int __atomic_fetch_add_8(volatile signed long long int *, signed long long int, int);760 534 unsigned long long int __atomic_fetch_add (volatile unsigned long long int *, unsigned long long int, int); 761 unsigned long long int __atomic_fetch_add_8(volatile unsigned long long int *, unsigned long long int, int);762 535 #if defined(__SIZEOF_INT128__) 763 536 signed __int128 __atomic_fetch_add (volatile signed __int128 *, signed __int128, int); 764 signed __int128 __atomic_fetch_add_16(volatile signed __int128 *, signed __int128, int);765 537 unsigned __int128 __atomic_fetch_add (volatile unsigned __int128 *, unsigned __int128, int); 766 unsigned __int128 __atomic_fetch_add_16(volatile unsigned __int128 *, unsigned __int128, int);767 538 #endif 768 539 769 540 char __atomic_fetch_sub (volatile char *, char, int); 770 char __atomic_fetch_sub_1(volatile char *, char, int);771 541 signed char __atomic_fetch_sub (volatile signed char *, signed char, int); 772 signed char __atomic_fetch_sub_1(volatile signed char *, signed char, int);773 542 unsigned char __atomic_fetch_sub (volatile unsigned char *, unsigned char, int); 774 unsigned char __atomic_fetch_sub_1(volatile unsigned char *, unsigned char, int);775 543 signed short __atomic_fetch_sub (volatile signed short *, signed short, int); 776 signed short __atomic_fetch_sub_2(volatile signed short *, signed short, int);777 544 unsigned short __atomic_fetch_sub (volatile unsigned short *, unsigned short, int); 778 unsigned short __atomic_fetch_sub_2(volatile unsigned short *, unsigned short, int);779 545 signed int __atomic_fetch_sub (volatile signed int *, signed int, int); 780 signed int __atomic_fetch_sub_4(volatile signed int *, signed int, int);781 546 unsigned int __atomic_fetch_sub (volatile unsigned int *, unsigned int, int); 782 unsigned int __atomic_fetch_sub_4(volatile unsigned int *, unsigned int, int); 547 signed long int __atomic_fetch_sub (volatile signed long int *, signed long int, 
int); 548 unsigned long int __atomic_fetch_sub (volatile unsigned long int *, unsigned long int, int); 783 549 signed long long int __atomic_fetch_sub (volatile signed long long int *, signed long long int, int); 784 signed long long int __atomic_fetch_sub_8(volatile signed long long int *, signed long long int, int);785 550 unsigned long long int __atomic_fetch_sub (volatile unsigned long long int *, unsigned long long int, int); 786 unsigned long long int __atomic_fetch_sub_8(volatile unsigned long long int *, unsigned long long int, int);787 551 #if defined(__SIZEOF_INT128__) 788 552 signed __int128 __atomic_fetch_sub (volatile signed __int128 *, signed __int128, int); 789 signed __int128 __atomic_fetch_sub_16(volatile signed __int128 *, signed __int128, int);790 553 unsigned __int128 __atomic_fetch_sub (volatile unsigned __int128 *, unsigned __int128, int); 791 unsigned __int128 __atomic_fetch_sub_16(volatile unsigned __int128 *, unsigned __int128, int);792 554 #endif 793 555 794 556 char __atomic_fetch_and (volatile char *, char, int); 795 char __atomic_fetch_and_1(volatile char *, char, int);796 557 signed char __atomic_fetch_and (volatile signed char *, signed char, int); 797 signed char __atomic_fetch_and_1(volatile signed char *, signed char, int);798 558 unsigned char __atomic_fetch_and (volatile unsigned char *, unsigned char, int); 799 unsigned char __atomic_fetch_and_1(volatile unsigned char *, unsigned char, int);800 559 signed short __atomic_fetch_and (volatile signed short *, signed short, int); 801 signed short __atomic_fetch_and_2(volatile signed short *, signed short, int);802 560 unsigned short __atomic_fetch_and (volatile unsigned short *, unsigned short, int); 803 unsigned short __atomic_fetch_and_2(volatile unsigned short *, unsigned short, int);804 561 signed int __atomic_fetch_and (volatile signed int *, signed int, int); 805 signed int __atomic_fetch_and_4(volatile signed int *, signed int, int);806 562 unsigned int __atomic_fetch_and 
(volatile unsigned int *, unsigned int, int); 807 unsigned int __atomic_fetch_and_4(volatile unsigned int *, unsigned int, int); 563 signed long int __atomic_fetch_and (volatile signed long int *, signed long int, int); 564 unsigned long int __atomic_fetch_and (volatile unsigned long int *, unsigned long int, int); 808 565 signed long long int __atomic_fetch_and (volatile signed long long int *, signed long long int, int); 809 signed long long int __atomic_fetch_and_8(volatile signed long long int *, signed long long int, int);810 566 unsigned long long int __atomic_fetch_and (volatile unsigned long long int *, unsigned long long int, int); 811 unsigned long long int __atomic_fetch_and_8(volatile unsigned long long int *, unsigned long long int, int);812 567 #if defined(__SIZEOF_INT128__) 813 568 signed __int128 __atomic_fetch_and (volatile signed __int128 *, signed __int128, int); 814 signed __int128 __atomic_fetch_and_16(volatile signed __int128 *, signed __int128, int);815 569 unsigned __int128 __atomic_fetch_and (volatile unsigned __int128 *, unsigned __int128, int); 816 unsigned __int128 __atomic_fetch_and_16(volatile unsigned __int128 *, unsigned __int128, int);817 570 #endif 818 571 819 572 char __atomic_fetch_nand (volatile char *, char, int); 820 char __atomic_fetch_nand_1(volatile char *, char, int);821 573 signed char __atomic_fetch_nand (volatile signed char *, signed char, int); 822 signed char __atomic_fetch_nand_1(volatile signed char *, signed char, int);823 574 unsigned char __atomic_fetch_nand (volatile unsigned char *, unsigned char, int); 824 unsigned char __atomic_fetch_nand_1(volatile unsigned char *, unsigned char, int);825 575 signed short __atomic_fetch_nand (volatile signed short *, signed short, int); 826 signed short __atomic_fetch_nand_2(volatile signed short *, signed short, int);827 576 unsigned short __atomic_fetch_nand (volatile unsigned short *, unsigned short, int); 828 unsigned short __atomic_fetch_nand_2(volatile unsigned short 
*, unsigned short, int);829 577 signed int __atomic_fetch_nand (volatile signed int *, signed int, int); 830 signed int __atomic_fetch_nand_4(volatile signed int *, signed int, int);831 578 unsigned int __atomic_fetch_nand (volatile unsigned int *, unsigned int, int); 832 unsigned int __atomic_fetch_nand_4(volatile unsigned int *, unsigned int, int); 579 signed long int __atomic_fetch_nand (volatile signed long int *, signed long int, int); 580 unsigned long int __atomic_fetch_nand (volatile unsigned long int *, unsigned long int, int); 833 581 signed long long int __atomic_fetch_nand (volatile signed long long int *, signed long long int, int); 834 signed long long int __atomic_fetch_nand_8(volatile signed long long int *, signed long long int, int);835 582 unsigned long long int __atomic_fetch_nand (volatile unsigned long long int *, unsigned long long int, int); 836 unsigned long long int __atomic_fetch_nand_8(volatile unsigned long long int *, unsigned long long int, int);837 583 #if defined(__SIZEOF_INT128__) 838 584 signed __int128 __atomic_fetch_nand (volatile signed __int128 *, signed __int128, int); 839 signed __int128 __atomic_fetch_nand_16(volatile signed __int128 *, signed __int128, int);840 585 unsigned __int128 __atomic_fetch_nand (volatile unsigned __int128 *, unsigned __int128, int); 841 unsigned __int128 __atomic_fetch_nand_16(volatile unsigned __int128 *, unsigned __int128, int);842 586 #endif 843 587 844 588 char __atomic_fetch_xor (volatile char *, char, int); 845 char __atomic_fetch_xor_1(volatile char *, char, int);846 589 signed char __atomic_fetch_xor (volatile signed char *, signed char, int); 847 signed char __atomic_fetch_xor_1(volatile signed char *, signed char, int);848 590 unsigned char __atomic_fetch_xor (volatile unsigned char *, unsigned char, int); 849 unsigned char __atomic_fetch_xor_1(volatile unsigned char *, unsigned char, int);850 591 signed short __atomic_fetch_xor (volatile signed short *, signed short, int); 851 signed 
short __atomic_fetch_xor_2(volatile signed short *, signed short, int);852 592 unsigned short __atomic_fetch_xor (volatile unsigned short *, unsigned short, int); 853 unsigned short __atomic_fetch_xor_2(volatile unsigned short *, unsigned short, int);854 593 signed int __atomic_fetch_xor (volatile signed int *, signed int, int); 855 signed int __atomic_fetch_xor_4(volatile signed int *, signed int, int);856 594 unsigned int __atomic_fetch_xor (volatile unsigned int *, unsigned int, int); 857 unsigned int __atomic_fetch_xor_4(volatile unsigned int *, unsigned int, int); 595 signed long int __atomic_fetch_xor (volatile signed long int *, signed long int, int); 596 unsigned long int __atomic_fetch_xor (volatile unsigned long int *, unsigned long int, int); 858 597 signed long long int __atomic_fetch_xor (volatile signed long long int *, signed long long int, int); 859 signed long long int __atomic_fetch_xor_8(volatile signed long long int *, signed long long int, int);860 598 unsigned long long int __atomic_fetch_xor (volatile unsigned long long int *, unsigned long long int, int); 861 unsigned long long int __atomic_fetch_xor_8(volatile unsigned long long int *, unsigned long long int, int);862 599 #if defined(__SIZEOF_INT128__) 863 600 signed __int128 __atomic_fetch_xor (volatile signed __int128 *, signed __int128, int); 864 signed __int128 __atomic_fetch_xor_16(volatile signed __int128 *, signed __int128, int);865 601 unsigned __int128 __atomic_fetch_xor (volatile unsigned __int128 *, unsigned __int128, int); 866 unsigned __int128 __atomic_fetch_xor_16(volatile unsigned __int128 *, unsigned __int128, int);867 602 #endif 868 603 869 604 char __atomic_fetch_or (volatile char *, char, int); 870 char __atomic_fetch_or_1(volatile char *, char, int);871 605 signed char __atomic_fetch_or (volatile signed char *, signed char, int); 872 signed char __atomic_fetch_or_1(volatile signed char *, signed char, int);873 606 unsigned char __atomic_fetch_or (volatile unsigned char 
*, unsigned char, int); 874 unsigned char __atomic_fetch_or_1(volatile unsigned char *, unsigned char, int);875 607 signed short __atomic_fetch_or (volatile signed short *, signed short, int); 876 signed short __atomic_fetch_or_2(volatile signed short *, signed short, int);877 608 unsigned short __atomic_fetch_or (volatile unsigned short *, unsigned short, int); 878 unsigned short __atomic_fetch_or_2(volatile unsigned short *, unsigned short, int);879 609 signed int __atomic_fetch_or (volatile signed int *, signed int, int); 880 signed int __atomic_fetch_or_4(volatile signed int *, signed int, int);881 610 unsigned int __atomic_fetch_or (volatile unsigned int *, unsigned int, int); 882 unsigned int __atomic_fetch_or_4(volatile unsigned int *, unsigned int, int); 611 signed long int __atomic_fetch_or (volatile signed long int *, signed long int, int); 612 unsigned long int __atomic_fetch_or (volatile unsigned long int *, unsigned long int, int); 883 613 signed long long int __atomic_fetch_or (volatile signed long long int *, signed long long int, int); 884 signed long long int __atomic_fetch_or_8(volatile signed long long int *, signed long long int, int);885 614 unsigned long long int __atomic_fetch_or (volatile unsigned long long int *, unsigned long long int, int); 886 unsigned long long int __atomic_fetch_or_8(volatile unsigned long long int *, unsigned long long int, int);887 615 #if defined(__SIZEOF_INT128__) 888 616 signed __int128 __atomic_fetch_or (volatile signed __int128 *, signed __int128, int); 889 signed __int128 __atomic_fetch_or_16(volatile signed __int128 *, signed __int128, int);890 617 unsigned __int128 __atomic_fetch_or (volatile unsigned __int128 *, unsigned __int128, int); 891 unsigned __int128 __atomic_fetch_or_16(volatile unsigned __int128 *, unsigned __int128, int);892 618 #endif 893 619 -
libcfa/src/Makefile.am
raca6a54c r2fa5bd2 33 33 # The built sources must not depend on the installed headers 34 34 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 35 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC @ARCH_FLAGS@ @CONFIG_CFLAGS@35 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 36 36 AM_CCASFLAGS = -g -Wall -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 37 37 CFACC = @CFACC@ -
libcfa/src/Makefile.in
raca6a54c r2fa5bd2 416 416 LTCFACOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ 417 417 $(LIBTOOLFLAGS) --mode=compile $(CFACC) $(DEFS) \ 418 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(CFAFLAGS) \ 419 $(AM_CFLAGS) $(CFLAGS) 418 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(AM_CFLAGS) $(CFAFLAGS) $(CFLAGS) 420 419 421 420 AM_V_CFA = $(am__v_CFA_@AM_V@) … … 445 444 # The built sources must not depend on the installed headers 446 445 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 447 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC @ARCH_FLAGS@ @CONFIG_CFLAGS@446 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 448 447 AM_CCASFLAGS = -g -Wall -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 449 448 @BUILDLIB_FALSE@headers_nosrc = -
libcfa/src/assert.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jul 20 15:10:26 201713 // Update Count : 212 // Last Modified On : Thu Nov 21 17:09:26 2019 13 // Update Count : 5 14 14 // 15 15 … … 17 17 #include <stdarg.h> // varargs 18 18 #include <stdio.h> // fprintf 19 #include <unistd.h> // STDERR_FILENO 19 20 #include "bits/debug.hfa" 20 21 … … 26 27 // called by macro assert in assert.h 27 28 void __assert_fail( const char *assertion, const char *file, unsigned int line, const char *function ) { 28 __cfaabi_ dbg_bits_print_safe(CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file );29 __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file ); 29 30 abort(); 30 31 } … … 32 33 // called by macro assertf 33 34 void __assert_fail_f( const char *assertion, const char *file, unsigned int line, const char *function, const char *fmt, ... ) { 34 __cfaabi_ dbg_bits_acquire();35 __cfaabi_ dbg_bits_print_nolock(CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file );35 __cfaabi_bits_acquire(); 36 __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file ); 36 37 37 38 va_list args; 38 39 va_start( args, fmt ); 39 __cfaabi_ dbg_bits_print_vararg(fmt, args );40 __cfaabi_bits_print_vararg( STDERR_FILENO, fmt, args ); 40 41 va_end( args ); 41 42 42 __cfaabi_ dbg_bits_print_nolock("\n" );43 __cfaabi_ dbg_bits_release();43 __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" ); 44 __cfaabi_bits_release(); 44 45 abort(); 45 46 } -
libcfa/src/bits/align.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jul 21 23:05:35 201713 // Update Count : 212 // Last Modified On : Sat Nov 16 18:58:22 2019 13 // Update Count : 3 14 14 // 15 15 // This library is free software; you can redistribute it and/or modify it … … 33 33 34 34 // Minimum size used to align memory boundaries for memory allocations. 35 #define libAlign() (sizeof(double)) 35 //#define libAlign() (sizeof(double)) 36 // gcc-7 uses xmms instructions, which require 16 byte alignment. 37 #define libAlign() (16) 36 38 37 39 // Check for power of 2 -
libcfa/src/bits/debug.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Thu Mar 30 12:30:01 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 14 22:17:35201913 // Update Count : 412 // Last Modified On : Thu Nov 21 17:16:30 2019 13 // Update Count : 10 14 14 // 15 15 … … 28 28 extern "C" { 29 29 30 void __cfaabi_ dbg_bits_write(const char *in_buffer, int len ) {30 void __cfaabi_bits_write( int fd, const char *in_buffer, int len ) { 31 31 // ensure all data is written 32 32 for ( int count = 0, retcode; count < len; count += retcode ) { … … 34 34 35 35 for ( ;; ) { 36 retcode = write( STDERR_FILENO, in_buffer, len - count );36 retcode = write( fd, in_buffer, len - count ); 37 37 38 38 // not a timer interrupt ? … … 44 44 } 45 45 46 void __cfaabi_ dbg_bits_acquire() __attribute__((__weak__)) {}47 void __cfaabi_ dbg_bits_release() __attribute__((__weak__)) {}46 void __cfaabi_bits_acquire() __attribute__((__weak__)) {} 47 void __cfaabi_bits_release() __attribute__((__weak__)) {} 48 48 49 void __cfaabi_ dbg_bits_print_safe ( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) )) {49 void __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) { 50 50 va_list args; 51 51 52 52 va_start( args, fmt ); 53 __cfaabi_ dbg_bits_acquire();53 __cfaabi_bits_acquire(); 54 54 55 55 int len = vsnprintf( buffer, buffer_size, fmt, args ); 56 __cfaabi_ dbg_bits_write(buffer, len );56 __cfaabi_bits_write( fd, buffer, len ); 57 57 58 __cfaabi_ dbg_bits_release();58 __cfaabi_bits_release(); 59 59 va_end( args ); 60 60 } 61 61 62 void __cfaabi_ dbg_bits_print_nolock( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) )) {62 void __cfaabi_bits_print_nolock( int fd, const char fmt[], ... 
) __attribute__(( format(printf, 2, 3) )) { 63 63 va_list args; 64 64 … … 66 66 67 67 int len = vsnprintf( buffer, buffer_size, fmt, args ); 68 __cfaabi_ dbg_bits_write(buffer, len );68 __cfaabi_bits_write( fd, buffer, len ); 69 69 70 70 va_end( args ); 71 71 } 72 72 73 void __cfaabi_ dbg_bits_print_vararg(const char fmt[], va_list args ) {73 void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list args ) { 74 74 int len = vsnprintf( buffer, buffer_size, fmt, args ); 75 __cfaabi_ dbg_bits_write(buffer, len );75 __cfaabi_bits_write( fd, buffer, len ); 76 76 } 77 77 78 void __cfaabi_ dbg_bits_print_buffer( char in_buffer[], int in_buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 3, 4) )) {78 void __cfaabi_bits_print_buffer( int fd, char in_buffer[], int in_buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )) { 79 79 va_list args; 80 80 … … 82 82 83 83 int len = vsnprintf( in_buffer, in_buffer_size, fmt, args ); 84 __cfaabi_ dbg_bits_write(in_buffer, len );84 __cfaabi_bits_write( fd, in_buffer, len ); 85 85 86 86 va_end( args ); -
libcfa/src/bits/debug.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 8 12:35:19 201813 // Update Count : 212 // Last Modified On : Thu Nov 21 17:06:58 2019 13 // Update Count : 8 14 14 // 15 15 … … 38 38 #include <stdio.h> 39 39 40 extern void __cfaabi_dbg_bits_write(const char *buffer, int len );41 extern void __cfaabi_dbg_bits_acquire();42 extern void __cfaabi_dbg_bits_release();43 extern void __cfaabi_dbg_bits_print_safe ( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) ));44 extern void __cfaabi_dbg_bits_print_nolock( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) ));45 extern void __cfaabi_dbg_bits_print_vararg(const char fmt[], va_list arg );46 extern void __cfaabi_dbg_bits_print_buffer( char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 3, 4) ));40 extern void __cfaabi_bits_write( int fd, const char *buffer, int len ); 41 extern void __cfaabi_bits_acquire(); 42 extern void __cfaabi_bits_release(); 43 extern void __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )); 44 extern void __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )); 45 extern void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list arg ); 46 extern void __cfaabi_bits_print_buffer( int fd, char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )); 47 47 #ifdef __cforall 48 48 } … … 50 50 51 51 #ifdef __CFA_DEBUG_PRINT__ 52 #define __cfaabi_dbg_write( buffer, len ) __cfaabi_ dbg_bits_write(buffer, len )53 #define __cfaabi_dbg_acquire() __cfaabi_ dbg_bits_acquire()54 #define __cfaabi_dbg_release() __cfaabi_ dbg_bits_release()55 #define __cfaabi_dbg_print_safe(...) __cfaabi_ dbg_bits_print_safe (__VA_ARGS__)56 #define __cfaabi_dbg_print_nolock(...) 
__cfaabi_ dbg_bits_print_nolock (__VA_ARGS__)57 #define __cfaabi_dbg_print_buffer(...) __cfaabi_ dbg_bits_print_buffer (__VA_ARGS__)58 #define __cfaabi_dbg_print_buffer_decl(...) char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_ dbg_bits_write( __dbg_text, __dbg_len );59 #define __cfaabi_dbg_print_buffer_local(...) __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_ bits_write( __dbg_text, __dbg_len );52 #define __cfaabi_dbg_write( buffer, len ) __cfaabi_bits_write( STDERR_FILENO, buffer, len ) 53 #define __cfaabi_dbg_acquire() __cfaabi_bits_acquire() 54 #define __cfaabi_dbg_release() __cfaabi_bits_release() 55 #define __cfaabi_dbg_print_safe(...) __cfaabi_bits_print_safe (__VA_ARGS__) 56 #define __cfaabi_dbg_print_nolock(...) __cfaabi_bits_print_nolock (__VA_ARGS__) 57 #define __cfaabi_dbg_print_buffer(...) __cfaabi_bits_print_buffer (__VA_ARGS__) 58 #define __cfaabi_dbg_print_buffer_decl(...) char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( __dbg_text, __dbg_len ); 59 #define __cfaabi_dbg_print_buffer_local(...) __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_write( __dbg_text, __dbg_len ); 60 60 #else 61 61 #define __cfaabi_dbg_write(...) ((void)0) -
libcfa/src/bits/defs.hfa
raca6a54c r2fa5bd2 47 47 #define OPTIONAL_THREAD __attribute__((weak)) 48 48 #endif 49 50 static inline long long rdtscl(void) { 51 unsigned int lo, hi; 52 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); 53 return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 ); 54 } -
libcfa/src/concurrency/alarm.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Fri Jun 2 11:31:25 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri May 25 06:25:47 201813 // Update Count : 6 712 // Last Modified On : Tue Dec 3 22:47:24 2019 13 // Update Count : 68 14 14 // 15 15 … … 40 40 void __kernel_set_timer( Duration alarm ) { 41 41 verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv); 42 setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL);42 setitimer( ITIMER_REAL, &(itimerval){ alarm }, 0p ); 43 43 } 44 44 … … 113 113 this->tail = &this->head; 114 114 } 115 head->next = NULL;115 head->next = 0p; 116 116 } 117 117 verify( validate( this ) ); … … 127 127 this->tail = it; 128 128 } 129 n->next = NULL;129 n->next = 0p; 130 130 131 131 verify( validate( this ) ); -
libcfa/src/concurrency/coroutine.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 17:20:57 201813 // Update Count : 912 // Last Modified On : Thu Dec 5 14:37:29 2019 13 // Update Count : 15 14 14 // 15 15 … … 90 90 91 91 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) { 92 (this.context){ NULL, NULL};92 (this.context){0p, 0p}; 93 93 (this.stack){storage, storageSize}; 94 94 this.name = name; 95 95 state = Start; 96 starter = NULL;97 last = NULL;98 cancellation = NULL;96 starter = 0p; 97 last = 0p; 98 cancellation = 0p; 99 99 } 100 100 … … 131 131 132 132 [void *, size_t] __stack_alloc( size_t storageSize ) { 133 staticconst size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment133 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 134 134 assert(__page_size != 0l); 135 135 size_t size = libCeiling( storageSize, 16 ) + stack_data_size; … … 157 157 158 158 void __stack_prepare( __stack_info_t * this, size_t create_size ) { 159 staticconst size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment159 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 160 160 bool userStack; 161 161 void * storage; -
libcfa/src/concurrency/coroutine.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 21 17:49:39201913 // Update Count : 912 // Last Modified On : Tue Dec 3 22:47:58 2019 13 // Update Count : 10 14 14 // 15 15 … … 38 38 void ^?{}( coroutine_desc & this ); 39 39 40 static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", NULL, 0 }; }41 static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", NULL, stackSize }; }40 static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", 0p, 0 }; } 41 static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; } 42 42 static inline void ?{}( coroutine_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; } 43 static inline void ?{}( coroutine_desc & this, const char * name) { this{ name, NULL, 0 }; }44 static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; }43 static inline void ?{}( coroutine_desc & this, const char * name) { this{ name, 0p, 0 }; } 44 static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, 0p, stackSize }; } 45 45 46 46 //----------------------------------------------------------------------------- … … 89 89 src->state = Active; 90 90 91 if( unlikely(src->cancellation != NULL) ) {91 if( unlikely(src->cancellation != 0p) ) { 92 92 _CtxCoroutine_Unwind(src->cancellation, src); 93 93 } … … 128 128 coroutine_desc * dst = get_coroutine(cor); 129 129 130 if( unlikely(dst->context.SP == NULL) ) {130 if( unlikely(dst->context.SP == 0p) ) { 131 131 __stack_prepare(&dst->stack, 65000); 132 132 CtxStart(&cor, CtxInvokeCoroutine); -
libcfa/src/concurrency/invoke.h
raca6a54c r2fa5bd2 10 10 // Created On : Tue Jan 17 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 22 18:19:13 201913 // Update Count : 4 012 // Last Modified On : Thu Dec 5 16:26:03 2019 13 // Update Count : 44 14 14 // 15 15 … … 46 46 #ifdef __cforall 47 47 extern "Cforall" { 48 extern thread_local struct KernelThreadData {48 extern __attribute__((aligned(128))) thread_local struct KernelThreadData { 49 49 struct thread_desc * volatile this_thread; 50 50 struct processor * volatile this_processor; … … 55 55 volatile bool in_progress; 56 56 } preemption_state; 57 58 uint32_t rand_seed; 57 59 } kernelTLS __attribute__ ((tls_model ( "initial-exec" ))); 58 60 } … … 205 207 206 208 static inline void ?{}(__monitor_group_t & this) { 207 (this.data){ NULL};209 (this.data){0p}; 208 210 (this.size){0}; 209 211 (this.func){NULL}; -
libcfa/src/concurrency/kernel.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jun 20 17:21:23201913 // Update Count : 2512 // Last Modified On : Thu Dec 5 16:25:52 2019 13 // Update Count : 52 14 14 // 15 15 … … 26 26 #include <signal.h> 27 27 #include <unistd.h> 28 #include <limits.h> // PTHREAD_STACK_MIN 29 #include <sys/mman.h> // mprotect 28 30 } 29 31 … … 40 42 //----------------------------------------------------------------------------- 41 43 // Some assembly required 42 #if 44 #if defined( __i386 ) 43 45 #define CtxGet( ctx ) \ 44 46 __asm__ volatile ( \ … … 123 125 124 126 extern "C" { 125 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;127 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters; 126 128 } 127 129 … … 131 133 // Global state 132 134 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = { 135 NULL, // cannot use 0p 133 136 NULL, 134 NULL,135 { 1, false, false }137 { 1, false, false }, 138 6u //this should be seeded better but due to a bug calling rdtsc doesn't work 136 139 }; 137 140 … … 139 142 // Struct to steal stack 140 143 struct current_stack_info_t { 141 __stack_t * storage; // pointer to stack object142 void * base;// base of stack143 void * limit;// stack grows towards stack limit144 void * context;// address of cfa_context_t144 __stack_t * storage; // pointer to stack object 145 void * base; // base of stack 146 void * limit; // stack grows towards stack limit 147 void * context; // address of cfa_context_t 145 148 }; 146 149 … … 171 174 name = "Main Thread"; 172 175 state = Start; 173 starter = NULL;174 last = NULL;175 cancellation = NULL;176 starter = 0p; 177 last = 0p; 178 cancellation = 0p; 176 179 } 177 180 … … 184 187 self_mon.recursion = 1; 185 188 self_mon_p = &self_mon; 186 next = NULL;187 188 node.next = NULL;189 node.prev = NULL;189 next = 0p; 190 191 node.next 
= 0p; 192 node.prev = 0p; 190 193 doregister(curr_cluster, this); 191 194 … … 211 214 terminated{ 0 }; 212 215 do_terminate = false; 213 preemption_alarm = NULL;216 preemption_alarm = 0p; 214 217 pending_preemption = false; 215 218 runner.proc = &this; … … 231 234 } 232 235 233 pthread_join( kernel_thread, NULL ); 236 pthread_join( kernel_thread, 0p ); 237 free( this.stack ); 234 238 } 235 239 … … 260 264 //Main of the processor contexts 261 265 void main(processorCtx_t & runner) { 266 // Because of a bug, we couldn't initialized the seed on construction 267 // Do it here 268 kernelTLS.rand_seed ^= rdtscl(); 269 262 270 processor * this = runner.proc; 263 271 verify(this); … … 273 281 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); 274 282 275 thread_desc * readyThread = NULL; 276 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) 277 { 283 thread_desc * readyThread = 0p; 284 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) { 278 285 readyThread = nextThread( this->cltr ); 279 286 280 if(readyThread) 281 { 287 if(readyThread) { 282 288 verify( ! 
kernelTLS.preemption_state.enabled ); 283 289 … … 290 296 291 297 spin_count = 0; 292 } 293 else 294 { 298 } else { 295 299 // spin(this, &spin_count); 296 300 halt(this); … … 405 409 processor * proc = (processor *) arg; 406 410 kernelTLS.this_processor = proc; 407 kernelTLS.this_thread = NULL;411 kernelTLS.this_thread = 0p; 408 412 kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; 409 413 // SKULLDUGGERY: We want to create a context for the processor coroutine … … 418 422 419 423 //Set global state 420 kernelTLS.this_thread = NULL;424 kernelTLS.this_thread = 0p; 421 425 422 426 //We now have a proper context from which to schedule threads … … 434 438 __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner); 435 439 436 return NULL; 440 return 0p; 441 } 442 443 static void Abort( int ret, const char * func ) { 444 if ( ret ) { // pthread routines return errno values 445 abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) ); 446 } // if 447 } // Abort 448 449 void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 450 pthread_attr_t attr; 451 452 Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute 453 454 size_t stacksize; 455 // default stack size, normally defined by shell limit 456 Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" ); 457 assert( stacksize >= PTHREAD_STACK_MIN ); 458 459 void * stack; 460 __cfaabi_dbg_debug_do( 461 stack = memalign( __page_size, stacksize + __page_size ); 462 // pthread has no mechanism to create the guard page in user supplied stack. 
463 if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) { 464 abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 465 } // if 466 ); 467 __cfaabi_dbg_no_debug_do( 468 stack = malloc( stacksize ); 469 ); 470 471 Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" ); 472 473 Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" ); 474 return stack; 437 475 } 438 476 … … 440 478 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this); 441 479 442 pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );480 this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this ); 443 481 444 482 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); … … 497 535 verify( ! kernelTLS.preemption_state.enabled ); 498 536 499 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );537 verifyf( thrd->next == 0p, "Expected null got %p", thrd->next ); 500 538 501 539 with( *thrd->curr_cluster ) { … … 676 714 void ?{}(processorCtx_t & this, processor * proc) { 677 715 (this.__cor){ "Processor" }; 678 this.__cor.starter = NULL;716 this.__cor.starter = 0p; 679 717 this.proc = proc; 680 718 } … … 685 723 terminated{ 0 }; 686 724 do_terminate = false; 687 preemption_alarm = NULL;725 preemption_alarm = 0p; 688 726 pending_preemption = false; 689 727 kernel_thread = pthread_self(); … … 819 857 if(thrd) { 820 858 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd ); 821 __cfaabi_ dbg_bits_write(abort_text, len );859 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 822 860 823 861 if ( &thrd->self_cor != thrd->curr_cor ) { 824 862 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor ); 825 __cfaabi_ dbg_bits_write(abort_text, len );863 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 826 864 } 
827 865 else { 828 __cfaabi_ dbg_bits_write(".\n", 2 );866 __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 ); 829 867 } 830 868 } 831 869 else { 832 870 int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" ); 833 __cfaabi_ dbg_bits_write(abort_text, len );871 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 834 872 } 835 873 } … … 842 880 843 881 extern "C" { 844 void __cfaabi_ dbg_bits_acquire() {882 void __cfaabi_bits_acquire() { 845 883 lock( kernel_debug_lock __cfaabi_dbg_ctx2 ); 846 884 } 847 885 848 void __cfaabi_ dbg_bits_release() {886 void __cfaabi_bits_release() { 849 887 unlock( kernel_debug_lock ); 850 888 } … … 879 917 880 918 void V(semaphore & this) with( this ) { 881 thread_desc * thrd = NULL;919 thread_desc * thrd = 0p; 882 920 lock( lock __cfaabi_dbg_ctx2 ); 883 921 count += 1; -
libcfa/src/concurrency/kernel.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 22 11:39:17201913 // Update Count : 1 612 // Last Modified On : Wed Dec 4 07:54:51 2019 13 // Update Count : 18 14 14 // 15 15 … … 20 20 #include "invoke.h" 21 21 #include "time_t.hfa" 22 #include "coroutine.hfa" 22 23 23 24 extern "C" { … … 88 89 static inline void ?{}(FinishAction & this) { 89 90 this.action_code = No_Action; 90 this.thrd = NULL;91 this.lock = NULL;91 this.thrd = 0p; 92 this.lock = 0p; 92 93 } 93 94 static inline void ^?{}(FinishAction &) {} … … 134 135 semaphore terminated; 135 136 137 // pthread Stack 138 void * stack; 139 136 140 // Link lists fields 137 141 struct __dbg_node_proc { -
libcfa/src/concurrency/kernel_private.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Feb 13 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Mar 29 14:06:40 201813 // Update Count : 312 // Last Modified On : Sat Nov 30 19:25:02 2019 13 // Update Count : 8 14 14 // 15 15 … … 57 57 void main(processorCtx_t *); 58 58 59 void * create_pthread( pthread_t *, void * (*)(void *), void * ); 60 59 61 static inline void wake_fast(processor * this) { 60 62 __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this); … … 101 103 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)] 102 104 105 static inline uint32_t tls_rand() { 106 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6; 107 kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21; 108 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7; 109 return kernelTLS.rand_seed; 110 } 111 103 112 104 113 void doregister( struct cluster & cltr ); -
libcfa/src/concurrency/monitor.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 14:30:26 201813 // Update Count : 912 // Last Modified On : Wed Dec 4 07:55:14 2019 13 // Update Count : 10 14 14 // 15 15 … … 363 363 this.waiting_thread = waiting_thread; 364 364 this.count = count; 365 this.next = NULL;365 this.next = 0p; 366 366 this.user_info = user_info; 367 367 } … … 369 369 void ?{}(__condition_criterion_t & this ) with( this ) { 370 370 ready = false; 371 target = NULL;372 owner = NULL;373 next = NULL;371 target = 0p; 372 owner = 0p; 373 next = 0p; 374 374 } 375 375 … … 378 378 this.target = target; 379 379 this.owner = &owner; 380 this.next = NULL;380 this.next = 0p; 381 381 } 382 382 … … 387 387 388 388 // Check that everything is as expected 389 assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );389 assertf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors ); 390 390 verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count ); 391 391 verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count ); … … 449 449 450 450 // Lock all monitors 451 lock_all( this.monitors, NULL, count );451 lock_all( this.monitors, 0p, count ); 452 452 453 453 //Pop the head of the waiting queue … … 471 471 472 472 //Check that everything is as expected 473 verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );473 verifyf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors ); 474 474 verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count ); 475 475 … … 674 674 675 675 static inline void reset_mask( monitor_desc * this ) { 676 this->mask.accepted = NULL;677 this->mask.data = NULL;676 this->mask.accepted = 0p; 677 this->mask.data = 0p; 678 678 this->mask.size = 0; 679 679 } … … 816 816 } 817 817 818 
__cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL);819 return ready2run ? node->waiting_thread : NULL;818 __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p ); 819 return ready2run ? node->waiting_thread : 0p; 820 820 } 821 821 … … 824 824 if( !this.monitors ) { 825 825 // __cfaabi_dbg_print_safe( "Branding\n" ); 826 assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );826 assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data ); 827 827 this.monitor_count = thrd->monitors.size; 828 828 -
libcfa/src/concurrency/monitor.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Oct 7 18:06:45 201713 // Update Count : 1 012 // Last Modified On : Wed Dec 4 07:55:32 2019 13 // Update Count : 11 14 14 // 15 15 … … 31 31 entry_queue{}; 32 32 signal_stack{}; 33 owner = NULL;33 owner = 0p; 34 34 recursion = 0; 35 mask.accepted = NULL;36 mask.data = NULL;35 mask.accepted = 0p; 36 mask.data = 0p; 37 37 mask.size = 0; 38 dtor_node = NULL;38 dtor_node = 0p; 39 39 } 40 41 static inline void ^?{}(monitor_desc & ) {} 40 42 41 43 struct monitor_guard_t { … … 120 122 121 123 static inline void ?{}( condition & this ) { 122 this.monitors = NULL;124 this.monitors = 0p; 123 125 this.monitor_count = 0; 124 126 } -
libcfa/src/concurrency/mutex.cfa
raca6a54c r2fa5bd2 11 11 // Author : Thierry Delisle 12 12 // Created On : Fri May 25 01:37:11 2018 13 // Last Modified By : Thierry Delisle14 // Last Modified On : Fri May 25 01:37:51 201815 // Update Count : 013 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Wed Dec 4 09:16:39 2019 15 // Update Count : 1 16 16 // 17 17 … … 73 73 this.lock{}; 74 74 this.blocked_threads{}; 75 this.owner = NULL;75 this.owner = 0p; 76 76 this.recursion_count = 0; 77 77 } … … 83 83 void lock(recursive_mutex_lock & this) with(this) { 84 84 lock( lock __cfaabi_dbg_ctx2 ); 85 if( owner == NULL) {85 if( owner == 0p ) { 86 86 owner = kernelTLS.this_thread; 87 87 recursion_count = 1; … … 101 101 bool ret = false; 102 102 lock( lock __cfaabi_dbg_ctx2 ); 103 if( owner == NULL) {103 if( owner == 0p ) { 104 104 owner = kernelTLS.this_thread; 105 105 recursion_count = 1; -
libcfa/src/concurrency/mutex.hfa
raca6a54c r2fa5bd2 11 11 // Author : Thierry Delisle 12 12 // Created On : Fri May 25 01:24:09 2018 13 // Last Modified By : Thierry Delisle14 // Last Modified On : Fri May 25 01:24:12 201815 // Update Count : 013 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Wed Dec 4 09:16:53 2019 15 // Update Count : 1 16 16 // 17 17 … … 110 110 111 111 static inline void ?{}(lock_scope(L) & this) { 112 this.locks = NULL;112 this.locks = 0p; 113 113 this.count = 0; 114 114 } -
libcfa/src/concurrency/preemption.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jun 5 17:35:49 201813 // Update Count : 3712 // Last Modified On : Thu Dec 5 16:34:05 2019 13 // Update Count : 43 14 14 // 15 15 … … 24 24 #include <string.h> 25 25 #include <unistd.h> 26 #include <limits.h> // PTHREAD_STACK_MIN 26 27 } 27 28 … … 64 65 event_kernel_t * event_kernel; // kernel public handle to even kernel 65 66 static pthread_t alarm_thread; // pthread handle to alarm thread 67 static void * alarm_stack; // pthread stack for alarm thread 66 68 67 69 static void ?{}(event_kernel_t & this) with( this ) { … … 81 83 // Get next expired node 82 84 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) { 83 if( !alarms->head ) return NULL;// If no alarms return null84 if( alarms->head->alarm >= currtime ) return NULL;// If alarms head not expired return null85 return pop(alarms); 85 if( !alarms->head ) return 0p; // If no alarms return null 86 if( alarms->head->alarm >= currtime ) return 0p; // If alarms head not expired return null 87 return pop(alarms); // Otherwise just pop head 86 88 } 87 89 88 90 // Tick one frame of the Discrete Event Simulation for alarms 89 91 static void tick_preemption() { 90 alarm_node_t * node = NULL;// Used in the while loop but cannot be declared in the while condition91 alarm_list_t * alarms = &event_kernel->alarms; 92 Time currtime = __kernel_get_time(); // Check current time once so weeverything "happens at once"92 alarm_node_t * node = 0p; // Used in the while loop but cannot be declared in the while condition 93 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading 94 Time currtime = __kernel_get_time(); // Check current time once so everything "happens at once" 93 95 94 96 //Loop throught every thing expired … … 243 245 sigaddset( &mask, sig ); 244 246 245 if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL) == -1 ) {247 if ( 
pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) { 246 248 abort( "internal error, pthread_sigmask" ); 247 249 } … … 254 256 sigaddset( &mask, sig ); 255 257 256 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL) == -1 ) {258 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 257 259 abort( "internal error, pthread_sigmask" ); 258 260 } … … 301 303 302 304 // Setup proper signal handlers 303 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); 305 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler 304 306 305 307 signal_block( SIGALRM ); 306 308 307 pthread_create( &alarm_thread, NULL, alarm_loop, NULL);309 alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p ); 308 310 } 309 311 … … 316 318 sigset_t mask; 317 319 sigfillset( &mask ); 318 sigprocmask( SIG_BLOCK, &mask, NULL);320 sigprocmask( SIG_BLOCK, &mask, 0p ); 319 321 320 322 // Notify the alarm thread of the shutdown … … 323 325 324 326 // Wait for the preemption thread to finish 325 pthread_join( alarm_thread, NULL ); 327 328 pthread_join( alarm_thread, 0p ); 329 free( alarm_stack ); 326 330 327 331 // Preemption is now fully stopped … … 380 384 static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" ); 381 385 #endif 382 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL) == -1 ) {386 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) { 383 387 abort( "internal error, sigprocmask" ); 384 388 } … … 399 403 sigset_t mask; 400 404 sigfillset(&mask); 401 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL) == -1 ) {405 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 402 406 abort( "internal error, pthread_sigmask" ); 403 407 } … … 420 424 {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );} 421 425 continue; 422 426 case EINVAL : 423 427 abort( "Timeout was invalid." 
); 424 428 default: … … 453 457 EXIT: 454 458 __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" ); 455 return NULL;459 return 0p; 456 460 } 457 461 … … 466 470 sigset_t oldset; 467 471 int ret; 468 ret = pthread_sigmask(0, NULL, &oldset);472 ret = pthread_sigmask(0, 0p, &oldset); 469 473 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 470 474 -
libcfa/src/concurrency/thread.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 17:19:52 201813 // Update Count : 812 // Last Modified On : Wed Dec 4 09:17:49 2019 13 // Update Count : 9 14 14 // 15 15 … … 33 33 // Thread ctors and dtors 34 34 void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) { 35 context{ NULL, NULL};35 context{ 0p, 0p }; 36 36 self_cor{ name, storage, storageSize }; 37 37 state = Start; … … 41 41 self_mon_p = &self_mon; 42 42 curr_cluster = &cl; 43 next = NULL;43 next = 0p; 44 44 45 node.next = NULL;46 node.prev = NULL;45 node.next = 0p; 46 node.prev = 0p; 47 47 doregister(curr_cluster, this); 48 48 -
libcfa/src/concurrency/thread.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 21 17:51:33201913 // Update Count : 512 // Last Modified On : Wed Dec 4 09:18:14 2019 13 // Update Count : 6 14 14 // 15 15 … … 61 61 void ^?{}(thread_desc & this); 62 62 63 static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }64 static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }63 static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; } 64 static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; } 65 65 static inline void ?{}(thread_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; } 66 static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 65000 }; }67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, NULL, stackSize }; }66 static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; } 67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; } 68 68 static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; } 69 static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 65000 }; }70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 65000 }; }71 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }69 static 
inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; } 70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; } 71 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; } 72 72 73 73 //----------------------------------------------------------------------------- -
libcfa/src/fstream.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Sep 10 22:19:56 201913 // Update Count : 35 412 // Last Modified On : Fri Nov 29 06:56:46 2019 13 // Update Count : 355 14 14 // 15 15 … … 66 66 } // ?{} 67 67 68 void ^?{}( ofstream & os ) { 69 close( os ); 70 } // ^?{} 71 68 72 void sepOn( ofstream & os ) { os.sepOnOff = ! getNL( os ); } 69 73 void sepOff( ofstream & os ) { os.sepOnOff = false; } … … 195 199 } // ?{} 196 200 201 void ^?{}( ifstream & is ) { 202 close( is ); 203 } // ^?{} 204 197 205 void nlOn( ifstream & os ) { os.nlOnOff = true; } 198 206 void nlOff( ifstream & os ) { os.nlOnOff = false; } -
libcfa/src/fstream.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jul 15 18:10:23201913 // Update Count : 16 712 // Last Modified On : Fri Nov 29 06:56:02 2019 13 // Update Count : 168 14 14 // 15 15 … … 72 72 void ?{}( ofstream & os, const char * name, const char * mode ); 73 73 void ?{}( ofstream & os, const char * name ); 74 void ^?{}( ofstream & os ); 74 75 75 76 extern ofstream & sout, & stdout, & serr, & stderr; // aliases … … 101 102 void ?{}( ifstream & is, const char * name, const char * mode ); 102 103 void ?{}( ifstream & is, const char * name ); 104 void ^?{}( ifstream & is ); 103 105 104 106 extern ifstream & sin, & stdin; // aliases -
libcfa/src/heap.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Oct 18 07:42:09201913 // Update Count : 55612 // Last Modified On : Wed Dec 4 21:42:46 2019 13 // Update Count : 646 14 14 // 15 15 … … 18 18 #include <stdio.h> // snprintf, fileno 19 19 #include <errno.h> // errno 20 #include <string.h> // memset, memcpy 20 21 extern "C" { 21 22 #include <sys/mman.h> // mmap, munmap … … 27 28 #include "bits/locks.hfa" // __spinlock_t 28 29 #include "startup.hfa" // STARTUP_PRIORITY_MEMORY 29 #include "stdlib.hfa" // bsearchl30 //#include "stdlib.hfa" // bsearchl 30 31 #include "malloc.h" 31 32 33 #define MIN(x, y) (y > x ? x : y) 32 34 33 35 static bool traceHeap = false; 34 36 35 inline bool traceHeap() { 36 return traceHeap; 37 } // traceHeap 37 inline bool traceHeap() { return traceHeap; } 38 38 39 39 bool traceHeapOn() { … … 49 49 } // traceHeapOff 50 50 51 52 static bool checkFree = false; 53 54 inline bool checkFree() { 55 return checkFree; 56 } // checkFree 57 58 bool checkFreeOn() { 59 bool temp = checkFree; 60 checkFree = true; 51 bool traceHeapTerm() { return false; } 52 53 54 static bool prtFree = false; 55 56 inline bool prtFree() { 57 return prtFree; 58 } // prtFree 59 60 bool prtFreeOn() { 61 bool temp = prtFree; 62 prtFree = true; 61 63 return temp; 62 } // checkFreeOn63 64 bool checkFreeOff() {65 bool temp = checkFree;66 checkFree = false;64 } // prtFreeOn 65 66 bool prtFreeOff() { 67 bool temp = prtFree; 68 prtFree = false; 67 69 return temp; 68 } // checkFreeOff 69 70 71 // static bool traceHeapTerm = false; 72 73 // inline bool traceHeapTerm() { 74 // return traceHeapTerm; 75 // } // traceHeapTerm 76 77 // bool traceHeapTermOn() { 78 // bool temp = traceHeapTerm; 79 // traceHeapTerm = true; 80 // return temp; 81 // } // traceHeapTermOn 82 83 // bool traceHeapTermOff() { 84 // bool temp = traceHeapTerm; 85 // traceHeapTerm = false; 86 // return temp; 87 // } // traceHeapTermOff 
70 } // prtFreeOff 88 71 89 72 90 73 enum { 74 // Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address, 75 // the brk address is extended by the extension amount. 76 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024), 77 78 // Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets; 79 // values greater than or equal to this value are mmap from the operating system. 91 80 __CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1), 92 __CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),93 81 }; 94 82 … … 105 93 static unsigned int allocFree; // running total of allocations minus frees 106 94 107 static void checkUnfreed() {95 static void prtUnfreed() { 108 96 if ( allocFree != 0 ) { 109 97 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 110 //char helpText[512];111 //int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"112 //"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",113 //(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid114 // __cfaabi_dbg_bits_write( helpText, len );115 } // if 116 } // checkUnfreed98 char helpText[512]; 99 int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n" 100 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 101 (long int)getpid(), allocFree, allocFree ); // always print the UNIX pid 102 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 103 } // if 104 } // prtUnfreed 117 105 118 106 extern "C" { … … 123 111 void heapAppStop() { // called by __cfaabi_appready_startdown 124 112 fclose( stdin ); fclose( stdout ); 125 checkUnfreed();113 
prtUnfreed(); 126 114 } // heapAppStop 127 115 } // extern "C" 128 116 #endif // __CFA_DEBUG__ 117 129 118 130 119 // statically allocated variables => zero filled. … … 134 123 static unsigned int maxBucketsUsed; // maximum number of buckets in use 135 124 136 137 // #comment TD : This defined is significantly different from the __ALIGN__ define from locks.hfa138 #define ALIGN 16139 125 140 126 #define SPINLOCK 0 … … 147 133 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 148 134 // Break recusion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 149 enum { NoBucketSizes = 9 3}; // number of buckets sizes135 enum { NoBucketSizes = 91 }; // number of buckets sizes 150 136 151 137 struct HeapManager { … … 194 180 } kind; // Kind 195 181 } header; // Header 196 char pad[ ALIGN- sizeof( Header )];182 char pad[libAlign() - sizeof( Header )]; 197 183 char data[0]; // storage 198 184 }; // Storage 199 185 200 static_assert( ALIGN >= sizeof( Storage ), "ALIGN< sizeof( Storage )" );186 static_assert( libAlign() >= sizeof( Storage ), "libAlign() < sizeof( Storage )" ); 201 187 202 188 struct FreeHeader { … … 228 214 #define __STATISTICS__ 229 215 216 // Bucket size must be multiple of 16. 230 217 // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size. 
231 218 static const unsigned int bucketSizes[] @= { // different bucket sizes 232 16, 32, 48, 64, 233 64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224, 234 256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896, 235 1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144, 236 8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 237 16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 238 32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 239 65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 240 131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 241 262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 242 524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792, 243 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016, 244 4_194_304 + sizeof(HeapManager.Storage) 219 16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4 220 96, 112, 128 + sizeof(HeapManager.Storage), // 3 221 160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4 222 320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4 223 640, 768, 896, 1_024 + sizeof(HeapManager.Storage), // 4 224 1_536, 2_048 + sizeof(HeapManager.Storage), // 2 225 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), // 4 226 6_144, 8_192 + sizeof(HeapManager.Storage), // 2 227 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(HeapManager.Storage), // 8 228 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 
+ sizeof(HeapManager.Storage), // 8 229 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(HeapManager.Storage), // 8 230 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(HeapManager.Storage), // 8 231 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(HeapManager.Storage), // 8 232 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(HeapManager.Storage), // 8 233 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), // 4 234 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), // 8 235 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(HeapManager.Storage), // 4 245 236 }; 246 237 … … 251 242 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 252 243 #endif // FASTLOOKUP 244 253 245 static int mmapFd = -1; // fake or actual fd for anonymous file 254 255 256 246 #ifdef __CFA_DEBUG__ 257 247 static bool heapBoot = 0; // detect recursion during boot … … 259 249 static HeapManager heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 260 250 261 // #comment TD : The return type of this function should be commented262 static inline bool setMmapStart( size_t value ) {263 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;264 mmapStart = value; // set global265 266 // find the closest bucket size less than or equal to the mmapStart size267 maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search268 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ?269 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?270 return false;271 } // setMmapStart272 273 274 static void ?{}( HeapManager & manager ) with ( manager ) {275 pageSize = sysconf( _SC_PAGESIZE );276 277 for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize 
the free lists278 freeLists[i].blockSize = bucketSizes[i];279 } // for280 281 #ifdef FASTLOOKUP282 unsigned int idx = 0;283 for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {284 if ( i > bucketSizes[idx] ) idx += 1;285 lookup[i] = idx;286 } // for287 #endif // FASTLOOKUP288 289 if ( setMmapStart( default_mmap_start() ) ) {290 abort( "HeapManager : internal error, mmap start initialization failure." );291 } // if292 heapExpand = default_heap_expansion();293 294 char * End = (char *)sbrk( 0 );295 sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End ); // move start of heap to multiple of alignment296 heapBegin = heapEnd = sbrk( 0 ); // get new start point297 } // HeapManager298 299 300 static void ^?{}( HeapManager & ) {301 #ifdef __STATISTICS__302 // if ( traceHeapTerm() ) {303 // printStats();304 // if ( checkfree() ) checkFree( heapManager, true );305 // } // if306 #endif // __STATISTICS__307 } // ~HeapManager308 309 310 static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));311 void memory_startup( void ) {312 #ifdef __CFA_DEBUG__313 if ( unlikely( heapBoot ) ) { // check for recursion during system boot314 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.315 abort( "boot() : internal error, recursively invoked during system boot." );316 } // if317 heapBoot = true;318 #endif // __CFA_DEBUG__319 320 //assert( heapManager.heapBegin != 0 );321 //heapManager{};322 if ( heapManager.heapBegin == 0 ) heapManager{};323 } // memory_startup324 325 static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));326 void memory_shutdown( void ) {327 ^heapManager{};328 } // memory_shutdown329 330 251 331 252 #ifdef __STATISTICS__ 332 static unsigned long long int mmap_storage; // heap statistics counters 253 // Heap statistics counters. 
254 static unsigned long long int mmap_storage; 333 255 static unsigned int mmap_calls; 334 256 static unsigned long long int munmap_storage; … … 348 270 static unsigned long long int realloc_storage; 349 271 static unsigned int realloc_calls; 350 351 static int statfd; // statistics file descriptor (changed by malloc_stats_fd) 352 272 // Statistics file descriptor (changed by malloc_stats_fd). 273 static int statfd = STDERR_FILENO; // default stderr 353 274 354 275 // Use "write" because streams may be shutdown when calls are made. 355 276 static void printStats() { 356 277 char helpText[512]; 357 __cfaabi_ dbg_bits_print_buffer(helpText, sizeof(helpText),278 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 358 279 "\nHeap statistics:\n" 359 280 " malloc: calls %u / storage %llu\n" … … 405 326 sbrk_calls, sbrk_storage 406 327 ); 407 return write( fileno( stream ), helpText, len ); // -1 => error 328 __cfaabi_bits_write( fileno( stream ), helpText, len ); // ensures all bytes written or exit 329 return len; 408 330 } // printStatsXML 409 331 #endif // __STATISTICS__ 332 410 333 411 334 // #comment TD : Is this the samething as Out-of-Memory? … … 418 341 419 342 static inline void checkAlign( size_t alignment ) { 420 if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {421 abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment);343 if ( alignment < libAlign() || ! libPow2( alignment ) ) { 344 abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() ); 422 345 } // if 423 346 } // checkAlign … … 431 354 432 355 433 static inline void checkHeader( bool check, const char * name, void * addr ) { 434 if ( unlikely( check ) ) { // bad address ? 
435 abort( "Attempt to %s storage %p with address outside the heap.\n" 436 "Possible cause is duplicate free on same block or overwriting of memory.", 437 name, addr ); 438 } // if 439 } // checkHeader 440 441 // #comment TD : function should be commented and/or have a more evocative name 442 // this isn't either a check or a constructor which is what I would expect this function to be 443 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) { 444 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 445 size_t offset = header->kind.fake.offset; 446 alignment = header->kind.fake.alignment & -2; // remove flag from value 447 #ifdef __CFA_DEBUG__ 448 checkAlign( alignment ); // check alignment 449 #endif // __CFA_DEBUG__ 450 header = (HeapManager.Storage.Header *)((char *)header - offset); 451 } // if 452 } // fakeHeader 453 454 // #comment TD : Why is this a define 455 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) )) 456 457 static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) { 458 header = headerAddr( addr ); 459 460 if ( unlikely( heapEnd < addr ) ) { // mmapped ? 461 fakeHeader( header, size, alignment ); 462 size = header->kind.real.blockSize & -3; // mmap size 463 return true; 464 } // if 465 466 #ifdef __CFA_DEBUG__ 467 checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ? 468 #endif // __CFA_DEBUG__ 469 470 // #comment TD : This code looks weird... 
471 // It's called as the first statement of both branches of the last if, with the same parameters in all cases 472 473 // header may be safe to dereference 474 fakeHeader( header, size, alignment ); 475 #ifdef __CFA_DEBUG__ 476 checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 477 #endif // __CFA_DEBUG__ 478 479 freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3); 480 #ifdef __CFA_DEBUG__ 481 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) { 482 abort( "Attempt to %s storage %p with corrupted header.\n" 483 "Possible cause is duplicate free on same block or overwriting of header information.", 484 name, addr ); 485 } // if 486 #endif // __CFA_DEBUG__ 487 size = freeElem->blockSize; 488 return false; 489 } // headers 490 491 492 static inline void * extend( size_t size ) with ( heapManager ) { 493 lock( extlock __cfaabi_dbg_ctx2 ); 494 ptrdiff_t rem = heapRemaining - size; 495 if ( rem < 0 ) { 496 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 497 498 size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() ); 499 if ( sbrk( increase ) == (void *)-1 ) { 500 unlock( extlock ); 501 errno = ENOMEM; 502 return 0; 503 } // if 504 #ifdef __STATISTICS__ 505 sbrk_calls += 1; 506 sbrk_storage += increase; 507 #endif // __STATISTICS__ 508 #ifdef __CFA_DEBUG__ 509 // Set new memory to garbage so subsequent uninitialized usages might fail. 
510 memset( (char *)heapEnd + heapRemaining, '\377', increase ); 511 #endif // __CFA_DEBUG__ 512 rem = heapRemaining + increase - size; 513 } // if 514 515 HeapManager.Storage * block = (HeapManager.Storage *)heapEnd; 516 heapRemaining = rem; 517 heapEnd = (char *)heapEnd + size; 518 unlock( extlock ); 519 return block; 520 } // extend 521 522 356 // thunk problem 523 357 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { 524 358 size_t l = 0, m, h = dim; … … 535 369 536 370 371 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk 372 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true; 373 mmapStart = value; // set global 374 375 // find the closest bucket size less than or equal to the mmapStart size 376 maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes ); // binary search 377 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 378 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 379 return false; 380 } // setMmapStart 381 382 383 static inline void checkHeader( bool check, const char * name, void * addr ) { 384 if ( unlikely( check ) ) { // bad address ? 385 abort( "Attempt to %s storage %p with address outside the heap.\n" 386 "Possible cause is duplicate free on same block or overwriting of memory.", 387 name, addr ); 388 } // if 389 } // checkHeader 390 391 392 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) { 393 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 
394 size_t offset = header->kind.fake.offset; 395 alignment = header->kind.fake.alignment & -2; // remove flag from value 396 #ifdef __CFA_DEBUG__ 397 checkAlign( alignment ); // check alignment 398 #endif // __CFA_DEBUG__ 399 header = (HeapManager.Storage.Header *)((char *)header - offset); 400 } // if 401 } // fakeHeader 402 403 404 // <-------+----------------------------------------------------> bsize (bucket size) 405 // |header |addr 406 //================================================================================== 407 // | alignment 408 // <-----------------<------------+-----------------------------> bsize (bucket size) 409 // |fake-header | addr 410 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) )) 411 412 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size) 413 // |header |addr 414 //================================================================================== 415 // | alignment 416 // <------------------------------<<---------- dsize --------->>> bsize (bucket size) 417 // |fake-header |addr 418 #define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header )) 419 420 421 static inline bool headers( const char * name __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) { 422 header = headerAddr( addr ); 423 424 if ( unlikely( heapEnd < addr ) ) { // mmapped ? 425 fakeHeader( header, alignment ); 426 size = header->kind.real.blockSize & -3; // mmap size 427 return true; 428 } // if 429 430 #ifdef __CFA_DEBUG__ 431 checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ? 
432 #endif // __CFA_DEBUG__ 433 434 // header may be safe to dereference 435 fakeHeader( header, alignment ); 436 #ifdef __CFA_DEBUG__ 437 checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 438 #endif // __CFA_DEBUG__ 439 440 freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3); 441 #ifdef __CFA_DEBUG__ 442 if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) { 443 abort( "Attempt to %s storage %p with corrupted header.\n" 444 "Possible cause is duplicate free on same block or overwriting of header information.", 445 name, addr ); 446 } // if 447 #endif // __CFA_DEBUG__ 448 size = freeElem->blockSize; 449 return false; 450 } // headers 451 452 453 static inline void * extend( size_t size ) with ( heapManager ) { 454 lock( extlock __cfaabi_dbg_ctx2 ); 455 ptrdiff_t rem = heapRemaining - size; 456 if ( rem < 0 ) { 457 // If the size requested is bigger than the current remaining storage, increase the size of the heap. 458 459 size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() ); 460 if ( sbrk( increase ) == (void *)-1 ) { 461 unlock( extlock ); 462 errno = ENOMEM; 463 return 0p; 464 } // if 465 #ifdef __STATISTICS__ 466 sbrk_calls += 1; 467 sbrk_storage += increase; 468 #endif // __STATISTICS__ 469 #ifdef __CFA_DEBUG__ 470 // Set new memory to garbage so subsequent uninitialized usages might fail. 
471 memset( (char *)heapEnd + heapRemaining, '\377', increase ); 472 #endif // __CFA_DEBUG__ 473 rem = heapRemaining + increase - size; 474 } // if 475 476 HeapManager.Storage * block = (HeapManager.Storage *)heapEnd; 477 heapRemaining = rem; 478 heapEnd = (char *)heapEnd + size; 479 unlock( extlock ); 480 return block; 481 } // extend 482 483 537 484 static inline void * doMalloc( size_t size ) with ( heapManager ) { 538 485 HeapManager.Storage * block; // pointer to new block of storage … … 541 488 // along with the block and is a multiple of the alignment size. 542 489 543 if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0 ;490 if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0p; 544 491 size_t tsize = size + sizeof(HeapManager.Storage); 545 492 if ( likely( tsize < mmapStart ) ) { // small size => sbrk … … 574 521 block = freeElem->freeList.pop(); 575 522 #endif // SPINLOCK 576 if ( unlikely( block == 0 ) ) {// no free block ?523 if ( unlikely( block == 0p ) ) { // no free block ? 
577 524 #if defined( SPINLOCK ) 578 525 unlock( freeElem->lock ); … … 583 530 584 531 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call 585 if ( unlikely( block == 0 ) ) return 0;586 532 if ( unlikely( block == 0p ) ) return 0p; 533 #if defined( SPINLOCK ) 587 534 } else { 588 535 freeElem->freeList = block->header.kind.real.next; 589 536 unlock( freeElem->lock ); 590 537 #endif // SPINLOCK 591 538 } // if 592 539 593 540 block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size 594 541 } else { // large size => mmap 595 if ( unlikely( size > ~0ul - pageSize ) ) return 0 ;542 if ( unlikely( size > ~0ul - pageSize ) ) return 0p; 596 543 tsize = libCeiling( tsize, pageSize ); // must be multiple of page size 597 544 #ifdef __STATISTICS__ … … 611 558 } // if 612 559 613 void * a rea= &(block->data); // adjust off header to user bytes560 void * addr = &(block->data); // adjust off header to user bytes 614 561 615 562 #ifdef __CFA_DEBUG__ 616 assert( ((uintptr_t)a rea& (libAlign() - 1)) == 0 ); // minimum alignment ?563 assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ? 
617 564 __atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST ); 618 565 if ( traceHeap() ) { 619 566 enum { BufferSize = 64 }; 620 567 char helpText[BufferSize]; 621 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", a rea, size, tsize );622 // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", a rea, size );623 __cfaabi_ dbg_bits_write( helpText, len );568 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); 569 // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size ); 570 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 624 571 } // if 625 572 #endif // __CFA_DEBUG__ 626 573 627 return a rea;574 return addr; 628 575 } // doMalloc 629 576 … … 631 578 static inline void doFree( void * addr ) with ( heapManager ) { 632 579 #ifdef __CFA_DEBUG__ 633 if ( unlikely( heapManager.heapBegin == 0 ) ) {580 if ( unlikely( heapManager.heapBegin == 0p ) ) { 634 581 abort( "doFree( %p ) : internal error, called before heap is initialized.", addr ); 635 582 } // if … … 677 624 char helpText[BufferSize]; 678 625 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size ); 679 __cfaabi_ dbg_bits_write( helpText, len );626 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 680 627 } // if 681 628 #endif // __CFA_DEBUG__ … … 683 630 684 631 685 size_t checkFree( HeapManager & manager ) with ( manager ) {632 size_t prtFree( HeapManager & manager ) with ( manager ) { 686 633 size_t total = 0; 687 634 #ifdef __STATISTICS__ 688 __cfaabi_ dbg_bits_acquire();689 __cfaabi_ dbg_bits_print_nolock("\nBin lists (bin size : free blocks on list)\n" );635 __cfaabi_bits_acquire(); 636 __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" ); 690 637 #endif // __STATISTICS__ 691 638 for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) { … … 696 643 697 644 #if defined( 
SPINLOCK ) 698 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0 ; p = p->header.kind.real.next ) {645 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) { 699 646 #else 700 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0 ; p = p->header.kind.real.next.top ) {647 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) { 701 648 #endif // SPINLOCK 702 649 total += size; … … 707 654 708 655 #ifdef __STATISTICS__ 709 __cfaabi_ dbg_bits_print_nolock("%7zu, %-7u ", size, N );710 if ( (i + 1) % 8 == 0 ) __cfaabi_ dbg_bits_print_nolock("\n" );656 __cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u ", size, N ); 657 if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" ); 711 658 #endif // __STATISTICS__ 712 659 } // for 713 660 #ifdef __STATISTICS__ 714 __cfaabi_ dbg_bits_print_nolock("\ntotal free blocks:%zu\n", total );715 __cfaabi_ dbg_bits_release();661 __cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total ); 662 __cfaabi_bits_release(); 716 663 #endif // __STATISTICS__ 717 664 return (char *)heapEnd - (char *)heapBegin - total; 718 } // checkFree 665 } // prtFree 666 667 668 static void ?{}( HeapManager & manager ) with ( manager ) { 669 pageSize = sysconf( _SC_PAGESIZE ); 670 671 for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists 672 freeLists[i].blockSize = bucketSizes[i]; 673 } // for 674 675 #ifdef FASTLOOKUP 676 unsigned int idx = 0; 677 for ( unsigned int i = 0; i < LookupSizes; i += 1 ) { 678 if ( i > bucketSizes[idx] ) idx += 1; 679 lookup[i] = idx; 680 } // for 681 #endif // FASTLOOKUP 682 683 if ( setMmapStart( default_mmap_start() ) ) { 684 abort( "HeapManager : internal error, mmap start initialization failure." 
); 685 } // if 686 heapExpand = default_heap_expansion(); 687 688 char * end = (char *)sbrk( 0 ); 689 sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 690 heapBegin = heapEnd = sbrk( 0 ); // get new start point 691 } // HeapManager 692 693 694 static void ^?{}( HeapManager & ) { 695 #ifdef __STATISTICS__ 696 if ( traceHeapTerm() ) { 697 printStats(); 698 // if ( prtfree() ) prtFree( heapManager, true ); 699 } // if 700 #endif // __STATISTICS__ 701 } // ~HeapManager 702 703 704 static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) )); 705 void memory_startup( void ) { 706 #ifdef __CFA_DEBUG__ 707 if ( unlikely( heapBoot ) ) { // check for recursion during system boot 708 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 709 abort( "boot() : internal error, recursively invoked during system boot." ); 710 } // if 711 heapBoot = true; 712 #endif // __CFA_DEBUG__ 713 714 //assert( heapManager.heapBegin != 0 ); 715 //heapManager{}; 716 if ( heapManager.heapBegin == 0p ) heapManager{}; 717 } // memory_startup 718 719 static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) )); 720 void memory_shutdown( void ) { 721 ^heapManager{}; 722 } // memory_shutdown 719 723 720 724 721 725 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics 722 726 //assert( heapManager.heapBegin != 0 ); 723 if ( unlikely( heapManager.heapBegin == 0 ) ) heapManager{}; // called before memory_startup ?724 void * a rea= doMalloc( size );725 if ( unlikely( a rea == 0) ) errno = ENOMEM; // POSIX726 return a rea;727 if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ? 
728 void * addr = doMalloc( size ); 729 if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX 730 return addr; 727 731 } // mallocNoStats 732 733 734 static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) { 735 size_t size = noOfElems * elemSize; 736 char * addr = (char *)mallocNoStats( size ); 737 if ( unlikely( addr == 0p ) ) return 0p; 738 739 HeapManager.Storage.Header * header; 740 HeapManager.FreeHeader * freeElem; 741 size_t bsize, alignment; 742 bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment ); 743 #ifndef __CFA_DEBUG__ 744 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 745 if ( ! mapped ) 746 #endif // __CFA_DEBUG__ 747 // Zero entire data space even when > than size => realloc without a new allocation and zero fill works. 748 // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size) 749 // `-header`-addr `-size 750 memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros 751 752 header->kind.real.blockSize |= 2; // mark as zero filled 753 return addr; 754 } // callocNoStats 728 755 729 756 … … 745 772 // subtract libAlign() because it is already the minimum alignment 746 773 // add sizeof(Storage) for fake header 747 // #comment TD : this is the only place that calls doMalloc without calling mallocNoStats, why ? 
748 char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) ); 749 if ( unlikely( area == 0 ) ) return area; 774 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) ); 775 if ( unlikely( addr == 0p ) ) return addr; 750 776 751 777 // address in the block of the "next" alignment address 752 char * user = (char *)libCeiling( (uintptr_t)(a rea+ sizeof(HeapManager.Storage)), alignment );778 char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment ); 753 779 754 780 // address of header from malloc 755 HeapManager.Storage.Header * realHeader = headerAddr( a rea);781 HeapManager.Storage.Header * realHeader = headerAddr( addr ); 756 782 // address of fake header * before* the alignment location 757 783 HeapManager.Storage.Header * fakeHeader = headerAddr( user ); … … 763 789 return user; 764 790 } // memalignNoStats 791 792 793 static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) { 794 size_t size = noOfElems * elemSize; 795 char * addr = (char *)memalignNoStats( alignment, size ); 796 if ( unlikely( addr == 0p ) ) return 0p; 797 HeapManager.Storage.Header * header; 798 HeapManager.FreeHeader * freeElem; 799 size_t bsize; 800 bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment ); 801 #ifndef __CFA_DEBUG__ 802 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 803 if ( ! mapped ) 804 #endif // __CFA_DEBUG__ 805 memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros 806 header->kind.real.blockSize |= 2; // mark as zero filled 807 808 return addr; 809 } // cmemalignNoStats 765 810 766 811 … … 776 821 extern "C" { 777 822 // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not 778 // initialized. 
If size is 0, then malloc() returns either NULL, or a unique pointer value that can later be823 // initialized. If size is 0, then malloc() returns either 0p, or a unique pointer value that can later be 779 824 // successfully passed to free(). 780 825 void * malloc( size_t size ) { … … 788 833 789 834 // The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to 790 // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either NULL, or a835 // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a 791 836 // unique pointer value that can later be successfully passed to free(). 792 837 void * calloc( size_t noOfElems, size_t elemSize ) { 793 size_t size = noOfElems * elemSize;794 838 #ifdef __STATISTICS__ 795 839 __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST ); 796 __atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST ); 797 #endif // __STATISTICS__ 798 799 char * area = (char *)mallocNoStats( size ); 800 if ( unlikely( area == 0 ) ) return 0; 840 __atomic_add_fetch( &calloc_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST ); 841 #endif // __STATISTICS__ 842 843 return callocNoStats( noOfElems, elemSize ); 844 } // calloc 845 846 // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be 847 // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size 848 // is larger than the old size, the added memory will not be initialized. If ptr is 0p, then the call is 849 // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not 0p, then the call 850 // is equivalent to free(ptr). Unless ptr is 0p, it must have been returned by an earlier call to malloc(), 851 // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done. 
852 void * realloc( void * oaddr, size_t size ) { 853 #ifdef __STATISTICS__ 854 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 855 #endif // __STATISTICS__ 856 857 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 858 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases 859 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 801 860 802 861 HeapManager.Storage.Header * header; 803 862 HeapManager.FreeHeader * freeElem; 804 size_t asize, alignment; 805 bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment ); 806 #ifndef __CFA_DEBUG__ 807 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 808 if ( ! mapped ) 809 #endif // __CFA_DEBUG__ 810 memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros 811 812 header->kind.real.blockSize |= 2; // mark as zero filled 813 return area; 814 } // calloc 815 816 // #comment TD : Document this function 817 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) { 818 size_t size = noOfElems * elemSize; 819 #ifdef __STATISTICS__ 820 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 821 __atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST ); 822 #endif // __STATISTICS__ 823 824 char * area = (char *)memalignNoStats( alignment, size ); 825 if ( unlikely( area == 0 ) ) return 0; 826 HeapManager.Storage.Header * header; 827 HeapManager.FreeHeader * freeElem; 828 size_t asize; 829 bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment ); 830 #ifndef __CFA_DEBUG__ 831 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 832 if ( ! 
mapped ) 833 #endif // __CFA_DEBUG__ 834 memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros 835 header->kind.real.blockSize |= 2; // mark as zero filled 836 837 return area; 838 } // cmemalign 839 840 // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be 841 // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size 842 // is larger than the old size, the added memory will not be initialized. If ptr is NULL, then the call is 843 // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not NULL, then the call 844 // is equivalent to free(ptr). Unless ptr is NULL, it must have been returned by an earlier call to malloc(), 845 // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done. 846 void * realloc( void * addr, size_t size ) { 847 #ifdef __STATISTICS__ 848 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 849 #endif // __STATISTICS__ 850 851 if ( unlikely( addr == 0 ) ) return mallocNoStats( size ); // special cases 852 if ( unlikely( size == 0 ) ) { free( addr ); return 0; } 853 854 HeapManager.Storage.Header * header; 855 HeapManager.FreeHeader * freeElem; 856 size_t asize, alignment = 0; 857 headers( "realloc", addr, header, freeElem, asize, alignment ); 858 859 size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block 860 if ( usize >= size ) { // already sufficient storage 863 size_t bsize, oalign = 0; 864 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 865 866 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 867 if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size 868 // Do not know size of original allocation => cannot do 0 fill for any additional space because do not know 869 // where to start 
filling, i.e., do not overwrite existing values in space. 870 // 861 871 // This case does not result in a new profiler entry because the previous one still exists and it must match with 862 872 // the free for this memory. Hence, this realloc does not appear in the profiler output. 863 return addr;873 return oaddr; 864 874 } // if 865 875 … … 868 878 #endif // __STATISTICS__ 869 879 870 void * area; 871 if ( unlikely( alignment != 0 ) ) { // previous request memalign? 872 area = memalign( alignment, size ); // create new aligned area 880 // change size and copy old content to new storage 881 882 void * naddr; 883 if ( unlikely( oalign != 0 ) ) { // previous request memalign? 884 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 885 naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area 886 } else { 887 naddr = memalignNoStats( oalign, size ); // create new aligned area 888 } // if 873 889 } else { 874 area = mallocNoStats( size ); // create new area 890 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 891 naddr = callocNoStats( 1, size ); // create new area 892 } else { 893 naddr = mallocNoStats( size ); // create new area 894 } // if 875 895 } // if 876 if ( unlikely( area == 0 ) ) return 0; 877 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ? 878 assert( (header->kind.real.blockSize & 1) == 0 ); 879 bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment ); 880 #ifndef __CFA_DEBUG__ 881 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 882 if ( ! 
mapped ) 883 #endif // __CFA_DEBUG__ 884 memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part 885 header->kind.real.blockSize |= 2; // mark new request as zero fill 886 } // if 887 memcpy( area, addr, usize ); // copy bytes 888 free( addr ); 889 return area; 896 if ( unlikely( naddr == 0p ) ) return 0p; 897 898 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 899 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket 900 // To preserve prior fill, the entire bucket must be copied versus the size. 901 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 902 free( oaddr ); 903 return naddr; 890 904 } // realloc 891 905 … … 898 912 #endif // __STATISTICS__ 899 913 900 void * area = memalignNoStats( alignment, size ); 901 902 return area; 914 return memalignNoStats( alignment, size ); 903 915 } // memalign 916 917 918 // The cmemalign() function is the same as calloc() with memory alignment. 919 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) { 920 #ifdef __STATISTICS__ 921 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 922 __atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST ); 923 #endif // __STATISTICS__ 924 925 return cmemalignNoStats( alignment, noOfElems, elemSize ); 926 } // cmemalign 904 927 905 928 // The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a … … 912 935 // The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The 913 936 // address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of 914 // sizeof(void *). If size is 0, then posix_memalign() returns either NULL, or a unique pointer value that can later937 // sizeof(void *). 
If size is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later 915 938 // be successfully passed to free(3). 916 939 int posix_memalign( void ** memptr, size_t alignment, size_t size ) { 917 940 if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment 918 941 * memptr = memalign( alignment, size ); 919 if ( unlikely( * memptr == 0 ) ) return ENOMEM;942 if ( unlikely( * memptr == 0p ) ) return ENOMEM; 920 943 return 0; 921 944 } // posix_memalign … … 930 953 // The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to 931 954 // malloc(), calloc() or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior 932 // occurs. If ptr is NULL, no operation is performed.955 // occurs. If ptr is 0p, no operation is performed. 933 956 void free( void * addr ) { 934 957 #ifdef __STATISTICS__ … … 936 959 #endif // __STATISTICS__ 937 960 938 // #comment TD : To decrease nesting I would but the special case in the 939 // else instead, plus it reads more naturally to have the 940 // short / normal case instead 941 if ( unlikely( addr == 0 ) ) { // special case 942 #ifdef __CFA_DEBUG__ 943 if ( traceHeap() ) { 944 #define nullmsg "Free( 0x0 ) size:0\n" 945 // Do not debug print free( 0 ), as it can cause recursive entry from sprintf. 946 __cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 ); 947 } // if 948 #endif // __CFA_DEBUG__ 961 if ( unlikely( addr == 0p ) ) { // special case 962 // #ifdef __CFA_DEBUG__ 963 // if ( traceHeap() ) { 964 // #define nullmsg "Free( 0x0 ) size:0\n" 965 // // Do not debug print free( 0p ), as it can cause recursive entry from sprintf. 
966 // __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 ); 967 // } // if 968 // #endif // __CFA_DEBUG__ 949 969 return; 950 970 } // exit … … 953 973 } // free 954 974 955 // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see 956 // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that 957 // parameter. 958 int mallopt( int option, int value ) { 959 choose( option ) { 960 case M_TOP_PAD: 961 if ( setHeapExpand( value ) ) fallthru default; 962 case M_MMAP_THRESHOLD: 963 if ( setMmapStart( value ) ) fallthru default; 964 default: 965 // #comment TD : 1 for unsopported feels wrong 966 return 1; // success, or unsupported 967 } // switch 968 return 0; // error 969 } // mallopt 970 971 // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a 972 // suitable argument). 973 int malloc_trim( size_t ) { 974 return 0; // => impossible to release memory 975 } // malloc_trim 976 977 // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to 978 // a block of memory allocated by malloc(3) or a related function. 979 size_t malloc_usable_size( void * addr ) { 980 if ( unlikely( addr == 0 ) ) return 0; // null allocation has 0 size 981 982 HeapManager.Storage.Header * header; 983 HeapManager.FreeHeader * freeElem; 984 size_t size, alignment; 985 986 headers( "malloc_usable_size", addr, header, freeElem, size, alignment ); 987 size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block 988 return usize; 989 } // malloc_usable_size 990 991 992 // The malloc_alignment() function returns the alignment of the allocation. 975 976 // The malloc_alignment() function returns the alignment of the allocation. 
993 977 size_t malloc_alignment( void * addr ) { 994 if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment978 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 995 979 HeapManager.Storage.Header * header = headerAddr( addr ); 996 980 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? … … 1002 986 1003 987 1004 988 // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc(). 1005 989 bool malloc_zero_fill( void * addr ) { 1006 if ( unlikely( addr == 0 ) ) return false; // null allocation is not zero fill990 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1007 991 HeapManager.Storage.Header * header = headerAddr( addr ); 1008 992 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? … … 1013 997 1014 998 1015 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and 1016 // related functions. 999 // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to 1000 // a block of memory allocated by malloc(3) or a related function. 1001 size_t malloc_usable_size( void * addr ) { 1002 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1003 HeapManager.Storage.Header * header; 1004 HeapManager.FreeHeader * freeElem; 1005 size_t bsize, alignment; 1006 1007 headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment ); 1008 return dataStorage( bsize, addr, header ); // data storage in bucket 1009 } // malloc_usable_size 1010 1011 1012 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and 1013 // related functions. 
1017 1014 void malloc_stats( void ) { 1018 1015 #ifdef __STATISTICS__ 1019 1016 printStats(); 1020 if ( checkFree() ) checkFree( heapManager );1017 if ( prtFree() ) prtFree( heapManager ); 1021 1018 #endif // __STATISTICS__ 1022 1019 } // malloc_stats 1023 1020 1024 1021 // The malloc_stats_fd() function changes the file descripter where malloc_stats() writes the statistics. 1025 int malloc_stats_fd( int fd ) {1022 int malloc_stats_fd( int fd __attribute__(( unused )) ) { 1026 1023 #ifdef __STATISTICS__ 1027 1024 int temp = statfd; … … 1033 1030 } // malloc_stats_fd 1034 1031 1032 1033 // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see 1034 // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that 1035 // parameter. 1036 int mallopt( int option, int value ) { 1037 choose( option ) { 1038 case M_TOP_PAD: 1039 if ( setHeapExpand( value ) ) return 1; 1040 case M_MMAP_THRESHOLD: 1041 if ( setMmapStart( value ) ) return 1; 1042 } // switch 1043 return 0; // error, unsupported 1044 } // mallopt 1045 1046 // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a 1047 // suitable argument). 1048 int malloc_trim( size_t ) { 1049 return 0; // => impossible to release memory 1050 } // malloc_trim 1051 1052 1035 1053 // The malloc_info() function exports an XML string that describes the current state of the memory-allocation 1036 1054 // implementation in the caller. The string is printed on the file stream stream. The exported string includes 1037 1055 // information about all arenas (see malloc(3)). 1038 1056 int malloc_info( int options, FILE * stream ) { 1057 if ( options != 0 ) { errno = EINVAL; return -1; } 1039 1058 return printStatsXML( stream ); 1040 1059 } // malloc_info … … 1046 1065 // structure is returned as the function result. (It is the caller's responsibility to free(3) this memory.) 
1047 1066 void * malloc_get_state( void ) { 1048 return 0 ; // unsupported1067 return 0p; // unsupported 1049 1068 } // malloc_get_state 1050 1069 … … 1058 1077 1059 1078 1079 // Must have CFA linkage to overload with C linkage realloc. 1080 void * realloc( void * oaddr, size_t nalign, size_t size ) { 1081 #ifdef __STATISTICS__ 1082 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 1083 #endif // __STATISTICS__ 1084 1085 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1086 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases 1087 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 1088 1089 if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum 1090 #ifdef __CFA_DEBUG__ 1091 else 1092 checkAlign( nalign ); // check alignment 1093 #endif // __CFA_DEBUG__ 1094 1095 HeapManager.Storage.Header * header; 1096 HeapManager.FreeHeader * freeElem; 1097 size_t bsize, oalign = 0; 1098 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 1099 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 1100 1101 if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out 1102 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1103 return realloc( oaddr, size ); 1104 } // if 1105 1106 #ifdef __STATISTICS__ 1107 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST ); 1108 #endif // __STATISTICS__ 1109 1110 // change size and copy old content to new storage 1111 1112 void * naddr; 1113 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1114 naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area 1115 } else { 1116 naddr = memalignNoStats( nalign, size ); // create new aligned area 1117 } // if 1118 1119 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1120 
size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket 1121 // To preserve prior fill, the entire bucket must be copied versus the size. 1122 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 1123 free( oaddr ); 1124 return naddr; 1125 } // realloc 1126 1127 1060 1128 // Local Variables: // 1061 1129 // tab-width: 4 // -
libcfa/src/interpose.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Wed Mar 29 16:10:31 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : S un Jul 14 22:57:16201913 // Update Count : 11 612 // Last Modified On : Sat Nov 30 07:09:42 2019 13 // Update Count : 119 14 14 // 15 15 … … 163 163 abort_lastframe = kernel_abort_lastframe(); 164 164 len = snprintf( abort_text, abort_text_size, "Cforall Runtime error (UNIX pid:%ld) ", (long int)getpid() ); // use UNIX pid (versus getPid) 165 __cfaabi_dbg_ bits_write( abort_text, len );165 __cfaabi_dbg_write( abort_text, len ); 166 166 167 167 if ( fmt ) { … … 171 171 len = vsnprintf( abort_text, abort_text_size, fmt, args ); 172 172 va_end( args ); 173 __cfaabi_dbg_ bits_write( abort_text, len );173 __cfaabi_dbg_write( abort_text, len ); 174 174 175 175 if ( fmt[strlen( fmt ) - 1] != '\n' ) { // add optional newline if missing at the end of the format text 176 __cfaabi_dbg_ bits_write( "\n", 1 );176 __cfaabi_dbg_write( "\n", 1 ); 177 177 } 178 178 } … … 194 194 // find executable name 195 195 *index( messages[0], '(' ) = '\0'; 196 __cfaabi_ dbg_bits_print_nolock("Stack back trace for: %s\n", messages[0]);197 198 for ( int i = Start; i < size - abort_lastframe && messages != NULL; i += 1 ) {199 char * name = NULL, * offset_begin = NULL, * offset_end = NULL;196 __cfaabi_bits_print_nolock( STDERR_FILENO, "Stack back trace for: %s\n", messages[0]); 197 198 for ( int i = Start; i < size - abort_lastframe && messages != 0p; i += 1 ) { 199 char * name = 0p, * offset_begin = 0p, * offset_end = 0p; 200 200 201 201 for ( char * p = messages[i]; *p; ++p ) { 202 //__cfaabi_ dbg_bits_print_nolock( "X %s\n", p);202 //__cfaabi_bits_print_nolock( "X %s\n", p); 203 203 // find parantheses and +offset 204 204 if ( *p == '(' ) { … … 220 220 *offset_end++ = '\0'; 221 221 222 __cfaabi_ dbg_bits_print_nolock("(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);222 __cfaabi_bits_print_nolock( STDERR_FILENO, "(%i) %s : %s + %s 
%s\n", frameNo, messages[i], name, offset_begin, offset_end); 223 223 } else { // otherwise, print the whole line 224 __cfaabi_ dbg_bits_print_nolock("(%i) %s\n", frameNo, messages[i] );224 __cfaabi_bits_print_nolock( STDERR_FILENO, "(%i) %s\n", frameNo, messages[i] ); 225 225 } 226 226 } -
libcfa/src/startup.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Tue Jul 24 16:21:57 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jul 25 16:42:01 201813 // Update Count : 1 112 // Last Modified On : Sat Nov 30 07:07:56 2019 13 // Update Count : 13 14 14 // 15 15 16 16 #include "startup.hfa" 17 #include <unistd.h> 18 17 #include <time.h> // tzset 19 18 20 19 extern "C" { 21 20 static void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) )); 22 21 void __cfaabi_appready_startup( void ) { 22 tzset(); // initialize time global variables 23 23 #ifdef __CFA_DEBUG__ 24 24 extern void heapAppStart(); -
libcfa/src/stdlib.cfa
raca6a54c r2fa5bd2 10 10 // Created On : Thu Jan 28 17:10:29 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Oct 22 08:57:52201913 // Update Count : 4 7812 // Last Modified On : Wed Nov 20 17:22:47 2019 13 // Update Count : 485 14 14 // 15 15 … … 30 30 T * alloc_set( T ptr[], size_t dim, char fill ) { // realloc array with fill 31 31 size_t olen = malloc_usable_size( ptr ); // current allocation 32 char * nptr = (char*)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc32 void * nptr = (void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 33 33 size_t nlen = malloc_usable_size( nptr ); // new allocation 34 34 if ( nlen > olen ) { // larger ? 35 memset( nptr + olen, (int)fill, nlen - olen ); // initialize added storage35 memset( (char *)nptr + olen, (int)fill, nlen - olen ); // initialize added storage 36 36 } // if 37 37 return (T *)nptr; 38 38 } // alloc_set 39 39 40 T * alloc_align( T ptr[], size_t align ) { // aligned realloc array41 char * nptr;42 size_t alignment = malloc_alignment( ptr );43 if ( align != alignment && (uintptr_t)ptr % align != 0 ) {44 size_t olen = malloc_usable_size( ptr ); // current allocation45 nptr = (char *)memalign( align, olen );46 size_t nlen = malloc_usable_size( nptr ); // new allocation47 size_t lnth = olen < nlen ? olen : nlen; // min48 memcpy( nptr, ptr, lnth ); // initialize storage49 free( ptr );50 } else {51 nptr = (char *)ptr;52 } // if53 return (T *)nptr;54 } // alloc_align55 56 T * alloc_align( T ptr[], size_t align, size_t dim ) { // aligned realloc array57 char * nptr;58 size_t alignment = malloc_alignment( ptr );59 if ( align != alignment ) {60 size_t olen = malloc_usable_size( ptr ); // current allocation61 nptr = (char *)memalign( align, dim * sizeof(T) );62 size_t nlen = malloc_usable_size( nptr ); // new allocation63 size_t lnth = olen < nlen ? 
olen : nlen; // min64 memcpy( nptr, ptr, lnth ); // initialize storage65 free( ptr );66 } else {67 nptr = (char *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc68 } // if69 return (T *)nptr;70 } // alloc_align71 72 40 T * alloc_align_set( T ptr[], size_t align, char fill ) { // aligned realloc with fill 73 41 size_t olen = malloc_usable_size( ptr ); // current allocation 74 char * nptr = alloc_align( ptr, align ); 42 void * nptr = (void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc 43 // char * nptr = alloc_align( ptr, align ); 75 44 size_t nlen = malloc_usable_size( nptr ); // new allocation 76 45 if ( nlen > olen ) { // larger ? 77 memset( nptr + olen, (int)fill, nlen - olen ); // initialize added storage46 memset( (char *)nptr + olen, (int)fill, nlen - olen ); // initialize added storage 78 47 } // if 79 48 return (T *)nptr; -
libcfa/src/stdlib.hfa
raca6a54c r2fa5bd2 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Oct 20 22:57:33201913 // Update Count : 39012 // Last Modified On : Fri Nov 29 23:08:02 2019 13 // Update Count : 400 14 14 // 15 15 … … 28 28 } // extern "C" 29 29 30 void * realloc( void * oaddr, size_t nalign, size_t size ); // CFA heap 31 30 32 //--------------------------------------- 31 33 … … 50 52 } // calloc 51 53 52 T * realloc( T * ptr, size_t size ) { 53 if ( unlikely( ptr == 0 ) ) return malloc(); 54 T * realloc( T * ptr, size_t size ) { // CFA realloc, eliminate return-type cast 54 55 return (T *)(void *)realloc( (void *)ptr, size ); // C realloc 55 56 } // realloc … … 59 60 } // memalign 60 61 62 T * cmemalign( size_t align, size_t dim ) { 63 return (T *)cmemalign( align, dim, sizeof(T) ); // CFA cmemalign 64 } // cmemalign 65 61 66 T * aligned_alloc( size_t align ) { 62 67 return (T *)aligned_alloc( align, sizeof(T) ); // C aligned_alloc … … 79 84 80 85 T * alloc( T ptr[], size_t dim ) { // realloc 81 return realloc( ptr, dim * sizeof(T) );86 return (T *)(void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 82 87 } // alloc 83 88 … … 118 123 } // alloc_align 119 124 125 T * alloc_align( T ptr[], size_t align ) { // aligned realloc array 126 return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc 127 } // alloc_align 128 129 T * alloc_align( T ptr[], size_t align, size_t dim ) { // aligned realloc array 130 return (T *)(void *)realloc( (void *)ptr, align, dim * sizeof(T) ); // CFA realloc 131 } // alloc_align 132 120 133 T * alloc_align_set( size_t align, char fill ) { 121 134 return (T *)memset( (T *)alloc_align( align ), (int)fill, sizeof(T) ); // initialize with fill value … … 142 155 143 156 forall( dtype T | sized(T) ) { 144 T * alloc_align( T ptr[], size_t align ); // realign145 T * alloc_align( T ptr[], size_t align, size_t dim ); // aligned realloc array146 157 T * 
alloc_align_set( T ptr[], size_t align, size_t dim, char fill ); // aligned realloc array with fill 147 158 } // distribution … … 199 210 200 211 static inline { 201 int ato( const char * sptr ) { return (int)strtol( sptr, 0 , 10 ); }202 unsigned int ato( const char * sptr ) { return (unsigned int)strtoul( sptr, 0 , 10 ); }203 long int ato( const char * sptr ) { return strtol( sptr, 0 , 10 ); }204 unsigned long int ato( const char * sptr ) { return strtoul( sptr, 0 , 10 ); }205 long long int ato( const char * sptr ) { return strtoll( sptr, 0 , 10 ); }206 unsigned long long int ato( const char * sptr ) { return strtoull( sptr, 0 , 10 ); }207 208 float ato( const char * sptr ) { return strtof( sptr, 0 ); }209 double ato( const char * sptr ) { return strtod( sptr, 0 ); }210 long double ato( const char * sptr ) { return strtold( sptr, 0 ); }211 212 float _Complex ato( const char * sptr ) { return strto( sptr, NULL); }213 double _Complex ato( const char * sptr ) { return strto( sptr, NULL); }214 long double _Complex ato( const char * sptr ) { return strto( sptr, NULL); }212 int ato( const char * sptr ) { return (int)strtol( sptr, 0p, 10 ); } 213 unsigned int ato( const char * sptr ) { return (unsigned int)strtoul( sptr, 0p, 10 ); } 214 long int ato( const char * sptr ) { return strtol( sptr, 0p, 10 ); } 215 unsigned long int ato( const char * sptr ) { return strtoul( sptr, 0p, 10 ); } 216 long long int ato( const char * sptr ) { return strtoll( sptr, 0p, 10 ); } 217 unsigned long long int ato( const char * sptr ) { return strtoull( sptr, 0p, 10 ); } 218 219 float ato( const char * sptr ) { return strtof( sptr, 0p ); } 220 double ato( const char * sptr ) { return strtod( sptr, 0p ); } 221 long double ato( const char * sptr ) { return strtold( sptr, 0p ); } 222 223 float _Complex ato( const char * sptr ) { return strto( sptr, 0p ); } 224 double _Complex ato( const char * sptr ) { return strto( sptr, 0p ); } 225 long double _Complex ato( const char * sptr ) { return strto( 
sptr, 0p ); } 215 226 } // distribution 216 227 -
longrun_tests/Makefile.in
raca6a54c r2fa5bd2 486 486 LTCFACOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ 487 487 $(LIBTOOLFLAGS) --mode=compile $(CFACC) $(DEFS) \ 488 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(CFAFLAGS) \ 489 $(AM_CFLAGS) $(CFLAGS) 488 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(AM_CFLAGS) $(CFAFLAGS) $(CFLAGS) 490 489 491 490 AM_V_CFA = $(am__v_CFA_@AM_V@) -
src/AST/Convert.cpp
raca6a54c r2fa5bd2 887 887 auto expr = visitBaseExpr( node, 888 888 new AsmExpr( 889 get<Expression>().accept1(node->inout),889 new std::string(node->inout), 890 890 get<Expression>().accept1(node->constraint), 891 891 get<Expression>().accept1(node->operand) … … 2258 2258 new ast::AsmExpr( 2259 2259 old->location, 2260 GET_ACCEPT_1(inout, Expr),2260 old->inout, 2261 2261 GET_ACCEPT_1(constraint, Expr), 2262 2262 GET_ACCEPT_1(operand, Expr) -
src/AST/Expr.hpp
raca6a54c r2fa5bd2 556 556 class AsmExpr final : public Expr { 557 557 public: 558 ptr<Expr>inout;558 std::string inout; 559 559 ptr<Expr> constraint; 560 560 ptr<Expr> operand; 561 561 562 AsmExpr( const CodeLocation & loc, const Expr *io, const Expr * con, const Expr * op )562 AsmExpr( const CodeLocation & loc, const std::string & io, const Expr * con, const Expr * op ) 563 563 : Expr( loc ), inout( io ), constraint( con ), operand( op ) {} 564 564 -
src/AST/Pass.impl.hpp
raca6a54c r2fa5bd2 1300 1300 maybe_accept( node, &AsmExpr::result ); 1301 1301 } 1302 maybe_accept( node, &AsmExpr::inout );1303 1302 maybe_accept( node, &AsmExpr::constraint ); 1304 1303 maybe_accept( node, &AsmExpr::operand ); -
src/AST/Print.cpp
raca6a54c r2fa5bd2 1011 1011 os << "Asm Expression:" << endl; 1012 1012 ++indent; 1013 if ( node->inout ) node->inout->accept( *this );1013 if ( !node->inout.empty() ) os << "[" << node->inout << "] "; 1014 1014 if ( node->constraint ) node->constraint->accept( *this ); 1015 1015 if ( node->operand ) node->operand->accept( *this ); -
src/CodeGen/CodeGenerator.cc
raca6a54c r2fa5bd2 786 786 787 787 void CodeGenerator::postvisit( AsmExpr * asmExpr ) { 788 if ( asmExpr->get_inout() ) {788 if ( !asmExpr->inout.empty() ) { 789 789 output << "[ "; 790 asmExpr->get_inout()->accept( *visitor );790 output << asmExpr->inout; 791 791 output << " ] "; 792 792 } // if 793 asmExpr-> get_constraint()->accept( *visitor );793 asmExpr->constraint->accept( *visitor ); 794 794 output << " ( "; 795 asmExpr-> get_operand()->accept( *visitor );795 asmExpr->operand->accept( *visitor ); 796 796 output << " )"; 797 797 } -
src/Common/PassVisitor.impl.h
raca6a54c r2fa5bd2 2452 2452 2453 2453 indexerScopedAccept( node->result , *this ); 2454 maybeAccept_impl ( node->inout , *this );2455 2454 maybeAccept_impl ( node->constraint, *this ); 2456 2455 maybeAccept_impl ( node->operand , *this ); … … 2464 2463 2465 2464 indexerScopedAccept( node->result , *this ); 2466 maybeAccept_impl ( node->inout , *this );2467 2465 maybeAccept_impl ( node->constraint, *this ); 2468 2466 maybeAccept_impl ( node->operand , *this ); … … 2477 2475 indexerScopedMutate( node->env , *this ); 2478 2476 indexerScopedMutate( node->result , *this ); 2479 maybeMutate_impl ( node->inout , *this );2480 2477 maybeMutate_impl ( node->constraint, *this ); 2481 2478 maybeMutate_impl ( node->operand , *this ); -
src/Common/SemanticError.cc
raca6a54c r2fa5bd2 149 149 // Helpers 150 150 namespace ErrorHelpers { 151 Colors colors = Colors::Auto; 152 153 static inline bool with_colors() { 154 return colors == Colors::Auto ? isatty( STDERR_FILENO ) : bool(colors); 155 } 156 151 157 const std::string & error_str() { 152 static std::string str = isatty( STDERR_FILENO) ? "\e[31merror:\e[39m " : "error: ";158 static std::string str = with_colors() ? "\e[31merror:\e[39m " : "error: "; 153 159 return str; 154 160 } 155 161 156 162 const std::string & warning_str() { 157 static std::string str = isatty( STDERR_FILENO) ? "\e[95mwarning:\e[39m " : "warning: ";163 static std::string str = with_colors() ? "\e[95mwarning:\e[39m " : "warning: "; 158 164 return str; 159 165 } 160 166 161 167 const std::string & bold_ttycode() { 162 static std::string str = isatty( STDERR_FILENO) ? "\e[1m" : "";168 static std::string str = with_colors() ? "\e[1m" : ""; 163 169 return str; 164 170 } 165 171 166 172 const std::string & reset_font_ttycode() { 167 static std::string str = isatty( STDERR_FILENO) ? "\e[0m" : "";173 static std::string str = with_colors() ? "\e[0m" : ""; 168 174 return str; 169 175 } -
src/Common/SemanticError.h
raca6a54c r2fa5bd2 97 97 // Helpers 98 98 namespace ErrorHelpers { 99 enum class Colors { 100 Never = false, 101 Always = true, 102 Auto, 103 }; 104 105 extern Colors colors; 106 99 107 const std::string & error_str(); 100 108 const std::string & warning_str(); -
src/Concurrency/Keywords.cc
raca6a54c r2fa5bd2 59 59 60 60 Declaration * postmutate( StructDecl * decl ); 61 DeclarationWithType * postmutate( FunctionDecl * decl ); 61 62 62 63 void handle( StructDecl * ); … … 77 78 KeywordCastExpr::Target cast_target; 78 79 79 StructDecl* type_decl = nullptr; 80 StructDecl * type_decl = nullptr; 81 FunctionDecl * dtor_decl = nullptr; 80 82 }; 81 83 … … 97 99 "__thrd", 98 100 "get_thread", 99 "thread keyword requires threads to be in scope, add #include <thread.hfa> ",101 "thread keyword requires threads to be in scope, add #include <thread.hfa>\n", 100 102 true, 101 103 KeywordCastExpr::Thread … … 129 131 "__cor", 130 132 "get_coroutine", 131 "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa> ",133 "coroutine keyword requires coroutines to be in scope, add #include <coroutine.hfa>\n", 132 134 true, 133 135 KeywordCastExpr::Coroutine … … 161 163 "__mon", 162 164 "get_monitor", 163 "monitor keyword requires monitors to be in scope, add #include <monitor.hfa> ",165 "monitor keyword requires monitors to be in scope, add #include <monitor.hfa>\n", 164 166 false, 165 167 KeywordCastExpr::Monitor … … 284 286 } 285 287 288 DeclarationWithType * ConcurrentSueKeyword::postmutate( FunctionDecl * decl ) { 289 if( !type_decl ) return decl; 290 if( !CodeGen::isDestructor( decl->name ) ) return decl; 291 292 auto params = decl->type->parameters; 293 if( params.size() != 1 ) return decl; 294 295 auto type = dynamic_cast<ReferenceType*>( params.front()->get_type() ); 296 if( !type ) return decl; 297 298 auto stype = dynamic_cast<StructInstType*>( type->base ); 299 if( !stype ) return decl; 300 if( stype->baseStruct != type_decl ) return decl; 301 302 if( !dtor_decl ) dtor_decl = decl; 303 return decl; 304 } 305 286 306 Expression * ConcurrentSueKeyword::postmutate( KeywordCastExpr * cast ) { 287 307 if ( cast_target == cast->target ) { 288 308 // convert (thread &)t to (thread_desc &)*get_thread(t), etc. 
289 309 if( !type_decl ) SemanticError( cast, context_error ); 290 Expression * arg = cast->arg; 291 cast->arg = nullptr; 292 delete cast; 293 return new CastExpr( 294 UntypedExpr::createDeref( 295 new UntypedExpr( new NameExpr( getter_name ), { arg } ) 296 ), 297 new ReferenceType( 298 noQualifiers, 299 new StructInstType( noQualifiers, type_decl ) ) 300 ); 310 if( !dtor_decl ) SemanticError( cast, context_error ); 311 assert( cast->result == nullptr ); 312 cast->set_result( new ReferenceType( noQualifiers, new StructInstType( noQualifiers, type_decl ) ) ); 313 cast->concrete_target.field = field_name; 314 cast->concrete_target.getter = getter_name; 301 315 } 302 316 return cast; … … 308 322 309 323 if( !type_decl ) SemanticError( decl, context_error ); 324 if( !dtor_decl ) SemanticError( decl, context_error ); 310 325 311 326 FunctionDecl * func = forwardDeclare( decl ); -
src/ControlStruct/MLEMutator.cc
raca6a54c r2fa5bd2 231 231 232 232 Statement *MLEMutator::mutateLoop( Statement *bodyLoop, Entry &e ) { 233 // only generate these when needed 234 if( !e.isContUsed() && !e.isBreakUsed() ) return bodyLoop; 235 233 236 // ensure loop body is a block 234 CompoundStmt *newBody; 235 if ( ! (newBody = dynamic_cast<CompoundStmt *>( bodyLoop )) ) { 236 newBody = new CompoundStmt(); 237 newBody->get_kids().push_back( bodyLoop ); 238 } // if 239 240 // only generate these when needed 237 CompoundStmt * newBody = new CompoundStmt(); 238 newBody->get_kids().push_back( bodyLoop ); 241 239 242 240 if ( e.isContUsed() ) { -
src/GenPoly/Lvalue.cc
raca6a54c r2fa5bd2 60 60 } 61 61 62 struct ReferenceConversions final : public WithStmtsToAdd {62 struct ReferenceConversions final : public WithStmtsToAdd, public WithGuards { 63 63 Expression * postmutate( CastExpr * castExpr ); 64 64 Expression * postmutate( AddressExpr * addrExpr ); … … 71 71 72 72 struct FixIntrinsicResult final : public WithGuards { 73 enum { 74 NoSkip, 75 Skip, 76 SkipInProgress 77 } skip = NoSkip; 78 79 void premutate( AsmExpr * ) { GuardValue( skip ); skip = Skip; } 80 void premutate( ApplicationExpr * ) { GuardValue( skip ); skip = (skip == Skip) ? SkipInProgress : NoSkip; } 81 82 73 83 Expression * postmutate( ApplicationExpr * appExpr ); 74 84 void premutate( FunctionDecl * funcDecl ); … … 162 172 163 173 Expression * FixIntrinsicResult::postmutate( ApplicationExpr * appExpr ) { 164 if ( isIntrinsicReference( appExpr ) ) {174 if ( skip != SkipInProgress && isIntrinsicReference( appExpr ) ) { 165 175 // eliminate reference types from intrinsic applications - now they return lvalues 166 176 ReferenceType * result = strict_dynamic_cast< ReferenceType * >( appExpr->result ); -
src/Parser/parser.yy
raca6a54c r2fa5bd2 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : S un Aug 4 21:48:23201913 // Update Count : 43 6412 // Last Modified On : Sat Dec 7 10:43:44 2019 13 // Update Count : 4394 14 14 // 15 15 … … 211 211 } // forCtrl 212 212 213 KeywordCastExpr::Target Aggregate2Target( DeclarationNode::Aggregate aggr ) { 214 KeywordCastExpr::Target target; 215 switch ( aggr ) { 216 case DeclarationNode::Coroutine: target = KeywordCastExpr::Coroutine; break; 217 case DeclarationNode::Monitor: target = KeywordCastExpr::Monitor; break; 218 case DeclarationNode::Thread: target = KeywordCastExpr::Thread; break; 219 default: abort(); 220 } // switch 221 return target; 222 } // Aggregate2Target 223 213 224 214 225 bool forall = false, yyy = false; // aggregate have one or more forall qualifiers ? … … 365 376 %type<decl> abstract_parameter_declaration 366 377 367 %type<aggKey> aggregate_key 378 %type<aggKey> aggregate_key aggregate_data aggregate_control 368 379 %type<decl> aggregate_type aggregate_type_nobody 369 380 … … 650 661 | postfix_expression '.' '[' field_name_list ']' // CFA, tuple field selector 651 662 { $$ = new ExpressionNode( build_fieldSel( $1, build_tuple( $4 ) ) ); } 663 | postfix_expression '.' 
aggregate_control 664 { $$ = new ExpressionNode( build_keyword_cast( Aggregate2Target( $3 ), $1 ) ); } 652 665 | postfix_expression ARROW identifier 653 666 { $$ = new ExpressionNode( build_pfieldSel( $1, build_varref( $3 ) ) ); } … … 793 806 | '(' type_no_function ')' cast_expression 794 807 { $$ = new ExpressionNode( build_cast( $2, $4 ) ); } 795 // keyword cast cannot be grouped because of reduction in aggregate_key 796 | '(' GENERATOR '&' ')' cast_expression // CFA 797 { $$ = new ExpressionNode( build_keyword_cast( KeywordCastExpr::Coroutine, $5 ) ); } 798 | '(' COROUTINE '&' ')' cast_expression // CFA 799 { $$ = new ExpressionNode( build_keyword_cast( KeywordCastExpr::Coroutine, $5 ) ); } 800 | '(' THREAD '&' ')' cast_expression // CFA 801 { $$ = new ExpressionNode( build_keyword_cast( KeywordCastExpr::Thread, $5 ) ); } 802 | '(' MONITOR '&' ')' cast_expression // CFA 803 { $$ = new ExpressionNode( build_keyword_cast( KeywordCastExpr::Monitor, $5 ) ); } 808 | '(' aggregate_control '&' ')' cast_expression // CFA 809 { $$ = new ExpressionNode( build_keyword_cast( Aggregate2Target( $2 ), $5 ) ); } 804 810 // VIRTUAL cannot be opt because of look ahead issues 805 811 | '(' VIRTUAL ')' cast_expression // CFA … … 1423 1429 asm_operand: // GCC 1424 1430 string_literal '(' constant_expression ')' 1425 { $$ = new ExpressionNode( new AsmExpr( maybeMoveBuild< Expression >( (ExpressionNode *)nullptr ), $1, maybeMoveBuild< Expression >( $3 ) ) ); }1426 | '[' constant_expression']' string_literal '(' constant_expression ')'1427 { $$ = new ExpressionNode( new AsmExpr( maybeMoveBuild< Expression >( $2 ), $4, maybeMoveBuild< Expression >( $6 ) ) ); }1431 { $$ = new ExpressionNode( new AsmExpr( nullptr, $1, maybeMoveBuild< Expression >( $3 ) ) ); } 1432 | '[' IDENTIFIER ']' string_literal '(' constant_expression ')' 1433 { $$ = new ExpressionNode( new AsmExpr( $2, $4, maybeMoveBuild< Expression >( $6 ) ) ); } 1428 1434 ; 1429 1435 … … 2059 2065 2060 2066 aggregate_key: 2067 
aggregate_data 2068 | aggregate_control 2069 ; 2070 2071 aggregate_data: 2061 2072 STRUCT 2062 2073 { yyy = true; $$ = DeclarationNode::Struct; } 2063 2074 | UNION 2064 2075 { yyy = true; $$ = DeclarationNode::Union; } 2065 | EXCEPTION 2076 | EXCEPTION // CFA 2066 2077 { yyy = true; $$ = DeclarationNode::Exception; } 2067 | GENERATOR 2078 ; 2079 2080 aggregate_control: // CFA 2081 GENERATOR 2068 2082 { yyy = true; $$ = DeclarationNode::Coroutine; } 2069 2083 | COROUTINE … … 2096 2110 distInl( $3 ); 2097 2111 } 2112 | INLINE aggregate_control ';' // CFA 2113 { SemanticError( yylloc, "INLINE aggregate control currently unimplemented." ); $$ = nullptr; } 2098 2114 | typedef_declaration ';' // CFA 2099 2115 | cfa_field_declaring_list ';' // CFA, new style field declaration -
src/ResolvExpr/AlternativeFinder.cc
raca6a54c r2fa5bd2 69 69 void postvisit( CastExpr * castExpr ); 70 70 void postvisit( VirtualCastExpr * castExpr ); 71 void postvisit( KeywordCastExpr * castExpr ); 71 72 void postvisit( UntypedMemberExpr * memberExpr ); 72 73 void postvisit( MemberExpr * memberExpr ); … … 1255 1256 } 1256 1257 1258 void AlternativeFinder::Finder::postvisit( KeywordCastExpr * castExpr ) { 1259 assertf( castExpr->get_result(), "Cast target should have been set in Validate." ); 1260 auto ref = dynamic_cast<ReferenceType*>(castExpr->get_result()); 1261 assert(ref); 1262 auto inst = dynamic_cast<StructInstType*>(ref->base); 1263 assert(inst); 1264 auto target = inst->baseStruct; 1265 1266 AlternativeFinder finder( indexer, env ); 1267 1268 auto pick_alternatives = [target, this](AltList & found, bool expect_ref) { 1269 for(auto & alt : found) { 1270 Type * expr = alt.expr->get_result(); 1271 if(expect_ref) { 1272 auto res = dynamic_cast<ReferenceType*>(expr); 1273 if(!res) { continue; } 1274 expr = res->base; 1275 } 1276 1277 if(auto insttype = dynamic_cast<TypeInstType*>(expr)) { 1278 auto td = alt.env.lookup(insttype->name); 1279 if(!td) { continue; } 1280 expr = td->type; 1281 } 1282 1283 if(auto base = dynamic_cast<StructInstType*>(expr)) { 1284 if(base->baseStruct == target) { 1285 alternatives.push_back( 1286 std::move(alt) 1287 ); 1288 } 1289 } 1290 } 1291 }; 1292 1293 try { 1294 // Attempt 1 : turn (thread&)X into (thread_desc&)X.__thrd 1295 // Clone is purely for memory management 1296 std::unique_ptr<Expression> tech1 { new UntypedMemberExpr(new NameExpr(castExpr->concrete_target.field), castExpr->arg->clone()) }; 1297 1298 // don't prune here, since it's guaranteed all alternatives will have the same type 1299 finder.findWithoutPrune( tech1.get() ); 1300 pick_alternatives(finder.alternatives, false); 1301 1302 return; 1303 } catch(SemanticErrorException & ) {} 1304 1305 // Fallback : turn (thread&)X into (thread_desc&)get_thread(X) 1306 std::unique_ptr<Expression> fallback { 
UntypedExpr::createDeref( new UntypedExpr(new NameExpr(castExpr->concrete_target.getter), { castExpr->arg->clone() })) }; 1307 // don't prune here, since it's guaranteed all alternatives will have the same type 1308 finder.findWithoutPrune( fallback.get() ); 1309 1310 pick_alternatives(finder.alternatives, true); 1311 1312 // Whatever happens here, we have no more fallbacks 1313 } 1314 1257 1315 namespace { 1258 1316 /// Gets name from untyped member expression (member must be NameExpr) -
src/ResolvExpr/Resolver.cc
raca6a54c r2fa5bd2 485 485 visit_children = false; 486 486 findVoidExpression( asmExpr->operand, indexer ); 487 if ( asmExpr->get_inout() ) {488 findVoidExpression( asmExpr->inout, indexer );489 } // if490 487 } 491 488 … … 1365 1362 asmExpr = ast::mutate_field( 1366 1363 asmExpr, &ast::AsmExpr::operand, findVoidExpression( asmExpr->operand, symtab ) ); 1367 1368 if ( asmExpr->inout ) {1369 asmExpr = ast::mutate_field(1370 asmExpr, &ast::AsmExpr::inout, findVoidExpression( asmExpr->inout, symtab ) );1371 }1372 1364 1373 1365 return asmExpr; -
src/SynTree/Expression.cc
raca6a54c r2fa5bd2 527 527 } 528 528 529 AsmExpr::AsmExpr( const AsmExpr & other ) : Expression( other ), inout( maybeClone( other.inout )), constraint( maybeClone( other.constraint ) ), operand( maybeClone( other.operand ) ) {}529 AsmExpr::AsmExpr( const AsmExpr & other ) : Expression( other ), inout( other.inout ), constraint( maybeClone( other.constraint ) ), operand( maybeClone( other.operand ) ) {} 530 530 531 531 532 532 void AsmExpr::print( std::ostream & os, Indenter indent ) const { 533 533 os << "Asm Expression: " << std::endl; 534 if ( inout ) inout->print( os, indent+1 );534 if ( !inout.empty() ) os << "[" << inout << "] "; 535 535 if ( constraint ) constraint->print( os, indent+1 ); 536 536 if ( operand ) operand->print( os, indent+1 ); -
src/SynTree/Expression.h
raca6a54c r2fa5bd2 231 231 enum Target { 232 232 Coroutine, Thread, Monitor, NUMBER_OF_TARGETS 233 } target; 233 }; 234 struct Concrete { 235 std::string field; 236 std::string getter; 237 }; 238 Target target; 239 Concrete concrete_target; 234 240 235 241 KeywordCastExpr( Expression * arg, Target target ); … … 575 581 class AsmExpr : public Expression { 576 582 public: 577 Expression *inout;583 std::string inout; 578 584 Expression * constraint; 579 585 Expression * operand; 580 586 581 AsmExpr( Expression * inout, Expression * constraint, Expression * operand ) : inout( inout ), constraint( constraint ), operand( operand ) {}587 AsmExpr( const std::string * _inout, Expression * constraint, Expression * operand ) : inout( _inout ? *_inout : "" ), constraint( constraint ), operand( operand ) { delete _inout; } 582 588 AsmExpr( const AsmExpr & other ); 583 virtual ~AsmExpr() { delete inout; delete constraint; delete operand; }; 584 585 Expression * get_inout() const { return inout; } 586 void set_inout( Expression * newValue ) { inout = newValue; } 587 588 Expression * get_constraint() const { return constraint; } 589 void set_constraint( Expression * newValue ) { constraint = newValue; } 590 591 Expression * get_operand() const { return operand; } 592 void set_operand( Expression * newValue ) { operand = newValue; } 589 virtual ~AsmExpr() { delete constraint; delete operand; }; 593 590 594 591 virtual AsmExpr * clone() const override { return new AsmExpr( * this ); } -
src/cfa.make
raca6a54c r2fa5bd2 4 4 LTCFACOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ 5 5 $(LIBTOOLFLAGS) --mode=compile $(CFACC) $(DEFS) \ 6 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(CFAFLAGS) \ 7 $(AM_CFLAGS) $(CFLAGS) 6 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(AM_CFLAGS) $(CFAFLAGS) $(CFLAGS) 8 7 9 8 AM_V_CFA = $(am__v_CFA_@AM_V@) -
src/main.cc
raca6a54c r2fa5bd2 407 407 408 408 409 static const char optstring[] = ": hlLmNnpP:S:twW:D:";409 static const char optstring[] = ":c:ghlLmNnpP:S:twW:D:"; 410 410 411 411 enum { PreludeDir = 128 }; 412 412 static struct option long_opts[] = { 413 { "colors", required_argument, nullptr, 'c' }, 414 { "gdb", no_argument, nullptr, 'g' }, 413 415 { "help", no_argument, nullptr, 'h' }, 414 416 { "libcfa", no_argument, nullptr, 'l' }, … … 422 424 { "statistics", required_argument, nullptr, 'S' }, 423 425 { "tree", no_argument, nullptr, 't' }, 424 { "gdb", no_argument, nullptr, 'g' },425 426 { "", no_argument, nullptr, 0 }, // -w 426 427 { "", no_argument, nullptr, 0 }, // -W … … 430 431 431 432 static const char * description[] = { 432 "print help message", // -h 433 "generate libcfa.c", // -l 434 "generate line marks", // -L 435 "do not replace main", // -m 436 "do not generate line marks", // -N 437 "do not read prelude", // -n 433 "diagnostic color: never, always, or auto.", // -c 434 "wait for gdb to attach", // -g 435 "print help message", // -h 436 "generate libcfa.c", // -l 437 "generate line marks", // -L 438 "do not replace main", // -m 439 "do not generate line marks", // -N 440 "do not read prelude", // -n 438 441 "generate prototypes for prelude functions", // -p 439 "print", 442 "print", // -P 440 443 "<directory> prelude directory for debug/nodebug", // no flag 441 444 "<option-list> enable profiling information:\n counters,heap,time,all,none", // -S 442 "building cfa standard lib", // -t 443 "wait for gdb to attach", // -g 444 "", // -w 445 "", // -W 446 "", // -D 445 "building cfa standard lib", // -t 446 "", // -w 447 "", // -W 448 "", // -D 447 449 }; // description 448 450 … … 512 514 while ( (c = getopt_long( argc, argv, optstring, long_opts, nullptr )) != -1 ) { 513 515 switch ( c ) { 516 case 'c': // diagnostic colors 517 if ( strcmp( optarg, "always" ) == 0 ) { 518 ErrorHelpers::colors = ErrorHelpers::Colors::Always; 519 } else if ( strcmp( optarg, 
"never" ) == 0 ) { 520 ErrorHelpers::colors = ErrorHelpers::Colors::Never; 521 } else if ( strcmp( optarg, "auto" ) == 0 ) { 522 ErrorHelpers::colors = ErrorHelpers::Colors::Auto; 523 } // if 524 break; 514 525 case 'h': // help message 515 526 usage( argv ); // no return -
tests/.expect/alloc.txt
raca6a54c r2fa5bd2 30 30 CFA resize array alloc 31 31 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 32 CFA resize array alloc , fill32 CFA resize array alloc 33 33 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 34 CFA resize array alloc , fill34 CFA resize array alloc 35 35 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 36 36 CFA resize array alloc, fill 37 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x 1010101 0x1010101 0x1010101 0x10101010xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede37 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0xdeadbeef 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0x1010101 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 0xdededede 38 38 39 39 C memalign 42 42.5 -
tests/.expect/gccExtensions.x64.txt
raca6a54c r2fa5bd2 12 12 asm volatile ( "mov %1, %0\n\t" "add $1, %0" : "=" "r" ( _X3dsti_2 ) : : ); 13 13 asm volatile ( "mov %1, %0\n\t" "add $1, %0" : "=r" ( _X3dsti_2 ) : "r" ( _X3srci_2 ) : ); 14 asm ( "mov %1, %0\n\t" "add $1, %0" : "=r" ( _X3dsti_2 ), "=r" ( _X3srci_2 ) : [ _X3srci_2] "r" ( _X3dsti_2 ) : "r0" );14 asm ( "mov %1, %0\n\t" "add $1, %0" : "=r" ( _X3dsti_2 ), "=r" ( _X3srci_2 ) : [ src ] "r" ( _X3dsti_2 ) : "r0" ); 15 15 L2: L1: asm goto ( "frob %%r5, %1; jc %l[L1]; mov (%2), %%r5" : : "r" ( _X3srci_2 ), "r" ( (&_X3dsti_2) ) : "r5", "memory" : L1, L2 ); 16 16 double _Complex _X2c1Cd_2; -
tests/.expect/gccExtensions.x86.txt
raca6a54c r2fa5bd2 12 12 asm volatile ( "mov %1, %0\n\t" "add $1, %0" : "=" "r" ( _X3dsti_2 ) : : ); 13 13 asm volatile ( "mov %1, %0\n\t" "add $1, %0" : "=r" ( _X3dsti_2 ) : "r" ( _X3srci_2 ) : ); 14 asm ( "mov %1, %0\n\t" "add $1, %0" : "=r" ( _X3dsti_2 ), "=r" ( _X3srci_2 ) : [ _X3srci_2] "r" ( _X3dsti_2 ) : "r0" );14 asm ( "mov %1, %0\n\t" "add $1, %0" : "=r" ( _X3dsti_2 ), "=r" ( _X3srci_2 ) : [ src ] "r" ( _X3dsti_2 ) : "r0" ); 15 15 L2: L1: asm goto ( "frob %%r5, %1; jc %l[L1]; mov (%2), %%r5" : : "r" ( _X3srci_2 ), "r" ( (&_X3dsti_2) ) : "r5", "memory" : L1, L2 ); 16 16 double _Complex _X2c1Cd_2; -
tests/.expect/references.txt
raca6a54c r2fa5bd2 36 36 3 37 37 3 9 { 1., 7. }, [1, 2, 3] 38 4 38 39 Destructing a Y 39 40 Destructing a Y -
tests/Makefile.am
raca6a54c r2fa5bd2 46 46 47 47 # adjust CC to current flags 48 CC = $(if $(DISTCC_CFA_PATH),distcc $(DISTCC_CFA_PATH) ,$(TARGET_CFA) ${DEBUG_FLAGS} ${ARCH_FLAGS})48 CC = $(if $(DISTCC_CFA_PATH),distcc $(DISTCC_CFA_PATH) ${ARCH_FLAGS},$(TARGET_CFA) ${DEBUG_FLAGS} ${ARCH_FLAGS}) 49 49 CFACC = $(CC) 50 50