- Timestamp: Feb 25, 2020, 1:17:33 PM (6 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 7dc2e015
- Parents: 9fb8f01 (diff), dd9e1ca (diff)
- Location: libcfa
- Files: 1 added, 63 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Makefile.in (modified) (2 diffs)
- automake/missing (modified) (1 diff, 1 prop)
- configure (modified) (6 diffs)
- configure.ac (modified) (2 diffs)
- prelude/Makefile.am (modified) (5 diffs)
- prelude/Makefile.in (modified) (9 diffs)
- prelude/builtins.c (modified) (2 diffs)
- prelude/extras.regx (modified) (1 diff)
- prelude/extras.regx2 (added)
- prelude/prototypes.awk (modified) (4 diffs)
- prelude/sync-builtins.cf (modified) (8 diffs)
- src/Makefile.am (modified) (3 diffs)
- src/Makefile.in (modified) (8 diffs)
- src/assert.cfa (modified) (3 diffs)
- src/bits/align.hfa (modified) (2 diffs)
- src/bits/containers.hfa (modified) (8 diffs)
- src/bits/debug.cfa (modified) (6 diffs)
- src/bits/debug.hfa (modified) (4 diffs)
- src/bits/defs.hfa (modified) (3 diffs)
- src/bits/locks.hfa (modified) (5 diffs)
- src/bits/signal.hfa (modified) (2 diffs)
- src/clock.hfa (modified) (3 diffs)
- src/concurrency/CtxSwitch-arm.S (modified) (2 diffs)
- src/concurrency/CtxSwitch-i386.S (modified) (2 diffs)
- src/concurrency/CtxSwitch-x86_64.S (modified) (3 diffs)
- src/concurrency/alarm.cfa (modified) (5 diffs)
- src/concurrency/alarm.hfa (modified) (3 diffs)
- src/concurrency/coroutine.cfa (modified) (9 diffs)
- src/concurrency/coroutine.hfa (modified) (12 diffs)
- src/concurrency/invoke.c (modified) (7 diffs)
- src/concurrency/invoke.h (modified) (12 diffs)
- src/concurrency/kernel.cfa (modified) (40 diffs)
- src/concurrency/kernel.hfa (modified) (10 diffs)
- src/concurrency/kernel_private.hfa (modified) (4 diffs)
- src/concurrency/monitor.cfa (modified) (42 diffs)
- src/concurrency/monitor.hfa (modified) (8 diffs)
- src/concurrency/mutex.cfa (modified) (12 diffs)
- src/concurrency/mutex.hfa (modified) (5 diffs)
- src/concurrency/preemption.cfa (modified) (20 diffs)
- src/concurrency/thread.cfa (modified) (6 diffs)
- src/concurrency/thread.hfa (modified) (3 diffs)
- src/exception.c (modified) (15 diffs)
- src/executor.cfa (modified) (2 diffs)
- src/fstream.cfa (modified) (11 diffs)
- src/fstream.hfa (modified) (5 diffs)
- src/gmp.hfa (modified) (5 diffs)
- src/heap.cfa (modified) (40 diffs)
- src/interpose.cfa (modified) (11 diffs)
- src/iostream.cfa (modified) (43 diffs)
- src/iostream.hfa (modified) (10 diffs)
- src/math.hfa (modified) (2 diffs)
- src/rational.cfa (modified) (2 diffs)
- src/startup.cfa (modified) (3 diffs)
- src/stdhdr/assert.h (modified) (2 diffs)
- src/stdhdr/bfdlink.h (modified) (1 diff)
- src/stdhdr/hwloc.h (modified) (1 diff)
- src/stdhdr/krb5.h (modified) (1 diff)
- src/stdhdr/math.h (modified) (1 diff)
- src/stdhdr/sys/ucontext.h (modified) (1 diff)
- src/stdlib.cfa (modified) (8 diffs)
- src/stdlib.hfa (modified) (7 diffs)
- src/time.cfa (modified) (8 diffs)
- src/time.hfa (modified) (6 diffs)
- src/time_t.hfa (modified) (3 diffs)
Legend: in the diffs below, a leading "+" marks an added line, a leading "-" marks a removed line, and all other lines are unmodified context; "…" separates hunks.
libcfa/Makefile.in (r9fb8f01 → r3d5701e)

 CFACC = @CFACC@
 CFACPP = @CFACPP@
+CFADIR_HASH = @CFADIR_HASH@
 CFA_BINDIR = @CFA_BINDIR@
 CFA_INCDIR = @CFA_INCDIR@
…
 LIPO = @LIPO@
 LN_S = @LN_S@
+LOCAL_CC1 = @LOCAL_CC1@
+LOCAL_CFACC = @LOCAL_CFACC@
 LTLIBOBJS = @LTLIBOBJS@
 LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
libcfa/automake/missing (r9fb8f01 → r3d5701e)
Property mode changed from 120000 (symlink) to 100644 (regular file)

-/usr/share/automake-1.15/missing
+#! /bin/sh
+# Tdelisle : having the Makefiles.in automatically regenerated causes problems
+# when using multiple versions of automake, even if only on end user machines
+# therefore I am disabling that feature by commenting this script
+exit 0
libcfa/configure (r9fb8f01 → r3d5701e)

 CONFIG_CFLAGS
 ARCH_FLAGS
+CFADIR_HASH
+LOCAL_CC1
+LOCAL_CFACC
 CFACPP
 CFACC
+ENABLE_DISTCC_FALSE
+ENABLE_DISTCC_TRUE
 CFA_VERSION
 DRIVER_DIR
…
 enable_option_checking
 enable_silent_rules
+enable_distcc
 with_cfa_name
 enable_shared
…
   --enable-silent-rules   less verbose build output (undo: "make V=1")
   --disable-silent-rules  verbose build output (undo: "make V=0")
+  --enable-distcc         whether or not to enable distributed compilation
   --enable-shared[=PKGS]  build shared libraries [default=yes]
   --enable-static[=PKGS]  build static libraries [default=yes]
…
-CFACC=${DRIVER_DIR}cfa
+# Check whether --enable-distcc was given.
+if test "${enable_distcc+set}" = set; then :
+  enableval=$enable_distcc; enable_distcc=$enableval
+else
+  enable_distcc=no
+fi
+
+echo -n "checking for distributated build... "
+if test x$enable_distcc = xno; then
+	CFACC=${DRIVER_DIR}cfa
+	echo "no"
+else
+	tools="$(readlink -m $ac_confdir/)/../tools/build"
+	config=$(basename $(readlink -f .))
+	echo "$tools/distcc_hash $config"
+	CFADIR_HASH=$($tools/distcc_hash $config)
+	CFACC="distcc ~/.cfadistcc/${CFADIR_HASH}/cfa"
+	echo "yes (hash=${CFADIR_HASH})"
+fi
 CFACPP=${DRIVER_DIR}cfa-cpp
+LOCAL_CFACC=${DRIVER_DIR}cfa
+LOCAL_CC1=${DRIVER_DIR}cc1
+
+if test x$enable_distcc = xyes; then
+  ENABLE_DISTCC_TRUE=
+  ENABLE_DISTCC_FALSE='#'
+else
+  ENABLE_DISTCC_TRUE='#'
+  ENABLE_DISTCC_FALSE=
+fi
…
 case $CONFIGURATION in
 	"debug" )
-		CONFIG_CFLAGS="-Og -g"
+		CONFIG_CFLAGS="-O0 -g"
 		CONFIG_CFAFLAGS="-debug"
 		CONFIG_BUILDLIB="yes"
…
 fi
+if test -z "${ENABLE_DISTCC_TRUE}" && test -z "${ENABLE_DISTCC_FALSE}"; then
+  as_fn_error $? "conditional \"ENABLE_DISTCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${BUILDLIB_TRUE}" && test -z "${BUILDLIB_FALSE}"; then
   as_fn_error $? "conditional \"BUILDLIB\" was never defined.
libcfa/configure.ac (r9fb8f01 → r3d5701e)

 AC_ARG_VAR(CFA_VERSION, [The long version of cfa])
 
-CFACC=${DRIVER_DIR}cfa
+AC_ARG_ENABLE(distcc,
+	[  --enable-distcc         whether or not to enable distributed compilation],
+	enable_distcc=$enableval, enable_distcc=no)
+
+echo -n "checking for distributated build... "
+if test x$enable_distcc = xno; then
+	CFACC=${DRIVER_DIR}cfa
+	echo "no"
+else
+	tools="$(readlink -m $ac_confdir/)/../tools/build"
+	config=$(basename $(readlink -f .))
+	echo "$tools/distcc_hash $config"
+	CFADIR_HASH=$($tools/distcc_hash $config)
+	CFACC="distcc ~/.cfadistcc/${CFADIR_HASH}/cfa"
+	echo "yes (hash=${CFADIR_HASH})"
+fi
 CFACPP=${DRIVER_DIR}cfa-cpp
+LOCAL_CFACC=${DRIVER_DIR}cfa
+LOCAL_CC1=${DRIVER_DIR}cc1
+
+AM_CONDITIONAL([ENABLE_DISTCC], [test x$enable_distcc = xyes])
+
 AC_SUBST(CFACC)
 AC_SUBST(CFACPP)
+AC_SUBST(LOCAL_CFACC)
+AC_SUBST(LOCAL_CC1)
+AC_SUBST(CFADIR_HASH)
 AC_SUBST(CFA_VERSION)
…
 case $CONFIGURATION in
 	"debug" )
-		CONFIG_CFLAGS="-Og -g"
+		CONFIG_CFLAGS="-O0 -g"
 		CONFIG_CFAFLAGS="-debug"
 		CONFIG_BUILDLIB="yes"
libcfa/prelude/Makefile.am (r9fb8f01 → r3d5701e)

 ## Created On       : Sun May 31 08:54:01 2015
 ## Last Modified By : Peter A. Buhr
-## Last Modified On : Wed Dec 14 15:00:35 2016
-## Update Count     : 205
+## Last Modified On : Mon Feb  3 21:27:18 2020
+## Update Count     : 208
 ###############################################################################
…
 cfalib_DATA = gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c
 
-CC = @CFACC@
+CC = @LOCAL_CFACC@
 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC @ARCH_FLAGS@ @CONFIG_CFLAGS@
 AM_CFAFLAGS = @CONFIG_CFAFLAGS@
…
 extras.cf : ${srcdir}/extras.regx ${srcdir}/extras.c
 	${AM_V_GEN}gcc ${AM_CFLAGS} -E ${srcdir}/extras.c | grep -f ${srcdir}/extras.regx > extras.cf
+	${AM_V_GEN}gcc ${AM_CFLAGS} -E ${srcdir}/extras.c | grep -zo -f ${srcdir}/extras.regx2 | tr '\0' '\n' >> extras.cf
 
 # create forward declarations for gcc builtins
…
 # create forward declarations for cfa builtins
-builtins.cf : builtins.c ${CC}
+builtins.cf : builtins.c @LOCAL_CFACC@
 	${AM_V_GEN}gcc ${AM_CFLAGS} -E -P ${<} -o ${@} -MD -MP -MF $(DEPDIR)/builtins.Po -D__cforall
 	${AM_V_at}sed -i 's/builtins.o/builtins.cf/g' $(DEPDIR)/builtins.Po
…
 MOSTLYCLEANFILES = bootloader.c builtins.cf extras.cf gcc-builtins.c gcc-builtins.cf prelude.cfa
 MAINTAINERCLEANFILES = ${addprefix ${libdir}/,${cfalib_DATA}} ${addprefix ${libdir}/,${lib_LIBRARIES}}
+
+if ENABLE_DISTCC
+distribution: @LOCAL_CFACC@ @LOCAL_CC1@ @CFACPP@ gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c $(srcdir)/../../tools/build/push2dist.sh
+	${AM_V_GEN}$(srcdir)/../../tools/build/push2dist.sh @CFADIR_HASH@
+	@echo "Dummy file to track distribution to remote hosts" > ${@}
+
+all: all-am distribution
+endif ENABLE_DISTCC
libcfa/prelude/Makefile.in (r9fb8f01 → r3d5701e)

-# Makefile.in generated by automake 1.15 from Makefile.am.
+# Makefile.in generated by automake 1.16.1 from Makefile.am.
 # @configure_input@
 
-# Copyright (C) 1994-2014 Free Software Foundation, Inc.
+# Copyright (C) 1994-2018 Free Software Foundation, Inc.
 
 # This Makefile.in is free software; the Free Software Foundation
…
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
-CC = @CFACC@
+CC = @LOCAL_CFACC@
 CCAS = @CCAS@
 CCASDEPMODE = @CCASDEPMODE@
…
 CFACC = @CFACC@
 CFACPP = @CFACPP@
+CFADIR_HASH = @CFADIR_HASH@
 CFA_BINDIR = @CFA_BINDIR@
 CFA_INCDIR = @CFA_INCDIR@
…
 LIPO = @LIPO@
 LN_S = @LN_S@
+LOCAL_CC1 = @LOCAL_CC1@
+LOCAL_CFACC = @LOCAL_CFACC@
 LTLIBOBJS = @LTLIBOBJS@
 LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
…
 	  cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
 	*) \
-	  echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
-	  cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	  echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
+	  cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
 esac;
…
-distdir: $(DISTFILES)
+distdir: $(BUILT_SOURCES)
+	$(MAKE) $(AM_MAKEFLAGS) distdir-am
+
+distdir-am: $(DISTFILES)
 	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
 	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
…
 extras.cf : ${srcdir}/extras.regx ${srcdir}/extras.c
 	${AM_V_GEN}gcc ${AM_CFLAGS} -E ${srcdir}/extras.c | grep -f ${srcdir}/extras.regx > extras.cf
+	${AM_V_GEN}gcc ${AM_CFLAGS} -E ${srcdir}/extras.c | grep -zo -f ${srcdir}/extras.regx2 | tr '\0' '\n' >> extras.cf
 
 # create forward declarations for gcc builtins
…
 # create forward declarations for cfa builtins
-builtins.cf : builtins.c ${CC}
+builtins.cf : builtins.c @LOCAL_CFACC@
 	${AM_V_GEN}gcc ${AM_CFLAGS} -E -P ${<} -o ${@} -MD -MP -MF $(DEPDIR)/builtins.Po -D__cforall
 	${AM_V_at}sed -i 's/builtins.o/builtins.cf/g' $(DEPDIR)/builtins.Po
…
 maintainer-clean-local :
 	rm -rf $(DEPDIR)
+
+@ENABLE_DISTCC_TRUE@distribution: @LOCAL_CFACC@ @LOCAL_CC1@ @CFACPP@ gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c $(srcdir)/../../tools/build/push2dist.sh
+@ENABLE_DISTCC_TRUE@	${AM_V_GEN}$(srcdir)/../../tools/build/push2dist.sh @CFADIR_HASH@
+@ENABLE_DISTCC_TRUE@	@echo "Dummy file to track distribution to remote hosts" > ${@}
+
+@ENABLE_DISTCC_TRUE@all: all-am distribution
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
libcfa/prelude/builtins.c (r9fb8f01 → r3d5701e)

 // Created On : Fri Jul 21 16:21:03 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Jun 25 18:06:52 2019
-// Update Count : 97
+// Last Modified On : Thu Nov 21 16:31:39 2019
+// Update Count : 101
…
 // universal typed pointer constant
-// Compiler issue: there is a problem with anonymous types that do not have a size.
-static inline forall( dtype DT | sized(DT) ) DT * intptr( uintptr_t addr ) { return (DT *)addr; }
+static inline forall( dtype DT ) DT * intptr( uintptr_t addr ) { return (DT *)addr; }
 
 // exponentiation operator implementation
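Dropping the sized(DT) assertion lets intptr produce pointers to types whose size is unknown, such as opaque/incomplete types. In plain C the same idea is just a cast through uintptr_t; here is a minimal sketch, where the INTPTR macro is a hypothetical stand-in for the pointee type CFA infers from context:

```c
#include <stdint.h>
#include <stdio.h>

// Hypothetical C approximation of CFA's intptr(): CFA infers the
// pointee type DT from the call context; C must name it explicitly.
#define INTPTR(DT, addr) ((DT *)(uintptr_t)(addr))

int main(void) {
	int x = 42;
	uintptr_t addr = (uintptr_t)&x;   // capture an address as an integer
	int * p = INTPTR(int, addr);      // turn the integer back into a typed pointer
	printf("%d\n", *p);               // prints 42
	return 0;
}
```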
libcfa/prelude/extras.regx (r9fb8f01 → r3d5701e)

 typedef.* uint32_t;
 typedef.* uint64_t;
+typedef.* __uint_least16_t;
+typedef.* __uint_least32_t;
 typedef.* char16_t;
 typedef.* char32_t;
 typedef.* wchar_t;
-extern.*\*malloc\(.*\).*
-extern.* free\(.*\).*
-extern.* exit\(.*\).*
-extern.* atexit\(.*\).*
-extern.* abort\(.*\).*
-extern.* printf\(.*\).*
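For context, extras.cf is produced by preprocessing extras.c and keeping only the lines these patterns match (the new extras.regx2 file is applied with a multi-line grep, per the Makefile.am change above). The two added patterns capture typedefs of roughly the following shape; the exact underlying types are libc- and platform-dependent, so these lines are illustrative only:

```c
/* Illustrative glibc-style typedefs matched by the two new patterns;
   the underlying integer types vary by platform. */
typedef unsigned short int __uint_least16_t;
typedef unsigned int __uint_least32_t;
```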
libcfa/prelude/prototypes.awk (r9fb8f01 → r3d5701e)

 # Created On : Sat May 16 07:57:37 2015
 # Last Modified By : Peter A. Buhr
-# Last Modified On : Thu Jun  6 20:46:28 2019
-# Update Count : 34
+# Last Modified On : Sat Feb  8 09:46:58 2020
+# Update Count : 36
…
 BEGIN {
-    FS = "[( )]"
+	FS = "[( )]"
 	# order so string search is longest string
 	i=-1
…
 /BT_FN/ {
-    for (i = 1; i <= NF; i ++) {
-	if( match($i, "BT_FN") != 0 ) {
-	    prototypes[$i] = $i
-	}
+	for (i = 1; i <= NF; i += 1 ) {
+		if ( match($i, "BT_FN") != 0 ) {
+			prototypes[$i] = $i
+		}
+	}
 }
…

The remaining hunk re-indents the macro-generation loop in the END rule (spaces to tabs); the text it generates is unchanged.
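prototypes.awk turns each GCC builtin signature code (BT_FN_<return>_<arg>...) into a macro that forwards to FUNC_SIMPLE. A small C-preprocessor sketch of the generated shape; the BT_* and FUNC_SIMPLE definitions here are hypothetical stand-ins for the real prelude machinery:

```c
// Hypothetical stand-ins (the real definitions live in the prelude build):
#define BT_INT int
#define BT_PTR void *
#define FUNC_SIMPLE(ret, name, ...) ret name(__VA_ARGS__);

// Shape of a macro the awk script emits for the signature code BT_FN_INT_PTR:
#define BT_FN_INT_PTR(NAME) FUNC_SIMPLE(BT_INT, NAME, BT_PTR)

BT_FN_INT_PTR(__example_builtin)   // expands to: int __example_builtin(void *);
```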
libcfa/prelude/sync-builtins.cf (r9fb8f01 → r3d5701e)

Every hunk in this file applies the same two changes to a family of GCC builtins: the size-suffixed variants (_1, _2, _4, _8, _16) are deleted, and overloads for signed long int and unsigned long int are added between the unsigned int and long long int overloads. The first hunk, for __sync_fetch_and_add, is representative:

 char __sync_fetch_and_add(volatile char *, char,...);
-char __sync_fetch_and_add_1(volatile char *, char,...);
 signed char __sync_fetch_and_add(volatile signed char *, signed char,...);
-signed char __sync_fetch_and_add_1(volatile signed char *, signed char,...);
 unsigned char __sync_fetch_and_add(volatile unsigned char *, unsigned char,...);
-unsigned char __sync_fetch_and_add_1(volatile unsigned char *, unsigned char,...);
 signed short __sync_fetch_and_add(volatile signed short *, signed short,...);
-signed short __sync_fetch_and_add_2(volatile signed short *, signed short,...);
 unsigned short __sync_fetch_and_add(volatile unsigned short *, unsigned short,...);
-unsigned short __sync_fetch_and_add_2(volatile unsigned short *, unsigned short,...);
 signed int __sync_fetch_and_add(volatile signed int *, signed int,...);
-signed int __sync_fetch_and_add_4(volatile signed int *, signed int,...);
 unsigned int __sync_fetch_and_add(volatile unsigned int *, unsigned int,...);
-unsigned int __sync_fetch_and_add_4(volatile unsigned int *, unsigned int,...);
+signed long int __sync_fetch_and_add(volatile signed long int *, signed long int,...);
+unsigned long int __sync_fetch_and_add(volatile unsigned long int *, unsigned long int,...);
 signed long long int __sync_fetch_and_add(volatile signed long long int *, signed long long int,...);
-signed long long int __sync_fetch_and_add_8(volatile signed long long int *, signed long long int,...);
 unsigned long long int __sync_fetch_and_add(volatile unsigned long long int *, unsigned long long int,...);
-unsigned long long int __sync_fetch_and_add_8(volatile unsigned long long int *, unsigned long long int,...);
 #if defined(__SIZEOF_INT128__)
 signed __int128 __sync_fetch_and_add(volatile signed __int128 *, signed __int128,...);
-signed __int128 __sync_fetch_and_add_16(volatile signed __int128 *, signed __int128,...);
 unsigned __int128 __sync_fetch_and_add(volatile unsigned __int128 *, unsigned __int128,...);
-unsigned __int128 __sync_fetch_and_add_16(volatile unsigned __int128 *, unsigned __int128,...);
 #endif
…
The identical transformation is applied to __sync_fetch_and_sub, __sync_fetch_and_or, __sync_fetch_and_and, __sync_fetch_and_xor, __sync_fetch_and_nand, __sync_add_and_fetch, __sync_sub_and_fetch, __sync_or_and_fetch, __sync_and_and_fetch, __sync_xor_and_fetch, __sync_nand_and_fetch, __sync_bool_compare_and_swap, __sync_val_compare_and_swap, __sync_lock_test_and_set and __sync_lock_release; the polymorphic overloads

 forall(dtype T) _Bool __sync_bool_compare_and_swap(T * volatile *, T *, T*, ...);
 forall(dtype T) T * __sync_val_compare_and_swap(T * volatile *, T *, T*,...);

remain unchanged. The __atomic_test_and_set and __atomic_clear families gain the same long int overloads:

 _Bool __atomic_test_and_set(volatile signed int *, int);
 _Bool __atomic_test_and_set(volatile unsigned int *, int);
+_Bool __atomic_test_and_set(volatile signed long int *, int);
+_Bool __atomic_test_and_set(volatile unsigned long int *, int);
 _Bool __atomic_test_and_set(volatile signed long long int *, int);
 _Bool __atomic_test_and_set(volatile unsigned long long int *, int);
…
 void __atomic_clear(volatile signed int *, int);
 void __atomic_clear(volatile unsigned int *, int);
+void __atomic_clear(volatile signed long int *, int);
+void __atomic_clear(volatile unsigned long int *, int);
 void __atomic_clear(volatile signed long long int *, int);
 void __atomic_clear(volatile unsigned long long int *, int);
…
__atomic_exchange additionally gains _Bool overloads at the head of the family:

+_Bool __atomic_exchange_n(volatile _Bool *, _Bool, int);
+void __atomic_exchange(volatile _Bool *, volatile _Bool *, volatile _Bool *, int);
 char __atomic_exchange_n(volatile char *, char, int);
-char __atomic_exchange_1(volatile char *, char, int);
 void __atomic_exchange(volatile char *, volatile char *, volatile char *, int);
…
The remaining hunks make the matching changes (size-suffixed variants removed, long int overloads added) to the rest of __atomic_exchange and to __atomic_load, __atomic_compare_exchange_n/__atomic_compare_exchange and __atomic_store; the captured diff ends mid-way through the __atomic_store hunk:

 void __atomic_store_n(volatile unsigned int *, unsigned int, int);
-void __atomic_store_4(volatile unsigned int *, unsigned int, int);
 void __atomic_store(volatile unsigned int *, unsigned int *, int);
 void
__atomic_store_n(volatile signed long int *, signed long int, int); 412 void __atomic_store(volatile signed long int *, signed long int *, int); 413 void __atomic_store_n(volatile unsigned long int *, unsigned long int, int); 414 void __atomic_store(volatile unsigned long int *, unsigned long int *, int); 577 415 void __atomic_store_n(volatile signed long long int *, signed long long int, int); 578 void __atomic_store_8(volatile signed long long int *, signed long long int, int);579 416 void __atomic_store(volatile signed long long int *, signed long long int *, int); 580 417 void __atomic_store_n(volatile unsigned long long int *, unsigned long long int, int); 581 void __atomic_store_8(volatile unsigned long long int *, unsigned long long int, int);582 418 void __atomic_store(volatile unsigned long long int *, unsigned long long int *, int); 583 419 #if defined(__SIZEOF_INT128__) 584 420 void __atomic_store_n(volatile signed __int128 *, signed __int128, int); 585 void __atomic_store_16(volatile signed __int128 *, signed __int128, int);586 421 void __atomic_store(volatile signed __int128 *, signed __int128 *, int); 587 422 void __atomic_store_n(volatile unsigned __int128 *, unsigned __int128, int); 588 void __atomic_store_16(volatile unsigned __int128 *, unsigned __int128, int);589 423 void __atomic_store(volatile unsigned __int128 *, unsigned __int128 *, int); 590 424 #endif … … 593 427 594 428 char __atomic_add_fetch (volatile char *, char, int); 595 char __atomic_add_fetch_1(volatile char *, char, int);596 429 signed char __atomic_add_fetch (volatile signed char *, signed char, int); 597 signed char __atomic_add_fetch_1(volatile signed char *, signed char, int);598 430 unsigned char __atomic_add_fetch (volatile unsigned char *, unsigned char, int); 599 unsigned char __atomic_add_fetch_1(volatile unsigned char *, unsigned char, int);600 431 signed short __atomic_add_fetch (volatile signed short *, signed short, int); 601 signed short __atomic_add_fetch_2(volatile signed short *, signed short, int);602 432 unsigned short __atomic_add_fetch (volatile unsigned short *, unsigned short, int); 603 unsigned short __atomic_add_fetch_2(volatile unsigned short *, unsigned short, int);604 433 signed int __atomic_add_fetch (volatile signed int *, signed int, int); 605 signed int __atomic_add_fetch_4(volatile signed int *, signed int, int);606 434 unsigned int __atomic_add_fetch (volatile unsigned int *, unsigned int, int); 607 unsigned int __atomic_add_fetch_4(volatile unsigned int *, unsigned int, int); 435 signed long int __atomic_add_fetch (volatile signed long int *, signed long int, int); 436 unsigned long int __atomic_add_fetch (volatile unsigned long int *, unsigned long int, int); 608 437 signed long long int __atomic_add_fetch (volatile signed long long int *, signed long long int, int); 609 signed long long int __atomic_add_fetch_8(volatile signed long long int *, signed long long int, int);610 438 unsigned long long int __atomic_add_fetch (volatile unsigned long long int *, unsigned long long int, int); 611 unsigned long long int __atomic_add_fetch_8(volatile unsigned long long int *, unsigned long long int, int);612 439 #if defined(__SIZEOF_INT128__) 613 440 signed __int128 __atomic_add_fetch (volatile signed __int128 *, signed __int128, int); 614 signed __int128 __atomic_add_fetch_16(volatile signed __int128 *, signed __int128, int);615 441 unsigned __int128 __atomic_add_fetch (volatile unsigned __int128 *, unsigned __int128, int); 616 unsigned __int128 __atomic_add_fetch_16(volatile 
unsigned __int128 *, unsigned __int128, int);617 442 #endif 618 443 619 444 char __atomic_sub_fetch (volatile char *, char, int); 620 char __atomic_sub_fetch_1(volatile char *, char, int);621 445 signed char __atomic_sub_fetch (volatile signed char *, signed char, int); 622 signed char __atomic_sub_fetch_1(volatile signed char *, signed char, int);623 446 unsigned char __atomic_sub_fetch (volatile unsigned char *, unsigned char, int); 624 unsigned char __atomic_sub_fetch_1(volatile unsigned char *, unsigned char, int);625 447 signed short __atomic_sub_fetch (volatile signed short *, signed short, int); 626 signed short __atomic_sub_fetch_2(volatile signed short *, signed short, int);627 448 unsigned short __atomic_sub_fetch (volatile unsigned short *, unsigned short, int); 628 unsigned short __atomic_sub_fetch_2(volatile unsigned short *, unsigned short, int);629 449 signed int __atomic_sub_fetch (volatile signed int *, signed int, int); 630 signed int __atomic_sub_fetch_4(volatile signed int *, signed int, int);631 450 unsigned int __atomic_sub_fetch (volatile unsigned int *, unsigned int, int); 632 unsigned int __atomic_sub_fetch_4(volatile unsigned int *, unsigned int, int); 451 signed long int __atomic_sub_fetch (volatile signed long int *, signed long int, int); 452 unsigned long int __atomic_sub_fetch (volatile unsigned long int *, unsigned long int, int); 633 453 signed long long int __atomic_sub_fetch (volatile signed long long int *, signed long long int, int); 634 signed long long int __atomic_sub_fetch_8(volatile signed long long int *, signed long long int, int);635 454 unsigned long long int __atomic_sub_fetch (volatile unsigned long long int *, unsigned long long int, int); 636 unsigned long long int __atomic_sub_fetch_8(volatile unsigned long long int *, unsigned long long int, int);637 455 #if defined(__SIZEOF_INT128__) 638 456 signed __int128 __atomic_sub_fetch (volatile signed __int128 *, signed __int128, int); 639 signed __int128 __atomic_sub_fetch_16(volatile signed __int128 *, signed __int128, int);640 457 unsigned __int128 __atomic_sub_fetch (volatile unsigned __int128 *, unsigned __int128, int); 641 unsigned __int128 __atomic_sub_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);642 458 #endif 643 459 644 460 char __atomic_and_fetch (volatile char *, char, int); 645 char __atomic_and_fetch_1(volatile char *, char, int);646 461 signed char __atomic_and_fetch (volatile signed char *, signed char, int); 647 signed char __atomic_and_fetch_1(volatile signed char *, signed char, int);648 462 unsigned char __atomic_and_fetch (volatile unsigned char *, unsigned char, int); 649 unsigned char __atomic_and_fetch_1(volatile unsigned char *, unsigned char, int);650 463 signed short __atomic_and_fetch (volatile signed short *, signed short, int); 651 signed short __atomic_and_fetch_2(volatile signed short *, signed short, int);652 464 unsigned short __atomic_and_fetch (volatile unsigned short *, unsigned short, int); 653 unsigned short __atomic_and_fetch_2(volatile unsigned short *, unsigned short, int);654 465 signed int __atomic_and_fetch (volatile signed int *, signed int, int); 655 signed int __atomic_and_fetch_4(volatile signed int *, signed int, int);656 466 unsigned int __atomic_and_fetch (volatile unsigned int *, unsigned int, int); 657 unsigned int __atomic_and_fetch_4(volatile unsigned int *, unsigned int, int); 467 signed long int __atomic_and_fetch (volatile signed long int *, signed long int, int); 468 unsigned long int __atomic_and_fetch
(volatile unsigned long int *, unsigned long int, int); 658 469 signed long long int __atomic_and_fetch (volatile signed long long int *, signed long long int, int); 659 signed long long int __atomic_and_fetch_8(volatile signed long long int *, signed long long int, int);660 470 unsigned long long int __atomic_and_fetch (volatile unsigned long long int *, unsigned long long int, int); 661 unsigned long long int __atomic_and_fetch_8(volatile unsigned long long int *, unsigned long long int, int);662 471 #if defined(__SIZEOF_INT128__) 663 472 signed __int128 __atomic_and_fetch (volatile signed __int128 *, signed __int128, int); 664 signed __int128 __atomic_and_fetch_16(volatile signed __int128 *, signed __int128, int);665 473 unsigned __int128 __atomic_and_fetch (volatile unsigned __int128 *, unsigned __int128, int); 666 unsigned __int128 __atomic_and_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);667 474 #endif 668 475 669 476 char __atomic_nand_fetch (volatile char *, char, int); 670 char __atomic_nand_fetch_1(volatile char *, char, int);671 477 signed char __atomic_nand_fetch (volatile signed char *, signed char, int); 672 signed char __atomic_nand_fetch_1(volatile signed char *, signed char, int);673 478 unsigned char __atomic_nand_fetch (volatile unsigned char *, unsigned char, int); 674 unsigned char __atomic_nand_fetch_1(volatile unsigned char *, unsigned char, int);675 479 signed short __atomic_nand_fetch (volatile signed short *, signed short, int); 676 signed short __atomic_nand_fetch_2(volatile signed short *, signed short, int);677 480 unsigned short __atomic_nand_fetch (volatile unsigned short *, unsigned short, int); 678 unsigned short __atomic_nand_fetch_2(volatile unsigned short *, unsigned short, int);679 481 signed int __atomic_nand_fetch (volatile signed int *, signed int, int); 680 signed int __atomic_nand_fetch_4(volatile signed int *, signed int, int);681 482 unsigned int __atomic_nand_fetch (volatile unsigned int *, unsigned int, int); 682 unsigned int __atomic_nand_fetch_4(volatile unsigned int *, unsigned int, int); 483 signed long int __atomic_nand_fetch (volatile signed long int *, signed long int, int); 484 unsigned long int __atomic_nand_fetch (volatile unsigned long int *, unsigned long int, int); 683 485 signed long long int __atomic_nand_fetch (volatile signed long long int *, signed long long int, int); 684 signed long long int __atomic_nand_fetch_8(volatile signed long long int *, signed long long int, int);685 486 unsigned long long int __atomic_nand_fetch (volatile unsigned long long int *, unsigned long long int, int); 686 unsigned long long int __atomic_nand_fetch_8(volatile unsigned long long int *, unsigned long long int, int);687 487 #if defined(__SIZEOF_INT128__) 688 488 signed __int128 __atomic_nand_fetch (volatile signed __int128 *, signed __int128, int); 689 signed __int128 __atomic_nand_fetch_16(volatile signed __int128 *, signed __int128, int);690 489 unsigned __int128 __atomic_nand_fetch (volatile unsigned __int128 *, unsigned __int128, int); 691 unsigned __int128 __atomic_nand_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);692 490 #endif 693 491 694 492 char __atomic_xor_fetch (volatile char *, char, int); 695 char __atomic_xor_fetch_1(volatile char *, char, int);696 493 signed char __atomic_xor_fetch (volatile signed char *, signed char, int); 697 signed char __atomic_xor_fetch_1(volatile signed char *, signed char, int);698 494 unsigned char __atomic_xor_fetch (volatile unsigned char *, unsigned char, int); 
699 unsigned char __atomic_xor_fetch_1(volatile unsigned char *, unsigned char, int);700 495 signed short __atomic_xor_fetch (volatile signed short *, signed short, int); 701 signed short __atomic_xor_fetch_2(volatile signed short *, signed short, int);702 496 unsigned short __atomic_xor_fetch (volatile unsigned short *, unsigned short, int); 703 unsigned short __atomic_xor_fetch_2(volatile unsigned short *, unsigned short, int);704 497 signed int __atomic_xor_fetch (volatile signed int *, signed int, int); 705 signed int __atomic_xor_fetch_4(volatile signed int *, signed int, int);706 498 unsigned int __atomic_xor_fetch (volatile unsigned int *, unsigned int, int); 707 unsigned int __atomic_xor_fetch_4(volatile unsigned int *, unsigned int, int); 499 signed long int __atomic_xor_fetch (volatile signed long int *, signed long int, int); 500 unsigned long int __atomic_xor_fetch (volatile unsigned long int *, unsigned long int, int); 708 501 signed long long int __atomic_xor_fetch (volatile signed long long int *, signed long long int, int); 709 signed long long int __atomic_xor_fetch_8(volatile signed long long int *, signed long long int, int);710 502 unsigned long long int __atomic_xor_fetch (volatile unsigned long long int *, unsigned long long int, int); 711 unsigned long long int __atomic_xor_fetch_8(volatile unsigned long long int *, unsigned long long int, int);712 503 #if defined(__SIZEOF_INT128__) 713 504 signed __int128 __atomic_xor_fetch (volatile signed __int128 *, signed __int128, int); 714 signed __int128 __atomic_xor_fetch_16(volatile signed __int128 *, signed __int128, int);715 505 unsigned __int128 __atomic_xor_fetch (volatile unsigned __int128 *, unsigned __int128, int); 716 unsigned __int128 __atomic_xor_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);717 506 #endif 718 507 719 508 char __atomic_or_fetch (volatile char *, char, int); 720 char __atomic_or_fetch_1(volatile char *, char, int);721 509 signed char __atomic_or_fetch (volatile signed char *, signed char, int); 722 signed char __atomic_or_fetch_1(volatile signed char *, signed char, int);723 510 unsigned char __atomic_or_fetch (volatile unsigned char *, unsigned char, int); 724 unsigned char __atomic_or_fetch_1(volatile unsigned char *, unsigned char, int);725 511 signed short __atomic_or_fetch (volatile signed short *, signed short, int); 726 signed short __atomic_or_fetch_2(volatile signed short *, signed short, int);727 512 unsigned short __atomic_or_fetch (volatile unsigned short *, unsigned short, int); 728 unsigned short __atomic_or_fetch_2(volatile unsigned short *, unsigned short, int);729 513 signed int __atomic_or_fetch (volatile signed int *, signed int, int); 730 signed int __atomic_or_fetch_4(volatile signed int *, signed int, int);731 514 unsigned int __atomic_or_fetch (volatile unsigned int *, unsigned int, int); 732 unsigned int __atomic_or_fetch_4(volatile unsigned int *, unsigned int, int); 515 signed long int __atomic_or_fetch (volatile signed long int *, signed long int, int); 516 unsigned long int __atomic_or_fetch (volatile unsigned long int *, unsigned long int, int); 733 517 signed long long int __atomic_or_fetch (volatile signed long long int *, signed long long int, int); 734 signed long long int __atomic_or_fetch_8(volatile signed long long int *, signed long long int, int);735 518 unsigned long long int __atomic_or_fetch (volatile unsigned long long int *, unsigned long long int, int); 736 unsigned long long int __atomic_or_fetch_8(volatile unsigned long long int *, 
unsigned long long int, int);737 519 #if defined(__SIZEOF_INT128__) 738 520 signed __int128 __atomic_or_fetch (volatile signed __int128 *, signed __int128, int); 739 signed __int128 __atomic_or_fetch_16(volatile signed __int128 *, signed __int128, int);740 521 unsigned __int128 __atomic_or_fetch (volatile unsigned __int128 *, unsigned __int128, int); 741 unsigned __int128 __atomic_or_fetch_16(volatile unsigned __int128 *, unsigned __int128, int);742 522 #endif 743 523 744 524 char __atomic_fetch_add (volatile char *, char, int); 745 char __atomic_fetch_add_1(volatile char *, char, int);746 525 signed char __atomic_fetch_add (volatile signed char *, signed char, int); 747 signed char __atomic_fetch_add_1(volatile signed char *, signed char, int);748 526 unsigned char __atomic_fetch_add (volatile unsigned char *, unsigned char, int); 749 unsigned char __atomic_fetch_add_1(volatile unsigned char *, unsigned char, int);750 527 signed short __atomic_fetch_add (volatile signed short *, signed short, int); 751 signed short __atomic_fetch_add_2(volatile signed short *, signed short, int);752 528 unsigned short __atomic_fetch_add (volatile unsigned short *, unsigned short, int); 753 unsigned short __atomic_fetch_add_2(volatile unsigned short *, unsigned short, int);754 529 signed int __atomic_fetch_add (volatile signed int *, signed int, int); 755 signed int __atomic_fetch_add_4(volatile signed int *, signed int, int);756 530 unsigned int __atomic_fetch_add (volatile unsigned int *, unsigned int, int); 757 unsigned int __atomic_fetch_add_4(volatile unsigned int *, unsigned int, int); 531 signed long int __atomic_fetch_add (volatile signed long int *, signed long int, int); 532 unsigned long int __atomic_fetch_add (volatile unsigned long int *, unsigned long int, int); 758 533 signed long long int __atomic_fetch_add (volatile signed long long int *, signed long long int, int); 759 signed long long int __atomic_fetch_add_8(volatile signed long long int *, signed long long int, int);760 534 unsigned long long int __atomic_fetch_add (volatile unsigned long long int *, unsigned long long int, int); 761 unsigned long long int __atomic_fetch_add_8(volatile unsigned long long int *, unsigned long long int, int);762 535 #if defined(__SIZEOF_INT128__) 763 536 signed __int128 __atomic_fetch_add (volatile signed __int128 *, signed __int128, int); 764 signed __int128 __atomic_fetch_add_16(volatile signed __int128 *, signed __int128, int);765 537 unsigned __int128 __atomic_fetch_add (volatile unsigned __int128 *, unsigned __int128, int); 766 unsigned __int128 __atomic_fetch_add_16(volatile unsigned __int128 *, unsigned __int128, int);767 538 #endif 768 539 769 540 char __atomic_fetch_sub (volatile char *, char, int); 770 char __atomic_fetch_sub_1(volatile char *, char, int);771 541 signed char __atomic_fetch_sub (volatile signed char *, signed char, int); 772 signed char __atomic_fetch_sub_1(volatile signed char *, signed char, int);773 542 unsigned char __atomic_fetch_sub (volatile unsigned char *, unsigned char, int); 774 unsigned char __atomic_fetch_sub_1(volatile unsigned char *, unsigned char, int);775 543 signed short __atomic_fetch_sub (volatile signed short *, signed short, int); 776 signed short __atomic_fetch_sub_2(volatile signed short *, signed short, int);777 544 unsigned short __atomic_fetch_sub (volatile unsigned short *, unsigned short, int); 778 unsigned short __atomic_fetch_sub_2(volatile unsigned short *, unsigned short, int);779 545 signed int __atomic_fetch_sub (volatile signed int *, signed 
int, int); 780 signed int __atomic_fetch_sub_4(volatile signed int *, signed int, int);781 546 unsigned int __atomic_fetch_sub (volatile unsigned int *, unsigned int, int); 782 unsigned int __atomic_fetch_sub_4(volatile unsigned int *, unsigned int, int); 547 signed long int __atomic_fetch_sub (volatile signed long int *, signed long int, int); 548 unsigned long int __atomic_fetch_sub (volatile unsigned long int *, unsigned long int, int); 783 549 signed long long int __atomic_fetch_sub (volatile signed long long int *, signed long long int, int); 784 signed long long int __atomic_fetch_sub_8(volatile signed long long int *, signed long long int, int);785 550 unsigned long long int __atomic_fetch_sub (volatile unsigned long long int *, unsigned long long int, int); 786 unsigned long long int __atomic_fetch_sub_8(volatile unsigned long long int *, unsigned long long int, int);787 551 #if defined(__SIZEOF_INT128__) 788 552 signed __int128 __atomic_fetch_sub (volatile signed __int128 *, signed __int128, int); 789 signed __int128 __atomic_fetch_sub_16(volatile signed __int128 *, signed __int128, int);790 553 unsigned __int128 __atomic_fetch_sub (volatile unsigned __int128 *, unsigned __int128, int); 791 unsigned __int128 __atomic_fetch_sub_16(volatile unsigned __int128 *, unsigned __int128, int);792 554 #endif 793 555 794 556 char __atomic_fetch_and (volatile char *, char, int); 795 char __atomic_fetch_and_1(volatile char *, char, int);796 557 signed char __atomic_fetch_and (volatile signed char *, signed char, int); 797 signed char __atomic_fetch_and_1(volatile signed char *, signed char, int);798 558 unsigned char __atomic_fetch_and (volatile unsigned char *, unsigned char, int); 799 unsigned char __atomic_fetch_and_1(volatile unsigned char *, unsigned char, int);800 559 signed short __atomic_fetch_and (volatile signed short *, signed short, int); 801 signed short __atomic_fetch_and_2(volatile signed short *, signed short, int);802 560 unsigned short __atomic_fetch_and (volatile unsigned short *, unsigned short, int); 803 unsigned short __atomic_fetch_and_2(volatile unsigned short *, unsigned short, int);804 561 signed int __atomic_fetch_and (volatile signed int *, signed int, int); 805 signed int __atomic_fetch_and_4(volatile signed int *, signed int, int);806 562 unsigned int __atomic_fetch_and (volatile unsigned int *, unsigned int, int); 807 unsigned int __atomic_fetch_and_4(volatile unsigned int *, unsigned int, int); 563 signed long int __atomic_fetch_and (volatile signed long int *, signed long int, int); 564 unsigned long int __atomic_fetch_and (volatile unsigned long int *, unsigned long int, int); 808 565 signed long long int __atomic_fetch_and (volatile signed long long int *, signed long long int, int); 809 signed long long int __atomic_fetch_and_8(volatile signed long long int *, signed long long int, int);810 566 unsigned long long int __atomic_fetch_and (volatile unsigned long long int *, unsigned long long int, int); 811 unsigned long long int __atomic_fetch_and_8(volatile unsigned long long int *, unsigned long long int, int);812 567 #if defined(__SIZEOF_INT128__) 813 568 signed __int128 __atomic_fetch_and (volatile signed __int128 *, signed __int128, int); 814 signed __int128 __atomic_fetch_and_16(volatile signed __int128 *, signed __int128, int);815 569 unsigned __int128 __atomic_fetch_and (volatile unsigned __int128 *, unsigned __int128, int); 816 unsigned __int128 __atomic_fetch_and_16(volatile unsigned __int128 *, unsigned __int128, int);817 570 #endif 818 571 819 572 
char __atomic_fetch_nand (volatile char *, char, int); 820 char __atomic_fetch_nand_1(volatile char *, char, int);821 573 signed char __atomic_fetch_nand (volatile signed char *, signed char, int); 822 signed char __atomic_fetch_nand_1(volatile signed char *, signed char, int);823 574 unsigned char __atomic_fetch_nand (volatile unsigned char *, unsigned char, int); 824 unsigned char __atomic_fetch_nand_1(volatile unsigned char *, unsigned char, int);825 575 signed short __atomic_fetch_nand (volatile signed short *, signed short, int); 826 signed short __atomic_fetch_nand_2(volatile signed short *, signed short, int);827 576 unsigned short __atomic_fetch_nand (volatile unsigned short *, unsigned short, int); 828 unsigned short __atomic_fetch_nand_2(volatile unsigned short *, unsigned short, int);829 577 signed int __atomic_fetch_nand (volatile signed int *, signed int, int); 830 signed int __atomic_fetch_nand_4(volatile signed int *, signed int, int);831 578 unsigned int __atomic_fetch_nand (volatile unsigned int *, unsigned int, int); 832 unsigned int __atomic_fetch_nand_4(volatile unsigned int *, unsigned int, int); 579 signed long int __atomic_fetch_nand (volatile signed long int *, signed long int, int); 580 unsigned long int __atomic_fetch_nand (volatile unsigned long int *, unsigned long int, int); 833 581 signed long long int __atomic_fetch_nand (volatile signed long long int *, signed long long int, int); 834 signed long long int __atomic_fetch_nand_8(volatile signed long long int *, signed long long int, int);835 582 unsigned long long int __atomic_fetch_nand (volatile unsigned long long int *, unsigned long long int, int); 836 unsigned long long int __atomic_fetch_nand_8(volatile unsigned long long int *, unsigned long long int, int);837 583 #if defined(__SIZEOF_INT128__) 838 584 signed __int128 __atomic_fetch_nand (volatile signed __int128 *, signed __int128, int); 839 signed __int128 __atomic_fetch_nand_16(volatile signed __int128 *, signed __int128, int);840 585 unsigned __int128 __atomic_fetch_nand (volatile unsigned __int128 *, unsigned __int128, int); 841 unsigned __int128 __atomic_fetch_nand_16(volatile unsigned __int128 *, unsigned __int128, int);842 586 #endif 843 587 844 588 char __atomic_fetch_xor (volatile char *, char, int); 845 char __atomic_fetch_xor_1(volatile char *, char, int);846 589 signed char __atomic_fetch_xor (volatile signed char *, signed char, int); 847 signed char __atomic_fetch_xor_1(volatile signed char *, signed char, int);848 590 unsigned char __atomic_fetch_xor (volatile unsigned char *, unsigned char, int); 849 unsigned char __atomic_fetch_xor_1(volatile unsigned char *, unsigned char, int);850 591 signed short __atomic_fetch_xor (volatile signed short *, signed short, int); 851 signed short __atomic_fetch_xor_2(volatile signed short *, signed short, int);852 592 unsigned short __atomic_fetch_xor (volatile unsigned short *, unsigned short, int); 853 unsigned short __atomic_fetch_xor_2(volatile unsigned short *, unsigned short, int);854 593 signed int __atomic_fetch_xor (volatile signed int *, signed int, int); 855 signed int __atomic_fetch_xor_4(volatile signed int *, signed int, int);856 594 unsigned int __atomic_fetch_xor (volatile unsigned int *, unsigned int, int); 857 unsigned int __atomic_fetch_xor_4(volatile unsigned int *, unsigned int, int); 595 signed long int __atomic_fetch_xor (volatile signed long int *, signed long int, int); 596 unsigned long int __atomic_fetch_xor (volatile unsigned long int *, unsigned long int, int); 858 597 
signed long long int __atomic_fetch_xor (volatile signed long long int *, signed long long int, int); 859 signed long long int __atomic_fetch_xor_8(volatile signed long long int *, signed long long int, int);860 598 unsigned long long int __atomic_fetch_xor (volatile unsigned long long int *, unsigned long long int, int); 861 unsigned long long int __atomic_fetch_xor_8(volatile unsigned long long int *, unsigned long long int, int);862 599 #if defined(__SIZEOF_INT128__) 863 600 signed __int128 __atomic_fetch_xor (volatile signed __int128 *, signed __int128, int); 864 signed __int128 __atomic_fetch_xor_16(volatile signed __int128 *, signed __int128, int);865 601 unsigned __int128 __atomic_fetch_xor (volatile unsigned __int128 *, unsigned __int128, int); 866 unsigned __int128 __atomic_fetch_xor_16(volatile unsigned __int128 *, unsigned __int128, int);867 602 #endif 868 603 869 604 char __atomic_fetch_or (volatile char *, char, int); 870 char __atomic_fetch_or_1(volatile char *, char, int);871 605 signed char __atomic_fetch_or (volatile signed char *, signed char, int); 872 signed char __atomic_fetch_or_1(volatile signed char *, signed char, int);873 606 unsigned char __atomic_fetch_or (volatile unsigned char *, unsigned char, int); 874 unsigned char __atomic_fetch_or_1(volatile unsigned char *, unsigned char, int);875 607 signed short __atomic_fetch_or (volatile signed short *, signed short, int); 876 signed short __atomic_fetch_or_2(volatile signed short *, signed short, int);877 608 unsigned short __atomic_fetch_or (volatile unsigned short *, unsigned short, int); 878 unsigned short __atomic_fetch_or_2(volatile unsigned short *, unsigned short, int);879 609 signed int __atomic_fetch_or (volatile signed int *, signed int, int); 880 signed int __atomic_fetch_or_4(volatile signed int *, signed int, int);881 610 unsigned int __atomic_fetch_or (volatile unsigned int *, unsigned int, int); 882 unsigned int __atomic_fetch_or_4(volatile unsigned int *, unsigned int, int); 611 signed long int __atomic_fetch_or (volatile signed long int *, signed long int, int); 612 unsigned long int __atomic_fetch_or (volatile unsigned long int *, unsigned long int, int); 883 613 signed long long int __atomic_fetch_or (volatile signed long long int *, signed long long int, int); 884 signed long long int __atomic_fetch_or_8(volatile signed long long int *, signed long long int, int);885 614 unsigned long long int __atomic_fetch_or (volatile unsigned long long int *, unsigned long long int, int); 886 unsigned long long int __atomic_fetch_or_8(volatile unsigned long long int *, unsigned long long int, int);887 615 #if defined(__SIZEOF_INT128__) 888 616 signed __int128 __atomic_fetch_or (volatile signed __int128 *, signed __int128, int); 889 signed __int128 __atomic_fetch_or_16(volatile signed __int128 *, signed __int128, int);890 617 unsigned __int128 __atomic_fetch_or (volatile unsigned __int128 *, unsigned __int128, int); 891 unsigned __int128 __atomic_fetch_or_16(volatile unsigned __int128 *, unsigned __int128, int);892 618 #endif 893 619 -
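With the machine-size-suffixed __atomic_*_1/_2/_4/_8/_16 forms dropped, the prelude now exposes one overload of each GCC __atomic builtin per operand type, including the new long int widths. A minimal usage sketch, not part of the changeset (the counter and function names are hypothetical):

	// C∀ overload resolution picks the declaration matching the operand
	// type, so the single spelling __atomic_fetch_add serves every width.
	volatile unsigned long int tickets = 0;

	unsigned long int next_ticket(void) {
		// returns the value before the increment; __ATOMIC_SEQ_CST is
		// the strongest of the standard GCC memory orders
		return __atomic_fetch_add( &tickets, 1, __ATOMIC_SEQ_CST );
	}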
libcfa/src/Makefile.am
r9fb8f01 r3d5701e 32 32 # use -no-include-stdhdr to prevent rebuild cycles 33 33 # The built sources must not depend on the installed headers 34 AM_CFAFLAGS = -quiet -in-tree -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@34 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 35 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC @ARCH_FLAGS@ @CONFIG_CFLAGS@35 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 36 36 AM_CCASFLAGS = -g -Wall -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 37 37 CFACC = @CFACC@ … … 64 64 # add dependency of cfa files 65 65 libobjs = $(addsuffix .lo, $(basename $(filter %.cfa,$(libsrc)))) 66 $(libobjs) : @CFACC@ @CFACPP@ prelude.cfa66 $(libobjs) : @LOCAL_CFACC@ @CFACPP@ prelude.cfa 67 67 68 68 thread_libobjs = $(addsuffix .lo, $(basename $(filter %.cfa,$(thread_libsrc)))) 69 $(thread_libobjs) : @CFACC@ @CFACPP@ prelude.cfa69 $(thread_libobjs) : @LOCAL_CFACC@ @CFACPP@ prelude.cfa 70 70 71 71 … … 86 86 87 87 88 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @CFACC@ @CFACPP@ 89 ${AM_V_GEN}$(CFACOMPILE) -quiet -in-tree -XCFA -l ${<} -c -o ${@} 88 if ENABLE_DISTCC 90 89 91 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @CFACC@ @CFACPP@ 90 ../prelude/distribution: @LOCAL_CFACC@ @LOCAL_CC1@ @CFACPP@ ../prelude/gcc-builtins.cf ../prelude/builtins.cf ../prelude/extras.cf ../prelude/prelude.cfa ../prelude/bootloader.c $(srcdir)/../../tools/build/push2dist.sh 91 @+make -C ../prelude distribution 92 93 prelude.o prelude.lo $(libobjs) $(thread_libobjs) : ../prelude/distribution 94 95 endif ENABLE_DISTCC 96 97 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 98 ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA -l ${<} -c -o ${@} 99 100 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 92 101 ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \ 93 $(CFACOMPILE) -quiet -in-tree -XCFA -l ${<} -c -o ${@} 94 102 $(CFACOMPILE) -quiet -XCFA -l ${<} -c -o ${@} 95 104 96 105 #---------------------------------------------------------------------------------------------------------------- -
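One detail of AM_CFAFLAGS above deserves a note: $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) is a GNU-make idiom where findstring yields its first argument only when it occurs in the second, and if turns that non-empty result into extra flags for the matching target alone. A standalone sketch under assumed names (the pattern rule and target are hypothetical, not from this changeset):

	# only kernel.lo receives the extra debugger flag
	gdbwaittarget = kernel.lo
	%.lo: %.cfa
		$(CFACC) $(if $(findstring $(gdbwaittarget),$@),-XCFA --gdb) -c -o $@ $<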
libcfa/src/Makefile.in
r9fb8f01 r3d5701e 284 284 CFACC = @CFACC@ 285 285 CFACPP = @CFACPP@ 286 CFADIR_HASH = @CFADIR_HASH@ 286 287 CFA_BINDIR = @CFA_BINDIR@ 287 288 CFA_INCDIR = @CFA_INCDIR@ … … 327 328 LIPO = @LIPO@ 328 329 LN_S = @LN_S@ 330 LOCAL_CC1 = @LOCAL_CC1@ 331 LOCAL_CFACC = @LOCAL_CFACC@ 329 332 LTLIBOBJS = @LTLIBOBJS@ 330 333 LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ … … 413 416 LTCFACOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ 414 417 $(LIBTOOLFLAGS) --mode=compile $(CFACC) $(DEFS) \ 415 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(CFAFLAGS) \ 416 $(AM_CFLAGS) $(CFLAGS) 418 $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CFAFLAGS) $(AM_CFLAGS) $(CFAFLAGS) $(CFLAGS) 417 419 418 420 AM_V_CFA = $(am__v_CFA_@AM_V@) … … 420 422 am__v_CFA_0 = @echo " CFA " $@; 421 423 am__v_CFA_1 = 422 AM_V_JAVAC = $(am__v_JAVAC_@AM_V@)423 am__v_JAVAC_ = $(am__v_JAVAC_@AM_DEFAULT_V@)424 am__v_JAVAC_0 = @echo " JAVAC " $@;425 am__v_JAVAC_1 =426 AM_V_GOC = $(am__v_GOC_@AM_V@)427 am__v_GOC_ = $(am__v_GOC_@AM_DEFAULT_V@)428 am__v_GOC_0 = @echo " GOC " $@;429 am__v_GOC_1 =430 424 UPPCC = u++ 431 425 UPPCOMPILE = $(UPPCC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_UPPFLAGS) $(UPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_CFLAGS) $(CFLAGS) … … 434 428 am__v_UPP_0 = @echo " UPP " $@; 435 429 am__v_UPP_1 = 430 AM_V_GOC = $(am__v_GOC_@AM_V@) 431 am__v_GOC_ = $(am__v_GOC_@AM_DEFAULT_V@) 432 am__v_GOC_0 = @echo " GOC " $@; 433 am__v_GOC_1 = 434 AM_V_RUST = $(am__v_RUST_@AM_V@) 435 am__v_RUST_ = $(am__v_RUST_@AM_DEFAULT_V@) 436 am__v_RUST_0 = @echo " RUST " $@; 437 am__v_RUST_1 = 438 AM_V_NODEJS = $(am__v_NODEJS_@AM_V@) 439 am__v_NODEJS_ = $(am__v_NODEJS_@AM_DEFAULT_V@) 440 am__v_NODEJS_0 = @echo " NODEJS " $@; 441 am__v_NODEJS_1 = 442 AM_V_JAVAC = $(am__v_JAVAC_@AM_V@) 443 am__v_JAVAC_ = $(am__v_JAVAC_@AM_DEFAULT_V@) 444 am__v_JAVAC_0 = @echo " JAVAC " $@; 445 am__v_JAVAC_1 = 436 446 lib_LTLIBRARIES = libcfa.la libcfathread.la 437 447 gdbwaittarget = "" … … 441 451 # use -no-include-stdhdr to prevent rebuild cycles 442 452 # The built sources must not depend on the installed headers 443 AM_CFAFLAGS = -quiet -in-tree -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@454 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 444 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC @ARCH_FLAGS@ @CONFIG_CFLAGS@455 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 445 456 AM_CCASFLAGS = -g -Wall -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 446 457 @BUILDLIB_FALSE@headers_nosrc = … … 937 947 $(LTCFACOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ 938 948 $(am__mv) $$depbase.Tpo $$depbase.Plo 939 $(libobjs) : @CFACC@ @CFACPP@ prelude.cfa940 $(thread_libobjs) : @CFACC@ @CFACPP@ prelude.cfa949 $(libobjs) : @LOCAL_CFACC@ @CFACPP@ prelude.cfa950 $(thread_libobjs) : @LOCAL_CFACC@ @CFACPP@ prelude.cfa 941 951 942 952 -include $(libdeps) … … 944 954 -include $(thread_libdeps) 945 955 946 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @CFACC@ @CFACPP@ 947 ${AM_V_GEN}$(CFACOMPILE) -quiet -in-tree -XCFA -l ${<} -c -o ${@} 948 949 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @CFACC@ @CFACPP@ 956 @ENABLE_DISTCC_TRUE@../prelude/distribution: @LOCAL_CFACC@ @LOCAL_CC1@ @CFACPP@ ../prelude/gcc-builtins.cf ../prelude/builtins.cf ../prelude/extras.cf ../prelude/prelude.cfa ../prelude/bootloader.c $(srcdir)/../../tools/build/push2dist.sh 957 @ENABLE_DISTCC_TRUE@
@+make -C ../prelude distribution 958 959 @ENABLE_DISTCC_TRUE@prelude.o prelude.lo $(libobjs) $(thread_libobjs) : ../prelude/distribution 960 961 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 962 ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA -l ${<} -c -o ${@} 963 964 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 950 965 ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \ 951 $(CFACOMPILE) -quiet -in-tree -XCFA -l ${<} -c -o ${@}966 $(CFACOMPILE) -quiet -XCFA -l ${<} -c -o ${@} 952 967 953 968 #---------------------------------------------------------------------------------------------------------------- -
libcfa/src/assert.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jul 20 15:10:26 201713 // Update Count : 212 // Last Modified On : Tue Feb 4 13:00:18 2020 13 // Update Count : 6 14 14 // 15 15 … … 17 17 #include <stdarg.h> // varargs 18 18 #include <stdio.h> // fprintf 19 #include <unistd.h> // STDERR_FILENO 19 20 #include "bits/debug.hfa" 20 21 … … 25 26 26 27 // called by macro assert in assert.h 27 void __assert_fail( const char *assertion, const char *file, unsigned int line, const char *function ) {28 __cfaabi_dbg_bits_print_safe( CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file );28 void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) { 29 __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file ); 29 30 abort(); 30 31 } 31 32 32 33 // called by macro assertf 33 void __assert_fail_f( const char *assertion, const char *file, unsigned int line, const char *function, const char *fmt, ... ) {34 __cfaabi_dbg_bits_acquire();35 __cfaabi_dbg_bits_print_nolock( CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file );34 void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) { 35 __cfaabi_bits_acquire(); 36 __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file ); 36 37 37 38 va_list args; 38 39 va_start( args, fmt ); 39 __cfaabi_dbg_bits_print_vararg( fmt, args );40 __cfaabi_bits_print_vararg( STDERR_FILENO, fmt, args ); 40 41 va_end( args ); 41 42 42 __cfaabi_dbg_bits_print_nolock( "\n" );43 __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" ); 43 __cfaabi_dbg_bits_release();44 __cfaabi_bits_release(); 44 45 abort(); 45 46 } -
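A hypothetical call site for the handlers above, assuming the usual C∀ assert.h wrappers that route assert to __assert_fail and assertf to __assert_fail_f:

	#include <assert.h>

	int get( int a[], int i, int n ) {
		// on failure, prints the standard assertion header plus this
		// formatted message to STDERR_FILENO, then aborts
		assertf( 0 <= i && i < n, "index %d outside [0,%d)", i, n );
		return a[i];
	}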
libcfa/src/bits/align.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jul 21 23:05:35 201713 // Update Count : 212 // Last Modified On : Sat Nov 16 18:58:22 2019 13 // Update Count : 3 14 14 // 15 15 // This library is free software; you can redistribute it and/or modify it … … 33 33 34 34 // Minimum size used to align memory boundaries for memory allocations. 35 #define libAlign() (sizeof(double)) 35 //#define libAlign() (sizeof(double)) 36 // gcc-7 uses xmm instructions, which require 16-byte alignment. 37 #define libAlign() (16) 36 38 37 39 // Check for power of 2 -
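Because libAlign() is now a power of 2, the usual add-and-mask rounding applies, which is exactly what the power-of-2 check below protects. A small sketch (the helper name is illustrative only):

	// round a request up to the next libAlign() boundary
	static inline size_t align_up( size_t size ) {
		return (size + libAlign() - 1) & ~(libAlign() - 1);
	}
	// e.g. align_up( 24 ) == 32 when libAlign() is 16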
libcfa/src/bits/containers.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Oct 31 16:38:50 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed J un 26 08:52:20 201913 // Update Count : 412 // Last Modified On : Wed Jan 15 07:42:35 2020 13 // Update Count : 28 14 14 15 15 #pragma once … … 44 44 45 45 forall(dtype T | sized(T)) 46 static inline T & ?[?]( __small_array(T) & this, __lock_size_t idx) {46 static inline T & ?[?]( __small_array(T) & this, __lock_size_t idx ) { 47 47 return ((typeof(this.data))this.data)[idx]; 48 48 } 49 49 50 50 forall(dtype T | sized(T)) 51 static inline T & ?[?]( const __small_array(T) & this, __lock_size_t idx) {51 static inline T & ?[?]( const __small_array(T) & this, __lock_size_t idx ) { 52 52 return ((typeof(this.data))this.data)[idx]; 53 53 } 54 54 55 forall(dtype T) 56 static inline T * begin( const __small_array(T) & this ) { 57 return ((typeof(this.data))this.data); 58 } 59 55 60 forall(dtype T | sized(T)) 56 static inline T* begin( const __small_array(T) & this ) { 57 return ((typeof(this.data))this.data); 58 } 59 60 forall(dtype T | sized(T)) 61 static inline T* end( const __small_array(T) & this ) { 61 static inline T * end( const __small_array(T) & this ) { 62 62 return ((typeof(this.data))this.data) + this.size; 63 63 } … … 70 70 #ifdef __cforall 71 71 trait is_node(dtype T) { 72 T *& get_next( T& );72 T *& get_next( T & ); 73 73 }; 74 74 #endif … … 97 97 forall(dtype T) 98 98 static inline void ?{}( __stack(T) & this ) { 99 (this.top){ NULL }; 100 } 101 102 forall(dtype T | is_node(T) | sized(T)) 103 static inline void push( __stack(T) & this, T * val ) { 104 verify( !get_next( *val ) ); 105 get_next( *val ) = this.top; 106 this.top = val; 107 } 108 109 forall(dtype T | is_node(T) | sized(T)) 110 static inline T * pop( __stack(T) & this ) { 111 T * top = this.top; 112 if( top ) { 113 this.top = get_next( *top ); 114 get_next( *top ) = NULL; 115 } 116 return top; 117 } 118 119 forall(dtype T | is_node(T)) 120 static inline int ?!=?( const __stack(T) & this, __attribute__((unused)) zero_t zero ) { 121 return this.top != 0; 99 (this.top){ 0p }; 100 } 101 102 static inline forall( dtype T | is_node(T) ) { 103 void push( __stack(T) & this, T * val ) { 104 verify( !get_next( *val ) ); 105 get_next( *val ) = this.top; 106 this.top = val; 107 } 108 109 T * pop( __stack(T) & this ) { 110 T * top = this.top; 111 if( top ) { 112 this.top = get_next( *top ); 113 get_next( *top ) = 0p; 114 } 115 return top; 116 } 117 118 int ?!=?( const __stack(T) & this, __attribute__((unused)) zero_t zero ) { 119 return this.top != 0; 120 } 122 121 } 123 122 #endif … … 145 144 146 145 #ifdef __cforall 147 148 forall(dtype T) 149 static inline void ?{}( __queue(T) & this ) with( this ) { 150 head{ NULL }; 151 tail{ &head }; 152 } 153 154 forall(dtype T | is_node(T) | sized(T)) 155 static inline void append( __queue(T) & this, T * val ) with( this ) { 156 verify(tail != NULL); 157 *tail = val; 158 tail = &get_next( *val ); 159 } 160 161 forall(dtype T | is_node(T) | sized(T)) 162 static inline T * pop_head( __queue(T) & this ) { 163 T * head = this.head; 164 if( head ) { 165 this.head = get_next( *head ); 166 if( !get_next( *head ) ) { 167 this.tail = &this.head; 168 } 169 get_next( *head ) = NULL; 170 } 171 return head; 172 } 173 174 forall(dtype T | is_node(T) | sized(T)) 175 static inline T * remove( __queue(T) & this, T ** it ) with( this ) { 176 T * val = *it; 177 verify( val ); 178 179 (*it) = get_next( *val ); 180 181 if( tail == &get_next( *val ) ) { 182 tail = it; 183 } 184 
185 get_next( *val ) = NULL; 186 187 verify( (head == NULL) == (&head == tail) ); 188 verify( *tail == NULL ); 189 return val; 190 } 191 192 forall(dtype T | is_node(T)) 193 static inline int ?!=?( const __queue(T) & this, __attribute__((unused)) zero_t zero ) { 194 return this.head != 0; 146 static inline forall( dtype T | is_node(T) ) { 147 void ?{}( __queue(T) & this ) with( this ) { 148 head{ 1p }; 149 tail{ &head }; 150 verify(*tail == 1p); 151 } 152 153 void append( __queue(T) & this, T * val ) with( this ) { 154 verify(tail != 0p); 155 verify(*tail == 1p); 156 *tail = val; 157 tail = &get_next( *val ); 158 *tail = 1p; 159 } 160 161 T * pop_head( __queue(T) & this ) { 162 verify(*this.tail == 1p); 163 T * head = this.head; 164 if( head != 1p ) { 165 this.head = get_next( *head ); 166 if( get_next( *head ) == 1p ) { 167 this.tail = &this.head; 168 } 169 get_next( *head ) = 0p; 170 verify(*this.tail == 1p); 171 return head; 172 } 173 verify(*this.tail == 1p); 174 return 0p; 175 } 176 177 T * remove( __queue(T) & this, T ** it ) with( this ) { 178 T * val = *it; 179 verify( val ); 180 181 (*it) = get_next( *val ); 182 183 if( tail == &get_next( *val ) ) { 184 tail = it; 185 } 186 187 get_next( *val ) = 0p; 188 189 verify( (head == 1p) == (&head == tail) ); 190 verify( *tail == 1p ); 191 return val; 192 } 193 194 int ?!=?( const __queue(T) & this, __attribute__((unused)) zero_t zero ) { 195 return this.head != 0; 196 } 195 197 } 196 198 #endif … … 223 225 224 226 #ifdef __cforall 225 226 forall(dtype T | sized(T)) 227 forall(dtype T ) 227 228 static inline [void] ?{}( __dllist(T) & this, * [T * & next, T * & prev] ( T & ) __get ) { 228 this.head{ NULL};229 this.head{ 0p }; 229 230 this.__get = __get; 230 231 } … … 232 233 #define next 0 233 234 #define prev 1 234 forall(dtype T | sized(T)) 235 static inline void push_front( __dllist(T) & this, T & node ) with( this ) { 236 verify(__get); 237 if ( head ) { 238 __get( node ).next = head; 239 __get( node ).prev = __get( *head ).prev; 240 // inserted node must be consistent before it is seen 235 static inline forall(dtype T) { 236 void push_front( __dllist(T) & this, T & node ) with( this ) { 237 verify(__get); 238 if ( head ) { 239 __get( node ).next = head; 240 __get( node ).prev = __get( *head ).prev; 241 // inserted node must be consistent before it is seen 242 // prevent code movement across barrier 243 asm( "" : : : "memory" ); 244 __get( *head ).prev = &node; 245 T & _prev = *__get( node ).prev; 246 __get( _prev ).next = &node; 247 } else { 248 __get( node ).next = &node; 249 __get( node ).prev = &node; 250 } 251 241 252 // prevent code movement across barrier 242 253 asm( "" : : : "memory" ); 243 __get( *head ).prev = &node; 244 T & _prev = *__get( node ).prev; 245 __get( _prev ).next = &node; 246 } 247 else { 248 __get( node ).next = &node; 249 __get( node ).prev = &node; 250 } 251 252 // prevent code movement across barrier 253 asm( "" : : : "memory" ); 254 head = &node; 255 } 256 257 forall(dtype T | sized(T)) 258 static inline void remove( __dllist(T) & this, T & node ) with( this ) { 259 verify(__get); 260 if ( &node == head ) { 261 if ( __get( *head ).next == head ) { 262 head = NULL; 263 } 264 else { 265 head = __get( *head ).next; 266 } 267 } 268 __get( *__get( node ).next ).prev = __get( node ).prev; 269 __get( *__get( node ).prev ).next = __get( node ).next; 270 __get( node ).next = NULL; 271 __get( node ).prev = NULL; 272 } 273 274 forall(dtype T | sized(T)) 275 static inline int ?!=?( const __dllist(T) & this, 
__attribute__((unused)) zero_t zero ) { 276 return this.head != 0; 254 head = &node; 255 } 256 257 void remove( __dllist(T) & this, T & node ) with( this ) { 258 verify(__get); 259 if ( &node == head ) { 260 if ( __get( *head ).next == head ) { 261 head = 0p; 262 } else { 263 head = __get( *head ).next; 264 } 265 } 266 __get( *__get( node ).next ).prev = __get( node ).prev; 267 __get( *__get( node ).prev ).next = __get( node ).next; 268 __get( node ).next = 0p; 269 __get( node ).prev = 0p; 270 } 271 272 int ?!=?( const __dllist(T) & this, __attribute__((unused)) zero_t zero ) { 273 return this.head != 0; 274 } 277 275 } 278 276 #undef next … … 286 284 287 285 #endif 286 287 // Local Variables: // 288 // tab-width: 4 // 289 // End: // -
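The containers above are intrusive: links live inside the elements, and __queue now uses 1p as an end-of-list sentinel so a null link, the tail link, and an empty queue stay distinguishable, as the added verify calls check. A hedged usage sketch (the task type and its fields are hypothetical):

	struct task { int id; task * next; };
	static inline task *& get_next( task & t ) { return t.next; } // satisfies is_node

	void demo(void) {
		__queue(task) ready;               // head starts as the 1p sentinel
		task t1 = { 1, 0p }, t2 = { 2, 0p };
		append( ready, &t1 );
		append( ready, &t2 );
		task * first = pop_head( ready );  // &t1; returns 0p once empty
	}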
libcfa/src/bits/debug.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Thu Mar 30 12:30:01 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 14 22:17:35 201913 // Update Count : 412 // Last Modified On : Tue Feb 4 13:03:16 2020 13 // Update Count : 11 14 14 // 15 15 … … 27 27 28 28 extern "C" { 29 30 void __cfaabi_dbg_bits_write( const char *in_buffer, int len ) { 29 void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) { 31 30 // ensure all data is written 32 31 for ( int count = 0, retcode; count < len; count += retcode ) { … … 34 33 35 34 for ( ;; ) { 36 retcode = write( STDERR_FILENO, in_buffer, len - count );35 retcode = write( fd, in_buffer, len - count ); 37 36 38 37 // not a timer interrupt ? … … 44 43 } 45 44 46 void __cfaabi_dbg_bits_acquire() __attribute__((__weak__)) {}47 void __cfaabi_dbg_bits_release() __attribute__((__weak__)) {}45 void __cfaabi_bits_acquire() __attribute__((__weak__)) {} 46 void __cfaabi_bits_release() __attribute__((__weak__)) {} 48 47 49 void __cfaabi_dbg_bits_print_safe ( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) )) {48 void __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) { 50 49 va_list args; 51 50 52 51 va_start( args, fmt ); 53 __cfaabi_dbg_bits_acquire();52 __cfaabi_bits_acquire(); 54 53 55 54 int len = vsnprintf( buffer, buffer_size, fmt, args ); 56 __cfaabi_dbg_bits_write( buffer, len );55 __cfaabi_bits_write( fd, buffer, len ); 57 56 58 __cfaabi_dbg_bits_release();57 __cfaabi_bits_release(); 59 58 va_end( args ); 60 59 } 61 60 62 void __cfaabi_dbg_bits_print_nolock( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) )) {61 void __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) { 63 62 va_list args; 64 63 … … 66 65 67 66 int len = vsnprintf( buffer, buffer_size, fmt, args ); 68 __cfaabi_dbg_bits_write( buffer, len );67 __cfaabi_bits_write( fd, buffer, len ); 69 68 70 69 va_end( args ); 71 70 } 72 71 73 void __cfaabi_dbg_bits_print_vararg( const char fmt[], va_list args ) {72 void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list args ) { 74 73 int len = vsnprintf( buffer, buffer_size, fmt, args ); 75 __cfaabi_dbg_bits_write( buffer, len );74 __cfaabi_bits_write( fd, buffer, len ); 76 75 } 77 76 78 void __cfaabi_dbg_bits_print_buffer( char in_buffer[], int in_buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 3, 4) )) {77 void __cfaabi_bits_print_buffer( int fd, char in_buffer[], int in_buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )) { 79 78 va_list args; 80 79 … … 82 81 83 82 int len = vsnprintf( in_buffer, in_buffer_size, fmt, args ); 84 __cfaabi_dbg_bits_write( in_buffer, len );83 __cfaabi_bits_write( fd, in_buffer, len ); 85 84 86 85 va_end( args ); -
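With the descriptor now a parameter, the same locked, EINTR-retrying writers can serve targets other than stderr; two hypothetical call sites (the lock variable is assumed):

	__cfaabi_bits_print_safe( STDERR_FILENO, "spinning on lock %p\n", (void *)&l );
	__cfaabi_bits_write( STDOUT_FILENO, "ok\n", 3 );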
libcfa/src/bits/debug.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 8 12:35:19 201813 // Update Count : 212 // Last Modified On : Tue Feb 4 12:29:21 2020 13 // Update Count : 9 14 14 // 15 15 … … 21 21 #define __cfaabi_dbg_ctx __PRETTY_FUNCTION__ 22 22 #define __cfaabi_dbg_ctx2 , __PRETTY_FUNCTION__ 23 #define __cfaabi_dbg_ctx_param const char * caller24 #define __cfaabi_dbg_ctx_param2 , const char * caller23 #define __cfaabi_dbg_ctx_param const char caller[] 24 #define __cfaabi_dbg_ctx_param2 , const char caller[] 25 25 #else 26 26 #define __cfaabi_dbg_debug_do(...) … … 38 38 #include <stdio.h> 39 39 40 extern void __cfaabi_dbg_bits_write( const char *buffer, int len );41 extern void __cfaabi_dbg_bits_acquire();42 extern void __cfaabi_dbg_bits_release();43 extern void __cfaabi_dbg_bits_print_safe ( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) ));44 extern void __cfaabi_dbg_bits_print_nolock( const char fmt[], ... ) __attribute__(( format(printf, 1, 2) ));45 extern void __cfaabi_dbg_bits_print_vararg(const char fmt[], va_list arg );46 extern void __cfaabi_dbg_bits_print_buffer( char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 3, 4) ));40 extern void __cfaabi_bits_write( int fd, const char buffer[], int len ); 41 extern void __cfaabi_bits_acquire(); 42 extern void __cfaabi_bits_release(); 43 extern void __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )); 44 extern void __cfaabi_bits_print_nolock( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )); 45 extern void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list arg ); 46 extern void __cfaabi_bits_print_buffer( int fd, char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )); 47 47 #ifdef __cforall 48 48 } … … 50 50 51 51 #ifdef __CFA_DEBUG_PRINT__ 52 #define __cfaabi_dbg_write( buffer, len ) __cfaabi_dbg_bits_write( buffer, len )53 #define __cfaabi_dbg_acquire() __cfaabi_dbg_bits_acquire()54 #define __cfaabi_dbg_release() __cfaabi_dbg_bits_release()55 #define __cfaabi_dbg_print_safe(...) __cfaabi_dbg_bits_print_safe (__VA_ARGS__)56 #define __cfaabi_dbg_print_nolock(...) __cfaabi_dbg_bits_print_nolock (__VA_ARGS__)57 #define __cfaabi_dbg_print_buffer(...) __cfaabi_dbg_bits_print_buffer (__VA_ARGS__)58 #define __cfaabi_dbg_print_buffer_decl(...) char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_bits_write( __dbg_text, __dbg_len );59 #define __cfaabi_dbg_print_buffer_local(...) __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_bits_write( __dbg_text, __dbg_len );52 #define __cfaabi_dbg_write( buffer, len ) __cfaabi_bits_write( STDERR_FILENO, buffer, len ) 53 #define __cfaabi_dbg_acquire() __cfaabi_bits_acquire() 54 #define __cfaabi_dbg_release() __cfaabi_bits_release() 55 #define __cfaabi_dbg_print_safe(...) __cfaabi_bits_print_safe (__VA_ARGS__) 56 #define __cfaabi_dbg_print_nolock(...) __cfaabi_bits_print_nolock (__VA_ARGS__) 57 #define __cfaabi_dbg_print_buffer(...) __cfaabi_bits_print_buffer (__VA_ARGS__) 58 #define __cfaabi_dbg_print_buffer_decl(...) char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( __dbg_text, __dbg_len ); 59 #define __cfaabi_dbg_print_buffer_local(...)
__dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_write( __dbg_text, __dbg_len ); 60 60 #else 61 61 #define __cfaabi_dbg_write(...) ((void)0) -
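Note the attribute indices in the prototypes above: because every printing routine gained a leading int fd parameter, each format(printf, F, V) pair shifts by one so GCC keeps type-checking the varargs against the format string. A minimal sketch of the rule (log_to and its body are illustrative only, assuming POSIX vdprintf):

    #include <stdarg.h>
    #include <stdio.h>

    // fd is parameter 1, the format string parameter 2, varargs start at 3,
    // hence format(printf, 2, 3); without fd it would be format(printf, 1, 2).
    void log_to( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) ));
    void log_to( int fd, const char fmt[], ... ) {
        va_list args;
        va_start( args, fmt );
        vdprintf( fd, fmt, args );      // like vfprintf, but onto a descriptor
        va_end( args );
    }

    int main( void ) { log_to( 2, "answer=%d\n", 42 ); }   // 2 = stderr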
libcfa/src/bits/defs.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Thu Nov 9 13:24:10 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Feb 8 16:22:41 201813 // Update Count : 812 // Last Modified On : Tue Jan 28 22:38:27 2020 13 // Update Count : 9 14 14 // 15 15 … … 34 34 35 35 #ifdef __cforall 36 void abort ( const char fmt[], ... ) __attribute__ (( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 36 void abort( const char fmt[], ... ) __attribute__ (( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 37 void abort( bool signalAbort, const char fmt[], ... ) __attribute__ (( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 37 38 extern "C" { 38 39 #endif … … 47 48 #define OPTIONAL_THREAD __attribute__((weak)) 48 49 #endif 50 51 static inline long long rdtscl(void) { 52 unsigned int lo, hi; 53 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); 54 return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 ); 55 } -
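The new rdtscl above reads the x86 time-stamp counter; kernel.cfa further down XORs it into each processor's rand_seed. A hedged usage sketch of that seeding idea (x86 only; the xorshift helper is illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    static inline unsigned long long rdtscl( void ) {       // x86 cycle counter
        unsigned int lo, hi;
        __asm__ __volatile__ ( "rdtsc" : "=a"(lo), "=d"(hi) );
        return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
    }

    static uint32_t xorshift( uint32_t * s ) {              // cheap per-thread PRNG
        *s ^= *s << 13; *s ^= *s >> 17; *s ^= *s << 5;
        return *s;
    }

    int main( void ) {
        uint32_t seed = 6u ^ (uint32_t)rdtscl();            // mirrors the TLS seeding
        printf( "%u\n", xorshift( &seed ) );
    }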
libcfa/src/bits/locks.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Oct 31 15:14:38 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Aug 11 15:42:24 201813 // Update Count : 1 012 // Last Modified On : Tue Feb 4 13:03:19 2020 13 // Update Count : 11 14 14 // 15 15 … … 54 54 55 55 #ifdef __CFA_DEBUG__ 56 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name);56 void __cfaabi_dbg_record(__spinlock_t & this, const char prev_name[]); 57 57 #else 58 58 #define __cfaabi_dbg_record(x, y) 59 59 #endif 60 60 } 61 62 extern void yield( unsigned int );63 61 64 62 static inline void ?{}( __spinlock_t & this ) { … … 68 66 // Lock the spinlock, return false if already acquired 69 67 static inline bool try_lock ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) { 68 disable_interrupts(); 70 69 bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0); 71 70 if( result ) { 72 disable_interrupts();73 71 __cfaabi_dbg_record( this, caller ); 72 } else { 73 enable_interrupts_noPoll(); 74 74 } 75 75 return result; … … 83 83 #endif 84 84 85 disable_interrupts(); 85 86 for ( unsigned int i = 1;; i += 1 ) { 86 87 if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break; … … 98 99 #endif 99 100 } 100 disable_interrupts();101 101 __cfaabi_dbg_record( this, caller ); 102 102 } 103 103 104 104 static inline void unlock( __spinlock_t & this ) { 105 __atomic_clear( &this.lock, __ATOMIC_RELEASE ); 105 106 enable_interrupts_noPoll(); 106 __atomic_clear( &this.lock, __ATOMIC_RELEASE );107 107 } 108 108 -
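The reordering above is deliberate: interrupts go off before the test-and-set, so the preemption signal can never arrive while the lock is held, and unlock releases the lock before re-enabling them. A reduced C11 model of the same discipline (the disable/enable stubs stand in for the runtime's real routines):

    #include <stdatomic.h>
    #include <stdbool.h>

    static void disable_interrupts( void ) { /* mask the preemption signal */ }
    static void enable_interrupts_noPoll( void ) { /* unmask it */ }

    static atomic_flag the_lock = ATOMIC_FLAG_INIT;

    static bool try_lock_model( void ) {
        disable_interrupts();                       // off before touching the lock
        if ( ! atomic_flag_test_and_set_explicit( &the_lock, memory_order_acquire ) )
            return true;                            // success: stay non-preemptible
        enable_interrupts_noPoll();                 // failure: restore immediately
        return false;
    }

    static void unlock_model( void ) {
        atomic_flag_clear_explicit( &the_lock, memory_order_release );
        enable_interrupts_noPoll();                 // only after the lock is free
    }

    int main( void ) { if ( try_lock_model() ) unlock_model(); }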
libcfa/src/bits/signal.hfa
r9fb8f01 r3d5701e 37 37 38 38 act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler; 39 sigemptyset( &act.sa_mask ); 40 sigaddset( &act.sa_mask, SIGALRM ); // disabled during signal handler 41 sigaddset( &act.sa_mask, SIGUSR1 ); 42 sigaddset( &act.sa_mask, SIGSEGV ); 43 sigaddset( &act.sa_mask, SIGBUS ); 44 sigaddset( &act.sa_mask, SIGILL ); 45 sigaddset( &act.sa_mask, SIGFPE ); 46 sigaddset( &act.sa_mask, SIGHUP ); // revert to default on second delivery 47 sigaddset( &act.sa_mask, SIGTERM ); 48 sigaddset( &act.sa_mask, SIGINT ); 39 49 act.sa_flags = flags; 40 50 41 if ( sigaction( sig, &act, NULL) == -1 ) {51 if ( sigaction( sig, &act, 0p ) == -1 ) { 42 52 __cfaabi_dbg_print_buffer_decl( 43 53 " __cfaabi_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n", … … 45 55 ); 46 56 _exit( EXIT_FAILURE ); 47 } 57 } // if 48 58 } 49 50 // Sigaction wrapper : restore default handler51 static void __cfaabi_sigdefault( int sig ) {52 struct sigaction act;53 54 act.sa_handler = SIG_DFL;55 act.sa_flags = 0;56 sigemptyset( &act.sa_mask );57 58 if ( sigaction( sig, &act, NULL ) == -1 ) {59 __cfaabi_dbg_print_buffer_decl(60 " __cfaabi_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",61 sig, errno, strerror( errno )62 );63 _exit( EXIT_FAILURE );64 }65 } -
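The block of sigaddset calls above populates sa_mask, so those signals are deferred while any runtime handler runs instead of nesting into it. In plain C the pattern looks like this (install_handler and noop are hypothetical names):

    #include <signal.h>
    #include <stdlib.h>

    static void install_handler( int sig, void (*action)(int, siginfo_t *, void *) ) {
        struct sigaction act;
        act.sa_sigaction = action;
        act.sa_flags = SA_SIGINFO;
        sigemptyset( &act.sa_mask );
        sigaddset( &act.sa_mask, SIGALRM );     // defer preemption ticks
        sigaddset( &act.sa_mask, SIGUSR1 );     // defer cross-processor wakeups
        if ( sigaction( sig, &act, NULL ) == -1 ) exit( EXIT_FAILURE );
    }

    static void noop( int s, siginfo_t * i, void * c ) { (void)s; (void)i; (void)c; }
    int main( void ) { install_handler( SIGINT, noop ); }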
libcfa/src/clock.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Thu Apr 12 14:36:06 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jun 13 21:21:13 201913 // Update Count : 812 // Last Modified On : Mon Jan 6 12:49:58 2020 13 // Update Count : 9 14 14 // 15 15 16 16 #include <time.hfa> 17 18 17 19 18 //######################### C time ######################### … … 26 25 static inline tm * localtime_r( time_t tp, tm * result ) { return localtime_r( &tp, result ); } 27 26 28 29 27 //######################### Clock ######################### 30 28 31 29 struct Clock { // private 32 30 Duration offset; // for virtual clock: contains offset from real-time 33 int clocktype; // implementation only -1 (virtual), CLOCK_REALTIME34 31 }; 35 32 36 33 static inline { 37 void resetClock( Clock & clk ) with( clk ) {38 clocktype = CLOCK_REALTIME_COARSE;39 } // Clock::resetClock40 41 34 void resetClock( Clock & clk, Duration adj ) with( clk ) { 42 clocktype = -1;43 35 offset = adj + __timezone`s; // timezone (global) is (UTC - local time) in seconds 44 36 } // resetClock 45 37 46 void ?{}( Clock & clk ) { resetClock( clk ); }47 38 void ?{}( Clock & clk, Duration adj ) { resetClock( clk, adj ); } 48 39 … … 89 80 return ret; 90 81 } // getTime 82 83 Time getCPUTime() { 84 timespec ts; 85 clock_gettime( CLOCK_THREAD_CPUTIME_ID, &ts ); 86 return (Time){ ts }; 87 } // getCPUTime 91 88 } // distribution 92 89 -
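The new getCPUTime above reports CPU time consumed by the calling thread via CLOCK_THREAD_CPUTIME_ID, as opposed to the wall-clock readings used elsewhere in the file. The equivalent plain C, for reference:

    #include <stdio.h>
    #include <time.h>

    int main( void ) {
        struct timespec ts;
        clock_gettime( CLOCK_THREAD_CPUTIME_ID, &ts );   // this thread's CPU time only
        printf( "%lld.%09ld s\n", (long long)ts.tv_sec, ts.tv_nsec );
    }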
libcfa/src/concurrency/CtxSwitch-arm.S
r9fb8f01 r3d5701e 13 13 .text 14 14 .align 2 15 .global CtxSwitch16 .type CtxSwitch, %function15 .global __cfactx_switch 16 .type __cfactx_switch, %function 17 17 18 CtxSwitch:18 __cfactx_switch: 19 19 @ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification) 20 20 @ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved … … 52 52 mov r15, r14 53 53 #endif // R9_SPECIAL 54 54 55 55 .text 56 56 .align 2 57 .global CtxInvokeStub58 .type CtxInvokeStub, %function57 .global __cfactx_invoke_stub 58 .type __cfactx_invoke_stub, %function 59 59 60 CtxInvokeStub:60 __cfactx_invoke_stub: 61 61 ldmfd r13!, {r0-r1} 62 62 mov r15, r1 -
libcfa/src/concurrency/CtxSwitch-i386.S
r9fb8f01 r3d5701e 43 43 .text 44 44 .align 2 45 .globl CtxSwitch46 .type CtxSwitch, @function47 CtxSwitch:45 .globl __cfactx_switch 46 .type __cfactx_switch, @function 47 __cfactx_switch: 48 48 49 49 // Copy the "from" context argument from the stack to register eax … … 83 83 84 84 ret 85 .size CtxSwitch, .-CtxSwitch85 .size __cfactx_switch, .-__cfactx_switch 86 86 87 87 // Local Variables: // -
libcfa/src/concurrency/CtxSwitch-x86_64.S
r9fb8f01 r3d5701e 44 44 .text 45 45 .align 2 46 .globl CtxSwitch47 .type CtxSwitch, @function48 CtxSwitch:46 .globl __cfactx_switch 47 .type __cfactx_switch, @function 48 __cfactx_switch: 49 49 50 50 // Save volatile registers on the stack. … … 77 77 78 78 ret 79 .size CtxSwitch, .-CtxSwitch79 .size __cfactx_switch, .-__cfactx_switch 80 80 81 81 //----------------------------------------------------------------------------- … … 83 83 .text 84 84 .align 2 85 .globl CtxInvokeStub86 .type CtxInvokeStub, @function87 CtxInvokeStub:85 .globl __cfactx_invoke_stub 86 .type __cfactx_invoke_stub, @function 87 __cfactx_invoke_stub: 88 88 movq %rbx, %rdi 89 jmp *%r12 90 .size CtxInvokeStub, .-CtxInvokeStub 89 movq %r12, %rsi 90 jmp *%r13 91 .size __cfactx_invoke_stub, .-__cfactx_invoke_stub 91 92 92 93 // Local Variables: // -
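The extra movq in the renamed x86-64 stub matches the new two-argument invoke signature: main is parked in %rbx, this in %r12 and the invoke routine in %r13 by the fake stack frame built in invoke.c, and the stub shuffles the first two into the System V argument registers %rdi/%rsi before the indirect jump. In C terms the stub behaves roughly like this sketch (names hypothetical; the real stub tail-jumps rather than calls):

    typedef void (*invoke_t)( void (*main)(void *), void * this );

    // %rbx -> %rdi (arg 1), %r12 -> %rsi (arg 2), jmp *%r13
    static void stub_model( void (*main)(void *), void * this, invoke_t invoke ) {
        invoke( main, this );
    }

    static void hello( void * arg ) { (void)arg; }
    static void invoke_it( void (*m)(void *), void * arg ) { m( arg ); }
    int main( void ) { stub_model( hello, 0, invoke_it ); }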
libcfa/src/concurrency/alarm.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Fri Jun 2 11:31:25 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri May 25 06:25:47 201813 // Update Count : 6 712 // Last Modified On : Sun Jan 5 08:41:36 2020 13 // Update Count : 69 14 14 // 15 15 … … 39 39 40 40 void __kernel_set_timer( Duration alarm ) { 41 verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm .tv);42 setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL);41 verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm`ns); 42 setitimer( ITIMER_REAL, &(itimerval){ alarm }, 0p ); 43 43 } 44 44 … … 47 47 //============================================================================================= 48 48 49 void ?{}( alarm_node_t & this, thread_desc* thrd, Time alarm, Duration period ) with( this ) {49 void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ) with( this ) { 50 50 this.thrd = thrd; 51 51 this.alarm = alarm; … … 113 113 this->tail = &this->head; 114 114 } 115 head->next = NULL;115 head->next = 0p; 116 116 } 117 117 verify( validate( this ) ); … … 127 127 this->tail = it; 128 128 } 129 n->next = NULL;129 n->next = 0p; 130 130 131 131 verify( validate( this ) ); -
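__kernel_set_timer above reduces to arming the real-time interval timer. A self-contained C sketch of one preemption-style tick (values and names illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <unistd.h>

    static volatile sig_atomic_t fired = 0;
    static void on_alarm( int sig ) { (void)sig; fired = 1; }

    int main( void ) {
        signal( SIGALRM, on_alarm );
        struct itimerval it = { { 0, 0 }, { 0, 100000 } };   // one-shot, 100 ms
        setitimer( ITIMER_REAL, &it, NULL );                 // SIGALRM on expiry
        while ( ! fired ) pause();
        printf( "tick\n" );
    }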
libcfa/src/concurrency/alarm.hfa
r9fb8f01 r3d5701e 23 23 #include "time.hfa" 24 24 25 struct thread_desc;25 struct $thread; 26 26 struct processor; 27 27 … … 43 43 44 44 union { 45 thread_desc* thrd; // thrd who created event45 $thread * thrd; // thrd who created event 46 46 processor * proc; // proc who created event 47 47 }; … … 53 53 typedef alarm_node_t ** __alarm_it_t; 54 54 55 void ?{}( alarm_node_t & this, thread_desc* thrd, Time alarm, Duration period );55 void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ); 56 56 void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period ); 57 57 void ^?{}( alarm_node_t & this ); -
libcfa/src/concurrency/coroutine.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 17:20:57 201813 // Update Count : 912 // Last Modified On : Tue Feb 4 12:29:25 2020 13 // Update Count : 16 14 14 // 15 15 … … 37 37 38 38 extern "C" { 39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc*) __attribute__ ((__noreturn__));39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__)); 40 40 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__)); 41 41 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) { … … 89 89 } 90 90 91 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {92 (this.context){ NULL, NULL};91 void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ) with( this ) { 92 (this.context){0p, 0p}; 93 93 (this.stack){storage, storageSize}; 94 94 this.name = name; 95 95 state = Start; 96 starter = NULL;97 last = NULL;98 cancellation = NULL;99 } 100 101 void ^?{}( coroutine_desc& this) {96 starter = 0p; 97 last = 0p; 98 cancellation = 0p; 99 } 100 101 void ^?{}($coroutine& this) { 102 102 if(this.state != Halted && this.state != Start && this.state != Primed) { 103 coroutine_desc* src = TL_GET( this_thread )->curr_cor;104 coroutine_desc* dst = &this;103 $coroutine * src = TL_GET( this_thread )->curr_cor; 104 $coroutine * dst = &this; 105 105 106 106 struct _Unwind_Exception storage; … … 115 115 } 116 116 117 CoroutineCtxSwitch( src, dst );117 $ctx_switch( src, dst ); 118 118 } 119 119 } … … 123 123 forall(dtype T | is_coroutine(T)) 124 124 void prime(T& cor) { 125 coroutine_desc* this = get_coroutine(cor);125 $coroutine* this = get_coroutine(cor); 126 126 assert(this->state == Start); 127 127 … … 131 131 132 132 [void *, size_t] __stack_alloc( size_t storageSize ) { 133 staticconst size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment133 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 134 134 assert(__page_size != 0l); 135 135 size_t size = libCeiling( storageSize, 16 ) + stack_data_size; … … 157 157 158 158 void __stack_prepare( __stack_info_t * this, size_t create_size ) { 159 staticconst size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment159 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 160 160 bool userStack; 161 161 void * storage; … … 187 187 // is not inline (We can't inline Cforall in C) 188 188 extern "C" { 189 void __suspend_internal(void) { 190 suspend(); 191 } 192 193 void __leave_coroutine( coroutine_desc * src ) { 194 coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter; 189 void __cfactx_cor_leave( struct $coroutine * src ) { 190 $coroutine * starter = src->cancellation != 0 ? src->last : src->starter; 195 191 196 192 src->state = Halted; … … 205 201 src->name, src, starter->name, starter ); 206 202 207 CoroutineCtxSwitch( src, starter ); 203 $ctx_switch( src, starter ); 204 } 205 206 struct $coroutine * __cfactx_cor_finish(void) { 207 struct $coroutine * cor = kernelTLS.this_thread->curr_cor; 208 209 if(cor->state == Primed) { 210 suspend(); 211 } 212 213 cor->state = Active; 214 215 return cor; 208 216 } 209 217 } -
libcfa/src/concurrency/coroutine.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 21 17:49:39 201913 // Update Count : 912 // Last Modified On : Tue Feb 4 12:29:26 2020 13 // Update Count : 11 14 14 // 15 15 … … 25 25 trait is_coroutine(dtype T) { 26 26 void main(T & this); 27 coroutine_desc* get_coroutine(T & this);27 $coroutine * get_coroutine(T & this); 28 28 }; 29 29 30 #define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X& this) { return &this.__cor; } void main(X& this)30 #define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this) 31 31 32 32 //----------------------------------------------------------------------------- … … 35 35 // void ^?{}( coStack_t & this ); 36 36 37 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize );38 void ^?{}( coroutine_desc& this );37 void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ); 38 void ^?{}( $coroutine & this ); 39 39 40 static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", NULL, 0 }; }41 static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", NULL, stackSize }; }42 static inline void ?{}( coroutine_desc& this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }43 static inline void ?{}( coroutine_desc & this, const char * name) { this{ name, NULL, 0 }; }44 static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; }40 static inline void ?{}( $coroutine & this) { this{ "Anonymous Coroutine", 0p, 0 }; } 41 static inline void ?{}( $coroutine & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; } 42 static inline void ?{}( $coroutine & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; } 43 static inline void ?{}( $coroutine & this, const char name[]) { this{ name, 0p, 0 }; } 44 static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; } 45 45 46 46 //----------------------------------------------------------------------------- … … 54 54 void prime(T & cor); 55 55 56 static inline struct coroutine_desc* active_coroutine() { return TL_GET( this_thread )->curr_cor; }56 static inline struct $coroutine * active_coroutine() { return TL_GET( this_thread )->curr_cor; } 57 57 58 58 //----------------------------------------------------------------------------- … … 61 61 // Start coroutine routines 62 62 extern "C" { 63 forall(dtype T | is_coroutine(T)) 64 void CtxInvokeCoroutine(T * this); 63 void __cfactx_invoke_coroutine(void (*main)(void *), void * this); 65 64 66 forall(dtype T | is_coroutine(T))67 void CtxStart(T * this, void ( *invoke)(T*));65 forall(dtype T) 66 void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *)); 68 67 69 extern void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc*) __attribute__ ((__noreturn__));68 extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__)); 70 69 71 extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");70 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch"); 72 71 } 73 72 74 73 // Private wrappers for context switch and stack creation 75 74 // Wrapper for co 76 static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {75 static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) { 77 76 // set state of current coroutine to inactive 78 77 src->state = src->state == Halted ? Halted : Inactive; … … 83 82 // context switch to specified coroutine 84 83 verify( dst->context.SP ); 85 CtxSwitch( &src->context, &dst->context );86 __cfactx_switch( &src->context, &dst->context ); 87 // when CtxSwitch returns we are back in the src coroutine84 // when __cfactx_switch returns we are back in the src coroutine 88 87 89 88 // set state of new coroutine to active 90 89 src->state = Active; 91 90 92 if( unlikely(src->cancellation != NULL) ) {93 _ CtxCoroutine_Unwind(src->cancellation, src);91 if( unlikely(src->cancellation != 0p) ) { 92 __cfactx_coroutine_unwind(src->cancellation, src); 94 93 } 95 94 } … … 104 103 // will also migrate which means this value will 105 104 // stay in syn with the TLS 106 coroutine_desc* src = TL_GET( this_thread )->curr_cor;105 $coroutine * src = TL_GET( this_thread )->curr_cor; 107 106 108 107 assertf( src->last != 0, … … 114 113 src->name, src, src->last->name, src->last ); 115 114 116 CoroutineCtxSwitch( src, src->last );115 $ctx_switch( src, src->last ); 117 116 } 118 117 … … 125 124 // will also migrate which means this value will 126 125 // stay in syn with the TLS 127 coroutine_desc* src = TL_GET( this_thread )->curr_cor;128 coroutine_desc* dst = get_coroutine(cor);126 $coroutine * src = TL_GET( this_thread )->curr_cor; 127 $coroutine * dst = get_coroutine(cor); 129 128 130 if( unlikely(dst->context.SP == NULL) ) { 129 if( unlikely(dst->context.SP == 0p) ) { 130 TL_GET( this_thread )->curr_cor = dst; 131 131 __stack_prepare(&dst->stack, 65000); 132 CtxStart(&cor, CtxInvokeCoroutine); 132 __cfactx_start(main, dst, cor, __cfactx_invoke_coroutine); 133 TL_GET( this_thread )->curr_cor = src; 133 134 } 134 135 … … 146 147 147 148 // always done for performance testing 148 CoroutineCtxSwitch( src, dst );149 $ctx_switch( src, dst ); 149 150 150 151 return cor; 151 152 } 152 153 153 static inline void resume( coroutine_desc * dst) {154 static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) { 154 155 // optimization : read TLS once and reuse it 155 156 // Safety note: this is preemption safe since if 156 157 // will also migrate which means this value will 157 158 // stay in syn with the TLS 158 159 coroutine_desc* src = TL_GET( this_thread )->curr_cor;159 $coroutine * src = TL_GET( this_thread )->curr_cor; 160 161 160 162 // not resuming self ? … … 171 172 172 173 // always done for performance testing 173 CoroutineCtxSwitch( src, dst );174 $ctx_switch( src, dst ); 174 175 } 175 176 -
libcfa/src/concurrency/invoke.c
r9fb8f01 r3d5701e 29 29 // Called from the kernel when starting a coroutine or task so must switch back to user mode. 30 30 31 extern void __suspend_internal(void);32 extern void __ leave_coroutine( struct coroutine_desc* );33 extern void __ finish_creation( struct thread_desc *);34 extern void __leave_thread_monitor( struct thread_desc * this ); 31 extern struct $coroutine * __cfactx_cor_finish(void); 32 extern void __cfactx_cor_leave ( struct $coroutine * ); 33 extern void __cfactx_thrd_leave(); 34 35 35 extern void disable_interrupts() OPTIONAL_THREAD; 36 36 extern void enable_interrupts( __cfaabi_dbg_ctx_param ); 37 37 38 void CtxInvokeCoroutine(38 void __cfactx_invoke_coroutine( 39 39 void (*main)(void *), 40 struct coroutine_desc *(*get_coroutine)(void *),41 40 void *this 42 41 ) { 43 struct coroutine_desc* cor = get_coroutine( this ); 42 // Finish setting up the coroutine by setting its state 43 struct $coroutine * cor = __cfactx_cor_finish(); 44 44 45 if(cor->state == Primed) { 46 __suspend_internal(); 47 } 48 49 cor->state = Active; 50 45 // Call the main of the coroutine 51 46 main( this ); 52 47 53 48 //Final suspend, should never return 54 __ leave_coroutine( cor );49 __cfactx_cor_leave( cor ); 55 50 __cabi_abort( "Resumed dead coroutine" ); 56 51 } 57 52 58 static _Unwind_Reason_Code _ CtxCoroutine_UnwindStop(53 static _Unwind_Reason_Code __cfactx_coroutine_unwindstop( 59 54 __attribute((__unused__)) int version, 60 55 _Unwind_Action actions, … … 67 62 // We finished unwinding the coroutine, 68 63 // leave it 69 __ leave_coroutine( param );64 __cfactx_cor_leave( param ); 70 65 __cabi_abort( "Resumed dead coroutine" ); 71 66 } … … 75 70 } 76 71 77 void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc* cor) __attribute__ ((__noreturn__));78 void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc* cor) {79 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _ CtxCoroutine_UnwindStop, cor );72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__)); 73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) { 74 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor ); 80 75 printf("UNWIND ERROR %d after force unwind\n", ret); 81 76 abort(); 82 77 } 83 78 84 void CtxInvokeThread( 85 void (*dtor)(void *), 79 void __cfactx_invoke_thread( 86 80 void (*main)(void *), 87 struct thread_desc *(*get_thread)(void *),88 81 void *this 89 82 ) { 90 // Fetch the thread handle from the user defined thread structure91 struct thread_desc* thrd = get_thread( this );92 93 // First suspend, once the thread arrives here,94 // the function pointer to main can be invalidated without risk95 __finish_creation( thrd );96 97 83 // Officially start the thread by enabling preemption 98 84 enable_interrupts( __cfaabi_dbg_ctx ); … … 108 94 // The order of these 4 operations is very important 109 95 //Final suspend, should never return 110 __ leave_thread_monitor( thrd);96 __cfactx_thrd_leave(); 111 97 __cabi_abort( "Resumed dead thread" ); 112 98 } 113 99 114 115 void CtxStart( 100 void __cfactx_start( 116 101 void (*main)(void *), 117 struct coroutine_desc *(*get_coroutine)(void *),102 struct $coroutine * cor, 118 103 void *this, 119 104 void (*invoke)(void *) 120 105 ) { 121 struct coroutine_desc * cor = get_coroutine( this );122 106 struct __stack_t * stack = cor->stack.storage; 123 107 124 108 … 138 122 fs->dummyReturn = NULL; 140 fs->argument[0] = this; // argument to invoke 124 fs->argument[0] = main; // argument to invoke 125 fs->argument[1] = this; // argument to invoke 141 126 fs->rturn = invoke; 142 127 … … 155 140 156 141 fs->dummyReturn = NULL; 157 fs->rturn = CtxInvokeStub; 158 fs->fixedRegisters[0] = this; 159 fs->fixedRegisters[1] = invoke; 142 fs->rturn = __cfactx_invoke_stub; 143 fs->fixedRegisters[0] = main; 144 fs->fixedRegisters[1] = this; 145 fs->fixedRegisters[2] = invoke; 160 146 161 147 #elif defined( __ARM_ARCH ) 162 148 #error ARM needs to be upgrade to use to parameters like X86/X64 (A.K.A. : I broke this and do not know how to fix it) 163 149 struct FakeStack { 164 150 float fpRegs[16]; // floating point registers 165 151 … … 172 158 struct FakeStack *fs = (struct FakeStack *)cor->context.SP; 173 159 174 fs->intRegs[8] = CtxInvokeStub;160 fs->intRegs[8] = __cfactx_invoke_stub; 175 161 fs->arg[0] = this; 176 162 fs->arg[1] = invoke; -
libcfa/src/concurrency/invoke.h
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jan 17 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 22 18:19:13 201913 // Update Count : 4 012 // Last Modified On : Thu Dec 5 16:26:03 2019 13 // Update Count : 44 14 14 // 15 15 … … 46 46 #ifdef __cforall 47 47 extern "Cforall" { 48 extern thread_local struct KernelThreadData {49 struct thread_desc* volatile this_thread;48 extern __attribute__((aligned(128))) thread_local struct KernelThreadData { 49 struct $thread * volatile this_thread; 50 50 struct processor * volatile this_processor; 51 51 … … 55 55 volatile bool in_progress; 56 56 } preemption_state; 57 58 uint32_t rand_seed; 57 59 } kernelTLS __attribute__ ((tls_model ( "initial-exec" ))); 58 60 } … … 90 92 }; 91 93 92 enum coroutine_state { Halted, Start, Inactive, Active, Primed }; 93 94 struct coroutine_desc { 95 // context that is switch during a CtxSwitch 94 enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun }; 95 enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION }; 96 97 struct $coroutine { 98 // context that is switch during a __cfactx_switch 96 99 struct __stack_context_t context; 97 100 … … 106 109 107 110 // first coroutine to resume this one 108 struct coroutine_desc* starter;111 struct $coroutine * starter; 109 112 110 113 // last coroutine to resume this one 111 struct coroutine_desc* last;114 struct $coroutine * last; 112 115 113 116 // If non-null stack must be unwound with this exception … … 125 128 }; 126 129 127 struct monitor_desc{130 struct $monitor { 128 131 // spinlock to protect internal data 129 132 struct __spinlock_t lock; 130 133 131 134 // current owner of the monitor 132 struct thread_desc* owner;135 struct $thread * owner; 133 136 134 137 // queue of threads that are blocked waiting for the monitor 135 __queue_t(struct thread_desc) entry_queue;138 __queue_t(struct $thread) entry_queue; 136 139 137 140 // stack of conditions to run next once we exit the monitor … … 150 153 struct __monitor_group_t { 151 154 // currently held monitors 152 __cfa_anonymous_object( __small_array_t( monitor_desc*) );155 __cfa_anonymous_object( __small_array_t($monitor*) ); 153 156 154 157 // last function that acquired monitors … … 156 159 }; 157 160 158 struct thread_desc{161 struct $thread { 159 162 // Core threading fields 160 // context that is switch during a CtxSwitch163 // context that is switch during a __cfactx_switch 161 164 struct __stack_context_t context; 162 165 163 166 // current execution status for coroutine 164 enum coroutine_state state; 167 volatile int state; 168 enum __Preemption_Reason preempted; 165 169 166 170 //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it 167 171 168 172 // coroutine body used to store context 169 struct coroutine_descself_cor;173 struct $coroutine self_cor; 170 174 171 175 // current active context 172 struct coroutine_desc* curr_cor;176 struct $coroutine * curr_cor; 173 177 174 178 // monitor body used for mutual exclusion 175 struct monitor_descself_mon;179 struct $monitor self_mon; 176 180 177 181 // pointer to monitor with sufficient lifetime for current monitors 178 struct monitor_desc* self_mon_p;182 struct $monitor * self_mon_p; 179 183 180 184 // pointer to the cluster on which the thread is running … … 186 190 // Link lists fields 187 191 // instrusive link field for threads 188 192 struct $thread * next; 189 193 190 194 struct { 191 struct thread_desc* next;192 struct thread_desc* prev;195 struct $thread * next; 196 struct $thread * prev; 193 197 } node; 194 198 }; … … 196 200 #ifdef __cforall 197 201 extern "Cforall" { 198 static inline thread_desc *& get_next( thread_desc & this) {202 static inline $thread *& get_next( $thread & this ) __attribute__((const)) { 199 203 return this.next; 200 204 } 201 205 202 static inline [ thread_desc *&, thread_desc *& ] __get( thread_desc & this) {206 static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) { 203 207 return this.node.[next, prev]; 204 208 } 205 209 206 210 static inline void ?{}(__monitor_group_t & this) { 207 (this.data){ NULL};211 (this.data){0p}; 208 212 (this.size){0}; 209 213 (this.func){NULL}; 210 214 } 211 215 212 static inline void ?{}(__monitor_group_t & this, struct monitor_desc** data, __lock_size_t size, fptr_t func) {216 static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) { 213 217 (this.data){data}; 214 218 (this.size){size}; … … 216 220 } 217 221 218 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {222 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) { 219 223 if( (lhs.data != 0) != (rhs.data != 0) ) return false; 220 224 if( lhs.size != rhs.size ) return false; … … 250 254 251 255 // assembler routines that performs the context switch 252 extern void CtxInvokeStub( void );253 extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");256 extern void __cfactx_invoke_stub( void ); 257 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch"); 254 258 // void CtxStore ( void * this ) asm ("CtxStore"); 255 259 // void CtxRet ( void * dst ) asm ("CtxRet"); -
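One small but consequential change above: the kernel's thread-local block is now aligned(128). Over-aligning per-processor data keeps it on its own cache lines, so hot writes (this_thread changes on every context switch) cannot false-share with a neighbouring object; 128 bytes covers two 64-byte lines and the adjacent-line prefetcher. A standalone C illustration (struct contents are hypothetical stand-ins):

    #include <stdio.h>

    struct per_proc {
        void * volatile this_thread;    // written on every context switch
        unsigned int rand_seed;
    } __attribute__(( aligned(128) ));  // own cache line(s): no false sharing

    static _Thread_local struct per_proc tls;

    int main( void ) {
        printf( "sizeof=%zu align=%zu\n",
                sizeof(struct per_proc), _Alignof(struct per_proc) );
    }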
libcfa/src/concurrency/kernel.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Jun 20 17:21:23 201913 // Update Count : 2512 // Last Modified On : Tue Feb 4 13:03:15 2020 13 // Update Count : 58 14 14 // 15 15 … … 26 26 #include <signal.h> 27 27 #include <unistd.h> 28 #include <limits.h> // PTHREAD_STACK_MIN 29 #include <sys/mman.h> // mprotect 28 30 } 29 31 … … 40 42 //----------------------------------------------------------------------------- 41 43 // Some assembly required 42 #if defined( __i386 )44 #if defined( __i386 ) 43 45 #define CtxGet( ctx ) \ 44 46 __asm__ volatile ( \ … … 108 110 //----------------------------------------------------------------------------- 109 111 //Start and stop routine for the kernel, declared first to make sure they run first 110 static void kernel_startup(void)__attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));111 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));112 static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 113 static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 112 114 113 115 //----------------------------------------------------------------------------- … … 115 117 KERNEL_STORAGE(cluster, mainCluster); 116 118 KERNEL_STORAGE(processor, mainProcessor); 117 KERNEL_STORAGE( thread_desc, mainThread);119 KERNEL_STORAGE($thread, mainThread); 118 120 KERNEL_STORAGE(__stack_t, mainThreadCtx); 119 121 120 122 cluster * mainCluster; 121 123 processor * mainProcessor; 122 thread_desc* mainThread;124 $thread * mainThread; 123 125 124 126 extern "C" { 125 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;127 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters; 126 128 } 127 129 … … 131 133 // Global state 132 134 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = { 135 NULL, // cannot use 0p 133 136 NULL, 134 NULL,135 { 1, false, false }137 { 1, false, false }, 138 6u //this should be seeded better but due to a bug calling rdtsc doesn't work 136 139 }; 137 140 … … 139 142 // Struct to steal stack 140 143 struct current_stack_info_t { 141 __stack_t * storage; // pointer to stack object142 void * base;// base of stack143 void * limit;// stack grows towards stack limit144 void * context;// address of cfa_context_t144 __stack_t * storage; // pointer to stack object 145 void * base; // base of stack 146 void * limit; // stack grows towards stack limit 147 void * context; // address of cfa_context_t 145 148 }; 146 149 … … 161 164 // Main thread construction 162 165 163 void ?{}( coroutine_desc& this, current_stack_info_t * info) with( this ) {166 void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) { 164 167 stack.storage = info->storage; 165 168 with(*stack.storage) { … … 171 174 name = "Main Thread"; 172 175 state = Start; 173 starter = NULL;174 last = NULL;175 cancellation = NULL;176 } 177 178 void ?{}( thread_desc& this, current_stack_info_t * info) with( this ) {176 starter = 0p; 177 last = 0p; 178 cancellation = 0p; 179 } 180 181 void ?{}( $thread & this, current_stack_info_t * info) with( this ) { 179 182 state = Start; 180 183 self_cor{ info }; … … 184 187 self_mon.recursion = 1; 185 188 self_mon_p = &self_mon; 186 next = NULL;187 188 node.next = NULL;189 node.prev = NULL;189 next = 0p; 190 191 node.next = 0p; 192 node.prev = 0p; 190 193 
doregister(curr_cluster, this); 191 194 … … 205 208 } 206 209 207 static void start(processor * this); 208 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) { 210 static void * __invoke_processor(void * arg); 211 212 void ?{}(processor & this, const char name[], cluster & cltr) with( this ) { 209 213 this.name = name; 210 214 this.cltr = &cltr; 211 215 terminated{ 0 }; 216 destroyer = 0p; 212 217 do_terminate = false; 213 preemption_alarm = NULL;218 preemption_alarm = 0p; 214 219 pending_preemption = false; 215 220 runner.proc = &this; … … 217 222 idleLock{}; 218 223 219 start( &this ); 224 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this); 225 226 this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this ); 227 228 __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this); 220 229 } 221 230 … … 231 240 } 232 241 233 pthread_join( kernel_thread, NULL ); 234 } 235 236 void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) { 242 pthread_join( kernel_thread, 0p ); 243 free( this.stack ); 244 } 245 246 void ?{}(cluster & this, const char name[], Duration preemption_rate) with( this ) { 237 247 this.name = name; 238 248 this.preemption_rate = preemption_rate; … … 254 264 // Kernel Scheduling logic 255 265 //============================================================================================= 256 static void runThread(processor * this, thread_desc * dst);257 static void finishRunning(processor * this);258 static void halt(processor * this);266 static $thread * __next_thread(cluster * this); 267 static void __run_thread(processor * this, $thread * dst); 268 static void __halt(processor * this); 259 269 260 270 //Main of the processor contexts 261 271 void main(processorCtx_t & runner) { 272 // Because of a bug, we couldn't initialized the seed on construction 273 // Do it here 274 kernelTLS.rand_seed ^= rdtscl(); 275 262 276 processor * this = runner.proc; 263 277 verify(this); … … 273 287 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); 274 288 275 thread_desc * readyThread = NULL; 276 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) 277 { 278 readyThread = nextThread( this->cltr ); 279 280 if(readyThread) 281 { 282 verify( ! kernelTLS.preemption_state.enabled ); 283 284 runThread(this, readyThread); 285 286 verify( ! kernelTLS.preemption_state.enabled ); 287 288 //Some actions need to be taken from the kernel 289 finishRunning(this); 289 $thread * readyThread = 0p; 290 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) { 291 readyThread = __next_thread( this->cltr ); 292 293 if(readyThread) { 294 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 295 /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted); 296 /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next ); 297 298 __run_thread(this, readyThread); 299 300 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 290 301 291 302 spin_count = 0; 292 } 293 else 294 { 303 } else { 295 304 // spin(this, &spin_count); 296 halt(this);305 __halt(this); 297 306 } 298 307 } … … 314 323 // runThread runs a thread by context switching 315 324 // from the processor coroutine to the target thread 316 static void runThread(processor * this, thread_desc * thrd_dst) { 317 coroutine_desc * proc_cor = get_coroutine(this->runner); 318 319 // Reset the terminating actions here 320 this->finish.action_code = No_Action; 325 static void __run_thread(processor * this, $thread * thrd_dst) { 326 $coroutine * proc_cor = get_coroutine(this->runner); 321 327 322 328 // Update global state 323 329 kernelTLS.this_thread = thrd_dst; 324 330 325 // set state of processor coroutine to inactive and the thread to active 326 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 327 thrd_dst->state = Active; 328 329 // set context switch to the thread that the processor is executing 330 verify( thrd_dst->context.SP ); 331 CtxSwitch( &proc_cor->context, &thrd_dst->context ); 332 // when CtxSwitch returns we are back in the processor coroutine 333 334 // set state of processor coroutine to active and the thread to inactive 335 thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive; 331 // set state of processor coroutine to inactive 332 verify(proc_cor->state == Active); 333 proc_cor->state = Inactive; 334 335 // Actually run the thread 336 RUNNING: while(true) { 337 if(unlikely(thrd_dst->preempted)) { 338 thrd_dst->preempted = __NO_PREEMPTION; 339 verify(thrd_dst->state == Active || thrd_dst->state == Rerun); 340 } else { 341 verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive); 342 thrd_dst->state = Active; 343 } 344 345 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 346 347 // set context switch to the thread that the processor is executing 348 verify( thrd_dst->context.SP ); 349 __cfactx_switch( &proc_cor->context, &thrd_dst->context ); 350 // when __cfactx_switch returns we are back in the processor coroutine 351 352 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 353 354 355 // We just finished running a thread, there are a few things that could have happened. 356 // 1 - Regular case : the thread has blocked and now one has scheduled it yet. 357 // 2 - Racy case : the thread has blocked but someone has already tried to schedule it. 358 // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way 359 // 4 - Preempted 360 // In case 1, we may have won a race so we can't write to the state again. 361 // In case 2, we lost the race so we now own the thread. 362 // In case 3, we lost the race but can just reschedule the thread. 
363 364 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 365 // The thread was preempted, reschedule it and reset the flag 366 __schedule_thread( thrd_dst ); 367 break RUNNING; 368 } 369 370 // set state of processor coroutine to active and the thread to inactive 371 static_assert(sizeof(thrd_dst->state) == sizeof(int)); 372 enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST); 373 switch(old_state) { 374 case Halted: 375 // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on 376 thrd_dst->state = Halted; 377 378 // We may need to wake someone up here since 379 unpark( this->destroyer ); 380 this->destroyer = 0p; 381 break RUNNING; 382 case Active: 383 // This is case 1, the regular case, nothing more is needed 384 break RUNNING; 385 case Rerun: 386 // This is case 2, the racy case, someone tried to run this thread before it finished blocking 387 // In this case, just run it again. 388 continue RUNNING; 389 default: 390 // This makes no sense, something is wrong abort 391 abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state); 392 } 393 } 394 395 // Just before returning to the processor, set the processor coroutine to active 336 396 proc_cor->state = Active; 337 397 } 338 398 339 399 // KERNEL_ONLY 340 static void returnToKernel() { 341 coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 342 thread_desc * thrd_src = kernelTLS.this_thread; 343 344 // set state of current coroutine to inactive 345 thrd_src->state = thrd_src->state == Halted ? Halted : Inactive; 346 proc_cor->state = Active; 347 int local_errno = *__volatile_errno(); 348 #if defined( __i386 ) || defined( __x86_64 ) 349 __x87_store; 350 #endif 351 352 // set new coroutine that the processor is executing 353 // and context switch to it 354 verify( proc_cor->context.SP ); 355 CtxSwitch( &thrd_src->context, &proc_cor->context ); 356 357 // set state of new coroutine to active 358 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 359 thrd_src->state = Active; 360 361 #if defined( __i386 ) || defined( __x86_64 ) 362 __x87_load; 363 #endif 364 *__volatile_errno() = local_errno; 365 } 366 367 // KERNEL_ONLY 368 // Once a thread has finished running, some of 369 // its final actions must be executed from the kernel 370 static void finishRunning(processor * this) with( this->finish ) { 371 verify( ! kernelTLS.preemption_state.enabled ); 372 choose( action_code ) { 373 case No_Action: 374 break; 375 case Release: 376 unlock( *lock ); 377 case Schedule: 378 ScheduleThread( thrd ); 379 case Release_Schedule: 380 unlock( *lock ); 381 ScheduleThread( thrd ); 382 case Release_Multi: 383 for(int i = 0; i < lock_count; i++) { 384 unlock( *locks[i] ); 385 } 386 case Release_Multi_Schedule: 387 for(int i = 0; i < lock_count; i++) { 388 unlock( *locks[i] ); 389 } 390 for(int i = 0; i < thrd_count; i++) { 391 ScheduleThread( thrds[i] ); 392 } 393 case Callback: 394 callback(); 395 default: 396 abort("KERNEL ERROR: Unexpected action to run after thread"); 397 } 400 void returnToKernel() { 401 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 402 $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 403 $thread * thrd_src = kernelTLS.this_thread; 404 405 // Run the thread on this processor 406 { 407 int local_errno = *__volatile_errno(); 408 #if defined( __i386 ) || defined( __x86_64 ) 409 __x87_store; 410 #endif 411 verify( proc_cor->context.SP ); 412 __cfactx_switch( &thrd_src->context, &proc_cor->context ); 413 #if defined( __i386 ) || defined( __x86_64 ) 414 __x87_load; 415 #endif 416 *__volatile_errno() = local_errno; 417 } 418 419 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 398 420 } 399 421 … … 402 424 // This is the entry point for processors (kernel threads) 403 425 // It effectively constructs a coroutine by stealing the pthread stack 404 static void * CtxInvokeProcessor(void * arg) {426 static void * __invoke_processor(void * arg) { 405 427 processor * proc = (processor *) arg; 406 428 kernelTLS.this_processor = proc; 407 kernelTLS.this_thread = NULL;429 kernelTLS.this_thread = 0p; 408 430 kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; 409 431 // SKULLDUGGERY: We want to create a context for the processor coroutine … … 418 440 419 441 //Set global state 420 kernelTLS.this_thread = NULL;442 kernelTLS.this_thread = 0p; 421 443 422 444 //We now have a proper context from which to schedule threads … … 434 456 __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner); 435 457 436 return NULL; 437 } 438 439 static void start(processor * this) { 440 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this); 441 442 pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this ); 443 444 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); 458 return 0p; 459 } 460 461 static void Abort( int ret, const char func[] ) { 462 if ( ret ) { // pthread routines return errno values 463 abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) ); 464 } // if 465 } // Abort 466 467 void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 468 pthread_attr_t attr; 469 470 Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute 471 472 size_t stacksize; 473 // default stack size, normally defined by shell limit 474 Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" ); 475 assert( stacksize >= PTHREAD_STACK_MIN ); 476 477 void * stack; 478 __cfaabi_dbg_debug_do( 479 stack = memalign( __page_size, stacksize + __page_size ); 480 // pthread has no mechanism to create the guard page in user supplied stack. 481 if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) { 482 abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 483 } // if 484 ); 485 __cfaabi_dbg_no_debug_do( 486 stack = malloc( stacksize ); 487 ); 488 489 Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" ); 490 491 Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" ); 492 return stack; 445 493 } 446 494 447 495 // KERNEL_ONLY 448 voidkernel_first_resume( processor * this ) {449 thread_desc* src = mainThread;450 coroutine_desc* dst = get_coroutine(this->runner);496 static void __kernel_first_resume( processor * this ) { 497 $thread * src = mainThread; 498 $coroutine * dst = get_coroutine(this->runner); 451 499 452 500 verify( ! 
kernelTLS.preemption_state.enabled ); 453 501 502 kernelTLS.this_thread->curr_cor = dst; 454 503 __stack_prepare( &dst->stack, 65000 ); 455 CtxStart(&this->runner, CtxInvokeCoroutine);504 __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine); 456 505 457 506 verify( ! kernelTLS.preemption_state.enabled ); … … 465 514 // context switch to specified coroutine 466 515 verify( dst->context.SP ); 467 CtxSwitch( &src->context, &dst->context ); 468 // when CtxSwitch returns we are back in the src coroutine 516 __cfactx_switch( &src->context, &dst->context ); 517 // when __cfactx_switch returns we are back in the src coroutine 518 519 mainThread->curr_cor = &mainThread->self_cor; 469 520 470 521 // set state of new coroutine to active … … 475 526 476 527 // KERNEL_ONLY 477 voidkernel_last_resume( processor * this ) {478 coroutine_desc* src = &mainThread->self_cor;479 coroutine_desc* dst = get_coroutine(this->runner);528 static void __kernel_last_resume( processor * this ) { 529 $coroutine * src = &mainThread->self_cor; 530 $coroutine * dst = get_coroutine(this->runner); 480 531 481 532 verify( ! kernelTLS.preemption_state.enabled ); … … 484 535 485 536 // context switch to the processor 486 CtxSwitch( &src->context, &dst->context );537 __cfactx_switch( &src->context, &dst->context ); 487 538 } 488 539 489 540 //----------------------------------------------------------------------------- 490 541 // Scheduler routines 491 492 542 // KERNEL ONLY 493 void ScheduleThread( thread_desc * thrd ) { 494 verify( thrd ); 495 verify( thrd->state != Halted ); 496 497 verify( ! kernelTLS.preemption_state.enabled ); 498 499 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 500 501 with( *thrd->curr_cluster ) { 502 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 503 bool was_empty = !(ready_queue != 0); 504 append( ready_queue, thrd ); 505 unlock( ready_queue_lock ); 506 507 if(was_empty) { 508 lock (proc_list_lock __cfaabi_dbg_ctx2); 509 if(idles) { 510 wake_fast(idles.head); 511 } 512 unlock (proc_list_lock); 543 void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) { 544 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 545 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) 546 /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, 547 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted ); 548 /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun, 549 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted ); 550 /* paranoid */ #endif 551 /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next ); 552 553 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 554 bool was_empty = !(ready_queue != 0); 555 append( ready_queue, thrd ); 556 unlock( ready_queue_lock ); 557 558 if(was_empty) { 559 lock (proc_list_lock __cfaabi_dbg_ctx2); 560 if(idles) { 561 wake_fast(idles.head); 513 562 } 514 else if( struct processor * idle = idles.head ) {515 wake_fast(idle);516 }517 518 } 519 520 verify( ! kernelTLS.preemption_state.enabled );563 unlock (proc_list_lock); 564 } 565 else if( struct processor * idle = idles.head ) { 566 wake_fast(idle); 567 } 568 569 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 521 570 } 522 571 523 572 // KERNEL ONLY 524 thread_desc * nextThread(cluster * this) with( *this ) { 525 verify( ! 
kernelTLS.preemption_state.enabled ); 573 static $thread * __next_thread(cluster * this) with( *this ) { 574 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 575 526 576 lock( ready_queue_lock __cfaabi_dbg_ctx2 ); 527 thread_desc* head = pop_head( ready_queue );577 $thread * head = pop_head( ready_queue ); 528 578 unlock( ready_queue_lock ); 529 verify( ! kernelTLS.preemption_state.enabled ); 579 580 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 530 581 return head; 531 582 } 532 583 533 void BlockInternal() { 584 void unpark( $thread * thrd ) { 585 if( !thrd ) return; 586 534 587 disable_interrupts(); 535 verify( ! kernelTLS.preemption_state.enabled ); 588 static_assert(sizeof(thrd->state) == sizeof(int)); 589 enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST); 590 switch(old_state) { 591 case Active: 592 // Wake won the race, the thread will reschedule/rerun itself 593 break; 594 case Inactive: 595 /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION ); 596 597 // Wake lost the race, 598 thrd->state = Inactive; 599 __schedule_thread( thrd ); 600 break; 601 case Rerun: 602 abort("More than one thread attempted to schedule thread %p\n", thrd); 603 break; 604 case Halted: 605 case Start: 606 case Primed: 607 default: 608 // This makes no sense, something is wrong abort 609 abort(); 610 } 611 enable_interrupts( __cfaabi_dbg_ctx ); 612 } 613 614 void park( void ) { 615 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 616 disable_interrupts(); 617 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 618 /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION ); 619 536 620 returnToKernel(); 537 verify( ! kernelTLS.preemption_state.enabled ); 621 622 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 538 623 enable_interrupts( __cfaabi_dbg_ctx ); 539 } 540 541 void BlockInternal( __spinlock_t * lock ) { 624 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 625 626 } 627 628 // KERNEL ONLY 629 void __leave_thread() { 630 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 631 returnToKernel(); 632 abort(); 633 } 634 635 // KERNEL ONLY 636 bool force_yield( __Preemption_Reason reason ) { 637 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 542 638 disable_interrupts(); 543 with( *kernelTLS.this_processor ) { 544 finish.action_code = Release; 545 finish.lock = lock; 546 } 547 548 verify( ! kernelTLS.preemption_state.enabled ); 549 returnToKernel(); 550 verify( ! kernelTLS.preemption_state.enabled ); 551 552 enable_interrupts( __cfaabi_dbg_ctx ); 553 } 554 555 void BlockInternal( thread_desc * thrd ) { 556 disable_interrupts(); 557 with( * kernelTLS.this_processor ) { 558 finish.action_code = Schedule; 559 finish.thrd = thrd; 560 } 561 562 verify( ! kernelTLS.preemption_state.enabled ); 563 returnToKernel(); 564 verify( ! kernelTLS.preemption_state.enabled ); 565 566 enable_interrupts( __cfaabi_dbg_ctx ); 567 } 568 569 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) { 570 assert(thrd); 571 disable_interrupts(); 572 with( * kernelTLS.this_processor ) { 573 finish.action_code = Release_Schedule; 574 finish.lock = lock; 575 finish.thrd = thrd; 576 } 577 578 verify( ! kernelTLS.preemption_state.enabled ); 579 returnToKernel(); 580 verify( ! 
kernelTLS.preemption_state.enabled ); 581 582 enable_interrupts( __cfaabi_dbg_ctx ); 583 } 584 585 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 586 disable_interrupts(); 587 with( * kernelTLS.this_processor ) { 588 finish.action_code = Release_Multi; 589 finish.locks = locks; 590 finish.lock_count = count; 591 } 592 593 verify( ! kernelTLS.preemption_state.enabled ); 594 returnToKernel(); 595 verify( ! kernelTLS.preemption_state.enabled ); 596 597 enable_interrupts( __cfaabi_dbg_ctx ); 598 } 599 600 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 601 disable_interrupts(); 602 with( *kernelTLS.this_processor ) { 603 finish.action_code = Release_Multi_Schedule; 604 finish.locks = locks; 605 finish.lock_count = lock_count; 606 finish.thrds = thrds; 607 finish.thrd_count = thrd_count; 608 } 609 610 verify( ! kernelTLS.preemption_state.enabled ); 611 returnToKernel(); 612 verify( ! kernelTLS.preemption_state.enabled ); 613 614 enable_interrupts( __cfaabi_dbg_ctx ); 615 } 616 617 void BlockInternal(__finish_callback_fptr_t callback) { 618 disable_interrupts(); 619 with( *kernelTLS.this_processor ) { 620 finish.action_code = Callback; 621 finish.callback = callback; 622 } 623 624 verify( ! kernelTLS.preemption_state.enabled ); 625 returnToKernel(); 626 verify( ! kernelTLS.preemption_state.enabled ); 627 628 enable_interrupts( __cfaabi_dbg_ctx ); 629 } 630 631 // KERNEL ONLY 632 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 633 verify( ! kernelTLS.preemption_state.enabled ); 634 with( * kernelTLS.this_processor ) { 635 finish.action_code = thrd ? Release_Schedule : Release; 636 finish.lock = lock; 637 finish.thrd = thrd; 638 } 639 640 returnToKernel(); 639 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 640 641 $thread * thrd = kernelTLS.this_thread; 642 /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun); 643 644 // SKULLDUGGERY: It is possible that we are preempting this thread just before 645 // it was going to park itself. If that is the case and it is already using the 646 // intrusive fields then we can't use them to preempt the thread 647 // If that is the case, abandon the preemption. 648 bool preempted = false; 649 if(thrd->next == 0p) { 650 preempted = true; 651 thrd->preempted = reason; 652 returnToKernel(); 653 } 654 655 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 656 enable_interrupts_noPoll(); 657 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 658 659 return preempted; 641 660 } 642 661 … … 646 665 //----------------------------------------------------------------------------- 647 666 // Kernel boot procedures 648 static void kernel_startup(void) {667 static void __kernel_startup(void) { 649 668 verify( ! 
kernelTLS.preemption_state.enabled ); 650 669 __cfaabi_dbg_print_safe("Kernel : Starting\n"); … … 664 683 // SKULLDUGGERY: the mainThread steals the process main thread 665 684 // which will then be scheduled by the mainProcessor normally 666 mainThread = ( thread_desc*)&storage_mainThread;685 mainThread = ($thread *)&storage_mainThread; 667 686 current_stack_info_t info; 668 687 info.storage = (__stack_t*)&storage_mainThreadCtx; … … 676 695 void ?{}(processorCtx_t & this, processor * proc) { 677 696 (this.__cor){ "Processor" }; 678 this.__cor.starter = NULL;697 this.__cor.starter = 0p; 679 698 this.proc = proc; 680 699 } … … 685 704 terminated{ 0 }; 686 705 do_terminate = false; 687 preemption_alarm = NULL;706 preemption_alarm = 0p; 688 707 pending_preemption = false; 689 708 kernel_thread = pthread_self(); … … 707 726 // Add the main thread to the ready queue 708 727 // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread 709 ScheduleThread(mainThread);728 __schedule_thread(mainThread); 710 729 711 730 // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX 712 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that731 // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that 713 732 // mainThread is on the ready queue when this call is made. 714 kernel_first_resume( kernelTLS.this_processor );733 __kernel_first_resume( kernelTLS.this_processor ); 715 734 716 735 … … 724 743 } 725 744 726 static void kernel_shutdown(void) {745 static void __kernel_shutdown(void) { 727 746 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n"); 728 747 … … 735 754 // which is currently here 736 755 __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE); 737 kernel_last_resume( kernelTLS.this_processor );756 __kernel_last_resume( kernelTLS.this_processor ); 738 757 mainThread->self_cor.state = Halted; 739 758 … … 761 780 // Kernel Quiescing 762 781 //============================================================================================= 763 static void halt(processor * this) with( *this ) {782 static void __halt(processor * this) with( *this ) { 764 783 // verify( ! 
__atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ); 765 784 … … 803 822 sigemptyset( &mask ); 804 823 sigaddset( &mask, SIGALRM ); // block SIGALRM signals 805 sigsuspend( &mask ); // block the processor to prevent further damage during abort 806 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 824 sigaddset( &mask, SIGUSR1 ); // block SIGALRM signals 825 sigsuspend( &mask ); // block the processor to prevent further damage during abort 826 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 807 827 } 808 828 else { … … 815 835 816 836 void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) { 817 thread_desc* thrd = kernel_data;837 $thread * thrd = kernel_data; 818 838 819 839 if(thrd) { 820 840 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd ); 821 __cfaabi_ dbg_bits_write(abort_text, len );841 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 822 842 823 843 if ( &thrd->self_cor != thrd->curr_cor ) { 824 844 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor ); 825 __cfaabi_ dbg_bits_write(abort_text, len );845 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 826 846 } 827 847 else { 828 __cfaabi_ dbg_bits_write(".\n", 2 );848 __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 ); 829 849 } 830 850 } 831 851 else { 832 852 int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" ); 833 __cfaabi_ dbg_bits_write(abort_text, len );853 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 834 854 } 835 855 } … … 842 862 843 863 extern "C" { 844 void __cfaabi_ dbg_bits_acquire() {864 void __cfaabi_bits_acquire() { 845 865 lock( kernel_debug_lock __cfaabi_dbg_ctx2 ); 846 866 } 847 867 848 void __cfaabi_ dbg_bits_release() {868 void __cfaabi_bits_release() { 849 869 unlock( kernel_debug_lock ); 850 870 } … … 871 891 872 892 // atomically release spin lock and block 873 BlockInternal( &lock ); 893 unlock( lock ); 894 park(); 874 895 } 875 896 else { … … 879 900 880 901 void V(semaphore & this) with( this ) { 881 thread_desc * thrd = NULL;902 $thread * thrd = 0p; 882 903 lock( lock __cfaabi_dbg_ctx2 ); 883 904 count += 1; … … 890 911 891 912 // make new owner 892 WakeThread( thrd );913 unpark( thrd ); 893 914 } 894 915 … … 907 928 } 908 929 909 void doregister( cluster * cltr, thread_desc& thrd ) {930 void doregister( cluster * cltr, $thread & thrd ) { 910 931 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 911 932 cltr->nthreads += 1; … … 914 935 } 915 936 916 void unregister( cluster * cltr, thread_desc& thrd ) {937 void unregister( cluster * cltr, $thread & thrd ) { 917 938 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 918 939 remove(cltr->threads, thrd ); … … 939 960 __cfaabi_dbg_debug_do( 940 961 extern "C" { 941 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {962 void __cfaabi_dbg_record(__spinlock_t & this, const char prev_name[]) { 942 963 this.prev_name = prev_name; 943 964 this.prev_thrd = kernelTLS.this_thread; … … 948 969 //----------------------------------------------------------------------------- 949 970 // Debug 950 bool threading_enabled(void) {971 bool threading_enabled(void) __attribute__((const)) { 951 972 return true; 952 973 } -
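The semaphore rewrite above (unlock(lock); park() in P, unpark(thrd) in V) only works if a wake-up delivered before the sleeper actually parks is not lost. A minimal C model of that permit semantics, using one POSIX semaphore per thread; struct thread, its permit field, and the helpers are illustrative assumptions, not the runtime's real layout:

    #include <semaphore.h>

    struct thread {
        sem_t permit;                        // assumed: sem_init(&permit, 0, 0) at thread creation
    };

    // park: consume the permit, blocking only if it has not been granted yet
    static void park(struct thread * self) {
        sem_wait(&self->permit);
    }

    // unpark: grant the permit; never blocks, and tolerates NULL like V above
    static void unpark(struct thread * t) {
        if (t) sem_post(&t->permit);         // a real runtime caps the saved permit at one
    }

Because the permit is saved, the window in P between unlock(lock) and park() is benign: a V that slips in simply makes the following park() return immediately.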
libcfa/src/concurrency/kernel.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 22 11:39:17 201913 // Update Count : 1612 // Last Modified On : Tue Feb 4 12:29:26 2020 13 // Update Count : 22 14 14 // 15 15 … … 20 20 #include "invoke.h" 21 21 #include "time_t.hfa" 22 #include "coroutine.hfa" 22 23 23 24 extern "C" { … … 31 32 __spinlock_t lock; 32 33 int count; 33 __queue_t( thread_desc) waiting;34 __queue_t($thread) waiting; 34 35 }; 35 36 … … 43 44 // Processor 44 45 extern struct cluster * mainCluster; 45 46 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };47 48 typedef void (*__finish_callback_fptr_t)(void);49 50 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)51 struct FinishAction {52 FinishOpCode action_code;53 /*54 // Union of possible actions55 union {56 // Option 1 : locks and threads57 struct {58 // 1 thread or N thread59 union {60 thread_desc * thrd;61 struct {62 thread_desc ** thrds;63 unsigned short thrd_count;64 };65 };66 // 1 lock or N lock67 union {68 __spinlock_t * lock;69 struct {70 __spinlock_t ** locks;71 unsigned short lock_count;72 };73 };74 };75 // Option 2 : action pointer76 __finish_callback_fptr_t callback;77 };78 /*/79 thread_desc * thrd;80 thread_desc ** thrds;81 unsigned short thrd_count;82 __spinlock_t * lock;83 __spinlock_t ** locks;84 unsigned short lock_count;85 __finish_callback_fptr_t callback;86 //*/87 };88 static inline void ?{}(FinishAction & this) {89 this.action_code = No_Action;90 this.thrd = NULL;91 this.lock = NULL;92 }93 static inline void ^?{}(FinishAction &) {}94 46 95 47 // Processor … … 115 67 // RunThread data 116 68 // Action to do after a thread is ran 117 struct FinishAction finish;69 $thread * destroyer; 118 70 119 71 // Preemption data … … 134 86 semaphore terminated; 135 87 88 // pthread Stack 89 void * stack; 90 136 91 // Link lists fields 137 92 struct __dbg_node_proc { … … 146 101 }; 147 102 148 void ?{}(processor & this, const char * name, struct cluster & cltr);103 void ?{}(processor & this, const char name[], struct cluster & cltr); 149 104 void ^?{}(processor & this); 150 105 151 106 static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; } 152 107 static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; } 153 static inline void ?{}(processor & this, const char * name) { this{name, *mainCluster }; }108 static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; } 154 109 155 static inline [processor *&, processor *& ] __get( processor & this ) { 156 return this.node.[next, prev]; 157 } 110 static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; } 158 111 159 112 //----------------------------------------------------------------------------- … … 164 117 165 118 // Ready queue for threads 166 __queue_t( thread_desc) ready_queue;119 __queue_t($thread) ready_queue; 167 120 168 121 // Name of the cluster … … 180 133 // List of threads 181 134 __spinlock_t thread_list_lock; 182 __dllist_t(struct thread_desc) threads;135 __dllist_t(struct $thread) threads; 183 136 unsigned int nthreads; 184 137 … … 191 144 extern Duration default_preemption(); 192 145 193 void ?{} (cluster & this, const char * name, Duration preemption_rate);146 void ?{} (cluster & this, const char name[], Duration preemption_rate); 
194 147 void ^?{}(cluster & this); 195 148 196 149 static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption()}; } 197 150 static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; } 198 static inline void ?{} (cluster & this, const char * name) { this{name, default_preemption()}; }151 static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption()}; } 199 152 200 static inline [cluster *&, cluster *& ] __get( cluster & this ) { 201 return this.node.[next, prev]; 202 } 153 static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; } 203 154 204 155 static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE -
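Both the semaphore's waiting list and the cluster's ready_queue above are __queue_t($thread), an intrusive FIFO threaded through each thread's own next pointer. The queue's internals are not part of this changeset, so the following C sketch is an assumption-laden approximation (field and helper names invented) of how such a queue typically works:

    #include <stddef.h>

    struct thread { struct thread * next; };

    struct queue {                           // init: head = NULL; tail = &head
        struct thread *  head;
        struct thread ** tail;               // address of the next link to fill
    };

    static void append(struct queue * q, struct thread * t) {
        t->next  = NULL;
        *q->tail = t;
        q->tail  = &t->next;
    }

    static struct thread * pop_head(struct queue * q) {
        struct thread * t = q->head;
        if (t) {
            q->head = t->next;
            if (q->head == NULL) q->tail = &q->head;   // queue became empty
            t->next = NULL;                            // mark "not enqueued"
        }
        return t;
    }

The real queue terminates with a sentinel rather than NULL; see the note after the monitor.hfa hunks below.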
libcfa/src/concurrency/kernel_private.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Feb 13 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Mar 29 14:06:40 201813 // Update Count : 312 // Last Modified On : Sat Nov 30 19:25:02 2019 13 // Update Count : 8 14 14 // 15 15 … … 31 31 } 32 32 33 void ScheduleThread( thread_desc * ); 34 static inline void WakeThread( thread_desc * thrd ) { 35 if( !thrd ) return; 36 37 disable_interrupts(); 38 ScheduleThread( thrd ); 39 enable_interrupts( __cfaabi_dbg_ctx ); 40 } 41 thread_desc * nextThread(cluster * this); 33 void __schedule_thread( $thread * ) __attribute__((nonnull (1))); 42 34 43 35 //Block current thread and release/wake-up the following resources 44 void BlockInternal(void); 45 void BlockInternal(__spinlock_t * lock); 46 void BlockInternal(thread_desc * thrd); 47 void BlockInternal(__spinlock_t * lock, thread_desc * thrd); 48 void BlockInternal(__spinlock_t * locks [], unsigned short count); 49 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count); 50 void BlockInternal(__finish_callback_fptr_t callback); 51 void LeaveThread(__spinlock_t * lock, thread_desc * thrd); 36 void __leave_thread() __attribute__((noreturn)); 52 37 53 38 //----------------------------------------------------------------------------- 54 39 // Processor 55 40 void main(processorCtx_t *); 41 42 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); 56 43 57 44 static inline void wake_fast(processor * this) { … … 84 71 // Threads 85 72 extern "C" { 86 forall(dtype T | is_thread(T)) 87 void CtxInvokeThread(T * this); 73 void __cfactx_invoke_thread(void (*main)(void *), void * this); 88 74 } 89 75 90 extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);91 92 76 __cfaabi_dbg_debug_do( 93 extern void __cfaabi_dbg_thread_register ( thread_desc* thrd );94 extern void __cfaabi_dbg_thread_unregister( thread_desc* thrd );77 extern void __cfaabi_dbg_thread_register ( $thread * thrd ); 78 extern void __cfaabi_dbg_thread_unregister( $thread * thrd ); 95 79 ) 96 80 … … 99 83 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)] 100 84 85 static inline uint32_t __tls_rand() { 86 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6; 87 kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21; 88 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7; 89 return kernelTLS.rand_seed; 90 } 91 101 92 102 93 void doregister( struct cluster & cltr ); 103 94 void unregister( struct cluster & cltr ); 104 95 105 void doregister( struct cluster * cltr, struct thread_desc& thrd );106 void unregister( struct cluster * cltr, struct thread_desc& thrd );96 void doregister( struct cluster * cltr, struct $thread & thrd ); 97 void unregister( struct cluster * cltr, struct $thread & thrd ); 107 98 108 99 void doregister( struct cluster * cltr, struct processor * proc ); -
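__tls_rand above is a textbook xorshift-style generator over the per-processor rand_seed. Lifted out of the kernel TLS block, the step is just the following (the seed must start nonzero, or the generator is stuck at zero forever):

    #include <stdint.h>

    static uint32_t xorshift(uint32_t * seed) {   // same shift triple as __tls_rand: 6, 21, 7
        *seed ^= *seed << 6;
        *seed ^= *seed >> 21;
        *seed ^= *seed << 7;
        return *seed;
    }

Keeping it inline and per-processor makes it callable from scheduling paths with no locking at all.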
libcfa/src/concurrency/monitor.cfa
r9fb8f01 r3d5701e 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // monitor_desc.c --7 // $monitor.c -- 8 8 // 9 9 // Author : Thierry Delisle 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 14:30:26 201813 // Update Count : 912 // Last Modified On : Wed Dec 4 07:55:14 2019 13 // Update Count : 10 14 14 // 15 15 … … 27 27 //----------------------------------------------------------------------------- 28 28 // Forward declarations 29 static inline void set_owner ( monitor_desc * this, thread_desc* owner );30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc* owner );31 static inline void set_mask ( monitor_desc* storage [], __lock_size_t count, const __waitfor_mask_t & mask );32 static inline void reset_mask( monitor_desc* this );33 34 static inline thread_desc * next_thread( monitor_desc* this );35 static inline bool is_accepted( monitor_desc* this, const __monitor_group_t & monitors );29 static inline void __set_owner ( $monitor * this, $thread * owner ); 30 static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner ); 31 static inline void set_mask ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ); 32 static inline void reset_mask( $monitor * this ); 33 34 static inline $thread * next_thread( $monitor * this ); 35 static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors ); 36 36 37 37 static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count ); 38 static inline void lock_all ( monitor_desc* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );38 static inline void lock_all ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ); 39 39 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ); 40 static inline void unlock_all( monitor_desc* locks [], __lock_size_t count );41 42 static inline void save ( monitor_desc* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );43 static inline void restore( monitor_desc* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );44 45 static inline void init ( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );46 static inline void init_push( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );47 48 static inline thread_desc* check_condition ( __condition_criterion_t * );40 static inline void unlock_all( $monitor * locks [], __lock_size_t count ); 41 42 static inline void save ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] ); 43 static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 44 45 static inline void init ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 46 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 47 48 static inline $thread * check_condition ( __condition_criterion_t * ); 49 49 static inline void brand_condition ( condition & ); 50 
static inline [ thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc* monitors [], __lock_size_t count );50 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count ); 51 51 52 52 forall(dtype T | sized( T )) 53 53 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ); 54 54 static inline __lock_size_t count_max ( const __waitfor_mask_t & mask ); 55 static inline __lock_size_t aggregate ( monitor_desc* storage [], const __waitfor_mask_t & mask );55 static inline __lock_size_t aggregate ( $monitor * storage [], const __waitfor_mask_t & mask ); 56 56 57 57 //----------------------------------------------------------------------------- … … 68 68 69 69 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \ 70 monitor_desc** monitors = mons; /* Save the targeted monitors */ \70 $monitor ** monitors = mons; /* Save the targeted monitors */ \ 71 71 __lock_size_t count = cnt; /* Save the count to a local variable */ \ 72 72 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ … … 80 80 //----------------------------------------------------------------------------- 81 81 // Enter/Leave routines 82 83 84 extern "C" { 85 // Enter single monitor 86 static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) { 87 // Lock the monitor spinlock 88 lock( this->lock __cfaabi_dbg_ctx2 ); 89 // Interrupts disable inside critical section 90 thread_desc * thrd = kernelTLS.this_thread; 91 92 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); 93 94 if( !this->owner ) { 95 // No one has the monitor, just take it 96 set_owner( this, thrd ); 97 98 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 99 } 100 else if( this->owner == thrd) { 101 // We already have the monitor, just note how many times we took it 102 this->recursion += 1; 103 104 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 105 } 106 else if( is_accepted( this, group) ) { 107 // Some one was waiting for us, enter 108 set_owner( this, thrd ); 109 110 // Reset mask 111 reset_mask( this ); 112 113 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 114 } 115 else { 116 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 117 118 // Some one else has the monitor, wait in line for it 119 append( this->entry_queue, thrd ); 120 121 BlockInternal( &this->lock ); 122 123 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 124 125 // BlockInternal will unlock spinlock, no need to unlock ourselves 126 return; 127 } 82 // Enter single monitor 83 static void __enter( $monitor * this, const __monitor_group_t & group ) { 84 // Lock the monitor spinlock 85 lock( this->lock __cfaabi_dbg_ctx2 ); 86 // Interrupts disable inside critical section 87 $thread * thrd = kernelTLS.this_thread; 88 89 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); 90 91 if( !this->owner ) { 92 // No one has the monitor, just take it 93 __set_owner( this, thrd ); 94 95 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 96 } 97 else if( this->owner == thrd) { 98 // We already have the monitor, just note how many times we took it 99 this->recursion += 1; 100 101 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 102 } 103 else if( is_accepted( this, group) ) { 104 // Some one was waiting for us, enter 105 
__set_owner( this, thrd ); 106 107 // Reset mask 108 reset_mask( this ); 109 110 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 111 } 112 else { 113 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 114 115 // Some one else has the monitor, wait in line for it 116 /* paranoid */ verify( thrd->next == 0p ); 117 append( this->entry_queue, thrd ); 118 /* paranoid */ verify( thrd->next == 1p ); 119 120 unlock( this->lock ); 121 park(); 128 122 129 123 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 130 124 131 // Release the lock and leave 125 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 126 return; 127 } 128 129 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 130 131 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 132 /* paranoid */ verify( this->lock.lock ); 133 134 // Release the lock and leave 135 unlock( this->lock ); 136 return; 137 } 138 139 static void __dtor_enter( $monitor * this, fptr_t func ) { 140 // Lock the monitor spinlock 141 lock( this->lock __cfaabi_dbg_ctx2 ); 142 // Interrupts disable inside critical section 143 $thread * thrd = kernelTLS.this_thread; 144 145 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); 146 147 148 if( !this->owner ) { 149 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 150 151 // No one has the monitor, just take it 152 __set_owner( this, thrd ); 153 154 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 155 132 156 unlock( this->lock ); 133 157 return; 134 158 } 135 136 static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) { 137 // Lock the monitor spinlock 138 lock( this->lock __cfaabi_dbg_ctx2 ); 139 // Interrupts disable inside critical section 140 thread_desc * thrd = kernelTLS.this_thread; 141 142 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); 143 144 145 if( !this->owner ) { 146 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 147 148 // No one has the monitor, just take it 149 set_owner( this, thrd ); 150 151 unlock( this->lock ); 152 return; 159 else if( this->owner == thrd) { 160 // We already have the monitor... but where about to destroy it so the nesting will fail 161 // Abort! 
162 abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd ); 163 } 164 165 __lock_size_t count = 1; 166 $monitor ** monitors = &this; 167 __monitor_group_t group = { &this, 1, func }; 168 if( is_accepted( this, group) ) { 169 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 170 171 // Wake the thread that is waiting for this 172 __condition_criterion_t * urgent = pop( this->signal_stack ); 173 /* paranoid */ verify( urgent ); 174 175 // Reset mask 176 reset_mask( this ); 177 178 // Create the node specific to this wait operation 179 wait_ctx_primed( thrd, 0 ) 180 181 // Some one else has the monitor, wait for him to finish and then run 182 unlock( this->lock ); 183 184 // Release the next thread 185 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 186 unpark( urgent->owner->waiting_thread ); 187 188 // Park current thread waiting 189 park(); 190 191 // Some one was waiting for us, enter 192 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 193 } 194 else { 195 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 196 197 wait_ctx( thrd, 0 ) 198 this->dtor_node = &waiter; 199 200 // Some one else has the monitor, wait in line for it 201 /* paranoid */ verify( thrd->next == 0p ); 202 append( this->entry_queue, thrd ); 203 /* paranoid */ verify( thrd->next == 1p ); 204 unlock( this->lock ); 205 206 // Park current thread waiting 207 park(); 208 209 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 210 return; 211 } 212 213 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 214 215 } 216 217 // Leave single monitor 218 void __leave( $monitor * this ) { 219 // Lock the monitor spinlock 220 lock( this->lock __cfaabi_dbg_ctx2 ); 221 222 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 223 224 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 225 226 // Leaving a recursion level, decrement the counter 227 this->recursion -= 1; 228 229 // If we haven't left the last level of recursion 230 // it means we don't need to do anything 231 if( this->recursion != 0) { 232 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 233 unlock( this->lock ); 234 return; 235 } 236 237 // Get the next thread, will be null on low contention monitor 238 $thread * new_owner = next_thread( this ); 239 240 // Check the new owner is consistent with who we wake-up 241 // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor 242 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 243 244 // We can now let other threads in safely 245 unlock( this->lock ); 246 247 //We need to wake-up the thread 248 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 249 unpark( new_owner ); 250 } 251 252 // Leave single monitor for the last time 253 
void __dtor_leave( $monitor * this ) { 254 __cfaabi_dbg_debug_do( 255 if( TL_GET( this_thread ) != this->owner ) { 256 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner); 153 257 } 154 else if( this->owner == thrd) { 155 // We already have the monitor... but where about to destroy it so the nesting will fail 156 // Abort! 157 abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd ); 258 if( this->recursion != 1 ) { 259 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 158 260 } 159 160 __lock_size_t count = 1; 161 monitor_desc ** monitors = &this; 162 __monitor_group_t group = { &this, 1, func }; 163 if( is_accepted( this, group) ) { 164 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 165 166 // Wake the thread that is waiting for this 167 __condition_criterion_t * urgent = pop( this->signal_stack ); 168 verify( urgent ); 169 170 // Reset mask 171 reset_mask( this ); 172 173 // Create the node specific to this wait operation 174 wait_ctx_primed( thrd, 0 ) 175 176 // Some one else has the monitor, wait for him to finish and then run 177 BlockInternal( &this->lock, urgent->owner->waiting_thread ); 178 179 // Some one was waiting for us, enter 180 set_owner( this, thrd ); 181 } 182 else { 183 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 184 185 wait_ctx( thrd, 0 ) 186 this->dtor_node = &waiter; 187 188 // Some one else has the monitor, wait in line for it 189 append( this->entry_queue, thrd ); 190 BlockInternal( &this->lock ); 191 192 // BlockInternal will unlock spinlock, no need to unlock ourselves 193 return; 194 } 195 196 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 197 198 } 199 200 // Leave single monitor 201 void __leave_monitor_desc( monitor_desc * this ) { 202 // Lock the monitor spinlock 203 lock( this->lock __cfaabi_dbg_ctx2 ); 204 205 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 206 207 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 208 209 // Leaving a recursion level, decrement the counter 210 this->recursion -= 1; 211 212 // If we haven't left the last level of recursion 213 // it means we don't need to do anything 214 if( this->recursion != 0) { 215 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 216 unlock( this->lock ); 217 return; 218 } 219 220 // Get the next thread, will be null on low contention monitor 221 thread_desc * new_owner = next_thread( this ); 222 223 // We can now let other threads in safely 224 unlock( this->lock ); 225 226 //We need to wake-up the thread 227 WakeThread( new_owner ); 228 } 229 230 // Leave single monitor for the last time 231 void __leave_dtor_monitor_desc( monitor_desc * this ) { 232 __cfaabi_dbg_debug_do( 233 if( TL_GET( this_thread ) != this->owner ) { 234 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner); 235 } 236 if( this->recursion != 1 ) { 237 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 238 } 239 ) 240 } 241 261 ) 262 } 263 264 extern "C" { 242 265 // Leave the thread monitor 243 266 // last routine called by a thread. 
244 267 // Should never return 245 void __leave_thread_monitor( thread_desc * thrd ) { 246 monitor_desc * this = &thrd->self_mon; 268 void __cfactx_thrd_leave() { 269 $thread * thrd = TL_GET( this_thread ); 270 $monitor * this = &thrd->self_mon; 247 271 248 272 // Lock the monitor now … … 251 275 disable_interrupts(); 252 276 253 thrd->s elf_cor.state = Halted;254 255 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );277 thrd->state = Halted; 278 279 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this ); 256 280 257 281 // Leaving a recursion level, decrement the counter … … 263 287 264 288 // Fetch the next thread, can be null 265 thread_desc * new_owner = next_thread( this ); 266 267 // Leave the thread, this will unlock the spinlock 268 // Use leave thread instead of BlockInternal which is 269 // specialized for this case and supports null new_owner 270 LeaveThread( &this->lock, new_owner ); 289 $thread * new_owner = next_thread( this ); 290 291 // Release the monitor lock 292 unlock( this->lock ); 293 294 // Unpark the next owner if needed 295 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 296 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 297 /* paranoid */ verify( ! kernelTLS.this_processor->destroyer ); 298 /* paranoid */ verify( thrd->state == Halted ); 299 300 kernelTLS.this_processor->destroyer = new_owner; 301 302 // Leave the thread 303 __leave_thread(); 271 304 272 305 // Control flow should never reach here! … … 278 311 static inline void enter( __monitor_group_t monitors ) { 279 312 for( __lock_size_t i = 0; i < monitors.size; i++) { 280 __enter _monitor_desc( monitors[i], monitors );313 __enter( monitors[i], monitors ); 281 314 } 282 315 } … … 284 317 // Leave multiple monitor 285 318 // relies on the monitor array being sorted 286 static inline void leave( monitor_desc* monitors [], __lock_size_t count) {319 static inline void leave($monitor * monitors [], __lock_size_t count) { 287 320 for( __lock_size_t i = count - 1; i >= 0; i--) { 288 __leave _monitor_desc( monitors[i] );321 __leave( monitors[i] ); 289 322 } 290 323 } … … 292 325 // Ctor for monitor guard 293 326 // Sorts monitors before entering 294 void ?{}( monitor_guard_t & this, monitor_desc* m [], __lock_size_t count, fptr_t func ) {295 thread_desc* thrd = TL_GET( this_thread );327 void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) { 328 $thread * thrd = TL_GET( this_thread ); 296 329 297 330 // Store current array … … 333 366 // Ctor for monitor guard 334 367 // Sorts monitors before entering 335 void ?{}( monitor_dtor_guard_t & this, monitor_desc* m [], fptr_t func ) {368 void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) { 336 369 // optimization 337 thread_desc* thrd = TL_GET( this_thread );370 $thread * thrd = TL_GET( this_thread ); 338 371 339 372 // Store current array … … 346 379 (thrd->monitors){m, 1, func}; 347 380 348 __ enter_monitor_dtor( this.m, func );381 __dtor_enter( this.m, func ); 349 382 } 350 383 … … 352 385 void ^?{}( monitor_dtor_guard_t & this ) { 353 386 // Leave the monitors in order 354 __ leave_dtor_monitor_desc( this.m );387 __dtor_leave( this.m ); 355 388 356 389 // Restore thread context … … 360 393 
//----------------------------------------------------------------------------- 361 394 // Internal scheduling types 362 void ?{}(__condition_node_t & this, thread_desc* waiting_thread, __lock_size_t count, uintptr_t user_info ) {395 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) { 363 396 this.waiting_thread = waiting_thread; 364 397 this.count = count; 365 this.next = NULL;398 this.next = 0p; 366 399 this.user_info = user_info; 367 400 } … … 369 402 void ?{}(__condition_criterion_t & this ) with( this ) { 370 403 ready = false; 371 target = NULL;372 owner = NULL;373 next = NULL;374 } 375 376 void ?{}(__condition_criterion_t & this, monitor_desc* target, __condition_node_t & owner ) {404 target = 0p; 405 owner = 0p; 406 next = 0p; 407 } 408 409 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) { 377 410 this.ready = false; 378 411 this.target = target; 379 412 this.owner = &owner; 380 this.next = NULL;413 this.next = 0p; 381 414 } 382 415 … … 387 420 388 421 // Check that everything is as expected 389 assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );422 assertf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors ); 390 423 verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count ); 391 424 verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count ); … … 399 432 // Append the current wait operation to the ones already queued on the condition 400 433 // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion 434 /* paranoid */ verify( waiter.next == 0p ); 401 435 append( this.blocked, &waiter ); 436 /* paranoid */ verify( waiter.next == 1p ); 402 437 403 438 // Lock all monitors (aggregates the locks as well) … … 406 441 // Find the next thread(s) to run 407 442 __lock_size_t thread_count = 0; 408 thread_desc* threads[ count ];443 $thread * threads[ count ]; 409 444 __builtin_memset( threads, 0, sizeof( threads ) ); 410 445 … … 414 449 // Remove any duplicate threads 415 450 for( __lock_size_t i = 0; i < count; i++) { 416 thread_desc* new_owner = next_thread( monitors[i] );451 $thread * new_owner = next_thread( monitors[i] ); 417 452 insert_unique( threads, thread_count, new_owner ); 418 453 } 419 454 455 // Unlock the locks, we don't need them anymore 456 for(int i = 0; i < count; i++) { 457 unlock( *locks[i] ); 458 } 459 460 // Wake the threads 461 for(int i = 0; i < thread_count; i++) { 462 unpark( threads[i] ); 463 } 464 420 465 // Everything is ready to go to sleep 421 BlockInternal( locks, count, threads, thread_count);466 park(); 422 467 423 468 // We are back, restore the owners and recursions … … 434 479 //Some more checking in debug 435 480 __cfaabi_dbg_debug_do( 436 thread_desc* this_thrd = TL_GET( this_thread );481 $thread * this_thrd = TL_GET( this_thread ); 437 482 if ( this.monitor_count != this_thrd->monitors.size ) { 438 483 abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size ); … … 449 494 450 495 // Lock all monitors 451 lock_all( this.monitors, NULL, count );496 lock_all( this.monitors, 0p, count ); 452 497 453 498 //Pop the head of the waiting queue … … 471 516 472 517 //Check that everything is as expected 473 verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );518 verifyf( 
this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors ); 474 519 verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count ); 475 520 … … 488 533 489 534 //Find the thread to run 490 thread_desc * signallee = pop_head( this.blocked )->waiting_thread; 491 set_owner( monitors, count, signallee ); 535 $thread * signallee = pop_head( this.blocked )->waiting_thread; 536 /* paranoid */ verify( signallee->next == 0p ); 537 __set_owner( monitors, count, signallee ); 492 538 493 539 __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee ); 494 540 541 // unlock all the monitors 542 unlock_all( locks, count ); 543 544 // unpark the thread we signalled 545 unpark( signallee ); 546 495 547 //Everything is ready to go to sleep 496 BlockInternal( locks, count, &signallee, 1);548 park(); 497 549 498 550 … … 535 587 // Create one! 536 588 __lock_size_t max = count_max( mask ); 537 monitor_desc* mon_storage[max];589 $monitor * mon_storage[max]; 538 590 __builtin_memset( mon_storage, 0, sizeof( mon_storage ) ); 539 591 __lock_size_t actual_count = aggregate( mon_storage, mask ); … … 553 605 { 554 606 // Check if the entry queue 555 thread_desc* next; int index;607 $thread * next; int index; 556 608 [next, index] = search_entry_queue( mask, monitors, count ); 557 609 … … 563 615 verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." ); 564 616 565 monitor_desc* mon2dtor = accepted[0];617 $monitor * mon2dtor = accepted[0]; 566 618 verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." ); 567 619 … … 589 641 590 642 // Set the owners to be the next thread 591 set_owner( monitors, count, next ); 592 593 // Everything is ready to go to sleep 594 BlockInternal( locks, count, &next, 1 ); 643 __set_owner( monitors, count, next ); 644 645 // unlock all the monitors 646 unlock_all( locks, count ); 647 648 // unpark the thread we signalled 649 unpark( next ); 650 651 //Everything is ready to go to sleep 652 park(); 595 653 596 654 // We are back, restore the owners and recursions … … 630 688 } 631 689 690 // unlock all the monitors 691 unlock_all( locks, count ); 692 632 693 //Everything is ready to go to sleep 633 BlockInternal( locks, count);694 park(); 634 695 635 696 … … 648 709 // Utilities 649 710 650 static inline void set_owner( monitor_desc * this, thread_desc* owner ) {651 / / __cfaabi_dbg_print_safe( "Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner);711 static inline void __set_owner( $monitor * this, $thread * owner ) { 712 /* paranoid */ verify( this->lock.lock ); 652 713 653 714 //Pass the monitor appropriately … … 658 719 } 659 720 660 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) { 661 monitors[0]->owner = owner; 662 monitors[0]->recursion = 1; 721 static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) { 722 /* paranoid */ verify ( monitors[0]->lock.lock ); 723 /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] ); 724 monitors[0]->owner = owner; 725 monitors[0]->recursion = 1; 663 726 for( __lock_size_t i = 1; i < count; i++ ) { 664 monitors[i]->owner = owner; 665 monitors[i]->recursion = 0; 666 } 667 } 668 669 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const 
__waitfor_mask_t & mask ) { 727 /* paranoid */ verify ( monitors[i]->lock.lock ); 728 /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] ); 729 monitors[i]->owner = owner; 730 monitors[i]->recursion = 0; 731 } 732 } 733 734 static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) { 670 735 for( __lock_size_t i = 0; i < count; i++) { 671 736 storage[i]->mask = mask; … … 673 738 } 674 739 675 static inline void reset_mask( monitor_desc* this ) {676 this->mask.accepted = NULL;677 this->mask.data = NULL;740 static inline void reset_mask( $monitor * this ) { 741 this->mask.accepted = 0p; 742 this->mask.data = 0p; 678 743 this->mask.size = 0; 679 744 } 680 745 681 static inline thread_desc * next_thread( monitor_desc* this ) {746 static inline $thread * next_thread( $monitor * this ) { 682 747 //Check the signaller stack 683 748 __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top); … … 687 752 //regardless of if we are ready to baton pass, 688 753 //we need to set the monitor as in use 689 set_owner( this, urgent->owner->waiting_thread ); 754 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 755 __set_owner( this, urgent->owner->waiting_thread ); 690 756 691 757 return check_condition( urgent ); … … 694 760 // No signaller thread 695 761 // Get the next thread in the entry_queue 696 thread_desc * new_owner = pop_head( this->entry_queue ); 697 set_owner( this, new_owner ); 762 $thread * new_owner = pop_head( this->entry_queue ); 763 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 764 /* paranoid */ verify( !new_owner || new_owner->next == 0p ); 765 __set_owner( this, new_owner ); 698 766 699 767 return new_owner; 700 768 } 701 769 702 static inline bool is_accepted( monitor_desc* this, const __monitor_group_t & group ) {770 static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) { 703 771 __acceptable_t * it = this->mask.data; // Optim 704 772 __lock_size_t count = this->mask.size; … … 722 790 } 723 791 724 static inline void init( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {792 static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 725 793 for( __lock_size_t i = 0; i < count; i++) { 726 794 (criteria[i]){ monitors[i], waiter }; … … 730 798 } 731 799 732 static inline void init_push( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {800 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 733 801 for( __lock_size_t i = 0; i < count; i++) { 734 802 (criteria[i]){ monitors[i], waiter }; … … 746 814 } 747 815 748 static inline void lock_all( monitor_desc* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {816 static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) { 749 817 for( __lock_size_t i = 
0; i < count; i++ ) { 750 818 __spinlock_t * l = &source[i]->lock; … … 760 828 } 761 829 762 static inline void unlock_all( monitor_desc* locks [], __lock_size_t count ) {830 static inline void unlock_all( $monitor * locks [], __lock_size_t count ) { 763 831 for( __lock_size_t i = 0; i < count; i++ ) { 764 832 unlock( locks[i]->lock ); … … 767 835 768 836 static inline void save( 769 monitor_desc* ctx [],837 $monitor * ctx [], 770 838 __lock_size_t count, 771 839 __attribute((unused)) __spinlock_t * locks [], … … 780 848 781 849 static inline void restore( 782 monitor_desc* ctx [],850 $monitor * ctx [], 783 851 __lock_size_t count, 784 852 __spinlock_t * locks [], … … 798 866 // 2 - Checks if all the monitors are ready to run 799 867 // if so return the thread to run 800 static inline thread_desc* check_condition( __condition_criterion_t * target ) {868 static inline $thread * check_condition( __condition_criterion_t * target ) { 801 869 __condition_node_t * node = target->owner; 802 870 unsigned short count = node->count; … … 816 884 } 817 885 818 __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL);819 return ready2run ? node->waiting_thread : NULL;886 __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p ); 887 return ready2run ? node->waiting_thread : 0p; 820 888 } 821 889 822 890 static inline void brand_condition( condition & this ) { 823 thread_desc* thrd = TL_GET( this_thread );891 $thread * thrd = TL_GET( this_thread ); 824 892 if( !this.monitors ) { 825 893 // __cfaabi_dbg_print_safe( "Branding\n" ); 826 assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );894 assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data ); 827 895 this.monitor_count = thrd->monitors.size; 828 896 829 this.monitors = ( monitor_desc**)malloc( this.monitor_count * sizeof( *this.monitors ) );897 this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) ); 830 898 for( int i = 0; i < this.monitor_count; i++ ) { 831 899 this.monitors[i] = thrd->monitors[i]; … … 834 902 } 835 903 836 static inline [ thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc* monitors [], __lock_size_t count ) {837 838 __queue_t( thread_desc) & entry_queue = monitors[0]->entry_queue;904 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) { 905 906 __queue_t($thread) & entry_queue = monitors[0]->entry_queue; 839 907 840 908 // For each thread in the entry-queue 841 for( thread_desc** thrd_it = &entry_queue.head;842 *thrd_it ;909 for( $thread ** thrd_it = &entry_queue.head; 910 *thrd_it != 1p; 843 911 thrd_it = &(*thrd_it)->next 844 912 ) { … … 883 951 } 884 952 885 static inline __lock_size_t aggregate( monitor_desc* storage [], const __waitfor_mask_t & mask ) {953 static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) { 886 954 __lock_size_t size = 0; 887 955 for( __lock_size_t i = 0; i < mask.size; i++ ) { -
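Across the rewritten monitor.cfa the pattern that replaces BlockInternal/LeaveThread is always the same: choose and install the successor while the spinlock is held, drop the lock, then unpark. Ignoring the signaller stack that next_thread() consults first and all the paranoid verifies, __leave() reduces to this C outline (spinlock_t plus the queue and park primitives sketched earlier stand in for the real types):

    struct monitor {
        spinlock_t      lock;
        struct thread * owner;
        int             recursion;
        struct queue    entry_queue;
    };

    void monitor_leave(struct monitor * m) {
        spin_lock(&m->lock);
        if (--m->recursion != 0) {             // still nested: nothing to hand off
            spin_unlock(&m->lock);
            return;
        }
        struct thread * next = pop_head(&m->entry_queue);
        m->owner     = next;                   // transfer ownership under the lock
        m->recursion = next ? 1 : 0;
        spin_unlock(&m->lock);                 // release before waking
        unpark(next);                          // safe: next already owns m
    }

Waking after the unlock means the woken thread can start running on another processor immediately, without bouncing off the monitor's spinlock.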
libcfa/src/concurrency/monitor.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Oct 7 18:06:45 201713 // Update Count : 1 012 // Last Modified On : Wed Dec 4 07:55:32 2019 13 // Update Count : 11 14 14 // 15 15 … … 23 23 24 24 trait is_monitor(dtype T) { 25 monitor_desc* get_monitor( T & );25 $monitor * get_monitor( T & ); 26 26 void ^?{}( T & mutex ); 27 27 }; 28 28 29 static inline void ?{}( monitor_desc& this) with( this ) {29 static inline void ?{}($monitor & this) with( this ) { 30 30 lock{}; 31 31 entry_queue{}; 32 32 signal_stack{}; 33 owner = NULL;33 owner = 0p; 34 34 recursion = 0; 35 mask.accepted = NULL;36 mask.data = NULL;35 mask.accepted = 0p; 36 mask.data = 0p; 37 37 mask.size = 0; 38 dtor_node = NULL;38 dtor_node = 0p; 39 39 } 40 40 41 static inline void ^?{}($monitor & ) {} 42 41 43 struct monitor_guard_t { 42 monitor_desc** m;44 $monitor ** m; 43 45 __lock_size_t count; 44 46 __monitor_group_t prev; 45 47 }; 46 48 47 void ?{}( monitor_guard_t & this, monitor_desc** m, __lock_size_t count, void (*func)() );49 void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() ); 48 50 void ^?{}( monitor_guard_t & this ); 49 51 50 52 struct monitor_dtor_guard_t { 51 monitor_desc* m;53 $monitor * m; 52 54 __monitor_group_t prev; 53 55 }; 54 56 55 void ?{}( monitor_dtor_guard_t & this, monitor_desc** m, void (*func)() );57 void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)() ); 56 58 void ^?{}( monitor_dtor_guard_t & this ); 57 59 … … 70 72 71 73 // The monitor this criterion concerns 72 monitor_desc* target;74 $monitor * target; 73 75 74 76 // The parent node to which this criterion belongs … … 85 87 struct __condition_node_t { 86 88 // Thread that needs to be woken when all criteria are met 87 thread_desc* waiting_thread;89 $thread * waiting_thread; 88 90 89 91 // Array of criteria (Criterions are contiguous in memory) … … 104 106 } 105 107 106 void ?{}(__condition_node_t & this, thread_desc* waiting_thread, __lock_size_t count, uintptr_t user_info );108 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ); 107 109 void ?{}(__condition_criterion_t & this ); 108 void ?{}(__condition_criterion_t & this, monitor_desc* target, __condition_node_t * owner );110 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner ); 109 111 110 112 struct condition { … … 113 115 114 116 // Array of monitor pointers (Monitors are NOT contiguous in memory) 115 monitor_desc** monitors;117 $monitor ** monitors; 116 118 117 119 // Number of monitors in the array … … 120 122 121 123 static inline void ?{}( condition & this ) { 122 this.monitors = NULL;124 this.monitors = 0p; 123 125 this.monitor_count = 0; 124 126 } … … 131 133 bool signal ( condition & this ); 132 134 bool signal_block( condition & this ); 133 static inline bool is_empty ( condition & this ) { return !this.blocked.head; }135 static inline bool is_empty ( condition & this ) { return this.blocked.head == 1p; } 134 136 uintptr_t front ( condition & this ); 135 137 -
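One subtle change above: is_empty now tests blocked.head == 1p, and the matching paranoid verifies in monitor.cfa check thrd->next == 0p before an append and == 1p after. The intrusive queues are evidently terminated by address 1 instead of NULL, so a thread's next link doubles as a membership flag: 0p means "on no queue", 1p means "enqueued and currently the tail". A C rendering of just that convention (names assumed, only the 0p/1p encoding is taken from the diffs):

    #include <stdbool.h>

    #define Q_END ((struct thread *)1)                 // CFA's 1p sentinel

    static bool is_empty(const struct queue * q)  { return q->head == Q_END; }
    static bool enqueued(const struct thread * t) { return t->next != NULL;  }

This is what lets verify( thrd->next == 0p ) cheaply assert that a thread is not already blocked somewhere before it is queued again.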
libcfa/src/concurrency/mutex.cfa
r9fb8f01 r3d5701e 11 11 // Author : Thierry Delisle 12 12 // Created On : Fri May 25 01:37:11 2018 13 // Last Modified By : Thierry Delisle14 // Last Modified On : Fri May 25 01:37:51 201815 // Update Count : 013 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Wed Dec 4 09:16:39 2019 15 // Update Count : 1 16 16 // 17 17 … … 40 40 if( is_locked ) { 41 41 append( blocked_threads, kernelTLS.this_thread ); 42 BlockInternal( &lock ); 42 unlock( lock ); 43 park(); 43 44 } 44 45 else { … … 62 63 lock( this.lock __cfaabi_dbg_ctx2 ); 63 64 this.is_locked = (this.blocked_threads != 0); 64 WakeThread(65 unpark( 65 66 pop_head( this.blocked_threads ) 66 67 ); … … 73 74 this.lock{}; 74 75 this.blocked_threads{}; 75 this.owner = NULL;76 this.owner = 0p; 76 77 this.recursion_count = 0; 77 78 } … … 83 84 void lock(recursive_mutex_lock & this) with(this) { 84 85 lock( lock __cfaabi_dbg_ctx2 ); 85 if( owner == NULL) {86 if( owner == 0p ) { 86 87 owner = kernelTLS.this_thread; 87 88 recursion_count = 1; … … 94 95 else { 95 96 append( blocked_threads, kernelTLS.this_thread ); 96 BlockInternal( &lock ); 97 unlock( lock ); 98 park(); 97 99 } 98 100 } … … 101 103 bool ret = false; 102 104 lock( lock __cfaabi_dbg_ctx2 ); 103 if( owner == NULL) {105 if( owner == 0p ) { 104 106 owner = kernelTLS.this_thread; 105 107 recursion_count = 1; … … 118 120 recursion_count--; 119 121 if( recursion_count == 0 ) { 120 thread_desc* thrd = pop_head( blocked_threads );122 $thread * thrd = pop_head( blocked_threads ); 121 123 owner = thrd; 122 124 recursion_count = (thrd ? 1 : 0); 123 WakeThread( thrd );125 unpark( thrd ); 124 126 } 125 127 unlock( lock ); … … 138 140 void notify_one(condition_variable & this) with(this) { 139 141 lock( lock __cfaabi_dbg_ctx2 ); 140 WakeThread(142 unpark( 141 143 pop_head( this.blocked_threads ) 142 144 ); … … 147 149 lock( lock __cfaabi_dbg_ctx2 ); 148 150 while(this.blocked_threads) { 149 WakeThread(151 unpark( 150 152 pop_head( this.blocked_threads ) 151 153 ); … … 157 159 lock( this.lock __cfaabi_dbg_ctx2 ); 158 160 append( this.blocked_threads, kernelTLS.this_thread ); 159 BlockInternal( &this.lock ); 161 unlock( this.lock ); 162 park(); 160 163 } 161 164 … … 164 167 lock( this.lock __cfaabi_dbg_ctx2 ); 165 168 append( this.blocked_threads, kernelTLS.this_thread ); 166 void __unlock(void) { 167 unlock(l); 168 unlock(this.lock); 169 } 170 BlockInternal( __unlock ); 169 unlock(l); 170 unlock(this.lock); 171 park(); 171 172 lock(l); 172 173 } -
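The line to notice in unlock(mutex_lock&) above is is_locked = (blocked_threads != 0) before the unpark: when a waiter exists the mutex is never observed unlocked, ownership is handed straight over, and the woken thread returns from the park() inside lock() already holding it. As a C outline with the same assumed primitives as before:

    void mutex_unlock(struct mutex * m) {
        spin_lock(&m->lock);
        struct thread * next = pop_head(&m->blocked);
        m->is_locked = (next != NULL);         // stays locked on the waiter's behalf
        spin_unlock(&m->lock);
        unpark(next);                          // no-op when next is NULL
    }

Direct handoff trades some throughput under contention for FIFO fairness: a barging thread cannot sneak in between the unlock and the waiter's wake-up.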
libcfa/src/concurrency/mutex.hfa
r9fb8f01 r3d5701e 11 11 // Author : Thierry Delisle 12 12 // Created On : Fri May 25 01:24:09 2018 13 // Last Modified By : Thierry Delisle14 // Last Modified On : Fri May 25 01:24:12 201815 // Update Count : 013 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Wed Dec 4 09:16:53 2019 15 // Update Count : 1 16 16 // 17 17 … … 36 36 37 37 // List of blocked threads 38 __queue_t(struct thread_desc) blocked_threads;38 __queue_t(struct $thread) blocked_threads; 39 39 40 40 // Locked flag … … 55 55 56 56 // List of blocked threads 57 __queue_t(struct thread_desc) blocked_threads;57 __queue_t(struct $thread) blocked_threads; 58 58 59 59 // Current thread owning the lock 60 struct thread_desc* owner;60 struct $thread * owner; 61 61 62 62 // Number of recursion level … … 83 83 84 84 // List of blocked threads 85 __queue_t(struct thread_desc) blocked_threads;85 __queue_t(struct $thread) blocked_threads; 86 86 }; 87 87 … … 110 110 111 111 static inline void ?{}(lock_scope(L) & this) { 112 this.locks = NULL;112 this.locks = 0p; 113 113 this.count = 0; 114 114 } -
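The generic wait(condition_variable & this, L & l) in mutex.cfa above was rewritten from a BlockInternal callback into straight-line code, and the ordering is load-bearing: enqueue while holding the condition's internal lock, release the user lock, release the internal lock, and only then park. In C, with my_thread() and the other primitives from the earlier sketches as assumptions:

    void cv_wait(struct condvar * cv, struct mutex * user) {
        spin_lock(&cv->lock);
        append(&cv->blocked, my_thread());
        mutex_unlock(user);                    // user lock first, as in the diff
        spin_unlock(&cv->lock);                // then the internal lock
        park(my_thread());                     // notify_one/notify_all unparks us
        mutex_lock(user);                      // reacquire before returning
    }

A notify that fires in the gap between the last unlock and park() is harmless: the unpark permit is saved, so the park() returns immediately (see the kernel.cfa note above).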
libcfa/src/concurrency/preemption.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jun 5 17:35:49 201813 // Update Count : 3712 // Last Modified On : Thu Dec 5 16:34:05 2019 13 // Update Count : 43 14 14 // 15 15 … … 24 24 #include <string.h> 25 25 #include <unistd.h> 26 #include <limits.h> // PTHREAD_STACK_MIN 26 27 } 27 28 … … 38 39 // FwdDeclarations : timeout handlers 39 40 static void preempt( processor * this ); 40 static void timeout( thread_desc* this );41 static void timeout( $thread * this ); 41 42 42 43 // FwdDeclarations : Signal handlers … … 64 65 event_kernel_t * event_kernel; // kernel public handle to even kernel 65 66 static pthread_t alarm_thread; // pthread handle to alarm thread 67 static void * alarm_stack; // pthread stack for alarm thread 66 68 67 69 static void ?{}(event_kernel_t & this) with( this ) { … … 81 83 // Get next expired node 82 84 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) { 83 if( !alarms->head ) return NULL;// If no alarms return null84 if( alarms->head->alarm >= currtime ) return NULL;// If alarms head not expired return null85 return pop(alarms); // Otherwise just pop head85 if( !alarms->head ) return 0p; // If no alarms return null 86 if( alarms->head->alarm >= currtime ) return 0p; // If alarms head not expired return null 87 return pop(alarms); // Otherwise just pop head 86 88 } 87 89 88 90 // Tick one frame of the Discrete Event Simulation for alarms 89 91 static void tick_preemption() { 90 alarm_node_t * node = NULL;// Used in the while loop but cannot be declared in the while condition91 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading92 Time currtime = __kernel_get_time(); // Check current time once so weeverything "happens at once"92 alarm_node_t * node = 0p; // Used in the while loop but cannot be declared in the while condition 93 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading 94 Time currtime = __kernel_get_time(); // Check current time once so everything "happens at once" 93 95 94 96 //Loop throught every thing expired … … 182 184 183 185 // Enable interrupts by decrementing the counter 184 // If counter reaches 0, execute any pending CtxSwitch186 // If counter reaches 0, execute any pending __cfactx_switch 185 187 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 186 188 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 187 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic store188 189 189 190 with( kernelTLS.preemption_state ){ … … 207 208 if( proc->pending_preemption ) { 208 209 proc->pending_preemption = false; 209 BlockInternal( thrd);210 force_yield( __POLL_PREEMPTION ); 210 211 } 211 212 } … … 217 218 218 219 // Disable interrupts by incrementint the counter 219 // Don't execute any pending CtxSwitch even if counter reaches 0220 // Don't execute any pending __cfactx_switch even if counter reaches 0 220 221 void enable_interrupts_noPoll() { 221 222 unsigned short prev = kernelTLS.preemption_state.disable_count; … … 243 244 sigaddset( &mask, sig ); 244 245 245 if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL) == -1 ) {246 if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) { 246 247 abort( "internal error, pthread_sigmask" ); 247 248 } … … 254 255 sigaddset( &mask, sig ); 255 256 256 if ( pthread_sigmask( SIG_BLOCK, &mask, 
NULL) == -1 ) {257 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 257 258 abort( "internal error, pthread_sigmask" ); 258 259 } … … 266 267 267 268 // reserved for future use 268 static void timeout( thread_desc* this ) {269 static void timeout( $thread * this ) { 269 270 //TODO : implement waking threads 270 271 } 271 272 272 273 // KERNEL ONLY 273 // Check if a CtxSwitch signal handler shoud defer274 // Check if a __cfactx_switch signal handler shoud defer 274 275 // If true : preemption is safe 275 276 // If false : preemption is unsafe and marked as pending … … 301 302 302 303 // Setup proper signal handlers 303 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler304 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler 304 305 305 306 signal_block( SIGALRM ); 306 307 307 pthread_create( &alarm_thread, NULL, alarm_loop, NULL);308 alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p ); 308 309 } 309 310 … … 316 317 sigset_t mask; 317 318 sigfillset( &mask ); 318 sigprocmask( SIG_BLOCK, &mask, NULL);319 sigprocmask( SIG_BLOCK, &mask, 0p ); 319 320 320 321 // Notify the alarm thread of the shutdown … … 323 324 324 325 // Wait for the preemption thread to finish 325 pthread_join( alarm_thread, NULL ); 326 327 pthread_join( alarm_thread, 0p ); 328 free( alarm_stack ); 326 329 327 330 // Preemption is now fully stopped … … 380 383 static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" ); 381 384 #endif 382 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL) == -1 ) {385 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) { 383 386 abort( "internal error, sigprocmask" ); 384 387 } … … 390 393 // Preemption can occur here 391 394 392 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch395 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch 393 396 } 394 397 … … 399 402 sigset_t mask; 400 403 sigfillset(&mask); 401 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL) == -1 ) {404 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 402 405 abort( "internal error, pthread_sigmask" ); 403 406 } … … 420 423 {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );} 421 424 continue; 422 case EINVAL :425 case EINVAL : 423 426 abort( "Timeout was invalid." ); 424 427 default: … … 453 456 EXIT: 454 457 __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" ); 455 return NULL;458 return 0p; 456 459 } 457 460 … … 466 469 sigset_t oldset; 467 470 int ret; 468 ret = pthread_sigmask(0, NULL, &oldset);471 ret = pthread_sigmask(0, 0p, &oldset); 469 472 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 470 473 -
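The enable/disable pair above implements deferred preemption by counting: the signal handler may force a context switch only when the count is zero, otherwise it records a pending preemption that the outermost enable_interrupts pays off with force_yield. Reduced to a single-threaded skeleton (no atomics, TLS structure, or signal-safety, all of which the real code handles):

    #include <stdbool.h>

    static _Thread_local unsigned disable_count = 0;
    static _Thread_local bool     pending       = false;

    extern void force_yield(void);             // stands in for force_yield(__POLL_PREEMPTION)

    void disable_interrupts(void) { disable_count += 1; }

    void enable_interrupts(void) {
        if (--disable_count == 0 && pending) { // leaving the outermost critical section
            pending = false;
            force_yield();
        }
    }

    void on_preempt_signal(void) {             // the sigHandler_ctxSwitch decision
        if (disable_count != 0) { pending = true; return; }
        force_yield();                         // here: force_yield(__ALARM_PREEMPTION)
    }

The same state is what the paranoid verifies all over kernel.cfa and monitor.cfa inspect: verify( ! kernelTLS.preemption_state.enabled ) asserts that a code path runs with preemption disabled.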
libcfa/src/concurrency/thread.cfa
r9fb8f01 r3d5701e
10 10	// Created On       : Tue Jan 17 12:27:26 2017
11 11	// Last Modified By : Peter A. Buhr
12    	// Last Modified On : Fri Mar 30 17:19:52 2018
13    	// Update Count     : 8
   12 	// Last Modified On : Wed Dec 4 09:17:49 2019
   13 	// Update Count     : 9
14 14	//
15 15	
… …
23 23	#include "invoke.h"
24 24	
25    	extern "C" {
26    	#include <fenv.h>
27    	#include <stddef.h>
28    	}
29    	
30    	//extern volatile thread_local processor * this_processor;
31    	
32 25	//-----------------------------------------------------------------------------
33 26	// Thread ctors and dtors
34    	void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
35    	context{ NULL, NULL };
   27 	void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
   28 	context{ 0p, 0p };
36 29	self_cor{ name, storage, storageSize };
37 30	state = Start;
   31 	preempted = __NO_PREEMPTION;
38 32	curr_cor = &self_cor;
39 33	self_mon.owner = &this;
… …
41 35	self_mon_p = &self_mon;
42 36	curr_cluster = &cl;
43    	next = NULL;
   37 	next = 0p;
44 38	
45    	node.next = NULL;
46    	node.prev = NULL;
   39 	node.next = 0p;
   40 	node.prev = 0p;
47 41	doregister(curr_cluster, this);
48 42	
… …
50 44	}
51 45	
52    	void ^?{}(thread_desc& this) with( this ) {
   46 	void ^?{}($thread& this) with( this ) {
53 47	unregister(curr_cluster, this);
54 48	^self_cor{};
55 49	}
56 50	
   51 	//-----------------------------------------------------------------------------
   52 	// Starting and stopping threads
   53 	forall( dtype T | is_thread(T) )
   54 	void __thrd_start( T & this, void (*main_p)(T &) ) {
   55 	$thread * this_thrd = get_thread(this);
   56 	
   57 	disable_interrupts();
   58 	__cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);
   59 	
   60 	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
   61 	verify( this_thrd->context.SP );
   62 	
   63 	__schedule_thread(this_thrd);
   64 	enable_interrupts( __cfaabi_dbg_ctx );
   65 	}
   66 	
   67 	//-----------------------------------------------------------------------------
   68 	// Support for threads that don't ues the thread keyword
57 69	forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
58 70	void ?{}( scoped(T)& this ) with( this ) {
59 71	handle{};
60    	__thrd_start(handle );
   72 	__thrd_start(handle, main);
61 73	}
62 74	
… …
64 76	void ?{}( scoped(T)& this, P params ) with( this ) {
65 77	handle{ params };
66    	__thrd_start(handle );
   78 	__thrd_start(handle, main);
67 79	}
68 80	
… …
72 84	}
73 85	
74    	//-----------------------------------------------------------------------------
75    	// Starting and stopping threads
76    	forall( dtype T | is_thread(T) )
77    	void __thrd_start( T& this ) {
78    	thread_desc * this_thrd = get_thread(this);
79    	thread_desc * curr_thrd = TL_GET( this_thread );
80    	
81    	disable_interrupts();
82    	CtxStart(&this, CtxInvokeThread);
83    	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
84    	verify( this_thrd->context.SP );
85    	CtxSwitch( &curr_thrd->context, &this_thrd->context );
86    	
87    	ScheduleThread(this_thrd);
88    	enable_interrupts( __cfaabi_dbg_ctx );
89    	}
90    	
91    	extern "C" {
92    	// KERNEL ONLY
93    	void __finish_creation(thread_desc * this) {
94    	// set new coroutine that the processor is executing
95    	// and context switch to it
96    	verify( kernelTLS.this_thread != this );
97    	verify( kernelTLS.this_thread->context.SP );
98    	CtxSwitch( &this->context, &kernelTLS.this_thread->context );
99    	}
100   	}
101   	
102   	void yield( void ) {
103   	// Safety note : This could cause some false positives due to preemption
104   	verify( TL_GET( preemption_state.enabled ) );
105   	BlockInternal( TL_GET( this_thread ) );
106   	// Safety note : This could cause some false positives due to preemption
107   	verify( TL_GET( preemption_state.enabled ) );
108   	}
109   	
110   	void yield( unsigned times ) {
111   	for( unsigned i = 0; i < times; i++ ) {
112   	yield();
113   	}
114   	}
115   	
116 86	// Local Variables: //
117 87	// mode: c //
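The rewritten __thrd_start above primes the new thread's context (SP/FP) and hands it to the scheduler with __schedule_thread, instead of context-switching into it immediately as the deleted code did. A rough plain-C equivalent of that create-then-enqueue shape using POSIX ucontext (thread_desc and ready_queue_push here are illustrative stand-ins, not the runtime's types):

    #include <ucontext.h>
    #include <stdlib.h>

    typedef struct thread_desc { ucontext_t context; struct thread_desc * next; } thread_desc;

    static thread_desc * ready_head = NULL;                       // stand-in for the cluster ready queue
    static void ready_queue_push( thread_desc * t ) { t->next = ready_head; ready_head = t; }

    // Prime the new thread's context, then enqueue it; the creator never switches in.
    static void thread_start( thread_desc * t, void (*main_p)( void ) ) {
        getcontext( &t->context );
        t->context.uc_stack.ss_sp = malloc( 64 * 1024 );
        t->context.uc_stack.ss_size = 64 * 1024;
        t->context.uc_link = NULL;                 // a real runtime returns to the scheduler
        makecontext( &t->context, main_p, 0 );     // set SP/FP so the first switch runs main_p
        ready_queue_push( t );                     // equivalent of __schedule_thread
    }

-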
libcfa/src/concurrency/thread.hfa
r9fb8f01 r3d5701e
10 10	// Created On       : Tue Jan 17 12:27:26 2017
11 11	// Last Modified By : Peter A. Buhr
12    	// Last Modified On : Fri Jun 21 17:51:33 2019
13    	// Update Count     : 5
   12 	// Last Modified On : Wed Dec 4 09:18:14 2019
   13 	// Update Count     : 6
14 14	//
15 15	
… …
28 28	void ^?{}(T& mutex this);
29 29	void main(T& this);
30    	thread_desc* get_thread(T& this);
   30 	$thread* get_thread(T& this);
31 31	};
32 32	
33    	#define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
   33 	// define that satisfies the trait without using the thread keyword
   34 	#define DECL_THREAD(X) $thread* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)
   35 	
   36 	// Inline getters for threads/coroutines/monitors
   37 	forall( dtype T | is_thread(T) )
   38 	static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
34 39	
35 40	forall( dtype T | is_thread(T) )
36    	static inline coroutine_desc* get_coroutine(T & this) {
37    	return &get_thread(this)->self_cor;
38    	}
   41 	static inline $monitor * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
39 42	
40    	forall( dtype T | is_thread(T) )
41    	static inline monitor_desc* get_monitor(T & this) {
42    	return &get_thread(this)->self_mon;
43    	}
   43 	static inline $coroutine* get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; }
   44 	static inline $monitor * get_monitor ($thread * this) __attribute__((const)) { return &this->self_mon; }
44 45	
45    	static inline coroutine_desc* get_coroutine(thread_desc * this) {
46    	return &this->self_cor;
47    	}
48    	
49    	static inline monitor_desc* get_monitor(thread_desc * this) {
50    	return &this->self_mon;
51    	}
52    	
   46 	//-----------------------------------------------------------------------------
   47 	// forward declarations needed for threads
53 48	extern struct cluster * mainCluster;
54 49	
55 50	forall( dtype T | is_thread(T) )
56    	void __thrd_start( T & this );
   51 	void __thrd_start( T & this, void (*)(T &) );
57 52	
58 53	//-----------------------------------------------------------------------------
59 54	// Ctors and dtors
60    	void ?{}(thread_desc& this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
61    	void ^?{}(thread_desc& this);
   55 	void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
   56 	void ^?{}($thread & this);
62 57	
63    	static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
64    	static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
65    	static inline void ?{}(thread_desc& this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
66    	static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 65000 }; }
67    	static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, NULL, stackSize }; }
68    	static inline void ?{}(thread_desc& this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
69    	static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 65000 }; }
70    	static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 65000 }; }
71    	static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
   58 	static inline void ?{}($thread & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
   59 	static inline void ?{}($thread & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
   60 	static inline void ?{}($thread & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
   61 	static inline void ?{}($thread & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }
   62 	static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }
   63 	static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
   64 	static inline void ?{}($thread & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }
   65 	static inline void ?{}($thread & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }
   66 	static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
72 67	
73 68	//-----------------------------------------------------------------------------
… …
88 83	void ^?{}( scoped(T)& this );
89 84	
90    	void yield();
91    	void yield( unsigned times );
   85 	//-----------------------------------------------------------------------------
   86 	// Thread getters
   87 	static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
92 88	
93    	static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
   89 	//-----------------------------------------------------------------------------
   90 	// Scheduler API
   91 	
   92 	//----------
   93 	// Park thread: block until corresponding call to unpark, won't block if unpark is already called
   94 	void park( void );
   95 	
   96 	//----------
   97 	// Unpark a thread, if the thread is already blocked, schedule it
   98 	// if the thread is not yet block, signal that it should rerun immediately
   99 	void unpark( $thread * this );
   100	
   101	forall( dtype T | is_thread(T) )
   102	static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
   103	
   104	//----------
   105	// Yield: force thread to block and be rescheduled
   106	bool force_yield( enum __Preemption_Reason );
   107	
   108	static inline void yield() {
   109	force_yield(__MANUAL_PREEMPTION);
   110	}
   111	
   112	// Yield: yield N times
   113	static inline void yield( unsigned times ) {
   114	for( times ) {
   115	yield();
   116	}
   117	}
94 118	
95    	// Local Variables: //
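The park/unpark pair declared above has binary-semaphore semantics: an unpark that arrives before the corresponding park makes that park return immediately, and at most one unpark is remembered. A hedged plain-C model of the same contract (a real runtime parks on the scheduler, not a condition variable):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t m;
        pthread_cond_t c;
        bool permit;                       // one stored unpark, never more
    } parker;

    #define PARKER_INIT { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

    void park( parker * p ) {
        pthread_mutex_lock( &p->m );
        while ( ! p->permit )              // no permit yet: block until unpark
            pthread_cond_wait( &p->c, &p->m );
        p->permit = false;                 // consume the permit
        pthread_mutex_unlock( &p->m );
    }

    void unpark( parker * p ) {
        pthread_mutex_lock( &p->m );
        p->permit = true;                  // target not parked yet ? its park returns at once
        pthread_cond_signal( &p->c );
        pthread_mutex_unlock( &p->m );
    }

-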
libcfa/src/exception.c
r9fb8f01 r3d5701e
69 69	
70 70	
71    	// This macro should be the only thing that needs to change across machines. Used in the personality function, way down
72    	// in termination.
   71 	// This macro should be the only thing that needs to change across machines.
   72 	// Used in the personality function, way down in termination.
73 73	// struct _Unwind_Context * -> _Unwind_Reason_Code(*)(exception_t *)
74 74	#define MATCHER_FROM_CONTEXT(ptr_to_context) \
… …
102 102	}
103 103	
104    	// Do we control where exceptions get thrown even with concurency? If not these are not quite thread safe, the cleanup
105    	// hook has to be added after the node is built but before it is made the top node.
   104 	// Do we control where exceptions get thrown even with concurency?
   105 	// If not these are not quite thread safe, the cleanup hook has to
   106 	// be added after the node is built but before it is made the top node.
106 107	
107 108	void __cfaabi_ehm__try_resume_setup(struct __cfaabi_ehm__try_resume_node * node,
… …
212 213	_Unwind_Reason_Code ret = _Unwind_RaiseException( &this_exception_storage );
213 214	
214    	// If we reach here it means something happened. For resumption to work we need to find a way to return back to
215    	// here. Most of them will probably boil down to setting a global flag and making the phase 1 either stop or
216    	// fail. Causing an error on purpose may help avoiding unnecessary work but it might have some weird side
217    	// effects. If we just pretend no handler was found that would work but may be expensive for no reason since we
218    	// will always search the whole stack.
   215 	// If we reach here it means something happened. For resumption to work we need to find a way
   216 	// to return back to here. Most of them will probably boil down to setting a global flag and
   217 	// making the phase 1 either stop or fail. Causing an error on purpose may help avoiding
   218 	// unnecessary work but it might have some weird side effects. If we just pretend no handler
   219 	// was found that would work but may be expensive for no reason since we will always search
   220 	// the whole stack.
219 221	
220 222	if( ret == _URC_END_OF_STACK ) {
221    	// No proper handler was found. This can be handled in several way. C++ calls std::terminate Here we
222    	// force unwind the stack, basically raising a cancellation.
   223 	// No proper handler was found. This can be handled in many ways, C++ calls std::terminate.
   224 	// Here we force unwind the stack, basically raising a cancellation.
223 225	printf("Uncaught exception %p\n", &this_exception_storage);
224 226	
… …
228 230	}
229 231	
230    	// We did not simply reach the end of the stack without finding a handler. Something wen't wrong
   232 	// We did not simply reach the end of the stack without finding a handler. This is an error.
231 233	printf("UNWIND ERROR %d after raise exception\n", ret);
232 234	abort();
… …
246 248	}
247 249	
248    	#if defined(PIC)
249    	#warning Exceptions not yet supported when using Position-Independent Code
250    	__attribute__((noinline))
251    	void __cfaabi_ehm__try_terminate(void (*try_block)(),
252    	void (*catch_block)(int index, exception_t * except),
253    	__attribute__((unused)) int (*match_block)(exception_t * except)) {
254    	abort();
255    	}
256    	#else
257    	// This is our personality routine. For every stack frame anotated with ".cfi_personality 0x3,__gcfa_personality_v0".
258    	// This function will be called twice when unwinding. Once in the search phased and once in the cleanup phase.
   250 	#pragma GCC push_options
   251 	#pragma GCC optimize("O0")
   252 	
   253 	// This is our personality routine. For every stack frame annotated with
   254 	// ".cfi_personality 0x3,__gcfa_personality_v0" this function will be called twice when unwinding.
   255 	// Once in the search phase and once in the cleanup phase.
259 256	_Unwind_Reason_Code __gcfa_personality_v0 (
260 257	int version, _Unwind_Action actions, unsigned long long exceptionClass,
… …
264 261	
265 262	//__cfaabi_dbg_print_safe("CFA: 0x%lx\n", _Unwind_GetCFA(context));
266    	__cfaabi_dbg_print_safe("Personality function (%d, %x, %llu, %p, %p):", version, actions, exceptionClass, unwind_exception, context);
   263 	__cfaabi_dbg_print_safe("Personality function (%d, %x, %llu, %p, %p):",
   264 	version, actions, exceptionClass, unwind_exception, context);
267 265	
268 266	// If we've reached the end of the stack then there is nothing much we can do...
… …
291 289	// Get the instuction pointer and a reading pointer into the exception table
292 290	lsda_header_info lsd_info;
293    	const unsigned char * cur_ptr = parse_lsda_header( context, lsd, &lsd_info);
   291 	const unsigned char * cur_ptr = parse_lsda_header(context, lsd, &lsd_info);
294 292	_Unwind_Ptr instruction_ptr = _Unwind_GetIP( context );
295 293	
… …
302 300	
303 301	// Decode the common stuff we have in here
304    	cur_ptr = read_encoded_value (0, lsd_info.call_site_encoding, cur_ptr, &callsite_start);
305    	cur_ptr = read_encoded_value (0, lsd_info.call_site_encoding, cur_ptr, &callsite_len);
306    	cur_ptr = read_encoded_value (0, lsd_info.call_site_encoding, cur_ptr, &callsite_landing_pad);
307    	cur_ptr = read_uleb128 (cur_ptr, &callsite_action);
   302 	cur_ptr = read_encoded_value(0, lsd_info.call_site_encoding, cur_ptr, &callsite_start);
   303 	cur_ptr = read_encoded_value(0, lsd_info.call_site_encoding, cur_ptr, &callsite_len);
   304 	cur_ptr = read_encoded_value(0, lsd_info.call_site_encoding, cur_ptr, &callsite_landing_pad);
   305 	cur_ptr = read_uleb128(cur_ptr, &callsite_action);
308 306	
309 307	// Have we reach the correct frame info yet?
… …
316 314	void * ep = (void*)lsd_info.Start + callsite_start + callsite_len;
317 315	void * ip = (void*)instruction_ptr;
318    	__cfaabi_dbg_print_safe("\nfound %p - %p (%p, %p, %p), looking for %p\n", bp, ep, ls, cs, cl, ip);
   316 	__cfaabi_dbg_print_safe("\nfound %p - %p (%p, %p, %p), looking for %p\n",
   317 	bp, ep, ls, cs, cl, ip);
319 318	#endif // __CFA_DEBUG_PRINT__
320 319	continue;
321 320	}
322 321	
323    	// Have we gone too far
   322 	// Have we gone too far?
324 323	if( lsd_info.Start + callsite_start > instruction_ptr ) {
325 324	printf(" gone too far");
… …
331 330	// Which phase are we in
332    	if (actions & _UA_SEARCH_PHASE) {
333    	// Search phase, this means we probably found a potential handler and must check if it is a match
334    	
335    	// If we have arbitrarily decided that 0 means nothing to do and 1 means there is a potential handler
336    	// This doesn't seem to conflict the gcc default behavior
   331 	if (actions & _UA_SEARCH_PHASE) {
   332 	// In search phase, these means we found a potential handler we must check.
   333 	
   334 	// We have arbitrarily decided that 0 means nothing to do and 1 means there is
   335 	// a potential handler. This doesn't seem to conflict the gcc default behavior.
337 336	if (callsite_action != 0) {
338 337	// Now we want to run some code to see if the handler matches
… …
351 350	// The current apprach uses one exception table entry per try block
352 351	_uleb128_t imatcher;
353    	// Get the relative offset to the
354    	cur_ptr = read_uleb128 (cur_ptr, &imatcher);
355    	
356    	// Get a function pointer from the relative offset and call it
357    	// _Unwind_Reason_Code (*matcher)() = (_Unwind_Reason_Code (*)())lsd_info.LPStart + imatcher;
   352 	// Get the relative offset to the {...}?
   353 	cur_ptr = read_uleb128(cur_ptr, &imatcher);
358 354	
359 355	_Unwind_Reason_Code (*matcher)(exception_t *) =
… …
414 410	}
415 411	
416    	// Try statements are hoisted out see comments for details. With this could probably be unique and simply linked from
417    	// libcfa but there is one problem left, see the exception table for details
   412 	// Try statements are hoisted out see comments for details. While this could probably be unique
   413 	// and simply linked from libcfa but there is one problem left, see the exception table for details
418 414	__attribute__((noinline))
419 415	void __cfaabi_ehm__try_terminate(void (*try_block)(),
… …
428 424	// assembly works.
429 425	
430    	// Setup the personality routine
   426 	// Setup the personality routine and exception table.
   427 	#ifdef __PIC__
   428 	asm volatile (".cfi_personality 0x9b,CFA.ref.__gcfa_personality_v0");
   429 	asm volatile (".cfi_lsda 0x1b, .LLSDACFA2");
   430 	#else
431 431	asm volatile (".cfi_personality 0x3,__gcfa_personality_v0");
432    	// Setup the exception table
433 432	asm volatile (".cfi_lsda 0x3, .LLSDACFA2");
   433 	#endif
434 434	
435 435	// Label which defines the start of the area for which the handler is setup.
… …
442 442	asm volatile goto ("" : : : : CATCH );
443 443	
444    	// Normal return
   444 	// Normal return for when there is no throw.
445 445	return;
446 446	
… …
459 459	}
460 460	
461    	// Exception table data we need to generate. While this is almost generic, the custom data refers to foo_try_match try
462    	// match, which is no way generic. Some more works need to be done if we want to have a single call to the try routine.
463    	
   461 	// Exception table data we need to generate. While this is almost generic, the custom data refers
   462 	// to {*}try_terminate, which is no way generic. Some more works need to be done if we want to
   463 	// have a single call to the try routine.
   464 	
   465 	#ifdef __PIC__
464 466	#if defined( __i386 ) || defined( __x86_64 )
465 467	asm (
466    	// HEADER
   468 	// HEADER
467 469	".LFECFA1:\n"
468 470	"	.globl __gcfa_personality_v0\n"
469 471	"	.section .gcc_except_table,\"a\",@progbits\n"
470    	".LLSDACFA2:\n" //TABLE header
   472 	// TABLE HEADER (important field is the BODY length at the end)
   473 	".LLSDACFA2:\n"
471 474	"	.byte 0xff\n"
472 475	"	.byte 0xff\n"
473 476	"	.byte 0x1\n"
474    	"	.uleb128 .LLSDACSECFA2-.LLSDACSBCFA2\n" // BODY length
475    	// Body uses language specific data and therefore could be modified arbitrarily
476    	".LLSDACSBCFA2:\n" // BODY start
477    	"	.uleb128 .TRYSTART-__cfaabi_ehm__try_terminate\n" // Handled area start (relative to start of function)
478    	"	.uleb128 .TRYEND-.TRYSTART\n" // Handled area length
479    	"	.uleb128 .CATCH-__cfaabi_ehm__try_terminate\n" // Hanlder landing pad adress (relative to start of function)
480    	"	.uleb128 1\n" // Action code, gcc seems to use always 0
481    	".LLSDACSECFA2:\n" // BODY end
482    	"	.text\n" // TABLE footer
   477 	"	.uleb128 .LLSDACSECFA2-.LLSDACSBCFA2\n"
   478 	// BODY (language specific data)
   479 	// This uses language specific data and can be modified arbitrarily
   480 	// We use handled area offset, handled area length,
   481 	// handler landing pad offset and 1 (action code, gcc seems to use 0).
   482 	".LLSDACSBCFA2:\n"
   483 	"	.uleb128 .TRYSTART-__cfaabi_ehm__try_terminate\n"
   484 	"	.uleb128 .TRYEND-.TRYSTART\n"
   485 	"	.uleb128 .CATCH-__cfaabi_ehm__try_terminate\n"
   486 	"	.uleb128 1\n"
   487 	".LLSDACSECFA2:\n"
   488 	// TABLE FOOTER
   489 	"	.text\n"
   490 	"	.size __cfaabi_ehm__try_terminate, .-__cfaabi_ehm__try_terminate\n"
   491 	);
   492 	
   493 	// Somehow this piece of helps with the resolution of debug symbols.
   494 	__attribute__((unused)) static const int dummy = 0;
   495 	
   496 	asm (
   497 	// Add a hidden symbol which points at the function.
   498 	"	.hidden CFA.ref.__gcfa_personality_v0\n"
   499 	"	.weak CFA.ref.__gcfa_personality_v0\n"
   500 	// No clue what this does specifically
   501 	"	.section .data.rel.local.CFA.ref.__gcfa_personality_v0,\"awG\",@progbits,CFA.ref.__gcfa_personality_v0,comdat\n"
   502 	"	.align 8\n"
   503 	"	.type CFA.ref.__gcfa_personality_v0, @object\n"
   504 	"	.size CFA.ref.__gcfa_personality_v0, 8\n"
   505 	"CFA.ref.__gcfa_personality_v0:\n"
   506 	#if defined( __x86_64 )
   507 	"	.quad __gcfa_personality_v0\n"
   508 	#else // then __i386
   509 	"	.long __gcfa_personality_v0\n"
   510 	#endif
   511 	);
   512 	#else
   513 	#error Exception Handling: unknown architecture for position independent code.
   514 	#endif // __i386 || __x86_64
   515 	#else // __PIC__
   516 	#if defined( __i386 ) || defined( __x86_64 )
   517 	asm (
   518 	// HEADER
   519 	".LFECFA1:\n"
   520 	"	.globl __gcfa_personality_v0\n"
   521 	"	.section .gcc_except_table,\"a\",@progbits\n"
   522 	// TABLE HEADER (important field is the BODY length at the end)
   523 	".LLSDACFA2:\n"
   524 	"	.byte 0xff\n"
   525 	"	.byte 0xff\n"
   526 	"	.byte 0x1\n"
   527 	"	.uleb128 .LLSDACSECFA2-.LLSDACSBCFA2\n"
   528 	// BODY (language specific data)
   529 	".LLSDACSBCFA2:\n"
   530 	// Handled area start (relative to start of function)
   531 	"	.uleb128 .TRYSTART-__cfaabi_ehm__try_terminate\n"
   532 	// Handled area length
   533 	"	.uleb128 .TRYEND-.TRYSTART\n"
   534 	// Handler landing pad address (relative to start of function)
   535 	"	.uleb128 .CATCH-__cfaabi_ehm__try_terminate\n"
   536 	// Action code, gcc seems to always use 0.
   537 	"	.uleb128 1\n"
   538 	// TABLE FOOTER
   539 	".LLSDACSECFA2:\n"
   540 	"	.text\n"
483 541	"	.size __cfaabi_ehm__try_terminate, .-__cfaabi_ehm__try_terminate\n"
484 542	"	.ident \"GCC: (Ubuntu 6.2.0-3ubuntu11~16.04) 6.2.0 20160901\"\n"
485    	//"	.section .note.GNU-stack,\"x\",@progbits\n"
   543 	"	.section .note.GNU-stack,\"x\",@progbits\n"
486 544	);
   545 	#else
   546 	#error Exception Handling: unknown architecture for position dependent code.
487 547	#endif // __i386 || __x86_64
488    	#endif //PIC
   548 	#endif // __PIC__
   549 	
   550 	#pragma GCC pop_options
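For readers unfamiliar with the Itanium unwinder driving __gcfa_personality_v0 above: a personality routine is called for each annotated frame, once in the search phase and again in the cleanup phase. The skeleton below is a deliberately over-simplified sketch in plain C (it claims every frame and omits the LSDA parsing and matcher call the real routine performs):

    #include <unwind.h>

    // Over-simplified two-phase personality: claims every frame.
    // A real personality (like __gcfa_personality_v0) parses the LSDA to
    // find the call-site entry and runs the matcher before claiming.
    _Unwind_Reason_Code toy_personality(
            int version, _Unwind_Action actions, unsigned long long exception_class,
            struct _Unwind_Exception * unwind_exception, struct _Unwind_Context * context ) {
        if ( actions & _UA_SEARCH_PHASE )
            return _URC_HANDLER_FOUND;       // phase 1: report "this frame handles it"
        if ( actions & _UA_CLEANUP_PHASE ) {
            // phase 2: resume at the landing pad; a real routine adds the
            // landing-pad offset read from the exception table.
            _Unwind_SetIP( context, _Unwind_GetRegionStart( context ) );
            return _URC_INSTALL_CONTEXT;
        }
        return _URC_CONTINUE_UNWIND;
    }

-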
libcfa/src/executor.cfa
r9fb8f01 r3d5701e
8 8	#include <stdio.h>
9 9	
10    	forall( otype T | is_node(T) | is_monitor(T) ) {
11    	monitor Buffer {	// unbounded buffer
12    	__queue_t( T ) queue;	// unbounded list of work requests
13    	condition delay;
14    	};	// Buffer
15    	
   10 	forall( dtype T )
   11 	monitor Buffer {	// unbounded buffer
   12 	__queue_t( T ) queue;	// unbounded list of work requests
   13 	condition delay;
   14 	};	// Buffer
   15 	forall( dtype T | is_node(T) ) {
16 16	void insert( Buffer( T ) & mutex buf, T * elem ) with(buf) {
17 17	append( queue, elem );	// insert element into buffer
… …
20 20	
21 21	T * remove( Buffer( T ) & mutex buf ) with(buf) {
22    	if ( ! queue ) wait( delay );	// no request to process ? => wait
23    	return pop_head( queue );
   22 	if ( queue.head != 0 ) wait( delay );	// no request to process ? => wait
   23 	// return pop_head( queue );
24 24	}	// remove
25 25	}	// distribution
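The Buffer monitor above is the classic unbounded producer/consumer queue: insert appends and signals, remove waits while the list is empty and pops. A self-contained pthread rendering of the protocol the comments describe (names are illustrative; this sketch blocks while the queue is empty, which is what the "no request to process ? => wait" comment in the hunk intends):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct node { struct node * next; } node;

    typedef struct {
        pthread_mutex_t m;
        pthread_cond_t delay;
        node * head;
        node ** tail;                          // points at the last next-pointer
    } buffer;

    void buffer_init( buffer * b ) {
        pthread_mutex_init( &b->m, NULL );
        pthread_cond_init( &b->delay, NULL );
        b->head = NULL;
        b->tail = &b->head;
    }

    void insert( buffer * b, node * elem ) {
        pthread_mutex_lock( &b->m );
        elem->next = NULL;
        *b->tail = elem; b->tail = &elem->next;    // append work request
        pthread_cond_signal( &b->delay );          // restart any waiting consumer
        pthread_mutex_unlock( &b->m );
    }

    node * remove_head( buffer * b ) {
        pthread_mutex_lock( &b->m );
        while ( b->head == NULL )                  // no request to process ? => wait
            pthread_cond_wait( &b->delay, &b->m );
        node * elem = b->head;
        b->head = elem->next;
        if ( b->head == NULL ) b->tail = &b->head; // queue drained
        pthread_mutex_unlock( &b->m );
        return elem;
    }

-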
libcfa/src/fstream.cfa
r9fb8f01 r3d5701e
10 10	// Created On       : Wed May 27 17:56:53 2015
11 11	// Last Modified By : Peter A. Buhr
12    	// Last Modified On : Mon Jul 15 18:11:26 2019
13    	// Update Count     : 349
   12 	// Last Modified On : Fri Feb 7 19:01:01 2020
   13 	// Update Count     : 363
14 14	//
15 15	
… …
32 32	
33 33	void ?{}( ofstream & os, void * file ) {
34    	os.file = file;
35    	os.sepDefault = true;
36    	os.sepOnOff = false;
37    	os.nlOnOff = true;
38    	os.prt = false;
39    	os.sawNL = false;
   34 	os.$file = file;
   35 	os.$sepDefault = true;
   36 	os.$sepOnOff = false;
   37 	os.$nlOnOff = true;
   38 	os.$prt = false;
   39 	os.$sawNL = false;
   40 	$sepSetCur( os, sepGet( os ) );
40 41	sepSet( os, " " );
41    	sepSetCur( os, sepGet( os ) );
42 42	sepSetTuple( os, ", " );
43 43	} // ?{}
44 44	
45 45	// private
46    	bool sepPrt( ofstream & os ) { setNL( os, false ); return os.sepOnOff; }
47    	void sepReset( ofstream & os ) { os.sepOnOff = os.sepDefault; }
48    	void sepReset( ofstream & os, bool reset ) { os.sepDefault = reset; os.sepOnOff = os.sepDefault; }
49    	const char * sepGetCur( ofstream & os ) { return os.sepCur; }
50    	void sepSetCur( ofstream & os, const char * sepCur ) { os.sepCur = sepCur; }
51    	bool getNL( ofstream & os ) { return os.sawNL; }
52    	void setNL( ofstream & os, bool state ) { os.sawNL = state; }
53    	bool getANL( ofstream & os ) { return os.nlOnOff; }
54    	bool getPrt( ofstream & os ) { return os.prt; }
55    	void setPrt( ofstream & os, bool state ) { os.prt = state; }
   46 	bool $sepPrt( ofstream & os ) { $setNL( os, false ); return os.$sepOnOff; }
   47 	void $sepReset( ofstream & os ) { os.$sepOnOff = os.$sepDefault; }
   48 	void $sepReset( ofstream & os, bool reset ) { os.$sepDefault = reset; os.$sepOnOff = os.$sepDefault; }
   49 	const char * $sepGetCur( ofstream & os ) { return os.$sepCur; }
   50 	void $sepSetCur( ofstream & os, const char sepCur[] ) { os.$sepCur = sepCur; }
   51 	bool $getNL( ofstream & os ) { return os.$sawNL; }
   52 	void $setNL( ofstream & os, bool state ) { os.$sawNL = state; }
   53 	bool $getANL( ofstream & os ) { return os.$nlOnOff; }
   54 	bool $getPrt( ofstream & os ) { return os.$prt; }
   55 	void $setPrt( ofstream & os, bool state ) { os.$prt = state; }
56 56	
57 57	// public
58    	void ?{}( ofstream & os ) { os.file = 0; }
59    	
60    	void ?{}( ofstream & os, const char * name, const char * mode ) {
   58 	void ?{}( ofstream & os ) { os.$file = 0p; }
   59 	
   60 	void ?{}( ofstream & os, const char name[], const char mode[] ) {
61 61	open( os, name, mode );
62 62	} // ?{}
63 63	
64    	void ?{}( ofstream & os, const char * name ) {
   64 	void ?{}( ofstream & os, const char name[] ) {
65 65	open( os, name, "w" );
66 66	} // ?{}
67 67	
68    	void sepOn( ofstream & os ) { os.sepOnOff = ! getNL( os ); }
69    	void sepOff( ofstream & os ) { os.sepOnOff = false; }
   68 	void ^?{}( ofstream & os ) {
   69 	close( os );
   70 	} // ^?{}
   71 	
   72 	void sepOn( ofstream & os ) { os.$sepOnOff = ! $getNL( os ); }
   73 	void sepOff( ofstream & os ) { os.$sepOnOff = false; }
70 74	
71 75	bool sepDisable( ofstream & os ) {
72    	bool temp = os.sepDefault;
73    	os.sepDefault = false;
74    	sepReset( os );
   76 	bool temp = os.$sepDefault;
   77 	os.$sepDefault = false;
   78 	$sepReset( os );
75 79	return temp;
76 80	} // sepDisable
77 81	
78 82	bool sepEnable( ofstream & os ) {
79    	bool temp = os.sepDefault;
80    	os.sepDefault = true;
81    	if ( os.sepOnOff ) sepReset( os );	// start of line ?
   83 	bool temp = os.$sepDefault;
   84 	os.$sepDefault = true;
   85 	if ( os.$sepOnOff ) $sepReset( os );	// start of line ?
82 86	return temp;
83 87	} // sepEnable
84 88	
85    	void nlOn( ofstream & os ) { os.nlOnOff = true; }
86    	void nlOff( ofstream & os ) { os.nlOnOff = false; }
87    	
88    	const char * sepGet( ofstream & os ) { return os.separator; }
89    	void sepSet( ofstream & os, const char * s ) {
   89 	void nlOn( ofstream & os ) { os.$nlOnOff = true; }
   90 	void nlOff( ofstream & os ) { os.$nlOnOff = false; }
   91 	
   92 	const char * sepGet( ofstream & os ) { return os.$separator; }
   93 	void sepSet( ofstream & os, const char s[] ) {
90 94	assert( s );
91    	strncpy( os.separator, s, sepSize - 1 );
92    	os.separator[sepSize - 1] = '\0';
   95 	strncpy( os.$separator, s, sepSize - 1 );
   96 	os.$separator[sepSize - 1] = '\0';
93 97	} // sepSet
94 98	
95    	const char * sepGetTuple( ofstream & os ) { return os.tupleSeparator; }
96    	void sepSetTuple( ofstream & os, const char * s ) {
   99 	const char * sepGetTuple( ofstream & os ) { return os.$tupleSeparator; }
   100	void sepSetTuple( ofstream & os, const char s[] ) {
97 101	assert( s );
98    	strncpy( os.tupleSeparator, s, sepSize - 1 );
99    	os.tupleSeparator[sepSize - 1] = '\0';
   102	strncpy( os.$tupleSeparator, s, sepSize - 1 );
   103	os.$tupleSeparator[sepSize - 1] = '\0';
100 104	} // sepSet
101 105	
102 106	void ends( ofstream & os ) {
103    	if ( getANL( os ) ) nl( os );
104    	else setPrt( os, false );	// turn off
   107	if ( $getANL( os ) ) nl( os );
   108	else $setPrt( os, false );	// turn off
105 109	if ( &os == &exit ) exit( EXIT_FAILURE );
106 110	if ( &os == &abort ) abort();
… …
108 112	
109 113	int fail( ofstream & os ) {
110    	return os.file == 0 || ferror( (FILE *)(os.file) );
   114	return os.$file == 0 || ferror( (FILE *)(os.$file) );
111 115	} // fail
112 116	
113 117	int flush( ofstream & os ) {
114    	return fflush( (FILE *)(os.file) );
   118	return fflush( (FILE *)(os.$file) );
115 119	} // flush
116 120	
117    	void open( ofstream & os, const char * name, const char * mode ) {
   121	void open( ofstream & os, const char name[], const char mode[] ) {
118 122	FILE * file = fopen( name, mode );
119 123	#ifdef __CFA_DEBUG__
120    	if ( file == 0 ) {
   124	if ( file == 0p ) {
121 125	abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno );
122 126	} // if
… …
125 129	} // open
126 130	
127    	void open( ofstream & os, const char * name ) {
   131	void open( ofstream & os, const char name[] ) {
128 132	open( os, name, "w" );
129 133	} // open
130 134	
131 135	void close( ofstream & os ) {
132    	if ( (FILE *)(os.file) == stdout || (FILE *)(os.file) == stderr ) return;
133    	
134    	if ( fclose( (FILE *)(os.file) ) == EOF ) {
   136	if ( (FILE *)(os.$file) == stdout || (FILE *)(os.$file) == stderr ) return;
   137	
   138	if ( fclose( (FILE *)(os.$file) ) == EOF ) {
135 139	abort | IO_MSG "close output" | nl | strerror( errno );
136 140	} // if
137 141	} // close
138 142	
139    	ofstream & write( ofstream & os, const char * data, size_t size ) {
   143	ofstream & write( ofstream & os, const char data[], size_t size ) {
140 144	if ( fail( os ) ) {
141 145	abort | IO_MSG "attempt write I/O on failed stream";
142 146	} // if
143 147	
144    	if ( fwrite( data, 1, size, (FILE *)(os.file) ) != size ) {
   148	if ( fwrite( data, 1, size, (FILE *)(os.$file) ) != size ) {
145 149	abort | IO_MSG "write" | nl | strerror( errno );
146 150	} // if
… …
151 155	va_list args;
152 156	va_start( args, format );
153    	int len = vfprintf( (FILE *)(os.file), format, args );
   157	int len = vfprintf( (FILE *)(os.$file), format, args );
154 158	if ( len == EOF ) {
155    	if ( ferror( (FILE *)(os.file) ) ) {
   159	if ( ferror( (FILE *)(os.$file) ) ) {
156 160	abort | IO_MSG "invalid write";
157 161	} // if
… …
159 163	va_end( args );
160 164	
161    	setPrt( os, true );	// called in output cascade
162    	sepReset( os );	// reset separator
   165	$setPrt( os, true );	// called in output cascade
   166	$sepReset( os );	// reset separator
163 167	return len;
164 168	} // fmt
165 169	
166    	static ofstream soutFile = { (FILE *)(&_IO_2_1_stdout_) };
   170	static ofstream soutFile = { (FILE *)stdout };
167 171	ofstream & sout = soutFile, & stdout = soutFile;
168    	static ofstream serrFile = { (FILE *)(&_IO_2_1_stderr_) };
   172	static ofstream serrFile = { (FILE *)stderr };
169 173	ofstream & serr = serrFile, & stderr = serrFile;
170 174	
171    	static ofstream exitFile = { (FILE *)(&_IO_2_1_stdout_) };
   175	static ofstream exitFile = { (FILE *)stdout };
172 176	ofstream & exit = exitFile;
173    	static ofstream abortFile = { (FILE *)(&_IO_2_1_stderr_) };
   177	static ofstream abortFile = { (FILE *)stderr };
174 178	ofstream & abort = abortFile;
175 179	
… …
180 184	// private
181 185	void ?{}( ifstream & is, void * file ) {
182    	is.file = file;
183    	is.nlOnOff = false;
   186	is.$file = file;
   187	is.$nlOnOff = false;
184 188	} // ?{}
185 189	
186 190	// public
187    	void ?{}( ifstream & is ) { is.file = 0; }
188    	
189    	void ?{}( ifstream & is, const char * name, const char * mode ) {
   191	void ?{}( ifstream & is ) { is.$file = 0p; }
   192	
   193	void ?{}( ifstream & is, const char name[], const char mode[] ) {
190 194	open( is, name, mode );
191 195	} // ?{}
192 196	
193    	void ?{}( ifstream & is, const char * name ) {
   197	void ?{}( ifstream & is, const char name[] ) {
194 198	open( is, name, "r" );
195 199	} // ?{}
196 200	
197    	void nlOn( ifstream & os ) { os.nlOnOff = true; }
198    	void nlOff( ifstream & os ) { os.nlOnOff = false; }
199    	bool getANL( ifstream & os ) { return os.nlOnOff; }
   201	void ^?{}( ifstream & is ) {
   202	close( is );
   203	} // ^?{}
   204	
   205	void nlOn( ifstream & os ) { os.$nlOnOff = true; }
   206	void nlOff( ifstream & os ) { os.$nlOnOff = false; }
   207	bool getANL( ifstream & os ) { return os.$nlOnOff; }
200 208	
201 209	int fail( ifstream & is ) {
202    	return is.file == 0 || ferror( (FILE *)(is.file) );
   210	return is.$file == 0p || ferror( (FILE *)(is.$file) );
203 211	} // fail
204 212	
205 213	int eof( ifstream & is ) {
206    	return feof( (FILE *)(is.file) );
   214	return feof( (FILE *)(is.$file) );
207 215	} // eof
208 216	
209    	void open( ifstream & is, const char * name, const char * mode ) {
   217	void open( ifstream & is, const char name[], const char mode[] ) {
210 218	FILE * file = fopen( name, mode );
211 219	#ifdef __CFA_DEBUG__
212    	if ( file == 0 ) {
   220	if ( file == 0p ) {
213 221	abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno );
214 222	} // if
215 223	#endif // __CFA_DEBUG__
216    	is.file = file;
217    	} // open
218    	
219    	void open( ifstream & is, const char * name ) {
   224	is.$file = file;
   225	} // open
   226	
   227	void open( ifstream & is, const char name[] ) {
220 228	open( is, name, "r" );
221 229	} // open
222 230	
223 231	void close( ifstream & is ) {
224    	if ( (FILE *)(is.file) == stdin ) return;
225    	
226    	if ( fclose( (FILE *)(is.file) ) == EOF ) {
   232	if ( (FILE *)(is.$file) == stdin ) return;
   233	
   234	if ( fclose( (FILE *)(is.$file) ) == EOF ) {
227 235	abort | IO_MSG "close input" | nl | strerror( errno );
228 236	} // if
… …
234 242	} // if
235 243	
236    	if ( fread( data, size, 1, (FILE *)(is.file) ) == 0 ) {
   244	if ( fread( data, size, 1, (FILE *)(is.$file) ) == 0 ) {
237 245	abort | IO_MSG "read" | nl | strerror( errno );
238 246	} // if
… …
245 253	} // if
246 254	
247    	if ( ungetc( c, (FILE *)(is.file) ) == EOF ) {
   255	if ( ungetc( c, (FILE *)(is.$file) ) == EOF ) {
248 256	abort | IO_MSG "ungetc" | nl | strerror( errno );
249 257	} // if
… …
255 263	
256 264	va_start( args, format );
257    	int len = vfscanf( (FILE *)(is.file), format, args );
   265	int len = vfscanf( (FILE *)(is.$file), format, args );
258 266	if ( len == EOF ) {
259    	if ( ferror( (FILE *)(is.file) ) ) {
   267	if ( ferror( (FILE *)(is.$file) ) ) {
260 268	abort | IO_MSG "invalid read";
261 269	} // if
… …
265 273	} // fmt
266 274	
267    	
268    	static ifstream sinFile = { (FILE *)(&_IO_2_1_stdin_) };
   275	static ifstream sinFile = { (FILE *)stdin };
269 276	ifstream & sin = sinFile, & stdin = sinFile;
270 277	
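The new ^?{} destructors above close the stream automatically when an ofstream/ifstream goes out of scope, and the $-prefixed fields mark the private state. In plain C the same open/close discipline has to be spelled out; a small sketch of the checks the CFA code performs (ofstream_open/ofstream_close are hypothetical names, not the library's API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    typedef struct { FILE * file; } ofstream;

    void ofstream_open( ofstream * os, const char name[], const char mode[] ) {
        os->file = fopen( name, mode );
        if ( os->file == NULL ) {                  // mirror the __CFA_DEBUG__ check
            fprintf( stderr, "open output file \"%s\": %s\n", name, strerror( errno ) );
            exit( EXIT_FAILURE );
        }
    }

    void ofstream_close( ofstream * os ) {         // what the new ^?{} runs implicitly
        if ( os->file == stdout || os->file == stderr ) return;  // never close std streams
        if ( fclose( os->file ) == EOF )
            perror( "close output" );
    }

-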
libcfa/src/fstream.hfa
r9fb8f01 r3d5701e
10 10	// Created On       : Wed May 27 17:56:53 2015
11 11	// Last Modified By : Peter A. Buhr
12    	// Last Modified On : Mon Jul 15 18:10:23 2019
13    	// Update Count     : 167
   12 	// Last Modified On : Mon Feb 17 08:29:23 2020
   13 	// Update Count     : 175
14 14	//
15 15	
… …
24 24	enum { sepSize = 16 };
25 25	struct ofstream {
26    	void * file;
27    	bool sepDefault;
28    	bool sepOnOff;
29    	bool nlOnOff;
30    	bool prt;	// print text
31    	bool sawNL;
32    	const char * sepCur;
33    	char separator[sepSize];
34    	char tupleSeparator[sepSize];
   26 	void * $file;
   27 	bool $sepDefault;
   28 	bool $sepOnOff;
   29 	bool $nlOnOff;
   30 	bool $prt;	// print text
   31 	bool $sawNL;
   32 	const char * $sepCur;
   33 	char $separator[sepSize];
   34 	char $tupleSeparator[sepSize];
35 35	}; // ofstream
36 36	
37 37	// private
38    	bool sepPrt( ofstream & );
39    	void sepReset( ofstream & );
40    	void sepReset( ofstream &, bool );
41    	const char * sepGetCur( ofstream & );
42    	void sepSetCur( ofstream &, const char * );
43    	bool getNL( ofstream & );
44    	void setNL( ofstream &, bool );
45    	bool getANL( ofstream & );
46    	bool getPrt( ofstream & );
47    	void setPrt( ofstream &, bool );
   38 	bool $sepPrt( ofstream & );
   39 	void $sepReset( ofstream & );
   40 	void $sepReset( ofstream &, bool );
   41 	const char * $sepGetCur( ofstream & );
   42 	void $sepSetCur( ofstream &, const char [] );
   43 	bool $getNL( ofstream & );
   44 	void $setNL( ofstream &, bool );
   45 	bool $getANL( ofstream & );
   46 	bool $getPrt( ofstream & );
   47 	void $setPrt( ofstream &, bool );
48 48	
49 49	// public
… …
56 56	
57 57	const char * sepGet( ofstream & );
58    	void sepSet( ofstream &, const char * );
   58 	void sepSet( ofstream &, const char [] );
59 59	const char * sepGetTuple( ofstream & );
60    	void sepSetTuple( ofstream &, const char * );
   60 	void sepSetTuple( ofstream &, const char [] );
61 61	
62 62	void ends( ofstream & os );
63 63	int fail( ofstream & );
64 64	int flush( ofstream & );
65    	void open( ofstream &, const char * name, const char * mode );
66    	void open( ofstream &, const char * name );
   65 	void open( ofstream &, const char name[], const char mode[] );
   66 	void open( ofstream &, const char name[] );
67 67	void close( ofstream & );
68    	ofstream & write( ofstream &, const char * data, size_t size );
69    	int fmt( ofstream &, const char format[], ... );
   68 	ofstream & write( ofstream &, const char data[], size_t size );
   69 	int fmt( ofstream &, const char format[], ... ) __attribute__(( format(printf, 2, 3) ));
70 70	
71 71	void ?{}( ofstream & os );
72    	void ?{}( ofstream & os, const char * name, const char * mode );
73    	void ?{}( ofstream & os, const char * name );
   72 	void ?{}( ofstream & os, const char name[], const char mode[] );
   73 	void ?{}( ofstream & os, const char name[] );
   74 	void ^?{}( ofstream & os );
74 75	
75 76	extern ofstream & sout, & stdout, & serr, & stderr;	// aliases
… …
81 82	
82 83	struct ifstream {
83    	void * file;
84    	bool nlOnOff;
   84 	void * $file;
   85 	bool $nlOnOff;
85 86	}; // ifstream
86 87	
… …
91 92	int fail( ifstream & is );
92 93	int eof( ifstream & is );
93    	void open( ifstream & is, const char * name, const char * mode );
94    	void open( ifstream & is, const char * name );
   94 	void open( ifstream & is, const char name[], const char mode[] );
   95 	void open( ifstream & is, const char name[] );
95 96	void close( ifstream & is );
96 97	ifstream & read( ifstream & is, char * data, size_t size );
97 98	ifstream & ungetc( ifstream & is, char c );
98    	int fmt( ifstream &, const char format[], ... );
   99 	int fmt( ifstream &, const char format[], ... ) __attribute__(( format(scanf, 2, 3) ));
99 100	
100 101	void ?{}( ifstream & is );
101    	void ?{}( ifstream & is, const char * name, const char * mode );
102    	void ?{}( ifstream & is, const char * name );
   102	void ?{}( ifstream & is, const char name[], const char mode[] );
   103	void ?{}( ifstream & is, const char name[] );
   104	void ^?{}( ifstream & is );
103 105	
104 106	extern ifstream & sin, & stdin;	// aliases
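The __attribute__(( format(printf, 2, 3) )) added to fmt above tells gcc that argument 2 is a printf-style format consumed by the trailing varargs, enabling compile-time checking at every call site. A minimal C demonstration of the same attribute on a vfprintf wrapper:

    #include <stdarg.h>
    #include <stdio.h>

    // The format attribute lets the compiler type-check variadic arguments.
    int fmt( FILE * f, const char format[], ... ) __attribute__(( format(printf, 2, 3) ));

    int fmt( FILE * f, const char format[], ... ) {
        va_list args;
        va_start( args, format );
        int len = vfprintf( f, format, args );
        va_end( args );
        return len;
    }

    // fmt( stdout, "%d", "oops" );   // with the attribute, gcc warns: %d expects int

-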
libcfa/src/gmp.hfa
r9fb8f01 r3d5701e
10 10	// Created On       : Tue Apr 19 08:43:43 2016
11 11	// Last Modified By : Peter A. Buhr
12    	// Last Modified On : Sat Jul 13 15:25:05 2019
13    	// Update Count     : 27
   12 	// Last Modified On : Sun Feb 9 09:56:54 2020
   13 	// Update Count     : 31
14 14	//
15 15	
… …
24 24	
25 25	static inline {
26    	// constructor
   26 	// constructor, zero_t/one_t are unnecessary because of relationship with signed/unsigned int
27 27	void ?{}( Int & this ) { mpz_init( this.mpz ); }
28 28	void ?{}( Int & this, Int init ) { mpz_init_set( this.mpz, init.mpz ); }
29    	void ?{}( Int & this, zero_t ) { mpz_init_set_si( this.mpz, 0 ); }
30    	void ?{}( Int & this, one_t ) { mpz_init_set_si( this.mpz, 1 ); }
31 29	void ?{}( Int & this, signed long int init ) { mpz_init_set_si( this.mpz, init ); }
32 30	void ?{}( Int & this, unsigned long int init ) { mpz_init_set_ui( this.mpz, init ); }
33    	void ?{}( Int & this, const char * val ) { if ( mpz_init_set_str( this.mpz, val, 0 ) ) abort(); }
   31 	void ?{}( Int & this, const char val[] ) { if ( mpz_init_set_str( this.mpz, val, 0 ) ) abort(); }
34 32	void ^?{}( Int & this ) { mpz_clear( this.mpz ); }
35 33	
… …
37 35	Int ?`mp( signed long int init ) { return (Int){ init }; }
38 36	Int ?`mp( unsigned long int init ) { return (Int){ init }; }
39    	Int ?`mp( const char * init ) { return (Int){ init }; }
   37 	Int ?`mp( const char init[] ) { return (Int){ init }; }
40 38	
41 39	// assignment
… …
43 41	Int ?=?( Int & lhs, long int rhs ) { mpz_set_si( lhs.mpz, rhs ); return lhs; }
44 42	Int ?=?( Int & lhs, unsigned long int rhs ) { mpz_set_ui( lhs.mpz, rhs ); return lhs; }
45    	Int ?=?( Int & lhs, const char * rhs ) { if ( mpz_set_str( lhs.mpz, rhs, 0 ) ) { abort | "invalid string conversion"; } return lhs; }
   43 	Int ?=?( Int & lhs, const char rhs[] ) { if ( mpz_set_str( lhs.mpz, rhs, 0 ) ) { abort | "invalid string conversion"; } return lhs; }
46 44	
47 45	char ?=?( char & lhs, Int rhs ) { char val = mpz_get_si( rhs.mpz ); lhs = val; return lhs; }
… …
265 263	forall( dtype ostype | ostream( ostype ) ) {
266 264	ostype & ?|?( ostype & os, Int mp ) {
267    	if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) );
   265	if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
268 266	gmp_printf( "%Zd", mp.mpz );
269 267	sepOn( os );
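The Int constructors above are thin wrappers over GMP's mpz_init_set_* family, which is why the zero_t/one_t overloads could be dropped: the signed-int constructor already covers those literals. The underlying GMP calls, shown directly in C for reference:

    #include <gmp.h>

    int main() {
        mpz_t x, y;
        mpz_init_set_si( x, 42 );          // what ?{}( Int &, signed long int ) wraps
        if ( mpz_init_set_str( y, "1234567890123456789012345678901234567890", 0 ) )
            return 1;                      // ?{}( Int &, const char [] ) aborts here
        mpz_add( x, x, y );
        gmp_printf( "%Zd\n", x );          // same primitive the ?|? operator uses
        mpz_clear( x ); mpz_clear( y );
        return 0;
    }

-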
libcfa/src/heap.cfa
r9fb8f01 r3d5701e
10 10	// Created On       : Tue Dec 19 21:58:35 2017
11 11	// Last Modified By : Peter A. Buhr
12    	// Last Modified On : Wed Jul 24 13:12:45 2019
13    	// Update Count     : 550
   12 	// Last Modified On : Tue Feb 4 10:04:51 2020
   13 	// Update Count     : 648
14 14	//
15 15	
… …
18 18	#include <stdio.h>	// snprintf, fileno
19 19	#include <errno.h>	// errno
   20 	#include <string.h>	// memset, memcpy
20 21	extern "C" {
21 22	#include <sys/mman.h>	// mmap, munmap
… …
27 28	#include "bits/locks.hfa"	// __spinlock_t
28 29	#include "startup.hfa"	// STARTUP_PRIORITY_MEMORY
29    	#include "stdlib.hfa"	// bsearchl
   30 	//#include "stdlib.hfa"	// bsearchl
30 31	#include "malloc.h"
31 32	
   33 	#define MIN(x, y) (y > x ? x : y)
32 34	
33 35	static bool traceHeap = false;
34 36	
35    	inline bool traceHeap() {
36    	return traceHeap;
37    	} // traceHeap
   37 	inline bool traceHeap() { return traceHeap; }
38 38	
39 39	bool traceHeapOn() {
… …
49 49	} // traceHeapOff
50 50	
51    	
52    	static bool checkFree = false;
53    	
54    	inline bool checkFree() {
55    	return checkFree;
56    	} // checkFree
57    	
58    	bool checkFreeOn() {
59    	bool temp = checkFree;
60    	checkFree = true;
   51 	bool traceHeapTerm() { return false; }
   52 	
   53 	
   54 	static bool prtFree = false;
   55 	
   56 	inline bool prtFree() {
   57 	return prtFree;
   58 	} // prtFree
   59 	
   60 	bool prtFreeOn() {
   61 	bool temp = prtFree;
   62 	prtFree = true;
61 63	return temp;
62    	} // checkFreeOn
63    	
64    	bool checkFreeOff() {
65    	bool temp = checkFree;
66    	checkFree = false;
   64 	} // prtFreeOn
   65 	
   66 	bool prtFreeOff() {
   67 	bool temp = prtFree;
   68 	prtFree = false;
67 69	return temp;
68    	} // checkFreeOff
69    	
70    	
71    	// static bool traceHeapTerm = false;
72    	
73    	// inline bool traceHeapTerm() {
74    	// return traceHeapTerm;
75    	// } // traceHeapTerm
76    	
77    	// bool traceHeapTermOn() {
78    	// bool temp = traceHeapTerm;
79    	// traceHeapTerm = true;
80    	// return temp;
81    	// } // traceHeapTermOn
82    	
83    	// bool traceHeapTermOff() {
84    	// bool temp = traceHeapTerm;
85    	// traceHeapTerm = false;
86    	// return temp;
87    	// } // traceHeapTermOff
   70 	} // prtFreeOff
88 71	
89 72	
90 73	enum {
   74 	// Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
   75 	// the brk address is extended by the extension amount.
   76 	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
   77 	
   78 	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
   79 	// values greater than or equal to this value are mmap from the operating system.
91 80	__CFA_DEFAULT_MMAP_START__ = (512 * 1024 + 1),
92    	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
93 81	};
94 82	
… …
105 93	static unsigned int allocFree;	// running total of allocations minus frees
106 94	
107    	static void checkUnfreed() {
   95 	static void prtUnfreed() {
108 96	if ( allocFree != 0 ) {
109 97	// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
110   	//char helpText[512];
111   	//int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
112   	//"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
113   	//(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
114   	// __cfaabi_dbg_bits_write( helpText, len );
   98 	char helpText[512];
   99 	int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %u(0x%x) bytes of storage allocated but not freed.\n"
   100	"Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
   101	(long int)getpid(), allocFree, allocFree ); // always print the UNIX pid
   102	__cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
115 103	} // if
116   	} // checkUnfreed
   104	} // prtUnfreed
117 105	
118 106	extern "C" {
… …
123 111	void heapAppStop() {	// called by __cfaabi_appready_startdown
124 112	fclose( stdin ); fclose( stdout );
125   	checkUnfreed();
   113	prtUnfreed();
126 114	} // heapAppStop
127 115	} // extern "C"
128 116	#endif // __CFA_DEBUG__
   117	
129 118	
130 119	// statically allocated variables => zero filled.
… …
134 123	static unsigned int maxBucketsUsed;	// maximum number of buckets in use
135 124	
136   	
137   	// #comment TD : This defined is significantly different from the __ALIGN__ define from locks.hfa
138   	#define ALIGN 16
139   	
140 125	
141 126	#define SPINLOCK 0
… …
147 133	// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
148 134	// Break recusion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
149   	enum { NoBucketSizes = 93 };	// number of buckets sizes
   135	enum { NoBucketSizes = 91 };	// number of buckets sizes
150 136	
151 137	struct HeapManager {
… …
194 180	} kind; // Kind
195 181	} header; // Header
196   	char pad[ALIGN - sizeof( Header )];
   182	char pad[libAlign() - sizeof( Header )];
197 183	char data[0];	// storage
198 184	}; // Storage
199 185	
200   	static_assert( ALIGN >= sizeof( Storage ), "ALIGN < sizeof( Storage )" );
   186	static_assert( libAlign() >= sizeof( Storage ), "libAlign() < sizeof( Storage )" );
201 187	
202 188	struct FreeHeader {
… …
228 214	#define __STATISTICS__
229 215	
   216	// Bucket size must be multiple of 16.
230 217	// Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
231 218	static const unsigned int bucketSizes[] @= {	// different bucket sizes
232   	16, 32, 48, 64,
233   	64 + sizeof(HeapManager.Storage), 96, 112, 128, 128 + sizeof(HeapManager.Storage), 160, 192, 224,
234   	256 + sizeof(HeapManager.Storage), 320, 384, 448, 512 + sizeof(HeapManager.Storage), 640, 768, 896,
235   	1_024 + sizeof(HeapManager.Storage), 1_536, 2_048 + sizeof(HeapManager.Storage), 2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), 6_144,
236   	8_192 + sizeof(HeapManager.Storage), 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360,
237   	16_384 + sizeof(HeapManager.Storage), 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720,
238   	32_768 + sizeof(HeapManager.Storage), 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440,
239   	65_536 + sizeof(HeapManager.Storage), 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880,
240   	131_072 + sizeof(HeapManager.Storage), 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760,
241   	262_144 + sizeof(HeapManager.Storage), 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520,
242   	524_288 + sizeof(HeapManager.Storage), 655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), 1_179_648, 1_310_720, 1_441_792,
243   	1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), 2_621_440, 3_145_728, 3_670_016,
244   	4_194_304 + sizeof(HeapManager.Storage)
   219	16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
   220	96, 112, 128 + sizeof(HeapManager.Storage), // 3
   221	160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
   222	320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
   223	640, 768, 896, 1_024 + sizeof(HeapManager.Storage), // 4
   224	1_536, 2_048 + sizeof(HeapManager.Storage), // 2
   225	2_560, 3_072, 3_584, 4_096 + sizeof(HeapManager.Storage), // 4
   226	6_144, 8_192 + sizeof(HeapManager.Storage), // 2
   227	9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(HeapManager.Storage), // 8
   228	18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(HeapManager.Storage), // 8
   229	36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(HeapManager.Storage), // 8
   230	73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(HeapManager.Storage), // 8
   231	147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(HeapManager.Storage), // 8
   232	294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(HeapManager.Storage), // 8
   233	655_360, 786_432, 917_504, 1_048_576 + sizeof(HeapManager.Storage), // 4
   234	1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(HeapManager.Storage), // 8
   235	2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(HeapManager.Storage), // 4
245 236	};
246 237	
… …
251 242	static unsigned char lookup[LookupSizes];	// O(1) lookup for small sizes
252 243	#endif // FASTLOOKUP
   244	
253 245	static int mmapFd = -1;	// fake or actual fd for anonymous file
254   	
255   	
256 246	#ifdef __CFA_DEBUG__
257 247	static bool heapBoot = 0;	// detect recursion during boot
… …
259 249	static HeapManager heapManager __attribute__(( aligned (128) )) @= {};	// size of cache line to prevent false sharing
260 250	
261   	// #comment TD : The return type of this function should be commented
262   	static inline bool setMmapStart( size_t value ) {
263   	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
264   	mmapStart = value;	// set global
265   	
266   	// find the closest bucket size less than or equal to the mmapStart size
267   	maxBucketsUsed = bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes );	// binary search
268   	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
269   	assert( mmapStart <= bucketSizes[maxBucketsUsed] );	// search failure ?
270   	return false;
271   	} // setMmapStart
272   	
273   	
274   	static void ?{}( HeapManager & manager ) with ( manager ) {
275   	pageSize = sysconf( _SC_PAGESIZE );
276   	
277   	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) {	// initialize the free lists
278   	freeLists[i].blockSize = bucketSizes[i];
279   	} // for
280   	
281   	#ifdef FASTLOOKUP
282   	unsigned int idx = 0;
283   	for ( unsigned int i = 0; i < LookupSizes; i += 1 ) {
284   	if ( i > bucketSizes[idx] ) idx += 1;
285   	lookup[i] = idx;
286   	} // for
287   	#endif // FASTLOOKUP
288   	
289   	if ( setMmapStart( default_mmap_start() ) ) {
290   	abort( "HeapManager : internal error, mmap start initialization failure." );
291   	} // if
292   	heapExpand = default_heap_expansion();
293   	
294   	char * End = (char *)sbrk( 0 );
295   	sbrk( (char *)libCeiling( (long unsigned int)End, libAlign() ) - End );	// move start of heap to multiple of alignment
296   	heapBegin = heapEnd = sbrk( 0 );	// get new start point
297   	} // HeapManager
298   	
299   	
300   	static void ^?{}( HeapManager & ) {
301   	#ifdef __STATISTICS__
302   	// if ( traceHeapTerm() ) {
303   	//	printStats();
304   	//	if ( checkfree() ) checkFree( heapManager, true );
305   	// } // if
306   	#endif // __STATISTICS__
307   	} // ~HeapManager
308   	
309   	
310   	static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) ));
311   	void memory_startup( void ) {
312   	#ifdef __CFA_DEBUG__
313   	if ( unlikely( heapBoot ) ) {	// check for recursion during system boot
314   	// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
315   	abort( "boot() : internal error, recursively invoked during system boot." );
316   	} // if
317   	heapBoot = true;
318   	#endif // __CFA_DEBUG__
319   	
320   	//assert( heapManager.heapBegin != 0 );
321   	//heapManager{};
322   	if ( heapManager.heapBegin == 0 ) heapManager{};
323   	} // memory_startup
324   	
325   	static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) ));
326   	void memory_shutdown( void ) {
327   	^heapManager{};
328   	} // memory_shutdown
329   	
330   	
331 251	#ifdef __STATISTICS__
332   	static unsigned long long int mmap_storage;	// heap statistics counters
   252	// Heap statistics counters.
   253	static unsigned long long int mmap_storage;
333 254	static unsigned int mmap_calls;
334 255	static unsigned long long int munmap_storage;
… …
348 270	static unsigned long long int realloc_storage;
349 271	static unsigned int realloc_calls;
350   	
351   	static int statfd;	// statistics file descriptor (changed by malloc_stats_fd)
352   	
   272	// Statistics file descriptor (changed by malloc_stats_fd).
   273	static int statfd = STDERR_FILENO;	// default stderr
353 274	
354 275	// Use "write" because streams may be shutdown when calls are made.
355 276	static void printStats() {
356 277	char helpText[512];
357   	__cfaabi_dbg_bits_print_buffer( helpText, sizeof(helpText),
   278	__cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
358 279	"\nHeap statistics:\n"
359 280	"  malloc: calls %u / storage %llu\n"
… …
405 326	sbrk_calls, sbrk_storage
406 327	);
407   	return write( fileno( stream ), helpText, len );	// -1 => error
   328	__cfaabi_bits_write( fileno( stream ), helpText, len );	// ensures all bytes written or exit
   329	return len;
408 330	} // printStatsXML
409 331	#endif // __STATISTICS__
410 332	
411   	// #comment TD : Is this the samething as Out-of-Memory?
412   	static inline void noMemory() {
413   	abort( "Heap memory exhausted at %zu bytes.\n"
414   	"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
415   	((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
416   	} // noMemory
   333	
   334	// static inline void noMemory() {
   335	//	abort( "Heap memory exhausted at %zu bytes.\n"
   336	//	"Possible cause is very large memory allocation and/or large amount of unfreed storage allocated by the program or system/library routines.",
   337	//	((char *)(sbrk( 0 )) - (char *)(heapManager.heapBegin)) );
   338	// } // noMemory
417 339	
418 340	
419 341	static inline void checkAlign( size_t alignment ) {
420   	if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) {
421   	abort( "Alignment %zu for memory allocation is less than sizeof(void *) and/or not a power of 2.", alignment );
   342	if ( alignment < libAlign() || ! libPow2( alignment ) ) {
   343	abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );
422 344	} // if
423 345	} // checkAlign
… …
431 353	
432 354	
433   	static inline void checkHeader( bool check, const char * name, void * addr ) {
434   	if ( unlikely( check ) ) {	// bad address ?
435   	abort( "Attempt to %s storage %p with address outside the heap.\n"
436   	"Possible cause is duplicate free on same block or overwriting of memory.",
437   	name, addr );
438   	} // if
439   	} // checkHeader
440   	
441   	// #comment TD : function should be commented and/or have a more evocative name
442   	// this isn't either a check or a constructor which is what I would expect this function to be
443   	static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & size, size_t & alignment ) {
444   	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) {	// fake header ?
445   	size_t offset = header->kind.fake.offset;
446   	alignment = header->kind.fake.alignment & -2;	// remove flag from value
447   	#ifdef __CFA_DEBUG__
448   	checkAlign( alignment );	// check alignment
449   	#endif // __CFA_DEBUG__
450   	header = (HeapManager.Storage.Header *)((char *)header - offset);
451   	} // if
452   	} // fakeHeader
453   	
454   	// #comment TD : Why is this a define
455   	#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
456   	
457   	static inline bool headers( const char * name, void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
458   	header = headerAddr( addr );
459   	
460   	if ( unlikely( heapEnd < addr ) ) {	// mmapped ?
461   	fakeHeader( header, size, alignment );
462   	size = header->kind.real.blockSize & -3;	// mmap size
463   	return true;
464   	} // if
465   	
466   	#ifdef __CFA_DEBUG__
467   	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
468   	#endif // __CFA_DEBUG__
469   	
470   	// #comment TD : This code looks weird...
471   	// It's called as the first statement of both branches of the last if, with the same parameters in all cases
472   	
473   	// header may be safe to dereference
474   	fakeHeader( header, size, alignment );
475   	#ifdef __CFA_DEBUG__
476   	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
477   	#endif // __CFA_DEBUG__
478   	
479   	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
480   	#ifdef __CFA_DEBUG__
481   	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
482   	abort( "Attempt to %s storage %p with corrupted header.\n"
483   	"Possible cause is duplicate free on same block or overwriting of header information.",
484   	name, addr );
485   	} // if
486   	#endif // __CFA_DEBUG__
487   	size = freeElem->blockSize;
488   	return false;
489   	} // headers
490   	
491   	
492   	static inline void * extend( size_t size ) with ( heapManager ) {
493   	lock( extlock __cfaabi_dbg_ctx2 );
494   	ptrdiff_t rem = heapRemaining - size;
495   	if ( rem < 0 ) {
496   	// If the size requested is bigger than the current remaining storage, increase the size of the heap.
497   	
498   	size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
499   	if ( sbrk( increase ) == (void *)-1 ) {
500   	unlock( extlock );
501   	errno = ENOMEM;
502   	return 0;
503   	} // if
504   	#ifdef __STATISTICS__
505   	sbrk_calls += 1;
506   	sbrk_storage += increase;
507   	#endif // __STATISTICS__
508   	#ifdef __CFA_DEBUG__
509   	// Set new memory to garbage so subsequent uninitialized usages might fail.
510   	memset( (char *)heapEnd + heapRemaining, '\377', increase );
511   	#endif // __CFA_DEBUG__
512   	rem = heapRemaining + increase - size;
513   	} // if
514   	
515   	HeapManager.Storage * block = (HeapManager.Storage *)heapEnd;
516   	heapRemaining = rem;
517   	heapEnd = (char *)heapEnd + size;
518   	unlock( extlock );
519   	return block;
520   	} // extend
521   	
522   	
   355	// thunk problem
523 356	size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
524 357	size_t l = 0, m, h = dim;
… …
535 368	
536 369	
   370	static inline bool setMmapStart( size_t value ) {	// true => mmapped, false => sbrk
   371	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;
   372	mmapStart = value;	// set global
   373	
   374	// find the closest bucket size less than or equal to the mmapStart size
   375	maxBucketsUsed = Bsearchl( (unsigned int)mmapStart, bucketSizes, NoBucketSizes );	// binary search
   376	assert( maxBucketsUsed < NoBucketSizes );	// subscript failure ?
   377	assert( mmapStart <= bucketSizes[maxBucketsUsed] );	// search failure ?
   378	return false;
   379	} // setMmapStart
   380	
   381	
   382	static inline void checkHeader( bool check, const char name[], void * addr ) {
   383	if ( unlikely( check ) ) {	// bad address ?
   384	abort( "Attempt to %s storage %p with address outside the heap.\n"
   385	"Possible cause is duplicate free on same block or overwriting of memory.",
   386	name, addr );
   387	} // if
   388	} // checkHeader
   389	
   390	
   391	static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) {
   392	if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) {	// fake header ?
   393	size_t offset = header->kind.fake.offset;
   394	alignment = header->kind.fake.alignment & -2;	// remove flag from value
   395	#ifdef __CFA_DEBUG__
   396	checkAlign( alignment );	// check alignment
   397	#endif // __CFA_DEBUG__
   398	header = (HeapManager.Storage.Header *)((char *)header - offset);
   399	} // if
   400	} // fakeHeader
   401	
   402	
   403	// <-------+----------------------------------------------------> bsize (bucket size)
   404	// |header |addr
   405	//==================================================================================
   406	// | alignment
   407	// <-----------------<------------+-----------------------------> bsize (bucket size)
   408	// |fake-header | addr
   409	#define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))
   410	
   411	// <-------<<--------------------- dsize ---------------------->> bsize (bucket size)
   412	// |header |addr
   413	//==================================================================================
   414	// | alignment
   415	// <------------------------------<<---------- dsize --------->>> bsize (bucket size)
   416	// |fake-header |addr
   417	#define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))
   418	
   419	
   420	static inline bool headers( const char name[] __attribute__(( unused )), void * addr, HeapManager.Storage.Header *& header, HeapManager.FreeHeader *& freeElem, size_t & size, size_t & alignment ) with ( heapManager ) {
   421	header = headerAddr( addr );
   422	
   423	if ( unlikely( heapEnd < addr ) ) {	// mmapped ?
   424	fakeHeader( header, alignment );
   425	size = header->kind.real.blockSize & -3;	// mmap size
   426	return true;
   427	} // if
   428	
   429	#ifdef __CFA_DEBUG__
   430	checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
   431	#endif // __CFA_DEBUG__
   432	
   433	// header may be safe to dereference
   434	fakeHeader( header, alignment );
   435	#ifdef __CFA_DEBUG__
   436	checkHeader( header < (HeapManager.Storage.Header *)heapBegin || (HeapManager.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
   437	#endif // __CFA_DEBUG__
   438	
   439	freeElem = (HeapManager.FreeHeader *)((size_t)header->kind.real.home & -3);
   440	#ifdef __CFA_DEBUG__
   441	if ( freeElem < &freeLists[0] || &freeLists[NoBucketSizes] <= freeElem ) {
   442	abort( "Attempt to %s storage %p with corrupted header.\n"
   443	"Possible cause is duplicate free on same block or overwriting of header information.",
   444	name, addr );
   445	} // if
   446	#endif // __CFA_DEBUG__
   447	size = freeElem->blockSize;
   448	return false;
   449	} // headers
   450	
   451	
   452	static inline void * extend( size_t size ) with ( heapManager ) {
   453	lock( extlock __cfaabi_dbg_ctx2 );
   454	ptrdiff_t rem = heapRemaining - size;
   455	if ( rem < 0 ) {
   456	// If the size requested is bigger than the current remaining storage, increase the size of the heap.
   457	
   458	size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() );
   459	if ( sbrk( increase ) == (void *)-1 ) {
   460	unlock( extlock );
   461	errno = ENOMEM;
   462	return 0p;
   463	} // if
   464	#ifdef __STATISTICS__
   465	sbrk_calls += 1;
   466	sbrk_storage += increase;
   467	#endif // __STATISTICS__
   468	#ifdef __CFA_DEBUG__
   469	// Set new memory to garbage so subsequent uninitialized usages might fail.
470 memset( (char *)heapEnd + heapRemaining, '\377', increase ); 471 #endif // __CFA_DEBUG__ 472 rem = heapRemaining + increase - size; 473 } // if 474 475 HeapManager.Storage * block = (HeapManager.Storage *)heapEnd; 476 heapRemaining = rem; 477 heapEnd = (char *)heapEnd + size; 478 unlock( extlock ); 479 return block; 480 } // extend 481 482 537 483 static inline void * doMalloc( size_t size ) with ( heapManager ) { 538 484 HeapManager.Storage * block; // pointer to new block of storage … … 541 487 // along with the block and is a multiple of the alignment size. 542 488 489 if ( unlikely( size > ~0ul - sizeof(HeapManager.Storage) ) ) return 0p; 543 490 size_t tsize = size + sizeof(HeapManager.Storage); 544 491 if ( likely( tsize < mmapStart ) ) { // small size => sbrk … … 573 520 block = freeElem->freeList.pop(); 574 521 #endif // SPINLOCK 575 if ( unlikely( block == 0 ) ) {// no free block ?522 if ( unlikely( block == 0p ) ) { // no free block ? 576 523 #if defined( SPINLOCK ) 577 524 unlock( freeElem->lock ); … … 582 529 583 530 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call 584 if ( unlikely( block == 0 ) ) return 0;585 #if defined( SPINLOCK )531 if ( unlikely( block == 0p ) ) return 0p; 532 #if defined( SPINLOCK ) 586 533 } else { 587 534 freeElem->freeList = block->header.kind.real.next; 588 535 unlock( freeElem->lock ); 589 #endif // SPINLOCK536 #endif // SPINLOCK 590 537 } // if 591 538 592 539 block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size 593 540 } else { // large size => mmap 541 if ( unlikely( size > ~0ul - pageSize ) ) return 0p; 594 542 tsize = libCeiling( tsize, pageSize ); // must be multiple of page size 595 543 #ifdef __STATISTICS__ … … 609 557 } // if 610 558 611 void * a rea= &(block->data); // adjust off header to user bytes559 void * addr = &(block->data); // adjust off header to user bytes 612 560 613 561 #ifdef __CFA_DEBUG__ 614 assert( ((uintptr_t)a rea& (libAlign() - 1)) == 0 ); // minimum alignment ?562 assert( ((uintptr_t)addr & (libAlign() - 1)) == 0 ); // minimum alignment ? 
615 563 __atomic_add_fetch( &allocFree, tsize, __ATOMIC_SEQ_CST ); 616 564 if ( traceHeap() ) { 617 565 enum { BufferSize = 64 }; 618 566 char helpText[BufferSize]; 619 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", a rea, size, tsize );620 // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", a rea, size );621 __cfaabi_ dbg_bits_write( helpText, len );567 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); 568 // int len = snprintf( helpText, BufferSize, "Malloc %p %zu\n", addr, size ); 569 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 622 570 } // if 623 571 #endif // __CFA_DEBUG__ 624 572 625 return a rea;573 return addr; 626 574 } // doMalloc 627 575 … … 629 577 static inline void doFree( void * addr ) with ( heapManager ) { 630 578 #ifdef __CFA_DEBUG__ 631 if ( unlikely( heapManager.heapBegin == 0 ) ) {579 if ( unlikely( heapManager.heapBegin == 0p ) ) { 632 580 abort( "doFree( %p ) : internal error, called before heap is initialized.", addr ); 633 581 } // if … … 675 623 char helpText[BufferSize]; 676 624 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size ); 677 __cfaabi_ dbg_bits_write( helpText, len );625 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 678 626 } // if 679 627 #endif // __CFA_DEBUG__ … … 681 629 682 630 683 size_t checkFree( HeapManager & manager ) with ( manager ) {631 size_t prtFree( HeapManager & manager ) with ( manager ) { 684 632 size_t total = 0; 685 633 #ifdef __STATISTICS__ 686 __cfaabi_ dbg_bits_acquire();687 __cfaabi_ dbg_bits_print_nolock("\nBin lists (bin size : free blocks on list)\n" );634 __cfaabi_bits_acquire(); 635 __cfaabi_bits_print_nolock( STDERR_FILENO, "\nBin lists (bin size : free blocks on list)\n" ); 688 636 #endif // __STATISTICS__ 689 637 for ( unsigned int i = 0; i < maxBucketsUsed; i += 1 ) { … … 694 642 695 643 #if defined( SPINLOCK ) 696 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0 ; p = p->header.kind.real.next ) {644 for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) { 697 645 #else 698 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0 ; p = p->header.kind.real.next.top ) {646 for ( HeapManager.Storage * p = freeLists[i].freeList.top(); p != 0p; p = p->header.kind.real.next.top ) { 699 647 #endif // SPINLOCK 700 648 total += size; … … 705 653 706 654 #ifdef __STATISTICS__ 707 __cfaabi_ dbg_bits_print_nolock("%7zu, %-7u ", size, N );708 if ( (i + 1) % 8 == 0 ) __cfaabi_ dbg_bits_print_nolock("\n" );655 __cfaabi_bits_print_nolock( STDERR_FILENO, "%7zu, %-7u ", size, N ); 656 if ( (i + 1) % 8 == 0 ) __cfaabi_bits_print_nolock( STDERR_FILENO, "\n" ); 709 657 #endif // __STATISTICS__ 710 658 } // for 711 659 #ifdef __STATISTICS__ 712 __cfaabi_ dbg_bits_print_nolock("\ntotal free blocks:%zu\n", total );713 __cfaabi_ dbg_bits_release();660 __cfaabi_bits_print_nolock( STDERR_FILENO, "\ntotal free blocks:%zu\n", total ); 661 __cfaabi_bits_release(); 714 662 #endif // __STATISTICS__ 715 663 return (char *)heapEnd - (char *)heapBegin - total; 716 } // checkFree 664 } // prtFree 665 666 667 static void ?{}( HeapManager & manager ) with ( manager ) { 668 pageSize = sysconf( _SC_PAGESIZE ); 669 670 for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists 671 freeLists[i].blockSize = bucketSizes[i]; 672 } // for 673 674 #ifdef FASTLOOKUP 675 unsigned int idx 
= 0; 676 for ( unsigned int i = 0; i < LookupSizes; i += 1 ) { 677 if ( i > bucketSizes[idx] ) idx += 1; 678 lookup[i] = idx; 679 } // for 680 #endif // FASTLOOKUP 681 682 if ( setMmapStart( default_mmap_start() ) ) { 683 abort( "HeapManager : internal error, mmap start initialization failure." ); 684 } // if 685 heapExpand = default_heap_expansion(); 686 687 char * end = (char *)sbrk( 0 ); 688 sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 689 heapBegin = heapEnd = sbrk( 0 ); // get new start point 690 } // HeapManager 691 692 693 static void ^?{}( HeapManager & ) { 694 #ifdef __STATISTICS__ 695 if ( traceHeapTerm() ) { 696 printStats(); 697 // if ( prtfree() ) prtFree( heapManager, true ); 698 } // if 699 #endif // __STATISTICS__ 700 } // ~HeapManager 701 702 703 static void memory_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_MEMORY ) )); 704 void memory_startup( void ) { 705 #ifdef __CFA_DEBUG__ 706 if ( unlikely( heapBoot ) ) { // check for recursion during system boot 707 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 708 abort( "boot() : internal error, recursively invoked during system boot." ); 709 } // if 710 heapBoot = true; 711 #endif // __CFA_DEBUG__ 712 713 //assert( heapManager.heapBegin != 0 ); 714 //heapManager{}; 715 if ( heapManager.heapBegin == 0p ) heapManager{}; 716 } // memory_startup 717 718 static void memory_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_MEMORY ) )); 719 void memory_shutdown( void ) { 720 ^heapManager{}; 721 } // memory_shutdown 717 722 718 723 719 724 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics 720 725 //assert( heapManager.heapBegin != 0 ); 721 if ( unlikely( heapManager.heapBegin == 0 ) ) heapManager{}; // called before memory_startup ?722 void * a rea= doMalloc( size );723 if ( unlikely( a rea == 0) ) errno = ENOMEM; // POSIX724 return a rea;726 if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ? 727 void * addr = doMalloc( size ); 728 if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX 729 return addr; 725 730 } // mallocNoStats 731 732 733 static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) { 734 size_t size = noOfElems * elemSize; 735 char * addr = (char *)mallocNoStats( size ); 736 if ( unlikely( addr == 0p ) ) return 0p; 737 738 HeapManager.Storage.Header * header; 739 HeapManager.FreeHeader * freeElem; 740 size_t bsize, alignment; 741 bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment ); 742 #ifndef __CFA_DEBUG__ 743 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 744 if ( ! mapped ) 745 #endif // __CFA_DEBUG__ 746 // Zero entire data space even when > than size => realloc without a new allocation and zero fill works. 747 // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size) 748 // `-header`-addr `-size 749 memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros 750 751 header->kind.real.blockSize |= 2; // mark as zero filled 752 return addr; 753 } // callocNoStats 726 754 727 755 … … 743 771 // subtract libAlign() because it is already the minimum alignment 744 772 // add sizeof(Storage) for fake header 745 // #comment TD : this is the only place that calls doMalloc without calling mallocNoStats, why ? 
746 char * area = (char *)doMalloc( size + alignment - libAlign() + sizeof(HeapManager.Storage) ); 747 if ( unlikely( area == 0 ) ) return area; 773 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) ); 774 if ( unlikely( addr == 0p ) ) return addr; 748 775 749 776 // address in the block of the "next" alignment address 750 char * user = (char *)libCeiling( (uintptr_t)(a rea+ sizeof(HeapManager.Storage)), alignment );777 char * user = (char *)libCeiling( (uintptr_t)(addr + sizeof(HeapManager.Storage)), alignment ); 751 778 752 779 // address of header from malloc 753 HeapManager.Storage.Header * realHeader = headerAddr( a rea);780 HeapManager.Storage.Header * realHeader = headerAddr( addr ); 754 781 // address of fake header * before* the alignment location 755 782 HeapManager.Storage.Header * fakeHeader = headerAddr( user ); … … 761 788 return user; 762 789 } // memalignNoStats 790 791 792 static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) { 793 size_t size = noOfElems * elemSize; 794 char * addr = (char *)memalignNoStats( alignment, size ); 795 if ( unlikely( addr == 0p ) ) return 0p; 796 HeapManager.Storage.Header * header; 797 HeapManager.FreeHeader * freeElem; 798 size_t bsize; 799 bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment ); 800 #ifndef __CFA_DEBUG__ 801 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 802 if ( ! mapped ) 803 #endif // __CFA_DEBUG__ 804 memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros 805 header->kind.real.blockSize |= 2; // mark as zero filled 806 807 return addr; 808 } // cmemalignNoStats 763 809 764 810 … … 774 820 extern "C" { 775 821 // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not 776 // initialized. If size is 0, then malloc() returns either NULL, or a unique pointer value that can later be822 // initialized. If size is 0, then malloc() returns either 0p, or a unique pointer value that can later be 777 823 // successfully passed to free(). 778 824 void * malloc( size_t size ) { … … 786 832 787 833 // The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to 788 // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either NULL, or a834 // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a 789 835 // unique pointer value that can later be successfully passed to free(). 790 836 void * calloc( size_t noOfElems, size_t elemSize ) { 791 size_t size = noOfElems * elemSize;792 837 #ifdef __STATISTICS__ 793 838 __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST ); 794 __atomic_add_fetch( &calloc_storage, size, __ATOMIC_SEQ_CST ); 795 #endif // __STATISTICS__ 796 797 char * area = (char *)mallocNoStats( size ); 798 if ( unlikely( area == 0 ) ) return 0; 839 __atomic_add_fetch( &calloc_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST ); 840 #endif // __STATISTICS__ 841 842 return callocNoStats( noOfElems, elemSize ); 843 } // calloc 844 845 // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be 846 // unchanged in the range from the start of the region up to the minimum of the old and new sizes. 
If the new size 847 // is larger than the old size, the added memory will not be initialized. If ptr is 0p, then the call is 848 // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not 0p, then the call 849 // is equivalent to free(ptr). Unless ptr is 0p, it must have been returned by an earlier call to malloc(), 850 // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done. 851 void * realloc( void * oaddr, size_t size ) { 852 #ifdef __STATISTICS__ 853 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 854 #endif // __STATISTICS__ 855 856 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 857 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases 858 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 799 859 800 860 HeapManager.Storage.Header * header; 801 861 HeapManager.FreeHeader * freeElem; 802 size_t asize, alignment; 803 bool mapped __attribute__(( unused )) = headers( "calloc", area, header, freeElem, asize, alignment ); 804 #ifndef __CFA_DEBUG__ 805 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 806 if ( ! mapped ) 807 #endif // __CFA_DEBUG__ 808 memset( area, '\0', asize - sizeof(HeapManager.Storage) ); // set to zeros 809 810 header->kind.real.blockSize |= 2; // mark as zero filled 811 return area; 812 } // calloc 813 814 // #comment TD : Document this function 815 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) { 816 size_t size = noOfElems * elemSize; 817 #ifdef __STATISTICS__ 818 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 819 __atomic_add_fetch( &cmemalign_storage, size, __ATOMIC_SEQ_CST ); 820 #endif // __STATISTICS__ 821 822 char * area = (char *)memalignNoStats( alignment, size ); 823 if ( unlikely( area == 0 ) ) return 0; 824 HeapManager.Storage.Header * header; 825 HeapManager.FreeHeader * freeElem; 826 size_t asize; 827 bool mapped __attribute__(( unused )) = headers( "cmemalign", area, header, freeElem, asize, alignment ); 828 #ifndef __CFA_DEBUG__ 829 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 830 if ( ! mapped ) 831 #endif // __CFA_DEBUG__ 832 memset( area, '\0', asize - ( (char *)area - (char *)header ) ); // set to zeros 833 header->kind.real.blockSize |= 2; // mark as zero filled 834 835 return area; 836 } // cmemalign 837 838 // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be 839 // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size 840 // is larger than the old size, the added memory will not be initialized. If ptr is NULL, then the call is 841 // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not NULL, then the call 842 // is equivalent to free(ptr). Unless ptr is NULL, it must have been returned by an earlier call to malloc(), 843 // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done. 
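The reuse test in the new realloc above, size <= odsize && odsize <= size * 2, keeps the existing bucket whenever the request fits and at most half the block's data area would be wasted, avoiding an allocate-copy-free cycle. A tiny C sketch of that decision, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    // Keep the current block when the request fits and no more than 50%
    // of the block's data area would be wasted by the smaller request.
    static bool reuse_in_place( size_t request, size_t block_data_size ) {
        return request <= block_data_size && block_data_size <= 2 * request;
    }

For example, a 48-byte request in a 64-byte data area is reused (48 <= 64 <= 96), while a 16-byte request in the same area is not (64 > 32), so the block is released in favour of a smaller bucket.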
844 void * realloc( void * addr, size_t size ) { 845 #ifdef __STATISTICS__ 846 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 847 #endif // __STATISTICS__ 848 849 if ( unlikely( addr == 0 ) ) return mallocNoStats( size ); // special cases 850 if ( unlikely( size == 0 ) ) { free( addr ); return 0; } 851 852 HeapManager.Storage.Header * header; 853 HeapManager.FreeHeader * freeElem; 854 size_t asize, alignment = 0; 855 headers( "realloc", addr, header, freeElem, asize, alignment ); 856 857 size_t usize = asize - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block 858 if ( usize >= size ) { // already sufficient storage 862 size_t bsize, oalign = 0; 863 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 864 865 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 866 if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size 867 // Do not know size of original allocation => cannot do 0 fill for any additional space because do not know 868 // where to start filling, i.e., do not overwrite existing values in space. 869 // 859 870 // This case does not result in a new profiler entry because the previous one still exists and it must match with 860 871 // the free for this memory. Hence, this realloc does not appear in the profiler output. 861 return addr;872 return oaddr; 862 873 } // if 863 874 … … 866 877 #endif // __STATISTICS__ 867 878 868 void * area; 869 if ( unlikely( alignment != 0 ) ) { // previous request memalign? 870 area = memalign( alignment, size ); // create new aligned area 879 // change size and copy old content to new storage 880 881 void * naddr; 882 if ( unlikely( oalign != 0 ) ) { // previous request memalign? 883 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 884 naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area 885 } else { 886 naddr = memalignNoStats( oalign, size ); // create new aligned area 887 } // if 871 888 } else { 872 area = mallocNoStats( size ); // create new area 889 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 890 naddr = callocNoStats( 1, size ); // create new area 891 } else { 892 naddr = mallocNoStats( size ); // create new area 893 } // if 873 894 } // if 874 if ( unlikely( area == 0 ) ) return 0; 875 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill (calloc/cmemalign) ? 876 assert( (header->kind.real.blockSize & 1) == 0 ); 877 bool mapped __attribute__(( unused )) = headers( "realloc", area, header, freeElem, asize, alignment ); 878 #ifndef __CFA_DEBUG__ 879 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 880 if ( ! mapped ) 881 #endif // __CFA_DEBUG__ 882 memset( (char *)area + usize, '\0', asize - ( (char *)area - (char *)header ) - usize ); // zero-fill back part 883 header->kind.real.blockSize |= 2; // mark new request as zero fill 884 } // if 885 memcpy( area, addr, usize ); // copy bytes 886 free( addr ); 887 return area; 895 if ( unlikely( naddr == 0p ) ) return 0p; 896 897 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 898 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket 899 // To preserve prior fill, the entire bucket must be copied versus the size. 
900 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 901 free( oaddr ); 902 return naddr; 888 903 } // realloc 889 890 904 891 905 // The obsolete function memalign() allocates size bytes and returns a pointer to the allocated memory. The memory … … 897 911 #endif // __STATISTICS__ 898 912 899 void * area = memalignNoStats( alignment, size ); 900 901 return area; 913 return memalignNoStats( alignment, size ); 902 914 } // memalign 915 916 917 // The cmemalign() function is the same as calloc() with memory alignment. 918 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) { 919 #ifdef __STATISTICS__ 920 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 921 __atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST ); 922 #endif // __STATISTICS__ 923 924 return cmemalignNoStats( alignment, noOfElems, elemSize ); 925 } // cmemalign 903 926 904 927 // The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a … … 911 934 // The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The 912 935 // address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of 913 // sizeof(void *). If size is 0, then posix_memalign() returns either NULL, or a unique pointer value that can later936 // sizeof(void *). If size is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later 914 937 // be successfully passed to free(3). 915 938 int posix_memalign( void ** memptr, size_t alignment, size_t size ) { 916 939 if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment 917 940 * memptr = memalign( alignment, size ); 918 if ( unlikely( * memptr == 0 ) ) return ENOMEM;941 if ( unlikely( * memptr == 0p ) ) return ENOMEM; 919 942 return 0; 920 943 } // posix_memalign … … 929 952 // The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to 930 953 // malloc(), calloc() or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior 931 // occurs. If ptr is NULL, no operation is performed.954 // occurs. If ptr is 0p, no operation is performed. 932 955 void free( void * addr ) { 933 956 #ifdef __STATISTICS__ … … 935 958 #endif // __STATISTICS__ 936 959 937 // #comment TD : To decrease nesting I would but the special case in the 938 // else instead, plus it reads more naturally to have the 939 // short / normal case instead 940 if ( unlikely( addr == 0 ) ) { // special case 941 #ifdef __CFA_DEBUG__ 942 if ( traceHeap() ) { 943 #define nullmsg "Free( 0x0 ) size:0\n" 944 // Do not debug print free( 0 ), as it can cause recursive entry from sprintf. 945 __cfaabi_dbg_bits_write( nullmsg, sizeof(nullmsg) - 1 ); 946 } // if 947 #endif // __CFA_DEBUG__ 960 if ( unlikely( addr == 0p ) ) { // special case 961 // #ifdef __CFA_DEBUG__ 962 // if ( traceHeap() ) { 963 // #define nullmsg "Free( 0x0 ) size:0\n" 964 // // Do not debug print free( 0p ), as it can cause recursive entry from sprintf. 965 // __cfaabi_dbg_write( nullmsg, sizeof(nullmsg) - 1 ); 966 // } // if 967 // #endif // __CFA_DEBUG__ 948 968 return; 949 969 } // exit … … 951 971 doFree( addr ); 952 972 } // free 973 974 975 // The malloc_alignment() function returns the alignment of the allocation. 
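Before the malloc_alignment implementation that follows, a small C sketch of the flag-bit encoding it decodes: a fake header stores the requested alignment with bit 0 set as a marker, so masking with -2 recovers the power-of-two value, while a real header implies the minimum alignment. The struct here is a hypothetical stand-in for the real header:

    #include <stddef.h>

    struct header { size_t alignment; };          // hypothetical stand-in

    // Bit 0 set marks a fake header; the remaining bits hold the alignment.
    // A real header stores no alignment, so the minimum applies.
    static size_t stored_alignment( const struct header * h, size_t minimum ) {
        if ( (h->alignment & 1) == 1 ) return h->alignment & (size_t)-2;
        return minimum;
    }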
976 size_t malloc_alignment( void * addr ) { 977 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 978 HeapManager.Storage.Header * header = headerAddr( addr ); 979 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 980 return header->kind.fake.alignment & -2; // remove flag from value 981 } else { 982 return libAlign (); // minimum alignment 983 } // if 984 } // malloc_alignment 985 986 987 // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc(). 988 bool malloc_zero_fill( void * addr ) { 989 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 990 HeapManager.Storage.Header * header = headerAddr( addr ); 991 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 992 header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset); 993 } // if 994 return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ? 995 } // malloc_zero_fill 996 997 998 // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to 999 // a block of memory allocated by malloc(3) or a related function. 1000 size_t malloc_usable_size( void * addr ) { 1001 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1002 HeapManager.Storage.Header * header; 1003 HeapManager.FreeHeader * freeElem; 1004 size_t bsize, alignment; 1005 1006 headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment ); 1007 return dataStorage( bsize, addr, header ); // data storage in bucket 1008 } // malloc_usable_size 1009 1010 1011 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and 1012 // related functions. 1013 void malloc_stats( void ) { 1014 #ifdef __STATISTICS__ 1015 printStats(); 1016 if ( prtFree() ) prtFree( heapManager ); 1017 #endif // __STATISTICS__ 1018 } // malloc_stats 1019 1020 // The malloc_stats_fd() function changes the file descripter where malloc_stats() writes the statistics. 
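The malloc_stats_fd implementation that follows simply swaps the saved descriptor and returns the previous one, or -1 when statistics are compiled out, in which case the swap has no effect. A usage sketch, declaring the two entry points directly since malloc_stats_fd is this allocator's extension:

    #include <unistd.h>                           // STDOUT_FILENO

    extern void malloc_stats( void );             // print allocator statistics
    extern int malloc_stats_fd( int fd );         // swap statistics descriptor

    static void print_stats_to_stdout( void ) {
        int old = malloc_stats_fd( STDOUT_FILENO ); // route statistics to stdout
        malloc_stats();                             // print via the new descriptor
        malloc_stats_fd( old );                     // restore the previous one
    }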
1021 int malloc_stats_fd( int fd __attribute__(( unused )) ) { 1022 #ifdef __STATISTICS__ 1023 int temp = statfd; 1024 statfd = fd; 1025 return temp; 1026 #else 1027 return -1; 1028 #endif // __STATISTICS__ 1029 } // malloc_stats_fd 1030 953 1031 954 1032 // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see … … 958 1036 choose( option ) { 959 1037 case M_TOP_PAD: 960 if ( setHeapExpand( value ) ) fallthru default;1038 if ( setHeapExpand( value ) ) return 1; 961 1039 case M_MMAP_THRESHOLD: 962 if ( setMmapStart( value ) ) fallthru default; 963 default: 964 // #comment TD : 1 for unsopported feels wrong 965 return 1; // success, or unsupported 1040 if ( setMmapStart( value ) ) return 1; 966 1041 } // switch 967 return 0; // error 1042 return 0; // error, unsupported 968 1043 } // mallopt 969 1044 … … 974 1049 } // malloc_trim 975 1050 976 // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to977 // a block of memory allocated by malloc(3) or a related function.978 size_t malloc_usable_size( void * addr ) {979 if ( unlikely( addr == 0 ) ) return 0; // null allocation has 0 size980 981 HeapManager.Storage.Header * header;982 HeapManager.FreeHeader * freeElem;983 size_t size, alignment;984 985 headers( "malloc_usable_size", addr, header, freeElem, size, alignment );986 size_t usize = size - ( (char *)addr - (char *)header ); // compute the amount of user storage in the block987 return usize;988 } // malloc_usable_size989 990 991 // The malloc_alignment() function returns the alignment of the allocation.992 size_t malloc_alignment( void * addr ) {993 if ( unlikely( addr == 0 ) ) return libAlign(); // minimum alignment994 HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );995 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?996 return header->kind.fake.alignment & -2; // remove flag from value997 } else {998 return libAlign (); // minimum alignment999 } // if1000 } // malloc_alignment1001 1002 1003 // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc().1004 bool malloc_zero_fill( void * addr ) {1005 if ( unlikely( addr == 0 ) ) return false; // null allocation is not zero fill1006 1007 HeapManager.Storage.Header * header = (HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) );1008 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?1009 header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);1010 } // if1011 return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign) ?1012 } // malloc_zero_fill1013 1014 1015 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and1016 // related functions.1017 void malloc_stats( void ) {1018 #ifdef __STATISTICS__1019 printStats();1020 if ( checkFree() ) checkFree( heapManager );1021 #endif // __STATISTICS__1022 } // malloc_stats1023 1024 // The malloc_stats_fd() function changes the file descripter where malloc_stats() writes the statistics.1025 int malloc_stats_fd( int fd ) {1026 #ifdef __STATISTICS__1027 int temp = statfd;1028 statfd = fd;1029 return temp;1030 #else1031 return -1;1032 #endif // __STATISTICS__1033 } // malloc_stats_fd1034 1051 1035 1052 // The malloc_info() function exports an XML string that describes the current state of the memory-allocation … 
… 1037 1054 // information about all arenas (see malloc(3)). 1038 1055 int malloc_info( int options, FILE * stream ) { 1056 if ( options != 0 ) { errno = EINVAL; return -1; } 1039 1057 return printStatsXML( stream ); 1040 1058 } // malloc_info … … 1046 1064 // structure is returned as the function result. (It is the caller's responsibility to free(3) this memory.) 1047 1065 void * malloc_get_state( void ) { 1048 return 0 ; // unsupported1066 return 0p; // unsupported 1049 1067 } // malloc_get_state 1050 1068 … … 1058 1076 1059 1077 1078 // Must have CFA linkage to overload with C linkage realloc. 1079 void * realloc( void * oaddr, size_t nalign, size_t size ) { 1080 #ifdef __STATISTICS__ 1081 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 1082 #endif // __STATISTICS__ 1083 1084 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1085 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases 1086 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 1087 1088 if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum 1089 #ifdef __CFA_DEBUG__ 1090 else 1091 checkAlign( nalign ); // check alignment 1092 #endif // __CFA_DEBUG__ 1093 1094 HeapManager.Storage.Header * header; 1095 HeapManager.FreeHeader * freeElem; 1096 size_t bsize, oalign = 0; 1097 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 1098 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 1099 1100 if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out 1101 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1102 return realloc( oaddr, size ); 1103 } // if 1104 1105 #ifdef __STATISTICS__ 1106 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST ); 1107 #endif // __STATISTICS__ 1108 1109 // change size and copy old content to new storage 1110 1111 void * naddr; 1112 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1113 naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area 1114 } else { 1115 naddr = memalignNoStats( nalign, size ); // create new aligned area 1116 } // if 1117 1118 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1119 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket 1120 // To preserve prior fill, the entire bucket must be copied versus the size. 1121 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 1122 free( oaddr ); 1123 return naddr; 1124 } // realloc 1125 1126 1060 1127 // Local Variables: // 1061 1128 // tab-width: 4 // -
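The CFA-linkage realloc above takes an explicit alignment and, unless the old address already happens to satisfy it, builds a new aligned block and copies the overlap. A portable C sketch of that fallback path using only standard calls; the caller must supply the old size here because plain C cannot query it from the allocator the way the library code does:

    #include <stdlib.h>   // aligned_alloc, free
    #include <string.h>   // memcpy

    // Resize 'old' (old_size bytes) to 'size' bytes at 'align' alignment by
    // allocate-copy-free; the library version skips the copy when the old
    // address is already suitably aligned. C11 aligned_alloc requires 'size'
    // to be a multiple of 'align'.
    void * realloc_aligned( void * old, size_t old_size, size_t align, size_t size ) {
        void * new_p = aligned_alloc( align, size );
        if ( new_p == NULL ) return NULL;              // failure: 'old' left intact
        if ( old != NULL ) {
            memcpy( new_p, old, old_size < size ? old_size : size ); // copy overlap
            free( old );
        }
        return new_p;
    }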
libcfa/src/interpose.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Wed Mar 29 16:10:31 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 14 22:57:16 201913 // Update Count : 1 1612 // Last Modified On : Mon Feb 17 10:18:53 2020 13 // Update Count : 166 14 14 // 15 15 … … 29 29 #include "bits/signal.hfa" // sigHandler_? 30 30 #include "startup.hfa" // STARTUP_PRIORITY_CORE 31 #include <assert.h> 31 32 32 33 //============================================================================================= … … 40 41 41 42 typedef void (* generic_fptr_t)(void); 42 generic_fptr_t interpose_symbol( const char * symbol, const char * version) {43 generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) { 43 44 const char * error; 44 45 … … 95 96 void __cfaabi_interpose_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_CORE ) )); 96 97 void __cfaabi_interpose_startup( void ) { 97 const char *version = NULL;98 const char *version = 0p; 98 99 99 100 preload_libgcc(); … … 105 106 #pragma GCC diagnostic pop 106 107 108 // As a precaution (and necessity), errors that result in termination are delivered on a separate stack because 109 // task stacks might be very small (4K) and the signal delivery corrupts memory to the point that a clean 110 // shutdown is impossible. Also, when a stack overflow encounters the non-accessible sentinel page (debug only) 111 // and generates a segment fault, the signal cannot be delivered on the sentinel page. Finally, calls to abort 112 // print a stack trace that uses substantial stack space. 113 114 #define MINSTKSZ SIGSTKSZ * 8 115 static char stack[MINSTKSZ] __attribute__(( aligned (16) )); 116 static stack_t ss; 117 118 ss.ss_sp = stack; 119 ss.ss_size = MINSTKSZ; 120 ss.ss_flags = 0; 121 if ( sigaltstack( &ss, 0p ) == -1 ) { 122 abort( "__cfaabi_interpose_startup : internal error, sigaltstack error(%d) %s.", errno, strerror( errno ) ); 123 } // if 124 107 125 // Failure handler 108 __cfaabi_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); 109 __cfaabi_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); 110 __cfaabi_sigaction( SIGILL , sigHandler_ill , SA_SIGINFO ); 111 __cfaabi_sigaction( SIGFPE , sigHandler_fpe , SA_SIGINFO ); 112 __cfaabi_sigaction( SIGABRT, sigHandler_abrt, SA_SIGINFO | SA_RESETHAND); 113 __cfaabi_sigaction( SIGTERM, sigHandler_term , SA_SIGINFO ); 114 __cfaabi_sigaction( SIGINT , sigHandler_term , SA_SIGINFO ); 126 __cfaabi_sigaction( SIGSEGV, sigHandler_segv, SA_SIGINFO | SA_ONSTACK ); 127 __cfaabi_sigaction( SIGBUS , sigHandler_segv, SA_SIGINFO | SA_ONSTACK ); 128 __cfaabi_sigaction( SIGILL , sigHandler_ill , SA_SIGINFO | SA_ONSTACK ); 129 __cfaabi_sigaction( SIGFPE , sigHandler_fpe , SA_SIGINFO | SA_ONSTACK ); 130 __cfaabi_sigaction( SIGTERM, sigHandler_term, SA_SIGINFO | SA_ONSTACK | SA_RESETHAND ); // one shot handler, return to default 131 __cfaabi_sigaction( SIGINT , sigHandler_term, SA_SIGINFO | SA_ONSTACK | SA_RESETHAND ); 132 __cfaabi_sigaction( SIGABRT, sigHandler_term, SA_SIGINFO | SA_ONSTACK | SA_RESETHAND ); 133 __cfaabi_sigaction( SIGHUP , sigHandler_term, SA_SIGINFO | SA_ONSTACK | SA_RESETHAND ); // terminal hangup 115 134 } 116 135 } … … 123 142 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 124 143 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 144 void abort( bool signalAbort, const char fmt[], ... 
) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 125 145 126 146 extern "C" { 127 147 void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 128 abort( NULL);148 abort( false, "%s", "" ); 129 149 } 130 150 … … 132 152 va_list argp; 133 153 va_start( argp, fmt ); 134 abort( f mt, argp );154 abort( false, fmt, argp ); 135 155 va_end( argp ); 136 156 } … … 141 161 } 142 162 143 void * kernel_abort ( void ) __attribute__(( __nothrow__, __leaf__, __weak__ )) { return NULL; } 144 void kernel_abort_msg( void * data, char * buffer, int size ) __attribute__(( __nothrow__, __leaf__, __weak__ )) {} 163 void * kernel_abort( void ) __attribute__(( __nothrow__, __leaf__, __weak__ )) { return 0p; } 164 void kernel_abort_msg( void * data, char buffer[], int size ) __attribute__(( __nothrow__, __leaf__, __weak__ )) {} 165 // See concurrency/kernel.cfa for strong definition used in multi-processor mode. 145 166 int kernel_abort_lastframe( void ) __attribute__(( __nothrow__, __leaf__, __weak__ )) { return 4; } 146 167 147 168 enum { abort_text_size = 1024 }; 148 169 static char abort_text[ abort_text_size ]; 149 static int abort_lastframe; 150 151 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )) { 152 va_list args; 153 va_start( args, fmt ); 154 vfprintf( stderr, fmt, args ); 155 va_end( args ); 156 __cabi_libc.exit( status ); 157 } 158 159 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { 160 void * kernel_data = kernel_abort(); // must be done here to lock down kernel 161 int len; 162 163 abort_lastframe = kernel_abort_lastframe(); 164 len = snprintf( abort_text, abort_text_size, "Cforall Runtime error (UNIX pid:%ld) ", (long int)getpid() ); // use UNIX pid (versus getPid) 165 __cfaabi_dbg_bits_write( abort_text, len ); 166 167 if ( fmt ) { 168 va_list args; 169 va_start( args, fmt ); 170 171 len = vsnprintf( abort_text, abort_text_size, fmt, args ); 172 va_end( args ); 173 __cfaabi_dbg_bits_write( abort_text, len ); 174 175 if ( fmt[strlen( fmt ) - 1] != '\n' ) { // add optional newline if missing at the end of the format text 176 __cfaabi_dbg_bits_write( "\n", 1 ); 177 } 178 } 179 180 kernel_abort_msg( kernel_data, abort_text, abort_text_size ); 181 __cabi_libc.abort(); 182 } 183 184 static void __cfaabi_backtrace() { 185 enum { 186 Frames = 50, // maximum number of stack frames 187 Start = 8, // skip first N stack frames 188 }; 170 171 static void __cfaabi_backtrace( int start ) { 172 enum { Frames = 50, }; // maximum number of stack frames 173 int last = kernel_abort_lastframe(); // skip last N stack frames 189 174 190 175 void * array[Frames]; 191 176 size_t size = backtrace( array, Frames ); 192 char ** messages = backtrace_symbols( array, size ); 193 194 // find executable name 195 *index( messages[0], '(' ) = '\0'; 196 __cfaabi_dbg_bits_print_nolock( "Stack back trace for: %s\n", messages[0]); 197 198 for ( int i = Start; i < size - abort_lastframe && messages != NULL; i += 1 ) { 199 char * name = NULL, * offset_begin = NULL, * offset_end = NULL; 200 201 for ( char * p = messages[i]; *p; ++p ) { 202 //__cfaabi_dbg_bits_print_nolock( "X %s\n", p); 203 // find parantheses and +offset 177 char ** messages = backtrace_symbols( array, size ); // does not demangle names 178 179 *index( messages[0], '(' ) = '\0'; // find executable name 180 __cfaabi_bits_print_nolock( STDERR_FILENO, "Stack back trace for: %s\n", 
messages[0]); 181 182 for ( unsigned int i = start; i < size - last && messages != 0p; i += 1 ) { 183 char * name = 0p, * offset_begin = 0p, * offset_end = 0p; 184 185 for ( char * p = messages[i]; *p; p += 1 ) { // find parantheses and +offset 186 //__cfaabi_bits_print_nolock( "X %s\n", p); 204 187 if ( *p == '(' ) { 205 188 name = p; … … 212 195 } 213 196 214 // if line contains symbol print it215 int frameNo = i - Start;197 // if line contains symbol, print it 198 int frameNo = i - start; 216 199 if ( name && offset_begin && offset_end && name < offset_begin ) { 217 // delimit strings 218 *name++ = '\0'; 200 *name++ = '\0'; // delimit strings 219 201 *offset_begin++ = '\0'; 220 202 *offset_end++ = '\0'; 221 203 222 __cfaabi_ dbg_bits_print_nolock("(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);204 __cfaabi_bits_print_nolock( STDERR_FILENO, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end); 223 205 } else { // otherwise, print the whole line 224 __cfaabi_ dbg_bits_print_nolock("(%i) %s\n", frameNo, messages[i] );206 __cfaabi_bits_print_nolock( STDERR_FILENO, "(%i) %s\n", frameNo, messages[i] ); 225 207 } 226 208 } … … 228 210 } 229 211 212 void exit( int status, const char fmt[], ... ) { 213 va_list args; 214 va_start( args, fmt ); 215 vfprintf( stderr, fmt, args ); 216 va_end( args ); 217 __cabi_libc.exit( status ); 218 } 219 220 void abort( bool signalAbort, const char fmt[], ... ) { 221 void * kernel_data = kernel_abort(); // must be done here to lock down kernel 222 int len; 223 224 signal( SIGABRT, SIG_DFL ); // prevent final "real" abort from recursing to handler 225 226 len = snprintf( abort_text, abort_text_size, "Cforall Runtime error (UNIX pid:%ld) ", (long int)getpid() ); // use UNIX pid (versus getPid) 227 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 228 229 assert( fmt ); 230 va_list args; 231 va_start( args, fmt ); 232 233 len = vsnprintf( abort_text, abort_text_size, fmt, args ); 234 va_end( args ); 235 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 236 237 if ( fmt[strlen( fmt ) - 1] != '\n' ) { // add optional newline if missing at the end of the format text 238 __cfaabi_dbg_write( "\n", 1 ); 239 } // if 240 kernel_abort_msg( kernel_data, abort_text, abort_text_size ); 241 242 __cfaabi_backtrace( signalAbort ? 4 : 2 ); 243 244 __cabi_libc.abort(); // print stack trace in handler 245 } 246 247 void abort( const char fmt[], ... ) { 248 va_list args; 249 va_start( args, fmt ); 250 abort( false, fmt, args ); 251 va_end( args ); 252 } 253 230 254 void sigHandler_segv( __CFA_SIGPARMS__ ) { 231 abort( "Addressing invalid memory at location %p\n" 232 "Possible cause is reading outside the address space or writing to a protected area within the address space with an invalid pointer or subscript.\n", 233 sfp->si_addr ); 255 if ( sfp->si_addr == 0p ) { 256 abort( true, "Null pointer (0p) dereference.\n" ); 257 } else { 258 abort( true, "%s at memory location %p.\n" 259 "Possible cause is reading outside the address space or writing to a protected area within the address space with an invalid pointer or subscript.\n", 260 (sig == SIGSEGV ? 
"Segment fault" : "Bus error"), sfp->si_addr ); 261 } 234 262 } 235 263 236 264 void sigHandler_ill( __CFA_SIGPARMS__ ) { 237 abort( "Executing illegal instruction at location %p.\n"265 abort( true, "Executing illegal instruction at location %p.\n" 238 266 "Possible cause is stack corruption.\n", 239 267 sfp->si_addr ); … … 251 279 default: msg = "unknown"; 252 280 } // choose 253 abort( "Computation error %s at location %p.\n", msg, sfp->si_addr ); 254 } 255 256 void sigHandler_abrt( __CFA_SIGPARMS__ ) { 257 __cfaabi_backtrace(); 258 259 // reset default signal handler 260 __cfaabi_sigdefault( SIGABRT ); 261 262 raise( SIGABRT ); 281 abort( true, "Computation error %s at location %p.\n", msg, sfp->si_addr ); 263 282 } 264 283 265 284 void sigHandler_term( __CFA_SIGPARMS__ ) { 266 abort( "Application stopped by %s signal.", sig == SIGINT ? "an interrupt (SIGINT)" : "a terminate (SIGTERM)");285 abort( true, "Application interrupted by signal: %s.\n", strsignal( sig ) ); 267 286 } 268 287 -
libcfa/src/iostream.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jul 13 08:07:59 201913 // Update Count : 82 112 // Last Modified On : Thu Feb 20 15:53:23 2020 13 // Update Count : 829 14 14 // 15 15 … … 19 19 #include <stdio.h> 20 20 #include <stdbool.h> // true/false 21 #include <stdint.h> // UINT64_MAX 21 22 //#include <string.h> // strlen, strcmp 22 23 extern size_t strlen (const char *__s) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1))); … … 35 36 forall( dtype ostype | ostream( ostype ) ) { 36 37 ostype & ?|?( ostype & os, zero_t ) { 37 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );38 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 38 39 fmt( os, "%d", 0n ); 39 40 return os; … … 44 45 45 46 ostype & ?|?( ostype & os, one_t ) { 46 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );47 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 47 48 fmt( os, "%d", 1n ); 48 49 return os; … … 53 54 54 55 ostype & ?|?( ostype & os, bool b ) { 55 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );56 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 56 57 fmt( os, "%s", b ? "true" : "false" ); 57 58 return os; … … 63 64 ostype & ?|?( ostype & os, char c ) { 64 65 fmt( os, "%c", c ); 65 if ( c == '\n' ) setNL( os, true );66 if ( c == '\n' ) $setNL( os, true ); 66 67 return sepOff( os ); 67 68 } // ?|? … … 71 72 72 73 ostype & ?|?( ostype & os, signed char sc ) { 73 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );74 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 74 75 fmt( os, "%hhd", sc ); 75 76 return os; … … 80 81 81 82 ostype & ?|?( ostype & os, unsigned char usc ) { 82 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );83 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 83 84 fmt( os, "%hhu", usc ); 84 85 return os; … … 89 90 90 91 ostype & ?|?( ostype & os, short int si ) { 91 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );92 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 92 93 fmt( os, "%hd", si ); 93 94 return os; … … 98 99 99 100 ostype & ?|?( ostype & os, unsigned short int usi ) { 100 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );101 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 101 102 fmt( os, "%hu", usi ); 102 103 return os; … … 107 108 108 109 ostype & ?|?( ostype & os, int i ) { 109 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );110 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 110 111 fmt( os, "%d", i ); 111 112 return os; … … 116 117 117 118 ostype & ?|?( ostype & os, unsigned int ui ) { 118 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );119 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 119 120 fmt( os, "%u", ui ); 120 121 return os; … … 125 126 126 127 ostype & ?|?( ostype & os, long int li ) { 127 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );128 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 128 129 fmt( os, "%ld", li ); 129 130 return os; … … 134 135 135 136 ostype & ?|?( ostype & os, unsigned long int uli ) { 136 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );137 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 137 138 fmt( os, "%lu", uli ); 138 139 return os; … … 143 144 144 145 ostype & ?|?( ostype & os, long long int lli ) { 145 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );146 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 146 147 fmt( os, "%lld", lli ); 147 148 return os; … … 152 153 153 154 ostype & 
?|?( ostype & os, unsigned long long int ulli ) { 154 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );155 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 155 156 fmt( os, "%llu", ulli ); 156 157 return os; … … 159 160 (ostype &)(os | ulli); ends( os ); 160 161 } // ?|? 162 163 #if defined( __SIZEOF_INT128__ ) 164 // UINT64_MAX 18_446_744_073_709_551_615_ULL 165 #define P10_UINT64 10_000_000_000_000_000_000_ULL // 19 zeroes 166 167 static void base10_128( ostype & os, unsigned int128 val ) { 168 if ( val > UINT64_MAX ) { 169 base10_128( os, val / P10_UINT64 ); // recursive 170 fmt( os, "%.19lu", (uint64_t)(val % P10_UINT64) ); 171 } else { 172 fmt( os, "%lu", (uint64_t)val ); 173 } // if 174 } // base10_128 175 176 static void base10_128( ostype & os, int128 val ) { 177 if ( val < 0 ) { 178 fmt( os, "-" ); // leading negative sign 179 val = -val; 180 } // if 181 base10_128( os, (unsigned int128)val ); // print zero/positive value 182 } // base10_128 183 184 ostype & ?|?( ostype & os, int128 llli ) { 185 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 186 base10_128( os, llli ); 187 return os; 188 } // ?|? 189 void & ?|?( ostype & os, int128 llli ) { 190 (ostype &)(os | llli); ends( os ); 191 } // ?|? 192 193 ostype & ?|?( ostype & os, unsigned int128 ullli ) { 194 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 195 base10_128( os, ullli ); 196 return os; 197 } // ?|? 198 void & ?|?( ostype & os, unsigned int128 ullli ) { 199 (ostype &)(os | ullli); ends( os ); 200 } // ?|? 201 #endif // __SIZEOF_INT128__ 161 202 162 203 #define PrintWithDP( os, format, val, ... ) \ … … 175 216 176 217 ostype & ?|?( ostype & os, float f ) { 177 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );218 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 178 219 PrintWithDP( os, "%g", f ); 179 220 return os; … … 184 225 185 226 ostype & ?|?( ostype & os, double d ) { 186 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );227 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 187 228 PrintWithDP( os, "%.*lg", d, DBL_DIG ); 188 229 return os; … … 193 234 194 235 ostype & ?|?( ostype & os, long double ld ) { 195 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );236 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 196 237 PrintWithDP( os, "%.*Lg", ld, LDBL_DIG ); 197 238 return os; … … 202 243 203 244 ostype & ?|?( ostype & os, float _Complex fc ) { 204 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );245 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 205 246 // os | crealf( fc ) | nonl; 206 247 PrintWithDP( os, "%g", crealf( fc ) ); … … 214 255 215 256 ostype & ?|?( ostype & os, double _Complex dc ) { 216 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );257 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 217 258 // os | creal( dc ) | nonl; 218 259 PrintWithDP( os, "%.*lg", creal( dc ), DBL_DIG ); … … 226 267 227 268 ostype & ?|?( ostype & os, long double _Complex ldc ) { 228 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );269 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 229 270 // os | creall( ldc ) || nonl; 230 271 PrintWithDP( os, "%.*Lg", creall( ldc ), LDBL_DIG ); … … 237 278 } // ?|? 
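The base10_128 helpers above print a 128-bit integer by repeatedly peeling off the low 19 decimal digits (P10_UINT64 is 10^19, the largest power of ten representable in 64 bits) and zero-padding every chunk except the leading one. A standalone C sketch of the same idea, assuming a compiler with __int128:

    #include <stdint.h>   // UINT64_MAX
    #include <stdio.h>

    #define P10_UINT64 10000000000000000000ULL    // 10^19

    // High-order digits first: recurse on v / 10^19, then print the low
    // 19 digits padded with zeros so interior chunks keep their width.
    static void print_u128( unsigned __int128 v ) {
        if ( v > UINT64_MAX ) {
            print_u128( v / P10_UINT64 );
            printf( "%.19llu", (unsigned long long)(v % P10_UINT64) );
        } else {
            printf( "%llu", (unsigned long long)v );
        }
    }

For instance, print_u128( (unsigned __int128)1 << 100 ) emits 1267650600228229401496703205376 without ever converting more than 64 bits at a time.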
238 279 239 ostype & ?|?( ostype & os, const char * str) {280 ostype & ?|?( ostype & os, const char str[] ) { 240 281 enum { Open = 1, Close, OpenClose }; 241 282 static const unsigned char mask[256] @= { … … 257 298 // first character IS NOT spacing or closing punctuation => add left separator 258 299 unsigned char ch = str[0]; // must make unsigned 259 if ( sepPrt( os ) && mask[ ch ] != Close && mask[ ch ] != OpenClose ) {260 fmt( os, "%s", sepGetCur( os ) );300 if ( $sepPrt( os ) && mask[ ch ] != Close && mask[ ch ] != OpenClose ) { 301 fmt( os, "%s", $sepGetCur( os ) ); 261 302 } // if 262 303 263 304 // if string starts line, must reset to determine open state because separator is off 264 sepReset( os );// reset separator305 $sepReset( os ); // reset separator 265 306 266 307 // last character IS spacing or opening punctuation => turn off separator for next item 267 308 size_t len = strlen( str ); 268 309 ch = str[len - 1]; // must make unsigned 269 if ( sepPrt( os ) && mask[ ch ] != Open && mask[ ch ] != OpenClose ) {310 if ( $sepPrt( os ) && mask[ ch ] != Open && mask[ ch ] != OpenClose ) { 270 311 sepOn( os ); 271 312 } else { 272 313 sepOff( os ); 273 314 } // if 274 if ( ch == '\n' ) setNL( os, true ); // check *AFTER*sepPrt call above as it resets NL flag315 if ( ch == '\n' ) $setNL( os, true ); // check *AFTER* $sepPrt call above as it resets NL flag 275 316 return write( os, str, len ); 276 317 } // ?|? 277 void ?|?( ostype & os, const char * str ) { 318 319 void ?|?( ostype & os, const char str[] ) { 278 320 (ostype &)(os | str); ends( os ); 279 321 } // ?|? 280 322 281 323 // ostype & ?|?( ostype & os, const char16_t * str ) { 282 // if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );324 // if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 283 325 // fmt( os, "%ls", str ); 284 326 // return os; … … 287 329 // #if ! ( __ARM_ARCH_ISA_ARM == 1 && __ARM_32BIT_STATE == 1 ) // char32_t == wchar_t => ambiguous 288 330 // ostype & ?|?( ostype & os, const char32_t * str ) { 289 // if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );331 // if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 290 332 // fmt( os, "%ls", str ); 291 333 // return os; … … 294 336 295 337 // ostype & ?|?( ostype & os, const wchar_t * str ) { 296 // if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );338 // if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 297 339 // fmt( os, "%ls", str ); 298 340 // return os; … … 300 342 301 343 ostype & ?|?( ostype & os, const void * p ) { 302 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );344 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 303 345 fmt( os, "%p", p ); 304 346 return os; … … 315 357 void ?|?( ostype & os, ostype & (* manip)( ostype & ) ) { 316 358 (ostype &)(manip( os )); 317 if ( getPrt( os ) ) ends( os );// something printed ?318 setPrt( os, false ); // turn off359 if ( $getPrt( os ) ) ends( os ); // something printed ? 360 $setPrt( os, false ); // turn off 319 361 } // ?|? 
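The const char * operator above drives its separator decisions from a 256-entry mask that classifies characters as opening punctuation, closing punctuation, or both: a separator is printed before a string only if its first character is not closing, and the separator is turned off after a string ending in opening punctuation or spacing. A small C sketch of the classification, with a deliberately abbreviated table (the real one covers many more characters):

    #include <stdbool.h>

    enum { None, Open, Close, OpenClose };

    static unsigned char mask[256];               // zero-initialized to None

    static void init_mask( void ) {
        mask[' '] = OpenClose;                    // spacing acts as both
        mask['('] = mask['['] = mask['{'] = Open;
        mask[')'] = mask[']'] = mask['}'] = Close;
        mask[','] = mask['.'] = mask[';'] = Close;
    }

    // Print a separator before 'str' only when it does not begin with
    // closing (or open-close) punctuation.
    static bool sep_before( const char str[] ) {
        unsigned char ch = (unsigned char)str[0];
        return mask[ch] != Close && mask[ch] != OpenClose;
    }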
320 362 … … 329 371 ostype & nl( ostype & os ) { 330 372 (ostype &)(os | '\n'); 331 setPrt( os, false ); // turn off332 setNL( os, true );373 $setPrt( os, false ); // turn off 374 $setNL( os, true ); 333 375 flush( os ); 334 376 return sepOff( os ); // prepare for next line … … 336 378 337 379 ostype & nonl( ostype & os ) { 338 setPrt( os, false ); // turn off380 $setPrt( os, false ); // turn off 339 381 return os; 340 382 } // nonl … … 375 417 ostype & ?|?( ostype & os, T arg, Params rest ) { 376 418 (ostype &)(os | arg); // print first argument 377 sepSetCur( os, sepGetTuple( os ) );// switch to tuple separator419 $sepSetCur( os, sepGetTuple( os ) ); // switch to tuple separator 378 420 (ostype &)(os | rest); // print remaining arguments 379 sepSetCur( os, sepGet( os ) ); // switch to regular separator421 $sepSetCur( os, sepGet( os ) ); // switch to regular separator 380 422 return os; 381 423 } // ?|? … … 383 425 // (ostype &)(?|?( os, arg, rest )); ends( os ); 384 426 (ostype &)(os | arg); // print first argument 385 sepSetCur( os, sepGetTuple( os ) );// switch to tuple separator427 $sepSetCur( os, sepGetTuple( os ) ); // switch to tuple separator 386 428 (ostype &)(os | rest); // print remaining arguments 387 sepSetCur( os, sepGet( os ) ); // switch to regular separator429 $sepSetCur( os, sepGet( os ) ); // switch to regular separator 388 430 ends( os ); 389 431 } // ?|? … … 414 456 forall( dtype ostype | ostream( ostype ) ) { \ 415 457 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 416 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) ); \458 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \ 417 459 \ 418 460 if ( f.base == 'b' || f.base == 'B' ) { /* bespoke binary format */ \ … … 463 505 \ 464 506 if ( ! f.flags.pc ) { /* no precision */ \ 465 /* printf( "%s\n", &fmtstr[star] ); */ \466 507 fmtstr[sizeof(IFMTNP)-2] = f.base; /* sizeof includes '\0' */ \ 508 /* printf( "%s %c %c\n", &fmtstr[star], f.base, CODE ); */ \ 467 509 fmt( os, &fmtstr[star], f.wd, f.val ); \ 468 510 } else { /* precision */ \ 469 511 fmtstr[sizeof(IFMTP)-2] = f.base; /* sizeof includes '\0' */ \ 470 /* printf( "%s \n", &fmtstr[star]); */ \512 /* printf( "%s %c %c\n", &fmtstr[star], f.base, CODE ); */ \ 471 513 fmt( os, &fmtstr[star], f.wd, f.pc, f.val ); \ 472 514 } /* if */ \ … … 486 528 IntegralFMTImpl( signed long long int, 'd', "% *ll ", "% *.*ll " ) 487 529 IntegralFMTImpl( unsigned long long int, 'u', "% *ll ", "% *.*ll " ) 530 531 532 #if defined( __SIZEOF_INT128__ ) 533 // Default prefix for non-decimal prints is 0b, 0, 0x. 
534 #define IntegralFMTImpl128( T, SIGNED, CODE, IFMTNP, IFMTP ) \ 535 forall( dtype ostype | ostream( ostype ) ) \ 536 static void base10_128( ostype & os, _Ostream_Manip(T) fmt ) { \ 537 if ( fmt.val > UINT64_MAX ) { \ 538 fmt.val /= P10_UINT64; \ 539 base10_128( os, fmt ); /* recursive */ \ 540 _Ostream_Manip(unsigned long long int) fmt2 @= { (uint64_t)(fmt.val % P10_UINT64), 0, 19, 'u', { .all : 0 } }; \ 541 fmt2.flags.nobsdp = true; \ 542 printf( "fmt2 %c %lld %d\n", fmt2.base, fmt2.val, fmt2.all ); \ 543 sepOff( os ); \ 544 (ostype &)(os | fmt2); \ 545 } else { \ 546 printf( "fmt %c %lld %d\n", fmt.base, fmt.val, fmt.all ); \ 547 (ostype &)(os | fmt); \ 548 } /* if */ \ 549 } /* base10_128 */ \ 550 forall( dtype ostype | ostream( ostype ) ) { \ 551 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 552 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \ 553 \ 554 if ( f.base == 'b' | f.base == 'o' | f.base == 'x' | f.base == 'X' ) { \ 555 unsigned long long int msig = (unsigned long long int)(f.val >> 64); \ 556 unsigned long long int lsig = (unsigned long long int)(f.val); \ 557 _Ostream_Manip(SIGNED long long int) fmt @= { msig, f.wd, f.pc, f.base, { .all : f.all } }; \ 558 _Ostream_Manip(unsigned long long int) fmt2 @= { lsig, 0, 0, f.base, { .all : 0 } }; \ 559 if ( msig == 0 ) { \ 560 fmt.val = lsig; \ 561 (ostype &)(os | fmt); \ 562 } else { \ 563 fmt2.flags.pad0 = fmt2.flags.nobsdp = true; \ 564 if ( f.base == 'b' ) { \ 565 if ( f.wd > 64 ) fmt.wd = f.wd - 64; \ 566 fmt2.wd = 64; \ 567 (ostype &)(os | fmt | "" | fmt2); \ 568 } else if ( f.base == 'o' ) { \ 569 fmt.val = (unsigned long long int)fmt.val >> 2; \ 570 if ( f.wd > 21 ) fmt.wd = f.wd - 21; \ 571 fmt2.wd = 1; \ 572 fmt2.val = ((msig & 0x3) << 1) + 1; \ 573 (ostype &)(os | fmt | "" | fmt2); \ 574 sepOff( os ); \ 575 fmt2.wd = 21; \ 576 fmt2.val = lsig & 0x7fffffffffffffff; \ 577 (ostype &)(os | fmt2); \ 578 } else { \ 579 if ( f.flags.left ) { \ 580 if ( f.wd > 16 ) fmt2.wd = f.wd - 16; \ 581 fmt.wd = 16; \ 582 } else { \ 583 if ( f.wd > 16 ) fmt.wd = f.wd - 16; \ 584 fmt2.wd = 16; \ 585 } /* if */ \ 586 (ostype &)(os | fmt | "" | fmt2); \ 587 } /* if */ \ 588 } /* if */ \ 589 } else { \ 590 base10_128( os, f ); \ 591 } /* if */ \ 592 return os; \ 593 } /* ?|? */ \ 594 void ?|?( ostype & os, _Ostream_Manip(T) f ) { (ostype &)(os | f); ends( os ); } \ 595 } // distribution 596 597 IntegralFMTImpl128( int128, signed, 'd', "% *ll ", "% *.*ll " ) 598 IntegralFMTImpl128( unsigned int128, unsigned, 'u', "% *ll ", "% *.*ll " ) 599 #endif // __SIZEOF_INT128__ 488 600 489 601 //*********************************** floating point *********************************** … … 513 625 forall( dtype ostype | ostream( ostype ) ) { \ 514 626 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 515 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) ); \627 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \ 516 628 char fmtstr[sizeof(DFMTP)]; /* sizeof includes '\0' */ \ 517 629 if ( ! f.flags.pc ) memcpy( &fmtstr, DFMTNP, sizeof(DFMTNP) ); \ … … 536 648 return os; \ 537 649 } /* ?|? */ \ 650 \ 538 651 void ?|?( ostype & os, _Ostream_Manip(T) f ) { (ostype &)(os | f); ends( os ); } \ 539 652 } // distribution … … 555 668 } // if 556 669 557 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );670 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 558 671 559 672 #define CFMTNP "% * " … … 571 684 return os; 572 685 } // ?|? 
686 573 687 void ?|?( ostype & os, _Ostream_Manip(char) f ) { (ostype &)(os | f); ends( os ); } 574 688 } // distribution … … 592 706 } // if 593 707 594 if ( sepPrt( os ) ) fmt( os, "%s",sepGetCur( os ) );708 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); 595 709 596 710 #define SFMTNP "% * " … … 616 730 return os; 617 731 } // ?|? 732 618 733 void ?|?( ostype & os, _Ostream_Manip(const char *) f ) { (ostype &)(os | f); ends( os ); } 619 734 } // distribution … … 735 850 } // ?|? 736 851 737 // istype & ?|?( istype & is, const char * fmt) {852 // istype & ?|?( istype & is, const char fmt[] ) { 738 853 // fmt( is, fmt, "" ); 739 854 // return is; -
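The int128 support added to iostream.cfa above prints a 128-bit integer in decimal by recursively dividing by 10^19, the largest power of ten that fits in a uint64_t, and zero-padding every low-order group to 19 digits. A minimal standalone C sketch of the same technique, assuming a GCC/Clang compiler that defines __SIZEOF_INT128__ (the function name mirrors the library's base10_128, but this is an illustration, not the library code):

    #include <stdio.h>
    #include <stdint.h>

    #if defined( __SIZEOF_INT128__ )
    #define P10_UINT64 10000000000000000000ULL      // 10^19, largest power of 10 in a uint64_t

    // Print an unsigned 128-bit value in decimal: peel off the low 19 digits
    // with % P10_UINT64 and recurse on the (now smaller) quotient.
    static void base10_128( unsigned __int128 val ) {
        if ( val > UINT64_MAX ) {
            base10_128( val / P10_UINT64 );          // print high-order digits first
            printf( "%.19llu", (unsigned long long)(val % P10_UINT64) ); // zero-padded low group
        } else {
            printf( "%llu", (unsigned long long)val ); // small enough for one printf
        } // if
    } // base10_128

    int main() {
        unsigned __int128 big = (unsigned __int128)UINT64_MAX * UINT64_MAX; // (2^64-1)^2
        base10_128( big );
        printf( "\n" );
    } // main
    #endif // __SIZEOF_INT128__

The binary/octal/hexadecimal cases in the IntegralFMTImpl128 macro avoid division entirely: they split the value into two 64-bit halves and print the halves back to back, padding the low half to a fixed digit width.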
libcfa/src/iostream.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jul 12 12:08:38 201913 // Update Count : 33 412 // Last Modified On : Thu Feb 20 15:30:56 2020 13 // Update Count : 337 14 14 // 15 15 … … 24 24 trait ostream( dtype ostype ) { 25 25 // private 26 bool sepPrt( ostype & ); // get separator state (on/off)27 void sepReset( ostype & ); // set separator state to default state28 void sepReset( ostype &, bool ); // set separator and default state29 const char * sepGetCur( ostype & );// get current separator string30 void sepSetCur( ostype &, const char *); // set current separator string31 bool getNL( ostype & );// check newline32 void setNL( ostype &, bool ); // saw newline33 bool getANL( ostype & ); // get auto newline (on/off)34 bool getPrt( ostype & ); // get fmt called in output cascade35 void setPrt( ostype &, bool ); // set fmt called in output cascade26 bool $sepPrt( ostype & ); // get separator state (on/off) 27 void $sepReset( ostype & ); // set separator state to default state 28 void $sepReset( ostype &, bool ); // set separator and default state 29 const char * $sepGetCur( ostype & ); // get current separator string 30 void $sepSetCur( ostype &, const char [] ); // set current separator string 31 bool $getNL( ostype & ); // check newline 32 void $setNL( ostype &, bool ); // saw newline 33 bool $getANL( ostype & ); // get auto newline (on/off) 34 bool $getPrt( ostype & ); // get fmt called in output cascade 35 void $setPrt( ostype &, bool ); // set fmt called in output cascade 36 36 // public 37 37 void sepOn( ostype & ); // turn separator state on … … 43 43 44 44 const char * sepGet( ostype & ); // get separator string 45 void sepSet( ostype &, const char *); // set separator to string (15 character maximum)45 void sepSet( ostype &, const char [] ); // set separator to string (15 character maximum) 46 46 const char * sepGetTuple( ostype & ); // get tuple separator string 47 void sepSetTuple( ostype &, const char * );// set tuple separator to string (15 character maximum)47 void sepSetTuple( ostype &, const char [] ); // set tuple separator to string (15 character maximum) 48 48 49 49 void ends( ostype & os ); // end of output statement 50 50 int fail( ostype & ); 51 51 int flush( ostype & ); 52 void open( ostype & os, const char * name, const char * mode);52 void open( ostype & os, const char name[], const char mode[] ); 53 53 void close( ostype & os ); 54 ostype & write( ostype &, const char *, size_t );54 ostype & write( ostype &, const char [], size_t ); 55 55 int fmt( ostype &, const char format[], ... ) __attribute__(( format(printf, 2, 3) )); 56 56 }; // ostream … … 98 98 ostype & ?|?( ostype &, unsigned long long int ); 99 99 void ?|?( ostype &, unsigned long long int ); 100 #if defined( __SIZEOF_INT128__ ) 101 ostype & ?|?( ostype &, int128 ); 102 void ?|?( ostype &, int128 ); 103 ostype & ?|?( ostype &, unsigned int128 ); 104 void ?|?( ostype &, unsigned int128 ); 105 #endif // __SIZEOF_INT128__ 100 106 101 107 ostype & ?|?( ostype &, float ); … … 113 119 void ?|?( ostype &, long double _Complex ); 114 120 115 ostype & ?|?( ostype &, const char *);116 void ?|?( ostype &, const char *);121 ostype & ?|?( ostype &, const char [] ); 122 void ?|?( ostype &, const char [] ); 117 123 // ostype & ?|?( ostype &, const char16_t * ); 118 124 #if ! 
( __ARM_ARCH_ISA_ARM == 1 && __ARM_32BIT_STATE == 1 ) // char32_t == wchar_t => ambiguous … … 206 212 IntegralFMTDecl( signed long long int, 'd' ) 207 213 IntegralFMTDecl( unsigned long long int, 'u' ) 214 #if defined( __SIZEOF_INT128__ ) 215 IntegralFMTDecl( int128, 'd' ) 216 IntegralFMTDecl( unsigned int128, 'u' ) 217 #endif 208 218 209 219 //*********************************** floating point *********************************** … … 256 266 257 267 static inline { 258 _Ostream_Manip(const char *) bin( const char * s) { return (_Ostream_Manip(const char *))@{ s, 1, 0, 'b', { .all : 0 } }; }259 _Ostream_Manip(const char *) oct( const char * s) { return (_Ostream_Manip(const char *))@{ s, 1, 0, 'o', { .all : 0 } }; }260 _Ostream_Manip(const char *) hex( const char * s) { return (_Ostream_Manip(const char *))@{ s, 1, 0, 'x', { .all : 0 } }; }261 _Ostream_Manip(const char *) wd( unsigned int w, const char * s) { return (_Ostream_Manip(const char *))@{ s, w, 0, 's', { .all : 0 } }; }262 _Ostream_Manip(const char *) wd( unsigned int w, unsigned char pc, const char * s) { return (_Ostream_Manip(const char *))@{ s, w, pc, 's', { .flags.pc : true } }; }268 _Ostream_Manip(const char *) bin( const char s[] ) { return (_Ostream_Manip(const char *))@{ s, 1, 0, 'b', { .all : 0 } }; } 269 _Ostream_Manip(const char *) oct( const char s[] ) { return (_Ostream_Manip(const char *))@{ s, 1, 0, 'o', { .all : 0 } }; } 270 _Ostream_Manip(const char *) hex( const char s[] ) { return (_Ostream_Manip(const char *))@{ s, 1, 0, 'x', { .all : 0 } }; } 271 _Ostream_Manip(const char *) wd( unsigned int w, const char s[] ) { return (_Ostream_Manip(const char *))@{ s, w, 0, 's', { .all : 0 } }; } 272 _Ostream_Manip(const char *) wd( unsigned int w, unsigned char pc, const char s[] ) { return (_Ostream_Manip(const char *))@{ s, w, pc, 's', { .flags.pc : true } }; } 263 273 _Ostream_Manip(const char *) & wd( unsigned int w, _Ostream_Manip(const char *) & fmt ) { fmt.wd = w; return fmt; } 264 274 _Ostream_Manip(const char *) & wd( unsigned int w, unsigned char pc, _Ostream_Manip(const char *) & fmt ) { fmt.wd = w; fmt.pc = pc; fmt.flags.pc = true; return fmt; } … … 281 291 int fail( istype & ); 282 292 int eof( istype & ); 283 void open( istype & is, const char * name);293 void open( istype & is, const char name[] ); 284 294 void close( istype & is ); 285 295 istype & read( istype &, char *, size_t ); … … 316 326 istype & ?|?( istype &, long double _Complex & ); 317 327 318 // istype & ?|?( istype &, const char *);328 // istype & ?|?( istype &, const char [] ); 319 329 istype & ?|?( istype &, char * ); 320 330 … … 343 353 static inline { 344 354 _Istream_Cstr skip( unsigned int n ) { return (_Istream_Cstr){ 0p, 0p, n, { .all : 0 } }; } 345 _Istream_Cstr skip( const char * scanset) { return (_Istream_Cstr){ 0p, scanset, -1, { .all : 0 } }; }346 _Istream_Cstr incl( const char * scanset, char * s ) { return (_Istream_Cstr){ s, scanset, -1, { .flags.inex : false } }; }347 _Istream_Cstr & incl( const char * scanset, _Istream_Cstr & fmt ) { fmt.scanset = scanset; fmt.flags.inex = false; return fmt; }348 _Istream_Cstr excl( const char * scanset, char * s ) { return (_Istream_Cstr){ s, scanset, -1, { .flags.inex : true } }; }349 _Istream_Cstr & excl( const char * scanset, _Istream_Cstr & fmt ) { fmt.scanset = scanset; fmt.flags.inex = true; return fmt; }350 _Istream_Cstr ignore( const char * s) { return (_Istream_Cstr)@{ s, 0p, -1, { .flags.ignore : true } }; }355 _Istream_Cstr skip( const char scanset[] ) { return (_Istream_Cstr){ 
0p, scanset, -1, { .all : 0 } }; } 356 _Istream_Cstr incl( const char scanset[], char * s ) { return (_Istream_Cstr){ s, scanset, -1, { .flags.inex : false } }; } 357 _Istream_Cstr & incl( const char scanset[], _Istream_Cstr & fmt ) { fmt.scanset = scanset; fmt.flags.inex = false; return fmt; } 358 _Istream_Cstr excl( const char scanset[], char * s ) { return (_Istream_Cstr){ s, scanset, -1, { .flags.inex : true } }; } 359 _Istream_Cstr & excl( const char scanset[], _Istream_Cstr & fmt ) { fmt.scanset = scanset; fmt.flags.inex = true; return fmt; } 360 _Istream_Cstr ignore( const char s[] ) { return (_Istream_Cstr)@{ s, 0p, -1, { .flags.ignore : true } }; } 351 361 _Istream_Cstr & ignore( _Istream_Cstr & fmt ) { fmt.flags.ignore = true; return fmt; } 352 _Istream_Cstr wdi( unsigned int w, char * s) { return (_Istream_Cstr)@{ s, 0p, w, { .all : 0 } }; }362 _Istream_Cstr wdi( unsigned int w, char s[] ) { return (_Istream_Cstr)@{ s, 0p, w, { .all : 0 } }; } 353 363 _Istream_Cstr & wdi( unsigned int w, _Istream_Cstr & fmt ) { fmt.wd = w; return fmt; } 354 364 } // distribution -
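A recurring change throughout this header is the parameter spelling const char * becoming const char []. In C (and therefore CFA), an array parameter adjusts to a pointer, so the two declarations have identical type; the bracket form merely documents that the argument is a string rather than a pointer to a single character. A two-line C illustration (helper names are hypothetical):

    #include <stdio.h>
    #include <string.h>

    // Both declarations adjust to the same function type taking const char *.
    static size_t len_ptr( const char * s ) { return strlen( s ); }
    static size_t len_arr( const char s[] ) { return strlen( s ); }

    int main() {
        printf( "%zu %zu\n", len_ptr( "abc" ), len_arr( "abc" ) ); // prints 3 3
    } // main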
libcfa/src/math.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Apr 18 23:37:04 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jul 13 11:02:15 201813 // Update Count : 11 612 // Last Modified On : Tue Feb 4 10:27:11 2020 13 // Update Count : 117 14 14 // 15 15 … … 51 51 static inline long double fdim( long double x, long double y ) { return fdiml( x, y ); } 52 52 53 static inline float nan( const char * tag) { return nanf( tag ); }54 // extern "C" { double nan( const char *); }55 static inline long double nan( const char * tag) { return nanl( tag ); }53 static inline float nan( const char tag[] ) { return nanf( tag ); } 54 // extern "C" { double nan( const char [] ); } 55 static inline long double nan( const char tag[] ) { return nanl( tag ); } 56 56 57 57 //---------------------- Exponential ---------------------- -
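The nan overloads in math.hfa are thin wrappers over C99's nanf/nan/nanl; the tag string selects implementation-defined payload bits in the quiet NaN. A short C sketch of the underlying calls (link with -lm on most systems):

    #include <math.h>
    #include <stdio.h>

    int main() {
        float       fn = nanf( "" );      // quiet NaN, float
        double      dn = nan( "0x7" );    // tag may set payload bits (implementation-defined)
        long double ln = nanl( "" );      // quiet NaN, long double
        printf( "%f %f %Lf\n", fn, dn, ln ); // prints nan nan nan
    } // main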
libcfa/src/rational.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Wed Apr 6 17:54:28 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jul 12 18:12:08 201913 // Update Count : 18 412 // Last Modified On : Sat Feb 8 17:56:36 2020 13 // Update Count : 187 14 14 // 15 15 … … 56 56 } // rational 57 57 58 void ?{}( Rational(RationalImpl) & r, zero_t ) { 59 r{ (RationalImpl){0}, (RationalImpl){1} }; 60 } // rational 61 62 void ?{}( Rational(RationalImpl) & r, one_t ) { 63 r{ (RationalImpl){1}, (RationalImpl){1} }; 64 } // rational 58 65 59 66 // getter for numerator/denominator -
libcfa/src/startup.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jul 24 16:21:57 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jul 25 16:42:01 201813 // Update Count : 1112 // Last Modified On : Tue Feb 4 13:03:18 2020 13 // Update Count : 30 14 14 // 15 15 16 #include <time.h> // tzset 16 17 #include "startup.hfa" 17 #include <unistd.h>18 19 18 20 19 extern "C" { 21 staticvoid __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) ));20 void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) )); 22 21 void __cfaabi_appready_startup( void ) { 22 tzset(); // initialize time global variables 23 23 #ifdef __CFA_DEBUG__ 24 24 extern void heapAppStart(); … … 27 27 } // __cfaabi_appready_startup 28 28 29 staticvoid __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) ));29 void __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) )); 30 30 void __cfaabi_appready_shutdown( void ) { 31 31 #ifdef __CFA_DEBUG__ … … 41 41 struct __spinlock_t; 42 42 extern "C" { 43 void __cfaabi_dbg_record(struct __spinlock_t & this, const char * prev_name) __attribute__(( weak )) {}43 void __cfaabi_dbg_record(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {} 44 44 } 45 45 -
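The startup.cfa change moves tzset() into a constructor-attributed routine so the time-zone globals are initialized before main runs. The mechanism is the GCC/Clang constructor/destructor attribute with an explicit priority; a self-contained C sketch (the priority value 200 and the function names are arbitrary stand-ins, not the library's STARTUP_PRIORITY_APPREADY):

    #include <time.h>
    #include <stdio.h>

    // Runs before main; lower priority numbers run earlier (101-65535 are available to applications).
    __attribute__(( constructor( 200 ) )) static void app_startup( void ) {
        tzset();                          // initialize time-zone globals once, up front
        printf( "startup\n" );
    } // app_startup

    // Runs after main returns (or exit is called), in reverse priority order.
    __attribute__(( destructor( 200 ) )) static void app_shutdown( void ) {
        printf( "shutdown\n" );
    } // app_shutdown

    int main() {
        printf( "main\n" );               // output order: startup, main, shutdown
    } // main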
libcfa/src/stdhdr/assert.h
r9fb8f01 r3d5701e 10 10 // Created On : Mon Jul 4 23:25:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jul 31 23:09:32 201713 // Update Count : 1 312 // Last Modified On : Tue Feb 4 12:58:49 2020 13 // Update Count : 15 14 14 // 15 15 … … 27 27 #define assertf( expr, fmt, ... ) ((expr) ? ((void)0) : __assert_fail_f(__VSTRINGIFY__(expr), __FILE__, __LINE__, __PRETTY_FUNCTION__, fmt, ## __VA_ARGS__ )) 28 28 29 void __assert_fail_f( const char *assertion, const char *file, unsigned int line, const char *function, const char *fmt, ... ) __attribute__((noreturn, format( printf, 5, 6) ));29 void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) __attribute__((noreturn, format( printf, 5, 6) )); 30 30 #endif 31 31 -
libcfa/src/stdhdr/bfdlink.h
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jul 18 07:26:04 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 22 13:49:30 201813 // Update Count : 412 // Last Modified On : Fri Feb 7 19:05:08 2020 13 // Update Count : 6 14 14 // 15 15 16 16 // include file uses the CFA keyword "with". 17 17 #if ! defined( with ) // nesting ? 18 #define with ` with`// make keyword an identifier18 #define with ``with // make keyword an identifier 19 19 #define __CFA_BFDLINK_H__ 20 20 #endif -
libcfa/src/stdhdr/hwloc.h
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jul 18 07:45:00 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 22 13:49:58 201813 // Update Count : 412 // Last Modified On : Fri Feb 7 19:05:18 2020 13 // Update Count : 6 14 14 // 15 15 16 16 // include file uses the CFA keyword "thread". 17 17 #if ! defined( thread ) // nesting ? 18 #define thread ` thread`// make keyword an identifier18 #define thread ``thread // make keyword an identifier 19 19 #define __CFA_HWLOC_H__ 20 20 #endif -
libcfa/src/stdhdr/krb5.h
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jul 18 07:55:44 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 22 13:50:24 201813 // Update Count : 412 // Last Modified On : Fri Feb 7 19:05:35 2020 13 // Update Count : 6 14 14 // 15 15 16 16 // include file uses the CFA keyword "enable". 17 17 #if ! defined( enable ) // nesting ? 18 #define enable ` enable`// make keyword an identifier18 #define enable ``enable // make keyword an identifier 19 19 #define __CFA_KRB5_H__ 20 20 #endif -
libcfa/src/stdhdr/math.h
r9fb8f01 r3d5701e 10 10 // Created On : Mon Jul 4 23:25:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 22 18:16:07 201813 // Update Count : 1 312 // Last Modified On : Fri Feb 7 19:05:27 2020 13 // Update Count : 15 14 14 // 15 15 16 16 extern "C" { 17 17 #if ! defined( exception ) // nesting ? 18 #define exception ` exception`// make keyword an identifier18 #define exception ``exception // make keyword an identifier 19 19 #define __CFA_MATH_H__ 20 20 #endif -
libcfa/src/stdhdr/sys/ucontext.h
r9fb8f01 r3d5701e 10 10 // Created On : Thu Feb 8 23:48:16 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 8 23:50:44 201813 // Update Count : 412 // Last Modified On : Fri Feb 7 19:05:41 2020 13 // Update Count : 6 14 14 // 15 15 16 16 #if ! defined( ftype ) // nesting ? 17 #define ftype ` ftype`// make keyword an identifier17 #define ftype ``ftype // make keyword an identifier 18 18 #define __CFA_UCONTEXT_H__ 19 19 #endif -
libcfa/src/stdlib.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Thu Jan 28 17:10:29 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jun 24 17:34:44 201913 // Update Count : 4 6212 // Last Modified On : Tue Feb 4 08:27:08 2020 13 // Update Count : 486 14 14 // 15 15 … … 21 21 #include <string.h> // memcpy, memset 22 22 #include <malloc.h> // malloc_usable_size 23 #include <math.h> // fabsf, fabs, fabsl23 //#include <math.h> // fabsf, fabs, fabsl 24 24 #include <complex.h> // _Complex_I 25 25 #include <assert.h> … … 27 27 //--------------------------------------- 28 28 29 // resize, non-array types 30 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim, char fill ) { 31 size_t olen = malloc_usable_size( ptr ); // current allocation 32 char * nptr = (void *)realloc( (void *)ptr, dim * (size_t)sizeof(T) ); // C realloc 33 size_t nlen = malloc_usable_size( nptr ); // new allocation 34 if ( nlen > olen ) { // larger ? 35 memset( nptr + olen, (int)fill, nlen - olen ); // initialize added storage 36 } // 37 return (T *)nptr; 38 } // alloc 29 forall( dtype T | sized(T) ) { 30 T * alloc_set( T ptr[], size_t dim, char fill ) { // realloc array with fill 31 size_t olen = malloc_usable_size( ptr ); // current allocation 32 void * nptr = (void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 33 size_t nlen = malloc_usable_size( nptr ); // new allocation 34 if ( nlen > olen ) { // larger ? 35 memset( (char *)nptr + olen, (int)fill, nlen - olen ); // initialize added storage 36 } // if 37 return (T *)nptr; 38 } // alloc_set 39 40 T * alloc_align_set( T ptr[], size_t align, char fill ) { // aligned realloc with fill 41 size_t olen = malloc_usable_size( ptr ); // current allocation 42 void * nptr = (void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc 43 // char * nptr = alloc_align( ptr, align ); 44 size_t nlen = malloc_usable_size( nptr ); // new allocation 45 if ( nlen > olen ) { // larger ? 46 memset( (char *)nptr + olen, (int)fill, nlen - olen ); // initialize added storage 47 } // if 48 return (T *)nptr; 49 } // alloc_align_set 50 } // distribution 39 51 40 52 // allocation/deallocation and constructor/destructor, non-array types 41 53 forall( dtype T | sized(T), ttype Params | { void ?{}( T &, Params ); } ) 42 54 T * new( Params p ) { 43 return &(*malloc()){ p }; // run constructor55 return &(*malloc()){ p }; // run constructor 44 56 } // new 45 57 … … 47 59 void delete( T * ptr ) { 48 60 if ( ptr ) { // ignore null 49 ^(*ptr){}; // run destructor61 ^(*ptr){}; // run destructor 50 62 free( ptr ); 51 63 } // if … … 55 67 void delete( T * ptr, Params rest ) { 56 68 if ( ptr ) { // ignore null 57 ^(*ptr){}; // run destructor69 ^(*ptr){}; // run destructor 58 70 free( ptr ); 59 71 } // if … … 95 107 //--------------------------------------- 96 108 97 float _Complex strto( const char * sptr, char ** eptr ) {109 float _Complex strto( const char sptr[], char ** eptr ) { 98 110 float re, im; 99 111 char * eeptr; … … 106 118 } // strto 107 119 108 double _Complex strto( const char * sptr, char ** eptr ) {120 double _Complex strto( const char sptr[], char ** eptr ) { 109 121 double re, im; 110 122 char * eeptr; … … 117 129 } // strto 118 130 119 long double _Complex strto( const char * sptr, char ** eptr ) {131 long double _Complex strto( const char sptr[], char ** eptr ) { 120 132 long double re, im; 121 133 char * eeptr; -
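The new alloc_set in stdlib.cfa compares the old and new usable sizes around a realloc and fills only the added bytes. The same idea in standalone C, assuming glibc's malloc_usable_size from <malloc.h> (realloc_fill is a hypothetical name for this sketch):

    #include <malloc.h>                   // malloc_usable_size (glibc)
    #include <stdlib.h>
    #include <string.h>

    // Resize an allocation and initialize any newly usable bytes with fill.
    static void * realloc_fill( void * ptr, size_t size, int fill ) {
        size_t olen = malloc_usable_size( ptr );      // current usable allocation (0 for NULL)
        char * nptr = realloc( ptr, size );
        size_t nlen = malloc_usable_size( nptr );     // new usable allocation
        if ( nptr != NULL && nlen > olen ) {
            memset( nptr + olen, fill, nlen - olen ); // initialize added storage only
        } // if
        return nptr;
    } // realloc_fill

    int main() {
        int * a = realloc_fill( NULL, 4 * sizeof(int), 0 );  // realloc(NULL,...) acts as malloc
        a = realloc_fill( a, 16 * sizeof(int), 0 );          // grown cells arrive zero-filled
        free( a );
    } // main

Because malloc_usable_size reports the usable block size rather than the requested size, the fill covers the whole added usable region, exactly as the library version does.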
libcfa/src/stdlib.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Jul 23 14:14:59 201913 // Update Count : 37312 // Last Modified On : Tue Feb 4 08:27:01 2020 13 // Update Count : 401 14 14 // 15 15 … … 25 25 void * memset( void * dest, int fill, size_t size ); // string.h 26 26 void * memcpy( void * dest, const void * src, size_t size ); // string.h 27 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ); // CFA 27 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ); // CFA heap 28 28 } // extern "C" 29 30 void * realloc( void * oaddr, size_t nalign, size_t size ); // CFA heap 29 31 30 32 //--------------------------------------- … … 50 52 } // calloc 51 53 52 T * realloc( T * ptr, size_t size ) { 53 if ( unlikely( ptr == 0 ) ) return malloc(); 54 return (T *)(void *)realloc( (void *)ptr, size ); 54 T * realloc( T * ptr, size_t size ) { // CFA realloc, eliminate return-type cast 55 return (T *)(void *)realloc( (void *)ptr, size ); // C realloc 55 56 } // realloc 56 57 57 58 T * memalign( size_t align ) { 58 return (T *)memalign( align, sizeof(T) ); 59 return (T *)memalign( align, sizeof(T) ); // C memalign 59 60 } // memalign 60 61 62 T * cmemalign( size_t align, size_t dim ) { 63 return (T *)cmemalign( align, dim, sizeof(T) ); // CFA cmemalign 64 } // cmemalign 65 61 66 T * aligned_alloc( size_t align ) { 62 return (T *)aligned_alloc( align, sizeof(T) ); 67 return (T *)aligned_alloc( align, sizeof(T) ); // C aligned_alloc 63 68 } // aligned_alloc 64 69 … … 67 72 } // posix_memalign 68 73 69 70 74 // Cforall dynamic allocation 71 75 … … 74 78 } // alloc 75 79 76 T * alloc( char fill ) {77 T * ptr;78 if ( _Alignof(T) <= libAlign() ) ptr = (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc79 else ptr = (T *)memalign( _Alignof(T), sizeof(T) );80 return (T *)memset( ptr, (int)fill, sizeof(T) ); // initialize with fill value81 } // alloc82 83 80 T * alloc( size_t dim ) { 84 if ( _Alignof(T) <= libAlign() ) return (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc81 if ( _Alignof(T) <= libAlign() ) return (T *)(void *)malloc( dim * (size_t)sizeof(T) ); 85 82 else return (T *)memalign( _Alignof(T), dim * sizeof(T) ); 86 83 } // alloc 87 84 88 T * alloc( size_t dim, char fill ) { 85 T * alloc( T ptr[], size_t dim ) { // realloc 86 return (T *)(void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 87 } // alloc 88 89 T * alloc_set( char fill ) { 90 return (T *)memset( (T *)alloc(), (int)fill, sizeof(T) ); // initialize with fill value 91 } // alloc 92 93 T * alloc_set( T fill ) { 94 return (T *)memcpy( (T *)alloc(), &fill, sizeof(T) ); // initialize with fill value 95 } // alloc 96 97 T * alloc_set( size_t dim, char fill ) { 89 98 return (T *)memset( (T *)alloc( dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value 90 99 } // alloc 91 100 92 T * alloc( T ptr[], size_t dim ) { 93 return realloc( ptr, dim * sizeof(T) ); 94 } // alloc 95 } // distribution 96 97 98 static inline forall( dtype T | sized(T) ) { 99 T * align_alloc( size_t align ) { 101 T * alloc_set( size_t dim, T fill ) { 102 T * r = (T *)alloc( dim ); 103 for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value 104 return r; 105 } // alloc 106 107 T * alloc_set( size_t dim, const T fill[] ) { 108 return (T *)memcpy( (T *)alloc( dim ), fill, dim * sizeof(T) ); // initialize with fill value 109 } // alloc 110 } // distribution 111 112 forall( dtype T | sized(T) ) { 113 T * 
alloc_set( T ptr[], size_t dim, char fill ); // realloc array with fill 114 } // distribution 115 116 static inline forall( dtype T | sized(T) ) { 117 T * alloc_align( size_t align ) { 100 118 return (T *)memalign( align, sizeof(T) ); 101 } // align_alloc 102 103 T * align_alloc( size_t align, char fill ) { 104 T * ptr = (T *)memalign( align, sizeof(T) ); 105 return (T *)memset( ptr, (int)fill, sizeof(T) ); 106 } // align_alloc 107 108 T * align_alloc( size_t align, size_t dim ) { 119 } // alloc_align 120 121 T * alloc_align( size_t align, size_t dim ) { 109 122 return (T *)memalign( align, dim * sizeof(T) ); 110 } // align_alloc 111 112 T * align_alloc( size_t align, size_t dim, char fill ) { 113 if ( fill == '\0' ) { 114 return (T *)cmemalign( align, dim, sizeof(T) ); 115 } else { 116 return (T *)memset( (T *)memalign( align, dim * sizeof(T) ), (int)fill, dim * sizeof(T) ); 117 } // if 118 } // align_alloc 119 } // distribution 120 121 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim, char fill ); 122 123 } // alloc_align 124 125 T * alloc_align( T ptr[], size_t align ) { // aligned realloc array 126 return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc 127 } // alloc_align 128 129 T * alloc_align( T ptr[], size_t align, size_t dim ) { // aligned realloc array 130 return (T *)(void *)realloc( (void *)ptr, align, dim * sizeof(T) ); // CFA realloc 131 } // alloc_align 132 133 T * alloc_align_set( size_t align, char fill ) { 134 return (T *)memset( (T *)alloc_align( align ), (int)fill, sizeof(T) ); // initialize with fill value 135 } // alloc_align 136 137 T * alloc_align_set( size_t align, T fill ) { 138 return (T *)memcpy( (T *)alloc_align( align ), &fill, sizeof(T) ); // initialize with fill value 139 } // alloc_align 140 141 T * alloc_align_set( size_t align, size_t dim, char fill ) { 142 return (T *)memset( (T *)alloc_align( align, dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value 143 } // alloc_align 144 145 T * alloc_align_set( size_t align, size_t dim, T fill ) { 146 T * r = (T *)alloc_align( align, dim ); 147 for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value 148 return r; 149 } // alloc_align 150 151 T * alloc_align_set( size_t align, size_t dim, const T fill[] ) { 152 return (T *)memcpy( (T *)alloc_align( align, dim ), fill, dim * sizeof(T) ); 153 } // alloc_align 154 } // distribution 155 156 forall( dtype T | sized(T) ) { 157 T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill ); // aligned realloc array with fill 158 } // distribution 123 159 124 160 static inline forall( dtype T | sized(T) ) { 125 161 // data, non-array types 126 127 162 T * memset( T * dest, char fill ) { 128 163 return (T *)memset( dest, fill, sizeof(T) ); … … 136 171 static inline forall( dtype T | sized(T) ) { 137 172 // data, array types 138 139 173 T * amemset( T dest[], char fill, size_t dim ) { 140 174 return (T *)(void *)memset( dest, fill, dim * sizeof(T) ); // C memset … … 159 193 160 194 static inline { 161 int strto( const char * sptr, char ** eptr, int base ) { return (int)strtol( sptr, eptr, base ); }162 unsigned int strto( const char * sptr, char ** eptr, int base ) { return (unsigned int)strtoul( sptr, eptr, base ); }163 long int strto( const char * sptr, char ** eptr, int base ) { return strtol( sptr, eptr, base ); }164 unsigned long int strto( const char * sptr, char ** eptr, int base ) { return strtoul( sptr, eptr, base ); }165 long long int strto( const char * sptr, char ** eptr, int base ) { 
return strtoll( sptr, eptr, base ); }166 unsigned long long int strto( const char * sptr, char ** eptr, int base ) { return strtoull( sptr, eptr, base ); }167 168 float strto( const char * sptr, char ** eptr ) { return strtof( sptr, eptr ); }169 double strto( const char * sptr, char ** eptr ) { return strtod( sptr, eptr ); }170 long double strto( const char * sptr, char ** eptr ) { return strtold( sptr, eptr ); }171 } // distribution 172 173 float _Complex strto( const char * sptr, char ** eptr );174 double _Complex strto( const char * sptr, char ** eptr );175 long double _Complex strto( const char * sptr, char ** eptr );195 int strto( const char sptr[], char ** eptr, int base ) { return (int)strtol( sptr, eptr, base ); } 196 unsigned int strto( const char sptr[], char ** eptr, int base ) { return (unsigned int)strtoul( sptr, eptr, base ); } 197 long int strto( const char sptr[], char ** eptr, int base ) { return strtol( sptr, eptr, base ); } 198 unsigned long int strto( const char sptr[], char ** eptr, int base ) { return strtoul( sptr, eptr, base ); } 199 long long int strto( const char sptr[], char ** eptr, int base ) { return strtoll( sptr, eptr, base ); } 200 unsigned long long int strto( const char sptr[], char ** eptr, int base ) { return strtoull( sptr, eptr, base ); } 201 202 float strto( const char sptr[], char ** eptr ) { return strtof( sptr, eptr ); } 203 double strto( const char sptr[], char ** eptr ) { return strtod( sptr, eptr ); } 204 long double strto( const char sptr[], char ** eptr ) { return strtold( sptr, eptr ); } 205 } // distribution 206 207 float _Complex strto( const char sptr[], char ** eptr ); 208 double _Complex strto( const char sptr[], char ** eptr ); 209 long double _Complex strto( const char sptr[], char ** eptr ); 176 210 177 211 static inline { 178 int ato( const char * sptr ) { return (int)strtol( sptr, 0, 10 ); }179 unsigned int ato( const char * sptr ) { return (unsigned int)strtoul( sptr, 0, 10 ); }180 long int ato( const char * sptr ) { return strtol( sptr, 0, 10 ); }181 unsigned long int ato( const char * sptr ) { return strtoul( sptr, 0, 10 ); }182 long long int ato( const char * sptr ) { return strtoll( sptr, 0, 10 ); }183 unsigned long long int ato( const char * sptr ) { return strtoull( sptr, 0, 10 ); }184 185 float ato( const char * sptr ) { return strtof( sptr, 0); }186 double ato( const char * sptr ) { return strtod( sptr, 0); }187 long double ato( const char * sptr ) { return strtold( sptr, 0); }188 189 float _Complex ato( const char * sptr ) { return strto( sptr, NULL); }190 double _Complex ato( const char * sptr ) { return strto( sptr, NULL); }191 long double _Complex ato( const char * sptr ) { return strto( sptr, NULL); }212 int ato( const char sptr[] ) { return (int)strtol( sptr, 0p, 10 ); } 213 unsigned int ato( const char sptr[] ) { return (unsigned int)strtoul( sptr, 0p, 10 ); } 214 long int ato( const char sptr[] ) { return strtol( sptr, 0p, 10 ); } 215 unsigned long int ato( const char sptr[] ) { return strtoul( sptr, 0p, 10 ); } 216 long long int ato( const char sptr[] ) { return strtoll( sptr, 0p, 10 ); } 217 unsigned long long int ato( const char sptr[] ) { return strtoull( sptr, 0p, 10 ); } 218 219 float ato( const char sptr[] ) { return strtof( sptr, 0p ); } 220 double ato( const char sptr[] ) { return strtod( sptr, 0p ); } 221 long double ato( const char sptr[] ) { return strtold( sptr, 0p ); } 222 223 float _Complex ato( const char sptr[] ) { return strto( sptr, 0p ); } 224 double _Complex ato( const char sptr[] ) { return 
strto( sptr, 0p ); } 225 long double _Complex ato( const char sptr[] ) { return strto( sptr, 0p ); } 192 226 } // distribution 193 227 -
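The strto overloads for complex types (implemented in stdlib.cfa above) parse the textual form re+imi by calling the real-valued conversion twice and then requiring a trailing 'i'. A plausible standalone C version of that parse, under the same input format (strto_complex is an illustrative name):

    #include <stdlib.h>
    #include <complex.h>
    #include <stdio.h>

    // Parse "re+imi", e.g. "1.5+2.25i": real part, signed imaginary part, trailing 'i'.
    static double complex strto_complex( const char * sptr, char ** eptr ) {
        char * ee;
        double re = strtod( sptr, &ee );  // real part
        double im = strtod( ee, &ee );    // imaginary part, sign included
        if ( *ee != 'i' ) {               // malformed => report no progress
            if ( eptr ) *eptr = (char *)sptr;
            return 0.0;
        } // if
        if ( eptr ) *eptr = ee + 1;       // consume the 'i'
        return re + im * I;
    } // strto_complex

    int main() {
        char * end;
        double complex c = strto_complex( "1.5+2.25i", &end );
        printf( "%g%+gi\n", creal( c ), cimag( c ) );  // prints 1.5+2.25i
    } // main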
libcfa/src/time.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Mar 27 13:33:14 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jul 13 08:41:55 201913 // Update Count : 6512 // Last Modified On : Tue Feb 4 08:24:18 2020 13 // Update Count : 70 14 14 // 15 15 … … 33 33 forall( dtype ostype | ostream( ostype ) ) { 34 34 ostype & ?|?( ostype & os, Duration dur ) with( dur ) { 35 (ostype &)(os | t v/ TIMEGRAN); // print seconds36 long int ns = (t v < 0 ? -tv : tv) % TIMEGRAN; // compute nanoseconds35 (ostype &)(os | tn / TIMEGRAN); // print seconds 36 long int ns = (tn < 0 ? -tn : tn) % TIMEGRAN; // compute nanoseconds 37 37 if ( ns != 0 ) { // some ? 38 38 char buf[16]; … … 52 52 53 53 #ifdef __CFA_DEBUG__ 54 static void tabort( int year, int month, int day, int hour, int min, int sec, int nsec ) {54 static void tabort( int year, int month, int day, int hour, int min, int sec, int64_t nsec ) { 55 55 abort | "Attempt to create Time( year=" | year | "(>=1970), month=" | month | "(1-12), day=" | day | "(1-31), hour=" | hour | "(0-23), min=" | min | "(0-59), sec=" | sec 56 | "(0-60), nsec=" | nsec | "(0-999_999_999), which exceeds range 00:00:00 UTC, January 1, 1970 to 03:14:07 UTC, January 19, 2038.";56 | "(0-60), nsec=" | nsec | "(0-999_999_999), which is not in the range 00:00:00 UTC, January 1, 1970 to 03:14:07 UTC, January 19, 2038, where month and day have 1 origin."; 57 57 } // tabort 58 58 #endif // __CFA_DEBUG__ 59 59 60 void ?{}( Time & time, int year, int month, int day, int hour, int min, int sec, int nsec ) with( time ) {60 void ?{}( Time & time, int year, int month, int day, int hour, int min, int sec, int64_t nsec ) with( time ) { 61 61 tm tm; 62 62 63 tm.tm_isdst = -1; // let mktime determine if alternate timezone is in effect63 // Values can be in any range (+/-) but result must be in the epoch. 64 64 tm.tm_year = year - 1900; // mktime uses 1900 as its starting point 65 #ifdef __CFA_DEBUG__ 66 if ( month < 1 || 12 < month ) { 67 tabort( year, month, day, hour, min, sec, nsec ); 68 } // if 69 #endif // __CFA_DEBUG__ 65 // Make month in range 1-12 to match with day. 70 66 tm.tm_mon = month - 1; // mktime uses range 0-11 71 #ifdef __CFA_DEBUG__72 if ( day < 1 || 31 < day ) {73 tabort( year, month, day, hour, min, sec, nsec );74 } // if75 #endif // __CFA_DEBUG__76 67 tm.tm_mday = day; // mktime uses range 1-31 77 68 tm.tm_hour = hour; 78 69 tm.tm_min = min; 79 70 tm.tm_sec = sec; 71 tm.tm_isdst = -1; // let mktime determine if alternate timezone is in effect 80 72 time_t epochsec = mktime( &tm ); 81 73 #ifdef __CFA_DEBUG__ 82 if ( epochsec == (time_t)-1 ) {74 if ( epochsec <= (time_t)-1 ) { // MUST BE LESS THAN OR EQUAL! 83 75 tabort( year, month, day, hour, min, sec, nsec ); 84 76 } // if 85 77 #endif // __CFA_DEBUG__ 86 t v= (int64_t)(epochsec) * TIMEGRAN + nsec; // convert to nanoseconds78 tn = (int64_t)(epochsec) * TIMEGRAN + nsec; // convert to nanoseconds 87 79 #ifdef __CFA_DEBUG__ 88 if ( t v> 2147483647LL * TIMEGRAN ) { // between 00:00:00 UTC, January 1, 1970 and 03:14:07 UTC, January 19, 2038.80 if ( tn > 2147483647LL * TIMEGRAN ) { // between 00:00:00 UTC, January 1, 1970 and 03:14:07 UTC, January 19, 2038. 
89 81 tabort( year, month, day, hour, min, sec, nsec ); 90 82 } // if … … 93 85 94 86 char * yy_mm_dd( Time time, char * buf ) with( time ) { 95 time_t s = t v/ TIMEGRAN;87 time_t s = tn / TIMEGRAN; 96 88 tm tm; 97 89 gmtime_r( &s, &tm ); // tm_mon <= 11, tm_mday <= 31 … … 108 100 109 101 char * mm_dd_yy( Time time, char * buf ) with( time ) { 110 time_t s = t v/ TIMEGRAN;102 time_t s = tn / TIMEGRAN; 111 103 tm tm; 112 104 gmtime_r( &s, &tm ); // tm_mon <= 11, tm_mday <= 31 … … 123 115 124 116 char * dd_mm_yy( Time time, char * buf ) with( time ) { 125 time_t s = t v/ TIMEGRAN;117 time_t s = tn / TIMEGRAN; 126 118 tm tm; 127 119 gmtime_r( &s, &tm ); // tm_mon <= 11, tm_mday <= 31 … … 137 129 } // dd_mm_yy 138 130 139 size_t strftime( char * buf, size_t size, const char * fmt, Time time ) with( time ) {140 time_t s = t v/ TIMEGRAN;131 size_t strftime( char buf[], size_t size, const char fmt[], Time time ) with( time ) { 132 time_t s = tn / TIMEGRAN; 141 133 tm tm; 142 134 gmtime_r( &s, &tm ); … … 147 139 ostype & ?|?( ostype & os, Time time ) with( time ) { 148 140 char buf[32]; // at least 26 149 time_t s = t v/ TIMEGRAN;141 time_t s = tn / TIMEGRAN; 150 142 ctime_r( &s, (char *)&buf ); // 26 characters: "Wed Jun 30 21:49:08 1993\n" 151 143 buf[24] = '\0'; // remove trailing '\n' 152 long int ns = (t v < 0 ? -tv : tv) % TIMEGRAN; // compute nanoseconds144 long int ns = (tn < 0 ? -tn : tn) % TIMEGRAN; // compute nanoseconds 153 145 if ( ns == 0 ) { // none ? 154 146 (ostype &)(os | buf); // print date/time/year -
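The revised Time constructor in time.cfa converts calendar fields to nanoseconds since the epoch by filling a struct tm, letting mktime normalize and range-check it, then scaling the resulting seconds by TIMEGRAN (10^9). A standalone C sketch of that conversion (calendar_to_ns is an illustrative name; like the library, it relies on mktime and thus the local time zone):

    #include <time.h>
    #include <stdio.h>
    #include <stdint.h>

    #define TIMEGRAN 1000000000LL         // nanoseconds per second

    static int64_t calendar_to_ns( int year, int month, int day,
                                   int hour, int min, int sec, int64_t nsec ) {
        struct tm tm = { 0 };
        tm.tm_year = year - 1900;         // mktime counts years from 1900
        tm.tm_mon  = month - 1;           // and months from 0, so month/day can use 1 origin
        tm.tm_mday = day;
        tm.tm_hour = hour; tm.tm_min = min; tm.tm_sec = sec;
        tm.tm_isdst = -1;                 // let mktime decide if DST applies
        time_t epochsec = mktime( &tm );  // normalizes out-of-range fields, -1 on failure
        if ( epochsec == (time_t)-1 ) return -1;
        return (int64_t)epochsec * TIMEGRAN + nsec; // seconds -> nanoseconds, plus remainder
    } // calendar_to_ns

    int main() {
        printf( "%lld\n", (long long)calendar_to_ns( 2020, 2, 25, 13, 17, 33, 0 ) );
    } // main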
libcfa/src/time.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Wed Mar 14 23:18:57 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Sep 22 12:25:34 201813 // Update Count : 6 4312 // Last Modified On : Tue Feb 4 08:24:32 2020 13 // Update Count : 654 14 14 // 15 15 … … 32 32 Duration ?=?( Duration & dur, __attribute__((unused)) zero_t ) { return dur{ 0 }; } 33 33 34 Duration +?( Duration rhs ) with( rhs ) { return (Duration)@{ +tv}; }35 Duration ?+?( Duration & lhs, Duration rhs ) { return (Duration)@{ lhs.t v + rhs.tv}; }34 Duration +?( Duration rhs ) with( rhs ) { return (Duration)@{ +tn }; } 35 Duration ?+?( Duration & lhs, Duration rhs ) { return (Duration)@{ lhs.tn + rhs.tn }; } 36 36 Duration ?+=?( Duration & lhs, Duration rhs ) { lhs = lhs + rhs; return lhs; } 37 37 38 Duration -?( Duration rhs ) with( rhs ) { return (Duration)@{ -t v}; }39 Duration ?-?( Duration & lhs, Duration rhs ) { return (Duration)@{ lhs.t v - rhs.tv}; }38 Duration -?( Duration rhs ) with( rhs ) { return (Duration)@{ -tn }; } 39 Duration ?-?( Duration & lhs, Duration rhs ) { return (Duration)@{ lhs.tn - rhs.tn }; } 40 40 Duration ?-=?( Duration & lhs, Duration rhs ) { lhs = lhs - rhs; return lhs; } 41 41 42 Duration ?*?( Duration lhs, int64_t rhs ) { return (Duration)@{ lhs.t v* rhs }; }43 Duration ?*?( int64_t lhs, Duration rhs ) { return (Duration)@{ lhs * rhs.t v}; }42 Duration ?*?( Duration lhs, int64_t rhs ) { return (Duration)@{ lhs.tn * rhs }; } 43 Duration ?*?( int64_t lhs, Duration rhs ) { return (Duration)@{ lhs * rhs.tn }; } 44 44 Duration ?*=?( Duration & lhs, int64_t rhs ) { lhs = lhs * rhs; return lhs; } 45 45 46 int64_t ?/?( Duration lhs, Duration rhs ) { return lhs.t v / rhs.tv; }47 Duration ?/?( Duration lhs, int64_t rhs ) { return (Duration)@{ lhs.t v/ rhs }; }46 int64_t ?/?( Duration lhs, Duration rhs ) { return lhs.tn / rhs.tn; } 47 Duration ?/?( Duration lhs, int64_t rhs ) { return (Duration)@{ lhs.tn / rhs }; } 48 48 Duration ?/=?( Duration & lhs, int64_t rhs ) { lhs = lhs / rhs; return lhs; } 49 double div( Duration lhs, Duration rhs ) { return (double)lhs.t v / (double)rhs.tv; }50 51 Duration ?%?( Duration lhs, Duration rhs ) { return (Duration)@{ lhs.t v % rhs.tv}; }49 double div( Duration lhs, Duration rhs ) { return (double)lhs.tn / (double)rhs.tn; } 50 51 Duration ?%?( Duration lhs, Duration rhs ) { return (Duration)@{ lhs.tn % rhs.tn }; } 52 52 Duration ?%=?( Duration & lhs, Duration rhs ) { lhs = lhs % rhs; return lhs; } 53 53 54 bool ?==?( Duration lhs, Duration rhs ) { return lhs.t v == rhs.tv; }55 bool ?!=?( Duration lhs, Duration rhs ) { return lhs.t v != rhs.tv; }56 bool ?<? ( Duration lhs, Duration rhs ) { return lhs.t v < rhs.tv; }57 bool ?<=?( Duration lhs, Duration rhs ) { return lhs.t v <= rhs.tv; }58 bool ?>? ( Duration lhs, Duration rhs ) { return lhs.t v > rhs.tv; }59 bool ?>=?( Duration lhs, Duration rhs ) { return lhs.t v >= rhs.tv; }60 61 bool ?==?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.t v== 0; }62 bool ?!=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.t v!= 0; }63 bool ?<? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.t v< 0; }64 bool ?<=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.t v<= 0; }65 bool ?>? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.t v> 0; }66 bool ?>=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.t v>= 0; }67 68 Duration abs( Duration rhs ) { return rhs.t v>= 0 ? 
rhs : -rhs; }54 bool ?==?( Duration lhs, Duration rhs ) { return lhs.tn == rhs.tn; } 55 bool ?!=?( Duration lhs, Duration rhs ) { return lhs.tn != rhs.tn; } 56 bool ?<? ( Duration lhs, Duration rhs ) { return lhs.tn < rhs.tn; } 57 bool ?<=?( Duration lhs, Duration rhs ) { return lhs.tn <= rhs.tn; } 58 bool ?>? ( Duration lhs, Duration rhs ) { return lhs.tn > rhs.tn; } 59 bool ?>=?( Duration lhs, Duration rhs ) { return lhs.tn >= rhs.tn; } 60 61 bool ?==?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tn == 0; } 62 bool ?!=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tn != 0; } 63 bool ?<? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tn < 0; } 64 bool ?<=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tn <= 0; } 65 bool ?>? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tn > 0; } 66 bool ?>=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tn >= 0; } 67 68 Duration abs( Duration rhs ) { return rhs.tn >= 0 ? rhs : -rhs; } 69 69 70 70 Duration ?`ns( int64_t nsec ) { return (Duration)@{ nsec }; } … … 82 82 Duration ?`w( double weeks ) { return (Duration)@{ weeks * (7LL * 24LL * 60LL * 60LL * TIMEGRAN) }; } 83 83 84 int64_t ?`ns( Duration dur ) { return dur.t v; }85 int64_t ?`us( Duration dur ) { return dur.t v/ (TIMEGRAN / 1_000_000LL); }86 int64_t ?`ms( Duration dur ) { return dur.t v/ (TIMEGRAN / 1_000LL); }87 int64_t ?`s( Duration dur ) { return dur.t v/ TIMEGRAN; }88 int64_t ?`m( Duration dur ) { return dur.t v/ (60LL * TIMEGRAN); }89 int64_t ?`h( Duration dur ) { return dur.t v/ (60LL * 60LL * TIMEGRAN); }90 int64_t ?`d( Duration dur ) { return dur.t v/ (24LL * 60LL * 60LL * TIMEGRAN); }91 int64_t ?`w( Duration dur ) { return dur.t v/ (7LL * 24LL * 60LL * 60LL * TIMEGRAN); }92 93 Duration max( Duration lhs, Duration rhs ) { return (lhs.t v < rhs.tv) ? rhs : lhs;}94 Duration min( Duration lhs, Duration rhs ) { return !(rhs.t v < lhs.tv) ? lhs : rhs;}84 int64_t ?`ns( Duration dur ) { return dur.tn; } 85 int64_t ?`us( Duration dur ) { return dur.tn / (TIMEGRAN / 1_000_000LL); } 86 int64_t ?`ms( Duration dur ) { return dur.tn / (TIMEGRAN / 1_000LL); } 87 int64_t ?`s( Duration dur ) { return dur.tn / TIMEGRAN; } 88 int64_t ?`m( Duration dur ) { return dur.tn / (60LL * TIMEGRAN); } 89 int64_t ?`h( Duration dur ) { return dur.tn / (60LL * 60LL * TIMEGRAN); } 90 int64_t ?`d( Duration dur ) { return dur.tn / (24LL * 60LL * 60LL * TIMEGRAN); } 91 int64_t ?`w( Duration dur ) { return dur.tn / (7LL * 24LL * 60LL * 60LL * TIMEGRAN); } 92 93 Duration max( Duration lhs, Duration rhs ) { return (lhs.tn < rhs.tn) ? rhs : lhs;} 94 Duration min( Duration lhs, Duration rhs ) { return !(rhs.tn < lhs.tn) ? 
lhs : rhs;} 95 95 } // distribution 96 96 … … 143 143 //######################### Time ######################### 144 144 145 void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 );145 void ?{}( Time & time, int year, int month = 1, int day = 1, int hour = 0, int min = 0, int sec = 0, int64_t nsec = 0 ); 146 146 static inline { 147 147 Time ?=?( Time & time, __attribute__((unused)) zero_t ) { return time{ 0 }; } 148 148 149 void ?{}( Time & time, timeval t ) with( time ) { t v= (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; }149 void ?{}( Time & time, timeval t ) with( time ) { tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; } 150 150 Time ?=?( Time & time, timeval t ) with( time ) { 151 t v= (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * (TIMEGRAN / 1_000_000LL);151 tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * (TIMEGRAN / 1_000_000LL); 152 152 return time; 153 153 } // ?=? 154 154 155 void ?{}( Time & time, timespec t ) with( time ) { t v= (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec; }155 void ?{}( Time & time, timespec t ) with( time ) { tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec; } 156 156 Time ?=?( Time & time, timespec t ) with( time ) { 157 t v= (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec;157 tn = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec; 158 158 return time; 159 159 } // ?=? 160 160 161 Time ?+?( Time & lhs, Duration rhs ) { return (Time)@{ lhs.t v + rhs.tv}; }161 Time ?+?( Time & lhs, Duration rhs ) { return (Time)@{ lhs.tn + rhs.tn }; } 162 162 Time ?+?( Duration lhs, Time rhs ) { return rhs + lhs; } 163 163 Time ?+=?( Time & lhs, Duration rhs ) { lhs = lhs + rhs; return lhs; } 164 164 165 Duration ?-?( Time lhs, Time rhs ) { return (Duration)@{ lhs.t v - rhs.tv}; }166 Time ?-?( Time lhs, Duration rhs ) { return (Time)@{ lhs.t v - rhs.tv}; }165 Duration ?-?( Time lhs, Time rhs ) { return (Duration)@{ lhs.tn - rhs.tn }; } 166 Time ?-?( Time lhs, Duration rhs ) { return (Time)@{ lhs.tn - rhs.tn }; } 167 167 Time ?-=?( Time & lhs, Duration rhs ) { lhs = lhs - rhs; return lhs; } 168 bool ?==?( Time lhs, Time rhs ) { return lhs.tv == rhs.tv; } 169 bool ?!=?( Time lhs, Time rhs ) { return lhs.tv != rhs.tv; } 170 bool ?<?( Time lhs, Time rhs ) { return lhs.tv < rhs.tv; } 171 bool ?<=?( Time lhs, Time rhs ) { return lhs.tv <= rhs.tv; } 172 bool ?>?( Time lhs, Time rhs ) { return lhs.tv > rhs.tv; } 173 bool ?>=?( Time lhs, Time rhs ) { return lhs.tv >= rhs.tv; } 168 bool ?==?( Time lhs, Time rhs ) { return lhs.tn == rhs.tn; } 169 bool ?!=?( Time lhs, Time rhs ) { return lhs.tn != rhs.tn; } 170 bool ?<?( Time lhs, Time rhs ) { return lhs.tn < rhs.tn; } 171 bool ?<=?( Time lhs, Time rhs ) { return lhs.tn <= rhs.tn; } 172 bool ?>?( Time lhs, Time rhs ) { return lhs.tn > rhs.tn; } 173 bool ?>=?( Time lhs, Time rhs ) { return lhs.tn >= rhs.tn; } 174 175 int64_t ?`ns( Time t ) { return t.tn; } 174 176 } // distribution 175 177 … … 189 191 } // dmy 190 192 191 size_t strftime( char * buf, size_t size, const char * fmt, Time time );193 size_t strftime( char buf[], size_t size, const char fmt[], Time time ); 192 194 193 195 //------------------------- timeval (cont) ------------------------- 194 196 195 197 static inline void ?{}( timeval & t, Time time ) with( t, time ) { 196 tv_sec = t v/ TIMEGRAN; // seconds197 tv_usec = t v% TIMEGRAN / (TIMEGRAN / 1_000_000LL); // microseconds198 tv_sec = tn / TIMEGRAN; // seconds 199 tv_usec = tn % TIMEGRAN / (TIMEGRAN / 1_000_000LL); // microseconds 198 200 } // ?{} 199 201 … … 201 203 202 204 
static inline void ?{}( timespec & t, Time time ) with( t, time ) { 203 tv_sec = t v/ TIMEGRAN; // seconds204 tv_nsec = t v% TIMEGRAN; // nanoseconds205 tv_sec = tn / TIMEGRAN; // seconds 206 tv_nsec = tn % TIMEGRAN; // nanoseconds 205 207 } // ?{} 206 208 -
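Every Duration and Time operator above is plain 64-bit arithmetic on a nanosecond count (the field renamed from tv to tn in this changeset); conversion to timeval/timespec just splits that count with division and modulus. A compact C sketch of the representation and the timespec split (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define TIMEGRAN 1000000000LL         // nanoseconds per second

    typedef struct { int64_t tn; } Duration;   // tn: signed nanosecond count

    static Duration dur_add( Duration a, Duration b ) { return (Duration){ a.tn + b.tn }; }

    static struct timespec dur_to_timespec( Duration d ) {
        struct timespec t;
        t.tv_sec  = d.tn / TIMEGRAN;      // whole seconds
        t.tv_nsec = d.tn % TIMEGRAN;      // leftover nanoseconds
        return t;
    } // dur_to_timespec

    int main() {
        Duration d = dur_add( (Duration){ 3 * TIMEGRAN }, (Duration){ 500000000 } ); // 3.5 s
        struct timespec t = dur_to_timespec( d );
        printf( "%lld s %ld ns\n", (long long)t.tv_sec, (long)t.tv_nsec ); // prints 3 s 500000000 ns
    } // main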
libcfa/src/time_t.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Apr 10 14:42:03 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 13 07:51:47 201813 // Update Count : 612 // Last Modified On : Sun Jan 5 08:22:46 2020 13 // Update Count : 7 14 14 // 15 15 … … 20 20 21 21 struct Duration { // private 22 int64_t t v; // nanoseconds22 int64_t tn; // nanoseconds 23 23 }; // Duration 24 24 25 static inline void ?{}( Duration & dur ) with( dur ) { t v= 0; }26 static inline void ?{}( Duration & dur, __attribute__((unused)) zero_t ) with( dur ) { t v= 0; }25 static inline void ?{}( Duration & dur ) with( dur ) { tn = 0; } 26 static inline void ?{}( Duration & dur, __attribute__((unused)) zero_t ) with( dur ) { tn = 0; } 27 27 28 28 … … 30 30 31 31 struct Time { // private 32 uint64_t t v; // nanoseconds since UNIX epoch32 uint64_t tn; // nanoseconds since UNIX epoch 33 33 }; // Time 34 34 35 static inline void ?{}( Time & time ) with( time ) { t v= 0; }36 static inline void ?{}( Time & time, __attribute__((unused)) zero_t ) with( time ) { t v= 0; }35 static inline void ?{}( Time & time ) with( time ) { tn = 0; } 36 static inline void ?{}( Time & time, __attribute__((unused)) zero_t ) with( time ) { tn = 0; } 37 37 38 38 // Local Variables: //