- Timestamp:
- May 11, 2020, 1:53:29 PM
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 504a7dc
- Parents:
- b7d6a36 (diff), a7b486b (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- libcfa
- Files:
- 7 added
- 40 edited
libcfa/Makefile.in
rb7d6a36 r6a490b2
@@ 106-110 @@
  configure.lineno config.status.lineno
  mkinstalldirs = $(install_sh) -d
+ CONFIG_HEADER = $(top_builddir)/prelude/defines.hfa
  CONFIG_CLEAN_FILES =
  CONFIG_CLEAN_VPATH_FILES =
libcfa/configure
rb7d6a36 r6a490b2 790 790 enable_distcc 791 791 with_cfa_name 792 enable_static 792 793 enable_shared 793 enable_static794 794 with_pic 795 795 enable_fast_install … … 1452 1452 --disable-silent-rules verbose build output (undo: "make V=0") 1453 1453 --enable-distcc whether or not to enable distributed compilation 1454 --enable-static[=PKGS] build static libraries [default=no] 1454 1455 --enable-shared[=PKGS] build shared libraries [default=yes] 1455 --enable-static[=PKGS] build static libraries [default=yes]1456 1456 --enable-fast-install[=PKGS] 1457 1457 optimize for fast installation [default=yes] … … 1960 1960 1961 1961 } # ac_fn_cxx_try_link 1962 1963 # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES 1964 # ------------------------------------------------------- 1965 # Tests whether HEADER exists, giving a warning if it cannot be compiled using 1966 # the include files in INCLUDES and setting the cache variable VAR 1967 # accordingly. 1968 ac_fn_c_check_header_mongrel () 1969 { 1970 as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack 1971 if eval \${$3+:} false; then : 1972 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 1973 $as_echo_n "checking for $2... " >&6; } 1974 if eval \${$3+:} false; then : 1975 $as_echo_n "(cached) " >&6 1976 fi 1977 eval ac_res=\$$3 1978 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 1979 $as_echo "$ac_res" >&6; } 1980 else 1981 # Is the header compilable? 1982 { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 1983 $as_echo_n "checking $2 usability... " >&6; } 1984 cat confdefs.h - <<_ACEOF >conftest.$ac_ext 1985 /* end confdefs.h. */ 1986 $4 1987 #include <$2> 1988 _ACEOF 1989 if ac_fn_c_try_compile "$LINENO"; then : 1990 ac_header_compiler=yes 1991 else 1992 ac_header_compiler=no 1993 fi 1994 rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext 1995 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 1996 $as_echo "$ac_header_compiler" >&6; } 1997 1998 # Is the header present? 1999 { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 2000 $as_echo_n "checking $2 presence... " >&6; } 2001 cat confdefs.h - <<_ACEOF >conftest.$ac_ext 2002 /* end confdefs.h. */ 2003 #include <$2> 2004 _ACEOF 2005 if ac_fn_c_try_cpp "$LINENO"; then : 2006 ac_header_preproc=yes 2007 else 2008 ac_header_preproc=no 2009 fi 2010 rm -f conftest.err conftest.i conftest.$ac_ext 2011 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 2012 $as_echo "$ac_header_preproc" >&6; } 2013 2014 # So? What about this header? 2015 case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( 2016 yes:no: ) 2017 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 2018 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} 2019 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 2020 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} 2021 ;; 2022 no:yes:* ) 2023 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 2024 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} 2025 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 2026 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} 2027 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 2028 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} 2029 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 2030 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} 2031 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 2032 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} 2033 ( $as_echo "## --------------------------------------- ## 2034 ## Report this to cforall@plg.uwaterloo.ca ## 2035 ## --------------------------------------- ##" 2036 ) | sed "s/^/$as_me: WARNING: /" >&2 2037 ;; 2038 esac 2039 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 2040 $as_echo_n "checking for $2... " >&6; } 2041 if eval \${$3+:} false; then : 2042 $as_echo_n "(cached) " >&6 2043 else 2044 eval "$3=\$ac_header_compiler" 2045 fi 2046 eval ac_res=\$$3 2047 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 2048 $as_echo "$ac_res" >&6; } 2049 fi 2050 eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno 2051 2052 } # ac_fn_c_check_header_mongrel 1962 2053 cat >config.log <<_ACEOF 1963 2054 This file contains any messages produced by compilers while … … 7939 8030 7940 8031 # Set options 8032 # Check whether --enable-static was given. 8033 if test "${enable_static+set}" = set; then : 8034 enableval=$enable_static; p=${PACKAGE-default} 8035 case $enableval in 8036 yes) enable_static=yes ;; 8037 no) enable_static=no ;; 8038 *) 8039 enable_static=no 8040 # Look at the argument we got. We use all the common list separators. 8041 lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, 8042 for pkg in $enableval; do 8043 IFS=$lt_save_ifs 8044 if test "X$pkg" = "X$p"; then 8045 enable_static=yes 8046 fi 8047 done 8048 IFS=$lt_save_ifs 8049 ;; 8050 esac 8051 else 8052 enable_static=no 8053 fi 8054 8055 8056 8057 8058 8059 8060 7941 8061 7942 8062 … … 7971 8091 fi 7972 8092 7973 7974 7975 7976 7977 7978 7979 7980 7981 # Check whether --enable-static was given.7982 if test "${enable_static+set}" = set; then :7983 enableval=$enable_static; p=${PACKAGE-default}7984 case $enableval in7985 yes) enable_static=yes ;;7986 no) enable_static=no ;;7987 *)7988 enable_static=no7989 # Look at the argument we got. 
We use all the common list separators.7990 lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,7991 for pkg in $enableval; do7992 IFS=$lt_save_ifs7993 if test "X$pkg" = "X$p"; then7994 enable_static=yes7995 fi7996 done7997 IFS=$lt_save_ifs7998 ;;7999 esac8000 else8001 enable_static=yes8002 fi8003 8093 8004 8094 … … 16859 16949 16860 16950 16951 for ac_header in linux/io_uring.h 16952 do : 16953 ac_fn_c_check_header_mongrel "$LINENO" "linux/io_uring.h" "ac_cv_header_linux_io_uring_h" "$ac_includes_default" 16954 if test "x$ac_cv_header_linux_io_uring_h" = xyes; then : 16955 cat >>confdefs.h <<_ACEOF 16956 #define HAVE_LINUX_IO_URING_H 1 16957 _ACEOF 16958 16959 fi 16960 16961 done 16962 16963 for ac_func in preadv2 pwritev2 16964 do : 16965 as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` 16966 ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" 16967 if eval test \"x\$"$as_ac_var"\" = x"yes"; then : 16968 cat >>confdefs.h <<_ACEOF 16969 #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 16970 _ACEOF 16971 16972 fi 16973 done 16974 16975 16861 16976 ac_config_files="$ac_config_files Makefile src/Makefile prelude/Makefile" 16977 16978 16979 ac_config_headers="$ac_config_headers prelude/defines.hfa" 16862 16980 16863 16981 … … 16952 17070 test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' 16953 17071 16954 # Transform confdefs.h into DEFS. 16955 # Protect against shell expansion while executing Makefile rules. 16956 # Protect against Makefile macro expansion. 16957 # 16958 # If the first sed substitution is executed (which looks for macros that 16959 # take arguments), then branch to the quote section. Otherwise, 16960 # look for a macro that doesn't take arguments. 16961 ac_script=' 16962 :mline 16963 /\\$/{ 16964 N 16965 s,\\\n,, 16966 b mline 16967 } 16968 t clear 16969 :clear 16970 s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g 16971 t quote 16972 s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g 16973 t quote 16974 b any 16975 :quote 16976 s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g 16977 s/\[/\\&/g 16978 s/\]/\\&/g 16979 s/\$/$$/g 16980 H 16981 :any 16982 ${ 16983 g 16984 s/^\n// 16985 s/\n/ /g 16986 p 16987 } 16988 ' 16989 DEFS=`sed -n "$ac_script" confdefs.h` 16990 17072 DEFS=-DHAVE_CONFIG_H 16991 17073 16992 17074 ac_libobjs= … … 17466 17548 esac 17467 17549 17550 case $ac_config_headers in *" 17551 "*) set x $ac_config_headers; shift; ac_config_headers=$*;; 17552 esac 17468 17553 17469 17554 … … 17471 17556 # Files that config.status was made for. 17472 17557 config_files="$ac_config_files" 17558 config_headers="$ac_config_headers" 17473 17559 config_commands="$ac_config_commands" 17474 17560 … … 17492 17578 --file=FILE[:TEMPLATE] 17493 17579 instantiate the configuration file FILE 17580 --header=FILE[:TEMPLATE] 17581 instantiate the configuration header FILE 17494 17582 17495 17583 Configuration files: 17496 17584 $config_files 17585 17586 Configuration headers: 17587 $config_headers 17497 17588 17498 17589 Configuration commands: … … 17562 17653 as_fn_append CONFIG_FILES " '$ac_optarg'" 17563 17654 ac_need_defaults=false;; 17564 --he | --h | --help | --hel | -h ) 17655 --header | --heade | --head | --hea ) 17656 $ac_shift 17657 case $ac_optarg in 17658 *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; 17659 esac 17660 as_fn_append CONFIG_HEADERS " '$ac_optarg'" 17661 ac_need_defaults=false;; 17662 --he | --h) 17663 # Conflict between --help and --header 17664 as_fn_error $? 
"ambiguous option: \`$1' 17665 Try \`$0 --help' for more information.";; 17666 --help | --hel | -h ) 17565 17667 $as_echo "$ac_cs_usage"; exit ;; 17566 17668 -q | -quiet | --quiet | --quie | --qui | --qu | --q \ … … 17625 17727 macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' 17626 17728 macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' 17729 enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' 17627 17730 enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' 17628 enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'17629 17731 pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' 17630 17732 enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' … … 18009 18111 "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; 18010 18112 "prelude/Makefile") CONFIG_FILES="$CONFIG_FILES prelude/Makefile" ;; 18113 "prelude/defines.hfa") CONFIG_HEADERS="$CONFIG_HEADERS prelude/defines.hfa" ;; 18011 18114 18012 18115 *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; … … 18021 18124 if $ac_need_defaults; then 18022 18125 test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files 18126 test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers 18023 18127 test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands 18024 18128 fi … … 18209 18313 fi # test -n "$CONFIG_FILES" 18210 18314 18211 18212 eval set X " :F $CONFIG_FILES :C $CONFIG_COMMANDS" 18315 # Set up the scripts for CONFIG_HEADERS section. 18316 # No need to generate them if there are no CONFIG_HEADERS. 18317 # This happens for instance with `./config.status Makefile'. 18318 if test -n "$CONFIG_HEADERS"; then 18319 cat >"$ac_tmp/defines.awk" <<\_ACAWK || 18320 BEGIN { 18321 _ACEOF 18322 18323 # Transform confdefs.h into an awk script `defines.awk', embedded as 18324 # here-document in config.status, that substitutes the proper values into 18325 # config.h.in to produce config.h. 18326 18327 # Create a delimiter string that does not exist in confdefs.h, to ease 18328 # handling of long lines. 18329 ac_delim='%!_!# ' 18330 for ac_last_try in false false :; do 18331 ac_tt=`sed -n "/$ac_delim/p" confdefs.h` 18332 if test -z "$ac_tt"; then 18333 break 18334 elif $ac_last_try; then 18335 as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 18336 else 18337 ac_delim="$ac_delim!$ac_delim _$ac_delim!! " 18338 fi 18339 done 18340 18341 # For the awk script, D is an array of macro values keyed by name, 18342 # likewise P contains macro parameters if any. Preserve backslash 18343 # newline sequences. 
18344 18345 ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* 18346 sed -n ' 18347 s/.\{148\}/&'"$ac_delim"'/g 18348 t rset 18349 :rset 18350 s/^[ ]*#[ ]*define[ ][ ]*/ / 18351 t def 18352 d 18353 :def 18354 s/\\$// 18355 t bsnl 18356 s/["\\]/\\&/g 18357 s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ 18358 D["\1"]=" \3"/p 18359 s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p 18360 d 18361 :bsnl 18362 s/["\\]/\\&/g 18363 s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ 18364 D["\1"]=" \3\\\\\\n"\\/p 18365 t cont 18366 s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p 18367 t cont 18368 d 18369 :cont 18370 n 18371 s/.\{148\}/&'"$ac_delim"'/g 18372 t clear 18373 :clear 18374 s/\\$// 18375 t bsnlc 18376 s/["\\]/\\&/g; s/^/"/; s/$/"/p 18377 d 18378 :bsnlc 18379 s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p 18380 b cont 18381 ' <confdefs.h | sed ' 18382 s/'"$ac_delim"'/"\\\ 18383 "/g' >>$CONFIG_STATUS || ac_write_fail=1 18384 18385 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 18386 for (key in D) D_is_set[key] = 1 18387 FS = "" 18388 } 18389 /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { 18390 line = \$ 0 18391 split(line, arg, " ") 18392 if (arg[1] == "#") { 18393 defundef = arg[2] 18394 mac1 = arg[3] 18395 } else { 18396 defundef = substr(arg[1], 2) 18397 mac1 = arg[2] 18398 } 18399 split(mac1, mac2, "(") #) 18400 macro = mac2[1] 18401 prefix = substr(line, 1, index(line, defundef) - 1) 18402 if (D_is_set[macro]) { 18403 # Preserve the white space surrounding the "#". 18404 print prefix "define", macro P[macro] D[macro] 18405 next 18406 } else { 18407 # Replace #undef with comments. This is necessary, for example, 18408 # in the case of _POSIX_SOURCE, which is predefined and required 18409 # on some systems where configure will not decide to define it. 18410 if (defundef == "undef") { 18411 print "/*", prefix defundef, macro, "*/" 18412 next 18413 } 18414 } 18415 } 18416 { print } 18417 _ACAWK 18418 _ACEOF 18419 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 18420 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 18421 fi # test -n "$CONFIG_HEADERS" 18422 18423 18424 eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" 18213 18425 shift 18214 18426 for ac_tag … … 18429 18641 || as_fn_error $? "could not create $ac_file" "$LINENO" 5 18430 18642 ;; 18431 18643 :H) 18644 # 18645 # CONFIG_HEADER 18646 # 18647 if test x"$ac_file" != x-; then 18648 { 18649 $as_echo "/* $configure_input */" \ 18650 && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" 18651 } >"$ac_tmp/config.h" \ 18652 || as_fn_error $? "could not create $ac_file" "$LINENO" 5 18653 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then 18654 { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 18655 $as_echo "$as_me: $ac_file is unchanged" >&6;} 18656 else 18657 rm -f "$ac_file" 18658 mv "$ac_tmp/config.h" "$ac_file" \ 18659 || as_fn_error $? "could not create $ac_file" "$LINENO" 5 18660 fi 18661 else 18662 $as_echo "/* $configure_input */" \ 18663 && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ 18664 || as_fn_error $? "could not create -" "$LINENO" 5 18665 fi 18666 # Compute "$ac_file"'s index in $config_headers. 
18667 _am_arg="$ac_file" 18668 _am_stamp_count=1 18669 for _am_header in $config_headers :; do 18670 case $_am_header in 18671 $_am_arg | $_am_arg:* ) 18672 break ;; 18673 * ) 18674 _am_stamp_count=`expr $_am_stamp_count + 1` ;; 18675 esac 18676 done 18677 echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || 18678 $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ 18679 X"$_am_arg" : 'X\(//\)[^/]' \| \ 18680 X"$_am_arg" : 'X\(//\)$' \| \ 18681 X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || 18682 $as_echo X"$_am_arg" | 18683 sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ 18684 s//\1/ 18685 q 18686 } 18687 /^X\(\/\/\)[^/].*/{ 18688 s//\1/ 18689 q 18690 } 18691 /^X\(\/\/\)$/{ 18692 s//\1/ 18693 q 18694 } 18695 /^X\(\/\).*/{ 18696 s//\1/ 18697 q 18698 } 18699 s/.*/./; q'`/stamp-h$_am_stamp_count 18700 ;; 18432 18701 18433 18702 :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 … … 18587 18856 macro_revision=$macro_revision 18588 18857 18858 # Whether or not to build static libraries. 18859 build_old_libs=$enable_static 18860 18589 18861 # Whether or not to build shared libraries. 18590 18862 build_libtool_libs=$enable_shared 18591 18592 # Whether or not to build static libraries.18593 build_old_libs=$enable_static18594 18863 18595 18864 # What type of objects to build. -
libcfa/configure.ac
rb7d6a36 r6a490b2
@@ 109-113 @@
  
  # Checks for programs.
- LT_INIT
+ LT_INIT([disable-static])
  
  AC_PROG_CXX
…
@@ 118-132 @@
  AC_PROG_MAKE_SET
  
+ AC_CHECK_HEADERS([linux/io_uring.h])
+ AC_CHECK_FUNCS([preadv2 pwritev2])
+ 
  AC_CONFIG_FILES([
  	Makefile
…
  ])
  
+ AC_CONFIG_HEADERS(prelude/defines.hfa)
+ 
  AC_OUTPUT()
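The two probes added above make configure record io_uring and preadv2/pwritev2 support in the generated prelude/defines.hfa header (via AC_CONFIG_HEADERS). The sketch below shows how runtime code can consume those feature macros; it is editorial rather than part of the changeset, the read_at helper and its fallback body are illustrative, and the HAVE_* spellings follow the usual autoconf naming convention.

	#include "defines.hfa"           // generated by config.status after this changeset (include path illustrative)
	#include <unistd.h>
	#include <sys/uio.h>

	#if defined(HAVE_LINUX_IO_URING_H)
		#include <linux/io_uring.h>  // only pulled in when the header probe succeeded
	#endif

	// Hypothetical helper: prefer preadv2 when configure found it, else fall back to pread.
	static ssize_t read_at( int fd, void * buf, size_t len, off_t off ) {
		#if defined(HAVE_PREADV2)
			struct iovec iov = { buf, len };
			return preadv2( fd, &iov, 1, off, 0 );
		#else
			return pread( fd, buf, len, off );
		#endif
	}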
libcfa/prelude/Makefile.am
rb7d6a36 r6a490b2
@@ 21-25 @@
  # put into lib for now
  cfalibdir = ${CFA_LIBDIR}
- cfalib_DATA = gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c
+ cfalib_DATA = gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c defines.hfa
  
  CC = @LOCAL_CFACC@
libcfa/prelude/Makefile.in
rb7d6a36 r6a490b2 1 # Makefile.in generated by automake 1.1 6.1from Makefile.am.1 # Makefile.in generated by automake 1.15 from Makefile.am. 2 2 # @configure_input@ 3 3 4 # Copyright (C) 1994-201 8Free Software Foundation, Inc.4 # Copyright (C) 1994-2014 Free Software Foundation, Inc. 5 5 6 6 # This Makefile.in is free software; the Free Software Foundation … … 104 104 DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) 105 105 mkinstalldirs = $(install_sh) -d 106 CONFIG_HEADER = defines.hfa 106 107 CONFIG_CLEAN_FILES = 107 108 CONFIG_CLEAN_VPATH_FILES = … … 154 155 am__installdirs = "$(DESTDIR)$(cfalibdir)" 155 156 DATA = $(cfalib_DATA) 156 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) 157 am__DIST_COMMON = $(srcdir)/Makefile.in 157 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ 158 $(LISP)defines.hfa.in 159 # Read a list of newline-separated strings from the standard input, 160 # and print each of them once, without duplicates. Input order is 161 # *not* preserved. 162 am__uniquify_input = $(AWK) '\ 163 BEGIN { nonempty = 0; } \ 164 { items[$$0] = 1; nonempty = 1; } \ 165 END { if (nonempty) { for (i in items) print i; }; } \ 166 ' 167 # Make sure the list of sources is unique. This is necessary because, 168 # e.g., the same source file might be shared among _SOURCES variables 169 # for different programs/libraries. 170 am__define_uniq_tagged_files = \ 171 list='$(am__tagged_files)'; \ 172 unique=`for i in $$list; do \ 173 if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ 174 done | $(am__uniquify_input)` 175 ETAGS = etags 176 CTAGS = ctags 177 am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/defines.hfa.in 158 178 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) 159 179 ACLOCAL = @ACLOCAL@ … … 306 326 # put into lib for now 307 327 cfalibdir = ${CFA_LIBDIR} 308 cfalib_DATA = gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c 328 cfalib_DATA = gcc-builtins.cf builtins.cf extras.cf prelude.cfa bootloader.c defines.hfa 309 329 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC @ARCH_FLAGS@ @CONFIG_CFLAGS@ 310 330 AM_CFAFLAGS = @CONFIG_CFAFLAGS@ 311 331 MOSTLYCLEANFILES = bootloader.c builtins.cf extras.cf gcc-builtins.c gcc-builtins.cf prelude.cfa 312 332 MAINTAINERCLEANFILES = ${addprefix ${libdir}/,${cfalib_DATA}} ${addprefix ${libdir}/,${lib_LIBRARIES}} 313 all: all-am 333 all: defines.hfa 334 $(MAKE) $(AM_MAKEFLAGS) all-am 314 335 315 336 .SUFFIXES: … … 331 352 cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ 332 353 *) \ 333 echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__ maybe_remake_depfiles)'; \334 cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__ maybe_remake_depfiles);; \354 echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ 355 cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ 335 356 esac; 336 357 … … 343 364 cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh 344 365 $(am__aclocal_m4_deps): 366 367 defines.hfa: stamp-h1 368 @test -f $@ || rm -f stamp-h1 369 @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 370 371 stamp-h1: $(srcdir)/defines.hfa.in $(top_builddir)/config.status 372 @rm -f stamp-h1 373 cd $(top_builddir) && $(SHELL) ./config.status prelude/defines.hfa 374 $(srcdir)/defines.hfa.in: $(am__configure_deps) 375 ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) 376 rm -f stamp-h1 377 touch $@ 378 379 distclean-hdr: 380 -rm -f defines.hfa stamp-h1 345 381 346 382 
mostlyclean-libtool: … … 370 406 files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ 371 407 dir='$(DESTDIR)$(cfalibdir)'; $(am__uninstall_files_from_dir) 372 tags TAGS: 373 374 ctags CTAGS: 375 376 cscope cscopelist: 377 378 379 distdir: $(BUILT_SOURCES) 380 $(MAKE) $(AM_MAKEFLAGS) distdir-am 381 382 distdir-am: $(DISTFILES) 408 409 ID: $(am__tagged_files) 410 $(am__define_uniq_tagged_files); mkid -fID $$unique 411 tags: tags-am 412 TAGS: tags 413 414 tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) 415 set x; \ 416 here=`pwd`; \ 417 $(am__define_uniq_tagged_files); \ 418 shift; \ 419 if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ 420 test -n "$$unique" || unique=$$empty_fix; \ 421 if test $$# -gt 0; then \ 422 $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ 423 "$$@" $$unique; \ 424 else \ 425 $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ 426 $$unique; \ 427 fi; \ 428 fi 429 ctags: ctags-am 430 431 CTAGS: ctags 432 ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) 433 $(am__define_uniq_tagged_files); \ 434 test -z "$(CTAGS_ARGS)$$unique" \ 435 || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ 436 $$unique 437 438 GTAGS: 439 here=`$(am__cd) $(top_builddir) && pwd` \ 440 && $(am__cd) $(top_srcdir) \ 441 && gtags -i $(GTAGS_ARGS) "$$here" 442 cscopelist: cscopelist-am 443 444 cscopelist-am: $(am__tagged_files) 445 list='$(am__tagged_files)'; \ 446 case "$(srcdir)" in \ 447 [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ 448 *) sdir=$(subdir)/$(srcdir) ;; \ 449 esac; \ 450 for i in $$list; do \ 451 if test -f "$$i"; then \ 452 echo "$(subdir)/$$i"; \ 453 else \ 454 echo "$$sdir/$$i"; \ 455 fi; \ 456 done >> $(top_builddir)/cscope.files 457 458 distclean-tags: 459 -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags 460 461 distdir: $(DISTFILES) 383 462 @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ 384 463 topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ … … 412 491 check-am: all-am 413 492 check: check-am 414 all-am: Makefile $(DATA) 493 all-am: Makefile $(DATA) defines.hfa 415 494 installdirs: 416 495 for dir in "$(DESTDIR)$(cfalibdir)"; do \ … … 455 534 distclean: distclean-am 456 535 -rm -f Makefile 457 distclean-am: clean-am distclean-generic 536 distclean-am: clean-am distclean-generic distclean-hdr distclean-tags 458 537 459 538 dvi: dvi-am … … 516 595 uninstall-am: uninstall-cfalibDATA 517 596 518 .MAKE: install-am install-strip 519 520 .PHONY: all all-am check check-am clean clean-generic clean-libtool \ 521 cscopelist-am ctags-am distclean distclean-generic \ 522 distclean-libtool distdir dvi dvi-am html html-am info info-am \ 597 .MAKE: all install-am install-strip 598 599 .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ 600 clean-libtool cscopelist-am ctags ctags-am distclean \ 601 distclean-generic distclean-hdr distclean-libtool \ 602 distclean-tags distdir dvi dvi-am html html-am info info-am \ 523 603 install install-am install-cfalibDATA install-data \ 524 604 install-data-am install-dvi install-dvi-am install-exec \ … … 529 609 maintainer-clean-generic maintainer-clean-local mostlyclean \ 530 610 mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ 531 tags -am uninstall uninstall-am uninstall-cfalibDATA611 tags tags-am uninstall uninstall-am uninstall-cfalibDATA 532 612 533 613 .PRECIOUS: Makefile -
libcfa/prelude/builtins.c
rb7d6a36 r6a490b2
@@ 48-75 @@
  void exit( int status, const char fmt[], ... ) __attribute__ (( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
  void abort( const char fmt[], ... ) __attribute__ (( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
+ 
+ forall(dtype T)
+ static inline T & identity(T & i) {
+ 	return i;
+ }
+ 
+ // generator support
+ struct $generator {
+ 	inline int;
+ };
+ 
+ static inline void ?{}($generator & this) { ((int&)this) = 0; }
+ static inline void ^?{}($generator &) {}
+ 
+ trait is_generator(dtype T) {
+ 	void main(T & this);
+ 	$generator * get_generator(T & this);
+ };
+ 
+ forall(dtype T | is_generator(T))
+ static inline T & resume(T & gen) {
+ 	main(gen);
+ 	return gen;
+ }
  
  // implicit increment, decrement if += defined, and implicit not if != defined
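The generator support added above consists of the $generator state, the is_generator trait (a main routine plus a get_generator accessor), and a generic resume that simply calls main and returns its argument. The following is an editorial sketch of a type satisfying that trait, for orientation only; the countdown type and its behaviour are not part of the changeset.

	// countdown satisfies is_generator: it supplies main and get_generator.
	struct countdown {
		$generator gen;     // bookkeeping field introduced by this changeset
		int n;
	};
	static inline $generator * get_generator( countdown & this ) { return &this.gen; }

	void main( countdown & this ) {   // one step per resume in this simple sketch
		this.n -= 1;
	}

	// usage sketch:
	//   countdown c;  c.n = 3;
	//   resume( resume( c ) );       // each resume calls main and returns c, so c.n is now 1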
libcfa/src/Makefile.am
rb7d6a36 r6a490b2
@@ 11-16 @@
  ## Created On : Sun May 31 08:54:01 2015
  ## Last Modified By : Peter A. Buhr
- ## Last Modified On : Mon Jul 15 22:43:27 2019
- ## Update Count : 241
+ ## Last Modified On : Mon Mar 16 18:07:59 2020
+ ## Update Count : 242
  ###############################################################################
  
…
@@ 33-37 @@
  # The built sources must not depend on the installed headers
  AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@
- AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
+ AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -fexceptions -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
  AM_CCASFLAGS = -g -Wall -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@
  CFACC = @CFACC@
…
@@ 39-52 @@
  #----------------------------------------------------------------------------------------------------------------
  if BUILDLIB
- headers_nosrc = math.hfa gmp.hfa time_t.hfa bits/align.hfa bits/containers.hfa bits/defs.hfa bits/debug.hfa bits/locks.hfa
+ headers_nosrc = bitmanip.hfa math.hfa gmp.hfa time_t.hfa bits/align.hfa bits/containers.hfa bits/defs.hfa bits/debug.hfa bits/locks.hfa containers/list.hfa
  headers = fstream.hfa iostream.hfa iterator.hfa limits.hfa rational.hfa time.hfa stdlib.hfa common.hfa \
  	containers/maybe.hfa containers/pair.hfa containers/result.hfa containers/vector.hfa
…
  thread_headers_nosrc = concurrency/invoke.h
  thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa concurrency/monitor.hfa concurrency/mutex.hfa
- thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}
+ thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}
  else
  	headers =
libcfa/src/Makefile.in
rb7d6a36 r6a490b2 105 105 $(am__nobase_cfa_include_HEADERS_DIST) $(am__DIST_COMMON) 106 106 mkinstalldirs = $(install_sh) -d 107 CONFIG_HEADER = $(top_builddir)/prelude/defines.hfa 107 108 CONFIG_CLEAN_FILES = 108 109 CONFIG_CLEAN_VPATH_FILES = … … 164 165 am__libcfathread_la_SOURCES_DIST = \ 165 166 concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa \ 166 concurrency/invoke.c concurrency/preemption.cfa \ 167 concurrency/ready_queue.cfa concurrency/coroutine.cfa \ 168 concurrency/thread.cfa concurrency/kernel.cfa \ 169 concurrency/monitor.cfa concurrency/mutex.cfa 167 concurrency/invoke.c concurrency/io.cfa \ 168 concurrency/preemption.cfa concurrency/ready_queue.cfa \ 169 concurrency/coroutine.cfa concurrency/thread.cfa \ 170 concurrency/kernel.cfa concurrency/monitor.cfa \ 171 concurrency/mutex.cfa 170 172 @BUILDLIB_TRUE@am__objects_3 = concurrency/coroutine.lo \ 171 173 @BUILDLIB_TRUE@ concurrency/thread.lo concurrency/kernel.lo \ … … 174 176 @BUILDLIB_TRUE@ concurrency/CtxSwitch-@ARCHITECTURE@.lo \ 175 177 @BUILDLIB_TRUE@ concurrency/alarm.lo concurrency/invoke.lo \ 176 @BUILDLIB_TRUE@ concurrency/ preemption.lo \178 @BUILDLIB_TRUE@ concurrency/io.lo concurrency/preemption.lo \ 177 179 @BUILDLIB_TRUE@ concurrency/ready_queue.lo $(am__objects_3) 178 180 am_libcfathread_la_OBJECTS = $(am__objects_4) … … 194 196 am__v_at_0 = @ 195 197 am__v_at_1 = 196 DEFAULT_INCLUDES = -I.@am__isrc@ 198 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/prelude 197 199 depcomp = $(SHELL) $(top_srcdir)/automake/depcomp 198 200 am__depfiles_maybe = depfiles … … 238 240 limits.hfa rational.hfa time.hfa stdlib.hfa common.hfa \ 239 241 containers/maybe.hfa containers/pair.hfa containers/result.hfa \ 240 containers/vector.hfa math.hfa gmp.hfa time_t.hfa \242 containers/vector.hfa bitmanip.hfa math.hfa gmp.hfa time_t.hfa \ 241 243 bits/align.hfa bits/containers.hfa bits/defs.hfa \ 242 bits/debug.hfa bits/locks.hfa con currency/coroutine.hfa \243 concurrency/ thread.hfa concurrency/kernel.hfa \244 concurrency/ monitor.hfa concurrency/mutex.hfa \245 concurrency/ invoke.h244 bits/debug.hfa bits/locks.hfa containers/list.hfa \ 245 concurrency/coroutine.hfa concurrency/thread.hfa \ 246 concurrency/kernel.hfa concurrency/monitor.hfa \ 247 concurrency/mutex.hfa concurrency/invoke.h 246 248 HEADERS = $(nobase_cfa_include_HEADERS) 247 249 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) … … 433 435 am__v_GOC_0 = @echo " GOC " $@; 434 436 am__v_GOC_1 = 437 AM_V_PY = $(am__v_PY_@AM_V@) 438 am__v_PY_ = $(am__v_PY_@AM_DEFAULT_V@) 439 am__v_PY_0 = @echo " PYTHON " $@; 440 am__v_PY_1 = 435 441 AM_V_RUST = $(am__v_RUST_@AM_V@) 436 442 am__v_RUST_ = $(am__v_RUST_@AM_DEFAULT_V@) 437 am__v_RUST_0 = @echo " RUST 443 am__v_RUST_0 = @echo " RUST " $@; 438 444 am__v_RUST_1 = 439 445 AM_V_NODEJS = $(am__v_NODEJS_@AM_V@) 440 446 am__v_NODEJS_ = $(am__v_NODEJS_@AM_DEFAULT_V@) 441 am__v_NODEJS_0 = @echo " NODEJS 447 am__v_NODEJS_0 = @echo " NODEJS " $@; 442 448 am__v_NODEJS_1 = 443 449 AM_V_JAVAC = $(am__v_JAVAC_@AM_V@) … … 453 459 # The built sources must not depend on the installed headers 454 460 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 455 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC - pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@461 AM_CFLAGS = -g -Wall -Wno-unused-function -fPIC -fexceptions -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 456 462 AM_CCASFLAGS = -g -Wall -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 457 463 
@BUILDLIB_FALSE@headers_nosrc = 458 464 459 465 #---------------------------------------------------------------------------------------------------------------- 460 @BUILDLIB_TRUE@headers_nosrc = math.hfa gmp.hfa time_t.hfa bits/align.hfa bits/containers.hfa bits/defs.hfa bits/debug.hfa bits/locks.hfa466 @BUILDLIB_TRUE@headers_nosrc = bitmanip.hfa math.hfa gmp.hfa time_t.hfa bits/align.hfa bits/containers.hfa bits/defs.hfa bits/debug.hfa bits/locks.hfa containers/list.hfa 461 467 @BUILDLIB_FALSE@headers = 462 468 @BUILDLIB_TRUE@headers = fstream.hfa iostream.hfa iterator.hfa limits.hfa rational.hfa time.hfa stdlib.hfa common.hfa \ … … 471 477 @BUILDLIB_FALSE@thread_headers = 472 478 @BUILDLIB_TRUE@thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa concurrency/monitor.hfa concurrency/mutex.hfa 473 @BUILDLIB_TRUE@thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/ preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}479 @BUILDLIB_TRUE@thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa} 474 480 475 481 #---------------------------------------------------------------------------------------------------------------- … … 605 611 concurrency/$(DEPDIR)/$(am__dirstamp) 606 612 concurrency/invoke.lo: concurrency/$(am__dirstamp) \ 613 concurrency/$(DEPDIR)/$(am__dirstamp) 614 concurrency/io.lo: concurrency/$(am__dirstamp) \ 607 615 concurrency/$(DEPDIR)/$(am__dirstamp) 608 616 concurrency/preemption.lo: concurrency/$(am__dirstamp) \ -
libcfa/src/bits/containers.hfa
rb7d6a36 r6a490b2 146 146 static inline forall( dtype T | is_node(T) ) { 147 147 void ?{}( __queue(T) & this ) with( this ) { 148 head{ 0p };148 head{ 1p }; 149 149 tail{ &head }; 150 verify(*tail == 1p); 150 151 } 151 152 152 153 void append( __queue(T) & this, T * val ) with( this ) { 153 154 verify(tail != 0p); 155 verify(*tail == 1p); 154 156 *tail = val; 155 157 tail = &get_next( *val ); 158 *tail = 1p; 156 159 } 157 160 158 161 T * pop_head( __queue(T) & this ) { 162 verify(*this.tail == 1p); 159 163 T * head = this.head; 160 if( head ) {164 if( head != 1p ) { 161 165 this.head = get_next( *head ); 162 if( !get_next( *head )) {166 if( get_next( *head ) == 1p ) { 163 167 this.tail = &this.head; 164 168 } 165 169 get_next( *head ) = 0p; 166 } 167 return head; 170 verify(*this.tail == 1p); 171 verify( get_next(*head) == 0p ); 172 return head; 173 } 174 verify(*this.tail == 1p); 175 return 0p; 168 176 } 169 177 … … 180 188 get_next( *val ) = 0p; 181 189 182 verify( (head == 0p) == (&head == tail) );183 verify( *tail == 0p );190 verify( (head == 1p) == (&head == tail) ); 191 verify( *tail == 1p ); 184 192 return val; 185 193 } … … 266 274 return this.head != 0; 267 275 } 276 277 void move_to_front( __dllist(T) & src, __dllist(T) & dst, T & node ) { 278 remove (src, node); 279 push_front(dst, node); 280 } 268 281 } 269 282 #undef next -
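The __queue changes above replace the 0p end-of-list marker with a 1p sentinel, so a node whose next pointer is 0p is known not to be on any queue, and pop_head now returns 0p explicitly when the queue is empty; the added verify calls check that invariant. Below is a small editorial sketch of an intrusive node usable with append/pop_head, assuming the is_node trait used by __queue only requires the get_next accessor shown; the request type and its fields are illustrative.

	// request carries its own link field, as __queue expects of intrusive nodes.
	struct request {
		request * link;      // set to the 1p sentinel while request is last in a queue
		int payload;
	};
	static inline request * & get_next( request & this ) { return this.link; }

	// usage sketch:
	//   __queue(request) q;           // empty: head is the 1p sentinel, tail points at head
	//   request r;  append( q, &r );  // r.link becomes 1p
	//   request * p = pop_head( q );  // returns &r, then 0p once the queue is empty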
libcfa/src/bits/debug.hfa
rb7d6a36 r6a490b2 9 9 // Author : Thierry Delisle 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Tue Feb 4 12:29:21202013 // Update Count : 911 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Apr 27 10:15:00 2020 13 // Update Count : 10 14 14 // 15 15 … … 23 23 #define __cfaabi_dbg_ctx_param const char caller[] 24 24 #define __cfaabi_dbg_ctx_param2 , const char caller[] 25 #define __cfaabi_dbg_ctx_fwd caller 26 #define __cfaabi_dbg_ctx_fwd2 , caller 25 27 #else 26 28 #define __cfaabi_dbg_debug_do(...) … … 30 32 #define __cfaabi_dbg_ctx_param 31 33 #define __cfaabi_dbg_ctx_param2 34 #define __cfaabi_dbg_ctx_fwd 35 #define __cfaabi_dbg_ctx_fwd2 32 36 #endif 33 37 … … 36 40 #endif 37 41 #include <stdarg.h> 38 #include <stdio.h>39 #include <unistd.h>40 42 41 43 extern void __cfaabi_bits_write( int fd, const char buffer[], int len ); … … 46 48 extern void __cfaabi_bits_print_vararg( int fd, const char fmt[], va_list arg ); 47 49 extern void __cfaabi_bits_print_buffer( int fd, char buffer[], int buffer_size, const char fmt[], ... ) __attribute__(( format(printf, 4, 5) )); 50 51 #if defined(__CFA_DEBUG_PRINT__) \ 52 || defined(__CFA_DEBUG_PRINT_IO__) || defined(__CFA_DEBUG_PRINT_IO_CORE__) \ 53 || defined(__CFA_DEBUG_PRINT_MONITOR__) || defined(__CFA_DEBUG_PRINT_PREEMPTION__) \ 54 || defined(__CFA_DEBUG_PRINT_RUNTIME_CORE__) || defined(__CFA_DEBUG_PRINT_EXCEPTION__) 55 #include <stdio.h> 56 #include <unistd.h> 57 #endif 48 58 #ifdef __cforall 49 59 } 50 60 #endif 51 61 52 // #define __CFA_DEBUG_PRINT__ 53 62 // Deprecated: Use the versions with the new module names. 54 63 #ifdef __CFA_DEBUG_PRINT__ 55 64 #define __cfaabi_dbg_write( buffer, len ) __cfaabi_bits_write( STDERR_FILENO, buffer, len ) 56 65 #define __cfaabi_dbg_acquire() __cfaabi_bits_acquire() 57 66 #define __cfaabi_dbg_release() __cfaabi_bits_release() 58 #define __cfaabi_dbg_print_safe(...) __cfaabi_bits_print_safe ( STDERR_FILENO, __VA_ARGS__ )59 #define __cfaabi_dbg_print_nolock(...) __cfaabi_bits_print_nolock ( STDERR_FILENO, __VA_ARGS__ )60 #define __cfaabi_dbg_print_buffer(...) __cfaabi_bits_print_buffer ( STDERR_FILENO, __VA_ARGS__ )67 #define __cfaabi_dbg_print_safe(...) __cfaabi_bits_print_safe ( STDERR_FILENO, __VA_ARGS__ ) 68 #define __cfaabi_dbg_print_nolock(...) __cfaabi_bits_print_nolock ( STDERR_FILENO, __VA_ARGS__ ) 69 #define __cfaabi_dbg_print_buffer(...) __cfaabi_bits_print_buffer ( STDERR_FILENO, __VA_ARGS__ ) 61 70 #define __cfaabi_dbg_print_buffer_decl(...) char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( STDERR_FILENO, __dbg_text, __dbg_len ); 62 #define __cfaabi_dbg_print_buffer_local(...) __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_ bits_write( STDERR_FILENO, __dbg_text, __dbg_len );71 #define __cfaabi_dbg_print_buffer_local(...) __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_dbg_write( STDERR_FILENO, __dbg_text, __dbg_len ); 63 72 #else 64 73 #define __cfaabi_dbg_write(...) ((void)0) … … 72 81 #endif 73 82 83 // Debug print functions and statements: 84 // Most are wrappers around the bits printing function but are not always used. 85 // If they are used depends if the group (first argument) is active or not. The group must be one 86 // defined belowe. The other arguments depend on the wrapped function. 
87 #define __cfadbg_write(group, buffer, len) \ 88 __CFADBG_PRINT_GROUP_##group(__cfaabi_bits_write(STDERR_FILENO, buffer, len)) 89 #define __cfadbg_acquire(group) \ 90 __CFADBG_PRINT_GROUP_##group(__cfaabi_bits_acquire()) 91 #define __cfadbg_release(group) \ 92 __CFADBG_PRINT_GROUP_##group(__cfaabi_bits_release()) 93 #define __cfadbg_print_safe(group, ...) \ 94 __CFADBG_PRINT_GROUP_##group(__cfaabi_bits_print_safe(STDERR_FILENO, __VA_ARGS__)) 95 #define __cfadbg_print_nolock(group, ...) \ 96 __CFADBG_PRINT_GROUP_##group(__cfaabi_bits_print_nolock(STDERR_FILENO, __VA_ARGS__)) 97 #define __cfadbg_print_buffer(group, ...) \ 98 __CFADBG_PRINT_GROUP_##group(__cfaabi_bits_print_buffer(STDERR_FILENO, __VA_ARGS__)) 99 #define __cfadbg_print_buffer_decl(group, ...) \ 100 __CFADBG_PRINT_GROUP_##group(char __dbg_text[256]; int __dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write( __dbg_text, __dbg_len )) 101 #define __cfadbg_print_buffer_local(group, ...) \ 102 __CFADBG_PRINT_GROUP_##group(__dbg_len = snprintf( __dbg_text, 256, __VA_ARGS__ ); __cfaabi_bits_write(STDERR_FILENO, __dbg_text, __dbg_len)) 103 104 // The debug print groups: 105 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_IO__) 106 # define __CFADBG_PRINT_GROUP_io(...) __VA_ARGS__ 107 #else 108 # define __CFADBG_PRINT_GROUP_io(...) ((void)0) 109 #endif 110 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_IO__) || defined(__CFA_DEBUG_PRINT_IO_CORE__) 111 # define __CFADBG_PRINT_GROUP_io_core(...) __VA_ARGS__ 112 #else 113 # define __CFADBG_PRINT_GROUP_io_core(...) ((void)0) 114 #endif 115 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_MONITOR__) 116 # define __CFADBG_PRINT_GROUP_monitor(...) __VA_ARGS__ 117 #else 118 # define __CFADBG_PRINT_GROUP_monitor(...) ((void)0) 119 #endif 120 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_PREEMPTION__) 121 # define __CFADBG_PRINT_GROUP_preemption(...) __VA_ARGS__ 122 #else 123 # define __CFADBG_PRINT_GROUP_preemption(...) ((void)0) 124 #endif 125 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_RUNTIME_CORE__) 126 # define __CFADBG_PRINT_GROUP_runtime_core(...) __VA_ARGS__ 127 #else 128 # define __CFADBG_PRINT_GROUP_runtime_core(...) ((void)0) 129 #endif 130 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 131 # define __CFADBG_PRINT_GROUP_ready_queue(...) __VA_ARGS__ 132 #else 133 # define __CFADBG_PRINT_GROUP_ready_queue(...) ((void)0) 134 #endif 135 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_EXCEPTION__) 136 # define __CFADBG_PRINT_GROUP_exception(...) __VA_ARGS__ 137 #else 138 # define __CFADBG_PRINT_GROUP_exception(...) ((void)0) 139 #endif 140 74 141 // Local Variables: // 75 142 // mode: c // -
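The new __cfadbg_* wrappers above gate each debug print behind a named group (io, io_core, monitor, preemption, runtime_core, ready_queue, exception); a group's prints are compiled in only when __CFA_DEBUG_PRINT__ or the matching __CFA_DEBUG_PRINT_<GROUP>__ macro is defined, and expand to ((void)0) otherwise. A short editorial example of a call site follows; the function and message text are illustrative.

	#include "bits/debug.hfa"

	// Build with -D__CFA_DEBUG_PRINT_IO__ (or the catch-all -D__CFA_DEBUG_PRINT__)
	// to see this output; in ordinary builds the macro compiles to nothing.
	static void submit_request( int fd ) {
		__cfadbg_print_safe( io, "Kernel I/O : submitting request on fd %d\n", fd );
		// ... actual submission elided ...
	}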
libcfa/src/bits/locks.hfa
rb7d6a36 r6a490b2 54 54 55 55 #ifdef __CFA_DEBUG__ 56 void __cfaabi_dbg_record (__spinlock_t & this, const char prev_name[]);56 void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]); 57 57 #else 58 #define __cfaabi_dbg_record (x, y)58 #define __cfaabi_dbg_record_lock(x, y) 59 59 #endif 60 60 } 61 62 extern void yield( unsigned int );63 61 64 62 static inline void ?{}( __spinlock_t & this ) { … … 68 66 // Lock the spinlock, return false if already acquired 69 67 static inline bool try_lock ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) { 68 disable_interrupts(); 70 69 bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0); 71 70 if( result ) { 72 disable_interrupts(); 73 __cfaabi_dbg_record( this, caller ); 71 __cfaabi_dbg_record_lock( this, caller ); 72 } else { 73 enable_interrupts_noPoll(); 74 74 } 75 75 return result; … … 83 83 #endif 84 84 85 disable_interrupts(); 85 86 for ( unsigned int i = 1;; i += 1 ) { 86 87 if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break; … … 98 99 #endif 99 100 } 100 disable_interrupts(); 101 __cfaabi_dbg_record( this, caller ); 101 __cfaabi_dbg_record_lock( this, caller ); 102 102 } 103 103 104 104 static inline void unlock( __spinlock_t & this ) { 105 __atomic_clear( &this.lock, __ATOMIC_RELEASE ); 105 106 enable_interrupts_noPoll(); 106 __atomic_clear( &this.lock, __ATOMIC_RELEASE );107 107 } 108 108 … … 112 112 #endif 113 113 114 extern "C" { 115 char * strerror(int); 116 } 117 #define CHECKED(x) { int err = x; if( err != 0 ) abort("KERNEL ERROR: Operation \"" #x "\" return error %d - %s\n", err, strerror(err)); } 118 114 119 struct __bin_sem_t { 115 bool signaled;116 120 pthread_mutex_t lock; 117 121 pthread_cond_t cond; 122 int val; 118 123 }; 119 124 120 125 static inline void ?{}(__bin_sem_t & this) with( this ) { 121 signaled = false; 122 pthread_mutex_init(&lock, NULL); 123 pthread_cond_init (&cond, NULL); 126 // Create the mutex with error checking 127 pthread_mutexattr_t mattr; 128 pthread_mutexattr_init( &mattr ); 129 pthread_mutexattr_settype( &mattr, PTHREAD_MUTEX_ERRORCHECK_NP); 130 pthread_mutex_init(&lock, &mattr); 131 132 pthread_cond_init (&cond, 0p); 133 val = 0; 124 134 } 125 135 126 136 static inline void ^?{}(__bin_sem_t & this) with( this ) { 127 pthread_mutex_destroy(&lock);128 pthread_cond_destroy (&cond);137 CHECKED( pthread_mutex_destroy(&lock) ); 138 CHECKED( pthread_cond_destroy (&cond) ); 129 139 } 130 140 131 141 static inline void wait(__bin_sem_t & this) with( this ) { 132 142 verify(__cfaabi_dbg_in_kernel()); 133 pthread_mutex_lock(&lock);134 if(!signaled) { // this must be a loop, not if!143 CHECKED( pthread_mutex_lock(&lock) ); 144 while(val < 1) { 135 145 pthread_cond_wait(&cond, &lock); 136 146 } 137 signaled = false;138 pthread_mutex_unlock(&lock);147 val -= 1; 148 CHECKED( pthread_mutex_unlock(&lock) ); 139 149 } 140 150 141 static inline voidpost(__bin_sem_t & this) with( this ) {142 verify(__cfaabi_dbg_in_kernel());151 static inline bool post(__bin_sem_t & this) with( this ) { 152 bool needs_signal = false; 143 153 144 pthread_mutex_lock(&lock); 145 bool needs_signal = !signaled; 146 signaled = true; 147 pthread_mutex_unlock(&lock); 154 CHECKED( pthread_mutex_lock(&lock) ); 155 if(val < 1) { 156 val += 1; 157 pthread_cond_signal(&cond); 158 needs_signal = true; 159 } 160 CHECKED( pthread_mutex_unlock(&lock) ); 148 161 149 if (needs_signal) 150 pthread_cond_signal(&cond); 162 return needs_signal; 151 163 } 164 165 
#undef CHECKED 152 166 #endif -
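The revised __bin_sem_t above is a one-slot semaphore built from an error-checking pthread mutex and a condition variable: wait blocks while val is below 1 and then consumes it, and post raises val to at most 1, returning true only when the post took effect. An editorial usage sketch follows; the sleeper/waker scenario is illustrative, and in the runtime these calls happen in kernel context (the debug build asserts this), which the sketch ignores.

	// Parking and waking with the revised binary semaphore.
	__bin_sem_t sem;                     // constructor builds the error-checking mutex and condition

	void sleeper(void) {
		wait( sem );                     // blocks while val < 1, then takes it back to 0
	}

	void waker(void) {
		bool posted = post( sem );       // true only when val was 0, i.e. the post was needed
		(void)posted;
	}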
libcfa/src/bits/signal.hfa
rb7d6a36 r6a490b2
@@ 54-58 @@
  	sig, handler, flags, errno, strerror( errno )
  );
- _exit( EXIT_FAILURE );
+ _Exit( EXIT_FAILURE );
  } // if
  }
libcfa/src/concurrency/CtxSwitch-arm.S
rb7d6a36 r6a490b2
@@ 13-20 @@
  .text
  .align 2
- .global CtxSwitch
- .type CtxSwitch, %function
+ .global __cfactx_switch
+ .type __cfactx_switch, %function
  
- CtxSwitch:
+ __cfactx_switch:
  	@ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification)
  	@ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved
…
@@ 52-62 @@
  	mov r15, r14
  #endif // R9_SPECIAL
  
  .text
  .align 2
- .global CtxInvokeStub
- .type CtxInvokeStub, %function
+ .global __cfactx_invoke_stub
+ .type __cfactx_invoke_stub, %function
  
- CtxInvokeStub:
+ __cfactx_invoke_stub:
  	ldmfd r13!, {r0-r1}
  	mov r15, r1
libcfa/src/concurrency/CtxSwitch-i386.S
rb7d6a36 r6a490b2
@@ 43-49 @@
  .text
  .align 2
- .globl CtxSwitch
- .type CtxSwitch, @function
- CtxSwitch:
+ .globl __cfactx_switch
+ .type __cfactx_switch, @function
+ __cfactx_switch:
  
  	// Copy the "from" context argument from the stack to register eax
…
@@ 83-87 @@
  
  	ret
- .size CtxSwitch, .-CtxSwitch
+ .size __cfactx_switch, .-__cfactx_switch
  
  // Local Variables: //
libcfa/src/concurrency/CtxSwitch-x86_64.S
rb7d6a36 r6a490b2
@@ 44-50 @@
  .text
  .align 2
- .globl CtxSwitch
- .type CtxSwitch, @function
- CtxSwitch:
+ .globl __cfactx_switch
+ .type __cfactx_switch, @function
+ __cfactx_switch:
  
  	// Save volatile registers on the stack.
…
@@ 77-81 @@
  
  	ret
- .size CtxSwitch, .-CtxSwitch
+ .size __cfactx_switch, .-__cfactx_switch
  
  //-----------------------------------------------------------------------------
…
@@ 83-93 @@
  .text
  .align 2
- .globl CtxInvokeStub
- .type CtxInvokeStub, @function
- CtxInvokeStub:
+ .globl __cfactx_invoke_stub
+ .type __cfactx_invoke_stub, @function
+ __cfactx_invoke_stub:
  	movq %rbx, %rdi
  	movq %r12, %rsi
  	jmp *%r13
- .size CtxInvokeStub, .-CtxInvokeStub
+ .size __cfactx_invoke_stub, .-__cfactx_invoke_stub
  
  // Local Variables: //
libcfa/src/concurrency/alarm.cfa
rb7d6a36 r6a490b2 47 47 //============================================================================================= 48 48 49 void ?{}( alarm_node_t & this, thread_desc* thrd, Time alarm, Duration period ) with( this ) {49 void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ) with( this ) { 50 50 this.thrd = thrd; 51 51 this.alarm = alarm; 52 52 this.period = period; 53 next = 0;54 53 set = false; 55 54 kernel_alarm = false; … … 60 59 this.alarm = alarm; 61 60 this.period = period; 62 next = 0;63 61 set = false; 64 62 kernel_alarm = true; … … 71 69 } 72 70 73 #if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__)) 74 bool validate( alarm_list_t * this ) { 75 alarm_node_t ** it = &this->head; 76 while( (*it) ) { 77 it = &(*it)->next; 71 void insert( alarm_list_t * this, alarm_node_t * n ) { 72 alarm_node_t * it = & (*this)`first; 73 while( it && (n->alarm > it->alarm) ) { 74 it = & (*it)`next; 75 } 76 if ( it ) { 77 insert_before( *it, *n ); 78 } else { 79 insert_last(*this, *n); 78 80 } 79 81 80 return it == this->tail; 81 } 82 #endif 83 84 static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) { 85 verify( !n->next ); 86 if( p == this->tail ) { 87 this->tail = &n->next; 88 } 89 else { 90 n->next = *p; 91 } 92 *p = n; 93 94 verify( validate( this ) ); 95 } 96 97 void insert( alarm_list_t * this, alarm_node_t * n ) { 98 alarm_node_t ** it = &this->head; 99 while( (*it) && (n->alarm > (*it)->alarm) ) { 100 it = &(*it)->next; 101 } 102 103 insert_at( this, n, it ); 104 105 verify( validate( this ) ); 82 verify( validate( *this ) ); 106 83 } 107 84 108 85 alarm_node_t * pop( alarm_list_t * this ) { 109 alarm_node_t * head = this->head; 86 verify( validate( *this ) ); 87 alarm_node_t * head = & (*this)`first; 110 88 if( head ) { 111 this->head = head->next; 112 if( !head->next ) { 113 this->tail = &this->head; 114 } 115 head->next = 0p; 89 remove(*head); 116 90 } 117 verify( validate( this ) );91 verify( validate( *this ) ); 118 92 return head; 119 93 } 120 94 121 static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) {122 verify( it );123 verify( (*it) == n );124 125 (*it) = n->next;126 if( !n-> next ) {127 this->tail = it;128 }129 n->next = 0p;130 131 verify( validate( this ) );132 }133 134 static inline void remove( alarm_list_t * this, alarm_node_t * n ) {135 alarm_node_t ** it = &this->head;136 while( (*it) && (*it) != n ) {137 it = &(*it)->next;138 }139 140 verify( validate( this ) );141 142 if( *it ) { remove_at( this, n, it ); }143 144 verify( validate( this ) );145 }146 147 95 void register_self( alarm_node_t * this ) { 148 alarm_list_t * alarms = &event_kernel->alarms;96 alarm_list_t & alarms = event_kernel->alarms; 149 97 150 98 disable_interrupts(); … … 152 100 { 153 101 verify( validate( alarms ) ); 154 bool first = ! alarms->head;102 bool first = ! 
& alarms`first; 155 103 156 insert( alarms, this );104 insert( &alarms, this ); 157 105 if( first ) { 158 __kernel_set_timer( alarms ->head->alarm - __kernel_get_time() );106 __kernel_set_timer( alarms`first.alarm - __kernel_get_time() ); 159 107 } 160 108 } … … 168 116 lock( event_kernel->lock __cfaabi_dbg_ctx2 ); 169 117 { 170 verify( validate( &event_kernel->alarms ) );171 remove( &event_kernel->alarms,this );118 verify( validate( event_kernel->alarms ) ); 119 remove( *this ); 172 120 } 173 121 unlock( event_kernel->lock ); … … 176 124 } 177 125 126 //============================================================================================= 127 // Utilities 128 //============================================================================================= 129 130 void sleep( Duration duration ) { 131 alarm_node_t node = { active_thread(), __kernel_get_time() + duration, 0`s }; 132 133 register_self( &node ); 134 park( __cfaabi_dbg_ctx ); 135 136 /* paranoid */ verify( !node.set ); 137 /* paranoid */ verify( & node`next == 0p ); 138 /* paranoid */ verify( & node`prev == 0p ); 139 } 140 178 141 // Local Variables: // 179 142 // mode: c // -
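Besides moving the alarm list onto the doubly-linked dlist from containers/list.hfa, the change above adds a user-visible sleep( Duration ): it builds an alarm node for the running thread, registers it, and parks until the alarm fires. A minimal editorial example of calling it from a CFA thread follows; the Worker thread and the header set are illustrative, and the diff does not show which header ultimately exports sleep.

	#include <thread.hfa>
	#include <time.hfa>

	thread Worker {};
	void main( Worker & this ) {
		sleep( 100`ms );     // registers an alarm node, parks, and resumes when the alarm fires
	}

	// int main() { Worker w; }   // the thread runs (and sleeps) between construction and destruction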
libcfa/src/concurrency/alarm.hfa
rb7d6a36 r6a490b2 23 23 #include "time.hfa" 24 24 25 struct thread_desc; 25 #include <containers/list.hfa> 26 27 struct $thread; 26 28 struct processor; 27 29 … … 40 42 Time alarm; // time when alarm goes off 41 43 Duration period; // if > 0 => period of alarm 42 alarm_node_t * next; // intrusive link list field 44 45 DLISTED_MGD_IMPL_IN(alarm_node_t) 43 46 44 47 union { 45 thread_desc* thrd; // thrd who created event48 $thread * thrd; // thrd who created event 46 49 processor * proc; // proc who created event 47 50 }; … … 50 53 bool kernel_alarm :1; // true if this is not a user defined alarm 51 54 }; 55 DLISTED_MGD_IMPL_OUT(alarm_node_t) 52 56 53 typedef alarm_node_t ** __alarm_it_t; 54 55 void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period ); 57 void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ); 56 58 void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period ); 57 59 void ^?{}( alarm_node_t & this ); 58 60 59 struct alarm_list_t { 60 alarm_node_t * head; 61 __alarm_it_t tail; 62 }; 63 64 static inline void ?{}( alarm_list_t & this ) with( this ) { 65 head = 0; 66 tail = &head; 67 } 61 typedef dlist(alarm_node_t, alarm_node_t) alarm_list_t; 68 62 69 63 void insert( alarm_list_t * this, alarm_node_t * n ); -
libcfa/src/concurrency/coroutine.cfa
rb7d6a36 r6a490b2 37 37 38 38 extern "C" { 39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc*) __attribute__ ((__noreturn__));39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__)); 40 40 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__)); 41 41 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) { … … 89 89 } 90 90 91 void ?{}( coroutine_desc& this, const char name[], void * storage, size_t storageSize ) with( this ) {91 void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ) with( this ) { 92 92 (this.context){0p, 0p}; 93 93 (this.stack){storage, storageSize}; … … 99 99 } 100 100 101 void ^?{}( coroutine_desc& this) {101 void ^?{}($coroutine& this) { 102 102 if(this.state != Halted && this.state != Start && this.state != Primed) { 103 coroutine_desc* src = TL_GET( this_thread )->curr_cor;104 coroutine_desc* dst = &this;103 $coroutine * src = TL_GET( this_thread )->curr_cor; 104 $coroutine * dst = &this; 105 105 106 106 struct _Unwind_Exception storage; … … 115 115 } 116 116 117 CoroutineCtxSwitch( src, dst );117 $ctx_switch( src, dst ); 118 118 } 119 119 } … … 123 123 forall(dtype T | is_coroutine(T)) 124 124 void prime(T& cor) { 125 coroutine_desc* this = get_coroutine(cor);125 $coroutine* this = get_coroutine(cor); 126 126 assert(this->state == Start); 127 127 … … 187 187 // is not inline (We can't inline Cforall in C) 188 188 extern "C" { 189 void __ leave_coroutine( struct coroutine_desc* src ) {190 coroutine_desc* starter = src->cancellation != 0 ? src->last : src->starter;189 void __cfactx_cor_leave( struct $coroutine * src ) { 190 $coroutine * starter = src->cancellation != 0 ? src->last : src->starter; 191 191 192 192 src->state = Halted; … … 201 201 src->name, src, starter->name, starter ); 202 202 203 CoroutineCtxSwitch( src, starter );204 } 205 206 struct coroutine_desc * __finish_coroutine(void) {207 struct coroutine_desc* cor = kernelTLS.this_thread->curr_cor;203 $ctx_switch( src, starter ); 204 } 205 206 struct $coroutine * __cfactx_cor_finish(void) { 207 struct $coroutine * cor = kernelTLS.this_thread->curr_cor; 208 208 209 209 if(cor->state == Primed) { 210 suspend();210 __cfactx_suspend(); 211 211 } 212 212 -
libcfa/src/concurrency/coroutine.hfa
rb7d6a36 r6a490b2 25 25 trait is_coroutine(dtype T) { 26 26 void main(T & this); 27 coroutine_desc* get_coroutine(T & this);27 $coroutine * get_coroutine(T & this); 28 28 }; 29 29 30 #define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X& this) { return &this.__cor; } void main(X& this)30 #define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this) 31 31 32 32 //----------------------------------------------------------------------------- … … 35 35 // void ^?{}( coStack_t & this ); 36 36 37 void ?{}( coroutine_desc& this, const char name[], void * storage, size_t storageSize );38 void ^?{}( coroutine_desc& this );37 void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ); 38 void ^?{}( $coroutine & this ); 39 39 40 static inline void ?{}( coroutine_desc& this) { this{ "Anonymous Coroutine", 0p, 0 }; }41 static inline void ?{}( coroutine_desc& this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; }42 static inline void ?{}( coroutine_desc& this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }43 static inline void ?{}( coroutine_desc& this, const char name[]) { this{ name, 0p, 0 }; }44 static inline void ?{}( coroutine_desc& this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }40 static inline void ?{}( $coroutine & this) { this{ "Anonymous Coroutine", 0p, 0 }; } 41 static inline void ?{}( $coroutine & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; } 42 static inline void ?{}( $coroutine & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; } 43 static inline void ?{}( $coroutine & this, const char name[]) { this{ name, 0p, 0 }; } 44 static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; } 45 45 46 46 //----------------------------------------------------------------------------- 47 47 // Public coroutine API 48 static inline void suspend(void);49 50 forall(dtype T | is_coroutine(T))51 static inline T & resume(T & cor);52 53 48 forall(dtype T | is_coroutine(T)) 54 49 void prime(T & cor); 55 50 56 static inline struct coroutine_desc* active_coroutine() { return TL_GET( this_thread )->curr_cor; }51 static inline struct $coroutine * active_coroutine() { return TL_GET( this_thread )->curr_cor; } 57 52 58 53 //----------------------------------------------------------------------------- … … 61 56 // Start coroutine routines 62 57 extern "C" { 63 void CtxInvokeCoroutine(void (*main)(void *), void * this);58 void __cfactx_invoke_coroutine(void (*main)(void *), void * this); 64 59 65 60 forall(dtype T) 66 void CtxStart(void (*main)(T &), struct coroutine_desc* cor, T & this, void (*invoke)(void (*main)(void *), void *));61 void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *)); 67 62 68 extern void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc*) __attribute__ ((__noreturn__));63 extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__)); 69 64 70 extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");65 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch"); 71 66 } 72 67 73 68 // Private wrappers for context 
switch and stack creation 74 69 // Wrapper for co 75 static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {70 static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) { 76 71 // set state of current coroutine to inactive 77 src->state = src->state == Halted ? Halted : Inactive;72 src->state = src->state == Halted ? Halted : Blocked; 78 73 79 74 // set new coroutine that task is executing … … 82 77 // context switch to specified coroutine 83 78 verify( dst->context.SP ); 84 CtxSwitch( &src->context, &dst->context );85 // when CtxSwitch returns we are back in the src coroutine79 __cfactx_switch( &src->context, &dst->context ); 80 // when __cfactx_switch returns we are back in the src coroutine 86 81 87 82 // set state of new coroutine to active … … 89 84 90 85 if( unlikely(src->cancellation != 0p) ) { 91 _ CtxCoroutine_Unwind(src->cancellation, src);86 __cfactx_coroutine_unwind(src->cancellation, src); 92 87 } 93 88 } … … 96 91 97 92 // Suspend implementation inlined for performance 98 static inline void suspend(void) { 99 // optimization : read TLS once and reuse it 100 // Safety note: this is preemption safe since if 101 // preemption occurs after this line, the pointer 102 // will also migrate which means this value will 103 // stay in syn with the TLS 104 coroutine_desc * src = TL_GET( this_thread )->curr_cor; 93 extern "C" { 94 static inline void __cfactx_suspend(void) { 95 // optimization : read TLS once and reuse it 96 // Safety note: this is preemption safe since if 97 // preemption occurs after this line, the pointer 98 // will also migrate which means this value will 99 // stay in syn with the TLS 100 $coroutine * src = TL_GET( this_thread )->curr_cor; 105 101 106 assertf( src->last != 0,107 "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n"108 "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",109 src->name, src );110 assertf( src->last->state != Halted,111 "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n"112 "Possible cause is terminated coroutine's main routine has already returned.",113 src->name, src, src->last->name, src->last );102 assertf( src->last != 0, 103 "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n" 104 "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.", 105 src->name, src ); 106 assertf( src->last->state != Halted, 107 "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n" 108 "Possible cause is terminated coroutine's main routine has already returned.", 109 src->name, src, src->last->name, src->last ); 114 110 115 CoroutineCtxSwitch( src, src->last ); 111 $ctx_switch( src, src->last ); 112 } 116 113 } 117 114 … … 124 121 // will also migrate which means this value will 125 122 // stay in syn with the TLS 126 coroutine_desc* src = TL_GET( this_thread )->curr_cor;127 coroutine_desc* dst = get_coroutine(cor);123 $coroutine * src = TL_GET( this_thread )->curr_cor; 124 $coroutine * dst = get_coroutine(cor); 128 125 129 126 if( unlikely(dst->context.SP == 0p) ) { 130 127 TL_GET( this_thread )->curr_cor = dst; 131 128 __stack_prepare(&dst->stack, 65000); 132 CtxStart(main, dst, cor, CtxInvokeCoroutine);129 __cfactx_start(main, dst, cor, __cfactx_invoke_coroutine); 133 130 TL_GET( this_thread )->curr_cor = src; 134 131 } … … 147 144 148 
145 // always done for performance testing 149 CoroutineCtxSwitch( src, dst );146 $ctx_switch( src, dst ); 150 147 151 148 return cor; 152 149 } 153 150 154 static inline void resume( coroutine_desc * dst) {151 static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) { 155 152 // optimization : read TLS once and reuse it 156 153 // Safety note: this is preemption safe since if … … 158 155 // will also migrate which means this value will 159 156 // stay in syn with the TLS 160 coroutine_desc* src = TL_GET( this_thread )->curr_cor;157 $coroutine * src = TL_GET( this_thread )->curr_cor; 161 158 162 159 // not resuming self ? … … 172 169 173 170 // always done for performance testing 174 CoroutineCtxSwitch( src, dst );171 $ctx_switch( src, dst ); 175 172 } 176 173 -
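Note: the renaming above is internal only: coroutine_desc becomes $coroutine and the context-switch entry points move to the __cfactx_* prefix, while the user-visible interface (the is_coroutine trait, get_coroutine, resume, prime) keeps its shape. A minimal usage sketch under those assumptions; the Fib type and next helper are illustrative, not part of this changeset, and the spelling of suspend (a statement here, a suspend() call in older CFA) has varied across versions:

	coroutine Fib { int fn; };              // compiler generates get_coroutine( Fib & )

	void main( Fib & this ) {               // coroutine main, first entered through __cfactx_invoke_coroutine
		int f1 = 0, f2 = 1;
		for ( ;; ) {
			this.fn = f1;
			suspend;                        // lowers to __cfactx_suspend, i.e. $ctx_switch back to the resumer
			int tmp = f1 + f2;  f1 = f2;  f2 = tmp;
		}
	}

	int next( Fib & f ) {
		resume( f );                        // first call prepares the stack via __cfactx_start, later calls just $ctx_switch
		return f.fn;
	}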
libcfa/src/concurrency/invoke.c
rb7d6a36 r6a490b2 29 29 // Called from the kernel when starting a coroutine or task so must switch back to user mode. 30 30 31 extern void __leave_coroutine ( struct coroutine_desc * ); 32 extern struct coroutine_desc * __finish_coroutine(void); 33 extern void __leave_thread_monitor(); 31 extern struct $coroutine * __cfactx_cor_finish(void); 32 extern void __cfactx_cor_leave ( struct $coroutine * ); 33 extern void __cfactx_thrd_leave(); 34 34 35 extern void disable_interrupts() OPTIONAL_THREAD; 35 36 extern void enable_interrupts( __cfaabi_dbg_ctx_param ); 36 37 37 void CtxInvokeCoroutine(38 void __cfactx_invoke_coroutine( 38 39 void (*main)(void *), 39 40 void *this 40 41 ) { 41 42 // Finish setting up the coroutine by setting its state 42 struct coroutine_desc * cor = __finish_coroutine();43 struct $coroutine * cor = __cfactx_cor_finish(); 43 44 44 45 // Call the main of the coroutine … … 46 47 47 48 //Final suspend, should never return 48 __ leave_coroutine( cor );49 __cfactx_cor_leave( cor ); 49 50 __cabi_abort( "Resumed dead coroutine" ); 50 51 } 51 52 52 static _Unwind_Reason_Code _ CtxCoroutine_UnwindStop(53 static _Unwind_Reason_Code __cfactx_coroutine_unwindstop( 53 54 __attribute((__unused__)) int version, 54 55 _Unwind_Action actions, … … 61 62 // We finished unwinding the coroutine, 62 63 // leave it 63 __ leave_coroutine( param );64 __cfactx_cor_leave( param ); 64 65 __cabi_abort( "Resumed dead coroutine" ); 65 66 } … … 69 70 } 70 71 71 void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc* cor) __attribute__ ((__noreturn__));72 void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc* cor) {73 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _ CtxCoroutine_UnwindStop, cor );72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__)); 73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) { 74 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor ); 74 75 printf("UNWIND ERROR %d after force unwind\n", ret); 75 76 abort(); 76 77 } 77 78 78 void CtxInvokeThread(79 void __cfactx_invoke_thread( 79 80 void (*main)(void *), 80 81 void *this … … 93 94 // The order of these 4 operations is very important 94 95 //Final suspend, should never return 95 __ leave_thread_monitor();96 __cfactx_thrd_leave(); 96 97 __cabi_abort( "Resumed dead thread" ); 97 98 } 98 99 99 void CtxStart(100 void __cfactx_start( 100 101 void (*main)(void *), 101 struct coroutine_desc* cor,102 struct $coroutine * cor, 102 103 void *this, 103 104 void (*invoke)(void *) … … 139 140 140 141 fs->dummyReturn = NULL; 141 fs->rturn = CtxInvokeStub;142 fs->rturn = __cfactx_invoke_stub; 142 143 fs->fixedRegisters[0] = main; 143 144 fs->fixedRegisters[1] = this; … … 157 158 struct FakeStack *fs = (struct FakeStack *)cor->context.SP; 158 159 159 fs->intRegs[8] = CtxInvokeStub;160 fs->intRegs[8] = __cfactx_invoke_stub; 160 161 fs->arg[0] = this; 161 162 fs->arg[1] = invoke; -
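Note: the __cfactx_start / __cfactx_invoke_* pair above bootstraps a new stack by planting a fake frame whose return address is __cfactx_invoke_stub, so the very first __cfactx_switch "returns" into the invoke routine, which runs main and then leaves through __cfactx_cor_leave or __cfactx_thrd_leave, never returning. The same bootstrap pattern can be sketched portably with POSIX ucontext; this is an analogy only, not the CFA mechanism, and every name in it is local to the sketch:

	#include <stdio.h>
	#include <ucontext.h>

	static ucontext_t main_ctx, cor_ctx;
	static char cor_stack[64 * 1024];

	static void invoke( void ) {                      // plays the role of __cfactx_invoke_coroutine
		printf( "running on the fresh stack\n" );
	}                                                 // returning follows uc_link, like leaving the coroutine

	int main( void ) {
		getcontext( &cor_ctx );                       // seed a context, as __cfactx_start seeds a fake frame
		cor_ctx.uc_stack.ss_sp   = cor_stack;
		cor_ctx.uc_stack.ss_size = sizeof(cor_stack);
		cor_ctx.uc_link          = &main_ctx;         // where control goes when invoke returns
		makecontext( &cor_ctx, invoke, 0 );           // first switch will start executing invoke
		swapcontext( &main_ctx, &cor_ctx );           // analogous to the first __cfactx_switch onto the new stack
		printf( "back on the original stack\n" );
		return 0;
	}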
libcfa/src/concurrency/invoke.h
rb7d6a36 r6a490b2 47 47 extern "Cforall" { 48 48 extern __attribute__((aligned(128))) thread_local struct KernelThreadData { 49 struct thread_desc* volatile this_thread;49 struct $thread * volatile this_thread; 50 50 struct processor * volatile this_processor; 51 51 … … 92 92 }; 93 93 94 enum coroutine_state { Halted, Start, Inactive, Active, Primed }; 95 96 struct coroutine_desc { 97 // context that is switch during a CtxSwitch 94 enum coroutine_state { Halted, Start, Primed, Blocked, Ready, Active, Rerun }; 95 enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION }; 96 97 struct $coroutine { 98 // context that is switch during a __cfactx_switch 98 99 struct __stack_context_t context; 99 100 … … 108 109 109 110 // first coroutine to resume this one 110 struct coroutine_desc* starter;111 struct $coroutine * starter; 111 112 112 113 // last coroutine to resume this one 113 struct coroutine_desc* last;114 struct $coroutine * last; 114 115 115 116 // If non-null stack must be unwound with this exception … … 117 118 118 119 }; 120 121 static inline struct __stack_t * __get_stack( struct $coroutine * cor ) { return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2)); } 119 122 120 123 // struct which calls the monitor is accepting … … 127 130 }; 128 131 129 struct monitor_desc{132 struct $monitor { 130 133 // spinlock to protect internal data 131 134 struct __spinlock_t lock; 132 135 133 136 // current owner of the monitor 134 struct thread_desc* owner;137 struct $thread * owner; 135 138 136 139 // queue of threads that are blocked waiting for the monitor 137 __queue_t(struct thread_desc) entry_queue;140 __queue_t(struct $thread) entry_queue; 138 141 139 142 // stack of conditions to run next once we exit the monitor … … 152 155 struct __monitor_group_t { 153 156 // currently held monitors 154 __cfa_anonymous_object( __small_array_t( monitor_desc*) );157 __cfa_anonymous_object( __small_array_t($monitor*) ); 155 158 156 159 // last function that acquired monitors … … 161 164 // instrusive link field for threads 162 165 struct __thread_desc_link { 163 struct thread_desc* next;164 struct thread_desc* prev;166 struct $thread * next; 167 struct $thread * prev; 165 168 unsigned long long ts; 166 169 }; 167 170 168 struct thread_desc{171 struct $thread { 169 172 // Core threading fields 170 // context that is switch during a CtxSwitch173 // context that is switch during a __cfactx_switch 171 174 struct __stack_context_t context; 172 175 173 176 // current execution status for coroutine 174 enum coroutine_state state; 177 volatile int state; 178 enum __Preemption_Reason preempted; 175 179 176 180 //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it 177 181 178 182 // coroutine body used to store context 179 struct coroutine_descself_cor;183 struct $coroutine self_cor; 180 184 181 185 // current active context 182 struct coroutine_desc* curr_cor;186 struct $coroutine * curr_cor; 183 187 184 188 // monitor body used for mutual exclusion 185 struct monitor_descself_mon;189 struct $monitor self_mon; 186 190 187 191 // pointer to monitor with sufficient lifetime for current monitors 188 struct monitor_desc* self_mon_p;192 struct $monitor * self_mon_p; 189 193 190 194 // pointer to the cluster on which the thread is running … … 199 203 200 204 struct { 201 struct thread_desc* next;202 struct thread_desc* prev;205 struct $thread * next; 206 
struct $thread * prev; 203 207 } node; 204 }; 208 209 #ifdef __CFA_DEBUG__ 210 // previous function to park/unpark the thread 211 const char * park_caller; 212 enum coroutine_state park_result; 213 bool park_stale; 214 const char * unpark_caller; 215 enum coroutine_state unpark_result; 216 bool unpark_stale; 217 #endif 218 }; 219 220 #ifdef __CFA_DEBUG__ 221 void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]); 222 #else 223 #define __cfaabi_dbg_record_thrd(x, y, z) 224 #endif 205 225 206 226 #ifdef __cforall 207 227 extern "Cforall" { 208 static inline thread_desc *& get_next( thread_desc & this ) { 228 229 static inline $thread *& get_next( $thread & this ) __attribute__((const)) { 209 230 return this.link.next; 210 231 } 211 232 212 static inline [ thread_desc *&, thread_desc *& ] __get( thread_desc & this) {233 static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) { 213 234 return this.node.[next, prev]; 214 235 } … … 220 241 } 221 242 222 static inline void ?{}(__monitor_group_t & this, struct monitor_desc** data, __lock_size_t size, fptr_t func) {243 static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) { 223 244 (this.data){data}; 224 245 (this.size){size}; … … 226 247 } 227 248 228 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {249 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) { 229 250 if( (lhs.data != 0) != (rhs.data != 0) ) return false; 230 251 if( lhs.size != rhs.size ) return false; … … 260 281 261 282 // assembler routines that performs the context switch 262 extern void CtxInvokeStub( void );263 extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");283 extern void __cfactx_invoke_stub( void ); 284 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch"); 264 285 // void CtxStore ( void * this ) asm ("CtxStore"); 265 286 // void CtxRet ( void * dst ) asm ("CtxRet"); -
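Note: the widened state enum above (notably Blocked, Ready and Rerun, plus the preempted reason) exists so a thread can block while another thread concurrently tries to wake it, without holding a lock: both sides atomically exchange the state and whoever arrives second takes responsibility. A distilled sketch of that handshake, using the same __atomic_exchange_n builtin the runtime uses; the names are illustrative and the real transitions live in kernel.cfa:

	enum { ACTIVE = 1, BLOCKED = 2, RERUN = 3 };      // the three states the handshake needs

	// Kernel side, after the thread has switched away: try to record it as blocked.
	static bool finish_blocking( volatile int * state ) {
		int old = __atomic_exchange_n( state, BLOCKED, __ATOMIC_SEQ_CST );
		return old == ACTIVE;         // true: really parked; false (RERUN): a wake already arrived, run it again
	}

	// Waker side: try to mark the thread runnable again.
	static bool wake( volatile int * state ) {
		int old = __atomic_exchange_n( state, RERUN, __ATOMIC_SEQ_CST );
		return old == BLOCKED;        // true: it had fully parked, so the waker must reschedule it
	}                                 // false (ACTIVE): it has not finished parking and will see RERUN itself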
libcfa/src/concurrency/kernel.cfa
rb7d6a36 r6a490b2 15 15 16 16 #define __cforall_thread__ 17 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__ 17 18 18 19 //C Includes … … 40 41 #include "invoke.h" 41 42 43 42 44 //----------------------------------------------------------------------------- 43 45 // Some assembly required … … 110 112 //----------------------------------------------------------------------------- 111 113 //Start and stop routine for the kernel, declared first to make sure they run first 112 static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 113 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 114 static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 115 static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 116 117 //----------------------------------------------------------------------------- 118 // Kernel Scheduling logic 119 static $thread * __next_thread(cluster * this); 120 static void __run_thread(processor * this, $thread * dst); 121 static $thread * __halt(processor * this); 122 static bool __wake_one(cluster * cltr, bool was_empty); 123 static bool __wake_proc(processor *); 114 124 115 125 //----------------------------------------------------------------------------- … … 117 127 KERNEL_STORAGE(cluster, mainCluster); 118 128 KERNEL_STORAGE(processor, mainProcessor); 119 KERNEL_STORAGE( thread_desc, mainThread);129 KERNEL_STORAGE($thread, mainThread); 120 130 KERNEL_STORAGE(__stack_t, mainThreadCtx); 121 131 122 132 cluster * mainCluster; 123 133 processor * mainProcessor; 124 thread_desc* mainThread;134 $thread * mainThread; 125 135 126 136 extern "C" { … … 164 174 // Main thread construction 165 175 166 void ?{}( coroutine_desc& this, current_stack_info_t * info) with( this ) {176 void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) { 167 177 stack.storage = info->storage; 168 178 with(*stack.storage) { … … 179 189 } 180 190 181 void ?{}( thread_desc& this, current_stack_info_t * info) with( this ) {191 void ?{}( $thread & this, current_stack_info_t * info) with( this ) { 182 192 state = Start; 183 193 self_cor{ info }; … … 209 219 } 210 220 211 static void start(processor * this); 221 static void * __invoke_processor(void * arg); 222 212 223 void ?{}(processor & this, const char name[], cluster & cltr) with( this ) { 213 224 this.name = name; … … 215 226 id = -1u; 216 227 terminated{ 0 }; 228 destroyer = 0p; 217 229 do_terminate = false; 218 230 preemption_alarm = 0p; … … 220 232 runner.proc = &this; 221 233 222 idleLock{}; 223 224 start( &this ); 234 idle{}; 235 236 __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this); 237 238 this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this ); 239 240 __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this); 225 241 } 226 242 227 243 void ^?{}(processor & this) with( this ){ 228 244 if( ! 
__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) { 229 __cfa abi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);245 __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this); 230 246 231 247 __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED); 232 wake( &this );248 __wake_proc( &this ); 233 249 234 250 P( terminated ); … … 236 252 } 237 253 238 pthread_join( kernel_thread, 0p ); 254 int err = pthread_join( kernel_thread, 0p ); 255 if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err)); 256 239 257 free( this.stack ); 240 258 } 241 259 242 void ?{}(cluster & this, const char name[], Duration preemption_rate ) with( this ) {260 void ?{}(cluster & this, const char name[], Duration preemption_rate, int io_flags) with( this ) { 243 261 this.name = name; 244 262 this.preemption_rate = preemption_rate; … … 246 264 ready_lock{}; 247 265 266 #if !defined(__CFA_NO_STATISTICS__) 267 print_stats = false; 268 #endif 269 270 procs{ __get }; 248 271 idles{ __get }; 249 272 threads{ __get }; 250 273 274 __kernel_io_startup( this, io_flags, &this == mainCluster ); 275 251 276 doregister(this); 252 277 } 253 278 254 279 void ^?{}(cluster & this) { 280 __kernel_io_shutdown( this, &this == mainCluster ); 281 255 282 unregister(this); 256 283 } … … 259 286 // Kernel Scheduling logic 260 287 //============================================================================================= 261 static void runThread(processor * this, thread_desc * dst);262 static void finishRunning(processor * this);263 static void halt(processor * this);264 265 288 //Main of the processor contexts 266 289 void main(processorCtx_t & runner) { … … 272 295 verify(this); 273 296 274 __cfa abi_dbg_print_safe("Kernel : core %p starting\n", this);297 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this); 275 298 276 299 // register the processor unless it's the main thread which is handled in the boot sequence … … 285 308 preemption_scope scope = { this }; 286 309 287 __cfa abi_dbg_print_safe("Kernel : core %p started\n", this);288 289 thread_desc* readyThread = 0p;310 __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this); 311 312 $thread * readyThread = 0p; 290 313 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) { 291 readyThread = nextThread( this->cltr ); 292 293 if(readyThread) { 294 verify( ! kernelTLS.preemption_state.enabled ); 295 296 runThread(this, readyThread); 297 298 verify( ! kernelTLS.preemption_state.enabled ); 299 300 //Some actions need to be taken from the kernel 301 finishRunning(this); 302 303 spin_count = 0; 304 } else { 305 // spin(this, &spin_count); 306 halt(this); 314 // Try to get the next thread 315 readyThread = __next_thread( this->cltr ); 316 317 // If no ready thread 318 if( readyThread == 0p ) { 319 // Block until a thread is ready 320 readyThread = __halt(this); 321 } 322 323 // Check if we actually found a thread 324 if( readyThread ) { 325 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 326 /* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted); 327 /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next ); 328 329 // We found a thread run it 330 __run_thread(this, readyThread); 331 332 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 307 333 } 308 334 } 309 335 310 __cfa abi_dbg_print_safe("Kernel : core %p stopping\n", this);336 __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this); 311 337 } 312 338 313 339 V( this->terminated ); 314 315 340 316 341 // unregister the processor unless it's the main thread which is handled in the boot sequence … … 319 344 unregister(this->cltr, this); 320 345 } 321 322 __cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this); 346 else { 347 // HACK : the coroutine context switch expects this_thread to be set 348 // and it make sense for it to be set in all other cases except here 349 // fake it 350 kernelTLS.this_thread = mainThread; 351 } 352 353 __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this); 323 354 324 355 stats_tls_tally(this->cltr); … … 331 362 // runThread runs a thread by context switching 332 363 // from the processor coroutine to the target thread 333 static void runThread(processor * this, thread_desc * thrd_dst) { 334 coroutine_desc * proc_cor = get_coroutine(this->runner); 335 336 // Reset the terminating actions here 337 this->finish.action_code = No_Action; 364 static void __run_thread(processor * this, $thread * thrd_dst) { 365 $coroutine * proc_cor = get_coroutine(this->runner); 338 366 339 367 // Update global state 340 368 kernelTLS.this_thread = thrd_dst; 341 369 342 // set state of processor coroutine to inactive and the thread to active 343 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 344 thrd_dst->state = Active; 345 346 // set context switch to the thread that the processor is executing 347 verify( thrd_dst->context.SP ); 348 CtxSwitch( &proc_cor->context, &thrd_dst->context ); 349 // when CtxSwitch returns we are back in the processor coroutine 350 351 // set state of processor coroutine to active and the thread to inactive 352 thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive; 370 // set state of processor coroutine to inactive 371 verify(proc_cor->state == Active); 372 proc_cor->state = Blocked; 373 374 // Actually run the thread 375 RUNNING: while(true) { 376 if(unlikely(thrd_dst->preempted)) { 377 thrd_dst->preempted = __NO_PREEMPTION; 378 verify(thrd_dst->state == Active || thrd_dst->state == Rerun); 379 } else { 380 verify(thrd_dst->state == Blocked || thrd_dst->state == Ready); // Ready means scheduled normally, blocked means rerun 381 thrd_dst->state = Active; 382 } 383 384 __cfaabi_dbg_debug_do( 385 thrd_dst->park_stale = true; 386 thrd_dst->unpark_stale = true; 387 ) 388 389 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 390 /* paranoid */ verify( kernelTLS.this_thread == thrd_dst ); 391 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor 392 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor 393 394 // set context switch to the thread that the processor is executing 395 verify( thrd_dst->context.SP ); 396 __cfactx_switch( &proc_cor->context, &thrd_dst->context ); 397 // when __cfactx_switch returns we are back in the processor coroutine 398 399 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); 400 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); 401 /* paranoid */ verify( kernelTLS.this_thread == thrd_dst ); 402 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 403 404 405 // We just finished running a thread, there are a few things that could have happened. 406 // 1 - Regular case : the thread has blocked and now one has scheduled it yet. 407 // 2 - Racy case : the thread has blocked but someone has already tried to schedule it. 408 // 4 - Preempted 409 // In case 1, we may have won a race so we can't write to the state again. 410 // In case 2, we lost the race so we now own the thread. 411 412 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 413 // The thread was preempted, reschedule it and reset the flag 414 __schedule_thread( thrd_dst ); 415 break RUNNING; 416 } 417 418 // set state of processor coroutine to active and the thread to inactive 419 static_assert(sizeof(thrd_dst->state) == sizeof(int)); 420 enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Blocked, __ATOMIC_SEQ_CST); 421 __cfaabi_dbg_debug_do( thrd_dst->park_result = old_state; ) 422 switch(old_state) { 423 case Halted: 424 // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on 425 thrd_dst->state = Halted; 426 427 // We may need to wake someone up here since 428 unpark( this->destroyer __cfaabi_dbg_ctx2 ); 429 this->destroyer = 0p; 430 break RUNNING; 431 case Active: 432 // This is case 1, the regular case, nothing more is needed 433 break RUNNING; 434 case Rerun: 435 // This is case 2, the racy case, someone tried to run this thread before it finished blocking 436 // In this case, just run it again. 
437 continue RUNNING; 438 default: 439 // This makes no sense, something is wrong abort 440 abort("Finished running a thread that was Blocked/Start/Primed %d\n", old_state); 441 } 442 } 443 444 // Just before returning to the processor, set the processor coroutine to active 353 445 proc_cor->state = Active; 446 kernelTLS.this_thread = 0p; 354 447 } 355 448 356 449 // KERNEL_ONLY 357 static void returnToKernel() { 358 coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 359 thread_desc * thrd_src = kernelTLS.this_thread; 360 361 // set state of current coroutine to inactive 362 thrd_src->state = thrd_src->state == Halted ? Halted : Inactive; 363 proc_cor->state = Active; 364 int local_errno = *__volatile_errno(); 365 #if defined( __i386 ) || defined( __x86_64 ) 366 __x87_store; 367 #endif 368 369 // set new coroutine that the processor is executing 370 // and context switch to it 371 verify( proc_cor->context.SP ); 372 CtxSwitch( &thrd_src->context, &proc_cor->context ); 373 374 // set state of new coroutine to active 375 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 376 thrd_src->state = Active; 377 378 #if defined( __i386 ) || defined( __x86_64 ) 379 __x87_load; 380 #endif 381 *__volatile_errno() = local_errno; 382 } 383 384 // KERNEL_ONLY 385 // Once a thread has finished running, some of 386 // its final actions must be executed from the kernel 387 static void finishRunning(processor * this) with( this->finish ) { 388 verify( ! kernelTLS.preemption_state.enabled ); 389 choose( action_code ) { 390 case No_Action: 391 break; 392 case Release: 393 unlock( *lock ); 394 case Schedule: 395 ScheduleThread( thrd ); 396 case Release_Schedule: 397 unlock( *lock ); 398 ScheduleThread( thrd ); 399 case Release_Multi: 400 for(int i = 0; i < lock_count; i++) { 401 unlock( *locks[i] ); 402 } 403 case Release_Multi_Schedule: 404 for(int i = 0; i < lock_count; i++) { 405 unlock( *locks[i] ); 406 } 407 for(int i = 0; i < thrd_count; i++) { 408 ScheduleThread( thrds[i] ); 409 } 410 case Callback: 411 callback(); 412 default: 413 abort("KERNEL ERROR: Unexpected action to run after thread"); 414 } 450 void returnToKernel() { 451 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 452 $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 453 $thread * thrd_src = kernelTLS.this_thread; 454 455 // Run the thread on this processor 456 { 457 int local_errno = *__volatile_errno(); 458 #if defined( __i386 ) || defined( __x86_64 ) 459 __x87_store; 460 #endif 461 verify( proc_cor->context.SP ); 462 __cfactx_switch( &thrd_src->context, &proc_cor->context ); 463 #if defined( __i386 ) || defined( __x86_64 ) 464 __x87_load; 465 #endif 466 *__volatile_errno() = local_errno; 467 } 468 469 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 470 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src ); 471 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src ); 415 472 } 416 473 … … 419 476 // This is the entry point for processors (kernel threads) 420 477 // It effectively constructs a coroutine by stealing the pthread stack 421 static void * CtxInvokeProcessor(void * arg) {478 static void * __invoke_processor(void * arg) { 422 479 processor * proc = (processor *) arg; 423 480 kernelTLS.this_processor = proc; … … 438 495 439 496 //We now have a proper context from which to schedule threads 440 __cfa abi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);497 __cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx); 441 498 442 499 // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't … … 449 506 450 507 // Main routine of the core returned, the core is now fully terminated 451 __cfa abi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);508 __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner); 452 509 453 510 return 0p; … … 460 517 } // Abort 461 518 462 void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {519 void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 463 520 pthread_attr_t attr; 464 521 … … 488 545 } 489 546 490 static void start(processor * this) {491 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);492 493 this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );494 495 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);496 }497 498 547 // KERNEL_ONLY 499 voidkernel_first_resume( processor * this ) {500 thread_desc* src = mainThread;501 coroutine_desc* dst = get_coroutine(this->runner);548 static void __kernel_first_resume( processor * this ) { 549 $thread * src = mainThread; 550 $coroutine * dst = get_coroutine(this->runner); 502 551 503 552 verify( ! kernelTLS.preemption_state.enabled ); … … 505 554 kernelTLS.this_thread->curr_cor = dst; 506 555 __stack_prepare( &dst->stack, 65000 ); 507 CtxStart(main, dst, this->runner, CtxInvokeCoroutine);556 __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine); 508 557 509 558 verify( ! kernelTLS.preemption_state.enabled ); … … 512 561 dst->starter = dst->starter ? dst->starter : &src->self_cor; 513 562 514 // set state of current coroutine to inactive515 src->state = src->state == Halted ? 
Halted : Inactive;563 // make sure the current state is still correct 564 /* paranoid */ verify(src->state == Ready); 516 565 517 566 // context switch to specified coroutine 518 567 verify( dst->context.SP ); 519 CtxSwitch( &src->context, &dst->context );520 // when CtxSwitch returns we are back in the src coroutine568 __cfactx_switch( &src->context, &dst->context ); 569 // when __cfactx_switch returns we are back in the src coroutine 521 570 522 571 mainThread->curr_cor = &mainThread->self_cor; 523 572 524 // set state of new coroutine to active525 src->state = Active;573 // make sure the current state has been update 574 /* paranoid */ verify(src->state == Active); 526 575 527 576 verify( ! kernelTLS.preemption_state.enabled ); … … 529 578 530 579 // KERNEL_ONLY 531 voidkernel_last_resume( processor * this ) {532 coroutine_desc* src = &mainThread->self_cor;533 coroutine_desc* dst = get_coroutine(this->runner);580 static void __kernel_last_resume( processor * this ) { 581 $coroutine * src = &mainThread->self_cor; 582 $coroutine * dst = get_coroutine(this->runner); 534 583 535 584 verify( ! kernelTLS.preemption_state.enabled ); … … 537 586 verify( dst->context.SP ); 538 587 588 // SKULLDUGGERY in debug the processors check that the 589 // stack is still within the limit of the stack limits after running a thread. 590 // that check doesn't make sense if we context switch to the processor using the 591 // coroutine semantics. Since this is a special case, use the current context 592 // info to populate these fields. 593 __cfaabi_dbg_debug_do( 594 __stack_context_t ctx; 595 CtxGet( ctx ); 596 mainThread->context.SP = ctx.SP; 597 mainThread->context.FP = ctx.FP; 598 ) 599 539 600 // context switch to the processor 540 CtxSwitch( &src->context, &dst->context );601 __cfactx_switch( &src->context, &dst->context ); 541 602 } 542 603 543 604 //----------------------------------------------------------------------------- 544 605 // Scheduler routines 545 546 606 // KERNEL ONLY 547 void ScheduleThread( thread_desc * thrd ) { 548 verify( thrd ); 549 verify( thrd->state != Halted ); 550 551 verify( ! kernelTLS.preemption_state.enabled ); 552 553 verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next ); 554 607 void __schedule_thread( $thread * thrd ) { 608 /* paranoid */ verify( thrd ); 609 /* paranoid */ verify( thrd->state != Halted ); 610 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 611 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) 612 /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, 613 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted ); 614 /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun, 615 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted ); 616 /* paranoid */ #endif 617 /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next ); 618 619 if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready; 555 620 556 621 ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor); … … 558 623 ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor); 559 624 560 with( *thrd->curr_cluster ) { 561 // if(was_empty) { 562 // lock (proc_list_lock __cfaabi_dbg_ctx2); 563 // if(idles) { 564 // wake_fast(idles.head); 565 // } 566 // unlock (proc_list_lock); 567 // } 568 // else if( struct processor * idle = idles.head ) { 569 // wake_fast(idle); 570 // } 571 } 572 573 verify( ! kernelTLS.preemption_state.enabled ); 625 __wake_one(thrd->curr_cluster, was_empty); 626 627 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 574 628 } 575 629 576 630 // KERNEL ONLY 577 thread_desc * nextThread(cluster * this) with( *this ) {578 verify( ! kernelTLS.preemption_state.enabled );631 static $thread * __next_thread(cluster * this) with( *this ) { 632 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 579 633 580 634 ready_schedule_lock(this, kernelTLS.this_processor); 581 thread_desc* head = pop( this );635 $thread * head = pop( this ); 582 636 ready_schedule_unlock(this, kernelTLS.this_processor); 583 637 584 verify( ! kernelTLS.preemption_state.enabled );638 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 585 639 return head; 586 640 } 587 641 588 void BlockInternal() { 642 // KERNEL ONLY unpark with out disabling interrupts 643 void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) { 644 static_assert(sizeof(thrd->state) == sizeof(int)); 645 646 // record activity 647 __cfaabi_dbg_record_thrd( *thrd, false, caller ); 648 649 enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST); 650 __cfaabi_dbg_debug_do( thrd->unpark_result = old_state; ) 651 switch(old_state) { 652 case Active: 653 // Wake won the race, the thread will reschedule/rerun itself 654 break; 655 case Blocked: 656 /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION ); 657 658 // Wake lost the race, 659 thrd->state = Blocked; 660 __schedule_thread( thrd ); 661 break; 662 case Rerun: 663 abort("More than one thread attempted to schedule thread %p\n", thrd); 664 break; 665 case Halted: 666 case Start: 667 case Primed: 668 default: 669 // This makes no sense, something is wrong abort 670 abort(); 671 } 672 } 673 674 void unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) { 675 if( !thrd ) return; 676 589 677 disable_interrupts(); 590 verify( ! kernelTLS.preemption_state.enabled ); 678 __unpark( thrd __cfaabi_dbg_ctx_fwd2 ); 679 enable_interrupts( __cfaabi_dbg_ctx ); 680 } 681 682 void park( __cfaabi_dbg_ctx_param ) { 683 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 684 disable_interrupts(); 685 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 686 /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION ); 687 688 // record activity 689 __cfaabi_dbg_record_thrd( *kernelTLS.this_thread, true, caller ); 690 591 691 returnToKernel(); 592 verify( ! kernelTLS.preemption_state.enabled ); 692 693 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 593 694 enable_interrupts( __cfaabi_dbg_ctx ); 594 } 595 596 void BlockInternal( __spinlock_t * lock ) { 695 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 696 697 } 698 699 // KERNEL ONLY 700 void __leave_thread() { 701 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 702 returnToKernel(); 703 abort(); 704 } 705 706 // KERNEL ONLY 707 bool force_yield( __Preemption_Reason reason ) { 708 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 597 709 disable_interrupts(); 598 with( *kernelTLS.this_processor ) { 599 finish.action_code = Release; 600 finish.lock = lock; 601 } 602 603 verify( ! kernelTLS.preemption_state.enabled ); 604 returnToKernel(); 605 verify( ! kernelTLS.preemption_state.enabled ); 606 607 enable_interrupts( __cfaabi_dbg_ctx ); 608 } 609 610 void BlockInternal( thread_desc * thrd ) { 611 disable_interrupts(); 612 with( * kernelTLS.this_processor ) { 613 finish.action_code = Schedule; 614 finish.thrd = thrd; 615 } 616 617 verify( ! kernelTLS.preemption_state.enabled ); 618 returnToKernel(); 619 verify( ! kernelTLS.preemption_state.enabled ); 620 621 enable_interrupts( __cfaabi_dbg_ctx ); 622 } 623 624 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) { 625 assert(thrd); 626 disable_interrupts(); 627 with( * kernelTLS.this_processor ) { 628 finish.action_code = Release_Schedule; 629 finish.lock = lock; 630 finish.thrd = thrd; 631 } 632 633 verify( ! kernelTLS.preemption_state.enabled ); 634 returnToKernel(); 635 verify( ! kernelTLS.preemption_state.enabled ); 636 637 enable_interrupts( __cfaabi_dbg_ctx ); 638 } 639 640 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 641 disable_interrupts(); 642 with( * kernelTLS.this_processor ) { 643 finish.action_code = Release_Multi; 644 finish.locks = locks; 645 finish.lock_count = count; 646 } 647 648 verify( ! kernelTLS.preemption_state.enabled ); 649 returnToKernel(); 650 verify( ! kernelTLS.preemption_state.enabled ); 651 652 enable_interrupts( __cfaabi_dbg_ctx ); 653 } 654 655 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 656 disable_interrupts(); 657 with( *kernelTLS.this_processor ) { 658 finish.action_code = Release_Multi_Schedule; 659 finish.locks = locks; 660 finish.lock_count = lock_count; 661 finish.thrds = thrds; 662 finish.thrd_count = thrd_count; 663 } 664 665 verify( ! kernelTLS.preemption_state.enabled ); 666 returnToKernel(); 667 verify( ! kernelTLS.preemption_state.enabled ); 668 669 enable_interrupts( __cfaabi_dbg_ctx ); 670 } 671 672 void BlockInternal(__finish_callback_fptr_t callback) { 673 disable_interrupts(); 674 with( *kernelTLS.this_processor ) { 675 finish.action_code = Callback; 676 finish.callback = callback; 677 } 678 679 verify( ! kernelTLS.preemption_state.enabled ); 680 returnToKernel(); 681 verify( ! kernelTLS.preemption_state.enabled ); 682 683 enable_interrupts( __cfaabi_dbg_ctx ); 684 } 685 686 // KERNEL ONLY 687 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 688 verify( ! 
kernelTLS.preemption_state.enabled ); 689 with( * kernelTLS.this_processor ) { 690 finish.action_code = thrd ? Release_Schedule : Release; 691 finish.lock = lock; 692 finish.thrd = thrd; 693 } 694 695 returnToKernel(); 710 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 711 712 $thread * thrd = kernelTLS.this_thread; 713 /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun); 714 715 // SKULLDUGGERY: It is possible that we are preempting this thread just before 716 // it was going to park itself. If that is the case and it is already using the 717 // intrusive fields then we can't use them to preempt the thread 718 // If that is the case, abandon the preemption. 719 bool preempted = false; 720 if(thrd->next == 0p) { 721 preempted = true; 722 thrd->preempted = reason; 723 returnToKernel(); 724 } 725 726 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 727 enable_interrupts_noPoll(); 728 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 729 730 return preempted; 696 731 } 697 732 … … 701 736 //----------------------------------------------------------------------------- 702 737 // Kernel boot procedures 703 static void kernel_startup(void) {738 static void __kernel_startup(void) { 704 739 verify( ! kernelTLS.preemption_state.enabled ); 705 __cfa abi_dbg_print_safe("Kernel : Starting\n");740 __cfadbg_print_safe(runtime_core, "Kernel : Starting\n"); 706 741 707 742 __page_size = sysconf( _SC_PAGESIZE ); … … 714 749 (*mainCluster){"Main Cluster"}; 715 750 716 __cfa abi_dbg_print_safe("Kernel : Main cluster ready\n");751 __cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n"); 717 752 718 753 // Start by initializing the main thread 719 754 // SKULLDUGGERY: the mainThread steals the process main thread 720 755 // which will then be scheduled by the mainProcessor normally 721 mainThread = ( thread_desc*)&storage_mainThread;756 mainThread = ($thread *)&storage_mainThread; 722 757 current_stack_info_t info; 723 758 info.storage = (__stack_t*)&storage_mainThreadCtx; 724 759 (*mainThread){ &info }; 725 760 726 __cfa abi_dbg_print_safe("Kernel : Main thread ready\n");761 __cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n"); 727 762 728 763 … … 746 781 747 782 runner{ &this }; 748 __cfa abi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);783 __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner); 749 784 } 750 785 … … 765 800 // Add the main thread to the ready queue 766 801 // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread 767 ScheduleThread(mainThread);802 __schedule_thread(mainThread); 768 803 769 804 // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX 770 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that805 // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that 771 806 // mainThread is on the ready queue when this call is made. 
772 kernel_first_resume( kernelTLS.this_processor ); 773 807 __kernel_first_resume( kernelTLS.this_processor ); 774 808 775 809 776 810 // THE SYSTEM IS NOW COMPLETELY RUNNING 777 __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n"); 811 812 813 // Now that the system is up, finish creating systems that need threading 814 __kernel_io_finish_start( *mainCluster ); 815 816 817 __cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n"); 778 818 779 819 verify( ! kernelTLS.preemption_state.enabled ); … … 782 822 } 783 823 784 static void kernel_shutdown(void) { 785 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n"); 786 787 verify( TL_GET( preemption_state.enabled ) ); 824 static void __kernel_shutdown(void) { 825 //Before we start shutting things down, wait for systems that need threading to shutdown 826 __kernel_io_prepare_stop( *mainCluster ); 827 828 /* paranoid */ verify( TL_GET( preemption_state.enabled ) ); 788 829 disable_interrupts(); 789 verify( ! kernelTLS.preemption_state.enabled ); 830 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 831 832 __cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n"); 790 833 791 834 // SKULLDUGGERY: Notify the mainProcessor it needs to terminates. … … 793 836 // which is currently here 794 837 __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE); 795 kernel_last_resume( kernelTLS.this_processor );838 __kernel_last_resume( kernelTLS.this_processor ); 796 839 mainThread->self_cor.state = Halted; 797 840 … … 805 848 // Destroy the main processor and its context in reverse order of construction 806 849 // These were manually constructed so we need manually destroy them 807 void ^?{}(processor & this) with( this ) 808 / /don't join the main thread here, that wouldn't make any sense850 void ^?{}(processor & this) with( this ){ 851 /* paranoid */ verify( this.do_terminate == true ); 809 852 __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner); 810 853 } … … 813 856 814 857 // Final step, destroy the main thread since it is no longer needed 815 // Since we provided a stack to this task it will not destroy anything 858 859 // Since we provided a stack to this taxk it will not destroy anything 860 /* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1)); 816 861 ^(*mainThread){}; 817 862 … … 821 866 ^(__cfa_dbg_global_clusters.lock){}; 822 867 823 __cfa abi_dbg_print_safe("Kernel : Shutdown complete\n");868 __cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n"); 824 869 } 825 870 826 871 //============================================================================================= 827 // Kernel Quiescing872 // Kernel Idle Sleep 828 873 //============================================================================================= 829 static void halt(processor * this) with( *this ) { 830 // // verify( ! 
__atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ); 831 832 // with( *cltr ) { 833 // lock (proc_list_lock __cfaabi_dbg_ctx2); 834 // push_front(idles, *this); 835 // unlock (proc_list_lock); 836 // } 837 838 // __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this); 839 840 // wait( idleLock ); 841 842 // __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this); 843 844 // with( *cltr ) { 845 // lock (proc_list_lock __cfaabi_dbg_ctx2); 846 // remove (idles, *this); 847 // unlock (proc_list_lock); 848 // } 874 static $thread * __halt(processor * this) with( *this ) { 875 if( do_terminate ) return 0p; 876 877 // First, lock the cluster idle 878 lock( cltr->idle_lock __cfaabi_dbg_ctx2 ); 879 880 // Check if we can find a thread 881 if( $thread * found = __next_thread( cltr ) ) { 882 unlock( cltr->idle_lock ); 883 return found; 884 } 885 886 // Move this processor from the active list to the idle list 887 move_to_front(cltr->procs, cltr->idles, *this); 888 889 // Unlock the idle lock so we don't go to sleep with a lock 890 unlock (cltr->idle_lock); 891 892 // We are ready to sleep 893 __cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this); 894 wait( idle ); 895 896 // We have woken up 897 __cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this); 898 899 // Get ourself off the idle list 900 with( *cltr ) { 901 lock (idle_lock __cfaabi_dbg_ctx2); 902 move_to_front(idles, procs, *this); 903 unlock(idle_lock); 904 } 905 906 // Don't check the ready queue again, we may not be in a position to run a thread 907 return 0p; 908 } 909 910 // Wake a thread from the front if there are any 911 static bool __wake_one(cluster * this, __attribute__((unused)) bool force) { 912 // if we don't want to force check if we know it's false 913 // if( !this->idles.head && !force ) return false; 914 915 // First, lock the cluster idle 916 lock( this->idle_lock __cfaabi_dbg_ctx2 ); 917 918 // Check if there is someone to wake up 919 if( !this->idles.head ) { 920 // Nope unlock and return false 921 unlock( this->idle_lock ); 922 return false; 923 } 924 925 // Wake them up 926 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head); 927 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 928 post( this->idles.head->idle ); 929 930 // Unlock and return true 931 unlock( this->idle_lock ); 932 return true; 933 } 934 935 // Unconditionnaly wake a thread 936 static bool __wake_proc(processor * this) { 937 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this); 938 939 disable_interrupts(); 940 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 941 bool ret = post( this->idle ); 942 enable_interrupts( __cfaabi_dbg_ctx ); 943 944 return ret; 849 945 } 850 946 … … 880 976 881 977 void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) { 882 thread_desc* thrd = kernel_data;978 $thread * thrd = kernel_data; 883 979 884 980 if(thrd) { … … 928 1024 void ^?{}(semaphore & this) {} 929 1025 930 voidP(semaphore & this) with( this ){1026 bool P(semaphore & this) with( this ){ 931 1027 lock( lock __cfaabi_dbg_ctx2 ); 932 1028 count -= 1; … … 936 1032 937 1033 // atomically release spin lock and block 938 BlockInternal( &lock ); 1034 unlock( lock ); 1035 park( __cfaabi_dbg_ctx ); 1036 return true; 939 1037 } 940 1038 else { 941 1039 unlock( lock ); 942 } 943 } 944 945 void V(semaphore & this) with( this ) { 946 thread_desc * thrd = 0p; 1040 return false; 1041 } 1042 } 1043 1044 bool V(semaphore & this) with( this ) { 1045 $thread * thrd = 0p; 947 1046 lock( lock __cfaabi_dbg_ctx2 ); 948 1047 count += 1; … … 955 1054 956 1055 // make new owner 957 WakeThread( thrd ); 1056 unpark( thrd __cfaabi_dbg_ctx2 ); 1057 1058 return thrd != 0p; 1059 } 1060 1061 bool V(semaphore & this, unsigned diff) with( this ) { 1062 $thread * thrd = 0p; 1063 lock( lock __cfaabi_dbg_ctx2 ); 1064 int release = max(-count, (int)diff); 1065 count += diff; 1066 for(release) { 1067 unpark( pop_head( waiting ) __cfaabi_dbg_ctx2 ); 1068 } 1069 1070 unlock( lock ); 1071 1072 return thrd != 0p; 958 1073 } 959 1074 … … 972 1087 } 973 1088 974 void doregister( cluster * cltr, thread_desc& thrd ) {1089 void doregister( cluster * cltr, $thread & thrd ) { 975 1090 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 976 1091 cltr->nthreads += 1; … … 979 1094 } 980 1095 981 void unregister( cluster * cltr, thread_desc& thrd ) {1096 void unregister( cluster * cltr, $thread & thrd ) { 982 1097 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 983 1098 remove(cltr->threads, thrd ); … … 990 1105 __cfaabi_dbg_debug_do( 991 1106 extern "C" { 992 void __cfaabi_dbg_record (__spinlock_t & this, const char prev_name[]) {1107 void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) { 993 1108 this.prev_name = prev_name; 994 1109 this.prev_thrd = kernelTLS.this_thread; 995 1110 } 1111 1112 void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]) { 1113 if(park) { 1114 this.park_caller = prev_name; 1115 this.park_stale = false; 1116 } 1117 else { 1118 this.unpark_caller = prev_name; 1119 this.unpark_stale = false; 1120 } 1121 } 996 1122 } 997 1123 ) … … 999 1125 //----------------------------------------------------------------------------- 1000 1126 // Debug 1001 bool threading_enabled(void) {1127 bool threading_enabled(void) __attribute__((const)) { 1002 1128 return true; 1003 1129 } -
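Note: the old BlockInternal variants that atomically released locks while blocking are gone; blocking is now a plain park() and waking an unpark(), and the atomic state exchange makes the unlock-then-park window benign. A sketch of a single-waiter gate built on that pair, assuming TL_GET( this_thread ) is how the current $thread is fetched (as in the headers above); the gate type itself is illustrative, not part of the changeset:

	// Releasing the lock before park is safe: if signal runs in the gap, its unpark marks the
	// thread Rerun, so the later park returns immediately instead of sleeping forever.
	struct gate {
		__spinlock_t lock;
		$thread * waiter;
		bool open;
	};

	void wait( gate & g ) {
		lock( g.lock __cfaabi_dbg_ctx2 );
		if ( !g.open ) {
			g.waiter = TL_GET( this_thread );      // runtime TLS accessor for the current $thread
			unlock( g.lock );
			park( __cfaabi_dbg_ctx );              // may return at once if signal already ran
			return;
		}
		unlock( g.lock );
	}

	void signal( gate & g ) {
		lock( g.lock __cfaabi_dbg_ctx2 );
		g.open = true;
		$thread * w = g.waiter;  g.waiter = 0p;
		unlock( g.lock );
		unpark( w __cfaabi_dbg_ctx2 );             // unpark( 0p ) is a no-op, so signalling with no waiter is fine
	}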
libcfa/src/concurrency/kernel.hfa
rb7d6a36 r6a490b2 17 17 18 18 #include <stdbool.h> 19 #include <stdint.h> 19 20 20 21 #include "invoke.h" … … 32 33 __spinlock_t lock; 33 34 int count; 34 __queue_t( thread_desc) waiting;35 __queue_t($thread) waiting; 35 36 }; 36 37 37 38 void ?{}(semaphore & this, int count = 1); 38 39 void ^?{}(semaphore & this); 39 void P (semaphore & this); 40 void V (semaphore & this); 40 bool P (semaphore & this); 41 bool V (semaphore & this); 42 bool V (semaphore & this, unsigned count); 41 43 42 44 … … 44 46 // Processor 45 47 extern struct cluster * mainCluster; 46 47 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };48 49 typedef void (*__finish_callback_fptr_t)(void);50 51 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)52 struct FinishAction {53 FinishOpCode action_code;54 /*55 // Union of possible actions56 union {57 // Option 1 : locks and threads58 struct {59 // 1 thread or N thread60 union {61 thread_desc * thrd;62 struct {63 thread_desc ** thrds;64 unsigned short thrd_count;65 };66 };67 // 1 lock or N lock68 union {69 __spinlock_t * lock;70 struct {71 __spinlock_t ** locks;72 unsigned short lock_count;73 };74 };75 };76 // Option 2 : action pointer77 __finish_callback_fptr_t callback;78 };79 /*/80 thread_desc * thrd;81 thread_desc ** thrds;82 unsigned short thrd_count;83 __spinlock_t * lock;84 __spinlock_t ** locks;85 unsigned short lock_count;86 __finish_callback_fptr_t callback;87 //*/88 };89 static inline void ?{}(FinishAction & this) {90 this.action_code = No_Action;91 this.thrd = 0p;92 this.lock = 0p;93 }94 static inline void ^?{}(FinishAction &) {}95 48 96 49 // Processor … … 117 70 // RunThread data 118 71 // Action to do after a thread is ran 119 struct FinishAction finish;72 $thread * destroyer; 120 73 121 74 // Preemption data … … 126 79 bool pending_preemption; 127 80 128 // Idle lock 129 __bin_sem_t idle Lock;81 // Idle lock (kernel semaphore) 82 __bin_sem_t idle; 130 83 131 84 // Termination … … 133 86 volatile bool do_terminate; 134 87 135 // Termination synchronisation 88 // Termination synchronisation (user semaphore) 136 89 semaphore terminated; 137 90 … … 158 111 static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; } 159 112 160 static inline [processor *&, processor *& ] __get( processor & this ) { 161 return this.node.[next, prev]; 162 } 113 static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; } 114 115 //----------------------------------------------------------------------------- 116 // I/O 117 struct __io_data; 118 119 #define CFA_CLUSTER_IO_POLLER_USER_THREAD 1 << 0 120 // #define CFA_CLUSTER_IO_POLLER_KERNEL_SIDE 1 << 1 163 121 164 122 … … 333 291 // List of threads 334 292 __spinlock_t thread_list_lock; 335 __dllist_t(struct thread_desc) threads;293 __dllist_t(struct $thread) threads; 336 294 unsigned int nthreads; 337 295 … … 341 299 cluster * prev; 342 300 } node; 301 302 struct __io_data * io; 303 304 #if !defined(__CFA_NO_STATISTICS__) 305 bool print_stats; 306 #endif 343 307 }; 344 308 extern Duration default_preemption(); 345 309 346 void ?{} (cluster & this, const char name[], Duration preemption_rate );310 void ?{} (cluster & this, const char name[], Duration preemption_rate, int flags); 347 311 void ^?{}(cluster & this); 348 312 349 static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption()}; } 350 static inline void 
?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; } 351 static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption()}; } 352 353 static inline [cluster *&, cluster *& ] __get( cluster & this ) { 354 return this.node.[next, prev]; 355 } 313 static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption(), 0}; } 314 static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate, 0}; } 315 static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption(), 0}; } 316 static inline void ?{} (cluster & this, int flags) { this{"Anonymous Cluster", default_preemption(), flags}; } 317 static inline void ?{} (cluster & this, Duration preemption_rate, int flags) { this{"Anonymous Cluster", preemption_rate, flags}; } 318 static inline void ?{} (cluster & this, const char name[], int flags) { this{name, default_preemption(), flags}; } 319 320 static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; } 356 321 357 322 static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE 358 323 static inline struct cluster * active_cluster () { return TL_GET( this_processor )->cltr; } 324 325 #if !defined(__CFA_NO_STATISTICS__) 326 static inline void print_stats_at_exit( cluster & this ) { 327 this.print_stats = true; 328 } 329 #endif 359 330 360 331 // Local Variables: // -
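Note: kernel.hfa now reports back from the semaphore: P returns whether the caller actually parked, V whether a waiter was handed the token, and a counted V releases several waiters at once. A small usage sketch under that reading of the new signatures; consumer, producer and sem are illustrative, and the semaphore is assumed to be constructed elsewhere with a count of 0:

	void consumer( semaphore & sem ) {
		bool blocked = P( sem );        // true: no token was available, so this thread parked until a V
		// ... use the resource ...
	}

	void producer( semaphore & sem ) {
		bool handed_off = V( sem );     // true: a parked consumer was unparked and given the token
		V( sem, 4 );                    // counted variant added in this changeset: release up to four waiters
	}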
libcfa/src/concurrency/kernel_private.hfa
rb7d6a36 r6a490b2 31 31 } 32 32 33 void ScheduleThread( thread_desc * ); 34 static inline void WakeThread( thread_desc * thrd ) { 35 if( !thrd ) return; 36 37 verify(thrd->state == Inactive); 38 39 disable_interrupts(); 40 ScheduleThread( thrd ); 41 enable_interrupts( __cfaabi_dbg_ctx ); 42 } 43 thread_desc * nextThread(cluster * this); 33 void __schedule_thread( $thread * ) __attribute__((nonnull (1))); 44 34 45 35 //Block current thread and release/wake-up the following resources 46 void BlockInternal(void); 47 void BlockInternal(__spinlock_t * lock); 48 void BlockInternal(thread_desc * thrd); 49 void BlockInternal(__spinlock_t * lock, thread_desc * thrd); 50 void BlockInternal(__spinlock_t * locks [], unsigned short count); 51 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count); 52 void BlockInternal(__finish_callback_fptr_t callback); 53 void LeaveThread(__spinlock_t * lock, thread_desc * thrd); 36 void __leave_thread() __attribute__((noreturn)); 54 37 55 38 //----------------------------------------------------------------------------- … … 57 40 void main(processorCtx_t *); 58 41 59 void * create_pthread( pthread_t *, void * (*)(void *), void * ); 60 61 static inline void wake_fast(processor * this) { 62 __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this); 63 post( this->idleLock ); 64 } 65 66 static inline void wake(processor * this) { 67 disable_interrupts(); 68 wake_fast(this); 69 enable_interrupts( __cfaabi_dbg_ctx ); 70 } 42 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); 43 44 71 45 72 46 struct event_kernel_t { … … 85 59 extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" ))); 86 60 61 extern cluster * mainCluster; 62 87 63 //----------------------------------------------------------------------------- 88 64 // Threads 89 65 extern "C" { 90 void CtxInvokeThread(void (*main)(void *), void * this); 91 } 92 93 extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst); 66 void __cfactx_invoke_thread(void (*main)(void *), void * this); 67 } 94 68 95 69 __cfaabi_dbg_debug_do( 96 extern void __cfaabi_dbg_thread_register ( thread_desc* thrd );97 extern void __cfaabi_dbg_thread_unregister( thread_desc* thrd );70 extern void __cfaabi_dbg_thread_register ( $thread * thrd ); 71 extern void __cfaabi_dbg_thread_unregister( $thread * thrd ); 98 72 ) 73 74 // KERNEL ONLY unpark with out disabling interrupts 75 void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ); 76 77 //----------------------------------------------------------------------------- 78 // I/O 79 void __kernel_io_startup ( cluster &, int, bool ); 80 void __kernel_io_finish_start( cluster & ); 81 void __kernel_io_prepare_stop( cluster & ); 82 void __kernel_io_shutdown ( cluster &, bool ); 99 83 100 84 //----------------------------------------------------------------------------- … … 102 86 #define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)] 103 87 104 static inline uint32_t tls_rand() {88 static inline uint32_t __tls_rand() { 105 89 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6; 106 90 kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21; … … 113 97 void unregister( struct cluster & cltr ); 114 98 115 void doregister( struct cluster * cltr, struct thread_desc& thrd );116 void unregister( struct cluster * cltr, struct thread_desc& thrd );99 void doregister( struct cluster * cltr, struct $thread & thrd 
); 100 void unregister( struct cluster * cltr, struct $thread & thrd ); 117 101 118 102 //======================================================================= -
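The renamed __tls_rand in kernel_private.hfa above is a per-processor xorshift generator kept in kernelTLS.rand_seed; the visible steps shift left by 6 and right by 21, and the third step is elided in this excerpt. A small stand-alone C sketch of the same idea, deliberately using the published 32-bit xorshift constants 13/17/5 rather than guessing the runtime's own:

    #include <stdint.h>

    /* Thread-local PRNG state, one per kernel thread (plays the role of kernelTLS.rand_seed). */
    static _Thread_local uint32_t rand_seed = 2463534242u;   /* any non-zero seed */

    /* Marsaglia xorshift step: three shift/xor rounds scramble the seed in place. */
    static inline uint32_t tls_rand_sketch(void) {
        rand_seed ^= rand_seed << 13;
        rand_seed ^= rand_seed >> 17;
        rand_seed ^= rand_seed << 5;
        return rand_seed;
    }

Because the state lives in thread-local storage, no locking is needed; each processor gets an independent stream, which is the point of moving it into kernelTLS.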
libcfa/src/concurrency/monitor.cfa
rb7d6a36 r6a490b2 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // monitor_desc.c --7 // $monitor.c -- 8 8 // 9 9 // Author : Thierry Delisle … … 27 27 //----------------------------------------------------------------------------- 28 28 // Forward declarations 29 static inline void set_owner ( monitor_desc * this, thread_desc* owner );30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc* owner );31 static inline void set_mask ( monitor_desc* storage [], __lock_size_t count, const __waitfor_mask_t & mask );32 static inline void reset_mask( monitor_desc* this );33 34 static inline thread_desc * next_thread( monitor_desc* this );35 static inline bool is_accepted( monitor_desc* this, const __monitor_group_t & monitors );29 static inline void __set_owner ( $monitor * this, $thread * owner ); 30 static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner ); 31 static inline void set_mask ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ); 32 static inline void reset_mask( $monitor * this ); 33 34 static inline $thread * next_thread( $monitor * this ); 35 static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors ); 36 36 37 37 static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count ); 38 static inline void lock_all ( monitor_desc* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );38 static inline void lock_all ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ); 39 39 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ); 40 static inline void unlock_all( monitor_desc* locks [], __lock_size_t count );41 42 static inline void save ( monitor_desc* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );43 static inline void restore( monitor_desc* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );44 45 static inline void init ( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );46 static inline void init_push( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );47 48 static inline thread_desc* check_condition ( __condition_criterion_t * );40 static inline void unlock_all( $monitor * locks [], __lock_size_t count ); 41 42 static inline void save ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] ); 43 static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 44 45 static inline void init ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 46 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 47 48 static inline $thread * check_condition ( __condition_criterion_t * ); 49 49 static inline void brand_condition ( condition & ); 50 static inline [ thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc* monitors [], __lock_size_t count );50 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], 
__lock_size_t count ); 51 51 52 52 forall(dtype T | sized( T )) 53 53 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ); 54 54 static inline __lock_size_t count_max ( const __waitfor_mask_t & mask ); 55 static inline __lock_size_t aggregate ( monitor_desc* storage [], const __waitfor_mask_t & mask );55 static inline __lock_size_t aggregate ( $monitor * storage [], const __waitfor_mask_t & mask ); 56 56 57 57 //----------------------------------------------------------------------------- … … 68 68 69 69 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \ 70 monitor_desc** monitors = mons; /* Save the targeted monitors */ \70 $monitor ** monitors = mons; /* Save the targeted monitors */ \ 71 71 __lock_size_t count = cnt; /* Save the count to a local variable */ \ 72 72 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ … … 80 80 //----------------------------------------------------------------------------- 81 81 // Enter/Leave routines 82 83 84 extern "C" { 85 // Enter single monitor 86 static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) { 87 // Lock the monitor spinlock 88 lock( this->lock __cfaabi_dbg_ctx2 ); 89 // Interrupts disable inside critical section 90 thread_desc * thrd = kernelTLS.this_thread; 91 92 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); 93 94 if( !this->owner ) { 95 // No one has the monitor, just take it 96 set_owner( this, thrd ); 97 98 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 99 } 100 else if( this->owner == thrd) { 101 // We already have the monitor, just note how many times we took it 102 this->recursion += 1; 103 104 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 105 } 106 else if( is_accepted( this, group) ) { 107 // Some one was waiting for us, enter 108 set_owner( this, thrd ); 109 110 // Reset mask 111 reset_mask( this ); 112 113 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 114 } 115 else { 116 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 117 118 // Some one else has the monitor, wait in line for it 119 append( this->entry_queue, thrd ); 120 121 BlockInternal( &this->lock ); 122 123 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 124 125 // BlockInternal will unlock spinlock, no need to unlock ourselves 126 return; 127 } 82 // Enter single monitor 83 static void __enter( $monitor * this, const __monitor_group_t & group ) { 84 // Lock the monitor spinlock 85 lock( this->lock __cfaabi_dbg_ctx2 ); 86 // Interrupts disable inside critical section 87 $thread * thrd = kernelTLS.this_thread; 88 89 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); 90 91 if( !this->owner ) { 92 // No one has the monitor, just take it 93 __set_owner( this, thrd ); 94 95 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 96 } 97 else if( this->owner == thrd) { 98 // We already have the monitor, just note how many times we took it 99 this->recursion += 1; 100 101 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 102 } 103 else if( is_accepted( this, group) ) { 104 // Some one was waiting for us, enter 105 __set_owner( this, thrd ); 106 107 // Reset mask 108 reset_mask( this ); 109 110 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 111 } 112 else { 113 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 114 115 // Some one else has 
the monitor, wait in line for it 116 /* paranoid */ verify( thrd->next == 0p ); 117 append( this->entry_queue, thrd ); 118 /* paranoid */ verify( thrd->next == 1p ); 119 120 unlock( this->lock ); 121 park( __cfaabi_dbg_ctx ); 128 122 129 123 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 130 124 131 // Release the lock and leave 125 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 126 return; 127 } 128 129 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 130 131 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 132 /* paranoid */ verify( this->lock.lock ); 133 134 // Release the lock and leave 135 unlock( this->lock ); 136 return; 137 } 138 139 static void __dtor_enter( $monitor * this, fptr_t func ) { 140 // Lock the monitor spinlock 141 lock( this->lock __cfaabi_dbg_ctx2 ); 142 // Interrupts disable inside critical section 143 $thread * thrd = kernelTLS.this_thread; 144 145 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); 146 147 148 if( !this->owner ) { 149 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 150 151 // No one has the monitor, just take it 152 __set_owner( this, thrd ); 153 154 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 155 132 156 unlock( this->lock ); 133 157 return; 134 158 } 135 136 static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) { 137 // Lock the monitor spinlock 138 lock( this->lock __cfaabi_dbg_ctx2 ); 139 // Interrupts disable inside critical section 140 thread_desc * thrd = kernelTLS.this_thread; 141 142 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); 143 144 145 if( !this->owner ) { 146 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 147 148 // No one has the monitor, just take it 149 set_owner( this, thrd ); 150 151 unlock( this->lock ); 152 return; 159 else if( this->owner == thrd) { 160 // We already have the monitor... but where about to destroy it so the nesting will fail 161 // Abort! 
162 abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd ); 163 } 164 165 __lock_size_t count = 1; 166 $monitor ** monitors = &this; 167 __monitor_group_t group = { &this, 1, func }; 168 if( is_accepted( this, group) ) { 169 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 170 171 // Wake the thread that is waiting for this 172 __condition_criterion_t * urgent = pop( this->signal_stack ); 173 /* paranoid */ verify( urgent ); 174 175 // Reset mask 176 reset_mask( this ); 177 178 // Create the node specific to this wait operation 179 wait_ctx_primed( thrd, 0 ) 180 181 // Some one else has the monitor, wait for him to finish and then run 182 unlock( this->lock ); 183 184 // Release the next thread 185 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 186 unpark( urgent->owner->waiting_thread __cfaabi_dbg_ctx2 ); 187 188 // Park current thread waiting 189 park( __cfaabi_dbg_ctx ); 190 191 // Some one was waiting for us, enter 192 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 193 } 194 else { 195 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 196 197 wait_ctx( thrd, 0 ) 198 this->dtor_node = &waiter; 199 200 // Some one else has the monitor, wait in line for it 201 /* paranoid */ verify( thrd->next == 0p ); 202 append( this->entry_queue, thrd ); 203 /* paranoid */ verify( thrd->next == 1p ); 204 unlock( this->lock ); 205 206 // Park current thread waiting 207 park( __cfaabi_dbg_ctx ); 208 209 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 210 return; 211 } 212 213 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 214 215 } 216 217 // Leave single monitor 218 void __leave( $monitor * this ) { 219 // Lock the monitor spinlock 220 lock( this->lock __cfaabi_dbg_ctx2 ); 221 222 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 223 224 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 225 226 // Leaving a recursion level, decrement the counter 227 this->recursion -= 1; 228 229 // If we haven't left the last level of recursion 230 // it means we don't need to do anything 231 if( this->recursion != 0) { 232 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 233 unlock( this->lock ); 234 return; 235 } 236 237 // Get the next thread, will be null on low contention monitor 238 $thread * new_owner = next_thread( this ); 239 240 // Check the new owner is consistent with who we wake-up 241 // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor 242 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 243 244 // We can now let other threads in safely 245 unlock( this->lock ); 246 247 //We need to wake-up the thread 248 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 249 unpark( new_owner 
__cfaabi_dbg_ctx2 ); 250 } 251 252 // Leave single monitor for the last time 253 void __dtor_leave( $monitor * this ) { 254 __cfaabi_dbg_debug_do( 255 if( TL_GET( this_thread ) != this->owner ) { 256 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner); 153 257 } 154 else if( this->owner == thrd) { 155 // We already have the monitor... but where about to destroy it so the nesting will fail 156 // Abort! 157 abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd ); 258 if( this->recursion != 1 ) { 259 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 158 260 } 159 160 __lock_size_t count = 1; 161 monitor_desc ** monitors = &this; 162 __monitor_group_t group = { &this, 1, func }; 163 if( is_accepted( this, group) ) { 164 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 165 166 // Wake the thread that is waiting for this 167 __condition_criterion_t * urgent = pop( this->signal_stack ); 168 verify( urgent ); 169 170 // Reset mask 171 reset_mask( this ); 172 173 // Create the node specific to this wait operation 174 wait_ctx_primed( thrd, 0 ) 175 176 // Some one else has the monitor, wait for him to finish and then run 177 BlockInternal( &this->lock, urgent->owner->waiting_thread ); 178 179 // Some one was waiting for us, enter 180 set_owner( this, thrd ); 181 } 182 else { 183 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 184 185 wait_ctx( thrd, 0 ) 186 this->dtor_node = &waiter; 187 188 // Some one else has the monitor, wait in line for it 189 append( this->entry_queue, thrd ); 190 BlockInternal( &this->lock ); 191 192 // BlockInternal will unlock spinlock, no need to unlock ourselves 193 return; 194 } 195 196 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 197 198 } 199 200 // Leave single monitor 201 void __leave_monitor_desc( monitor_desc * this ) { 202 // Lock the monitor spinlock 203 lock( this->lock __cfaabi_dbg_ctx2 ); 204 205 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 206 207 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 208 209 // Leaving a recursion level, decrement the counter 210 this->recursion -= 1; 211 212 // If we haven't left the last level of recursion 213 // it means we don't need to do anything 214 if( this->recursion != 0) { 215 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 216 unlock( this->lock ); 217 return; 218 } 219 220 // Get the next thread, will be null on low contention monitor 221 thread_desc * new_owner = next_thread( this ); 222 223 // We can now let other threads in safely 224 unlock( this->lock ); 225 226 //We need to wake-up the thread 227 WakeThread( new_owner ); 228 } 229 230 // Leave single monitor for the last time 231 void __leave_dtor_monitor_desc( monitor_desc * this ) { 232 __cfaabi_dbg_debug_do( 233 if( TL_GET( this_thread ) != this->owner ) { 234 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner); 235 } 236 if( this->recursion != 1 ) { 237 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 238 } 239 ) 240 } 241 261 ) 262 } 263 264 extern "C" { 242 265 // Leave the thread monitor 243 266 // last routine called by a thread. 
244 267 // Should never return 245 void __ leave_thread_monitor() {246 thread_desc* thrd = TL_GET( this_thread );247 monitor_desc* this = &thrd->self_mon;268 void __cfactx_thrd_leave() { 269 $thread * thrd = TL_GET( this_thread ); 270 $monitor * this = &thrd->self_mon; 248 271 249 272 // Lock the monitor now … … 252 275 disable_interrupts(); 253 276 254 thrd->s elf_cor.state = Halted;255 256 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );277 thrd->state = Halted; 278 279 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this ); 257 280 258 281 // Leaving a recursion level, decrement the counter … … 264 287 265 288 // Fetch the next thread, can be null 266 thread_desc * new_owner = next_thread( this ); 267 268 // Leave the thread, this will unlock the spinlock 269 // Use leave thread instead of BlockInternal which is 270 // specialized for this case and supports null new_owner 271 LeaveThread( &this->lock, new_owner ); 289 $thread * new_owner = next_thread( this ); 290 291 // Release the monitor lock 292 unlock( this->lock ); 293 294 // Unpark the next owner if needed 295 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 296 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 297 /* paranoid */ verify( ! kernelTLS.this_processor->destroyer ); 298 /* paranoid */ verify( thrd->state == Halted ); 299 300 kernelTLS.this_processor->destroyer = new_owner; 301 302 // Leave the thread 303 __leave_thread(); 272 304 273 305 // Control flow should never reach here! … … 279 311 static inline void enter( __monitor_group_t monitors ) { 280 312 for( __lock_size_t i = 0; i < monitors.size; i++) { 281 __enter _monitor_desc( monitors[i], monitors );313 __enter( monitors[i], monitors ); 282 314 } 283 315 } … … 285 317 // Leave multiple monitor 286 318 // relies on the monitor array being sorted 287 static inline void leave( monitor_desc* monitors [], __lock_size_t count) {319 static inline void leave($monitor * monitors [], __lock_size_t count) { 288 320 for( __lock_size_t i = count - 1; i >= 0; i--) { 289 __leave _monitor_desc( monitors[i] );321 __leave( monitors[i] ); 290 322 } 291 323 } … … 293 325 // Ctor for monitor guard 294 326 // Sorts monitors before entering 295 void ?{}( monitor_guard_t & this, monitor_desc* m [], __lock_size_t count, fptr_t func ) {296 thread_desc* thrd = TL_GET( this_thread );327 void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) { 328 $thread * thrd = TL_GET( this_thread ); 297 329 298 330 // Store current array … … 334 366 // Ctor for monitor guard 335 367 // Sorts monitors before entering 336 void ?{}( monitor_dtor_guard_t & this, monitor_desc* m [], fptr_t func ) {368 void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) { 337 369 // optimization 338 thread_desc* thrd = TL_GET( this_thread );370 $thread * thrd = TL_GET( this_thread ); 339 371 340 372 // Store current array … … 347 379 (thrd->monitors){m, 1, func}; 348 380 349 __ enter_monitor_dtor( this.m, func );381 __dtor_enter( this.m, func ); 350 382 } 351 383 … … 353 385 void ^?{}( monitor_dtor_guard_t & this ) { 354 386 // Leave the monitors in order 355 __ leave_dtor_monitor_desc( this.m );387 __dtor_leave( this.m ); 356 388 357 389 // Restore thread context … … 361 393 
//----------------------------------------------------------------------------- 362 394 // Internal scheduling types 363 void ?{}(__condition_node_t & this, thread_desc* waiting_thread, __lock_size_t count, uintptr_t user_info ) {395 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) { 364 396 this.waiting_thread = waiting_thread; 365 397 this.count = count; … … 375 407 } 376 408 377 void ?{}(__condition_criterion_t & this, monitor_desc* target, __condition_node_t & owner ) {409 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) { 378 410 this.ready = false; 379 411 this.target = target; … … 400 432 // Append the current wait operation to the ones already queued on the condition 401 433 // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion 434 /* paranoid */ verify( waiter.next == 0p ); 402 435 append( this.blocked, &waiter ); 436 /* paranoid */ verify( waiter.next == 1p ); 403 437 404 438 // Lock all monitors (aggregates the locks as well) … … 407 441 // Find the next thread(s) to run 408 442 __lock_size_t thread_count = 0; 409 thread_desc* threads[ count ];443 $thread * threads[ count ]; 410 444 __builtin_memset( threads, 0, sizeof( threads ) ); 411 445 … … 415 449 // Remove any duplicate threads 416 450 for( __lock_size_t i = 0; i < count; i++) { 417 thread_desc* new_owner = next_thread( monitors[i] );451 $thread * new_owner = next_thread( monitors[i] ); 418 452 insert_unique( threads, thread_count, new_owner ); 419 453 } 420 454 455 // Unlock the locks, we don't need them anymore 456 for(int i = 0; i < count; i++) { 457 unlock( *locks[i] ); 458 } 459 460 // Wake the threads 461 for(int i = 0; i < thread_count; i++) { 462 unpark( threads[i] __cfaabi_dbg_ctx2 ); 463 } 464 421 465 // Everything is ready to go to sleep 422 BlockInternal( locks, count, threads, thread_count);466 park( __cfaabi_dbg_ctx ); 423 467 424 468 // We are back, restore the owners and recursions … … 435 479 //Some more checking in debug 436 480 __cfaabi_dbg_debug_do( 437 thread_desc* this_thrd = TL_GET( this_thread );481 $thread * this_thrd = TL_GET( this_thread ); 438 482 if ( this.monitor_count != this_thrd->monitors.size ) { 439 483 abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size ); … … 489 533 490 534 //Find the thread to run 491 thread_desc* signallee = pop_head( this.blocked )->waiting_thread;492 set_owner( monitors, count, signallee );535 $thread * signallee = pop_head( this.blocked )->waiting_thread; 536 __set_owner( monitors, count, signallee ); 493 537 494 538 __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee ); 495 539 540 // unlock all the monitors 541 unlock_all( locks, count ); 542 543 // unpark the thread we signalled 544 unpark( signallee __cfaabi_dbg_ctx2 ); 545 496 546 //Everything is ready to go to sleep 497 BlockInternal( locks, count, &signallee, 1);547 park( __cfaabi_dbg_ctx ); 498 548 499 549 … … 536 586 // Create one! 
537 587 __lock_size_t max = count_max( mask ); 538 monitor_desc* mon_storage[max];588 $monitor * mon_storage[max]; 539 589 __builtin_memset( mon_storage, 0, sizeof( mon_storage ) ); 540 590 __lock_size_t actual_count = aggregate( mon_storage, mask ); … … 554 604 { 555 605 // Check if the entry queue 556 thread_desc* next; int index;606 $thread * next; int index; 557 607 [next, index] = search_entry_queue( mask, monitors, count ); 558 608 … … 564 614 verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." ); 565 615 566 monitor_desc* mon2dtor = accepted[0];616 $monitor * mon2dtor = accepted[0]; 567 617 verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." ); 568 618 … … 590 640 591 641 // Set the owners to be the next thread 592 set_owner( monitors, count, next ); 593 594 // Everything is ready to go to sleep 595 BlockInternal( locks, count, &next, 1 ); 642 __set_owner( monitors, count, next ); 643 644 // unlock all the monitors 645 unlock_all( locks, count ); 646 647 // unpark the thread we signalled 648 unpark( next __cfaabi_dbg_ctx2 ); 649 650 //Everything is ready to go to sleep 651 park( __cfaabi_dbg_ctx ); 596 652 597 653 // We are back, restore the owners and recursions … … 631 687 } 632 688 689 // unlock all the monitors 690 unlock_all( locks, count ); 691 633 692 //Everything is ready to go to sleep 634 BlockInternal( locks, count);693 park( __cfaabi_dbg_ctx ); 635 694 636 695 … … 649 708 // Utilities 650 709 651 static inline void set_owner( monitor_desc * this, thread_desc* owner ) {652 / / __cfaabi_dbg_print_safe( "Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner);710 static inline void __set_owner( $monitor * this, $thread * owner ) { 711 /* paranoid */ verify( this->lock.lock ); 653 712 654 713 //Pass the monitor appropriately … … 659 718 } 660 719 661 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) { 662 monitors[0]->owner = owner; 663 monitors[0]->recursion = 1; 720 static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) { 721 /* paranoid */ verify ( monitors[0]->lock.lock ); 722 /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] ); 723 monitors[0]->owner = owner; 724 monitors[0]->recursion = 1; 664 725 for( __lock_size_t i = 1; i < count; i++ ) { 665 monitors[i]->owner = owner; 666 monitors[i]->recursion = 0; 667 } 668 } 669 670 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) { 726 /* paranoid */ verify ( monitors[i]->lock.lock ); 727 /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] ); 728 monitors[i]->owner = owner; 729 monitors[i]->recursion = 0; 730 } 731 } 732 733 static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) { 671 734 for( __lock_size_t i = 0; i < count; i++) { 672 735 storage[i]->mask = mask; … … 674 737 } 675 738 676 static inline void reset_mask( monitor_desc* this ) {739 static inline void reset_mask( $monitor * this ) { 677 740 this->mask.accepted = 0p; 678 741 this->mask.data = 0p; … … 680 743 } 681 744 682 static inline thread_desc * next_thread( monitor_desc* this ) {745 static inline 
$thread * next_thread( $monitor * this ) { 683 746 //Check the signaller stack 684 747 __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top); … … 688 751 //regardless of if we are ready to baton pass, 689 752 //we need to set the monitor as in use 690 set_owner( this, urgent->owner->waiting_thread ); 753 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 754 __set_owner( this, urgent->owner->waiting_thread ); 691 755 692 756 return check_condition( urgent ); … … 695 759 // No signaller thread 696 760 // Get the next thread in the entry_queue 697 thread_desc * new_owner = pop_head( this->entry_queue ); 698 set_owner( this, new_owner ); 761 $thread * new_owner = pop_head( this->entry_queue ); 762 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 763 /* paranoid */ verify( !new_owner || new_owner->next == 0p ); 764 __set_owner( this, new_owner ); 699 765 700 766 return new_owner; 701 767 } 702 768 703 static inline bool is_accepted( monitor_desc* this, const __monitor_group_t & group ) {769 static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) { 704 770 __acceptable_t * it = this->mask.data; // Optim 705 771 __lock_size_t count = this->mask.size; … … 723 789 } 724 790 725 static inline void init( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {791 static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 726 792 for( __lock_size_t i = 0; i < count; i++) { 727 793 (criteria[i]){ monitors[i], waiter }; … … 731 797 } 732 798 733 static inline void init_push( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {799 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 734 800 for( __lock_size_t i = 0; i < count; i++) { 735 801 (criteria[i]){ monitors[i], waiter }; … … 747 813 } 748 814 749 static inline void lock_all( monitor_desc* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {815 static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) { 750 816 for( __lock_size_t i = 0; i < count; i++ ) { 751 817 __spinlock_t * l = &source[i]->lock; … … 761 827 } 762 828 763 static inline void unlock_all( monitor_desc* locks [], __lock_size_t count ) {829 static inline void unlock_all( $monitor * locks [], __lock_size_t count ) { 764 830 for( __lock_size_t i = 0; i < count; i++ ) { 765 831 unlock( locks[i]->lock ); … … 768 834 769 835 static inline void save( 770 monitor_desc* ctx [],836 $monitor * ctx [], 771 837 __lock_size_t count, 772 838 __attribute((unused)) __spinlock_t * locks [], … … 781 847 782 848 static inline void restore( 783 monitor_desc* ctx [],849 $monitor * ctx [], 784 850 __lock_size_t count, 785 851 __spinlock_t * locks [], … … 799 865 // 2 - Checks if all the monitors are ready to run 800 866 // if so return the thread to run 801 static inline thread_desc* check_condition( __condition_criterion_t * target ) {867 static inline $thread * check_condition( __condition_criterion_t * target ) { 
802 868 __condition_node_t * node = target->owner; 803 869 unsigned short count = node->count; … … 822 888 823 889 static inline void brand_condition( condition & this ) { 824 thread_desc* thrd = TL_GET( this_thread );890 $thread * thrd = TL_GET( this_thread ); 825 891 if( !this.monitors ) { 826 892 // __cfaabi_dbg_print_safe( "Branding\n" ); … … 828 894 this.monitor_count = thrd->monitors.size; 829 895 830 this.monitors = ( monitor_desc**)malloc( this.monitor_count * sizeof( *this.monitors ) );896 this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) ); 831 897 for( int i = 0; i < this.monitor_count; i++ ) { 832 898 this.monitors[i] = thrd->monitors[i]; … … 835 901 } 836 902 837 static inline [ thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc* monitors [], __lock_size_t count ) {838 839 __queue_t( thread_desc) & entry_queue = monitors[0]->entry_queue;903 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) { 904 905 __queue_t($thread) & entry_queue = monitors[0]->entry_queue; 840 906 841 907 // For each thread in the entry-queue 842 for( thread_desc** thrd_it = &entry_queue.head;908 for( $thread ** thrd_it = &entry_queue.head; 843 909 *thrd_it; 844 910 thrd_it = &(*thrd_it)->link.next … … 884 950 } 885 951 886 static inline __lock_size_t aggregate( monitor_desc* storage [], const __waitfor_mask_t & mask ) {952 static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) { 887 953 __lock_size_t size = 0; 888 954 for( __lock_size_t i = 0; i < mask.size; i++ ) { -
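The recurring pattern in the rewritten monitor.cfa above is: choose the next owner while holding the monitor spinlock, then unlock, then unpark() the chosen thread, and finally park() the current one, where the old BlockInternal variants released the locks and blocked in one step. Splitting it this way is only race-free if an unpark delivered before the target has parked is remembered rather than lost. A minimal C analogue of that one-shot ticket (illustrative names, not the runtime's types):

    #include <pthread.h>
    #include <stdbool.h>

    /* One park/unpark ticket per thread. */
    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        bool ticket;                 /* set by unpark, consumed by park */
    } park_ticket;

    static void ticket_init(park_ticket * t) {
        pthread_mutex_init(&t->lock, NULL);
        pthread_cond_init(&t->cv, NULL);
        t->ticket = false;
    }

    static void park_sketch(park_ticket * t) {
        pthread_mutex_lock(&t->lock);
        while (!t->ticket)                       /* if unpark already ran, fall through at once */
            pthread_cond_wait(&t->cv, &t->lock);
        t->ticket = false;                       /* consume the wake-up */
        pthread_mutex_unlock(&t->lock);
    }

    static void unpark_sketch(park_ticket * t) {
        pthread_mutex_lock(&t->lock);
        t->ticket = true;                        /* remembered even if the target has not parked yet */
        pthread_cond_signal(&t->cv);
        pthread_mutex_unlock(&t->lock);
    }

With this guarantee, signal_block, for example, can unlock_all the monitor locks, unpark the signallee, and only then park, without losing the wake-up even if the signallee runs first and signals back immediately.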
libcfa/src/concurrency/monitor.hfa
rb7d6a36 r6a490b2 23 23 24 24 trait is_monitor(dtype T) { 25 monitor_desc* get_monitor( T & );25 $monitor * get_monitor( T & ); 26 26 void ^?{}( T & mutex ); 27 27 }; 28 28 29 static inline void ?{}( monitor_desc& this) with( this ) {29 static inline void ?{}($monitor & this) with( this ) { 30 30 lock{}; 31 31 entry_queue{}; … … 39 39 } 40 40 41 static inline void ^?{}( monitor_desc& ) {}41 static inline void ^?{}($monitor & ) {} 42 42 43 43 struct monitor_guard_t { 44 monitor_desc** m;44 $monitor ** m; 45 45 __lock_size_t count; 46 46 __monitor_group_t prev; 47 47 }; 48 48 49 void ?{}( monitor_guard_t & this, monitor_desc** m, __lock_size_t count, void (*func)() );49 void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() ); 50 50 void ^?{}( monitor_guard_t & this ); 51 51 52 52 struct monitor_dtor_guard_t { 53 monitor_desc* m;53 $monitor * m; 54 54 __monitor_group_t prev; 55 55 }; 56 56 57 void ?{}( monitor_dtor_guard_t & this, monitor_desc** m, void (*func)() );57 void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)() ); 58 58 void ^?{}( monitor_dtor_guard_t & this ); 59 59 … … 72 72 73 73 // The monitor this criterion concerns 74 monitor_desc* target;74 $monitor * target; 75 75 76 76 // The parent node to which this criterion belongs … … 87 87 struct __condition_node_t { 88 88 // Thread that needs to be woken when all criteria are met 89 thread_desc* waiting_thread;89 $thread * waiting_thread; 90 90 91 91 // Array of criteria (Criterions are contiguous in memory) … … 106 106 } 107 107 108 void ?{}(__condition_node_t & this, thread_desc* waiting_thread, __lock_size_t count, uintptr_t user_info );108 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ); 109 109 void ?{}(__condition_criterion_t & this ); 110 void ?{}(__condition_criterion_t & this, monitor_desc* target, __condition_node_t * owner );110 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner ); 111 111 112 112 struct condition { … … 115 115 116 116 // Array of monitor pointers (Monitors are NOT contiguous in memory) 117 monitor_desc** monitors;117 $monitor ** monitors; 118 118 119 119 // Number of monitors in the array … … 133 133 bool signal ( condition & this ); 134 134 bool signal_block( condition & this ); 135 static inline bool is_empty ( condition & this ) { return !this.blocked.head; }135 static inline bool is_empty ( condition & this ) { return this.blocked.head == 1p; } 136 136 uintptr_t front ( condition & this ); 137 137 -
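In monitor.hfa above, is_empty now compares the condition's blocked head against 1p instead of a null pointer; combined with the paranoid checks added in monitor.cfa (thrd->next == 0p before append, == 1p after), the intrusive queues appear to use the null pointer for "not on any queue" and address 1 as the end-of-queue sentinel, so the two states can be asserted independently. That reading is an inference from the hunks shown; a C sketch of the convention, with illustrative queue and node names:

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node * next; };

    #define END ((struct node *)1)   /* end-of-queue sentinel, distinct from "not queued" (NULL) */

    struct queue { struct node * head; struct node ** tail; };

    static void queue_init(struct queue * q) { q->head = END; q->tail = &q->head; }

    static int queue_empty(struct queue * q) { return q->head == END; }  /* mirrors head == 1p */

    static void append(struct queue * q, struct node * n) {
        assert(n->next == NULL);      /* not on any queue yet (the 0p check) */
        n->next = END;                /* becomes the last element (the 1p check) */
        *q->tail = n;
        q->tail = &n->next;
    }

    static struct node * pop_head(struct queue * q) {
        struct node * n = q->head;
        if (n == END) return NULL;
        q->head = n->next;
        if (q->head == END) q->tail = &q->head;
        n->next = NULL;               /* restore the "not queued" state */
        return n;
    }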
libcfa/src/concurrency/mutex.cfa
rb7d6a36 r6a490b2 40 40 if( is_locked ) { 41 41 append( blocked_threads, kernelTLS.this_thread ); 42 BlockInternal( &lock ); 42 unlock( lock ); 43 park( __cfaabi_dbg_ctx ); 43 44 } 44 45 else { … … 62 63 lock( this.lock __cfaabi_dbg_ctx2 ); 63 64 this.is_locked = (this.blocked_threads != 0); 64 WakeThread(65 pop_head( this.blocked_threads ) 65 unpark( 66 pop_head( this.blocked_threads ) __cfaabi_dbg_ctx2 66 67 ); 67 68 unlock( this.lock ); … … 94 95 else { 95 96 append( blocked_threads, kernelTLS.this_thread ); 96 BlockInternal( &lock ); 97 unlock( lock ); 98 park( __cfaabi_dbg_ctx ); 97 99 } 98 100 } … … 118 120 recursion_count--; 119 121 if( recursion_count == 0 ) { 120 thread_desc* thrd = pop_head( blocked_threads );122 $thread * thrd = pop_head( blocked_threads ); 121 123 owner = thrd; 122 124 recursion_count = (thrd ? 1 : 0); 123 WakeThread( thrd);125 unpark( thrd __cfaabi_dbg_ctx2 ); 124 126 } 125 127 unlock( lock ); … … 138 140 void notify_one(condition_variable & this) with(this) { 139 141 lock( lock __cfaabi_dbg_ctx2 ); 140 WakeThread(141 pop_head( this.blocked_threads ) 142 unpark( 143 pop_head( this.blocked_threads ) __cfaabi_dbg_ctx2 142 144 ); 143 145 unlock( lock ); … … 147 149 lock( lock __cfaabi_dbg_ctx2 ); 148 150 while(this.blocked_threads) { 149 WakeThread(150 pop_head( this.blocked_threads ) 151 unpark( 152 pop_head( this.blocked_threads ) __cfaabi_dbg_ctx2 151 153 ); 152 154 } … … 157 159 lock( this.lock __cfaabi_dbg_ctx2 ); 158 160 append( this.blocked_threads, kernelTLS.this_thread ); 159 BlockInternal( &this.lock ); 161 unlock( this.lock ); 162 park( __cfaabi_dbg_ctx ); 160 163 } 161 164 … … 164 167 lock( this.lock __cfaabi_dbg_ctx2 ); 165 168 append( this.blocked_threads, kernelTLS.this_thread ); 166 void __unlock(void) { 167 unlock(l); 168 unlock(this.lock); 169 } 170 BlockInternal( __unlock ); 169 unlock(l); 170 unlock(this.lock); 171 park( __cfaabi_dbg_ctx ); 171 172 lock(l); 172 173 } -
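In the mutex_lock unlock path above, the lock is not simply re-opened: is_locked stays true whenever blocked_threads is non-empty, and ownership is handed directly to the thread popped from the queue, which is unparked after the internal spinlock is released. A C sketch of such a FIFO hand-off lock, with a POSIX semaphore per waiter standing in for park/unpark; the names are hypothetical and cleanup is elided for brevity:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct waiter { sem_t wake; struct waiter * next; };

    struct handoff_lock {
        pthread_mutex_t internal;      /* plays the role of the internal __spinlock_t */
        bool is_locked;
        struct waiter * head, * tail;
    };

    static void lock_init(struct handoff_lock * l) {
        pthread_mutex_init(&l->internal, NULL);
        l->is_locked = false; l->head = l->tail = NULL;
    }

    static void lock_acquire(struct handoff_lock * l) {
        pthread_mutex_lock(&l->internal);
        if (!l->is_locked) {                       /* fast path: take the free lock */
            l->is_locked = true;
            pthread_mutex_unlock(&l->internal);
            return;
        }
        struct waiter me; sem_init(&me.wake, 0, 0); me.next = NULL;
        if (l->tail) l->tail->next = &me; else l->head = &me;
        l->tail = &me;
        pthread_mutex_unlock(&l->internal);        /* release the internal lock before blocking */
        sem_wait(&me.wake);                        /* "park": ownership already ours on wake-up */
    }

    static void lock_release(struct handoff_lock * l) {
        pthread_mutex_lock(&l->internal);
        struct waiter * next = l->head;
        if (next) {                                /* hand the lock to the oldest waiter, stay locked */
            l->head = next->next;
            if (!l->head) l->tail = NULL;
        } else {
            l->is_locked = false;
        }
        pthread_mutex_unlock(&l->internal);
        if (next) sem_post(&next->wake);           /* "unpark" after dropping the internal lock */
    }

The direct hand-off avoids a thundering herd and keeps the queue FIFO: a newly arriving thread cannot barge in front of a waiter that has already been chosen.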
libcfa/src/concurrency/mutex.hfa
rb7d6a36 r6a490b2 36 36 37 37 // List of blocked threads 38 __queue_t(struct thread_desc) blocked_threads;38 __queue_t(struct $thread) blocked_threads; 39 39 40 40 // Locked flag … … 55 55 56 56 // List of blocked threads 57 __queue_t(struct thread_desc) blocked_threads;57 __queue_t(struct $thread) blocked_threads; 58 58 59 59 // Current thread owning the lock 60 struct thread_desc* owner;60 struct $thread * owner; 61 61 62 62 // Number of recursion level … … 83 83 84 84 // List of blocked threads 85 __queue_t(struct thread_desc) blocked_threads;85 __queue_t(struct $thread) blocked_threads; 86 86 }; 87 87 -
libcfa/src/concurrency/preemption.cfa
rb7d6a36 r6a490b2 39 39 // FwdDeclarations : timeout handlers 40 40 static void preempt( processor * this ); 41 static void timeout( thread_desc* this );41 static void timeout( $thread * this ); 42 42 43 43 // FwdDeclarations : Signal handlers 44 44 static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ); 45 static void sigHandler_alarm ( __CFA_SIGPARMS__ ); 45 46 static void sigHandler_segv ( __CFA_SIGPARMS__ ); 46 47 static void sigHandler_ill ( __CFA_SIGPARMS__ ); … … 83 84 // Get next expired node 84 85 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) { 85 if( ! alarms->head) return 0p; // If no alarms return null86 if( alarms->head->alarm >= currtime ) return 0p; // If alarms head not expired return null86 if( ! & (*alarms)`first ) return 0p; // If no alarms return null 87 if( (*alarms)`first.alarm >= currtime ) return 0p; // If alarms head not expired return null 87 88 return pop(alarms); // Otherwise just pop head 88 89 } … … 97 98 while( node = get_expired( alarms, currtime ) ) { 98 99 // __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" ); 100 Duration period = node->period; 101 if( period == 0) { 102 node->set = false; // Node is one-shot, just mark it as not pending 103 } 99 104 100 105 // Check if this is a kernel … … 107 112 108 113 // Check if this is a periodic alarm 109 Duration period = node->period;110 114 if( period > 0 ) { 111 115 // __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv ); … … 113 117 insert( alarms, node ); // Reinsert the node for the next time it triggers 114 118 } 115 else {116 node->set = false; // Node is one-shot, just mark it as not pending117 }118 119 } 119 120 120 121 // If there are still alarms pending, reset the timer 121 if( alarms->head) {122 // __cfaabi_dbg_print_buffer_decl(" KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);123 Duration delta = alarms->head->alarm - currtime;124 Duration cap ed = max(delta, 50`us);122 if( & (*alarms)`first ) { 123 __cfadbg_print_buffer_decl(preemption, " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv); 124 Duration delta = (*alarms)`first.alarm - currtime; 125 Duration capped = max(delta, 50`us); 125 126 // itimerval tim = { caped }; 126 127 // __cfaabi_dbg_print_buffer_local( " Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec); 127 128 128 __kernel_set_timer( cap ed );129 __kernel_set_timer( capped ); 129 130 } 130 131 } … … 184 185 185 186 // Enable interrupts by decrementing the counter 186 // If counter reaches 0, execute any pending CtxSwitch187 // If counter reaches 0, execute any pending __cfactx_switch 187 188 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 188 189 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 189 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic store190 190 191 191 with( kernelTLS.preemption_state ){ … … 209 209 if( proc->pending_preemption ) { 210 210 proc->pending_preemption = false; 211 BlockInternal( thrd);211 force_yield( __POLL_PREEMPTION ); 212 212 } 213 213 } … … 219 219 220 220 // Disable interrupts by incrementint the counter 221 // Don't execute any pending CtxSwitch even if counter reaches 0221 // Don't execute any pending __cfactx_switch even if counter 
reaches 0 222 222 void enable_interrupts_noPoll() { 223 223 unsigned short prev = kernelTLS.preemption_state.disable_count; … … 257 257 258 258 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 259 259 abort( "internal error, pthread_sigmask" ); 260 260 } 261 261 } … … 268 268 269 269 // reserved for future use 270 static void timeout( thread_desc* this ) {271 //TODO : implement waking threads270 static void timeout( $thread * this ) { 271 __unpark( this __cfaabi_dbg_ctx2 ); 272 272 } 273 273 274 274 // KERNEL ONLY 275 // Check if a CtxSwitch signal handler shoud defer275 // Check if a __cfactx_switch signal handler shoud defer 276 276 // If true : preemption is safe 277 277 // If false : preemption is unsafe and marked as pending … … 303 303 304 304 // Setup proper signal handlers 305 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler 305 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler 306 __cfaabi_sigaction( SIGALRM, sigHandler_alarm , SA_SIGINFO | SA_RESTART ); // debug handler 306 307 307 308 signal_block( SIGALRM ); 308 309 309 alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );310 alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p ); 310 311 } 311 312 … … 394 395 // Preemption can occur here 395 396 396 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch 397 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch 398 } 399 400 static void sigHandler_alarm( __CFA_SIGPARMS__ ) { 401 abort("SIGALRM should never reach the signal handler"); 397 402 } 398 403 -
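The reworked tick_preemption above marks one-shot alarms as no longer pending before running them, reinserts periodic alarms with their next deadline, and re-arms the timer with the time to the earliest remaining alarm clamped below at 50 microseconds (max(delta, 50`us)). A self-contained C sketch of that bookkeeping over a deadline-sorted list; the types and the fire callback are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    struct alarm {
        uint64_t deadline_us;     /* absolute expiry time */
        uint64_t period_us;       /* 0 for one-shot alarms */
        bool set;                 /* is the alarm currently armed? */
        struct alarm * next;      /* list sorted earliest deadline first */
    };

    /* Insert in deadline order (earliest first). */
    static void insert_alarm(struct alarm ** list, struct alarm * a) {
        while (*list && (*list)->deadline_us <= a->deadline_us) list = &(*list)->next;
        a->next = *list;
        *list = a;
    }

    /* Process expired alarms; return the delay (us) to re-arm the timer with, or 0 if none pending. */
    static uint64_t tick_sketch(struct alarm ** list, uint64_t now_us,
                                void (*fire)(struct alarm *)) {
        while (*list && (*list)->deadline_us < now_us) {
            struct alarm * a = *list;
            *list = a->next;                        /* pop the expired head */
            if (a->period_us == 0) a->set = false;  /* one-shot: no longer pending */
            fire(a);                                /* run the handler (preempt / unpark) */
            if (a->period_us > 0) {                 /* periodic: schedule the next occurrence */
                a->deadline_us += a->period_us;
                insert_alarm(list, a);
            }
        }
        if (!*list) return 0;
        uint64_t delta = (*list)->deadline_us - now_us;
        return delta < 50 ? 50 : delta;             /* clamp, mirroring max(delta, 50`us) */
    }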
libcfa/src/concurrency/thread.cfa
rb7d6a36 r6a490b2 23 23 #include "invoke.h" 24 24 25 extern "C" {26 #include <fenv.h>27 #include <stddef.h>28 }29 30 //extern volatile thread_local processor * this_processor;31 32 25 //----------------------------------------------------------------------------- 33 26 // Thread ctors and dtors 34 void ?{}( thread_desc& this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {27 void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) { 35 28 context{ 0p, 0p }; 36 29 self_cor{ name, storage, storageSize }; 37 30 state = Start; 31 preempted = __NO_PREEMPTION; 38 32 curr_cor = &self_cor; 39 33 self_mon.owner = &this; … … 51 45 } 52 46 53 void ^?{}( thread_desc& this) with( this ) {47 void ^?{}($thread& this) with( this ) { 54 48 unregister(curr_cluster, this); 55 49 ^self_cor{}; 56 50 } 57 51 52 //----------------------------------------------------------------------------- 53 // Starting and stopping threads 54 forall( dtype T | is_thread(T) ) 55 void __thrd_start( T & this, void (*main_p)(T &) ) { 56 $thread * this_thrd = get_thread(this); 57 58 disable_interrupts(); 59 __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread); 60 61 this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP]; 62 verify( this_thrd->context.SP ); 63 64 __schedule_thread(this_thrd); 65 enable_interrupts( __cfaabi_dbg_ctx ); 66 } 67 68 //----------------------------------------------------------------------------- 69 // Support for threads that don't ues the thread keyword 58 70 forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } ) 59 71 void ?{}( scoped(T)& this ) with( this ) { … … 73 85 } 74 86 75 //-----------------------------------------------------------------------------76 // Starting and stopping threads77 forall( dtype T | is_thread(T) )78 void __thrd_start( T & this, void (*main_p)(T &) ) {79 thread_desc * this_thrd = get_thread(this);80 81 disable_interrupts();82 CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);83 84 this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];85 verify( this_thrd->context.SP );86 87 ScheduleThread(this_thrd);88 enable_interrupts( __cfaabi_dbg_ctx );89 }90 91 void yield( void ) {92 // Safety note : This could cause some false positives due to preemption93 verify( TL_GET( preemption_state.enabled ) );94 BlockInternal( TL_GET( this_thread ) );95 // Safety note : This could cause some false positives due to preemption96 verify( TL_GET( preemption_state.enabled ) );97 }98 99 void yield( unsigned times ) {100 for( unsigned i = 0; i < times; i++ ) {101 yield();102 }103 }104 105 87 // Local Variables: // 106 88 // mode: c // -
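__thrd_start above builds the new thread's execution context first (__cfactx_start, then copying SP/FP into the descriptor and verifying them) and only then publishes it with __schedule_thread, all with interrupts disabled. The same "fully initialize the context before anyone can run it" shape can be shown with POSIX ucontext; this is purely an analogue, not the CFA start mechanism:

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t main_ctx;
    static ucontext_t child_ctx;
    static char child_stack[64 * 1024];

    static void child_main(void) {
        puts("child runs only after its context was fully built");
        /* returning follows uc_link back to main_ctx */
    }

    int main(void) {
        /* 1) build a complete, valid context for the child */
        getcontext(&child_ctx);
        child_ctx.uc_stack.ss_sp   = child_stack;
        child_ctx.uc_stack.ss_size = sizeof(child_stack);
        child_ctx.uc_link          = &main_ctx;
        makecontext(&child_ctx, child_main, 0);

        /* 2) only now is the child safe to publish; here we just switch to it directly */
        swapcontext(&main_ctx, &child_ctx);
        puts("back in main");
        return 0;
    }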
libcfa/src/concurrency/thread.hfa
rb7d6a36 r6a490b2 28 28 void ^?{}(T& mutex this); 29 29 void main(T& this); 30 thread_desc* get_thread(T& this);30 $thread* get_thread(T& this); 31 31 }; 32 32 33 #define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this) 33 // define that satisfies the trait without using the thread keyword 34 #define DECL_THREAD(X) $thread* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this) 35 36 // Inline getters for threads/coroutines/monitors 37 forall( dtype T | is_thread(T) ) 38 static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; } 34 39 35 40 forall( dtype T | is_thread(T) ) 36 static inline coroutine_desc* get_coroutine(T & this) { 37 return &get_thread(this)->self_cor; 38 } 41 static inline $monitor * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; } 39 42 40 forall( dtype T | is_thread(T) ) 41 static inline monitor_desc* get_monitor(T & this) { 42 return &get_thread(this)->self_mon; 43 } 43 static inline $coroutine* get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; } 44 static inline $monitor * get_monitor ($thread * this) __attribute__((const)) { return &this->self_mon; } 44 45 45 static inline coroutine_desc* get_coroutine(thread_desc * this) { 46 return &this->self_cor; 47 } 48 49 static inline monitor_desc* get_monitor(thread_desc * this) { 50 return &this->self_mon; 51 } 52 46 //----------------------------------------------------------------------------- 47 // forward declarations needed for threads 53 48 extern struct cluster * mainCluster; 54 49 … … 58 53 //----------------------------------------------------------------------------- 59 54 // Ctors and dtors 60 void ?{}( thread_desc& this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );61 void ^?{}( thread_desc& this);55 void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize ); 56 void ^?{}($thread & this); 62 57 63 static inline void ?{}( thread_desc& this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }64 static inline void ?{}( thread_desc& this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }65 static inline void ?{}( thread_desc& this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }66 static inline void ?{}( thread_desc& this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }67 static inline void ?{}( thread_desc& this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }68 static inline void ?{}( thread_desc& this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }69 static inline void ?{}( thread_desc& this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }70 static inline void ?{}( thread_desc& this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }71 static inline void ?{}( thread_desc& this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }58 static inline void ?{}($thread & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; } 59 static inline void ?{}($thread & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; } 60 static inline void ?{}($thread & this, void * storage, size_t storageSize 
) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; } 61 static inline void ?{}($thread & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; } 62 static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; } 63 static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; } 64 static inline void ?{}($thread & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; } 65 static inline void ?{}($thread & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; } 66 static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; } 72 67 73 68 //----------------------------------------------------------------------------- … … 88 83 void ^?{}( scoped(T)& this ); 89 84 90 void yield(); 91 void yield( unsigned times ); 85 //----------------------------------------------------------------------------- 86 // Thread getters 87 static inline struct $thread * active_thread () { return TL_GET( this_thread ); } 92 88 93 static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); } 89 //----------------------------------------------------------------------------- 90 // Scheduler API 91 92 //---------- 93 // Park thread: block until corresponding call to unpark, won't block if unpark is already called 94 void park( __cfaabi_dbg_ctx_param ); 95 96 //---------- 97 // Unpark a thread, if the thread is already blocked, schedule it 98 // if the thread is not yet block, signal that it should rerun immediately 99 void unpark( $thread * this __cfaabi_dbg_ctx_param2 ); 100 101 forall( dtype T | is_thread(T) ) 102 static inline void unpark( T & this __cfaabi_dbg_ctx_param2 ) { if(!&this) return; unpark( get_thread( this ) __cfaabi_dbg_ctx_fwd2 );} 103 104 //---------- 105 // Yield: force thread to block and be rescheduled 106 bool force_yield( enum __Preemption_Reason ); 107 108 static inline void yield() { 109 force_yield(__MANUAL_PREEMPTION); 110 } 111 112 // Yield: yield N times 113 static inline void yield( unsigned times ) { 114 for( times ) { 115 yield(); 116 } 117 } 118 119 //---------- 120 // sleep: force thread to block and be rescheduled after Duration duration 121 void sleep( Duration duration ); 94 122 95 123 // Local Variables: // -
libcfa/src/exception.c
rb7d6a36 r6a490b2 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Jun 26 15:13:00 2017 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : T hu Feb 22 18:17:34 201813 // Update Count : 1 111 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Apr 14 12:01:00 2020 13 // Update Count : 18 14 14 // 15 15 16 // Normally we would get this from the CFA prelude. 16 17 #include <stddef.h> // for size_t 17 18 18 19 #include "exception.h" 19 20 20 // Implementation of the secret header. 21 // Implementation of the secret header is hardware dependent. 22 #if !( defined( __x86_64 ) || defined( __i386 ) ) 23 #error Exception Handling: No known architecture detected. 24 #endif 21 25 22 26 #include <stdlib.h> … … 24 28 #include <unwind.h> 25 29 #include <bits/debug.hfa> 30 #include "stdhdr/assert.h" 26 31 27 32 // FIX ME: temporary hack to keep ARM build working 28 33 #ifndef _URC_FATAL_PHASE1_ERROR 29 #define _URC_FATAL_PHASE1_ERROR 234 #define _URC_FATAL_PHASE1_ERROR 3 30 35 #endif // ! _URC_FATAL_PHASE1_ERROR 31 36 #ifndef _URC_FATAL_PHASE2_ERROR … … 35 40 #include "lsda.h" 36 41 42 /* The exception class for our exceptions. Because of the vendor component 43 * its value would not be standard. 44 * Vendor: UWPL 45 * Language: CFA\0 46 */ 47 const _Unwind_Exception_Class __cfaehm_exception_class = 0x4c50575500414643; 37 48 38 49 // Base exception vtable is abstract, you should not have base exceptions. 39 struct __cfa abi_ehm__base_exception_t_vtable40 ___cfa abi_ehm__base_exception_t_vtable_instance = {50 struct __cfaehm_base_exception_t_vtable 51 ___cfaehm_base_exception_t_vtable_instance = { 41 52 .parent = NULL, 42 53 .size = 0, … … 49 60 // Temperary global exception context. Does not work with concurency. 50 61 struct exception_context_t { 51 struct __cfaabi_ehm__try_resume_node * top_resume; 52 struct __cfaabi_ehm__try_resume_node * current_resume; 53 54 exception_t * current_exception; 55 int current_handler_index; 56 } shared_stack = {NULL, NULL, 0, 0}; 62 struct __cfaehm_try_resume_node * top_resume; 63 64 exception_t * current_exception; 65 int current_handler_index; 66 } static shared_stack = {NULL, NULL, 0}; 57 67 58 68 // Get the current exception context. … … 62 72 return &shared_stack; 63 73 } 64 //#define SAVE_EXCEPTION_CONTEXT(to_name)65 //struct exception_context_t * to_name = this_exception_context();66 //exception * this_exception() {67 // return this_exception_context()->current_exception;68 //}69 70 71 // This macro should be the only thing that needs to change across machines.72 // Used in the personality function, way down in termination.73 // struct _Unwind_Context * -> _Unwind_Reason_Code(*)(exception_t *)74 #define MATCHER_FROM_CONTEXT(ptr_to_context) \75 (*(_Unwind_Reason_Code(**)(exception_t *))(_Unwind_GetCFA(ptr_to_context) + 8))76 74 77 75 78 76 // RESUMPTION ================================================================ 79 77 80 void __cfaabi_ehm__throw_resume(exception_t * except) { 81 82 __cfaabi_dbg_print_safe("Throwing resumption exception\n"); 83 84 struct __cfaabi_ehm__try_resume_node * original_head = shared_stack.current_resume; 85 struct __cfaabi_ehm__try_resume_node * current = 86 (original_head) ? 
original_head->next : shared_stack.top_resume; 78 static void reset_top_resume(struct __cfaehm_try_resume_node ** store) { 79 this_exception_context()->top_resume = *store; 80 } 81 82 void __cfaehm_throw_resume(exception_t * except) { 83 struct exception_context_t * context = this_exception_context(); 84 85 __cfadbg_print_safe(exception, "Throwing resumption exception\n"); 86 87 __attribute__((cleanup(reset_top_resume))) 88 struct __cfaehm_try_resume_node * original_head = context->top_resume; 89 struct __cfaehm_try_resume_node * current = context->top_resume; 87 90 88 91 for ( ; current ; current = current->next) { 89 shared_stack.current_resume = current;92 context->top_resume = current->next; 90 93 if (current->handler(except)) { 91 shared_stack.current_resume = original_head;92 94 return; 93 95 } 94 96 } 95 97 96 __cfaabi_dbg_print_safe("Unhandled exception\n"); 97 shared_stack.current_resume = original_head; 98 __cfadbg_print_safe(exception, "Unhandled exception\n"); 98 99 99 100 // Fall back to termination: 100 __cfa abi_ehm__throw_terminate(except);101 __cfaehm_throw_terminate(except); 101 102 // TODO: Default handler for resumption. 102 103 } … … 106 107 // be added after the node is built but before it is made the top node. 107 108 108 void __cfa abi_ehm__try_resume_setup(struct __cfaabi_ehm__try_resume_node * node,109 void __cfaehm_try_resume_setup(struct __cfaehm_try_resume_node * node, 109 110 _Bool (*handler)(exception_t * except)) { 110 node->next = shared_stack.top_resume; 111 struct exception_context_t * context = this_exception_context(); 112 node->next = context->top_resume; 111 113 node->handler = handler; 112 shared_stack.top_resume = node; 113 } 114 115 void __cfaabi_ehm__try_resume_cleanup(struct __cfaabi_ehm__try_resume_node * node) { 116 shared_stack.top_resume = node->next; 114 context->top_resume = node; 115 } 116 117 void __cfaehm_try_resume_cleanup(struct __cfaehm_try_resume_node * node) { 118 struct exception_context_t * context = this_exception_context(); 119 context->top_resume = node->next; 117 120 } 118 121 … … 123 126 // May have to move to cfa for constructors and destructors (references). 124 127 125 struct __cfaabi_ehm__node { 126 struct __cfaabi_ehm__node * next; 128 // How to clean up an exception in various situations. 129 static void __cfaehm_exception_cleanup( 130 _Unwind_Reason_Code reason, 131 struct _Unwind_Exception * exception) { 132 switch (reason) { 133 case _URC_FOREIGN_EXCEPTION_CAUGHT: 134 // This one we could clean-up to allow cross-language exceptions. 135 case _URC_FATAL_PHASE1_ERROR: 136 case _URC_FATAL_PHASE2_ERROR: 137 default: 138 abort(); 139 } 140 } 141 142 // We need a piece of storage to raise the exception, for now its a single 143 // piece. 144 static struct _Unwind_Exception this_exception_storage; 145 146 struct __cfaehm_node { 147 struct __cfaehm_node * next; 127 148 }; 128 149 129 150 #define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node))) 130 #define EXCEPT_TO_NODE(except) ((struct __cfa abi_ehm__node *)(except) - 1)151 #define EXCEPT_TO_NODE(except) ((struct __cfaehm_node *)(except) - 1) 131 152 132 153 // Creates a copy of the indicated exception and sets current_exception to it. 133 static void __cfa abi_ehm__allocate_exception( exception_t * except ) {154 static void __cfaehm_allocate_exception( exception_t * except ) { 134 155 struct exception_context_t * context = this_exception_context(); 135 156 136 157 // Allocate memory for the exception. 
137 struct __cfa abi_ehm__node * store = malloc(138 sizeof( struct __cfa abi_ehm__node ) + except->virtual_table->size );158 struct __cfaehm_node * store = malloc( 159 sizeof( struct __cfaehm_node ) + except->virtual_table->size ); 139 160 140 161 if ( ! store ) { … … 149 170 // Copy the exception to storage. 150 171 except->virtual_table->copy( context->current_exception, except ); 172 173 // Set up the exception storage. 174 this_exception_storage.exception_class = __cfaehm_exception_class; 175 this_exception_storage.exception_cleanup = __cfaehm_exception_cleanup; 151 176 } 152 177 153 178 // Delete the provided exception, unsetting current_exception if relivant. 154 static void __cfa abi_ehm__delete_exception( exception_t * except ) {179 static void __cfaehm_delete_exception( exception_t * except ) { 155 180 struct exception_context_t * context = this_exception_context(); 156 181 157 __cfa abi_dbg_print_safe("Deleting Exception\n");182 __cfadbg_print_safe(exception, "Deleting Exception\n"); 158 183 159 184 // Remove the exception from the list. 160 struct __cfa abi_ehm__node * to_free = EXCEPT_TO_NODE(except);161 struct __cfa abi_ehm__node * node;185 struct __cfaehm_node * to_free = EXCEPT_TO_NODE(except); 186 struct __cfaehm_node * node; 162 187 163 188 if ( context->current_exception == except ) { … … 167 192 node = EXCEPT_TO_NODE(context->current_exception); 168 193 // It may always be in the first or second position. 169 while ( to_free != node->next ) {194 while ( to_free != node->next ) { 170 195 node = node->next; 171 196 } … … 179 204 180 205 // If this isn't a rethrow (*except==0), delete the provided exception. 181 void __cfaabi_ehm__cleanup_terminate( void * except ) { 182 if ( *(void**)except ) __cfaabi_ehm__delete_exception( *(exception_t **)except ); 183 } 184 185 186 // We need a piece of storage to raise the exception 187 struct _Unwind_Exception this_exception_storage; 206 void __cfaehm_cleanup_terminate( void * except ) { 207 if ( *(void**)except ) __cfaehm_delete_exception( *(exception_t **)except ); 208 } 188 209 189 210 // Function needed by force unwind … … 192 213 int version, 193 214 _Unwind_Action actions, 194 _Unwind_Exception_Class exception Class,215 _Unwind_Exception_Class exception_class, 195 216 struct _Unwind_Exception * unwind_exception, 196 struct _Unwind_Context * context, 197 void * some_param) { 198 if( actions & _UA_END_OF_STACK ) exit(1); 199 if( actions & _UA_CLEANUP_PHASE ) return _URC_NO_REASON; 200 201 return _URC_FATAL_PHASE2_ERROR; 217 struct _Unwind_Context * unwind_context, 218 void * stop_param) { 219 // Verify actions follow the rules we expect. 220 verify((actions & _UA_CLEANUP_PHASE) && (actions & _UA_FORCE_UNWIND)); 221 verify(!(actions & (_UA_SEARCH_PHASE | _UA_HANDER_FRAME))); 222 223 if ( actions & _UA_END_OF_STACK ) { 224 exit(1); 225 } else { 226 return _URC_NO_REASON; 227 } 202 228 } 203 229 204 230 // The exception that is being thrown must already be stored. 205 __attribute__((noreturn)) void __cfaabi_ehm__begin_unwind(void) {231 static __attribute__((noreturn)) void __cfaehm_begin_unwind(void) { 206 232 if ( ! this_exception_context()->current_exception ) { 207 233 printf("UNWIND ERROR missing exception in begin unwind\n"); 208 234 abort(); 209 235 } 210 211 236 212 237 // Call stdlibc to raise the exception … … 220 245 // the whole stack. 221 246 222 if ( ret == _URC_END_OF_STACK ) {247 if ( ret == _URC_END_OF_STACK ) { 223 248 // No proper handler was found. This can be handled in many ways, C++ calls std::terminate. 
224 249 // Here we force unwind the stack, basically raising a cancellation. … … 235 260 } 236 261 237 void __cfaabi_ehm__throw_terminate( exception_t * val ) { 238 __cfaabi_dbg_print_safe("Throwing termination exception\n"); 239 240 __cfaabi_ehm__allocate_exception( val ); 241 __cfaabi_ehm__begin_unwind(); 242 } 243 244 void __cfaabi_ehm__rethrow_terminate(void) { 245 __cfaabi_dbg_print_safe("Rethrowing termination exception\n"); 246 247 __cfaabi_ehm__begin_unwind(); 248 } 249 250 #pragma GCC push_options 251 #pragma GCC optimize("O0") 262 void __cfaehm_throw_terminate( exception_t * val ) { 263 __cfadbg_print_safe(exception, "Throwing termination exception\n"); 264 265 __cfaehm_allocate_exception( val ); 266 __cfaehm_begin_unwind(); 267 } 268 269 void __cfaehm_rethrow_terminate(void) { 270 __cfadbg_print_safe(exception, "Rethrowing termination exception\n"); 271 272 __cfaehm_begin_unwind(); 273 } 252 274 253 275 // This is our personality routine. For every stack frame annotated with 254 276 // ".cfi_personality 0x3,__gcfa_personality_v0" this function will be called twice when unwinding. 255 277 // Once in the search phase and once in the cleanup phase. 256 _Unwind_Reason_Code __gcfa_personality_v0 ( 257 int version, _Unwind_Action actions, unsigned long long exceptionClass, 258 struct _Unwind_Exception* unwind_exception, 259 struct _Unwind_Context* context) 278 _Unwind_Reason_Code __gcfa_personality_v0( 279 int version, 280 _Unwind_Action actions, 281 unsigned long long exception_class, 282 struct _Unwind_Exception * unwind_exception, 283 struct _Unwind_Context * unwind_context) 260 284 { 261 285 262 //__cfaabi_dbg_print_safe("CFA: 0x%lx\n", _Unwind_GetCFA(context)); 263 __cfaabi_dbg_print_safe("Personality function (%d, %x, %llu, %p, %p):", 264 version, actions, exceptionClass, unwind_exception, context); 265 266 // If we've reached the end of the stack then there is nothing much we can do... 267 if( actions & _UA_END_OF_STACK ) return _URC_END_OF_STACK; 268 286 //__cfadbg_print_safe(exception, "CFA: 0x%lx\n", _Unwind_GetCFA(context)); 287 __cfadbg_print_safe(exception, "Personality function (%d, %x, %llu, %p, %p):", 288 version, actions, exception_class, unwind_exception, unwind_context); 289 290 // Verify that actions follow the rules we expect. 291 // This function should never be called at the end of the stack. 292 verify(!(actions & _UA_END_OF_STACK)); 293 // Either only the search phase flag is set or... 269 294 if (actions & _UA_SEARCH_PHASE) { 270 __cfaabi_dbg_print_safe(" lookup phase"); 271 } 272 else if (actions & _UA_CLEANUP_PHASE) { 273 __cfaabi_dbg_print_safe(" cleanup phase"); 274 } 275 // Just in case, probably can't actually happen 276 else { 277 printf(" error\n"); 278 return _URC_FATAL_PHASE1_ERROR; 295 verify(actions == _UA_SEARCH_PHASE); 296 __cfadbg_print_safe(exception, " lookup phase"); 297 // ... we are in clean-up phase. 298 } else { 299 verify(actions & _UA_CLEANUP_PHASE); 300 __cfadbg_print_safe(exception, " cleanup phase"); 301 // We shouldn't be the handler frame during forced unwind. 
302 if (actions & _UA_HANDLER_FRAME) { 303 verify(!(actions & _UA_FORCE_UNWIND)); 304 __cfadbg_print_safe(exception, " (handler frame)"); 305 } else if (actions & _UA_FORCE_UNWIND) { 306 __cfadbg_print_safe(exception, " (force unwind)"); 307 } 279 308 } 280 309 281 310 // Get a pointer to the language specific data from which we will read what we need 282 const unsigned char * lsd = (const unsigned char*) _Unwind_GetLanguageSpecificData(context );283 284 if ( !lsd ) { //Nothing to do, keep unwinding311 const unsigned char * lsd = _Unwind_GetLanguageSpecificData( unwind_context ); 312 313 if ( !lsd ) { //Nothing to do, keep unwinding 285 314 printf(" no LSD"); 286 315 goto UNWIND; … … 289 318 // Get the instuction pointer and a reading pointer into the exception table 290 319 lsda_header_info lsd_info; 291 const unsigned char * cur_ptr = parse_lsda_header(context, lsd, &lsd_info); 292 _Unwind_Ptr instruction_ptr = _Unwind_GetIP( context ); 320 const unsigned char * cur_ptr = parse_lsda_header(unwind_context, lsd, &lsd_info); 321 _Unwind_Ptr instruction_ptr = _Unwind_GetIP(unwind_context); 322 323 struct exception_context_t * context = this_exception_context(); 293 324 294 325 // Linearly search the table for stuff to do 295 while ( cur_ptr < lsd_info.action_table ) {326 while ( cur_ptr < lsd_info.action_table ) { 296 327 _Unwind_Ptr callsite_start; 297 328 _Unwind_Ptr callsite_len; … … 306 337 307 338 // Have we reach the correct frame info yet? 308 if ( lsd_info.Start + callsite_start + callsite_len < instruction_ptr ) {339 if ( lsd_info.Start + callsite_start + callsite_len < instruction_ptr ) { 309 340 #ifdef __CFA_DEBUG_PRINT__ 310 341 void * ls = (void*)lsd_info.Start; … … 314 345 void * ep = (void*)lsd_info.Start + callsite_start + callsite_len; 315 346 void * ip = (void*)instruction_ptr; 316 __cfa abi_dbg_print_safe("\nfound %p - %p (%p, %p, %p), looking for %p\n",347 __cfadbg_print_safe(exception, "\nfound %p - %p (%p, %p, %p), looking for %p\n", 317 348 bp, ep, ls, cs, cl, ip); 318 349 #endif // __CFA_DEBUG_PRINT__ … … 321 352 322 353 // Have we gone too far? 323 if ( lsd_info.Start + callsite_start > instruction_ptr ) {354 if ( lsd_info.Start + callsite_start > instruction_ptr ) { 324 355 printf(" gone too far"); 325 356 break; 326 357 } 327 358 328 // Something to do? 329 if( callsite_landing_pad ) { 330 // Which phase are we in 331 if (actions & _UA_SEARCH_PHASE) { 332 // In search phase, these means we found a potential handler we must check. 333 334 // We have arbitrarily decided that 0 means nothing to do and 1 means there is 335 // a potential handler. This doesn't seem to conflict the gcc default behavior. 336 if (callsite_action != 0) { 337 // Now we want to run some code to see if the handler matches 338 // This is the tricky part where we want to the power to run arbitrary code 339 // However, generating a new exception table entry and try routine every time 340 // is way more expansive than we might like 341 // The information we have is : 342 // - The GR (Series of registers) 343 // GR1=GP Global Pointer of frame ref by context 344 // - The instruction pointer 345 // - The instruction pointer info (???) 346 // - The CFA (Canonical Frame Address) 347 // - The BSP (Probably the base stack pointer) 348 349 350 // The current apprach uses one exception table entry per try block 351 _uleb128_t imatcher; 352 // Get the relative offset to the {...}? 
353 cur_ptr = read_uleb128(cur_ptr, &imatcher); 354 355 _Unwind_Reason_Code (*matcher)(exception_t *) = 356 MATCHER_FROM_CONTEXT(context); 357 int index = matcher(shared_stack.current_exception); 358 _Unwind_Reason_Code ret = (0 == index) 359 ? _URC_CONTINUE_UNWIND : _URC_HANDLER_FOUND; 360 shared_stack.current_handler_index = index; 361 362 // Based on the return value, check if we matched the exception 363 if( ret == _URC_HANDLER_FOUND) { 364 __cfaabi_dbg_print_safe(" handler found\n"); 365 } else { 366 __cfaabi_dbg_print_safe(" no handler\n"); 367 } 368 return ret; 359 // Check for what we must do: 360 if ( 0 == callsite_landing_pad ) { 361 // Nothing to do, move along 362 __cfadbg_print_safe(exception, " no landing pad"); 363 } else if (actions & _UA_SEARCH_PHASE) { 364 // In search phase, these means we found a potential handler we must check. 365 366 // We have arbitrarily decided that 0 means nothing to do and 1 means there is 367 // a potential handler. This doesn't seem to conflict the gcc default behavior. 368 if (callsite_action != 0) { 369 // Now we want to run some code to see if the handler matches 370 // This is the tricky part where we want to the power to run arbitrary code 371 // However, generating a new exception table entry and try routine every time 372 // is way more expansive than we might like 373 // The information we have is : 374 // - The GR (Series of registers) 375 // GR1=GP Global Pointer of frame ref by context 376 // - The instruction pointer 377 // - The instruction pointer info (???) 378 // - The CFA (Canonical Frame Address) 379 // - The BSP (Probably the base stack pointer) 380 381 // The current apprach uses one exception table entry per try block 382 _uleb128_t imatcher; 383 // Get the relative offset to the {...}? 384 cur_ptr = read_uleb128(cur_ptr, &imatcher); 385 386 # if defined( __x86_64 ) 387 _Unwind_Word match_pos = _Unwind_GetCFA(unwind_context) + 8; 388 # elif defined( __i386 ) 389 _Unwind_Word match_pos = _Unwind_GetCFA(unwind_context) + 24; 390 # endif 391 int (*matcher)(exception_t *) = *(int(**)(exception_t *))match_pos; 392 393 int index = matcher(context->current_exception); 394 _Unwind_Reason_Code ret = (0 == index) 395 ? 
_URC_CONTINUE_UNWIND : _URC_HANDLER_FOUND; 396 context->current_handler_index = index; 397 398 // Based on the return value, check if we matched the exception 399 if (ret == _URC_HANDLER_FOUND) { 400 __cfadbg_print_safe(exception, " handler found\n"); 401 } else { 402 __cfadbg_print_safe(exception, " no handler\n"); 369 403 } 370 371 // This is only a cleanup handler, ignore it 372 __cfaabi_dbg_print_safe(" no action"); 404 return ret; 373 405 } 374 else if (actions & _UA_CLEANUP_PHASE) { 375 376 if( (callsite_action != 0) && !(actions & _UA_HANDLER_FRAME) ){ 377 // If this is a potential exception handler 378 // but not the one that matched the exception in the seach phase, 379 // just ignore it 380 goto UNWIND; 381 } 382 383 // We need to run some clean-up or a handler 384 // These statment do the right thing but I don't know any specifics at all 385 _Unwind_SetGR( context, __builtin_eh_return_data_regno(0), (_Unwind_Ptr) unwind_exception ); 386 _Unwind_SetGR( context, __builtin_eh_return_data_regno(1), 0 ); 387 388 // I assume this sets the instruction pointer to the adress of the landing pad 389 // It doesn't actually set it, it only state the value that needs to be set once we return _URC_INSTALL_CONTEXT 390 _Unwind_SetIP( context, ((lsd_info.LPStart) + (callsite_landing_pad)) ); 391 392 __cfaabi_dbg_print_safe(" action\n"); 393 394 // Return have some action to run 395 return _URC_INSTALL_CONTEXT; 406 407 // This is only a cleanup handler, ignore it 408 __cfadbg_print_safe(exception, " no action"); 409 } else { 410 // In clean-up phase, no destructors here but this could be the handler. 411 412 if ( (callsite_action != 0) && !(actions & _UA_HANDLER_FRAME) ){ 413 // If this is a potential exception handler 414 // but not the one that matched the exception in the seach phase, 415 // just ignore it 416 goto UNWIND; 396 417 } 418 419 // We need to run some clean-up or a handler 420 // These statment do the right thing but I don't know any specifics at all 421 _Unwind_SetGR( unwind_context, __builtin_eh_return_data_regno(0), 422 (_Unwind_Ptr)unwind_exception ); 423 _Unwind_SetGR( unwind_context, __builtin_eh_return_data_regno(1), 0 ); 424 425 // I assume this sets the instruction pointer to the adress of the landing pad 426 // It doesn't actually set it, it only state the value that needs to be set once we 427 // return _URC_INSTALL_CONTEXT 428 _Unwind_SetIP( unwind_context, ((lsd_info.LPStart) + (callsite_landing_pad)) ); 429 430 __cfadbg_print_safe(exception, " action\n"); 431 432 // Return have some action to run 433 return _URC_INSTALL_CONTEXT; 397 434 } 398 399 // Nothing to do, move along400 __cfaabi_dbg_print_safe(" no landing pad");401 435 } 402 436 // No handling found 403 __cfa abi_dbg_print_safe(" table end reached\n");437 __cfadbg_print_safe(exception, " table end reached"); 404 438 405 439 UNWIND: 406 __cfa abi_dbg_print_safe(" unwind\n");440 __cfadbg_print_safe(exception, " unwind\n"); 407 441 408 442 // Keep unwinding the stack 409 443 return _URC_CONTINUE_UNWIND; 410 444 } 445 446 #pragma GCC push_options 447 #pragma GCC optimize(0) 411 448 412 449 // Try statements are hoisted out see comments for details. 
While this could probably be unique 413 450 // and simply linked from libcfa but there is one problem left, see the exception table for details 414 451 __attribute__((noinline)) 415 void __cfa abi_ehm__try_terminate(void (*try_block)(),452 void __cfaehm_try_terminate(void (*try_block)(), 416 453 void (*catch_block)(int index, exception_t * except), 417 454 __attribute__((unused)) int (*match_block)(exception_t * except)) { … … 419 456 //! printf("%p %p %p %p\n", &try_block, &catch_block, &match_block, &xy); 420 457 421 // Setup statments: These 2 statments won't actually result in any code, they only setup global tables.422 // However, they clobber gcc cancellation support from gcc. We can replace the personality routine but423 // replacing the exception table gcc generates is not really doable, it generates labels based on how the424 // assembly works.425 426 458 // Setup the personality routine and exception table. 459 // Unforturnately these clobber gcc cancellation support which means we can't get access to 460 // the attribute cleanup tables at the same time. We would have to inspect the assembly to 461 // create a new set ourselves. 427 462 #ifdef __PIC__ 428 463 asm volatile (".cfi_personality 0x9b,CFA.ref.__gcfa_personality_v0"); … … 449 484 // Label which defines the end of the area for which the handler is setup. 450 485 asm volatile (".TRYEND:"); 451 // Label which defines the start of the exception landing pad. Basically what is called when the exception is452 // caught. Note, if multiple handlers are given, the multiplexing should be done by the generated code, not the453 // exception runtime.486 // Label which defines the start of the exception landing pad. Basically what is called when 487 // the exception is caught. Note, if multiple handlers are given, the multiplexing should be 488 // done by the generated code, not the exception runtime. 454 489 asm volatile (".CATCH:"); 455 490 456 491 // Exception handler 457 catch_block( shared_stack.current_handler_index, 458 shared_stack.current_exception ); 492 // Note: Saving the exception context on the stack breaks termination exceptions. 493 catch_block( this_exception_context()->current_handler_index, 494 this_exception_context()->current_exception ); 459 495 } 460 496 … … 464 500 465 501 #ifdef __PIC__ 466 #if defined( __i386 ) || defined( __x86_64 )467 502 asm ( 468 503 // HEADER … … 481 516 // handler landing pad offset and 1 (action code, gcc seems to use 0). 
482 517 ".LLSDACSBCFA2:\n" 483 " .uleb128 .TRYSTART-__cfa abi_ehm__try_terminate\n"518 " .uleb128 .TRYSTART-__cfaehm_try_terminate\n" 484 519 " .uleb128 .TRYEND-.TRYSTART\n" 485 " .uleb128 .CATCH-__cfa abi_ehm__try_terminate\n"520 " .uleb128 .CATCH-__cfaehm_try_terminate\n" 486 521 " .uleb128 1\n" 487 522 ".LLSDACSECFA2:\n" 488 523 // TABLE FOOTER 489 524 " .text\n" 490 " .size __cfa abi_ehm__try_terminate, .-__cfaabi_ehm__try_terminate\n"525 " .size __cfaehm_try_terminate, .-__cfaehm_try_terminate\n" 491 526 ); 492 527 … … 507 542 " .quad __gcfa_personality_v0\n" 508 543 #else // then __i386 509 " 544 " .long __gcfa_personality_v0\n" 510 545 #endif 511 546 ); 512 #else513 #error Exception Handling: unknown architecture for position independent code.514 #endif // __i386 || __x86_64515 547 #else // __PIC__ 516 #if defined( __i386 ) || defined( __x86_64 )517 548 asm ( 518 549 // HEADER … … 529 560 ".LLSDACSBCFA2:\n" 530 561 // Handled area start (relative to start of function) 531 " .uleb128 .TRYSTART-__cfa abi_ehm__try_terminate\n"562 " .uleb128 .TRYSTART-__cfaehm_try_terminate\n" 532 563 // Handled area length 533 564 " .uleb128 .TRYEND-.TRYSTART\n" 534 565 // Handler landing pad address (relative to start of function) 535 " .uleb128 .CATCH-__cfa abi_ehm__try_terminate\n"566 " .uleb128 .CATCH-__cfaehm_try_terminate\n" 536 567 // Action code, gcc seems to always use 0. 537 568 " .uleb128 1\n" … … 539 570 ".LLSDACSECFA2:\n" 540 571 " .text\n" 541 " .size __cfa abi_ehm__try_terminate, .-__cfaabi_ehm__try_terminate\n"572 " .size __cfaehm_try_terminate, .-__cfaehm_try_terminate\n" 542 573 " .ident \"GCC: (Ubuntu 6.2.0-3ubuntu11~16.04) 6.2.0 20160901\"\n" 543 574 " .section .note.GNU-stack,\"x\",@progbits\n" 544 575 ); 545 #else546 #error Exception Handling: unknown architecture for position dependent code.547 #endif // __i386 || __x86_64548 576 #endif // __PIC__ 549 577 -
libcfa/src/exception.h
rb7d6a36 r6a490b2 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Jun 26 15:11:00 2017 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Feb 22 18:11:15 201813 // Update Count : 811 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Mar 27 10:16:00 2020 13 // Update Count : 9 14 14 // 15 15 … … 21 21 #endif 22 22 23 struct __cfa abi_ehm__base_exception_t;24 typedef struct __cfa abi_ehm__base_exception_t exception_t;25 struct __cfa abi_ehm__base_exception_t_vtable {26 const struct __cfa abi_ehm__base_exception_t_vtable * parent;23 struct __cfaehm_base_exception_t; 24 typedef struct __cfaehm_base_exception_t exception_t; 25 struct __cfaehm_base_exception_t_vtable { 26 const struct __cfaehm_base_exception_t_vtable * parent; 27 27 size_t size; 28 void (*copy)(struct __cfa abi_ehm__base_exception_t *this,29 struct __cfa abi_ehm__base_exception_t * other);30 void (*free)(struct __cfa abi_ehm__base_exception_t *this);31 const char * (*msg)(struct __cfa abi_ehm__base_exception_t *this);28 void (*copy)(struct __cfaehm_base_exception_t *this, 29 struct __cfaehm_base_exception_t * other); 30 void (*free)(struct __cfaehm_base_exception_t *this); 31 const char * (*msg)(struct __cfaehm_base_exception_t *this); 32 32 }; 33 struct __cfa abi_ehm__base_exception_t {34 struct __cfa abi_ehm__base_exception_t_vtable const * virtual_table;33 struct __cfaehm_base_exception_t { 34 struct __cfaehm_base_exception_t_vtable const * virtual_table; 35 35 }; 36 extern struct __cfa abi_ehm__base_exception_t_vtable37 ___cfa abi_ehm__base_exception_t_vtable_instance;36 extern struct __cfaehm_base_exception_t_vtable 37 ___cfaehm_base_exception_t_vtable_instance; 38 38 39 39 40 40 // Used in throw statement translation. 41 void __cfa abi_ehm__throw_terminate(exception_t * except) __attribute__((noreturn));42 void __cfa abi_ehm__rethrow_terminate() __attribute__((noreturn));43 void __cfa abi_ehm__throw_resume(exception_t * except);41 void __cfaehm_throw_terminate(exception_t * except) __attribute__((noreturn)); 42 void __cfaehm_rethrow_terminate() __attribute__((noreturn)); 43 void __cfaehm_throw_resume(exception_t * except); 44 44 45 45 // Function catches termination exceptions. 46 void __cfa abi_ehm__try_terminate(46 void __cfaehm_try_terminate( 47 47 void (*try_block)(), 48 48 void (*catch_block)(int index, exception_t * except), … … 50 50 51 51 // Clean-up the exception in catch blocks. 52 void __cfa abi_ehm__cleanup_terminate(void * except);52 void __cfaehm_cleanup_terminate(void * except); 53 53 54 54 // Data structure creates a list of resume handlers. 55 struct __cfa abi_ehm__try_resume_node {56 struct __cfa abi_ehm__try_resume_node * next;55 struct __cfaehm_try_resume_node { 56 struct __cfaehm_try_resume_node * next; 57 57 _Bool (*handler)(exception_t * except); 58 58 }; 59 59 60 60 // These act as constructor and destructor for the resume node. 61 void __cfa abi_ehm__try_resume_setup(62 struct __cfa abi_ehm__try_resume_node * node,61 void __cfaehm_try_resume_setup( 62 struct __cfaehm_try_resume_node * node, 63 63 _Bool (*handler)(exception_t * except)); 64 void __cfa abi_ehm__try_resume_cleanup(65 struct __cfa abi_ehm__try_resume_node * node);64 void __cfaehm_try_resume_cleanup( 65 struct __cfaehm_try_resume_node * node); 66 66 67 67 // Check for a standard way to call fake deconstructors. 68 struct __cfa abi_ehm__cleanup_hook {};68 struct __cfaehm_cleanup_hook {}; 69 69 70 70 #ifdef __cforall -
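For the resumption side, the header above exposes a node that is pushed for the duration of a guarded region and popped afterwards; the handler returns true when it consumed the resumed exception. A minimal hedged sketch of that bracketing in C follows; the handler body and all names other than the __cfaehm_* symbols are illustrative only.

// Bracketing a region with a resumption handler, per the declarations above.
#include <stddef.h>
#include "exception.h"

static _Bool my_resume_handler( exception_t * except ) {
	// inspect 'except' (e.g. via its virtual table) and return true if handled
	return 1;
}

void guarded_region( void ) {
	struct __cfaehm_try_resume_node node;
	__cfaehm_try_resume_setup( &node, my_resume_handler );

	// ... code that may call __cfaehm_throw_resume( ... ) runs here ...

	__cfaehm_try_resume_cleanup( &node );
}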
libcfa/src/heap.cfa
rb7d6a36 r6a490b2 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Feb 4 10:04:51202013 // Update Count : 64812 // Last Modified On : Wed May 6 17:29:26 2020 13 // Update Count : 727 14 14 // 15 15 … … 19 19 #include <errno.h> // errno 20 20 #include <string.h> // memset, memcpy 21 #include <limits.h> // ULONG_MAX 21 22 extern "C" { 22 23 #include <sys/mman.h> // mmap, munmap 23 24 } // extern "C" 24 25 25 // #comment TD : Many of these should be merged into math I believe26 26 #include "bits/align.hfa" // libPow2 27 27 #include "bits/defs.hfa" // likely, unlikely … … 30 30 //#include "stdlib.hfa" // bsearchl 31 31 #include "malloc.h" 32 #include "bitmanip.hfa" // ceiling 32 33 33 34 #define MIN(x, y) (y > x ? x : y) … … 81 82 }; 82 83 84 size_t default_heap_expansion() __attribute__(( weak )) { 85 return __CFA_DEFAULT_HEAP_EXPANSION__; 86 } // default_heap_expansion 87 83 88 size_t default_mmap_start() __attribute__(( weak )) { 84 89 return __CFA_DEFAULT_MMAP_START__; 85 90 } // default_mmap_start 86 87 size_t default_heap_expansion() __attribute__(( weak )) {88 return __CFA_DEFAULT_HEAP_EXPANSION__;89 } // default_heap_expansion90 91 91 92 … … 150 151 union { 151 152 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 153 // 2nd low-order bit => zero filled 152 154 void * home; // allocated block points back to home locations (must overlay alignment) 153 155 size_t blockSize; // size for munmap (must overlay alignment) … … 169 171 struct FakeHeader { 170 172 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 171 uint32_t alignment; // low-order bits of home/blockSize used for tricks 173 // 1st low-order bit => fake header & alignment 174 uint32_t alignment; 172 175 #endif // __ORDER_LITTLE_ENDIAN__ 173 176 … … 179 182 } fake; // FakeHeader 180 183 } kind; // Kind 184 size_t size; // allocation size in bytes 181 185 } header; // Header 182 186 char pad[libAlign() - sizeof( Header )]; … … 262 266 static unsigned long long int free_storage; 263 267 static unsigned int free_calls; 268 static unsigned long long int aalloc_storage; 269 static unsigned int aalloc_calls; 264 270 static unsigned long long int calloc_storage; 265 271 static unsigned int calloc_calls; 266 272 static unsigned long long int memalign_storage; 267 273 static unsigned int memalign_calls; 274 static unsigned long long int amemalign_storage; 275 static unsigned int amemalign_calls; 268 276 static unsigned long long int cmemalign_storage; 269 277 static unsigned int cmemalign_calls; 278 static unsigned long long int resize_storage; 279 static unsigned int resize_calls; 270 280 static unsigned long long int realloc_storage; 271 281 static unsigned int realloc_calls; … … 275 285 // Use "write" because streams may be shutdown when calls are made. 
276 286 static void printStats() { 277 char helpText[ 512];287 char helpText[1024]; 278 288 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 279 289 "\nHeap statistics:\n" 280 290 " malloc: calls %u / storage %llu\n" 291 " aalloc: calls %u / storage %llu\n" 281 292 " calloc: calls %u / storage %llu\n" 282 293 " memalign: calls %u / storage %llu\n" 294 " amemalign: calls %u / storage %llu\n" 283 295 " cmemalign: calls %u / storage %llu\n" 296 " resize: calls %u / storage %llu\n" 284 297 " realloc: calls %u / storage %llu\n" 285 298 " free: calls %u / storage %llu\n" … … 288 301 " sbrk: calls %u / storage %llu\n", 289 302 malloc_calls, malloc_storage, 303 aalloc_calls, calloc_storage, 290 304 calloc_calls, calloc_storage, 291 305 memalign_calls, memalign_storage, 306 amemalign_calls, amemalign_storage, 292 307 cmemalign_calls, cmemalign_storage, 308 resize_calls, resize_storage, 293 309 realloc_calls, realloc_storage, 294 310 free_calls, free_storage, … … 300 316 301 317 static int printStatsXML( FILE * stream ) { // see malloc_info 302 char helpText[ 512];318 char helpText[1024]; 303 319 int len = snprintf( helpText, sizeof(helpText), 304 320 "<malloc version=\"1\">\n" … … 307 323 "</sizes>\n" 308 324 "<total type=\"malloc\" count=\"%u\" size=\"%llu\"/>\n" 325 "<total type=\"aalloc\" count=\"%u\" size=\"%llu\"/>\n" 309 326 "<total type=\"calloc\" count=\"%u\" size=\"%llu\"/>\n" 310 327 "<total type=\"memalign\" count=\"%u\" size=\"%llu\"/>\n" 328 "<total type=\"amemalign\" count=\"%u\" size=\"%llu\"/>\n" 311 329 "<total type=\"cmemalign\" count=\"%u\" size=\"%llu\"/>\n" 330 "<total type=\"resize\" count=\"%u\" size=\"%llu\"/>\n" 312 331 "<total type=\"realloc\" count=\"%u\" size=\"%llu\"/>\n" 313 332 "<total type=\"free\" count=\"%u\" size=\"%llu\"/>\n" … … 317 336 "</malloc>", 318 337 malloc_calls, malloc_storage, 338 aalloc_calls, aalloc_storage, 319 339 calloc_calls, calloc_storage, 320 340 memalign_calls, memalign_storage, 341 amemalign_calls, amemalign_storage, 321 342 cmemalign_calls, cmemalign_storage, 343 resize_calls, resize_storage, 322 344 realloc_calls, realloc_storage, 323 345 free_calls, free_storage, … … 339 361 340 362 341 static inline void checkAlign( size_t alignment ) {342 if ( alignment < libAlign() || ! libPow2( alignment ) ) {343 abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() );344 } // if345 } // checkAlign346 347 348 static inline bool setHeapExpand( size_t value ) {349 if ( heapExpand < pageSize ) return true;350 heapExpand = value;351 return false;352 } // setHeapExpand353 354 355 363 // thunk problem 356 364 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { … … 369 377 370 378 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk 371 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return true;379 if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return false; 372 380 mmapStart = value; // set global 373 381 … … 376 384 assert( maxBucketsUsed < NoBucketSizes ); // subscript failure ? 377 385 assert( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ? 
378 return false;386 return true; 379 387 } // setMmapStart 388 389 390 // <-------+----------------------------------------------------> bsize (bucket size) 391 // |header |addr 392 //================================================================================== 393 // align/offset | 394 // <-----------------<------------+-----------------------------> bsize (bucket size) 395 // |fake-header | addr 396 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) )) 397 #define realHeader( header ) ((HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset)) 398 399 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size) 400 // |header |addr 401 //================================================================================== 402 // align/offset | 403 // <------------------------------<<---------- dsize --------->>> bsize (bucket size) 404 // |fake-header |addr 405 #define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header )) 406 407 408 static inline void checkAlign( size_t alignment ) { 409 if ( alignment < libAlign() || ! libPow2( alignment ) ) { 410 abort( "Alignment %zu for memory allocation is less than %d and/or not a power of 2.", alignment, libAlign() ); 411 } // if 412 } // checkAlign 380 413 381 414 … … 391 424 static inline void fakeHeader( HeapManager.Storage.Header *& header, size_t & alignment ) { 392 425 if ( unlikely( (header->kind.fake.alignment & 1) == 1 ) ) { // fake header ? 393 size_t offset = header->kind.fake.offset;394 426 alignment = header->kind.fake.alignment & -2; // remove flag from value 395 427 #ifdef __CFA_DEBUG__ 396 428 checkAlign( alignment ); // check alignment 397 429 #endif // __CFA_DEBUG__ 398 header = (HeapManager.Storage.Header *)((char *)header - offset);430 header = realHeader( header ); // backup from fake to real header 399 431 } // if 400 432 } // fakeHeader 401 402 403 // <-------+----------------------------------------------------> bsize (bucket size)404 // |header |addr405 //==================================================================================406 // | alignment407 // <-----------------<------------+-----------------------------> bsize (bucket size)408 // |fake-header | addr409 #define headerAddr( addr ) ((HeapManager.Storage.Header *)( (char *)addr - sizeof(HeapManager.Storage) ))410 411 // <-------<<--------------------- dsize ---------------------->> bsize (bucket size)412 // |header |addr413 //==================================================================================414 // | alignment415 // <------------------------------<<---------- dsize --------->>> bsize (bucket size)416 // |fake-header |addr417 #define dataStorage( bsize, addr, header ) (bsize - ( (char *)addr - (char *)header ))418 433 419 434 … … 428 443 429 444 #ifdef __CFA_DEBUG__ 430 checkHeader( addr < heapBegin || header < (HeapManager.Storage.Header *)heapBegin, name, addr );// bad low address ?445 checkHeader( addr < heapBegin, name, addr ); // bad low address ? 431 446 #endif // __CFA_DEBUG__ 432 447 … … 487 502 // along with the block and is a multiple of the alignment size. 
488 503 489 if ( unlikely( size > ~0ul- sizeof(HeapManager.Storage) ) ) return 0p;504 if ( unlikely( size > ULONG_MAX - sizeof(HeapManager.Storage) ) ) return 0p; 490 505 size_t tsize = size + sizeof(HeapManager.Storage); 491 506 if ( likely( tsize < mmapStart ) ) { // small size => sbrk … … 539 554 block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size 540 555 } else { // large size => mmap 541 if ( unlikely( size > ~0ul- pageSize ) ) return 0p;556 if ( unlikely( size > ULONG_MAX - pageSize ) ) return 0p; 542 557 tsize = libCeiling( tsize, pageSize ); // must be multiple of page size 543 558 #ifdef __STATISTICS__ … … 557 572 } // if 558 573 574 block->header.size = size; // store allocation size 559 575 void * addr = &(block->data); // adjust off header to user bytes 560 576 … … 680 696 #endif // FASTLOOKUP 681 697 682 if ( setMmapStart( default_mmap_start() ) ) {698 if ( ! setMmapStart( default_mmap_start() ) ) { 683 699 abort( "HeapManager : internal error, mmap start initialization failure." ); 684 700 } // if … … 686 702 687 703 char * end = (char *)sbrk( 0 ); 688 sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 689 heapBegin = heapEnd = sbrk( 0 ); // get new start point 704 heapBegin = heapEnd = sbrk( (char *)libCeiling( (long unsigned int)end, libAlign() ) - end ); // move start of heap to multiple of alignment 690 705 } // HeapManager 691 706 … … 713 728 //assert( heapManager.heapBegin != 0 ); 714 729 //heapManager{}; 715 if ( heapManager.heapBegin == 0p ) heapManager{}; 730 if ( heapManager.heapBegin == 0p ) heapManager{}; // sanity check 716 731 } // memory_startup 717 732 … … 725 740 //assert( heapManager.heapBegin != 0 ); 726 741 if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ? 742 #if __SIZEOF_POINTER__ == 8 743 verify( size < ((typeof(size_t))1 << 48) ); 744 #endif // __SIZEOF_POINTER__ == 8 727 745 void * addr = doMalloc( size ); 728 746 if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX … … 731 749 732 750 733 static inline void * callocNoStats( size_t noOfElems, size_t elemSize ) {734 size_t size = noOfElems* elemSize;751 static inline void * callocNoStats( size_t dim, size_t elemSize ) { 752 size_t size = dim * elemSize; 735 753 char * addr = (char *)mallocNoStats( size ); 736 754 if ( unlikely( addr == 0p ) ) return 0p; … … 790 808 791 809 792 static inline void * cmemalignNoStats( size_t alignment, size_t noOfElems, size_t elemSize ) {793 size_t size = noOfElems* elemSize;810 static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) { 811 size_t size = dim * elemSize; 794 812 char * addr = (char *)memalignNoStats( alignment, size ); 795 813 if ( unlikely( addr == 0p ) ) return 0p; … … 803 821 #endif // __CFA_DEBUG__ 804 822 memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros 805 header->kind.real.blockSize |= 2; // mark as zero filled 806 823 824 header->kind.real.blockSize |= 2; // mark as zero filled 807 825 return addr; 808 826 } // cmemalignNoStats … … 819 837 820 838 extern "C" { 821 // The malloc() function allocates size bytes and returns a pointer to the allocated memory. The memory is not 822 // initialized. If size is 0, then malloc() returns either 0p, or a unique pointer value that can later be 823 // successfully passed to free(). 839 // Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. 
If size is 0, 840 // then malloc() returns a unique pointer value that can later be successfully passed to free(). 824 841 void * malloc( size_t size ) { 825 842 #ifdef __STATISTICS__ … … 831 848 } // malloc 832 849 833 // The calloc() function allocates memory for an array of nmemb elements of size bytes each and returns a pointer to 834 // the allocated memory. The memory is set to zero. If nmemb or size is 0, then calloc() returns either 0p, or a 835 // unique pointer value that can later be successfully passed to free(). 836 void * calloc( size_t noOfElems, size_t elemSize ) { 850 851 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes. 852 void * aalloc( size_t dim, size_t elemSize ) { 853 #ifdef __STATISTICS__ 854 __atomic_add_fetch( &aalloc_calls, 1, __ATOMIC_SEQ_CST ); 855 __atomic_add_fetch( &aalloc_storage, dim * elemSize, __ATOMIC_SEQ_CST ); 856 #endif // __STATISTICS__ 857 858 return mallocNoStats( dim * elemSize ); 859 } // aalloc 860 861 862 // Same as aalloc() with memory set to zero. 863 void * calloc( size_t dim, size_t elemSize ) { 837 864 #ifdef __STATISTICS__ 838 865 __atomic_add_fetch( &calloc_calls, 1, __ATOMIC_SEQ_CST ); 839 __atomic_add_fetch( &calloc_storage, noOfElems* elemSize, __ATOMIC_SEQ_CST );840 #endif // __STATISTICS__ 841 842 return callocNoStats( noOfElems, elemSize );866 __atomic_add_fetch( &calloc_storage, dim * elemSize, __ATOMIC_SEQ_CST ); 867 #endif // __STATISTICS__ 868 869 return callocNoStats( dim, elemSize ); 843 870 } // calloc 844 871 845 // The realloc() function changes the size of the memory block pointed to by ptr to size bytes. The contents will be 846 // unchanged in the range from the start of the region up to the minimum of the old and new sizes. If the new size 847 // is larger than the old size, the added memory will not be initialized. If ptr is 0p, then the call is 848 // equivalent to malloc(size), for all values of size; if size is equal to zero, and ptr is not 0p, then the call 849 // is equivalent to free(ptr). Unless ptr is 0p, it must have been returned by an earlier call to malloc(), 850 // calloc() or realloc(). If the area pointed to was moved, a free(ptr) is done. 872 // Change the size of the memory block pointed to by oaddr to size bytes. The contents are undefined. If oaddr is 873 // 0p, then the call is equivalent to malloc(size), for all values of size; if size is equal to zero, and oaddr is 874 // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier 875 // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done. 876 void * resize( void * oaddr, size_t size ) { 877 #ifdef __STATISTICS__ 878 __atomic_add_fetch( &resize_calls, 1, __ATOMIC_SEQ_CST ); 879 __atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST ); 880 #endif // __STATISTICS__ 881 882 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 883 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases 884 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 885 886 HeapManager.Storage.Header * header; 887 HeapManager.FreeHeader * freeElem; 888 size_t bsize, oalign = 0; 889 headers( "resize", oaddr, header, freeElem, bsize, oalign ); 890 891 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 892 // same size, DO NOT preserve STICKY PROPERTIES. 
893 if ( oalign == 0 && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 894 header->kind.real.blockSize &= -2; // no alignment and turn off 0 fill 895 return oaddr; 896 } // if 897 898 // change size, DO NOT preserve STICKY PROPERTIES. 899 free( oaddr ); 900 void * naddr = mallocNoStats( size ); // create new area 901 return naddr; 902 } // resize 903 904 905 // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of 906 // the old and new sizes. 851 907 void * realloc( void * oaddr, size_t size ) { 852 908 #ifdef __STATISTICS__ 853 909 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 910 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST ); 854 911 #endif // __STATISTICS__ 855 912 … … 867 924 // Do not know size of original allocation => cannot do 0 fill for any additional space because do not know 868 925 // where to start filling, i.e., do not overwrite existing values in space. 869 //870 // This case does not result in a new profiler entry because the previous one still exists and it must match with871 // the free for this memory. Hence, this realloc does not appear in the profiler output.872 926 return oaddr; 873 927 } // if 874 875 #ifdef __STATISTICS__876 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );877 #endif // __STATISTICS__878 928 879 929 // change size and copy old content to new storage … … 903 953 } // realloc 904 954 905 // The obsolete function memalign() allocates size bytes and returns a pointer to the allocated memory. The memory 906 // address will be a multiple of alignment, which must be a power of two. 955 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete) 907 956 void * memalign( size_t alignment, size_t size ) { 908 957 #ifdef __STATISTICS__ … … 915 964 916 965 917 // The cmemalign() function is the same as calloc() with memory alignment.918 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ) {966 // Same as aalloc() with memory alignment. 967 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) { 919 968 #ifdef __STATISTICS__ 920 969 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 921 __atomic_add_fetch( &cmemalign_storage, noOfElems * elemSize, __ATOMIC_SEQ_CST ); 922 #endif // __STATISTICS__ 923 924 return cmemalignNoStats( alignment, noOfElems, elemSize ); 970 __atomic_add_fetch( &cmemalign_storage, dim * elemSize, __ATOMIC_SEQ_CST ); 971 #endif // __STATISTICS__ 972 973 return memalignNoStats( alignment, dim * elemSize ); 974 } // amemalign 975 976 977 // Same as calloc() with memory alignment. 978 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) { 979 #ifdef __STATISTICS__ 980 __atomic_add_fetch( &cmemalign_calls, 1, __ATOMIC_SEQ_CST ); 981 __atomic_add_fetch( &cmemalign_storage, dim * elemSize, __ATOMIC_SEQ_CST ); 982 #endif // __STATISTICS__ 983 984 return cmemalignNoStats( alignment, dim, elemSize ); 925 985 } // cmemalign 926 986 927 // The function aligned_alloc() is the same as memalign(), except for the added restriction that size should be a928 // multiple of alignment.987 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple 988 // of alignment. This requirement is universally ignored. 
929 989 void * aligned_alloc( size_t alignment, size_t size ) { 930 990 return memalign( alignment, size ); … … 932 992 933 993 934 // The function posix_memalign() allocates size bytes and places the address of the allocated memory in *memptr. The935 // address of the allocated memory will be a multiple of alignment, which must be a power of two and a multiple of936 // sizeof(void *). If size is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later937 // be successfully passed tofree(3).994 // Allocates size bytes and places the address of the allocated memory in *memptr. The address of the allocated 995 // memory shall be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *). If size 996 // is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to 997 // free(3). 938 998 int posix_memalign( void ** memptr, size_t alignment, size_t size ) { 939 999 if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment … … 943 1003 } // posix_memalign 944 1004 945 // The obsolete function valloc() allocates size bytes and returns a pointer to the allocated memory. The memory946 // address will be a multiple of thepage size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).1005 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the 1006 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size). 947 1007 void * valloc( size_t size ) { 948 1008 return memalign( pageSize, size ); … … 950 1010 951 1011 952 // The free() function frees the memory space pointed to by ptr, which must have been returned by a previous call to 953 // malloc(), calloc() or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behavior 954 // occurs. If ptr is 0p, no operation is performed. 1012 // Same as valloc but rounds size to multiple of page size. 1013 void * pvalloc( size_t size ) { 1014 return memalign( pageSize, libCeiling( size, pageSize ) ); 1015 } // pvalloc 1016 1017 1018 // Frees the memory space pointed to by ptr, which must have been returned by a previous call to malloc(), calloc() 1019 // or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is 1020 // 0p, no operation is performed. 955 1021 void free( void * addr ) { 956 1022 #ifdef __STATISTICS__ … … 973 1039 974 1040 975 // The malloc_alignment() function returns the alignment of theallocation.1041 // Returns the alignment of an allocation. 976 1042 size_t malloc_alignment( void * addr ) { 977 1043 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment … … 980 1046 return header->kind.fake.alignment & -2; // remove flag from value 981 1047 } else { 982 return libAlign 1048 return libAlign(); // minimum alignment 983 1049 } // if 984 1050 } // malloc_alignment 985 1051 986 987 // The malloc_zero_fill() function returns true if the allocation is zero filled, i.e., initially allocated by calloc(). 1052 // Set the alignment for an the allocation and return previous alignment or 0 if no alignment. 1053 size_t $malloc_alignment_set( void * addr, size_t alignment ) { 1054 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1055 size_t ret; 1056 HeapManager.Storage.Header * header = headerAddr( addr ); 1057 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 
1058 ret = header->kind.fake.alignment & -2; // remove flag from old value 1059 header->kind.fake.alignment = alignment | 1; // add flag to new value 1060 } else { 1061 ret = 0; // => no alignment to change 1062 } // if 1063 return ret; 1064 } // $malloc_alignment_set 1065 1066 1067 // Returns true if the allocation is zero filled, e.g., allocated by calloc(). 988 1068 bool malloc_zero_fill( void * addr ) { 989 1069 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 990 1070 HeapManager.Storage.Header * header = headerAddr( addr ); 991 1071 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 992 header = (HeapManager.Storage.Header *)((char *)header - header->kind.fake.offset);993 } // if 994 return (header->kind.real.blockSize & 2) != 0; // zero filled (calloc/cmemalign)?1072 header = realHeader( header ); // backup from fake to real header 1073 } // if 1074 return (header->kind.real.blockSize & 2) != 0; // zero filled ? 995 1075 } // malloc_zero_fill 996 1076 997 998 // The malloc_usable_size() function returns the number of usable bytes in the block pointed to by ptr, a pointer to 999 // a block of memory allocated by malloc(3) or a related function. 1077 // Set allocation is zero filled and return previous zero filled. 1078 bool $malloc_zero_fill_set( void * addr ) { 1079 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1080 HeapManager.Storage.Header * header = headerAddr( addr ); 1081 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1082 header = realHeader( header ); // backup from fake to real header 1083 } // if 1084 bool ret = (header->kind.real.blockSize & 2) != 0; // zero filled ? 1085 header->kind.real.blockSize |= 2; // mark as zero filled 1086 return ret; 1087 } // $malloc_zero_fill_set 1088 1089 1090 // Returns original total allocation size (not bucket size) => array size is dimension * sizeif(T). 1091 size_t malloc_size( void * addr ) { 1092 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1093 HeapManager.Storage.Header * header = headerAddr( addr ); 1094 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1095 header = realHeader( header ); // backup from fake to real header 1096 } // if 1097 return header->size; 1098 } // malloc_size 1099 1100 // Set allocation size and return previous size. 1101 size_t $malloc_size_set( void * addr, size_t size ) { 1102 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1103 HeapManager.Storage.Header * header = headerAddr( addr ); 1104 if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ? 1105 header = realHeader( header ); // backup from fake to real header 1106 } // if 1107 size_t ret = header->size; 1108 header->size = size; 1109 return ret; 1110 } // $malloc_size_set 1111 1112 1113 // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by 1114 // malloc or a related function. 1000 1115 size_t malloc_usable_size( void * addr ) { 1001 1116 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size … … 1009 1124 1010 1125 1011 // The malloc_stats() function prints (on default standard error) statistics about memory allocated by malloc(3) and 1012 // related functions. 1126 // Prints (on default standard error) statistics about memory allocated by malloc and related functions. 
1013 1127 void malloc_stats( void ) { 1014 1128 #ifdef __STATISTICS__ … … 1018 1132 } // malloc_stats 1019 1133 1020 // The malloc_stats_fd() function changes the file descripter where malloc_stats() writes thestatistics.1134 // Changes the file descripter where malloc_stats() writes statistics. 1021 1135 int malloc_stats_fd( int fd __attribute__(( unused )) ) { 1022 1136 #ifdef __STATISTICS__ … … 1030 1144 1031 1145 1032 // The mallopt() function adjusts parameters that control the behavior of the memory-allocation functions (see 1033 // malloc(3)). The param argument specifies the parameter to be modified, and value specifies the new value for that 1034 // parameter. 1146 // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument 1147 // specifies the parameter to be modified, and value specifies the new value for that parameter. 1035 1148 int mallopt( int option, int value ) { 1036 1149 choose( option ) { 1037 1150 case M_TOP_PAD: 1038 if ( setHeapExpand( value ) )return 1;1151 heapExpand = ceiling( value, pageSize ); return 1; 1039 1152 case M_MMAP_THRESHOLD: 1040 1153 if ( setMmapStart( value ) ) return 1; 1154 break; 1041 1155 } // switch 1042 1156 return 0; // error, unsupported 1043 1157 } // mallopt 1044 1158 1045 // The malloc_trim() function attempts to release free memory at the top of the heap (by calling sbrk(2) with a 1046 // suitable argument). 1159 // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument). 1047 1160 int malloc_trim( size_t ) { 1048 1161 return 0; // => impossible to release memory … … 1050 1163 1051 1164 1052 // The malloc_info() function exports an XML string that describes the current state of the memory-allocation1053 // implementation in the caller. The string is printed on the file stream stream. The exported string includes1054 // information about all arenas (see malloc(3)).1165 // Exports an XML string that describes the current state of the memory-allocation implementation in the caller. 1166 // The string is printed on the file stream stream. The exported string includes information about all arenas (see 1167 // malloc). 1055 1168 int malloc_info( int options, FILE * stream ) { 1056 1169 if ( options != 0 ) { errno = EINVAL; return -1; } … … 1059 1172 1060 1173 1061 // The malloc_get_state() function records the current state of all malloc(3) internal bookkeeping variables (but1062 // not the actual contents of the heap or the state of malloc_hook(3) functions pointers). The state is recorded in1063 // a system-dependent opaque data structure dynamically allocated via malloc(3), and a pointer to that data1064 // structure is returned as the function result. (It is the caller's responsibility to free(3)this memory.)1174 // Records the current state of all malloc internal bookkeeping variables (but not the actual contents of the heap 1175 // or the state of malloc_hook functions pointers). The state is recorded in a system-dependent opaque data 1176 // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function 1177 // result. (The caller must free this memory.) 
1065 1178 void * malloc_get_state( void ) { 1066 1179 return 0p; // unsupported … … 1068 1181 1069 1182 1070 // The malloc_set_state() function restores the state of all malloc(3) internal bookkeeping variables to the values1071 // recorded in the opaque datastructure pointed to by state.1183 // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data 1184 // structure pointed to by state. 1072 1185 int malloc_set_state( void * ptr ) { 1073 1186 return 0; // unsupported … … 1077 1190 1078 1191 // Must have CFA linkage to overload with C linkage realloc. 1079 void * re alloc( void * oaddr, size_t nalign, size_t size ) {1192 void * resize( void * oaddr, size_t nalign, size_t size ) { 1080 1193 #ifdef __STATISTICS__ 1081 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 1194 __atomic_add_fetch( &resize_calls, 1, __ATOMIC_SEQ_CST ); 1195 __atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST ); 1082 1196 #endif // __STATISTICS__ 1083 1197 1084 1198 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1085 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases 1086 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 1199 if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases 1200 if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size ); 1201 1087 1202 1088 1203 if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum … … 1095 1210 HeapManager.FreeHeader * freeElem; 1096 1211 size_t bsize, oalign = 0; 1097 headers( "re alloc", oaddr, header, freeElem, bsize, oalign );1212 headers( "resize", oaddr, header, freeElem, bsize, oalign ); 1098 1213 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 1099 1214 1100 if ( oalign != 0 && (uintptr_t)oaddr % nalign == 0 ) { // has alignment and just happens to work out 1101 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1102 return realloc( oaddr, size ); 1103 } // if 1104 1105 #ifdef __STATISTICS__ 1106 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST ); 1107 #endif // __STATISTICS__ 1108 1109 // change size and copy old content to new storage 1215 if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match 1216 if ( oalign >= libAlign() ) { // fake header ? 
1217 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1218 } // if 1219 if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 1220 header->kind.real.blockSize &= -2; // turn off 0 fill 1221 return oaddr; 1222 } // if 1223 } // if 1224 1225 // change size 1110 1226 1111 1227 void * naddr; … … 1116 1232 } // if 1117 1233 1234 free( oaddr ); 1235 return naddr; 1236 } // resize 1237 1238 1239 void * realloc( void * oaddr, size_t nalign, size_t size ) { 1240 if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum 1241 #ifdef __CFA_DEBUG__ 1242 else 1243 checkAlign( nalign ); // check alignment 1244 #endif // __CFA_DEBUG__ 1245 1246 HeapManager.Storage.Header * header; 1247 HeapManager.FreeHeader * freeElem; 1248 size_t bsize, oalign = 0; 1249 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 1250 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 1251 1252 if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match 1253 if ( oalign >= libAlign() ) { // fake header ? 1254 headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same) 1255 } // if 1256 return realloc( oaddr, size ); 1257 } // if 1258 1259 // change size and copy old content to new storage 1260 1261 #ifdef __STATISTICS__ 1262 __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST ); 1263 __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST ); 1264 #endif // __STATISTICS__ 1265 1266 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1267 if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases 1268 if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size ); 1269 1270 void * naddr; 1271 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1272 naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area 1273 } else { 1274 naddr = memalignNoStats( nalign, size ); // create new aligned area 1275 } // if 1276 1118 1277 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1119 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage av ilable in bucket1278 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket 1120 1279 // To preserve prior fill, the entire bucket must be copied versus the size. 1121 1280 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes -
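For readers skimming the heap.cfa hunks above: the split between resize() and realloc() is that resize() may hand back storage of the requested size without preserving the old contents, reusing the old block when the request fits and no more than half of the block would be wasted. Below is a minimal sketch of that reuse test, written in plain C against glibc's malloc_usable_size() rather than the CFA heap's own bucket metadata; the helper name is hypothetical.

#include <stdlib.h>
#include <malloc.h>                               /* malloc_usable_size (glibc) */

/* Hypothetical helper: keep the old block when the request fits and no more
 * than 50% of it would be wasted; otherwise allocate new storage WITHOUT
 * copying the old contents.  The real resize() consults the CFA heap's own
 * bucket metadata instead of malloc_usable_size(). */
void * resize_sketch( void * oaddr, size_t size ) {
	if ( size == 0 ) { free( oaddr ); return malloc( 0 ); } /* special cases, as in the hunk */
	if ( oaddr == NULL ) return malloc( size );

	size_t odsize = malloc_usable_size( oaddr );  /* data storage available in the old block */
	if ( size <= odsize && odsize <= size * 2 ) { /* fits, and at most 50% wasted */
		return oaddr;                             /* reuse in place; contents untouched */
	}
	void * naddr = malloc( size );                /* new block; old data is not copied */
	free( oaddr );
	return naddr;
}

realloc(), by contrast, copies the old data into the new storage, and copies the entire old bucket rather than just the requested size so that any prior zero fill is preserved.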
libcfa/src/interpose.cfa
rb7d6a36 r6a490b2 10 10 // Created On : Wed Mar 29 16:10:31 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Feb 17 10:18:53202013 // Update Count : 1 6612 // Last Modified On : Fri Mar 13 17:35:37 2020 13 // Update Count : 178 14 14 // 15 15 16 16 #include <stdarg.h> // va_start, va_end 17 #include <stdio.h> 17 18 #include <string.h> // strlen 18 19 #include <unistd.h> // _exit, getpid … … 143 144 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 144 145 void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 146 void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )); 145 147 146 148 extern "C" { … … 152 154 va_list argp; 153 155 va_start( argp, fmt ); 154 abort( false, fmt, argp );156 __abort( false, fmt, argp ); 155 157 va_end( argp ); 156 158 } … … 218 220 } 219 221 220 void abort( bool signalAbort, const char fmt[], ... ) { 222 // Cannot forward va_list. 223 void __abort( bool signalAbort, const char fmt[], va_list args ) { 221 224 void * kernel_data = kernel_abort(); // must be done here to lock down kernel 222 225 int len; … … 228 231 229 232 assert( fmt ); 230 va_list args;231 va_start( args, fmt );232 233 233 len = vsnprintf( abort_text, abort_text_size, fmt, args ); 234 va_end( args );235 234 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 236 235 237 236 if ( fmt[strlen( fmt ) - 1] != '\n' ) { // add optional newline if missing at the end of the format text 238 __cfaabi_ dbg_write("\n", 1 );237 __cfaabi_bits_write( STDERR_FILENO, "\n", 1 ); 239 238 } // if 240 239 kernel_abort_msg( kernel_data, abort_text, abort_text_size ); … … 248 247 va_list args; 249 248 va_start( args, fmt ); 250 abort( false, fmt, args ); 249 __abort( false, fmt, args ); 250 // CONTROL NEVER REACHES HERE! 251 251 va_end( args ); 252 } 253 254 void abort( bool signalAbort, const char fmt[], ... ) { 255 va_list args; 256 va_start( args, fmt ); 257 __abort( signalAbort, fmt, args ); 258 // CONTROL NEVER REACHES HERE! 259 va_end( args ); 252 260 } 253 261 -
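The interpose.cfa change funnels the variadic abort() front ends through a single va_list worker because, as the hunk notes, a va_list cannot be forwarded as "..." to another variadic function. A sketch of that standard C idiom, with hypothetical names:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* The worker takes a va_list, like vfprintf; every variadic front end builds
 * the va_list and hands it over. */
static void my_abort_v( const char fmt[], va_list args ) {
	vfprintf( stderr, fmt, args );                /* v* routines accept a forwarded va_list */
	abort();
}

void my_abort( const char fmt[], ... ) {
	va_list args;
	va_start( args, fmt );
	my_abort_v( fmt, args );                      /* control never returns */
	va_end( args );                               /* unreachable; kept for symmetry with the hunk */
}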
libcfa/src/iostream.cfa
rb7d6a36 r6a490b2 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 20 15:53:23202013 // Update Count : 82912 // Last Modified On : Sat May 2 18:30:25 2020 13 // Update Count : 1017 14 14 // 15 15 … … 29 29 #include <complex.h> // creal, cimag 30 30 } // extern "C" 31 32 #include <bitmanip.hfa> // fms 31 33 32 34 … … 459 461 \ 460 462 if ( f.base == 'b' || f.base == 'B' ) { /* bespoke binary format */ \ 461 int bits; \ 462 if ( f.val == (T){0} ) bits = 1; /* force at least one bit to print */ \ 463 else bits = sizeof(long long int) * 8 - __builtin_clzll( f.val ); /* position of most significant bit */ \ 464 bits = bits > sizeof(f.val) * 8 ? sizeof(f.val) * 8 : bits; \ 465 int spaces = f.wd - bits; /* can be negative */ \ 466 if ( ! f.flags.nobsdp ) { spaces -= 2; } /* base prefix takes space */ \ 467 /* printf( "%d %d\n", bits, spaces ); */ \ 463 int bits = high1( f.val ); /* position of most significant bit */ \ 464 if ( bits == 0 ) bits = 1; /* 0 value => force one bit to print */ \ 465 int spaces; \ 468 466 if ( ! f.flags.left ) { /* right justified ? */ \ 469 467 /* Note, base prefix then zero padding or spacing then prefix. */ \ 470 if ( f.flags.pad0 || f.flags.pc ) { \ 468 if ( f.flags.pc ) { \ 469 spaces = f.wd - f.pc; \ 470 if ( ! f.flags.nobsdp ) { spaces -= 2; } /* base prefix takes space */ \ 471 if ( spaces > 0 ) fmt( os, "%*s", spaces, " " ); /* space pad */ \ 471 472 if ( ! f.flags.nobsdp ) { fmt( os, "0%c", f.base ); } \ 472 if ( f.flags.pc )spaces = f.pc - bits; \473 spaces = f.pc - bits; \ 473 474 if ( spaces > 0 ) fmt( os, "%0*d", spaces, 0 ); /* zero pad */ \ 474 475 } else { \ 475 if ( spaces > 0 ) fmt( os, "%*s", spaces, " " ); /* space pad */ \ 476 if ( ! f.flags.nobsdp ) { fmt( os, "0%c", f.base ); } \ 476 spaces = f.wd - bits; \ 477 if ( ! f.flags.nobsdp ) { spaces -= 2; } /* base prefix takes space */ \ 478 if ( f.flags.pad0 ) { \ 479 if ( ! f.flags.nobsdp ) { fmt( os, "0%c", f.base ); } \ 480 if ( spaces > 0 ) fmt( os, "%0*d", spaces, 0 ); /* zero pad */ \ 481 } else { \ 482 if ( spaces > 0 ) fmt( os, "%*s", spaces, " " ); /* space pad */ \ 483 if ( ! f.flags.nobsdp ) { fmt( os, "0%c", f.base ); } \ 484 } /* if */ \ 477 485 } /* if */ \ 478 } else if ( ! f.flags.nobsdp ) { \ 479 fmt( os, "0%c", f.base ); \ 486 } else { \ 487 if ( ! f.flags.nobsdp ) fmt( os, "0%c", f.base ); \ 488 if ( f.flags.pc ) { \ 489 spaces = f.pc - bits; \ 490 if ( spaces > 0 ) fmt( os, "%0*d", spaces, 0 ); /* zero pad */ \ 491 spaces = f.wd - f.pc; \ 492 } else { /* pad0 flag ignored with left flag */ \ 493 spaces = f.wd - bits; \ 494 } /* if */ \ 495 if ( ! 
f.flags.nobsdp ) { spaces -= 2; } /* base prefix takes space */ \ 480 496 } /* if */ \ 481 int shift = (bits - 1) / 4 * 4; /* floor( bits - 1, 4 ) */\497 int shift = floor( bits - 1, 4 ); \ 482 498 typeof( f.val ) temp = f.val; \ 483 499 fmt( os, "%s", shortbin[(temp >> shift) & 0xf] ); \ … … 534 550 #define IntegralFMTImpl128( T, SIGNED, CODE, IFMTNP, IFMTP ) \ 535 551 forall( dtype ostype | ostream( ostype ) ) \ 536 static void base10_128( ostype & os, _Ostream_Manip(T) fmt ) { \ 537 if ( fmt.val > UINT64_MAX ) { \ 538 fmt.val /= P10_UINT64; \ 539 base10_128( os, fmt ); /* recursive */ \ 540 _Ostream_Manip(unsigned long long int) fmt2 @= { (uint64_t)(fmt.val % P10_UINT64), 0, 19, 'u', { .all : 0 } }; \ 541 fmt2.flags.nobsdp = true; \ 542 printf( "fmt2 %c %lld %d\n", fmt2.base, fmt2.val, fmt2.all ); \ 552 static void base10_128( ostype & os, _Ostream_Manip(T) f ) { \ 553 if ( f.val > UINT64_MAX ) { \ 554 unsigned long long int lsig = f.val % P10_UINT64; \ 555 f.val /= P10_UINT64; /* msig */ \ 556 base10_128( os, f ); /* recursion */ \ 557 _Ostream_Manip(unsigned long long int) fmt @= { lsig, 0, 19, 'u', { .all : 0 } }; \ 558 fmt.flags.nobsdp = true; \ 559 /* printf( "fmt1 %c %lld %d\n", fmt.base, fmt.val, fmt.all ); */ \ 543 560 sepOff( os ); \ 544 (ostype &)(os | fmt 2); \561 (ostype &)(os | fmt); \ 545 562 } else { \ 546 printf( "fmt %c %lld %d\n", fmt.base, fmt.val, fmt.all ); \ 563 /* printf( "fmt2 %c %lld %d\n", f.base, (unsigned long long int)f.val, f.all ); */ \ 564 _Ostream_Manip(SIGNED long long int) fmt @= { (SIGNED long long int)f.val, f.wd, f.pc, f.base, { .all : f.all } }; \ 547 565 (ostype &)(os | fmt); \ 548 566 } /* if */ \ 549 } /* base10_128 */ 567 } /* base10_128 */ \ 550 568 forall( dtype ostype | ostream( ostype ) ) { \ 551 569 ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \ 552 570 if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \ 553 571 \ 554 if ( f.base == 'b' | f.base == ' o' | f.base == 'x' | f.base == 'X' ) { \572 if ( f.base == 'b' | f.base == 'B' | f.base == 'o' | f.base == 'x' | f.base == 'X' ) { \ 555 573 unsigned long long int msig = (unsigned long long int)(f.val >> 64); \ 556 574 unsigned long long int lsig = (unsigned long long int)(f.val); \ … … 562 580 } else { \ 563 581 fmt2.flags.pad0 = fmt2.flags.nobsdp = true; \ 564 if ( f.base == 'b' ) { \ 565 if ( f.wd > 64 ) fmt.wd = f.wd - 64; \ 566 fmt2.wd = 64; \ 582 if ( f.base == 'b' | f.base == 'B' ) { \ 583 if ( fmt.flags.pc && fmt.pc > 64 ) fmt.pc -= 64; else { fmt.flags.pc = false; fmt.pc = 0; } \ 584 if ( fmt.flags.left ) { \ 585 fmt.flags.left = false; \ 586 fmt.wd = 0; \ 587 /* printf( "L %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 588 fmt2.flags.left = true; \ 589 int msigd = high1( msig ); \ 590 fmt2.wd = f.wd - (fmt.pc > msigd ? fmt.pc : msigd); \ 591 if ( ! 
fmt.flags.nobsdp ) fmt2.wd -= 2; /* compensate for 0b base specifier */ \ 592 if ( (int)fmt2.wd < 64 ) fmt2.wd = 64; /* cast deals with negative value */ \ 593 fmt2.flags.pc = true; fmt2.pc = 64; \ 594 } else { \ 595 if ( fmt.wd > 64 ) fmt.wd -= 64; \ 596 else fmt.wd = 1; \ 597 /* printf( "R %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 598 fmt2.wd = 64; \ 599 } /* if */ \ 600 /* printf( "C %llo %d %d '%c' %x\n", fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 567 601 (ostype &)(os | fmt | "" | fmt2); \ 568 602 } else if ( f.base == 'o' ) { \ 603 if ( fmt.flags.pc && fmt.pc > 22 ) fmt.pc -= 22; else { fmt.flags.pc = false; fmt.pc = 0; } \ 569 604 fmt.val = (unsigned long long int)fmt.val >> 2; \ 570 if ( f.wd > 21 ) fmt.wd = f.wd - 21; \ 571 fmt2.wd = 1; \ 572 fmt2.val = ((msig & 0x3) << 1) + 1; \ 573 (ostype &)(os | fmt | "" | fmt2); \ 574 sepOff( os ); \ 575 fmt2.wd = 21; \ 576 fmt2.val = lsig & 0x7fffffffffffffff; \ 605 fmt2.val = ((msig & 0x3) << 1) + ((lsig & 0x8000000000000000U) != 0); \ 606 if ( fmt.flags.left ) { \ 607 fmt.flags.left = false; \ 608 fmt.wd = 0; \ 609 /* printf( "L %llo %llo %llo %d %d '%c' %x %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all, fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 610 (ostype &)(os | fmt | "" | fmt2); \ 611 sepOff( os ); \ 612 fmt2.flags.left = true; \ 613 int msigd = ceiling( high1( fmt.val ), 3 ); \ 614 fmt2.wd = f.wd - (fmt.pc > msigd ? fmt.pc : msigd); \ 615 if ( ! fmt.flags.nobsdp ) fmt2.wd -= 1; /* compensate for 0 base specifier */ \ 616 if ( (int)fmt2.wd < 21 ) fmt2.wd = 21; /* cast deals with negative value */ \ 617 fmt2.flags.pc = true; fmt2.pc = 21; \ 618 } else { \ 619 if ( fmt.wd > 22 ) fmt.wd -= 22; \ 620 else fmt.wd = 1; \ 621 /* printf( "R %llo %llo %llo %d %d '%c' %x %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all, fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 622 (ostype &)(os | fmt | "" | fmt2); \ 623 sepOff( os ); \ 624 fmt2.wd = 21; \ 625 } /* if */ \ 626 fmt2.val = lsig & 0x7fffffffffffffffU; \ 627 /* printf( "\nC %llo %d %d '%c' %x\n", fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 577 628 (ostype &)(os | fmt2); \ 578 } else { \ 579 if ( f.flags.left ) { \ 580 if ( f.wd > 16 ) fmt2.wd = f.wd - 16; \ 581 fmt.wd = 16; \ 629 } else { /* f.base == 'x' | f.base == 'X' */ \ 630 if ( fmt.flags.pc && fmt.pc > 16 ) fmt.pc -= 16; else { fmt.flags.pc = false; fmt.pc = 0; } \ 631 if ( fmt.flags.left ) { \ 632 fmt.flags.left = false; \ 633 fmt.wd = 0; \ 634 /* printf( "L %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 635 fmt2.flags.left = true; \ 636 int msigd = high1( msig ); \ 637 fmt2.wd = f.wd - (fmt.pc > msigd ? fmt.pc : msigd); \ 638 if ( ! 
fmt.flags.nobsdp ) fmt2.wd -= 2; /* compensate for 0x base specifier */ \ 639 if ( (int)fmt2.wd < 16 ) fmt2.wd = 16; /* cast deals with negative value */ \ 640 fmt2.flags.pc = true; fmt2.pc = 16; \ 582 641 } else { \ 583 if ( f.wd > 16 ) fmt.wd = f.wd - 16; \ 584 fmt2.wd = 16; \ 642 if ( fmt.wd > 16 ) fmt.wd -= 16; \ 643 else fmt.wd = 1; \ 644 /* printf( "R %llo %llo %llo %d %d '%c' %x\n", msig, lsig, fmt.val, fmt.wd, fmt.pc, fmt.base, fmt.all ); */ \ 645 fmt2.wd = 16; \ 585 646 } /* if */ \ 647 /* printf( "C %llo %d %d '%c' %x\n", fmt2.val, fmt2.wd, fmt2.pc, fmt2.base, fmt2.all ); */ \ 586 648 (ostype &)(os | fmt | "" | fmt2); \ 587 649 } /* if */ \ 588 650 } /* if */ \ 589 651 } else { \ 652 if ( CODE == 'd' ) { \ 653 if ( f.val < 0 ) { fmt( os, "-" ); sepOff( os ); f.val = -f.val; f.flags.sign = false; } \ 654 } /* if */ \ 590 655 base10_128( os, f ); \ 591 656 } /* if */ \ -
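The iostream.cfa rework prints 128-bit integers in the non-decimal bases as two 64-bit chunks: the most-significant half is formatted normally and the least-significant half is zero-padded to the full digit count of the base (64 binary, 21/22 octal, 16 hexadecimal digits), so no interior zeros are lost. A rough C sketch of the hexadecimal case, assuming a compiler that provides unsigned __int128 (gcc/clang):

#include <stdio.h>
#include <inttypes.h>

static void print_hex128( unsigned __int128 val ) {
	uint64_t msig = (uint64_t)(val >> 64);        /* most-significant 64 bits */
	uint64_t lsig = (uint64_t)val;                /* least-significant 64 bits */
	if ( msig == 0 ) {
		printf( "0x%" PRIx64 "\n", lsig );        /* value fits in one chunk */
	} else {
		printf( "0x%" PRIx64 "%016" PRIx64 "\n", msig, lsig ); /* low chunk padded to 16 digits */
	}
}

The decimal path (base10_128) uses the same idea with division by 10^19 and a recursive call for the most-significant digits.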
libcfa/src/startup.cfa
rb7d6a36 r6a490b2 14 14 // 15 15 16 #include <time.h> // tzset 16 #include <time.h> // tzset 17 #include <locale.h> // setlocale 17 18 #include "startup.hfa" 18 19 … … 21 22 void __cfaabi_appready_startup( void ) { 22 23 tzset(); // initialize time global variables 24 setlocale(LC_NUMERIC, ""); 23 25 #ifdef __CFA_DEBUG__ 24 26 extern void heapAppStart(); … … 41 43 struct __spinlock_t; 42 44 extern "C" { 43 void __cfaabi_dbg_record (struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {}45 void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {} 44 46 } 45 47 -
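The startup.cfa hunk installs the environment's numeric locale at application start. One visible effect is digit grouping in formatted numeric output; a small C illustration (the apostrophe flag is a POSIX extension to printf, and the grouping shown depends on the user's locale):

#include <locale.h>
#include <stdio.h>

int main( void ) {
	setlocale( LC_NUMERIC, "" );                  /* adopt the environment's numeric locale */
	printf( "%'d\n", 12345678 );                  /* ' flag groups digits, e.g. 12,345,678 under en_US */
	return 0;
}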
libcfa/src/stdhdr/malloc.h
rb7d6a36 r6a490b2 10 10 // Created On : Thu Jul 20 15:58:16 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Aug 11 09:06:31 201813 // Update Count : 1 012 // Last Modified On : Thu Apr 16 22:44:06 2020 13 // Update Count : 13 14 14 // 15 15 … … 31 31 32 32 extern "C" { 33 void * aalloc( size_t noOfElems, size_t elemSize ); 34 void * amemalign( size_t alignment, size_t noOfElems, size_t elemSize ); 35 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ); 33 36 size_t malloc_alignment( void * ); 34 37 bool malloc_zero_fill( void * ); 38 size_t malloc_size( void * ); 35 39 int malloc_stats_fd( int fd ); 36 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize );37 40 } // extern "C" 38 41 -
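stdhdr/malloc.h now exposes the extended CFA heap entry points (aalloc, amemalign, cmemalign, malloc_size) to C code. As a rough guide to what cmemalign() combines, here is a portable approximation built only from standard C11 calls; the real routine goes through the CFA heap and keeps its own metadata, and the helper name here is hypothetical.

#include <stdlib.h>
#include <string.h>

/* Aligned, zero-filled array allocation.  aligned_alloc (C11) wants the size
 * to be a multiple of the alignment, so the request is rounded up; there is
 * no overflow check in this sketch. */
void * cmemalign_sketch( size_t align, size_t n, size_t esize ) {
	size_t size = n * esize;
	size_t rounded = ( size + align - 1 ) / align * align;
	void * p = aligned_alloc( align, rounded );
	if ( p != NULL ) memset( p, 0, size );        /* zero fill, like calloc */
	return p;
}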
libcfa/src/stdlib.cfa
rb7d6a36 r6a490b2 10 10 // Created On : Thu Jan 28 17:10:29 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Feb 4 08:27:08202013 // Update Count : 4 8612 // Last Modified On : Thu Apr 16 22:43:33 2020 13 // Update Count : 498 14 14 // 15 15 … … 20 20 #define _XOPEN_SOURCE 600 // posix_memalign, *rand48 21 21 #include <string.h> // memcpy, memset 22 #include <malloc.h> // malloc_usable_size23 22 //#include <math.h> // fabsf, fabs, fabsl 24 23 #include <complex.h> // _Complex_I … … 38 37 } // alloc_set 39 38 39 T * alloc_set( T ptr[], size_t dim, T fill ) { // realloc array with fill 40 size_t olen = malloc_usable_size( ptr ); // current allocation 41 void * nptr = (void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 42 size_t nlen = malloc_usable_size( nptr ); // new allocation 43 if ( nlen > olen ) { // larger ? 44 for ( i; malloc_size( ptr ) / sizeof(T) ~ dim ) { 45 memcpy( &ptr[i], &fill, sizeof(T) ); // initialize with fill value 46 } // for 47 } // if 48 return (T *)nptr; 49 } // alloc_align_set 50 40 51 T * alloc_align_set( T ptr[], size_t align, char fill ) { // aligned realloc with fill 41 52 size_t olen = malloc_usable_size( ptr ); // current allocation … … 45 56 if ( nlen > olen ) { // larger ? 46 57 memset( (char *)nptr + olen, (int)fill, nlen - olen ); // initialize added storage 58 } // if 59 return (T *)nptr; 60 } // alloc_align_set 61 62 T * alloc_align_set( T ptr[], size_t align, size_t dim, T fill ) { // aligned realloc with fill 63 size_t olen = malloc_usable_size( ptr ); // current allocation 64 void * nptr = (void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc 65 // char * nptr = alloc_align( ptr, align ); 66 size_t nlen = malloc_usable_size( nptr ); // new allocation 67 if ( nlen > olen ) { // larger ? 68 for ( i; dim ) { memcpy( &ptr[i], &fill, sizeof(T) ); } // initialize with fill value 47 69 } // if 48 70 return (T *)nptr; -
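The new alloc_set()/alloc_align_set() overloads in stdlib.cfa grow an array and then initialize only the newly added elements with a caller-supplied fill value. A simplified C sketch of the same idea, specialized to int and tracking the old length explicitly instead of querying malloc_usable_size()/malloc_size():

#include <stdlib.h>

int * realloc_set_int( int * ptr, size_t odim, size_t ndim, int fill ) {
	int * nptr = realloc( ptr, ndim * sizeof(int) );
	if ( nptr == NULL ) return NULL;
	for ( size_t i = odim; i < ndim; i += 1 ) {
		nptr[i] = fill;                           /* initialize the added elements only */
	}
	return nptr;
}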
libcfa/src/stdlib.hfa
rb7d6a36 r6a490b2 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Feb 4 08:27:01202013 // Update Count : 4 0112 // Last Modified On : Thu Apr 16 22:44:05 2020 13 // Update Count : 432 14 14 // 15 15 … … 21 21 #include <stdlib.h> // *alloc, strto*, ato* 22 22 23 // Reduce includes by explicitly defining these routines. 23 24 extern "C" { 24 25 void * memalign( size_t align, size_t size ); // malloc.h 26 size_t malloc_usable_size( void * ptr ); // malloc.h 27 size_t malloc_size( void * addr ); // CFA heap 28 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize ); // CFA heap 25 29 void * memset( void * dest, int fill, size_t size ); // string.h 26 30 void * memcpy( void * dest, const void * src, size_t size ); // string.h 27 void * cmemalign( size_t alignment, size_t noOfElems, size_t elemSize );// CFA heap31 void * resize( void * oaddr, size_t size ); // CFA heap 28 32 } // extern "C" 29 33 34 void * resize( void * oaddr, size_t nalign, size_t size ); // CFA heap 30 35 void * realloc( void * oaddr, size_t nalign, size_t size ); // CFA heap 31 36 … … 40 45 41 46 static inline forall( dtype T | sized(T) ) { 42 // C dynamic allocation47 // Cforall safe equivalents, i.e., implicit size specification 43 48 44 49 T * malloc( void ) { … … 71 76 return posix_memalign( (void **)ptr, align, sizeof(T) ); // C posix_memalign 72 77 } // posix_memalign 73 74 // Cforall dynamic allocation 78 } // distribution 79 80 static inline forall( dtype T | sized(T) ) { 81 // Cforall safe general allocation, fill, resize, array 75 82 76 83 T * alloc( void ) { … … 83 90 } // alloc 84 91 85 T * alloc( T ptr[], size_t dim ) { // realloc 86 return (T *)(void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 92 forall( dtype S | sized(S) ) 93 T * alloc( S ptr[], size_t dim = 1 ) { // singleton/array resize 94 size_t len = malloc_usable_size( ptr ); // current bucket size 95 if ( sizeof(T) * dim > len ) { // not enough space ? 
96 T * temp = alloc( dim ); // new storage 97 free( ptr ); // free old storage 98 return temp; 99 } else { 100 return (T *)ptr; 101 } // if 102 } // alloc 103 104 T * alloc( T ptr[], size_t dim, bool copy = true ) { 105 if ( copy ) { // realloc 106 return (T *)(void *)realloc( (void *)ptr, dim * sizeof(T) ); // C realloc 107 } else { 108 struct __Unknown {}; 109 return alloc( (__Unknown *)ptr, dim ); // reuse, cheat making T/S different types 110 } // if 87 111 } // alloc 88 112 … … 112 136 forall( dtype T | sized(T) ) { 113 137 T * alloc_set( T ptr[], size_t dim, char fill ); // realloc array with fill 138 T * alloc_set( T ptr[], size_t dim, T fill ); // realloc array with fill 114 139 } // distribution 115 140 … … 125 150 T * alloc_align( T ptr[], size_t align ) { // aligned realloc array 126 151 return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc 152 } // alloc_align 153 154 forall( dtype S | sized(S) ) 155 T * alloc_align( S ptr[], size_t align ) { // aligned reuse array 156 return (T *)(void *)resize( (void *)ptr, align, sizeof(T) ); // CFA realloc 127 157 } // alloc_align 128 158 … … 155 185 156 186 forall( dtype T | sized(T) ) { 187 T * alloc_align_set( T ptr[], size_t align, char fill ); // aligned realloc with fill 188 T * alloc_align_set( T ptr[], size_t align, T fill ); // aligned realloc with fill 157 189 T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill ); // aligned realloc array with fill 158 } // distribution 159 160 static inline forall( dtype T | sized(T) ) { 161 // data, non-array types 190 T * alloc_align_set( T ptr[], size_t align, size_t dim, T fill ); // aligned realloc array with fill 191 } // distribution 192 193 static inline forall( dtype T | sized(T) ) { 194 // Cforall safe initialization/copy, i.e., implicit size specification, non-array types 162 195 T * memset( T * dest, char fill ) { 163 196 return (T *)memset( dest, fill, sizeof(T) ); … … 170 203 171 204 static inline forall( dtype T | sized(T) ) { 172 // data, array types205 // Cforall safe initialization/copy, i.e., implicit size specification, array types 173 206 T * amemset( T dest[], char fill, size_t dim ) { 174 207 return (T *)(void *)memset( dest, fill, dim * sizeof(T) ); // C memset … … 180 213 } // distribution 181 214 182 // allocation/deallocation and constructor/destructor, non-array types215 // Cforall allocation/deallocation and constructor/destructor, non-array types 183 216 forall( dtype T | sized(T), ttype Params | { void ?{}( T &, Params ); } ) T * new( Params p ); 184 217 forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void delete( T * ptr ); 185 218 forall( dtype T, ttype Params | sized(T) | { void ^?{}( T & ); void delete( Params ); } ) void delete( T * ptr, Params rest ); 186 219 187 // allocation/deallocation and constructor/destructor, array types220 // Cforall allocation/deallocation and constructor/destructor, array types 188 221 forall( dtype T | sized(T), ttype Params | { void ?{}( T &, Params ); } ) T * anew( size_t dim, Params p ); 189 222 forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( size_t dim, T arr[] );
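The stdlib.hfa additions round out the type-safe allocation interface: alloc() infers the element size from the result type, the new alloc( ptr, dim ) and resize() variants distinguish reuse from copying, and new()/delete() tie allocation to construction and destruction. For contrast, the pairing that new()/delete() automate must be written by hand in plain C, roughly as follows (the type and function names are illustrative):

#include <stdlib.h>

struct buffer { char * data; size_t cap; };

struct buffer * buffer_new( size_t cap ) {
	struct buffer * b = malloc( sizeof(struct buffer) );
	if ( b == NULL ) return NULL;
	b->data = malloc( cap );                      /* "constructor" body */
	b->cap = cap;
	return b;
}

void buffer_delete( struct buffer * b ) {
	if ( b == NULL ) return;
	free( b->data );                              /* "destructor" body */
	free( b );
}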