Changeset 5a40e4e

Ignore:
Timestamp:
Sep 9, 2021, 3:56:32 PM (8 months ago)
Branches:
enum, forall-pointer-decay, master
Children:
d0b9247
Parents:
dd1cc02 (diff), d8d512e (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
95 edited
13 moved

Unmodified
Removed

• doc/theses/andrew_beach_MMath/code/test.sh

 rdd1cc02 # test.sh LANGUAGE TEST #   Run the TEST in LANGUAGE. # test.sh -a #   Build all tests. # test.sh -b SOURCE_FILE... #   Build a test from SOURCE_FILE(s). # test.sh -c #   Clean all executables. # test.sh -v LANGUAGE TEST FILE #   View the result from TEST in LANGUAGE stored in FILE. readonly ITERATIONS=1000000 # 1 000 000, one million readonly DIR=$(dirname "$(readlink -f "$0")") cd$DIR readonly MIL=000000 # Various preset values used as arguments. readonly ITERS_1M=1$MIL readonly ITERS_10M=10$MIL readonly ITERS_100M=100$MIL readonly ITERS_1000M=1000$MIL readonly STACK_HEIGHT=100 case "$1" in *.cfa) # Requires a symbolic link. mmake "${1%.cfa}" "$1" ./cfa -DNDEBUG -nodebug -O3 "$1" -o "${1%.cfa}" # A symbolic link/local copy can be used as an override. cmd=./cfa if [ ! -x$cmd ]; then cmd=cfa fi mmake "${1%.cfa}" "$1" $cmd -DNDEBUG -nodebug -O3 "$1" -o "${1%.cfa}" ;; *.cpp) mmake "${1%.cpp}-cpp" "$1" g++ -DNDEBUG -O3 "$1" -o "${1%.cpp}-cpp" mmake "${1%.cpp}-cpp" "$1" g++-10 -DNDEBUG -O3 "$1" -o "${1%.cpp}-cpp" ;; *.java) ) if [ "-b" = "$1" ]; then if [ "-a" = "$1" ]; then for file in *.cfa *.cpp *.java; do build$file done exit 0 elif [ "-b" = "$1" ]; then for file in "${@:2}"; do build $file done exit 0 elif [ "-c" = "$1" ]; then rm $(basename -s ".cfa" -a *.cfa) rm$(basename -s ".cpp" -a *.cpp) rm *-cpp rm *.class exit 0 elif [ "-v" = "$1" -a 4 = "$#" ]; then TEST_LANG="$2" TEST_CASE="$3" VIEW_FILE="$4" TEST_LANG="$2" TEST_CASE="$3" VIEW_FILE="$4" elif [ 2 -eq "$#" ]; then TEST_LANG="$1" case "$TEST_CASE" in cond-match-all) CFAT="./cond-catch$ITERATIONS 1" CFAR="./cond-fixup $ITERATIONS 1" CPP="./cond-catch-cpp$ITERATIONS 1" JAVA="java CondCatch $ITERATIONS 1" PYTHON="./cond_catch.py$ITERATIONS 1" ;; cond-match-none) CFAT="./cond-catch $ITERATIONS 0" CFAR="./cond-fixup$ITERATIONS 0" CPP="./cond-catch-cpp $ITERATIONS 0" JAVA="java CondCatch$ITERATIONS 0" PYTHON="./cond_catch.py $ITERATIONS 0" ;; cross-catch) CFAT="./cross-catch$ITERATIONS" 
CFAR="./cross-resume $ITERATIONS" CPP="./cross-catch-cpp$ITERATIONS" JAVA="java CrossCatch $ITERATIONS" PYTHON="./cross_catch.py$ITERATIONS" ;; cross-finally) CFAT="./cross-finally $ITERATIONS" CFAR=unsupported CPP=unsupported JAVA="java CrossFinally$ITERATIONS" PYTHON="./cross_finally.py $ITERATIONS" raise-empty) CFAT="./throw-empty$ITERS_1M $STACK_HEIGHT" CFAR="./resume-empty$ITERS_10M $STACK_HEIGHT" CPP="./throw-empty-cpp$ITERS_1M $STACK_HEIGHT" JAVA="java ThrowEmpty$ITERS_1M $STACK_HEIGHT" PYTHON="./throw-empty.py$ITERS_1M $STACK_HEIGHT" ;; raise-detor) CFAT="./throw-detor$ITERATIONS $STACK_HEIGHT" CFAR="./resume-detor$ITERATIONS $STACK_HEIGHT" CPP="./throw-detor-cpp$ITERATIONS $STACK_HEIGHT" CFAT="./throw-detor$ITERS_1M $STACK_HEIGHT" CFAR="./resume-detor$ITERS_10M $STACK_HEIGHT" CPP="./throw-detor-cpp$ITERS_1M $STACK_HEIGHT" JAVA=unsupported PYTHON=unsupported ;; raise-empty) CFAT="./throw-empty$ITERATIONS $STACK_HEIGHT" CFAR="./resume-empty$ITERATIONS $STACK_HEIGHT" CPP="./throw-empty-cpp$ITERATIONS $STACK_HEIGHT" JAVA="java ThrowEmpty$ITERATIONS $STACK_HEIGHT" PYTHON="./throw_empty.py$ITERATIONS $STACK_HEIGHT" ;; raise-finally) CFAT="./throw-finally$ITERATIONS $STACK_HEIGHT" CFAR="./resume-finally$ITERATIONS $STACK_HEIGHT" CFAT="./throw-finally$ITERS_1M $STACK_HEIGHT" CFAR="./resume-finally$ITERS_10M $STACK_HEIGHT" CPP=unsupported JAVA="java ThrowFinally$ITERATIONS $STACK_HEIGHT" PYTHON="./throw_finally.py$ITERATIONS $STACK_HEIGHT" JAVA="java ThrowFinally$ITERS_1M $STACK_HEIGHT" PYTHON="./throw-finally.py$ITERS_1M $STACK_HEIGHT" ;; raise-other) CFAT="./throw-other$ITERATIONS $STACK_HEIGHT" CFAR="./resume-other$ITERATIONS $STACK_HEIGHT" CPP="./throw-other-cpp$ITERATIONS $STACK_HEIGHT" JAVA="java ThrowOther$ITERATIONS $STACK_HEIGHT" PYTHON="./throw_other.py$ITERATIONS $STACK_HEIGHT" CFAT="./throw-other$ITERS_1M $STACK_HEIGHT" CFAR="./resume-other$ITERS_10M $STACK_HEIGHT" CPP="./throw-other-cpp$ITERS_1M $STACK_HEIGHT" JAVA="java ThrowOther$ITERS_1M 
$STACK_HEIGHT" PYTHON="./throw-other.py$ITERS_1M $STACK_HEIGHT" ;; try-catch) CFAT="./try-catch$ITERS_1000M" CFAR="./try-resume $ITERS_1000M" CPP="./try-catch-cpp$ITERS_1000M" JAVA="java TryCatch $ITERS_1000M" PYTHON="./try-catch.py$ITERS_1000M" ;; try-finally) CFAT="./try-finally $ITERS_1000M" CFAR=unsupported CPP=unsupported JAVA="java TryFinally$ITERS_1000M" PYTHON="./try-finally.py $ITERS_1000M" ;; cond-match-all) CFAT="./cond-catch$ITERS_10M 1" CFAR="./cond-fixup $ITERS_100M 1" CPP="./cond-catch-cpp$ITERS_10M 1" JAVA="java CondCatch $ITERS_10M 1" PYTHON="./cond-catch.py$ITERS_10M 1" ;; cond-match-none) CFAT="./cond-catch $ITERS_10M 0" CFAR="./cond-fixup$ITERS_100M 0" CPP="./cond-catch-cpp $ITERS_10M 0" JAVA="java CondCatch$ITERS_10M 0" PYTHON="./cond-catch.py $ITERS_10M 0" ;; fixup-empty) CFAT="./fixup-empty-f$ITERS_10M $STACK_HEIGHT" CFAR="./fixup-empty-r$ITERS_10M $STACK_HEIGHT" CPP="./fixup-empty-cpp$ITERS_10M $STACK_HEIGHT" JAVA="java FixupEmpty$ITERS_10M $STACK_HEIGHT" PYTHON="./fixup-empty.py$ITERS_10M $STACK_HEIGHT" ;; fixup-other) CFAT="./fixup-other-f$ITERS_10M $STACK_HEIGHT" CFAR="./fixup-other-r$ITERS_10M $STACK_HEIGHT" CPP="./fixup-other-cpp$ITERS_10M $STACK_HEIGHT" JAVA="java FixupOther$ITERS_10M $STACK_HEIGHT" PYTHON="./fixup-other.py$ITERS_10M $STACK_HEIGHT" ;; *) if [ -n "$VIEW_FILE" ]; then grep -A 1 -B 0 "$CALL" "$VIEW_FILE" | sed -n -e 's!Run-Time (ns): !!;T;p' exit grep -A 1 -B 0 "$CALL" "$VIEW_FILE" | sed -n -e 's!Run-Time (ns): !!;T;p' exit fi
• doc/theses/andrew_beach_MMath/code/throw-detor.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(empty_exception)(); EHM_VIRTUAL_TABLE(empty_exception, empty_vt); exception empty_exception; vtable(empty_exception) empty_vt; struct WithDestructor {}; unsigned int total_frames = 1; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } if (2 < argc) { total_frames = strtol(argv[2], 0p, 10); total_frames = strto(argv[2], 0p, 10); } } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/code/throw-detor.cpp

 rdd1cc02 #include #include #include using namespace std; using namespace std::chrono; time_point end_time = steady_clock::now(); nanoseconds duration = duration_cast(end_time - start_time); std::cout << "Run-Time (ns): " << duration.count() << std::endl; cout << "Run-Time (s): " << fixed << setprecision(1) << duration.count() / 1'000'000'000. << endl; }
• doc/theses/andrew_beach_MMath/code/throw-empty.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(empty_exception)(); EHM_VIRTUAL_TABLE(empty_exception, empty_vt); exception empty_exception; vtable(empty_exception) empty_vt; void unwind_empty(unsigned int frames) { unsigned int total_frames = 1; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } if (2 < argc) { total_frames = strtol(argv[2], 0p, 10); total_frames = strto(argv[2], 0p, 10); } } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/code/throw-empty.cpp

 rdd1cc02 // Throw Across Empty Function #include #include #include #include #include #include using namespace std; using namespace std::chrono; if (frames) { unwind_empty(frames - 1); if (-1 == frames) printf("~"); } else { throw (EmptyException){}; time_point end_time = steady_clock::now(); nanoseconds duration = duration_cast(end_time - start_time); std::cout << "Run-Time (ns): " << duration.count() << std::endl; cout << "Run-Time (s): " << fixed << setprecision(1) << duration.count() / 1'000'000'000. << endl; }
• doc/theses/andrew_beach_MMath/code/throw-empty.py

 rdd1cc02 end_time = thread_time_ns() print('Run-Time (ns):', end_time - start_time) print('Run-Time (s): {:.1f}'.format((end_time - start_time) / 1_000_000_000.))
• doc/theses/andrew_beach_MMath/code/throw-finally.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(empty_exception)(); exception empty_exception; vtable(empty_exception) empty_vt; EHM_VIRTUAL_TABLE(empty_exception, empty_vt); unsigned int frames;                                                                    // use global because of gcc thunk problem void unwind_finally(unsigned int frames) { void unwind_finally(unsigned int dummy) { if (frames) { frames -= 1; try { unwind_finally(frames - 1); unwind_finally(42); } finally { asm volatile ("# finally block"); } } else { dummy = 42; throw (empty_exception){&empty_vt}; } unsigned int total_frames = 1; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } if (2 < argc) { total_frames = strtol(argv[2], 0p, 10); total_frames = strto(argv[2], 0p, 10); } frames = total_frames; Time start_time = timeHiRes(); for (int count = 0 ; count < times ; ++count) { try { unwind_finally(total_frames); unwind_finally(42); } catch (empty_exception *) { asm volatile ("# catch block"); } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/code/throw-finally.py

 rdd1cc02 end_time = thread_time_ns() print('Run-Time (ns):', end_time - start_time) print('Run-Time (s): {:.1f}'.format((end_time - start_time) / 1_000_000_000.))
• doc/theses/andrew_beach_MMath/code/throw-other.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(empty_exception)(); exception empty_exception; vtable(empty_exception) empty_vt; exception not_raised_exception; EHM_VIRTUAL_TABLE(empty_exception, empty_vt); unsigned int frames;                                                                    // use global because of gcc thunk problem EHM_EXCEPTION(not_raised_exception)(); void unwind_other(unsigned int frames) { void unwind_other(unsigned int dummy) { if (frames) { frames -= 1; try { unwind_other(frames - 1); unwind_other(42); } catch (not_raised_exception *) { asm volatile ("# catch block (stack)"); } } else { dummy = 42; throw (empty_exception){&empty_vt}; } unsigned int total_frames = 1; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } if (2 < argc) { total_frames = strtol(argv[2], 0p, 10); total_frames = strto(argv[2], 0p, 10); } frames = total_frames; Time start_time = timeHiRes(); for (int count = 0 ; count < times ; ++count) { try { unwind_other(total_frames); unwind_other(42); } catch (empty_exception *) { asm volatile ("# catch block (base)"); } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/code/throw-other.cpp

 rdd1cc02 #include #include #include using namespace std; using namespace std::chrono; time_point end_time = steady_clock::now(); nanoseconds duration = duration_cast(end_time - start_time); std::cout << "Run-Time (ns): " << duration.count() << std::endl; cout << "Run-Time (s): " << fixed << setprecision(1) << duration.count() / 1'000'000'000. << endl; }
• doc/theses/andrew_beach_MMath/code/throw-other.py

 rdd1cc02 end_time = thread_time_ns() print('Run-Time (ns):', end_time - start_time) print('Run-Time (s): {:.1f}'.format((end_time - start_time) / 1_000_000_000.))
• doc/theses/andrew_beach_MMath/code/throw-with.py

 rdd1cc02 end_time = thread_time_ns() print('Run-Time (ns):', end_time - start_time) print('Run-Time (s): {:.1f}'.format((end_time - start_time) / 1_000_000_000.))
• doc/theses/andrew_beach_MMath/code/try-catch.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(not_raised_exception)(); EHM_VIRTUAL_TABLE(not_raised_exception, not_vt); exception not_raised_exception; vtable(not_raised_exception) not_vt; int main(int argc, char * argv[]) { volatile bool should_throw = false; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/code/try-catch.cpp

 rdd1cc02 #include #include #include using namespace std; using namespace std::chrono; time_point end_time = steady_clock::now(); nanoseconds duration = duration_cast(end_time - start_time); std::cout << "Run-Time (ns): " << duration.count() << std::endl; cout << "Run-Time (s): " << fixed << setprecision(1) << duration.count() / 1'000'000'000. << endl; }
• doc/theses/andrew_beach_MMath/code/try-catch.py

 rdd1cc02 end_time = thread_time_ns() print('Run-Time (ns):', end_time - start_time) print('Run-Time (s): {:.1f}'.format((end_time - start_time) / 1_000_000_000.))
• doc/theses/andrew_beach_MMath/code/try-finally.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(not_raised_exception)(); EHM_VIRTUAL_TABLE(not_raised_exception, not_vt); exception not_raised_exception; vtable(not_raised_exception) not_vt; int main(int argc, char * argv[]) { volatile bool should_throw = false; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/code/try-finally.py

 rdd1cc02 end_time = thread_time_ns() print('Run-Time (ns):', end_time - start_time) print('Run-Time (s): {:.1f}'.format((end_time - start_time) / 1_000_000_000.))
• doc/theses/andrew_beach_MMath/code/try-resume.cfa

 rdd1cc02 #include #include #include #include                                                                    // strto EHM_EXCEPTION(not_raised_exception)(); exception not_raised_exception; int main(int argc, char * argv[]) { unsigned int total_frames = 1; if (1 < argc) { times = strtol(argv[1], 0p, 10); times = strto(argv[1], 0p, 10); } if (2 < argc) { total_frames = strtol(argv[2], 0p, 10); total_frames = strto(argv[2], 0p, 10); } } Time end_time = timeHiRes(); sout | "Run-Time (ns): " | (end_time - start_time)ns; sout | "Run-Time (s): " | wd(0,1, (end_time - start_time)ns / 1_000_000_000.); }
• doc/theses/andrew_beach_MMath/conclusion.tex

 rdd1cc02 \chapter{Conclusion} \label{c:conclusion} % Just a little knot to tie the paper together. In the previous chapters this thesis presents the design and implementation of \CFA's exception handling mechanism (EHM). Both the design and implementation are based off of tools and techniques developed for other programming languages but they were adapted to better fit \CFA's feature set. Both the design and implementation are based off of tools and techniques developed for other programming languages but they were adapted to better fit \CFA's feature set and add a few features that do not exist in other EHMs; including conditional matching, default handlers for unhandled exceptions and cancellation though coroutines and threads back to the program main stack. The resulting features cover all of the major use cases of the most popular termination EHMs of today, along with reintroducing resumption exceptions and creating some new features that fix with \CFA's larger programming patterns. creating some new features that fit with \CFA's larger programming patterns, such as virtuals independent of traditional objects. The implementation has been tested and compared to other implementations. The \CFA project's test suite has been expanded to test the EHM. The implementation's performance has also been compared to other implementations with a small set of targeted micro-benchmarks. The results, while not cutting edge, are good enough for prototyping, which is \CFA's stage of development. is \CFA's current stage of development. This is a valuable new feature for \CFA in its own right but also serves as a tool (and motivation) for other developments in the language. This initial EHM will bring valuable new features to \CFA in its own right but also serves as a tool and motivation for other developments in the language.
• doc/theses/andrew_beach_MMath/exception-layout.fig

 rdd1cc02 0 0 1.00 240.00 240.00 360 405 360 2070 4 0 0 50 -1 0 12 0.0000 4 135 1080 2700 585 Fixed Header\001 4 0 0 50 -1 0 12 0.0000 4 135 1710 540 990 Cforall Information\001 4 0 0 50 -1 0 12 0.0000 4 165 1530 540 585 _Unwind_Exception\001 4 0 0 50 -1 0 12 0.0000 4 165 1260 540 1530 User Exception\001 4 0 0 50 -1 0 12 0.0000 4 165 1170 2655 1530 Variable Body\001 4 0 0 50 -1 0 12 0.0000 4 165 1260 2655 1215 (Fixed Offset)\001 4 0 0 50 -1 0 12 0.0000 0 135 1080 2700 585 Fixed Header\001 4 0 0 50 -1 0 12 0.0000 0 135 1575 540 990 Cforall Information\001 4 0 0 50 -1 0 12 0.0000 0 180 1695 540 585 _Unwind_Exception\001 4 0 0 50 -1 0 12 0.0000 0 180 1245 540 1530 User Exception\001 4 0 0 50 -1 0 12 0.0000 0 180 1185 2655 1530 Variable Body\001 4 0 0 50 -1 0 12 0.0000 0 165 1110 2655 1215 (Fixed Offset)\001
• doc/theses/andrew_beach_MMath/existing.tex

 rdd1cc02 Only those \CFA features pertaining to this thesis are discussed. % Also, only new features of \CFA will be discussed, A familiarity with C or C-like languages is assumed. \CFA has extensive overloading, allowing multiple definitions of the same name to be defined~\cite{Moss18}. \begin{lstlisting}[language=CFA,{moredelim=**[is][\color{red}]{@}{@}}] char @i@; int @i@; double @i@; int @f@(); double @f@(); void @g@( int ); void @g@( double ); \end{lstlisting} \begin{cfa} char i; int i; double i; int f(); double f(); void g( int ); void g( double ); \end{cfa} This feature requires name mangling so the assembly symbols are unique for different overloads. For compatibility with names in C, there is also a syntax int && rri = ri; rri = 3; &ri = &j; // rebindable &ri = &j; ri = 5; \end{cfa} \end{minipage} References are intended for pointer situations where dereferencing is the common usage, \ie the value is more important than the pointer. References are intended to be used when the indirection of a pointer is required, but the address is not as important as the value and dereferencing is the common usage. Mutable references may be assigned to by converting them to a pointer with a @&@ and then assigning a pointer to them, as in @&ri = &j;@ above with a @&@ and then assigning a pointer to them, as in @&ri = &j;@ above. % ??? \section{Operators} \CFA implements operator overloading by providing special names, where operator usages are translated into function calls using these names. operator expressions are translated into function calls using these names. An operator name is created by taking the operator symbols and joining them with @?@s to show where the arguments go. This syntax make it easy to tell the difference between prefix operations (such as @++?@) and post-fix operations (@?++@). For example, plus and equality operators are defined for a point type. As an example, here are the addition and equality operators for a point type. 
\begin{cfa} point ?+?(point a, point b) { return point{a.x + b.x, a.y + b.y}; } } \end{cfa} Note these special names are not limited to builtin operators, and hence, may be used with arbitrary types. \begin{cfa} double ?+?( int x, point y ); // arbitrary types \end{cfa} % Some near misses", that are that do not match an operator form but looks like % it may have been supposed to, will generate warning but otherwise they are % left alone. Because operators are never part of the type definition they may be added at any time, including on built-in types. Note that this syntax works effectively but a textual transformation, the compiler converts all operators into functions and then resolves them normally. This means any combination of types may be used, although nonsensical ones (like @double ?==?(point, int);@) are discouraged. This feature is also used for all builtin operators as well, although those are implicitly provided by the language. %\subsection{Constructors and Destructors} \CFA also provides constructors and destructors as operators, which means they are functions with special operator names rather than type names in \Cpp. While constructors and destructions are normally called implicitly by the compiler, the special operator names, allow explicit calls. % Placement new means that this is actually equivalent to C++. In \CFA, constructors and destructors are operators, which means they are functions with special operator names rather than type names in \Cpp. Both constructors and destructors can be implicity called by the compiler, however the operator names allow explicit calls. % Placement new means that this is actually equivant to C++. The special name for a constructor is @?{}@, which comes from the struct Example { ... }; void ?{}(Example & this) { ... } { Example a; Example b = {}; } void ?{}(Example & this, char first, int num) { ... 
} Example a;              // implicit constructor calls Example b = {}; Example c = {'a', 2}; \end{cfa} Both @a@ and @b@ are initialized with the first constructor, while @c@ is initialized with the second. Constructor calls can be replaced with C initialization using special operator \lstinline{@=}. \begin{cfa} Example d @= {42}; \end{cfa} { Example c = {'a', 2}; } \end{cfa} Both @a@ and @b@ will be initalized with the first constructor, @b@ because of the explicit call and @a@ implicitly. @c@ will be initalized with the second constructor. Currently, there is no general way to skip initialation. % I don't use @= anywhere in the thesis. % I don't like the \^{} symbol but $^\wedge$ isn't better. Similarly, destructors use the special name @^?{}@ (the @^@ has no special meaning). % These are a normally called implicitly called on a variable when it goes out % of scope. They can be called explicitly as well. \begin{cfa} void ^?{}(Example & this) { ... } { Example e;      // implicit constructor call ^?{}(e);                // explicit destructor call ?{}(e);         // explicit constructor call } // implicit destructor call Example d; ^?{}(d); Example e; } // Implicit call of ^?{}(e); \end{cfa} The global definition of @do_once@ is ignored, however if quadruple took a @double@ argument, then the global definition would be used instead as it is a better match. % Aaron's thesis might be a good reference here. To avoid typing long lists of assertions, constraints can be collect into convenient package called a @trait@, which can then be used in an assertion would then be a better match.\cite{Moss19} To avoid typing long lists of assertions, constraints can be collected into convenient a package called a @trait@, which can then be used in an assertion instead of the individual constraints. 
\begin{cfa} node(T) * next; T * data; } }; node(int) inode; \end{cfa} }; CountUp countup; for (10) sout | resume(countup).next; // print 10 values \end{cfa} Each coroutine has a @main@ function, which takes a reference to a coroutine object and returns @void@. %[numbers=left] Why numbers on this one? \begin{cfa}[numbers=left,numberstyle=\scriptsize\sf] \begin{cfa} void main(CountUp & this) { for (unsigned int up = 0;; ++up) { this.next = up; for (unsigned int next = 0 ; true ; ++next) { this.next = next; suspend;$\label{suspend}$ } \end{cfa} In this function, or functions called by this function (helper functions), the @suspend@ statement is used to return execution to the coroutine's resumer without terminating the coroutine's function(s). @suspend@ statement is used to return execution to the coroutine's caller without terminating the coroutine's function. A coroutine is resumed by calling the @resume@ function, \eg @resume(countup)@. The first resume calls the @main@ function at the top. Thereafter, resume calls continue a coroutine in the last suspended function after the @suspend@ statement, in this case @main@ line~\ref{suspend}.  The @resume@ function takes a reference to the coroutine structure and returns the same reference. The return value allows easy access to communication variables defined in the coroutine object. For example, the @next@ value for coroutine object @countup@ is both generated and collected in the single expression: @resume(countup).next@. statement. In this case there is only one and, hence, the difference between subsequent calls is the state of variables inside the function and the coroutine object. The return value of @resume@ is a reference to the coroutine, to make it convent to access fields of the coroutine in the same expression. 
Here is a simple example in a helper function: \begin{cfa} unsigned int get_next(CountUp & this) { return resume(this).next; } \end{cfa} When the main function returns the coroutine halts and can no longer be resumed. \subsection{Monitor and Mutex Parameter} exclusion on a monitor object by qualifying an object reference parameter with @mutex@. \begin{lstlisting}[language=CFA,{moredelim=**[is][\color{red}]{@}{@}}] void example(MonitorA & @mutex@ argA, MonitorB & @mutex@ argB); \end{lstlisting} \begin{cfa} void example(MonitorA & mutex argA, MonitorB & mutex argB); \end{cfa} When the function is called, it implicitly acquires the monitor lock for all of the mutex parameters without deadlock.  This semantics means all functions with { StringWorker stringworker; // fork thread running in "main" } // implicitly join with thread / wait for completion } // Implicit call to join(stringworker), waits for completion. \end{cfa} The thread main is where a new thread starts execution after a fork operation
• doc/theses/andrew_beach_MMath/features.tex

 rdd1cc02 \paragraph{Raise} The raise is the starting point for exception handling The raise is the starting point for exception handling, by raising an exception, which passes it to the EHM. \paragraph{Handle} The primary purpose of an EHM is to run some user code to handle a raised exception. This code is given, with some other information, in a handler. exception. This code is given, along with some other information, in a handler. A handler has three common features: the previously mentioned user code, a region of code it guards, and an exception label/condition that matches the raised exception. region of code it guards and an exception label/condition that matches against the raised exception. Only raises inside the guarded region and raising exceptions that match the label can be handled by a given handler. The @try@ statements of \Cpp, Java and Python are common examples. All three show the common features of guarded region, raise, matching and handler. \begin{cfa} try {                           // guarded region ... throw exception;        // raise ... } catch( exception ) {  // matching condition, with exception label ...                             // handler code } \end{cfa} also show another common feature of handlers, they are grouped by the guarded region. \subsection{Propagation} After an exception is raised comes what is usually the biggest step for the EHM: finding and setting up the handler for execution. The propagation from raise to EHM: finding and setting up the handler for execution. The propagation from raise to handler can be broken up into three different tasks: searching for a handler, matching against the handler and installing the handler. \paragraph{Searching} The EHM begins by searching for handlers that might be used to handle the exception. The search is restricted to handlers that have the raise site in their guarded the exception. The search will find handlers that have the raise site in their guarded region. 
The search includes handlers in the current function, as well as any in \paragraph{Matching} Each handler found is matched with the raised exception. The exception Each handler found is with the raised exception. The exception label defines a condition that is used with the exception and decides if there is a match or not. % In languages where the first match is used, this step is intertwined with searching; a match check is performed immediately after the search finds different course of action for this case. This situation only occurs with unchecked exceptions as checked exceptions (such as in Java) are guaranteed to find a matching handler. (such as in Java) can make the guarantee. The unhandled action is usually very general, such as aborting the program. A handler labeled with any given exception can handle exceptions of that type or any child type of that exception. The root of the exception hierarchy (here \code{C}{exception}) acts as a catch-all, leaf types catch single types, (here \code{C}{exception}) acts as a catch-all, leaf types catch single types and the exceptions in the middle can be used to catch different groups of related exceptions. This system has some notable advantages, such as multiple levels of grouping, the ability for libraries to add new exception types, and the isolation the ability for libraries to add new exception types and the isolation between different sub-hierarchies. This design is used in \CFA even though it is not a object-orientated For effective exception handling, additional information is often passed from the raise to the handler and back again. So far, only communication of the exception's identity is covered. A common communication method for passing more information is putting fields into the exception instance So far, only communication of the exceptions' identity is covered. 
A common communication method for adding information to an exception is putting fields into the exception instance and giving the handler access to them. Using reference fields pointing to data at the raise location allows data to be passed in both directions. % You can either have pointers/references in the exception, or have p/rs to % the exception when it doesn't have to be copied. Passing references or pointers allows data at the raise location to be updated, passing information in both directions. \section{Virtuals} \label{s:virtuals} Virtual types and casts are not part of \CFA's EHM nor are they required for an EHM. However, one of the best ways to support an exception hierarchy is via a virtual hierarchy and dispatch system. Ideally, the virtual system should have been part of \CFA before the work Ideally, the virtual system would have been part of \CFA before the work on exception handling began, but unfortunately it was not. Hence, only the features and framework needed for the EHM were designed and implemented for this thesis. Other features were considered to ensure that designed and implemented for this thesis. Other features were considered to ensure that the structure could accommodate other desirable features in the future but are not implemented. The rest of this section only discusses the implemented subset of the virtual-system design. virtual system design. The virtual system supports multiple trees" of types. Each tree is number of children. Any type that belongs to any of these trees is called a virtual type. % A type's ancestors are its parent and its parent's ancestors. % The root type has no ancestors. % A type's descendants are its children and its children's descendants. Every virtual type also has a list of virtual members. Children inherit their parent's list of virtual members but may add new members to it. 
It is important to note that these are virtual members, not virtual methods of object-orientated programming, and can be of any type. \PAB{Need to look at these when done. \CFA still supports virtual methods as a special case of virtual members. Function pointers that take a pointer to the virtual type are modified with each level of inheritance so that refers to the new type. This means an object can always be passed to a function in its virtual table as if it were a method. \todo{Clarify (with an example) virtual methods.} Each virtual type has a unique id. This id and all the virtual members are combined into a virtual table type. Each virtual type has a pointer to a virtual table as a hidden field. \todo{Might need a diagram for virtual structure.} }% For the purposes of illustration, a proposed -- but unimplemented syntax -- will be used. Each virtual type is represented by a trait with an annotation that makes it a virtual type. This annotation is empty for a root type, which creates a new tree: \begin{cfa} trait root_type(T) virtual() {} \end{cfa} The annotation may also refer to any existing virtual type to make this new type a child of that type and part of the same tree. The parent may itself be a child or a root type and may have any number of existing children. % OK, for some reason the b and t positioning options are reversed here. \begin{minipage}[b]{0.6\textwidth} \begin{cfa} trait child_a(T) virtual(root_type) {} trait grandchild(T) virtual(child_a) {} trait child_b(T) virtual(root_type) {} \end{cfa} \end{minipage} \begin{minipage}{0.4\textwidth} \begin{center} \input{virtual-tree} \end{center} \end{minipage} Every virtual type also has a list of virtual members and a unique id, both are stored in a virtual table. Every instance of a virtual type also has a pointer to a virtual table stored in it, although there is no per-type virtual table as in many other languages. The list of virtual members is built up down the tree. 
Every virtual type inherits the list of virtual members from its parent and may add more virtual members to the end of the list which are passed on to its children. Again, using the unimplemented syntax this might look like: \begin{cfa} trait root_type(T) virtual() { const char * to_string(T const & this); unsigned int size; } trait child_type(T) virtual(root_type) { char * irrelevant_function(int, char); } \end{cfa} % Consider adding a diagram, but we might be good with the explanation. As @child_type@ is a child of @root_type@ it has the virtual members of @root_type@ (@to_string@ and @size@) as well as the one it declared (@irrelevant_function@). It is important to note that these are virtual members, and may contain arbitrary fields, functions or otherwise. The names size" and align" are reserved for the size and alignment of the virtual type, and are always automatically initialized as such. The other special case are uses of the trait's polymorphic argument (@T@ in the example), which are always updated to refer to the current virtual type. This allows functions that refer to to polymorphic argument to act as traditional virtual methods (@to_string@ in the example), as the object can always be passed to a virtual method in its virtual table. Up until this point the virtual system is similar to ones found in object-orientated languages but this is where \CFA diverges. Objects encapsulate a single set of methods in each type, universally across the entire program, and indeed all programs that use that type definition. Even if a type inherits and adds methods, it still encapsulate a single set of methods. In this sense, object-oriented types are closed" and cannot be altered. In \CFA, types do not encapsulate any code. Traits are local for each function and types can satisfy a local trait, stop satisfying it or, satisfy the same trait in a different way at any lexical location in the program where a function is call. 
In this sense, the set of functions/variables that satisfy a trait for a type is open" as the set can change at every call site. object-oriented languages but this is where \CFA diverges. Objects encapsulate a single set of methods in each type, universally across the entire program, and indeed all programs that use that type definition. The only way to change any method is to inherit and define a new type with its own universal implementation. In this sense, these object-oriented types are closed" and cannot be altered. % Because really they are class oriented. In \CFA, types do not encapsulate any code. Whether or not satisfies any given assertion, and hence any trait, is context sensitive. Types can begin to satisfy a trait, stop satisfying it or satisfy the same trait at any lexical location in the program. In this sense, an type's implementation in the set of functions and variables that allow it to satisfy a trait is open" and can change throughout the program. This capability means it is impossible to pick a single set of functions that represent a type's implementation across a program. type. A user can define virtual tables that are filled in at their declaration and given a name. Anywhere that name is visible, even if it is defined locally inside a function \PAB{What does this mean? (although that means it does not have a static lifetime)}, it can be used. defined locally inside a function (although in this case the user must ensure it outlives any objects that use it), it can be used. Specifically, a virtual type is bound" to a virtual table that sets the virtual members for that object. The virtual members can be accessed through the object. This means virtual tables are declared and named in \CFA. They are declared as variables, using the type @vtable(VIRTUAL_TYPE)@ and any valid name. For example: \begin{cfa} vtable(virtual_type_name) table_name; \end{cfa} Like any variable they may be forward declared with the @extern@ keyword. 
Forward declaring virtual tables is relatively common. Many virtual types have an obvious" implementation that works in most cases. A pattern that has appeared in the early work using virtuals is to implement a virtual table with the the obvious definition and place a forward declaration of it in the header beside the definition of the virtual type. Even on the full declaration, no initializer should be used. Initialization is automatic. The type id and special virtual members size" and align" only depend on the virtual type, which is fixed given the type of the virtual table and so the compiler fills in a fixed value. The other virtual members are resolved, using the best match to the member's name and type, in the same context as the virtual table is declared using \CFA's normal resolution rules. While much of the virtual infrastructure is created, it is currently only used @EXPRESSION@ object, otherwise it returns @0p@ (null pointer). \section{Exception} % Leaving until later, hopefully it can talk about actual syntax instead % of my many strange macros. Syntax aside I will also have to talk about the % features all exceptions support. Exceptions are defined by the trait system; there are a series of traits, and if a type satisfies them, then it can be used as an exception. The following \section{Exceptions} The syntax for declaring an exception is the same as declaring a structure except the keyword that is swapped out: \begin{cfa} exception TYPE_NAME { FIELDS }; \end{cfa} Fields are filled in the same way as a structure as well. However an extra field is added that contains the pointer to the virtual table. It must be explicitly initialized by the user when the exception is constructed. Here is an example of declaring an exception type along with a virtual table, assuming the exception has an obvious" implementation and a default virtual table makes sense. 
\begin{minipage}[t]{0.4\textwidth} Header: \begin{cfa} exception Example { int data; }; extern vtable(Example) example_base_vtable; \end{cfa} \end{minipage} \begin{minipage}[t]{0.6\textwidth} Source: \begin{cfa} vtable(Example) example_base_vtable \end{cfa} \vfil \end{minipage} %\subsection{Exception Details} This is the only interface needed when raising and handling exceptions. However it is actually a short hand for a more complex trait based interface. The language views exceptions through a series of traits. If a type satisfies them, then it can be used as an exception. The following is the base trait all exceptions need to match. \begin{cfa} }; \end{cfa} The trait is defined over two types, the exception type and the virtual table The trait is defined over two types: the exception type and the virtual table type. Each exception type should have a single virtual table type. There are no actual assertions in this trait because the trait system completing the virtual system). The imaginary assertions would probably come from a trait defined by the virtual system, and state that the exception type is a virtual type, is a descendant of @exception_t@ (the base exception type), and note its virtual table type. is a virtual type, is a descendant of @exception_t@ (the base exception type) and allow the user to find the virtual table type. % I did have a note about how it is the programmer's responsibility to make }; \end{cfa} Both traits ensure a pair of types are an exception type, its virtual table type, Both traits ensure a pair of types is an exception type, its virtual table type and defines one of the two default handlers. The default handlers are used as fallbacks and are discussed in detail in \vref{s:ExceptionHandling}. facing way. So these three macros are provided to wrap these traits to simplify referring to the names: @IS_EXCEPTION@, @IS_TERMINATION_EXCEPTION@, and @IS_RESUMPTION_EXCEPTION@. 
@IS_EXCEPTION@, @IS_TERMINATION_EXCEPTION@ and @IS_RESUMPTION_EXCEPTION@.
All three take one or two arguments. The first argument is the name of the
These twin operations are the core of \CFA's exception handling mechanism.
This section covers the general patterns shared by the two operations and
then goes on to cover the details of each individual operation.
then goes on to cover the details of each individual operation.
Both operations follow the same set of steps.
First, a user raises an exception.
Second, the exception propagates up the stack.
Second, the exception propagates up the stack, searching for a handler.
Third, if a handler is found, the exception is caught and the handler is
run. After that control continues at a raise-dependent location.
Fourth, if a handler is not found, a default handler is run and, if it returns, then control
As an alternate to the third step, if a handler is not found, a default handler is run and, if it returns, then control
continues after the raise.
%This general description covers what the two kinds have in common.
The differences in the two operations include how propagation is performed, where execution continues after an exception is caught and handled, and which default handler is run.
The differences between the two operations include how propagation is performed,
where execution continues after an exception is handled and
which default handler is run.
\subsection{Termination}
\label{s:Termination}
Termination handling is the familiar EHM and used in most programming
Termination handling is the familiar kind of handling and used in most programming
languages with exception handling.
It is a dynamic, non-local goto. If the raised exception is matched and
Then propagation starts with the search.
\CFA uses a first match" rule so matching is performed with the copied
exception as the search key.
It starts from the raise in the throwing function and proceeds towards the base of the stack,
It starts from the raise site and proceeds towards the base of the stack,
from callee to caller.
At each stack frame, a check is made for termination handlers defined by the
\end{cfa}
When viewed on its own, a try statement simply executes the statements
in the \snake{GUARDED_BLOCK}, and when those are finished,
in the \snake{GUARDED_BLOCK} and when those are finished,
the try statement finishes.
termination exception types.
The global default termination handler performs a cancellation
(see \vref{s:Cancellation} for the justification)
on the current stack with the copied exception.
Since it is so general, a more specific handler is usually defined,
possibly with a detailed message, and used for specific exception type,
effectively overriding the default handler.
(as described in \vref{s:Cancellation}) on the current stack with the copied
exception.
Since it is so general, a more specific handler can be defined,
overriding the default behaviour for the specific exception types.
\subsection{Resumption}
\label{s:Resumption}
Resumption exception handling is the less familiar EHM, but is
Resumption exception handling is a less familiar form of exception handling, but is
just as old~\cite{Goodenough75} and is simpler in many ways.
It is a dynamic, non-local function call. If the raised exception is
function once the error is corrected, and
ignorable events, such as logging where nothing needs to happen and control
should always continue from the raise point.
should always continue from the raise site.
Except for the changes to fit into that pattern, resumption exception
handling is symmetric with termination exception handling, by design
(see \autoref{s:Termination}).
A resumption raise is started with the @throwResume@ statement:
throwResume EXPRESSION;
\end{cfa}
\todo{Decide on a final set of keywords and use them everywhere.}
It works much the same way as the termination throw.
The expression must return a reference to a resumption exception, where the resumption exception is any type that satisfies the trait @is_resumption_exception@ at the call site. The assertions from this trait are available to the exception system while handling the exception. At run-time, no exception copy is made, since % The new keywords are currently experimental" and not used in this work. It works much the same way as the termination raise, except the type must satisfy the \snake{is_resumption_exception} that uses the default handler: \defaultResumptionHandler. This can be specialized for particular exception types. At run-time, no exception copy is made. Since resumption does not unwind the stack nor otherwise remove values from the current scope, so there is no need to manage memory to keep the exception in scope. Then propagation starts with the search. It starts from the raise in the resuming function and proceeds towards the base of the stack, from callee to caller. At each stack frame, a check is made for resumption handlers defined by the @catchResume@ clauses of a @try@ statement. current scope, there is no need to manage memory to keep the exception allocated. Then propagation starts with the search, following the same search path as termination, from the raise site to the base of stack and top of try statement to bottom. However, the handlers on try statements are defined by @catchResume@ clauses. \begin{cfa} try { } \end{cfa} % PAB, you say this above. % When a try statement is executed, it simply executes the statements in the % @GUARDED_BLOCK@ and then finishes. % % However, while the guarded statements are being executed, including any % invoked functions, all the handlers in these statements are included in the % search path. % Hence, if a resumption exception is raised, these handlers may be matched % against the exception and may handle it. 
% % Exception matching checks the handler in each catch clause in the order % they appear, top to bottom. If the representation of the raised exception type % is the same or a descendant of @EXCEPTION_TYPE@$_i$, then @NAME@$_i$ % (if provided) is bound to a pointer to the exception and the statements in % @HANDLER_BLOCK@$_i$ are executed. % If control reaches the end of the handler, execution continues after the % the raise statement that raised the handled exception. % % Like termination, if no resumption handler is found during the search, % then the default handler (\defaultResumptionHandler) visible at the raise % statement is called. It will use the best match at the raise sight according % to \CFA's overloading rules. The default handler is % passed the exception given to the raise. When the default handler finishes % execution continues after the raise statement. % % There is a global @defaultResumptionHandler{} is polymorphic over all % resumption exceptions and performs a termination throw on the exception. % The \defaultTerminationHandler{} can be overridden by providing a new % function that is a better match. The @GUARDED_BLOCK@ and its associated nested guarded statements work the same for resumption as for termination, as does exception matching at each @catchResume@. Similarly, if no resumption handler is found during the search, then the currently visible default handler (\defaultResumptionHandler) is called and control continues after the raise statement if it returns. Finally, there is also a global @defaultResumptionHandler@, which can be overridden, that is polymorphic over all resumption exceptions but performs a termination throw on the exception rather than a cancellation. Throwing the exception in @defaultResumptionHandler@ has the positive effect of walking the stack a second time for a recovery handler. Hence, a programmer has two chances for help with a problem, fixup or recovery, should either kind of handler appear on the stack. 
However, this dual stack walk leads to the following apparent anomaly:
\begin{cfa}
try {
	throwResume E;
} catch (E) {
	// this handler runs
}
\end{cfa}
because the @catch@ appears to handle a @throwResume@, but a @throwResume@ only
matches with @catchResume@. The anomaly results because the unmatched
@catchResume@ calls @defaultResumptionHandler@, which in turn throws @E@.
% I wonder if there would be some good central place for this.
Note, termination and resumption handlers may be used together
Note that termination handlers and resumption handlers may be used together
in a single try statement, intermixing @catch@ and @catchResume@ freely.
Each type of handler only interacts with exceptions from the matching
kind of raise.
Like @catch@ clauses, @catchResume@ clauses have no effect if an exception
is not raised.
The matching rules are exactly the same as well.
The first major difference here is that after @EXCEPTION_TYPE@$_i$ is matched
and @NAME@$_i$ is bound to the exception,
@HANDLER_BLOCK@$_i$ is executed right away without first unwinding the stack.
After the block has finished running control jumps to the raise site, where
the just handled exception came from, and continues executing after it,
not after the try statement.
\subsubsection{Resumption Marking}
and run, its try block (the guarded statements) and every try statement
searched before it are still on the stack. Their presence can lead to the
\emph{recursive resumption problem}.
the recursive resumption problem.\cite{Buhr00a}
% Other possible citation is MacLaren77, but the form is different.
The recursive resumption problem is any situation where a resumption handler
When this code is executed, the guarded @throwResume@ starts a
search and matches the handler in the @catchResume@ clause. This
call is placed on the stack above the try-block.
Now the second raise in the handler searches the same try block,
matches, and puts another instance of the
call is placed on the stack above the try-block.
Now the second raise in the handler searches the same try block,
matches again and then puts another instance of the
same handler on the stack leading to infinite recursion.
While this situation is trivial and easy to avoid, much more complex cycles
can form with multiple handlers and different exception types.  The key point is that the programmer's intuition expects every raise in a handler to start searching \emph{below} the @try@ statement, making it difficult to understand and fix the problem.
While this situation is trivial and easy to avoid, much more complex cycles
can form with multiple handlers and different exception types.
To prevent all of these cases, each try statement is marked" from the
time the exception search reaches it to either when a matching handler
completes or when the search reaches the base
time the exception search reaches it to either when a handler completes
handling that exception or when the search reaches the base
of the stack.
While a try statement is marked, its handlers are never matched, effectively
for instance, marking just the handlers that caught the exception, would also prevent recursive resumption. However, the rule selected mirrors what happens with termination, and hence, matches programmer intuition that a raise searches below a try. In detail, the marked try statements are the ones that would be removed from
However, the rule selected mirrors what happens with termination,
so this reduces the number of rules and patterns a programmer has to know.
The marked try statements are the ones that would be removed from
the stack for a termination exception, \ie those on the stack
between the handler and the raise statement.
\subsection{Comparison with Reraising}
Without conditional catch, the only approach to match in more detail is to reraise
the exception after it has been caught, if it could not be handled.
In languages without conditional catch, that is no ability to match an exception based on something other than its type, it can be mimicked by matching all exceptions of the right type, checking any additional conditions inside the handler and re-raising the exception if it does not match those. Here is a minimal example comparing both patterns, using @throw;@ (no argument) to start a re-raise. \begin{center} \begin{tabular}{l|l} \begin{tabular}{l r} \begin{cfa} try { do_work_may_throw(); } catch(excep_t * ex; can_handle(ex)) { handle(ex); } do_work_may_throw(); } catch(exception_t * exc ; can_handle(exc)) { handle(exc); } \end{cfa} & \begin{cfa} try { do_work_may_throw(); } catch(excep_t * ex) { if (can_handle(ex)) { handle(ex); do_work_may_throw(); } catch(exception_t * exc) { if (can_handle(exc)) { handle(exc); } else { throw; } } \end{cfa} \end{tabular} \end{center} At first glance catch-and-reraise may appear to just be a quality of life feature, but there are some significant differences between the two stratagies. A simple difference that is more important for \CFA than many other languages is that the raise site changes, with a re-raise but does not with a conditional catch. This is important in \CFA because control returns to the raise site to run the per-site default handler. Because of this only a conditional catch can allow the original raise to continue. The more complex issue comes from the difference in how conditional catches and re-raises handle multiple handlers attached to a single try statement. A conditional catch will continue checking later handlers while a re-raise will skip them. If the different handlers could handle some of the same exceptions, translating a try statement that uses one to use the other can quickly become non-trivial: \noindent Original, with conditional catch: \begin{cfa} ... 
} catch (an_exception * e ; check_a(e)) { handle_a(e); } catch (exception_t * e ; check_b(e)) { handle_b(e); } \end{cfa} Translated, with re-raise: \begin{cfa} ... } catch (exception_t * e) { an_exception * an_e = (virtual an_exception *)e; if (an_e && check_a(an_e)) { handle_a(an_e); } else if (check_b(e)) { handle_b(e); } else { throw; } \end{cfa} \end{tabular} \end{center} Notice catch-and-reraise increases complexity by adding additional data and code to the exception process. Nevertheless, catch-and-reraise can simulate conditional catch straightforwardly, when exceptions are disjoint, \ie no inheritance. However, catch-and-reraise simulation becomes unusable for exception inheritance. \begin{flushleft} \begin{cfa}[xleftmargin=6pt] exception E1; exception E2(E1); // inheritance \end{cfa} \begin{tabular}{l|l} \begin{cfa} try { ... foo(); ... // raise E1/E2 ... bar(); ... // raise E1/E2 } catch( E2 e; e.rtn == foo ) { ... } catch( E1 e; e.rtn == foo ) { ... } catch( E1 e; e.rtn == bar ) { ... } \end{cfa} & \begin{cfa} try { ... foo(); ... ... bar(); ... } catch( E2 e ) { if ( e.rtn == foo ) { ... } else throw; // reraise } catch( E1 e ) { if (e.rtn == foo) { ... } else if (e.rtn == bar) { ... else throw; // reraise } \end{cfa} \end{tabular} \end{flushleft} The derived exception @E2@ must be ordered first in the catch list, otherwise the base exception @E1@ catches both exceptions. In the catch-and-reraise code (right), the @E2@ handler catches exceptions from both @foo@ and @bar@. However, the reraise misses the following catch clause. To fix this problem, an enclosing @try@ statement is need to catch @E2@ for @bar@ from the reraise, and its handler must duplicate the inner handler code for @bar@. To generalize, this fix for any amount of inheritance and complexity of try statement requires a technique called \emph{try-block splitting}~\cite{Krischer02}, which is not discussed in this thesis. 
It is sufficient to state that conditional catch is more expressive than catch-and-reraise in terms of complexity. \begin{comment} That is, they have the same behaviour in isolation. Two things can expose differences between these cases. One is the existence of multiple handlers on a single try statement. A reraise skips all later handlers for a try statement but a conditional catch does not. % Hence, if an earlier handler contains a reraise later handlers are % implicitly skipped, with a conditional catch they are not. Still, they are equivalently powerful, both can be used two mimic the behaviour of the other, as reraise can pack arbitrary code in the handler and conditional catches can put arbitrary code in the predicate. % I was struggling with a long explanation about some simple solutions, % like repeating a condition on later handlers, and the general solution of % merging everything together. I don't think it is useful though unless its % for a proof. % https://en.cppreference.com/w/cpp/language/throw The question then becomes Which is a better default?" We believe that not skipping possibly useful handlers is a better default. If a handler can handle an exception it should and if the handler can not handle the exception then it is probably safer to have that explicitly described in the handler itself instead of implicitly described by its ordering with other handlers. % Or you could just alter the semantics of the throw statement. The handler % index is in the exception so you could use it to know where to start % searching from in the current try statement. % No place for the goto else; metaphor. The other issue is all of the discussion above assumes that the only way to tell apart two raises is the exception being raised and the remaining search path. This is not true generally, the current state of the stack can matter in a number of cases, even only for a stack trace after an program abort. 
But \CFA has a much more significant need of the rest of the stack, the default handlers for both termination and resumption. % For resumption it turns out it is possible continue a raise after the % exception has been caught, as if it hadn't been caught in the first place. This becomes a problem combined with the stack unwinding used in termination exception handling. The stack is unwound before the handler is installed, and hence before any reraises can run. So if a reraise happens the previous stack is gone, the place on the stack where the default handler was supposed to run is gone, if the default handler was a local function it may have been unwound too. There is no reasonable way to restore that information, so the reraise has to be considered as a new raise. This is the strongest advantage conditional catches have over reraising, they happen before stack unwinding and avoid this problem. % The one possible disadvantage of conditional catch is that it runs user % code during the exception search. While this is a new place that user code % can be run destructors and finally clauses are already run during the stack % unwinding. (There is a simpler solution if @handle_a@ never raises exceptions, using nested try statements.) % } catch (an_exception * e ; check_a(e)) { %     handle_a(e); % } catch (exception_t * e ; !(virtual an_exception *)e && check_b(e)) { %     handle_b(e); % } % % https://www.cplusplus.com/reference/exception/current_exception/ %   exception_ptr current_exception() noexcept; % https://www.python.org/dev/peps/pep-0343/ \end{comment} % } catch (an_exception * e) %   if (check_a(e)) { %     handle_a(e); %   } else throw; % } catch (exception_t * e) %   if (check_b(e)) { %     handle_b(e); %   } else throw; % } In similar simple examples translating from re-raise to conditional catch takes less code but it does not have a general trivial solution either. 
So, given that the two patterns do not trivially translate into each other,
it becomes a matter of which one should be encouraged and made the default.
From the premise that if a handler could handle an exception then it
should, it follows that checking as many handlers as possible is preferred.
So conditional catch and checking later handlers is a good default.
\section{Finally Clauses}
The @FINALLY_BLOCK@ is executed when the try statement is removed from the
stack, including when the @GUARDED_BLOCK@ finishes, any termination handler
finishes, or during an unwind.
finishes or during an unwind.
The only time the block is not executed is if the program is exited before
the stack is unwound.
they have their own strengths, similar to top-level function and lambda
functions with closures.
Destructors take more work for their creation, but if there is clean-up code
Destructors take more work to create, but if there is clean-up code
that needs to be run every time a type is used, they are much easier
to set-up.
to set-up for each use. % It's automatic.
On the other hand finally clauses capture the local context, so is easy to
use when the clean-up is not dependent on the type of a variable or requires
raise, this exception is not used in matching only to pass information about
the cause of the cancellation.
Finally, since a cancellation only unwinds and forwards, there is no default handler.
Finally, as no handler is provided, there is no default handler.
After @cancel_stack@ is called the exception is copied into the EHM's memory
After the main stack is unwound there is a program-level abort.
The reason for this semantics in a sequential program is that there is no more code to
execute.
This semantics also applies to concurrent programs, even if threads are running.
That is, if any thread starts a cancellation, it implies all threads terminate.
Keeping the same behaviour in sequential and concurrent programs is simple.
Also, even in concurrent programs there may not currently be any other stacks
and even if other stacks do exist, main has no way to know where they are.
The first reason for this behaviour is for sequential programs where there
is only one stack, and hence no stack to pass information to.
Second, even in concurrent programs, the main stack has no dependency
on another stack and no reliable way to find another living stack.
Finally, keeping the same behaviour in both sequential and concurrent
programs is simple and easy to understand.
\paragraph{Thread Stack}
With explicit join and a default handler that triggers a cancellation, it is
possible to cascade an error across any number of threads, cleaning up each
possible to cascade an error across any number of threads,
alternating between the resumption (possibly termination) and cancellation, cleaning up each
in turn, until the error is handled or the main thread is reached.
caller's context and passes it to the internal report.
A coroutine only knows of two other coroutines, its starter and its last resumer.
A coroutine only knows of two other coroutines,
its starter and its last resumer.
The starter has a much more distant connection, while the last resumer just
(in terms of coroutine state) called resume on this coroutine, so the message
With a default handler that triggers a cancellation, it is possible to
cascade an error across any number of coroutines, cleaning up each in turn,
cascade an error across any number of coroutines,
alternating between the resumption (possibly termination) and cancellation, cleaning up each in turn,
until the error is handled or a thread stack is reached.
\PAB{Part of this I do not understand. A cancellation cannot be caught. But you talk about
handling a cancellation in the last sentence. Which is correct?}
• doc/theses/andrew_beach_MMath/future.tex

 rdd1cc02 \label{c:future} The following discussion covers both possible interesting research that could follow from this work as well as simple implementation improvements. \section{Language Improvements} \todo{Future/Language Improvements seems to have gotten mixed up. It is presented as waiting on language improvements" but really it's more non-research based improvements.} \CFA is a developing programming language. As such, there are partially or unimplemented features of the language (including several broken components) that I had to workaround while building an exception handling system largely in the \CFA language (some C components).  The following are a few of these issues, and once implemented/fixed, how they would affect the exception system. unimplemented features (including several broken components) that I had to workaround while building the EHM largely in the \CFA language (some C components). Below are a few of these issues and how implementing/fixing them would affect the EHM. In addition there are some simple improvements that had no interesting research attached to them but would make using the language easier. \begin{itemize} \item The implementation of termination is not portable because it includes hand-crafted assembly statements. The existing compilers cannot translate that for other platforms and those sections must be ported by hand to support more hardware architectures, such as the ARM processor. \item Due to a type-system problem, the catch clause cannot bind the exception to a result in little or no change in the exception system but simplify usage. \item The @copy@ function in the exception virtual table is an adapter to address some limitations in the \CFA copy constructor. If the copy constructor is improved it can be used directly without the adapter. \item Termination handlers cannot use local control-flow transfers, \eg by @break@, @return@, \etc. 
The reason is that current code generation hoists a handler into a nested function for convenience (versus assemble-code generation at the @try@ statement). Hence, when the handler runs, its code is not in the lexical scope of the @try@ statement, where the local control-flow transfers are meaningful. try statement). Hence, when the handler runs, it can still access local variables in the lexical scope of the try statement. Still, it does mean that seemingly local control flow is not in fact local and crosses a function boundary. Making the termination handlers code within the surrounding function would remove this limitation. % Try blocks are much more difficult to do practically (requires our own % assembly) and resumption handlers have some theoretical complexity. \item There is no detection of colliding unwinds. It is possible for clean-up code run during an unwind to trigger another unwind that escapes the clean-up code itself; such as a termination exception caught further down the stack or a cancellation. There do exist ways to handle this but currently they are not even detected and the first unwind will simply be forgotten, often leaving cancellation. There do exist ways to handle this case, but currently there is no detection and the first unwind will simply be forgotten, often leaving it in a bad state. \item Also the exception system did not have a lot of time to be tried and tested. So just letting people use the exception system more will reveal new quality of life upgrades that can be made with time. Finally, the exception system has not had a lot of programmer testing. More time with encouraged usage will reveal new quality of life upgrades that can be made. \end{itemize} project, but was thrust upon it to do exception inheritance; hence, only minimal work is done. A draft for a complete virtual system is available but it is not finalized. A future \CFA project is to complete that work and then not finalized. 
A future \CFA project is to complete that work and then update the exception system that uses the current version. exception traits. The most important one is an assertion to check one virtual type is a child of another. This check precisely captures many of the correctness requirements. current ad-hoc correctness requirements. Other features of the virtual system could also remove some of the special cases around exception virtual tables, such as the generation of the @msg@ function, could be removed. The full virtual system might also include other improvement like associated types to allow traits to refer to types not listed in their header. This feature allows exception traits to not refer to the virtual-table type explicitly, removing the need for the current interface macros. explicitly, removing the need for the current interface macros, such as @EHM_IS_EXCEPTION@. \section{Additional Raises} Non-local/concurrent raise requires more coordination between the concurrency system and the exception system. Many of the interesting design decisions centre and the exception system. Many of the interesting design decisions center around masking, \ie controlling which exceptions may be thrown at a stack. It would likely require more of the virtual system and would also effect how Checked exceptions make exceptions part of a function's type by adding an exception signature. An exception signature must declare all checked exceptions that could propagate from the function (either because they were raised inside the function or came from a sub-function). This improves safety exceptions that could propagate from the function, either because they were raised inside the function or came from a sub-function. This improves safety by making sure every checked exception is either handled or consciously passed on. 
However checked exceptions were never seriously considered for this project because they have significant trade-offs in usablity and code reuse in because they have significant trade-offs in usability and code reuse in exchange for the increased safety. These trade-offs are most problematic when trying to pass exceptions through not support a successful-exiting stack-search without doing an unwind. Workarounds are possible but awkward. Ideally an extension to libunwind could be made, but that would either require separate maintenance or gain enough support to have it folded into the standard. be made, but that would either require separate maintenance or gaining enough support to have it folded into the official library itself. Also new techniques to skip previously searched parts of the stack need to be to leave the handler. Currently, mimicking this behaviour in \CFA is possible by throwing a termination inside a resumption handler. termination exception inside a resumption handler. % Maybe talk about the escape; and escape CONTROL_STMT; statements or how
• doc/theses/andrew_beach_MMath/implement.tex

 rdd1cc02 \label{s:VirtualSystem} % Virtual table rules. Virtual tables, the pointer to them and the cast. While the \CFA virtual system currently has only one public feature, virtual cast (see the virtual cast feature \vpageref{p:VirtualCast}), substantial structure is required to support it, While the \CFA virtual system currently has only two public features, virtual cast and virtual tables, % ??? refs (see the virtual cast feature \vpageref{p:VirtualCast}), substantial structure is required to support them, and provide features for exception handling and the standard library. \subsection{Virtual Type} Virtual types only have one change to their structure: the addition of a pointer to the virtual table, which is called the \emph{virtual-table pointer}. Internally, the field is called \snake{virtual_table}. The field is fixed after construction. It is always the first field in the A virtual type~(see \autoref{s:virtuals}) has a pointer to a virtual table, called the \emph{virtual-table pointer}, which binds each instance of a virtual type to a virtual table. Internally, the field is called \snake{virtual_table} and is fixed after construction. This pointer is also the table's id and how the system accesses the virtual table and the virtual members there. It is always the first field in the structure so that its location is always known. \todo{Talk about constructors for virtual types (after they are working).} The virtual table pointer binds an instance of a virtual type to a virtual table. The pointer is also the table's id and how the system accesses the virtual table and the virtual members there. % We have no special rules for these constructors. Virtual table pointers are passed to the constructors of virtual types as part of field-by-field construction. \subsection{Type Id} Every virtual type has a unique id. Type ids can be compared for equality, which checks if the types represented are the same, or used to access the type's type information. 
These are used in type equality, to check if the representation of two values is the same, and to access the type's type information. This uniqueness means across a program composed of multiple translation units (TU), not uniqueness across all programs or even across multiple processes on the same machine. Our approach for program uniqueness is using a static declaration for each type id, where the run-time storage address of that variable is guaranteed to be unique during program execution. The type id storage can also be used for other purposes, and is used for type information. The problem is that a type id may appear in multiple TUs that compose a program (see \autoref{ss:VirtualTable}); so the initial solution would seem to be to make it external in each translation unit. However, the type id must have a declaration in (exactly) one of the TUs to create the storage. No other declaration related to the virtual type has this property, so doing this through standard C declarations would require the user to do it manually. Instead the linker is used to handle this problem. % I did not base anything off of C++17; they are solving the same problem. A new feature has been added to \CFA for this purpose, the special attribute \snake{cfa_linkonce}, which uses the special section @.gnu.linkonce@. When used as a prefix (\eg @.gnu.linkonce.example@) the linker does not combine these sections, but instead discards all but one with the same full name. So each type id must be given a unique section name with the linkonce prefix. Luckily \CFA already has a way to get unique names, the name mangler. For example, this could be written directly in \CFA: \begin{cfa} __attribute__((cfa_linkonce)) void f() {} \end{cfa} This is translated to: \begin{cfa} __attribute__((section(".gnu.linkonce._X1fFv___1"))) void _X1fFv___1() {} \end{cfa} This is done internally to access the name manglers. 
This attribute is useful for other purposes, any other place a unique instance is required, and should eventually be made part of a public and stable feature in \CFA. \subsection{Type Information} There is data stored at the type id's declaration, the type information. The type information currently is only the parent's type id or, if the type has no parent, the null pointer. The id's are implemented as pointers to the type's type information instance. Dereferencing the pointer gets the type information. The ancestors of a virtual type are found by traversing type ids through the type information. The information pushes the issue of creating a unique value (for the type id) to the problem of creating a unique instance (for type information), which the linker can solve. The advanced linker support is used here to avoid having to create a new declaration to attach this data to. With C/\CFA's header/implementation file divide for something to appear exactly once it must come from a declaration that appears in exactly one implementation file; the declarations in header files may exist only once they can be included in many different translation units. Therefore, the structure's declaration will not work. Neither will attaching the type information to the virtual table -- although vtable declarations are in implementation files they are not unique, see \autoref{ss:VirtualTable}. Instead the same type information is generated multiple times and then the new attribute \snake{cfa_linkonce} is used to remove duplicates. An example using helper macros looks like: \begin{cfa} struct INFO_TYPE(TYPE) { INFO_TYPE(PARENT) const * parent; }; __attribute__((cfa_linkonce)) INFO_TYPE(TYPE) const INFO_NAME(TYPE) = { &INFO_NAME(PARENT), }; \end{cfa} Type information is constructed as follows: \begin{enumerate} \begin{enumerate}[nosep] \item Use the type's name to generate a name for the type information structure. This is saved so it may be reused. 
Use the type's name to generate a name for the type information structure, which is saved so it can be reused. \item Generate a new structure definition to store the type information. The layout is the same in each case, just the parent's type id, but the types used change from instance to instance. The generated name is used for both this structure and, if relivant, the The generated name is used for both this structure and, if relevant, the parent pointer. If the virtual type is polymorphic then the type information structure is polymorphic as well, with the same polymorphic arguments. \item A seperate name for instances is generated from the type's name. A separate name for instances is generated from the type's name. \item The definition is generated and initialised. The definition is generated and initialized. The parent id is set to the null pointer or to the address of the parent's type information instance. Name resolution handles the rest. \item \CFA's name mangler does its regular name mangling encoding the type of the declaration into the instance name. This gives a completely unique name the declaration into the instance name. This process gives a completely unique name including different instances of the same polymorphic type. \end{enumerate} \todo{The list is making me realise, some of this isn't ordered.} Writing that code manually, with helper macros for the early name mangling, \end{cfa} \begin{comment} \subsubsection{\lstinline{cfa\_linkonce} Attribute} % I just realised: This is an extension of the inline keyword. % I just realized: This is an extension of the inline keyword. % An extension of C's at least, it is very similar to C++'s. Another feature added to \CFA is a new attribute: \texttt{cfa\_linkonce}. everything that comes after the special prefix, then only one is used and the other is discarded. \end{comment} \subsection{Virtual Table} below. The layout always comes in three parts. 
\todo{Add labels to the virtual table layout figure.} The layout always comes in three parts (see \autoref{f:VirtualTableLayout}). The first section is just the type id at the head of the table. It is always there to ensure that it can be found even when the accessing code does not The second section are all the virtual members of the parent, in the same order as they appear in the parent's virtual table. Note that the type may change slightly as references to the this" will change. This is limited to change slightly as references to the this" change. This is limited to inside pointers/references and via function pointers so that the size (and hence the offsets) are the same. \begin{figure} \begin{center} \input{vtable-layout} \end{center} \caption{Virtual Table Layout} \label{f:VirtualTableLayout} \todo*{Improve the Virtual Table Layout diagram.} \end{figure} type's alignment, is set using an @alignof@ expression. \subsubsection{Concurrency Integration} Most of these tools are already inside the compiler. Using simple code transformations early on in compilation, allows most of that work to be handed off to the existing tools. \autoref{f:VirtualTableTransformation} shows an example transformation, this example shows an exception virtual table. It also shows the transformation on the full declaration. For a forward declaration, the @extern@ keyword is preserved and the initializer is not added. \begin{figure}[htb] \begin{cfa} vtable(example_type) example_name; \end{cfa} \transformline % Check mangling. \begin{cfa} const struct example_type_vtable example_name = { .__cfavir_typeid : &__cfatid_example_type, .size : sizeof(example_type), .copy : copy, .^?{} : ^?{}, .msg : msg, }; \end{cfa} \caption{Virtual Table Transformation} \label{f:VirtualTableTransformation} \end{figure} \subsection{Concurrency Integration} Coroutines and threads need instances of @CoroutineCancelled@ and @ThreadCancelled@ respectively to use all of their functionality. 
When a new at the definition of the main function. This is showned through code re-writing in \autoref{f:ConcurrencyTypeTransformation} and \autoref{f:ConcurrencyMainTransformation}. In both cases the original declaration is not modified, These transformations are shown through code re-writing in \autoref{f:CoroutineTypeTransformation} and \autoref{f:CoroutineMainTransformation}. Threads use the same pattern, with some names and types changed. In both cases, the original declaration is not modified, only new ones are added. \begin{figure} \begin{figure}[htb] \begin{cfa} coroutine Example { extern CoroutineCancelled_vtable & _default_vtable; \end{cfa} \caption{Concurrency Type Transformation} \label{f:ConcurrencyTypeTransformation} \caption{Coroutine Type Transformation} \label{f:CoroutineTypeTransformation} \end{figure} \begin{figure} \begin{figure}[htb] \begin{cfa} void main(Example & this) { &_default_vtable_object_declaration; \end{cfa} \caption{Concurrency Main Transformation} \label{f:ConcurrencyMainTransformation} \caption{Coroutine Main Transformation} \label{f:CoroutineMainTransformation} \end{figure} \begin{cfa} void * __cfa__virtual_cast( struct __cfavir_type_td parent, struct __cfavir_type_id const * child ); \end{cfa} The type id of target type of the virtual cast is passed in as @parent@ and struct __cfavir_type_id * parent, struct __cfavir_type_id * const * child ); \end{cfa} The type id for the target type of the virtual cast is passed in as @parent@ and the cast target is passed in as @child@. For generated C code wraps both arguments and the result with type casts. The generated C code wraps both arguments and the result with type casts. There is also an internal check inside the compiler to make sure that the target type is a virtual type. \section{Exceptions} % Anything about exception construction. % The implementation of exception types. 
Creating exceptions can be roughly divided into two parts, the exceptions themselves and the virtual system interactions. Creating an exception type is just a matter of prepending the field with the virtual table pointer to the list of the fields (see \autoref{f:ExceptionTypeTransformation}). \begin{figure}[htb] \begin{cfa} exception new_exception { // EXISTING FIELDS }; \end{cfa} \transformline \begin{cfa} struct new_exception { struct new_exception_vtable const * virtual_table; // EXISTING FIELDS }; \end{cfa} \caption{Exception Type Transformation} \label{f:ExceptionTypeTransformation} \end{figure} The integration between exceptions and the virtual system is a bit more complex simply because of the nature of the virtual system prototype. The primary issue is that the virtual system has no way to detect when it should generate any of its internal types and data. This is handled by the exception code, which tells the virtual system when to generate its components. All types associated with a virtual type, the types of the virtual table and the type id, are generated when the virtual type (the exception) is first found. The type id (the instance) is generated with the exception, if it is a monomorphic type. However, if the exception is polymorphic, then a different type id has to be generated for every instance. In this case, generation is delayed until a virtual table is created. % There are actually some problems with this, which is why it is not used % for monomorphic types. When a virtual table is created and initialized, two functions are created to fill in the list of virtual members. The first is a copy function that adapts the exception's copy constructor to work with pointers, avoiding some issues with the current copy constructor interface. Second is the msg function that returns a C-string with the type's name, including any polymorphic parameters. 
On function entry and return, unwinding is handled directly by the call/return code embedded in the function. In many cases, the position of the instruction pointer (relative to parameter and local declarations) is enough to know the current size of the stack frame. % Discussing normal stack unwinding: Usually, the stack-frame size is known statically based on parameter and local variable declarations. Even with dynamic stack-size, the information local variable declarations. Even for a dynamic stack-size, the information to determine how much of the stack has to be removed is still contained within the function. bumping the hardware stack-pointer up or down as needed. Constructing/destructing values within a stack frame has a similar complexity but can add additional work and take longer. a similar complexity but larger constants. % Discussing multiple frame stack unwinding: Unwinding across multiple stack frames is more complex because that information is no longer contained within the current function. With separate compilation a function has no way of knowing what its callers are so it can't know how large those frames are. Without altering the main code path it is also hard to pass that work off to the caller. With separate compilation, a function does not know its callers nor their frame layout. Even using the return address, that information is encoded in terms of actions in code, intermixed with the actions required to finish the function. Without changing the main code path it is impossible to select one of those two groups of actions at the return site. The traditional unwinding mechanism for C is implemented by saving a snap-shot This approach is fragile and requires extra work in the surrounding code. With respect to the extra work in the surrounding code, many languages define clean-up actions that must be taken when certain sections of the stack are removed. 
Such as when the storage for a variable is removed from the stack or when a try statement with a finally clause is is removed from the stack, possibly requiring a destructor call, or when a try statement with a finally clause is (conceptually) popped from the stack. None of these should be handled by the user --- that would contradict the None of these cases should be handled by the user --- that would contradict the intention of these features --- so they need to be handled automatically. In plain C (which \CFA currently compiles down to) this flag only handles the cleanup attribute: %\label{code:cleanup} \begin{cfa} void clean_up( int * var ) { ... } in this case @clean_up@, run when the variable goes out of scope. This feature is enough to mimic destructors, but not try statements which can effect but not try statements that affect the unwinding. To get full unwinding support, all of these features must be handled directly in assembly and assembler directives; partiularly the cfi directives in assembly and assembler directives; particularly the cfi directives \snake{.cfi_lsda} and \snake{.cfi_personality}. @_UA_FORCE_UNWIND@ specifies a forced unwind call. Forced unwind only performs the cleanup phase and uses a different means to decide when to stop (see \vref{s:ForcedUnwind}). (see \autoref{s:ForcedUnwind}). \end{enumerate} The @exception_class@ argument is a copy of the \code{C}{exception}'s @exception_class@ field, which is a number that identifies the exception handling mechanism which is a number that identifies the EHM that created the exception. needs its own exception context. The exception context should be retrieved by calling the function The current exception context should be retrieved by calling the function \snake{this_exception_context}. For sequential execution, this function is defined as The first step of a termination raise is to copy the exception into memory managed by the exception system. 
Currently, the system uses @malloc@, rather than reserved memory or the stack top. The exception handling mechanism manages than reserved memory or the stack top. The EHM manages memory for the exception as well as memory for libunwind and the system's own per-exception storage. \newsavebox{\stackBox} \begin{lrbox}{\codeBox} \begin{lstlisting}[language=CFA,{moredelim=**[is][\color{red}]{@}{@}}] \begin{cfa} unsigned num_exceptions = 0; void throws() { throws(); } \end{lstlisting} \end{cfa} \end{lrbox} \begin{lrbox}{\stackBox} \begin{lstlisting} | try-finally | try-catch (Example) | finally block (Example) | try block throws() | try-finally | try-catch (Example) | finally block (Example) | try block throws() | try-finally | try-catch (Example) | finally block (Example) | try block throws() main() \label{f:MultipleExceptions} \end{figure} \todo*{Work on multiple exceptions code sample.} All exceptions are stored in nodes, which are then linked together in lists \subsection{Try Statements and Catch Clauses} The try statement with termination handlers is complex because it must compensate for the C code-generation versus compensate for the C code-generation versus proper assembly-code generated from \CFA. Libunwind requires an LSDA and personality function for control to unwind across a function. The LSDA in particular is hard to mimic in generated C code. The workaround is a function called @__cfaehm_try_terminate@ in the standard library. The contents of a try block and the termination handlers are converted into functions. These are then passed to the try terminate function and it calls them. The workaround is a function called \snake{__cfaehm_try_terminate} in the standard \CFA library. The contents of a try block and the termination handlers are converted into nested functions. These are then passed to the try terminate function and it calls them, appropriately. 
Because this function is known and fixed (and not an arbitrary function that happens to contain a try statement), the LSDA can be generated ahead happens to contain a try statement), its LSDA can be generated ahead of time. Both the LSDA and the personality function are set ahead of time using Both the LSDA and the personality function for \snake{__cfaehm_try_terminate} are set ahead of time using embedded assembly. This assembly code is handcrafted using C @asm@ statements and contains enough information for a single try statement the function repersents. enough information for the single try statement the function represents. The three functions passed to try terminate are: decides if a catch clause matches the termination exception. It is constructed from the conditional part of each handler and runs each check, top to bottom, in turn, first checking to see if the exception type matches and then if the condition is true. It takes a pointer to the exception and returns 0 if the in turn, to see if the exception matches this handler. The match is performed in two steps, first a virtual cast is used to check if the raised exception is an instance of the declared exception type or one of its descendant types, and then the condition is evaluated, if present. The match function takes a pointer to the exception and returns 0 if the exception is not handled here. Otherwise the return value is the id of the handler that matches the exception. All three functions are created with GCC nested functions. GCC nested functions can be used to create closures, in other words functions that can refer to the state of other functions on the stack. This approach allows the functions to refer to all the in other words, functions that can refer to variables in their lexical scope even those variables are part of a different function. This approach allows the functions to refer to all the variables in scope for the function containing the @try@ statement. 
These nested functions and all other functions besides @__cfaehm_try_terminate@ in \autoref{f:TerminationTransformation} shows the pattern used to transform a \CFA try statement with catch clauses into the approprate C functions. \todo{Explain the Termination Transformation figure.} a \CFA try statement with catch clauses into the appropriate C functions. \begin{figure} \caption{Termination Transformation} \label{f:TerminationTransformation} \todo*{Improve (compress?) Termination Transformations.} \end{figure} Instead of storing the data in a special area using assembly, there is just a linked list of possible handlers for each stack, with each node on the list reperenting a try statement on the stack. with each node on the list representing a try statement on the stack. The head of the list is stored in the exception context. to the head of the list. Instead of traversing the stack, resumption handling traverses the list. At each node, the EHM checks to see if the try statement the node repersents At each node, the EHM checks to see if the try statement the node represents can handle the exception. If it can, then the exception is handled and the operation finishes, otherwise the search continues to the next node. If the search reaches the end of the list without finding a try statement that can handle the exception, the default handler is executed and the operation finishes. with a handler clause that can handle the exception, the default handler is executed. If the default handler returns, control continues after the raise statement. Each node has a handler function that does most of the work. The handler function is passed the raised exception and returns true if the exception is handled and false otherwise. The handler function checks each of its internal handlers in order, top-to-bottom, until it finds a match. If a match is found that handler is If no match is found the function returns false. 
The match is performed in two steps, first a virtual cast is used to see if the thrown exception is an instance of the declared exception or one of its descendant type, then check to see if passes the custom predicate if one is defined. This ordering gives the type guarantee used in the predicate. if the raised exception is an instance of the declared exception type or one of its descendant types, if so then it is passed to the custom predicate if one is defined. % You need to make sure the type is correct before running the predicate % because the predicate can depend on that. \autoref{f:ResumptionTransformation} shows the pattern used to transform a \CFA try statement with catch clauses into the approprate C functions. \todo{Explain the Resumption Transformation figure.} a \CFA try statement with catchResume clauses into the appropriate C functions. \begin{figure} \caption{Resumption Transformation} \label{f:ResumptionTransformation} \todo*{Improve (compress?) Resumption Transformations.} \end{figure} (see \vpageref{s:ResumptionMarking}), which ignores parts of the stack already examined, is accomplished by updating the front of the list as the search continues. Before the handler at a node is called, the head of the list already examined, and is accomplished by updating the front of the list as the search continues. Before the handler is called at a matching node, the head of the list is updated to the next node of the current node. After the search is complete, successful or not, the head of the list is reset. been checked are not on the list while a handler is run. If a resumption is thrown during the handling of another resumption, the active handlers and all the other handler checked up to this point are not checked again. the other handlers checked up to this point are not checked again. % No paragraph? 
This structure also supports new handlers added while the resumption is being \begin{figure} \centering \input{resumption-marking} \caption{Resumption Marking} \label{f:ResumptionMarking} \todo*{Label Resumption Marking to aid clarity.} \end{figure} \section{Finally} % Uses destructors and GCC nested functions. A finally clause is placed into a GCC nested-function with a unique name, and no arguments or return values. This nested function is then set as the cleanup function of an empty object that is declared at the beginning of a block placed around the context of the associated @try@ statement. The rest is handled by GCC. The try block and all handlers are inside this block. At completion, control exits the block and the empty object is cleaned %\autoref{code:cleanup} A finally clause is handled by converting it into a once-off destructor. The code inside the clause is placed into GCC nested-function with a unique name, and no arguments or return values. This nested function is then set as the cleanup function of an empty object that is declared at the beginning of a block placed around the context of the associated try statement (see \autoref{f:FinallyTransformation}). \begin{figure} \begin{cfa} try { // TRY BLOCK } finally { // FINALLY BLOCK } \end{cfa} \transformline \begin{cfa} { void finally(void *__hook){ // FINALLY BLOCK } __attribute__ ((cleanup(finally))) struct __cfaehm_cleanup_hook __finally_hook; { // TRY BLOCK } } \end{cfa} \caption{Finally Transformation} \label{f:FinallyTransformation} \end{figure} The rest is handled by GCC. The TRY BLOCK contains the try block itself as well as all code generated for handlers. Once that code has completed, control exits the block and the empty object is cleaned up, which runs the function that contains the finally code. passed to the forced-unwind function. The general pattern of all three stop functions is the same: continue unwinding until the end of stack and then preform the appropriate transfer. 
then perform the appropriate transfer. For main stack cancellation, the transfer is just a program abort.
• doc/theses/andrew_beach_MMath/intro.tex

 rdd1cc02 % Now take a step back and explain what exceptions are generally. Exception handling provides dynamic inter-function control flow. A language's EHM is a combination of language syntax and run-time components that are used to construct, raise, and handle exceptions, including all control flow. Exceptions are an active mechanism for replacing passive error/return codes and return unions (Go and Rust). Exception handling provides dynamic inter-function control flow. components that construct, raise, propagate and handle exceptions, to provide all of that control flow. There are two forms of exception handling covered in this thesis: termination, which acts as a multi-level return, and resumption, which is a dynamic function call. % PAB: Maybe this sentence was suppose to be deleted? Termination handling is much more common, to the extent that it is often seen as the only form of handling. % PAB: I like this sentence better than the next sentence. % This separation is uncommon because termination exception handling is so % much more common that it is often assumed. % WHY: Mention other forms of continuation and \cite{CommonLisp} here? Exception handling relies on the concept of nested functions to create handlers that deal with exceptions. \begin{center} \begin{tabular}[t]{ll} \begin{lstlisting}[aboveskip=0pt,belowskip=0pt,language=CFA,{moredelim=**[is][\color{red}]{@}{@}}] void f( void (*hp)() ) { hp(); } void g( void (*hp)() ) { f( hp ); } void h( int @i@, void (*hp)() ) { void @handler@() { // nested printf( "%d\n", @i@ ); } if ( i == 1 ) hp = handler; if ( i > 0 ) h( i - 1, hp ); else g( hp ); } h( 2, 0 ); \end{lstlisting} & \raisebox{-0.5\totalheight}{\input{handler}} \end{tabular} \end{center} The nested function @handler@ in the second stack frame is explicitly passed to function @f@. When this handler is called in @f@, it uses the parameter @i@ in the second stack frame, which is accessible by an implicit lexical-link pointer. 
Setting @hp@ in @h@ at different points in the recursion, results in invoking a different handler. Exception handling extends this idea by eliminating explicit handler passing, and instead, performing a stack search for a handler that matches some criteria (conditional dynamic call), and calls the handler at the top of the stack. It is the runtime search $O(N)$ that differentiates an EHM call (raise) from normal dynamic call $O(1)$ via a function or virtual-member pointer. Termination exception handling searches the stack for a handler, unwinds the stack to the frame containing the matching handler, and calls the handler at the top of the stack. % About other works: Often, when this separation is not made, termination exceptions are assumed as they are more common and may be the only form of handling provided in a language. All types of exception handling link a raise with a handler. Both operations are usually language primitives, although raises can be treated as a primitive function that takes an exception argument. Handlers are more complex as they are added to and removed from the stack during execution, must specify what they can handle and give the code to handle the exception. Exceptions work with different execution models but for the descriptions that follow a simple call stack, with functions added and removed in a first-in-last-out order, is assumed. Termination exception handling searches the stack for the handler, then unwinds the stack to where the handler was found before calling it. The handler is run inside the function that defined it and when it finishes it returns control to that function. \begin{center} \input{termination} \end{center} Note, since the handler can reference variables in @h@, @h@ must remain on the stack for the handler call. After the handler returns, control continues after the lexical location of the handler in @h@ (static return)~\cite[p.~108]{Tennent77}.
Unwinding allows recover to any previous function on the stack, skipping any functions between it and the function containing the matching handler. Resumption exception handling searches the stack for a handler, does \emph{not} unwind the stack to the frame containing the matching handler, and calls the handler at the top of the stack. Resumption exception handling searches the stack for a handler and then calls it without removing any other stack frames. The handler is run on top of the existing stack, often as a new function or closure capturing the context in which the handler was defined. After the handler has finished running it returns control to the function that preformed the raise, usually starting after the raise. \begin{center} \input{resumption} \end{center} After the handler returns, control continues after the resume in @f@ (dynamic return). Not unwinding allows fix up of the problem in @f@ by any previous function on the stack, without disrupting the current set of stack frames. Although a powerful feature, exception handling tends to be complex to set up and expensive to use so it is often limited to unusual or exceptional" cases. The classic example is error handling, where exceptions are used to remove error handling logic from the main execution path, while paying The classic example is error handling, exceptions can be used to remove error handling logic from the main execution path, and pay most of the cost only when the error actually occurs. some of the underlying tools used to implement and express exception handling in other languages are absent in \CFA. Still the resulting basic syntax resembles that of other languages: \begin{lstlisting}[language=CFA,{moredelim=**[is][\color{red}]{@}{@}}] @try@ { Still the resulting syntax resembles that of other languages: \begin{cfa} try { ... T * object = malloc(request_size); if (!object) { @throw@ OutOfMemory{fixed_allocation, request_size}; throw OutOfMemory{fixed_allocation, request_size}; } ... 
} @catch@ (OutOfMemory * error) { } catch (OutOfMemory * error) { ... } \end{lstlisting} \end{cfa} % A note that yes, that was a very fast overview. The design and implementation of all of \CFA's EHM's features are % The current state of the project and what it contributes. The majority of the \CFA EHM is implemented in \CFA, except for a small amount of assembler code. In addition, a suite of tests and performance benchmarks were created as part of this project. The \CFA implementation techniques are generally applicable in other programming All of these features have been implemented in \CFA, covering both changes to the compiler and the run-time. In addition, a suite of test cases and performance benchmarks were created along side the implementation. The implementation techniques are generally applicable in other programming languages and much of the design is as well. Some parts of the EHM use features unique to \CFA, and hence, are harder to replicate in other programming languages. % Talk about other programming languages. Three well known programming languages with EHMs, %/exception handling C++, Java and Python are examined in the performance work. However, these languages focus on termination exceptions, so there is no comparison with resumption. Some parts of the EHM use other features unique to \CFA and would be harder to replicate in other programming languages. The contributions of this work are: \begin{enumerate} \item Designing \CFA's exception handling mechanism, adapting designs from other programming languages, and creating new features. \item Implementing stack unwinding for the \CFA EHM, including updating the \CFA compiler and run-time environment to generate and execute the EHM code. \item Designing and implementing a prototype virtual system. other programming languages and creating new features. \item Implementing stack unwinding and the \CFA EHM, including updating the \CFA compiler and the run-time environment. 
\item Designed and implemented a prototype virtual system. % I think the virtual system and per-call site default handlers are the only % "new" features, everything else is a matter of implementation. \item Creating tests and performance benchmarks to compare with EHM's in other languages. \item Creating tests to check the behaviour of the EHM. \item Creating benchmarks to check the performances of the EHM, as compared to other languages. \end{enumerate} %\todo{I can't figure out a good lead-in to the roadmap.} The thesis is organization as follows. The next section and parts of \autoref{c:existing} cover existing EHMs. New \CFA EHM features are introduced in \autoref{c:features}, The rest of this thesis is organized as follows. The current state of exceptions is covered in \autoref{s:background}. The existing state of \CFA is also covered in \autoref{c:existing}. New EHM features are introduced in \autoref{c:features}, covering their usage and design. That is followed by the implementation of these features in \autoref{c:implement}. Performance results are presented in \autoref{c:performance}. Summing up and possibilities for extending this project are discussed in \autoref{c:future}. Performance results are examined in \autoref{c:performance}. Possibilities to extend this project are discussed in \autoref{c:future}. Finally, the project is summarized in \autoref{c:conclusion}. \section{Background} \label{s:background} Exception handling is a well examined area in programming languages, with papers on the subject dating back the 70s~\cite{Goodenough75}. Exception handling has been examined before in programming languages, with papers on the subject dating back 70s.\cite{Goodenough75} Early exceptions were often treated as signals, which carried no information except their identity. Ada~\cite{Ada} still uses this system. except their identity. Ada originally used this system\cite{Ada}, but now allows for a string message as a payload\cite{Ada12}. 
The modern flag-ship for termination exceptions is \Cpp, which added them in its first major wave of non-object-orientated features in 1990. % https://en.cppreference.com/w/cpp/language/history While many EHMs have special exception types, \Cpp has the ability to use any type as an exception. However, this generality is not particularly useful, and has been pushed aside for classes, with a convention of inheriting from in 1990.\cite{CppHistory} Many EHMs have special exception types, however \Cpp has the ability to use any type as an exception. These were found to be not very useful and have been pushed aside for classes inheriting from \code{C++}{std::exception}. While \Cpp has a special catch-all syntax @catch(...)@, there is no way to discriminate its exception type, so nothing can be done with the caught value because nothing is known about it. Instead the base exception-type \code{C++}{std::exception} is defined with common functionality (such as the ability to print a message when the exception is raised but not caught) and all Although there is a special catch-all syntax (@catch(...)@) there are no operations that can be performed on the caught value, not even type inspection. Instead the base exception-type \code{C++}{std::exception} defines common functionality (such as the ability to describe the reason the exception was raised) and all exceptions have this functionality. Having a root exception-type seems to be the standard now, as the guaranteed functionality is worth any lost in flexibility from limiting exceptions types to classes. Java~\cite{Java} was the next popular language to use exceptions. Its exception system largely reflects that of \Cpp, except it requires exceptions to be a subtype of \code{Java}{java.lang.Throwable} That trade-off, restricting usable types to gain guaranteed functionality, is almost universal now, as without some common functionality it is almost impossible to actually handle any errors. 
Java was the next popular language to use exceptions.\cite{Java8} Its exception system largely reflects that of \Cpp, except that it requires you to throw a child type of \code{Java}{java.lang.Throwable} and it uses checked exceptions. Checked exceptions are part of a function's interface defining all exceptions it or its called functions raise. Using this information, it is possible to statically verify if a handler exists for all raised exception, \ie no uncaught exceptions. Making exception information explicit, improves clarity and safety, but can slow down programming. For example, programming complexity increases when dealing with high-order methods or an overly specified throws clause. However some of the issues are more programming annoyances, such as writing/updating many exception signatures after adding or remove calls. Java programmers have developed multiple programming hacks'' to circumvent checked exceptions negating the robustness it is suppose to provide. For example, the catch-and-ignore" pattern, where the handler is empty because the exception does not appear relevant to the programmer versus repairing or recovering from the exception. Checked exceptions are part of a function's interface, the exception signature of the function. Every exception that could be raised from a function, either directly or because it is not handled from a called function, is given. Using this information, it is possible to statically verify if any given exception is handled and guarantee that no exception will go unhandled. Making exception information explicit improves clarity and safety, but can slow down or restrict programming. For example, programming high-order functions becomes much more complex if the argument functions could raise exceptions. However, as odd as it may seem, the worst problems are rooted in the simple inconvenience of writing and updating exception signatures.
This has caused Java programmers to develop multiple programming hacks'' to circumvent checked exceptions, negating their advantages. One particularly problematic example is the catch-and-ignore'' pattern, where an empty handler is used to handle an exception without doing any recovery or repair. In theory that could be good enough to properly handle the exception, but more often is used to ignore an exception that the programmer does not feel is worth the effort of handling it, for instance if they do not believe it will ever be raised. If they are incorrect the exception will be silenced, while in a similar situation with unchecked exceptions the exception would at least activate the language's unhandled exception code (usually program abort with an error message). %\subsection Resumption exceptions are less popular, although resumption is as old as termination; hence, few although resumption is as old as termination; hence, few programming languages have implemented them. % http://bitsavers.informatik.uni-stuttgart.de/pdf/xerox/parc/techReports/ %   CSL-79-3_Mesa_Language_Manual_Version_5.0.pdf Mesa~\cite{Mesa} is one programming languages that did. Experience with Mesa is quoted as being one of the reasons resumptions are not Mesa is one programming language that did.\cite{Mesa} Experience with Mesa is quoted as being one of the reasons resumptions were not included in the \Cpp standard. % https://en.wikipedia.org/wiki/Exception_handling As a result, resumption has ignored in main-stream programming languages. However, what goes around comes around'' and resumption is being revisited now (like user-level threading). While rejecting resumption might have been the right decision in the past, there are decades of developments in computer science that have changed the situation. Some of these developments, such as functional programming's resumption equivalent, algebraic effects\cite{Zhang19}, are enjoying significant success. 
A complete reexamination of resumptions is beyond this thesis, but their re-emergence is enough to try them in \CFA. Since then resumptions have been ignored in main-stream programming languages. However, resumption is being revisited in the context of decades of other developments in programming languages. While rejecting resumption may have been the right decision in the past, the situation has changed since then. Some developments, such as the functional programming equivalent to resumptions, algebraic effects\cite{Zhang19}, are enjoying success. A complete reexamination of resumptions is beyond this thesis, but their re-emergence is enough to try them in \CFA. % Especially considering how much easier they are to implement than % termination exceptions. %\subsection Functional languages tend to use other solutions for their primary EHM, but exception-like constructs still appear. Termination appears in error construct, which marks the result of an expression as an error; thereafter, the result of any expression that tries to use it is also an error, and so on until an appropriate handler is reached. % termination exceptions and how much Peter likes them. %\subsection Functional languages tend to use other solutions for their primary error handling mechanism, but exception-like constructs still appear. Termination appears in the error construct, which marks the result of an expression as an error; then the result of any expression that tries to use it also results in an error, and so on until an appropriate handler is reached. Resumption appears in algebraic effects, where a function dispatches its side-effects to its caller for handling. %\subsection Some programming languages have moved to a restricted kind of EHM called panic". In Rust~\cite{Rust}, a panic is just a program level abort that may be implemented by unwinding the stack like in termination exception handling.
% https://doc.rust-lang.org/std/panic/fn.catch_unwind.html In Go~\cite{Go}, a panic is very similar to a termination, except it only supports More recently exceptions seem to be vanishing from newer programming languages, replaced by panic". In Rust, a panic is just a program level abort that may be implemented by unwinding the stack like in termination exception handling.\cite{RustPanicMacro}\cite{RustPanicModule} Go's panic though is very similar to a termination, except it only supports a catch-all by calling \code{Go}{recover()}, simplifying the interface at the cost of flexibility. the cost of flexibility.\cite{Go:2021} %\subsection While exception handling's most common use cases are in error handling, here are other ways to handle errors with comparisons to exceptions. here are some other ways to handle errors with comparisons with exceptions. \begin{itemize} \item\emph{Error Codes}: This pattern has a function return an enumeration (or just a set of fixed values) to indicate if an error occurred and possibly which error it was. Error codes mix exceptional and normal values, artificially enlarging the type and/or value range. Some languages address this issue by returning multiple values or a tuple, separating the error code from the function result. However, the main issue with error codes is forgetting to checking them, This pattern has a function return an enumeration (or just a set of fixed values) to indicate if an error has occurred and possibly which error it was. Error codes mix exceptional/error and normal values, enlarging the range of possible return values. This can be addressed with multiple return values (or a tuple) or a tagged union. However, the main issue with error codes is forgetting to check them, which leads to an error being quietly and implicitly ignored. Some new languages have tools that issue warnings, if the error code is discarded to avoid this problem.
Checking error codes also results in bloating the main execution path, especially if an error is not dealt with locally and has to be cascaded down the call stack to a higher-level function. Some new languages and tools will try to issue warnings when an error code is discarded to avoid this problem. Checking error codes also bloats the main execution path, especially if the error is not handled immediately and has to be passed through multiple functions before it is addressed. \item\emph{Special Return with Global Store}: Some functions only return a boolean indicating success or failure and store the exact reason for the error in a fixed global location. For example, many C routines return non-zero or -1, indicating success or failure, and write error details into the C standard variable @errno@. This approach avoids the multiple results issue encountered with straight error codes but otherwise has many (if not more) of the disadvantages. For example, everything that uses the global location must agree on all possible errors and global variables are unsafe with concurrency. Similar to the error codes pattern but the function itself only returns that there was an error and stores the reason for the error in a fixed global location. For example many routines in the C standard library will only return some error value (such as -1 or a null pointer) and the error code is written into the standard variable @errno@. This approach avoids the multiple results issue encountered with straight error codes but otherwise has the same disadvantages and more. Every function that reads or writes to the global store must agree on all possible errors and managing it becomes more complex with concurrency. \item\emph{Return Union}: so that one type can be used everywhere in error handling code. This pattern is very popular in functional or any semi-functional language with primitive support for tagged unions (or algebraic data types).
% We need listing Rust/rust to format code snipits from it. This pattern is very popular in any functional or semi-functional language with primitive support for tagged unions (or algebraic data types). % We need listing Rust/rust to format code snippets from it. % Rust's \code{rust}{Result} The main advantage is providing for more information about an error, other than one of a fix-set of ids. While some languages use checked union access to force error-code checking, it is still possible to bypass the checking. The main disadvantage is again significant error code on the main execution path and cascading through called functions. The main advantage is that an arbitrary object can be used to represent an error so it can include a lot more information than a simple error code. The disadvantages include that it does have to be checked along the main execution and if there aren't primitive tagged unions proper usage can be hard to enforce. \item\emph{Handler Functions}: This pattern implicitly associates functions with errors. On error, the function that produced the error implicitly calls another function to This pattern associates errors with functions. On error, the function that produced the error calls another function to handle it. The handler function can be provided locally (passed in as an argument, either directly or as a field of a structure/object) or globally (a global variable). C++ uses this approach as its fallback system if exception handling fails, \eg \snake{std::terminate_handler} and for a time \snake{std::unexpected_handler} Handler functions work a lot like resumption exceptions, without the dynamic handler search. Therefore, setting up the handler can be more complex/expensive, especially if the handler must be passed through multiple function calls, but cheaper to call $O(1)$, and hence, are more suited to frequent exceptional situations.
% The exception being global handlers if they are rarely change as the time % in both cases shrinks towards zero. C++ uses this approach as its fallback system if exception handling fails, such as \snake{std::terminate_handler} and, for a time, \snake{std::unexpected_handler}. Handler functions work a lot like resumption exceptions, but without the dynamic search for a handler. Since setting up the handler can be more complex/expensive, especially when the handler has to be passed through multiple layers of function calls, but cheaper (constant time) to call, they are more suited to more frequent (less exceptional) situations. \end{itemize} %\subsection Because of their cost, exceptions are rarely used for hot paths of execution. Therefore, there is an element of self-fulfilling prophecy for implementation techniques to make exceptions cheap to set-up at the cost of expensive usage. This cost differential is less important in higher-level scripting languages, where use of exceptions for other tasks is more common. An iconic example is Python's @StopIteration@ exception that is thrown by an iterator to indicate that it is exhausted, especially when combined with Python's heavy use of the iterator-based for-loop. % https://docs.python.org/3/library/exceptions.html#StopIteration Hence, there is an element of self-fulfilling prophecy as implementation techniques have been focused on making them cheap to set-up, happily making them expensive to use in exchange. This difference is less important in higher-level scripting languages, where using exceptions for other tasks is more common. An iconic example is Python's \code{Python}{StopIteration}\cite{PythonExceptions} exception that is thrown by an iterator to indicate that it is exhausted. When paired with Python's iterator-based for-loop this will be thrown every time the end of the loop is reached.\cite{PythonForLoop}
• doc/theses/andrew_beach_MMath/performance.tex

 rdd1cc02 \label{c:performance} Performance has been of secondary importance for most of this project. Instead, the focus has been to get the features working. The only performance requirements is to ensure the tests for correctness run in a reasonable amount of time. Performance is of secondary importance for most of this project. Instead, the focus was to get the features working. The only performance requirement is to ensure the tests for correctness run in a reasonable amount of time. Hence, a few basic performance tests were performed to check this requirement. \section{Test Set-Up} Tests will be run in \CFA, C++, Java and Python. Tests were run in \CFA, C++, Java and Python. In addition there are two sets of tests for \CFA, one for termination exceptions and once with resumption exceptions. one with termination and one with resumption. C++ is the most comparable language because both it and \CFA use the same framework, libunwind. In fact, the comparison is almost entirely a quality of implementation comparison. \CFA's EHM has had significantly less time to be optimized and In fact, the comparison is almost entirely in quality of implementation. Specifically, \CFA's EHM has had significantly less time to be optimized and does not generate its own assembly. It does have a slight advantage in that there are some features it does not handle, through utility functions, but otherwise \Cpp has a significant advantage. Java is another very popular language with similar termination semantics. It is implemented in a very different environment, a virtual machine with \Cpp has to do some extra bookkeeping to support its utility functions, but otherwise \Cpp should have a significant advantage. Java, a popular language with similar termination semantics, is implemented in a very different environment, a virtual machine with garbage collection. It also implements the finally clause on try blocks allowing for a direct feature-to-feature comparison. 
As with \Cpp, Java's implementation is more mature, has more optimizations and more extra features. Python was used as a point of comparison because of the \CFA EHM's current performance goals, which is not be prohibitively slow while the As with \Cpp, Java's implementation is mature, has more optimizations and extra features as compared to \CFA. Python is used as an alternative comparison because of the \CFA EHM's current performance goals, which is to not be prohibitively slow while the features are designed and examined. Python has similar performance goals for creating quick scripts and its wide use suggests it has achieved those goals. Unfortunately there are no notable modern programming languages with resumption exceptions. Even the older programming languages with resumptions seem to be notable only for having resumptions. So instead resumptions are compared to a less similar but much more familiar feature, termination exceptions. All tests are run inside a main loop which will perform the test repeatedly. This is to avoids start-up or tear-down time from Unfortunately, there are no notable modern programming languages with resumption exceptions. Even the older programming languages with resumption seem to be notable only for having resumption. Instead, resumption is compared to its simulation in other programming languages: fixup functions that are explicitly passed into a function. All tests are run inside a main loop that repeatedly performs a test. This approach avoids start-up or tear-down time from affecting the timing results. Tests ran their main loop a million times. The Java versions of the test also run this loop an extra 1000 times before beginning to time the results to warm-up" the JVM. The number of times the loop is run is configurable from the command line; the number used in the timing runs is given with the results per test. The Java tests run the main loop 1000 times before beginning the actual test to warm-up" the JVM. 
% All other languages are precompiled or interpreted. Timing is done internally, with time measured immediately before and immediately after the test loop. The difference is calculated and printed. after the test loop. The difference is calculated and printed. The loop structure and internal timing means it is impossible to test unhandled exceptions in \Cpp and Java as that would cause the process to terminate. The exceptions used in these tests will always be an exception based off of the base exception. This requirement minimizes performance differences based on the object model used to repersent the exception. All tests were designed to be as minimal as possible while still preventing exessive optimizations. The exceptions used in these tests are always based off of the base exception for the language. This requirement minimizes performance differences based on the object model used to represent the exception. All tests are designed to be as minimal as possible, while still preventing excessive optimizations. For example, empty inline assembly blocks are used in \CFA and \Cpp to prevent excessive optimizations while adding no actual work. % \code{C++}{catch(...)}). When collecting data, each test is run eleven times. The top three and bottom three results are discarded and the remaining five values are averaged. The tests are run with the latest (still pre-release) \CFA compiler, using gcc-10 10.3.0 as a backend. g++-10 10.3.0 is used for \Cpp. Java tests are compiled and run with version 11.0.11. Python used version 3.8.10. The machines used to run the tests are: \begin{itemize}[nosep] \item ARM 2280 Kunpeng 920 48-core 2$\times$socket \lstinline{@} 2.6 GHz running Linux v5.11.0-25 \item AMD 6380 Abu Dhabi 16-core 4$\times$socket \lstinline{@} 2.5 GHz running Linux v5.11.0-25 \end{itemize} These represent the two major families of hardware architecture.
\section{Tests} The following tests were selected to test the performance of different components of the exception system. The should provide a guide as to where the EHM's costs can be found. \paragraph{Raise and Handle} The first group of tests involve setting up So there is three layers to the test. The first is set up and a loop, which configures the test and then runs it repeatedly to reduce the impact of start-up and shutdown on the results. Each iteration of the main loop They should provide a guide as to where the EHM's costs are found. \paragraph{Stack Traversal} This group measures the cost of traversing the stack, (and in termination, unwinding it). Inside the main loop is a call to a recursive function. This function calls itself F times before raising an exception. F is configurable from the command line, but is usually 100. This builds up many stack frames, and any contents they may have, before the raise. The exception is always handled at the base of the stack. For example the Empty test for \CFA resumption looks like: \begin{cfa} void unwind_empty(unsigned int frames) { if (frames) { unwind_empty(frames - 1); } else { throwResume (empty_exception){&empty_vt}; } } \end{cfa} Other test cases have additional code around the recursive call adding something besides simple stack frames to the stack. Note that both termination and resumption have to traverse over the stack but only termination has to unwind it. \begin{itemize}[nosep] \item Empty Function: % \item None: % Reuses the empty test code (see below) except that the number of frames % is set to 0 (this is the only test for which the number of frames is not % 100). This isolates the start-up and shut-down time of a throw. \item Empty: The repeating function is empty except for the necessary control code. As other traversal tests add to this, it is the baseline for the group as the cost comes from traversing over and unwinding a stack frame that has no other interactions with the exception system. 
\item Destructor: The repeating function creates an object with a destructor before calling itself. Comparing this to the empty test gives the time to traverse over and unwind a destructor. \item Finally: The repeating function calls itself inside a try block with a finally clause attached. Comparing this to the empty test gives the time to traverse over and unwind a finally clause. \item Other Handler: The repeating function calls itself inside a try block with a handler that will not match the raised exception. (But is of the same kind of handler.) does not match the raised exception, but is of the same kind of handler. This means that the EHM has to check each handler, and continue over all of them until it reaches the base of the stack. Comparing this to the empty test gives the time to traverse over and unwind a handler. \end{itemize} \paragraph{Cross Try Statement} The next group measures the cost of a try statement when no exceptions are raised. The test is set-up, then there is a loop to reduce the impact of start-up and shutdown on the results. In each iteration, a try statement is executed. Entering and leaving a loop is all the test wants to do. This group of tests measures the cost for setting up exception handling, if it is not used (because the exceptional case did not occur). Tests repeatedly cross (enter, execute and leave) a try statement but never perform a raise. \begin{itemize}[nosep] \item Handler: The try statement has a handler (of the matching kind). The try statement has a handler (of the appropriate kind). \item Finally: The try statement has a finally clause. \paragraph{Conditional Matching} This group of tests checks the cost of conditional matching. This group measures the cost of conditional matching. 
Only \CFA implements the language level conditional match, the other languages must mimic with an unconditional" match (it still checks the exception's type) and conditional re-raise if it was not supposed the other languages mimic it with an unconditional" match (it still checks the exception's type) and conditional re-raise if it is not supposed to handle that exception. Here is the pattern shown in \CFA and \Cpp. Java and Python use the same pattern as \Cpp, but with their own syntax. \begin{minipage}{0.45\textwidth} \begin{cfa} try { ... } catch (exception_t * e ; should_catch(e)) { ... } \end{cfa} \end{minipage} \begin{minipage}{0.55\textwidth} \begin{lstlisting}[language=C++] try { ... } catch (std::exception & e) { if (!should_catch(e)) throw; ... } \end{lstlisting} \end{minipage} \begin{itemize}[nosep] \item Match All: The condition is always false. (Never matches or always re-raises.) \end{itemize} \paragraph{Resumption Simulation} A slightly altered version of the Empty Traversal test is used when comparing resumption to fix-up routines. The handler, the actual resumption handler or the fix-up routine, always captures a variable at the base of the loop, and receives a reference to a variable at the raise site, either as a field on the exception or an argument to the fix-up routine. % I don't actually know why that is here but not anywhere else. %\section{Cost in Size} \section{Results} Each test was run eleven times. The top three and bottom three results were discarded and the remaining five values are averaged. In cases where a feature is not supported by a language the test is skipped for that language. Similarly, if a test is does not change between resumption and termination in \CFA, then only one test is written and the result was put into the termination column. 
% Raw Data: % run-algol-a.sat % --------------- % Raise Empty   &  82687046678 &  291616256 &   3252824847 & 15422937623 & 14736271114 \\ % Raise D'tor   & 219933199603 &  297897792 & 223602799362 &         N/A &         N/A \\ % Raise Finally & 219703078448 &  298391745 &          N/A &         ... & 18923060958 \\ % Raise Other   & 296744104920 & 2854342084 & 112981255103 & 15475924808 & 21293137454 \\ % Cross Handler &      9256648 &   13518430 &       769328 &     3486252 &    31790804 \\ % Cross Finally &       769319 &        N/A &          N/A &     2272831 &    37491962 \\ % Match All     &   3654278402 &   47518560 &   3218907794 &  1296748192 &   624071886 \\ % Match None    &   4788861754 &   58418952 &   9458936430 &  1318065020 &   625200906 \\ % % run-algol-thr-c % --------------- % Raise Empty   &   3757606400 &   36472972 &   3257803337 & 15439375452 & 14717808642 \\ % Raise D'tor   &  64546302019 &  102148375 & 223648121635 &         N/A &         N/A \\ % Raise Finally &  64671359172 &  103285005 &          N/A & 15442729458 & 18927008844 \\ % Raise Other   & 294143497130 & 2630130385 & 112969055576 & 15448220154 & 21279953424 \\ % Cross Handler &      9646462 &   11955668 &       769328 &     3453707 &    31864074 \\ % Cross Finally &       773412 &        N/A &          N/A &     2253825 &    37266476 \\ % Match All     &   3719462155 &   43294042 &   3223004977 &  1286054154 &   623887874 \\ % Match None    &   4971630929 &   55311709 &   9481225467 &  1310251289 &   623752624 \\ \begin{tabular}{|l|c c c c c|} \hline & \CFA (Terminate) & \CFA (Resume) & \Cpp & Java & Python \\ \hline Raise Empty   & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Raise D'tor   & 0.0 & 0.0 & 0.0 & N/A & N/A \\ Raise Finally & 0.0 & 0.0 & N/A & 0.0 & 0.0 \\ Raise Other   & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Cross Handler & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Cross Finally & 0.0 & N/A & N/A & 0.0 & 0.0 \\ Match All     & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Match None    & 0.0 & 0.0 & 0.0 & 0.0 
& 0.0 \\ % First, introduce the tables. \autoref{t:PerformanceTermination}, \autoref{t:PerformanceResumption} and~\autoref{t:PerformanceFixupRoutines} show the test results. In cases where a feature is not supported by a language, the test is skipped for that language and the result is marked N/A. There are also cases where the feature is supported but measuring its cost is impossible. This happened with Java, which uses a JIT that optimizes away the tests, and it cannot be stopped.\cite{Dice21} These tests are marked N/C. To get results in a consistent range (1 second to 1 minute is ideal, going higher is better than going lower), N, the number of iterations of the main loop in each test, is varied between tests. It is also given in the results and has a value in the millions. An anomaly in some results came from \CFA's use of gcc nested functions. These nested functions are used to create closures that can access stack variables in their lexical scope. However, if they do so, then they can cause the benchmark's run-time to increase by an order of magnitude. The simplest solution is to make those values global variables instead of function-local variables. % Do we know if editing a global inside nested function is a problem? Tests that had to be modified to avoid this problem have been marked with a *'' in the results. % Now come the tables themselves: % You might need a wider window for this.
\begin{table}[htb] \centering \caption{Termination Performance Results (sec)} \label{t:PerformanceTermination} \begin{tabular}{|r|*{2}{|r r r r|}} \hline & \multicolumn{4}{c||}{AMD}         & \multicolumn{4}{c|}{ARM}  \\ \cline{2-9} N\hspace{8pt}          & \multicolumn{1}{c}{\CFA} & \multicolumn{1}{c}{\Cpp} & \multicolumn{1}{c}{Java} & \multicolumn{1}{c||}{Python} & \multicolumn{1}{c}{\CFA} & \multicolumn{1}{c}{\Cpp} & \multicolumn{1}{c}{Java} & \multicolumn{1}{c|}{Python} \\ \hline Empty Traversal (1M)   & 3.4   & 2.8   & 18.3  & 23.4      & 3.7   & 3.2   & 15.5  & 14.8  \\ D'tor Traversal (1M)   & 48.4  & 23.6  & N/A   & N/A       & 64.2  & 29.0  & N/A   & N/A   \\ Finally Traversal (1M) & 3.4*  & N/A   & 17.9  & 29.0      & 4.1*  & N/A   & 15.6  & 19.0  \\ Other Traversal (1M)   & 3.6*  & 23.2  & 18.2  & 32.7      & 4.0*  & 24.5  & 15.5  & 21.4  \\ Cross Handler (1B)     & 6.0   & 0.9   & N/C   & 37.4      & 10.0  & 0.8   & N/C   & 32.2  \\ Cross Finally (1B)     & 0.9   & N/A   & N/C   & 44.1      & 0.8   & N/A   & N/C   & 37.3  \\ Match All (10M)        & 32.9  & 20.7  & 13.4  & 4.9       & 36.2  & 24.5  & 12.0  & 3.1   \\ Match None (10M)       & 32.7  & 50.3  & 11.0  & 5.1       & 36.3  & 71.9  & 12.3  & 4.2   \\ \hline \end{tabular} % run-plg7a-a.sat % --------------- % Raise Empty   &  57169011329 &  296612564 &   2788557155 & 17511466039 & 23324548496 \\ % Raise D'tor   & 150599858014 &  318443709 & 149651693682 &         N/A &         N/A \\ % Raise Finally & 148223145000 &  373325807 &          N/A &         ... 
& 29074552998 \\ % Raise Other   & 189463708732 & 3017109322 &  85819281694 & 17584295487 & 32602686679 \\ % Cross Handler &      8001654 &   13584858 &      1555995 &     6626775 &    41927358 \\ % Cross Finally &      1002473 &        N/A &          N/A &     4554344 &    51114381 \\ % Match All     &   3162460860 &   37315018 &   2649464591 &  1523205769 &   742374509 \\ % Match None    &   4054773797 &   47052659 &   7759229131 &  1555373654 &   744656403 \\ % % run-plg7a-thr-a % --------------- % Raise Empty   &   3604235388 &   29829965 &   2786931833 & 17576506385 & 23352975105 \\ % Raise D'tor   &  46552380948 &  178709605 & 149834207219 &         N/A &         N/A \\ % Raise Finally &  46265157775 &  177906320 &          N/A & 17493045092 & 29170962959 \\ % Raise Other   & 195659245764 & 2376968982 &  86070431924 & 17552979675 & 32501882918 \\ % Cross Handler &    397031776 &   12503552 &      1451225 &     6658628 &    42304965 \\ % Cross Finally &      1136746 &        N/A &          N/A &     4468799 &    46155817 \\ % Match All     &   3189512499 &   39124453 &   2667795989 &  1525889031 &   733785613 \\ % Match None    &   4094675477 &   48749857 &   7850618572 &  1566713577 &   733478963 \\ % PLG7A (in seconds) \begin{tabular}{|l|c c c c c|} \hline & \CFA (Terminate) & \CFA (Resume) & \Cpp & Java & Python \\ \hline % Raise Empty   & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ % Raise D'tor   & 0.0 & 0.0 & 0.0 & N/A & N/A \\ % Raise Finally & 0.0 & 0.0 & N/A & 0.0 & 0.0 \\ % Raise Other   & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ % Cross Handler & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ % Cross Finally & 0.0 & N/A & N/A & 0.0 & 0.0 \\ % Match All     & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ % Match None    & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Raise Empty   & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Raise D'tor   & 0.0 & 0.0 & 0.0 & N/A & N/A \\ Raise Finally & 0.0 & 0.0 & N/A & 0.0 & 0.0 \\ Raise Other   & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Cross Handler & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Cross Finally & 0.0 
& N/A & N/A & 0.0 & 0.0 \\ Match All     & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ Match None    & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\ \end{table} \begin{table}[htb] \centering \caption{Resumption Performance Results (sec)} \label{t:PerformanceResumption} \begin{tabular}{|r||r||r|} \hline N\hspace{8pt} & AMD     & ARM  \\ \hline Empty Traversal (10M)   & 0.2     & 0.3  \\ D'tor Traversal (10M)   & 1.8     & 1.0  \\ Finally Traversal (10M) & 1.7     & 1.0  \\ Other Traversal (10M)   & 22.6    & 25.9 \\ Cross Handler (1B)      & 8.4     & 11.9 \\ Match All (100M)        & 2.3     & 3.2  \\ Match None (100M)       & 2.9     & 3.9  \\ \hline \end{tabular} One result that is not directly related to \CFA but is important to keep in mind is that in exceptions the standard intuitions about which languages should go faster often do not hold. There are cases where Python out-performs \Cpp and Java. The most likely explanation is that, since exceptions are rarely considered to be the common case, the more optimized languages have optimized at their expense. In addition, languages with high-level representations have a much easier time scanning the stack as there is less to decode. This means that while \CFA does not actually keep up with Python in every case, it is no worse than roughly half the speed of \Cpp. This is good enough for the prototyping purposes of the project. One difference not shown is that optimizations in \CFA are very fragile. The \CFA compiler uses gcc as part of its compilation process and the version of gcc could change the speed of some of the benchmarks by 10 times or more. Similar changes to g++ for the \Cpp benchmarks had no significant effect. Because of the connection between gcc and g++, this suggests it is not the optimizations that are changing but how the optimizer is detecting if the optimizations can be applied. So the optimizations are always applied in g++, but only newer versions of gcc can detect that they can be applied in the more complex \CFA code.
Resumption exception handling is also incredibly fast; often an order of magnitude or two better than the best termination speed. There is a simple explanation for this: traversing a linked list is much faster than examining and unwinding the stack. When resumption does not do as well is when more try statements are used per raise. Updating the internal linked list is not very expensive but it does add up. The relative speed of the Match All and Match None tests (within each language) can also show the effectiveness of conditional matching as compared to catch and rethrow. \begin{itemize}[nosep] \item Java and Python get similar values in both tests. Between the interpreted code, a higher-level representation of the call stack and exception reuse, it is possible the cost for a second throw can be folded into the first. % Is this due to optimization? \item Both types of \CFA are slightly slower if there is not a match. For termination this likely comes from unwinding a bit more stack through libunwind instead of executing the code normally. For resumption there is extra work in traversing more of the list and running more checks for a matching exception. % Resumption is a bit high for that but this is my best theory. \item Then there is \Cpp, which takes 2--3 times longer to catch and rethrow vs. just the catch. This is very high, but it does have to repeat the same process of unwinding the stack and may have to parse the LSDA of the function with the catch and rethrow twice, once before the catch and once after the rethrow. % I spent a long time thinking of what could push it over twice, this is all % I have to explain it. \end{itemize} The difference in relative performance does show that there are savings to be made by performing the check without catching the exception.
\end{table} \begin{table}[htb] \centering \small \caption{Resumption/Fixup Routine Comparison (sec)} \label{t:PerformanceFixupRoutines} \setlength{\tabcolsep}{5pt} \begin{tabular}{|r|*{2}{|r r r r r|}} \hline & \multicolumn{5}{c||}{AMD}     & \multicolumn{5}{c|}{ARM}  \\ \cline{2-11} N\hspace{8pt}       & \multicolumn{1}{c}{Raise} & \multicolumn{1}{c}{\CFA} & \multicolumn{1}{c}{\Cpp} & \multicolumn{1}{c}{Java} & \multicolumn{1}{c||}{Python} & \multicolumn{1}{c}{Raise} & \multicolumn{1}{c}{\CFA} & \multicolumn{1}{c}{\Cpp} & \multicolumn{1}{c}{Java} & \multicolumn{1}{c|}{Python} \\ \hline Resume Empty (10M)  & 1.5 & 1.5 & 14.7 & 2.3 & 176.1  & 1.0 & 1.4 & 8.9 & 1.2 & 119.9 \\ \hline \end{tabular} \end{table} % Now discuss the results in the tables. One result not directly related to \CFA but important to keep in mind is that, for exceptions, the standard intuition about which languages should go faster often does not hold. For example, there are a few cases where Python out-performs \CFA, \Cpp and Java. % To be exact, the Match All and Match None cases. The most likely explanation is that, since exceptions are rarely considered to be the common case, the more optimized languages make that case expensive to improve other cases. In addition, languages with high-level representations have a much easier time scanning the stack as there is less to decode. As stated, the performance tests are not attempting to show \CFA has a new competitive way of implementing exception handling. The only performance requirement is to insure the \CFA EHM has reasonable performance for prototyping. Although that may be hard to exactly quantify, I believe it has succeeded in that regard. Details on the different test cases follow. \subsection{Termination \texorpdfstring{(\autoref{t:PerformanceTermination})}{}} \begin{description} \item[Empty Traversal] \CFA is slower than \Cpp, but is still faster than the other languages and closer to \Cpp than other languages. 
This result is to be expected, as \CFA is closer to \Cpp than the other languages. \item[D'tor Traversal] Running destructors causes a huge slowdown in the two languages that support them. \CFA has a higher proportionate slowdown but it is similar to \Cpp's. Considering the amount of work done in destructors is effectively zero (an assembly comment), the cost must come from the change of context required to run the destructor. \item[Finally Traversal] Performance is similar to Empty Traversal in all languages that support finally clauses. Only Python seems to have a larger than random noise change in its run-time and it is still not large. Despite the similarity between finally clauses and destructors, finally clauses seem to avoid the spike that run-time destructors have. Possibly some optimization removes the cost of changing contexts. \item[Other Traversal] For \Cpp, stopping to check if a handler applies seems to be about as expensive as stopping to run a destructor. This results in a significant jump. Other languages experience a small increase in run-time. The small increase likely comes from running the checks, but they could avoid the spike by not having the same kind of overhead for switching to the check's context. \item[Cross Handler] Here \CFA falls behind \Cpp by a much more significant margin. This is likely due to the fact \CFA has to insert two extra function calls, while \Cpp does not have to execute any other instructions. Python is much further behind. \item[Cross Finally] \CFA's performance now matches \Cpp's from Cross Handler. If the code from the finally clause is being inlined, which is just an asm comment, then there are no additional instructions to execute again when exiting the try statement normally. \item[Conditional Match] Both of the conditional matching tests can be considered on their own. However, for evaluating the value of conditional matching itself, the comparison of the two sets of results is useful.
Consider the massive jump in run-time for \Cpp going from match all to match none, which none of the other languages have. Some strange interaction is causing run-time to more than double for doing twice as many raises. Java and Python avoid this problem and have similar run-time for both tests, possibly through resource reuse or their program representation. However \CFA is built like \Cpp and avoids the problem as well, this matches the pattern of the conditional match, which makes the two execution paths very similar. \end{description} \subsection{Resumption \texorpdfstring{(\autoref{t:PerformanceResumption})}{}} Moving on to resumption, there is one general note, resumption is \textit{fast}. The only test where it fell behind termination is Cross Handler. In every other case, the number of iterations had to be increased by a factor of 10 to get the run-time in an appropriate range and in some cases resumption still took less time. % I tried \paragraph and \subparagraph, maybe if I could adjust spacing % between paragraphs those would work. \begin{description} \item[Empty Traversal] See above for the general speed-up notes. This result is not surprising as resumption's linked-list approach means that traversing over stack frames without a resumption handler is $O(1)$. \item[D'tor Traversal] Resumption does have the same spike in run-time that termination has. The run-time is actually very similar to Finally Traversal. As resumption does not unwind the stack, both destructors and finally clauses are run while walking down the stack during the recursive returns. So it follows their performance is similar. \item[Finally Traversal] Same as D'tor Traversal, except termination did not have a spike in run-time on this test case. \item[Other Traversal] Traversing across handlers reduces resumption's advantage as it actually has to stop and check each one. Resumption still came out ahead (adjusting for iterations) but by much less than the other cases. 
\item[Cross Handler] The only test case where resumption could not keep up with termination, although the difference is not as significant as many other cases. It is simply a matter of where the costs come from: both termination and resumption have some work to set-up or tear-down a handler. It just so happens that resumption's work is slightly slower. \item[Conditional Match] Resumption shows a slight slowdown if the exception is not matched by the first handler, which follows from the fact the second handler now has to be checked. However, the difference is not large. \end{description} \subsection{Resumption/Fixup \texorpdfstring{(\autoref{t:PerformanceFixupRoutines})}{}} Finally, there are the results of the resumption/fixup routine comparison. These results are surprisingly varied. It is possible that creating a closure has more to do with performance than passing the argument through layers of calls. At 100 stack frames, resumption and manual fixup routines have similar performance in \CFA. More experiments could try to tease out the exact trade-offs, but the prototype's only performance goal is to be reasonable. It is already in that range, and \CFA's fixup routine simulation is one of the faster simulations as well. Plus exceptions add features and remove syntactic overhead, so even at similar performance resumptions have advantages over fixup routines.
• doc/theses/andrew_beach_MMath/resumption-marking.fig

 rdd1cc02 -2 1200 2 6 5985 1530 6165 3105 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6075 1620 90 90 6075 1620 6075 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6075 2340 90 90 6075 2340 6075 2430 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6075 3015 90 90 6075 3015 6075 3105 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6075 1755 6075 2205 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6075 2475 6075 2925 -6 6 3465 1530 3645 3105 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3555 1620 90 90 3555 1620 3555 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3555 2340 90 90 3555 2340 3555 2430 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3555 3015 90 90 3555 3015 3555 3105 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3555 1755 3555 2205 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3555 2475 3555 2925 -6 6 2115 1530 2295 3105 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2205 1620 90 90 2205 1620 2205 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2205 2340 90 90 2205 2340 2205 2430 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 2205 3015 90 90 2205 3015 2205 3105 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 2205 1755 2205 2205 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 2205 2475 2205 2925 -6 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 4905 1620 90 90 4905 1620 4905 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 4905 3015 90 90 4905 3015 4905 3105 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 4905 945 90 90 4905 945 4905 1035 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 4905 2340 90 90 4905 2340 4905 2430 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 2790 1620 2430 1620 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 4095 2340 3735 2340 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6660 1620 6300 1620 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 5490 945 5130 945 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1665 1620 90 90 1665 1620 1665 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 1665 2340 90 90 1665 2340 1665 2430 1 3 0 1 
0 7 50 -1 -1 0.000 1 0.0000 1665 3060 90 90 1665 3060 1665 3150 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3195 1620 90 90 3195 1620 3195 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3195 2340 90 90 3195 2340 3195 2430 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3195 3060 90 90 3195 3060 3195 3150 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6525 1620 90 90 6525 1620 6525 1710 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6525 2340 90 90 6525 2340 6525 2430 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 4905 3060 90 90 4905 3060 4905 3150 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6525 3060 90 90 6525 3060 6525 3150 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1 1 1.00 60.00 120.00 4770 1080 4590 1260 4590 2070 4770 2250 4 0 0 50 -1 0 12 0.0000 4 135 1170 1980 3375 Initial State\001 4 0 0 50 -1 0 12 0.0000 4 135 1170 3420 3375 Found Handler\001 4 0 0 50 -1 0 12 0.0000 4 165 810 4770 3375 Try block\001 4 0 0 50 -1 0 12 0.0000 4 135 900 4770 3555 in Handler\001 4 0 0 50 -1 0 12 0.0000 4 165 1530 5940 3375 Handling Complete\001 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1665 1755 1665 2205 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1665 2475 1665 2925 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3195 1755 3195 2205 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3195 2475 3195 2925 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6525 1755 6525 2205 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6525 2475 6525 2925 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1260 1620 1485 1620 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 1980 1440 1755 1440 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 2790 2340 3015 2340 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 3600 1620 3375 1620 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 4500 945 4725 945 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 5265 765 5040 765 2 1 0 1 0 7 50 -1 
-1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6120 1620 6345 1620 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 1 1 1.00 60.00 120.00 6840 1440 6615 1440 4 1 0 50 -1 0 12 0.0000 0 135 1170 1665 3375 Initial State\001 4 1 0 50 -1 0 12 0.0000 0 135 1170 3195 3375 Found Handler\001 4 1 0 50 -1 0 12 0.0000 0 165 1530 6570 3375 Handling Complete\001 4 2 0 50 -1 0 12 0.0000 0 135 720 1485 2385 handlers\001 4 1 0 50 -1 0 12 0.0000 0 135 900 4905 3375 Handler in\001 4 1 0 50 -1 0 12 0.0000 0 165 810 4905 3600 Try block\001 4 0 0 50 -1 0 12 0.0000 0 135 360 855 1665 head\001 4 0 0 50 -1 0 12 0.0000 4 120 810 2025 1485 execution\001 4 0 0 50 -1 0 12 0.0000 0 135 360 2385 2385 head\001 4 0 0 50 -1 0 12 0.0000 4 120 810 3645 1665 execution\001 4 0 0 50 -1 0 12 0.0000 0 135 360 4095 990 head\001 4 0 0 50 -1 0 12 0.0000 4 120 810 5310 810 execution\001 4 0 0 50 -1 0 12 0.0000 0 135 360 5715 1665 head\001 4 0 0 50 -1 0 12 0.0000 4 120 810 6885 1485 execution\001
• doc/theses/andrew_beach_MMath/uw-ethesis-frontpgs.tex

 rdd1cc02 \begin{center}\textbf{Abstract}\end{center} This is the abstract. The \CFA (Cforall) programming language is an evolutionary refinement of the C programming language, adding modern programming features without changing the programming paradigms of C. One of these modern programming features is more powerful error handling through the addition of an exception handling mechanism (EHM). This thesis covers the design and implementation of the \CFA EHM, along with a review of the other required \CFA features. The EHM includes common features of termination exception handling and similar support for resumption exception handling. The design of both has been adapted to utilize other tools \CFA provides, as well as fit with the assertion based interfaces of the language. The EHM has been implemented into the \CFA compiler and run-time environment. Although it has not yet been optimized, performance testing has shown it has comparable performance to other EHM's, which is sufficient for use in current \CFA programs. \cleardoublepage \begin{center}\textbf{Acknowledgements}\end{center} I would like to thank all the little people who made this thesis possible. I would like to thank all the people who made this thesis possible. (I'm waiting until who is involved is finalized.) \cleardoublepage
• doc/theses/andrew_beach_MMath/uw-ethesis.bib

 rdd1cc02 % Bibliography of key references for "LaTeX for Thesis and Large Documents" % For use with BibTeX % The online reference does not seem to be supported here. @book{goossens.book, author =        "Michel Goossens and Frank Mittelbach and Alexander Samarin", title =         "The \LaTeX\ Companion", year =          "1994", publisher =     "Addison-Wesley", address =       "Reading, Massachusetts" @misc{Dice21, author      = {Dave Dice}, year        = 2021, month       = aug, howpublished= {personal communication} } @book{knuth.book, author =        "Donald Knuth", title =         "The \TeX book", year =          "1986", publisher =     "Addison-Wesley", address =       "Reading, Massachusetts" @misc{CforallExceptionBenchmarks, contributer = {pabuhr@plg}, key         = {Cforall Exception Benchmarks}, author      = {{\textsf{C}{$\mathbf{\forall}$} Exception Benchmarks}}, howpublished= {\href{https://github.com/cforall/ExceptionBenchmarks_SPE20}{https://\-github.com/\-cforall/\-ExceptionBenchmarks\_SPE20}}, } @book{lamport.book, author =        "Leslie Lamport", title =         "\LaTeX\ --- A Document Preparation System", edition =       "Second", year =          "1994", publisher =     "Addison-Wesley", address =       "Reading, Massachusetts" % Could not get #the-for-statement to work. @misc{PythonForLoop, author={Python Software Foundation}, key={Python Compound Statements}, howpublished={\href{https://docs.python.org/3/reference/compound_stmts.html}{https://\-docs.python.org/\-3/\-reference/\-compound\_stmts.html}}, addendum={Accessed 2021-08-30}, } % Again, I would like this to have #StopIteration. 
@misc{PythonExceptions, author={Python Software Foundation}, key={Python Exceptions}, howpublished={\href{https://docs.python.org/3/library/exceptions.html}{https://\-docs.python.org/\-3/\-library/\-exceptions.html}}, addendum={Accessed 2021-08-30}, } @misc{CppHistory, author={C++ Community}, key={Cpp Reference History}, howpublished={\href{https://en.cppreference.com/w/cpp/language/history}{https://\-en.cppreference.com/\-w/\-cpp/\-language/\-history}}, addendum={Accessed 2021-08-30}, } @misc{RustPanicMacro, author={The Rust Team}, key={Rust Panic Macro}, howpublished={\href{https://doc.rust-lang.org/std/panic/index.html}{https://\-doc.rust-lang.org/\-std/\-panic/\-index.html}}, addendum={Accessed 2021-08-31}, } @misc{RustPanicModule, author={The Rust Team}, key={Rust Panic Module}, howpublished={\href{https://doc.rust-lang.org/std/panic/index.html}{https://\-doc.rust-lang.org/\-std/\-panic/\-index.html}}, addendum={Accessed 2021-08-31}, } @manual{Go:2021, keywords={Go programming language}, author={Robert Griesemer and Rob Pike and Ken Thompson}, title={{Go} Programming Language}, organization={Google}, year=2021, note={\href{http://golang.org/ref/spec}{http://\-golang.org/\-ref/\-spec}}, addendum={Accessed 2021-08-31}, }
• doc/theses/andrew_beach_MMath/uw-ethesis.tex

 rdd1cc02 \lstMakeShortInline@ \lstset{language=CFA,style=cfacommon,basicstyle=\linespread{0.9}\tt} % PAB causes problems with inline @= %\lstset{moredelim=**[is][\protect\color{red}]{@}{@}} % Annotations from Peter: \newcommand{\PAB}[1]{{\color{blue}PAB: #1}}
• doc/theses/andrew_beach_MMath/vtable-layout.fig

 rdd1cc02 -2 1200 2 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 1 1620 1665 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 3510 1890 3645 1755 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 3645 1305 3645 1755 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 2115 1935 2250 1935 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4 2250 1170 2115 1170 2115 2475 2250 2475 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 2250 1350 2115 1350 4 0 0 50 -1 0 12 0.0000 4 165 630 2295 1305 type_id\001 4 0 0 50 -1 0 12 0.0000 4 165 1170 2295 1500 parent_field0\001
• libcfa/prelude/builtins.c

 rdd1cc02 // Created On       : Fri Jul 21 16:21:03 2017 // Last Modified By : Peter A. Buhr // Last Modified On : Wed Jul 21 13:31:34 2021 // Update Count     : 129 // Last Modified On : Sat Aug 14 08:45:54 2021 // Update Count     : 133 // #endif // __SIZEOF_INT128__ // for-control index constraints // forall( T | { void ?{}( T &, zero_t ); void ?{}( T &, one_t ); T ?+=?( T &, T ); T ?-=?( T &, T ); int ?
• libcfa/src/Makefile.am

 rdd1cc02 math.hfa \ time_t.hfa \ bits/algorithm.hfa \ bits/align.hfa \ bits/containers.hfa \ memory.hfa \ parseargs.hfa \ parseconfig.hfa \ rational.hfa \ stdlib.hfa \ containers/pair.hfa \ containers/result.hfa \ containers/string.hfa \ containers/string_res.hfa \ containers/vector.hfa \ device/cpu.hfa libsrc = ${inst_headers_src}${inst_headers_src:.hfa=.cfa} \ assert.cfa \ bits/algorithm.hfa \ bits/debug.cfa \ exception.c \ concurrency/invoke.h \ concurrency/future.hfa \ concurrency/kernel/fwd.hfa concurrency/kernel/fwd.hfa \ concurrency/mutex_stmt.hfa inst_thread_headers_src = \
• libcfa/src/concurrency/kernel/startup.cfa

 rdd1cc02 register_tls( mainProcessor ); mainThread->last_cpu = __kernel_getcpu(); //initialize the global state variables state = Start; self_cor{ info }; last_cpu = __kernel_getcpu(); curr_cor = &self_cor; curr_cluster = mainCluster;

• libcfa/src/heap.cfa

 rdd1cc02 // Created On       : Tue Dec 19 21:58:35 2017 // Last Modified By : Peter A. Buhr // Last Modified On : Sat May 22 08:46:39 2021 // Update Count     : 1036 // Last Modified On : Mon Aug  9 19:03:02 2021 // Update Count     : 1040 // } // prtUnfreed extern int cfa_main_returned;                                                   // from bootloader.cf extern "C" { void heapAppStart() {                                                           // called by __cfaabi_appready_startup void heapAppStop() {                                                            // called by __cfaabi_appready_startdown fclose( stdin ); fclose( stdout ); prtUnfreed(); if ( cfa_main_returned ) prtUnfreed();                  // do not check unfreed storage if exit called } // heapAppStop } // extern "C"
• libcfa/src/memory.cfa

 rdd1cc02 forall(T &) T * release(unique_ptr(T) & this) { T * data = this.data; this.data = 0p; return data; } forall(T &) int ?==?(unique_ptr(T) const & this, unique_ptr(T) const & that) { return this.data == that.data;
• libcfa/src/memory.hfa

 rdd1cc02 forall(T &) T * release(unique_ptr(T) & this); forall(T &) int ?==?(unique_ptr(T) const & this, unique_ptr(T) const & that); forall(T &)
• src/AST/Convert.cpp

 rdd1cc02 } const ast::Stmt * visit( const ast::MutexStmt * node ) override final { if ( inCache( node ) ) return nullptr; auto stmt = new MutexStmt( get().accept1( node->stmt ), get().acceptL( node->mutexObjs ) ); return stmtPostamble( stmt, node ); } TypeSubstitution * convertTypeSubstitution(const ast::TypeSubstitution * src) { } virtual void visit( const MutexStmt * old ) override final { if ( inCache( old ) ) return; this->node = new ast::MutexStmt( old->location, GET_ACCEPT_1(stmt, Stmt), GET_ACCEPT_V(mutexObjs, Expr) ); cache.emplace( old, this->node ); } // TypeSubstitution shouldn't exist yet in old. ast::TypeSubstitution * convertTypeSubstitution(const TypeSubstitution * old) {
• src/AST/Fwd.hpp

 rdd1cc02 class NullStmt; class ImplicitCtorDtorStmt; class MutexStmt; class Expr;
• src/AST/Node.cpp

 rdd1cc02 template class ast::ptr_base< ast::ImplicitCtorDtorStmt, ast::Node::ref_type::weak >;