# Changeset 660665f

Ignore:
Timestamp:
Jun 29, 2021, 5:35:19 PM (7 months ago)
Branches:
jacob/cs343-translation, master, new-ast-unique-expr
Children:
Parents:
5a46e09 (diff), d02e547 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
84 edited
1 moved

Unmodified
Removed
• ## INSTALL

 r5a46e09 cfa-cc: The Cforall->C Compiler System cfa-cc: Cforall to C Trans-compiler ====================================== Cforall is built using GNU Make and the GNU Autoconf system.  It also requires g++ version >= 4.6, bison and flex.  On systems where GNU Make is the default g++ version >= 6, bison and flex.  On systems where GNU Make is the default make, the system is built by entering the commands: For devs using the root git: For developers using the root git: ./autogen.sh ./configure make make install $./autogen.sh$ ./configure [ --prefix=/some/directory ] $make -j 8 install For users using the distributed tarball: For users using the distributed tarball / github: ./configure make make install$ ./configure $make -j 8 install Options for 'configure' ----------------------- The script 'configure' accepts many command line arguments. Run './configure --help' to see a list of all of them. This document attempts to summarize the most useful arguments. where 8 is the number of CPUs on your computer. --prefix=/some/directory controls the path prefix common to all installed cfa-cc components. Some components are installed in /some/directory/bin, others in /some/directory/lib. If unspecified, this defaults to /usr/local. To use (a subdirectory of) your home directory,${HOME}/some/dir works, but it is important not to put quotes around the directory path; Cforall may appear to build, but the installed version may not work properly. Options for configure ====================================== The script 'configure' accepts many command-line arguments.  Run $./configure --help to list them. The most common argument is: --prefix=/some/directory controls the path prefix common to all installed cfa-cc components. Components are installed in directories bin and lib. If unspecified, prefix defaults to /usr/local. 
To use (a subdirectory of) your home directory,${HOME}/some/dir, but do not put quotes around the directory path; Cforall may appear to build, but the installed version may not work properly. Build Test ====================================== $cd ./test$ make -j 8 all-tests The tests take about 2-5 minutes and can be stopped at any time.
• ## Jenkins/Distribute

 r5a46e09 import groovy.transform.Field // For skipping stages import org.jenkinsci.plugins.pipeline.modeldefinition.Utils //=========================================================================================================== //=========================================================================================================== node('master') { // Globals BuildDir  = pwd tmp: true SrcDir    = pwd tmp: false Settings  = null Version   = '' // Globals BuildDir  = null SrcDir    = null Settings  = null Version   = '' // Local variables def err = null def log_needed = false // Local variables def err = null def log_needed = false currentBuild.result = "SUCCESS" currentBuild.result = "SUCCESS" final commit, build node { //Wrap build to add timestamp to command line wrap([$class: 'TimestamperBuildWrapper']) { (commit, build) = prepare_build() } } final commit, build (commit, build) = prepare_build() node('x64') { BuildDir = pwd tmp: true SrcDir = pwd tmp: false Tools.Clean() Tools.Checkout( commit ) Version = GetVersion( build ) Configure() Package() Test() Archive() } // Update the build directories when exiting the node node('x64') { //Wrap build to add timestamp to command line wrap([$class: 'TimestamperBuildWrapper']) { BuildDir  = pwd tmp: true SrcDir    = pwd tmp: false Tools.Clean() Tools.Checkout( commit ) Version = GetVersion( build ) Configure() Package() Test() Archive() } }
• ## Jenkins/FullBuild

 r5a46e09 //=========================================================================================================== node ('master') { node { def err = null parallel ( gcc_8_x86_new: { trigger_build( 'gcc-8',   'x86' ) }, gcc_7_x86_new: { trigger_build( 'gcc-7',   'x86' ) }, gcc_6_x86_new: { trigger_build( 'gcc-6',   'x86' ) }, gcc_9_x64_new: { trigger_build( 'gcc-9',   'x64' ) }, gcc_8_x64_new: { trigger_build( 'gcc-8',   'x64' ) }, gcc_7_x64_new: { trigger_build( 'gcc-7',   'x64' ) }, gcc_6_x64_new: { trigger_build( 'gcc-6',   'x64' ) }, gcc_5_x64_new: { trigger_build( 'gcc-5',   'x64' ) }, clang_x64_new: { trigger_build( 'clang',   'x64' ) }, gcc_08_x86_new: { trigger_build( 'gcc-8',   'x86' ) }, gcc_07_x86_new: { trigger_build( 'gcc-7',   'x86' ) }, gcc_06_x86_new: { trigger_build( 'gcc-6',   'x86' ) }, gcc_10_x64_new: { trigger_build( 'gcc-10',  'x64' ) }, gcc_09_x64_new: { trigger_build( 'gcc-9',   'x64' ) }, gcc_08_x64_new: { trigger_build( 'gcc-8',   'x64' ) }, gcc_07_x64_new: { trigger_build( 'gcc-7',   'x64' ) }, gcc_06_x64_new: { trigger_build( 'gcc-6',   'x64' ) }, clang_x64_new:  { trigger_build( 'clang',   'x64' ) }, ) } if(result.result != 'SUCCESS') { sh("wget -q -O - http://localhost:8084/jenkins/job/Cforall/job/master/${result.number}/consoleText") sh("wget -q -O - https://cforall.uwaterloo.ca/jenkins/job/Cforall/job/master/${result.number}/consoleText") error(result.result) } //Email notification on a full build failure def promote_email(boolean success) { echo('notifying users') node { echo('notifying users') def result = success ? "PROMOTE - SUCCESS" : "PROMOTE - FAILURE" def result = success ? "PROMOTE - SUCCESS" : "PROMOTE - FAILURE" //Since tokenizer doesn't work, figure stuff out from the environnement variables and command line //Configurations for email format def email_subject = "[cforall git][${result}]" def email_body = """ This is an automated email from the Jenkins build machine. 
It was generated following the result of the C\u2200 nightly build. //Since tokenizer doesn't work, figure stuff out from the environnement variables and command line //Configurations for email format def email_subject = "[cforall git][${result}]" def email_body = """

This is an automated email from the Jenkins build machine. It was generated following the result of the C\u2200 nightly build.

Check console output at ${env.BUILD_URL} to view the results. Check console output at${env.BUILD_URL} to view the results.

- Status --------------------------------------------------------------

- Status --------------------------------------------------------------

${result}${result}

- Performance ---------------------------------------------------------

- Performance ---------------------------------------------------------

- Logs ----------------------------------------------------------------

"""

- Logs ----------------------------------------------------------------

""" def email_to = "cforall@lists.uwaterloo.ca" def email_to = "cforall@lists.uwaterloo.ca" //send email notification emailext body: email_body, subject: email_subject, to: email_to, attachLog: !success //send email notification emailext body: email_body, subject: email_subject, to: email_to, attachLog: !success } }
• ## Jenkins/tools.groovy

 r5a46e09 } PrevGitOldRef = '' PrevGitNewRef = '' def GitLogMessage(String oldRef = '', String newRef = '') { if (!oldRef) { if(!PrevGitOldRef) { return "\nERROR retrieveing current git information!\n"  } else { oldRef = PrevGitOldRef } } if (!newRef) { if(!PrevGitNewRef) { return "\nERROR retrieveing previous git information!\n" } else { newRef = PrevGitNewRef } } def ConstructGitLogMessage(String oldRef, String newRef) { def revText = sh(returnStdout: true, script: "git rev-list ${oldRef}..${newRef}").trim() def revList = SplitLines( revText ) gitDiff = gitDiff.replace('[m', '') PrevGitOldRef = oldRef PrevGitNewRef = newRef return """

- Changes -------------------------------------------------------------

return """
The branch ${env.BRANCH_NAME} has been updated.${gitUpdate}

• ## benchmark/io/io_uring.h

 r5a46e09 extern "C" { #ifndef _GNU_SOURCE         /* See feature_test_macros(7) */ #define _GNU_SOURCE         /* See feature_test_macros(7) */ #endif #include #include
• ## doc/bibliography/pl.bib

 r5a46e09 address     = {Waterloo Ontario, Canada}, month       = sep, year        = 2018, year        = 2020, note        = {\href{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uC++.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-pub/\-uSystem/uC++.pdf}}, } author      = {Martin Karsten}, title       = {{libfibre:~User-Level Threading Runtime}}, howpublished= {\href{https://git.uwaterloo.ca/mkarsten/libfibre} {https://\-git.uwaterloo.ca/\-mkarsten/\-libfibre}}, howpublished= {\href{https://git.uwaterloo.ca/mkarsten/libfibre}{https://\-git.uwaterloo.ca/\-mkarsten/\-libfibre}}, note        = {[Online; accessed 2020-04-15]}, }
• ## doc/theses/andrew_beach_MMath/cfalab.sty

• ## doc/theses/andrew_beach_MMath/existing.tex

 r5a46e09 \chapter{\CFA Existing Features} \chapter{\CFA{} Existing Features} \label{c:existing} existing C code-base allowing programmers to learn \CFA on an as-needed basis. Only those \CFA features pertaining to this thesis are discussed.  Many of the \CFA syntactic and semantic features used in the thesis should be fairly obvious to the reader. Only those \CFA features pertaining to this thesis are discussed. Also, only new features of \CFA will be discussed, a familiarity with C or C-like languages is assumed. \section{Overloading and \lstinline{extern}} // name mangling on by default int i; // _X1ii_1 @extern "C"@ {  // disables name mangling extern "C" {  // disables name mangling int j; // j @extern "Cforall"@ {  // enables name mangling extern "Cforall" {  // enables name mangling int k; // _X1ki_1 } Reference-types are written the same way as a pointer-type but each asterisk (@*@) is replaced with a ampersand (@&@); this includes cv-qualifiers and multiple levels of reference, \eg: this includes cv-qualifiers and multiple levels of reference. Generally, references act like pointers with an implicate dereferencing operation added to each use of the variable. These automatic dereferences may be disabled with the address-of operator (@&@). % Check to see if these are generating errors. \begin{minipage}{0,5\textwidth} With references: int && rri = ri; rri = 3; &ri = &j; // reference assignment &ri = &j; ri = 5; \end{cfa} int ** ppi = π **ppi = 3; pi = &j; // pointer assignment pi = &j; *pi = 5; \end{cfa} \end{minipage} References are intended for cases where you would want to use pointers but would References are intended to be used when you would use pointers but would be dereferencing them (almost) every usage. In most cases a reference can just be thought of as a pointer that automatically puts a dereference in front of each of its uses (per-level of reference). 
The address-of operator (@&@) acts as an escape and removes one of the automatic dereference operations. Mutable references may be assigned by converting them to a pointer with a @&@ and then assigning a pointer to them, as in @&ri = &j;@ above. Mutable references may be assigned to by converting them to a pointer with a @&@ and then assigning a pointer to them, as in @&ri = &j;@ above \section{Operators} In general, operator names in \CFA are constructed by bracketing an operator token with @?@, which indicates the position of the arguments. For example, \CFA implements operator overloading by providing special names. Operator uses are translated into function calls using these names. These names are created by taking the operator symbols and joining them with @?@s to show where the arguments go. For example, infixed multiplication is @?*?@ while prefix dereference is @*?@. This syntax make it easy to tell the difference between prefix operations (such as @++?@) and post-fix operations (@?++@). An operator name may describe any function signature (it is just a name) but only certain signatures may be called in operator form. \begin{cfa} int ?+?( int i, int j, int k ) { return i + j + k; } { sout | ?+?( 3, 4, 5 ); // no infix form } \end{cfa} Some near-misses" for unary/binary operator prototypes generate warnings. \begin{cfa} point ?+?(point a, point b) { return point{a.x + b.x, a.y + b.y}; } bool ?==?(point a, point b) { return a.x == b.x && a.y == b.y; } { assert(point{1, 2} + point{3, 4} == point{4, 6}); } \end{cfa} Note that these special names are not limited to just being used for these operator functions, and may be used name other declarations. Some near misses", that will not match an operator form but looks like it may have been supposed to, will generate wantings but otherwise they are left alone. 
%\subsection{Constructors and Destructors} Both constructors and destructors are operators, which means they are functions with special operator names rather than type names in \Cpp. The special operator names may be used to call the functions explicitly (not allowed in \Cpp for constructors). The special name for a constructor is @?{}@, where the name @{}@ comes from the initialization syntax in C, \eg @Structure s = {...}@. % That initialization syntax is also the operator form. \CFA generates a constructor call each time a variable is declared, passing the initialization arguments to the constructor. \begin{cfa} struct Structure { ... }; void ?{}(Structure & this) { ... } { Structure a; Structure b = {}; } void ?{}(Structure & this, char first, int num) { ... } { Structure c = {'a', 2}; } \end{cfa} Both @a@ and @b@ are initialized with the first constructor, while @c@ is initialized with the second. Currently, there is no general way to skip initialization. special operator names may be used to call the functions explicitly. % Placement new means that this is actually equivant to C++. The special name for a constructor is @?{}@, which comes from the initialization syntax in C, \eg @Example e = { ... }@. \CFA will generate a constructor call each time a variable is declared, passing the initialization arguments to the constructort. \begin{cfa} struct Example { ... }; void ?{}(Example & this) { ... } { Example a; Example b = {}; } void ?{}(Example & this, char first, int num) { ... } { Example c = {'a', 2}; } \end{cfa} Both @a@ and @b@ will be initalized with the first constructor, while @c@ will be initalized with the second. Currently, there is no general way to skip initialation. % I don't like the \^{} symbol but $^\wedge$ isn't better. Similarly, destructors use the special name @^?{}@ (the @^@ has no special meaning).  Normally, they are implicitly called on a variable when it goes out of scope but they can be called explicitly as well. 
\begin{cfa} void ^?{}(Structure & this) { ... } { Structure d; Similarly destructors use the special name @^?{}@ (the @^@ has no special meaning). These are a normally called implicitly called on a variable when it goes out of scope. They can be called explicitly as well. \begin{cfa} void ^?{}(Example & this) { ... } { Example d; } // <- implicit destructor call \end{cfa} Whenever a type is defined, \CFA creates a default zero-argument Whenever a type is defined, \CFA will create a default zero-argument constructor, a copy constructor, a series of argument-per-field constructors and a destructor. All user constructors are defined after this. void do_once(double y) { ... } int quadruple(int x) { void do_once(int y) { y = y * 2; } // replace global do_once do_twice(x); // use local do_once do_twice(x + 1.5); // use global do_once void do_once(int & y) { y = y * 2; } do_twice(x); return x; } \end{cfa} Specifically, the complier deduces that @do_twice@'s T is an integer from the argument @x@. It then looks for the most \emph{specific} definition matching the argument @x@. It then looks for the most specific definition matching the assertion, which is the nested integral @do_once@ defined within the function. The matched assertion function is then passed as a function pointer to @do_twice@ and called within it.  The global definition of @do_once@ is used for the second call because the float-point argument is a better match. to @do_twice@ and called within it. The global definition of @do_once@ is ignored, however if quadruple took a @double@ argument then the global definition would be used instead as it would be a better match. % Aaron's thesis might be a good reference here. To avoid typing long lists of assertions, constraints can be collect into Each coroutine has a @main@ function, which takes a reference to a coroutine object and returns @void@. \begin{cfa}[numbers=left] %[numbers=left] Why numbers on this one? 
\begin{cfa} void main(CountUp & this) { for (unsigned int next = 0 ; true ; ++next) {
• ## doc/theses/andrew_beach_MMath/features.tex

 r5a46e09 \label{c:features} This chapter covers the design and user interface of the \CFA EHM, % or exception system. This chapter covers the design and user interface of the \CFA EHM and begins with a general overview of EHMs. It is not a strict definition of all EHMs nor an exhaustive list of all possible features. However it does cover the most common structures and features found in them. However it does cover the most common structure and features found in them. \section{Overview of EHMs} % We should cover what is an exception handling mechanism and what is an % exception before this. Probably in the introduction. Some of this could % move there. \section{Raise / Handle} \subsection{Raise / Handle} An exception operation has two main parts: raise and handle. These terms are sometimes also known as throw and catch but this work uses These terms are sometimes known as throw and catch but this work uses throw/catch as a particular kind of raise/handle. These are the two parts that the user writes and may Some well known examples include the @throw@ statements of \Cpp and Java and the \code{Python}{raise} statement from Python. A raise may perform some other work (such as memory management) but for the the \code{Python}{raise} statement from Python. In real systems a raise may preform some other work (such as memory management) but for the purposes of this overview that can be ignored. A handler has three common features: the previously mentioned user code, a region of code they guard, and an exception label/condition that matches region of code they guard and an exception label/condition that matches certain exceptions. Only raises inside the guarded region and raising exceptions that match the label can be handled by a given handler. Different EHMs have different rules to pick a handler, if multiple handlers could be used, such as best match" or first found". 
If multiple handlers could can handle an exception, EHMs will define a rule to pick one, such as best match" or first found". The @try@ statements of \Cpp, Java and Python are common examples. All three region. \section{Propagation} \subsection{Propagation} After an exception is raised comes what is usually the biggest step for the EHM: finding and setting up the handler. The propagation from raise to handler can be broken up into three different tasks: searching for a handler, matching against the handler, and installing the handler. matching against the handler and installing the handler. \paragraph{Searching} thrown as it looks for handlers that have the raise site in their guarded region. This search includes handlers in the current function, as well as any in callers on the stack that have the function call in their guarded region. The search includes handlers in the current function, as well as any in callers on the stack that have the function call in their guarded region. \paragraph{Matching} Each handler found has to be matched with the raised exception. The exception label defines a condition that is used with the exception to decide if label defines a condition that is used with exception and decides if there is a match or not. In languages where the first match is used, this step is intertwined with searching: a match check is performed immediately after the search finds searching; a match check is preformed immediately after the search finds a possible handler. \section{Installing} \paragraph{Installing} After a handler is chosen it must be made ready to run. The implementation can vary widely to fit with the rest of the case when stack unwinding is involved. If a matching handler is not guarantied to be found, the EHM needs a If a matching handler is not guaranteed to be found, the EHM needs a different course of action for the case where no handler matches. 
This situation only occurs with unchecked exceptions as checked exceptions (such as in Java) can make the guarantee. This unhandled action can abort the program or install a very general handler. This unhandled action is usually very general, such as aborting the program. \paragraph{Hierarchy} A common way to organize exceptions is in a hierarchical structure. This organization is often used in object-orientated languages where the This pattern comes from object-orientated languages where the exception hierarchy is a natural extension of the object hierarchy. \end{center} A handler labelled with any given exception can handle exceptions of that A handler labeled with any given exception can handle exceptions of that type or any child type of that exception. The root of the exception hierarchy (here \code{C}{exception}) acts as a catch-all, leaf types catch single types % Could I cite the rational for the Python IO exception rework? \paragraph{Completion} After the handler has finished the entire exception operation has to complete \subsection{Completion} After the handler has finished, the entire exception operation has to complete and continue executing somewhere else. This step is usually simple, both logically and in its implementation, as the installation of the handler The EHM can return control to many different places, the most common are after the handler definition (termination) and after the raise (resumption). \paragraph{Communication} the most common are after the handler definition (termination) and after the raise (resumption). \subsection{Communication} For effective exception handling, additional information is often passed from the raise to the handler and back again. So far only communication of the exceptions' identity has been covered. A common communication method is putting fields into the exception instance and giving the handler access to them. References in the exception instance can push data back to the raise. 
A common communication method is putting fields into the exception instance and giving the handler access to them. Passing the exception by reference instead of by value can allow data to be passed in both directions. \section{Virtuals} Virtual types and casts are not part of \CFA's EHM nor are they required for any EHM. However, one of the best ways to support an exception hierarchy is via a virtual system among exceptions and used for exception matching. However, it is one of the best ways to support an exception hierarchy is via a virtual hierarchy and dispatch system. Ideally, the virtual system would have been part of \CFA before the work on exception handling began, but unfortunately it was not. Therefore, only the features and framework needed for the EHM were Hence, only the features and framework needed for the EHM were designed and implemented. Other features were considered to ensure that the structure could accommodate other desirable features in the future but they were not implemented. The rest of this section discusses the implemented subset of the virtual-system design. the structure could accommodate other desirable features in the future but they were not implemented. The rest of this section will only discuss the implemented subset of the virtual system design. The virtual system supports multiple trees" of types. Each tree is % A type's ancestors are its parent and its parent's ancestors. % The root type has no ancestors. % A type's decedents are its children and its children's decedents. % A type's descendants are its children and its children's descendants. Every virtual type also has a list of virtual members. Children inherit of object-orientated programming, and can be of any type. \PAB{I do not understand these sentences. Can you add an example? $\Rightarrow$ \CFA still supports virtual methods as a special case of virtual members. 
Function pointers that take a pointer to the virtual type are modified with each level of inheritance so that refers to the new type. This means an object can always be passed to a function in its virtual table as if it were a method.} as if it were a method. \todo{Clarify (with an example) virtual methods.} Each virtual type has a unique id. into a virtual table type. Each virtual type has a pointer to a virtual table as a hidden field. \PAB{God forbid, maybe you need a UML diagram to relate these entities.} \todo{Might need a diagram for virtual structure.} Up until this point the virtual system is similar to ones found in types can begin to satisfy a trait, stop satisfying a trait or satisfy the same trait in a different way at any lexical location in the program. In this sense, they are open" as they can change at any time. This capability means it is impossible to pick a single set of functions that represent the type's implementation across the program. In this sense, they are open" as they can change at any time. This capability means it is impossible to pick a single set of functions that represent the type's implementation across the program. \CFA side-steps this issue by not having a single virtual table for each type. A user can define virtual tables that are filled in at their declaration and given a name. Anywhere that name is visible, even if declaration and given a name. Anywhere that name is visible, even if it is defined locally inside a function (although that means it does not have a static lifetime), it can be used. through the object. \PAB{The above explanation is very good!} While much of the virtual infrastructure is created, it is currently only used internally for exception handling. The only user-level feature is the virtual cast cast, which is the same as the \Cpp \code{C++}{dynamic_cast}. \label{p:VirtualCast} \begin{cfa} (virtual TYPE)EXPRESSION \end{cfa} which is the same as the \Cpp \code{C++}{dynamic_cast}. 
Note, the syntax and semantics matches a C-cast, rather than the function-like \Cpp syntax for special casts. Both the type of @EXPRESSION@ and @TYPE@ must be The trait is defined over two types, the exception type and the virtual table type. Each exception type should have a single virtual table type. There are no actual assertions in this trait because currently the trait system cannot express them (adding such assertions would be part of There are no actual assertions in this trait because the trait system cannot express them yet (adding such assertions would be part of completing the virtual system). The imaginary assertions would probably come from a trait defined by the virtual system, and state that the exception type is a virtual type, is a descendent of @exception_t@ (the base exception type) is a virtual type, is a descendant of @exception_t@ (the base exception type) and note its virtual table type. }; \end{cfa} Both traits ensure a pair of types are an exception type and its virtual table, Both traits ensure a pair of types are an exception type, its virtual table type and defines one of the two default handlers. The default handlers are used as fallbacks and are discussed in detail in \vref{s:ExceptionHandling}. \section{Exception Handling} \label{s:ExceptionHandling} As stated, \CFA provides two kinds of exception handling: termination and resumption. As stated, \CFA provides two kinds of exception handling: termination and resumption. These twin operations are the core of \CFA's exception handling mechanism. This section covers the general patterns shared by the two operations and then go on to cover the details of each individual operation. This section will cover the general patterns shared by the two operations and then go on to cover the details each individual operation. Both operations follow the same set of steps. Both start with the user performing a raise on an exception. Both start with the user preforming a raise on an exception. 
Then the exception propagates up the stack. If a handler is found the exception is caught and the handler is run. After that control returns to a point specific to the kind of exception. If the search fails a default handler is run, and if it returns, control continues after the raise. Note, the default handler may further change control flow rather than return. After that control continues at a raise-dependent location. If the search fails a default handler is run and, if it returns, then control continues after the raise. This general description covers what the two kinds have in common. Differences include how propagation is performed, where exception continues Differences include how propagation is preformed, where exception continues after an exception is caught and handled and which default handler is run. \subsection{Termination} \label{s:Termination} Termination handling is the familiar kind and used in most programming languages with exception handling. The throw copies the provided exception into managed memory to ensure the exception is not destroyed when the stack is unwound. the exception is not destroyed if the stack is unwound. It is the user's responsibility to ensure the original exception is cleaned up whether the stack is unwound or not. Allocating it on the stack is usually sufficient. Then propagation starts the search. \CFA uses a first match" rule so matching is performed with the copied exception as the search continues. It starts from the throwing function and proceeds towards the base of the stack, % How to say propagation starts, its first sub-step is the search. Then propagation starts with the search. \CFA uses a first match" rule so matching is preformed with the copied exception as the search continues. It starts from the throwing function and proceeds towards base of the stack, from callee to caller. 
At each stack frame, a check is made for resumption handlers defined by the \end{cfa} When viewed on its own, a try statement simply executes the statements in \snake{GUARDED_BLOCK} and when those are finished, the try statement finishes. in \snake{GUARDED_BLOCK} and when those are finished, the try statement finishes. However, while the guarded statements are being executed, including any invoked functions, all the handlers in these statements are included on the search path. Hence, if a termination exception is raised, the search includes the added handlers associated with the guarded block and those further up the stack from the guarded block. invoked functions, all the handlers in these statements are included in the search path. Hence, if a termination exception is raised these handlers may be matched against the exception and may handle it. Exception matching checks the handler in each catch clause in the order they appear, top to bottom. If the representation of the raised exception type is the same or a descendant of @EXCEPTION_TYPE@$_i$ then @NAME@$_i$ (if provided) is bound to a pointer to the exception and the statements in @HANDLER_BLOCK@$_i$ are executed. If control reaches the end of the handler, the exception is (if provided) is bound to a pointer to the exception and the statements in @HANDLER_BLOCK@$_i$ are executed. If control reaches the end of the handler, the exception is freed and control continues after the try statement. If no termination handler is found during the search, the default handler (\defaultTerminationHandler) visible at the raise statement is called. Through \CFA's trait system, the best match at the raise sight is used. This function is run and is passed the copied exception. If the default handler returns, control continues after the throw statement. If no termination handler is found during the search then the default handler (\defaultTerminationHandler) visible at the raise statement is run. 
Through \CFA's trait system the best match at the raise statement will be used. This function is run and is passed the copied exception. If the default handler is run control continues after the raise statement. There is a global @defaultTerminationHandler@ that is polymorphic over all termination exception types. Since it is so general, a more specific handler can be termination exception types. Since it is so general a more specific handler can be defined and is used for those types, effectively overriding the handler for a particular exception type. matched a closure is taken from up the stack and executed, after which the raising function continues executing. These are most often used when a potentially repairable error occurs, some handler is found on the stack to fix it, and the raising function can continue with the correction. Another common usage is dynamic event analysis, \eg logging, without disrupting control flow. Note, if an event is raised and there is no interest, control continues normally. \PAB{We also have \lstinline{report} instead of \lstinline{throwResume}, \lstinline{recover} instead of \lstinline{catch}, and \lstinline{fixup} instead of \lstinline{catchResume}. You may or may not want to mention it. You can still stick with \lstinline{catch} and \lstinline{throw/catchResume} in the thesis.} The common uses for resumption exceptions include potentially repairable errors, where execution can continue in the same function once the error is corrected, and ignorable events, such as logging where nothing needs to happen and control should always continue from the same place. A resumption raise is started with the @throwResume@ statement: throwResume EXPRESSION; \end{cfa} \todo{Decide on a final set of keywords and use them everywhere.} It works much the same way as the termination throw. The expression must return a reference to a resumption exception, @is_resumption_exception@ at the call site. 
The assertions from this trait are available to the exception system, while handling the exception. Resumption does not need to copy the raised exception, as the stack is not unwound. The exception and any values on the stack remain in scope, while the resumption is handled. The EHM then begins propogation. The search starts from the raise in the resuming function and proceeds towards the base of the stack, from callee to caller. the exception system while handling the exception. At run-time, no exception copy is made. Resumption does not unwind the stack nor otherwise remove values from the current scope, so there is no need to manage memory to keep things in scope. The EHM then begins propagation. The search starts from the raise in the resuming function and proceeds towards the base of the stack, from callee to caller. At each stack frame, a check is made for resumption handlers defined by the @catchResume@ clauses of a @try@ statement. kind of raise. When a try statement is executed, it simply executes the statements in the @GUARDED_BLOCK@ and then returns. @GUARDED_BLOCK@ and then finishes. However, while the guarded statements are being executed, including any invoked functions, all the handlers in these statements are included on the search path. Hence, if a resumption exception is raised the search includes the added handlers associated with the guarded block and those further up the stack from the guarded block. invoked functions, all the handlers in these statements are included in the search path. Hence, if a resumption exception is raised these handlers may be matched against the exception and may handle it. Exception matching checks the handler in each catch clause in the order the raise statement that raised the handled exception. Like termination, if no resumption handler is found during the search, the default handler (\defaultResumptionHandler) visible at the raise statement is called. 
It uses the best match at the raise site according to \CFA's overloading
rules.
The default handler is passed the exception given to the throw.
When the default handler finishes
Like termination, if no resumption handler is found during the search,
the default handler (\defaultResumptionHandler) visible at the raise
statement is called.
It will use the best match at the raise site according to \CFA's
overloading rules.
The default handler is passed the exception given to the raise.
When the default handler finishes
execution continues after the raise statement.
There is a global \defaultResumptionHandler{} that is polymorphic over all
resumption exception types and performs a termination throw on the exception.
The \defaultTerminationHandler{} can be customized by introducing a new or
better match as well.
There is a global \defaultResumptionHandler{} that is polymorphic over all
resumption exceptions and performs a termination throw on the exception.
The \defaultTerminationHandler{} can be overridden by providing a new
function that is a better match.
\subsubsection{Resumption Marking}
\label{s:ResumptionMarking}
A key difference between resumption and termination is that resumption does
not unwind the stack.
A side effect is that when a handler is matched and run, its try block
(the guarded statements) and every try statement searched before it are still
on the stack.
Their existence can lead to the recursive resumption problem.
and run its try block (the guarded statements) and every try statement
searched before it are still on the stack.
Their presence can lead to the recursive resumption problem.
The recursive resumption problem is any situation where a resumption handler
\end{cfa}
When this code is executed, the guarded @throwResume@ starts a
search and matches the handler in the @catchResume@ clause.
This call is placed on the top of the stack above the try-block.
The second throw searches the same try block and puts another instance
of the same handler on the stack leading to an infinite recursion.
search and matches the handler in the @catchResume@ clause.
This call is placed on the stack above the try-block.
The second raise then searches the same try block and puts another instance of
the same handler on the stack leading to infinite recursion.
While this situation is trivial and easy to avoid, much more complex cycles
can form with multiple handlers and different exception types.
To prevent all of these cases, the exception search marks the try statements
it visits.
A try statement is marked when a match check is performed with it and an
exception.
The statement is unmarked when the handling of that exception is completed
or the search completes without finding a handler.
While a try statement is marked, its handlers are never matched, effectively
skipping over them to the next try statement.
To prevent all of these cases, each try statement is ``marked'' from the time
the exception search reaches it to either when the matching handler completes
handling the exception or when the search reaches the base of the stack.
While a try statement is marked, its handlers are never matched, effectively
skipping over it to the next try statement.
\begin{center}
\end{center}
These rules mirror what happens with termination.
When a termination throw happens in a handler, the search does not look at
any handlers from the original throw to the original catch because that part
of the stack is unwound.
A resumption raise in the same situation wants to search the entire stack,
but with marking, the search does not match exceptions for try statements at
equivalent sections that would have been unwound by termination.
The symmetry between resumption and termination is why this pattern is picked.
Other patterns, such as marking just the handlers that caught the exception,
also work but lack the symmetry, meaning there are more rules to remember.
There are other sets of marking rules that could be used,
for instance, marking just the handlers that caught the exception,
would also prevent recursive resumption.
However, these rules mirror what happens with termination.
The try statements that are marked are the ones that would be removed from
the stack if this was a termination exception, that is those on the stack
between the handler and the raise statement.
This symmetry applies to the default handler as well, as both kinds of
default handlers are run at the raise statement,
rather than (physically or logically) at the bottom of the stack.
% In early development having the default handler happen after
% unmarking was just more useful. We assume that will continue.
\section{Conditional Catch}
Both termination and resumption handler clauses can be given an additional
condition to further control which exceptions they handle:
did not match.
The condition matching allows finer matching to check
The condition matching allows finer matching by checking
more kinds of information than just the exception type.
\begin{cfa}
// Can't handle a failure relating to f2 here.
\end{cfa}
In this example, the file that experienced the IO error is used to decide
In this example the file that experienced the IO error is used to decide
which handler should be run, if any at all.
\subsection{Comparison with Reraising}
A more popular way to allow handlers to match in more detail is to reraise
the exception after it has been caught, if it could not be handled here.
On the surface these two features seem interchangeable.
If @throw@ is used to start a termination reraise then these two statements
have the same behaviour:
On the surface these two features seem interchangeable.
If @throw;@ (no argument) starts a termination reraise,
which is the same as a raise but reuses the last caught exception,
then these two statements have the same behaviour:
\begin{cfa}
try {
}
\end{cfa}
However, if there are further handlers after this handler only the first is
checked. For multiple handlers on a single try block that could
handle the same exception, the equivalent translations to conditional
catch becomes more complex, resulting in multiple nested try blocks for all
possible reraises.
So while catch-with-reraise is logically equivalent to conditional catch,
there is a lexical explosion for the former.
\PAB{I think the following discussion makes an incorrect assumption.
A conditional catch CAN happen with the stack unwound.
Roy talked about this issue in Section 2.3.3 here: \newline
\url{http://plg.uwaterloo.ca/theses/KrischerThesis.pdf}}
Specifically for termination handling, a conditional catch happens before
the stack is unwound, but a reraise happens afterwards.
Normally this might only cause you to lose some debug information you could
get from a stack trace (and that can be side stepped entirely by collecting
information during the unwind).
But for \CFA there is another issue, if the exception is not handled the
default handler should be run at the site of the original raise.
There are two problems with this: the site of the original raise does not
exist anymore and the default handler might not exist anymore.
The site is always removed as part of the unwinding, often with the entirety
of the function it was in.
The default handler could be a stack allocated nested function removed during
the unwind.
This means actually trying to pretend the catch didn't happen, continuing the
original raise instead of starting a new one, is infeasible.
That is the expected behaviour for most languages and we can't replicate
that behaviour.
That is, they will have the same behaviour in isolation.
Two things can expose differences between these cases.
One is the existence of multiple handlers on a single try statement.
A reraise skips all later handlers on this try statement but a conditional
catch does not.
Hence, if an earlier handler contains a reraise later handlers are
implicitly skipped, with a conditional catch they are not.
Still, they are equivalently powerful,
both can be used to mimic the behaviour of the other,
as reraise can pack arbitrary code in the handler and conditional catches
can put arbitrary code in the predicate.
% I was struggling with a long explanation about some simple solutions,
% like repeating a condition on later handlers, and the general solution of
% merging everything together. I don't think it is useful though unless it's
% for a proof.
% https://en.cppreference.com/w/cpp/language/throw
The question then becomes ``Which is a better default?''
We believe that not skipping possibly useful handlers is a better default.
If a handler can handle an exception it should and if the handler can not
handle the exception then it is probably safer to have that explicitly
described in the handler itself instead of implicitly described by its
ordering with other handlers.
% Or you could just alter the semantics of the throw statement. The handler
% index is in the exception so you could use it to know where to start
% searching from in the current try statement.
% No place for the goto else; metaphor.
The other issue is all of the discussion above assumes that the only
way to tell apart two raises is the exception being raised and the remaining
search path.
This is not true generally, the current state of the stack can matter in
a number of cases, even only for a stack trace after a program abort.
But \CFA has a much more significant need of the rest of the stack,
the default handlers for both termination and resumption.
% For resumption it turns out it is possible to continue a raise after the
% exception has been caught, as if it hadn't been caught in the first place.
This becomes a problem combined with the stack unwinding used in termination exception handling. The stack is unwound before the handler is installed, and hence before any reraises can run. So if a reraise happens the previous stack is gone, the place on the stack where the default handler was supposed to run is gone, if the default handler was a local function it may have been unwound too. There is no reasonable way to restore that information, so the reraise has to be considered as a new raise. This is the strongest advantage conditional catches have over reraising, they happen before stack unwinding and avoid this problem. % The one possible disadvantage of conditional catch is that it runs user % code during the exception search. While this is a new place that user code % can be run destructors and finally clauses are already run during the stack % unwinding. % % https://www.cplusplus.com/reference/exception/current_exception/ %   exception_ptr current_exception() noexcept; % https://www.python.org/dev/peps/pep-0343/ \section{Finally Clauses} \label{s:FinallyClauses} Finally clauses are used to preform unconditional clean-up when leaving a scope and are placed at the end of a try statement after any handler clauses: The @FINALLY_BLOCK@ is executed when the try statement is removed from the stack, including when the @GUARDED_BLOCK@ finishes, any termination handler finishes, or during an unwind. finishes or during an unwind. The only time the block is not executed is if the program is exited before the stack is unwound. Not all languages with unwinding have finally clauses. Notably \Cpp does without it as destructors with RAII serve a similar role. Although destructors and finally clauses have overlapping usage cases, they have their own specializations, like top-level functions and lambda functions with closures. 
Destructors take more work if a number of unrelated, local variables without
destructors or dynamically allocated variables must be passed for
de-initialization.
Maintaining this destructor during local-block modification is a source of
errors.
A finally clause places local de-initialization inline with direct access to
all local variables.
without it as destructors, and the RAII design pattern, serve a similar role.
Although destructors and finally clauses can be used in the same cases,
they have their own strengths, similar to top-level functions and lambda
functions with closures.
Destructors take more work for their first use, but if there is clean-up code
that needs to be run every time a type is used they soon become much easier
to set-up.
On the other hand finally clauses capture the local context, so it is easy to
use when the clean-up is not dependent on the type of a variable or requires
information from multiple variables.
% To Peter: I think these are the main points you were going for.
\section{Cancellation}
raise, this exception is not used in matching, only to pass information about
the cause of the cancellation.
(This restriction also means matching cannot fail so there is no default
handler.)
(This also means matching cannot fail so there is no default handler.)
After @cancel_stack@ is called the exception is copied into the EHM's memory
and the current stack is unwound.
The result of a cancellation depends on the kind of stack that is being
unwound.
and the current stack is unwound.
The behaviour after that depends on the kind of stack being cancelled.
\paragraph{Main Stack}
After the main stack is unwound there is a program-level abort.
There are two reasons for this semantics.
The first is that it obviously had to do the abort
There are two reasons for these semantics.
The first is that it had to do this abort.
in a sequential program as there is nothing else to notify and the simplicity of keeping the same behaviour in sequential and concurrent programs is good. \PAB{I do not understand this sentence. $\Rightarrow$ Also, even in concurrent programs, there is no stack that an innate connection to, so it would have be explicitly managed.} Also, even in concurrent programs there may not currently be any other stacks and even if other stacks do exist, main has no way to know where they are. \paragraph{Thread Stack} and an implicit join (from a destructor call). The explicit join takes the default handler (@defaultResumptionHandler@) from its calling context while the implicit join provides its own, which does a program abort if the the implicit join provides its own; which does a program abort if the @ThreadCancelled@ exception cannot be handled. \PAB{Communication can occur during the lifetime of a thread using shared variable and \lstinline{waitfor} statements. Are you sure you mean communication here? Maybe you mean synchronization (rendezvous) point. $\Rightarrow$ Communication is done at join because a thread only has two points of communication with other threads: start and join.} The communication and synchronization are done here because threads only have two structural points (not dependent on user-code) where communication/synchronization happens: start and join. Since a thread must be running to perform a cancellation (and cannot be cancelled from another stack), the cancellation must be after start and before the join, so join is use. before the join, so join is used. % TODO: Find somewhere to discuss unwind collisions. a destructor and prevents cascading the error across multiple threads if the user is not equipped to deal with it. Also you can always add an explicit join if that is the desired behaviour. It is always possible to add an explicit join if that is the desired behaviour. 
With explicit join and a default handler that triggers a cancellation, it is possible to cascade an error across any number of threads, cleaning up each in turn, until the error is handled or the main thread is reached. \paragraph{Coroutine Stack} satisfies the @is_coroutine@ trait. After a coroutine stack is unwound, control returns to the @resume@ function that most recently resumed it. The resume reports a @CoroutineCancelled@ exception, which contains references to the cancelled that most recently resumed it. @resume@ reports a @CoroutineCancelled@ exception, which contains a references to the cancelled coroutine and the exception used to cancel it. The @resume@ function also takes the \defaultResumptionHandler{} from the caller's context and passes it to the internal cancellation. caller's context and passes it to the internal report. A coroutine knows of two other coroutines, its starter and its last resumer. (in terms of coroutine state) called resume on this coroutine, so the message is passed to the latter. With a default handler that triggers a cancellation, it is possible to cascade an error across any number of coroutines, cleaning up each in turn, until the error is handled or a thread stack is reached.
• ## doc/theses/andrew_beach_MMath/future.tex

 r5a46e09 \section{Language Improvements} \todo{Future/Language Improvements seems to have gotten mixed up. It is presented as waiting on language improvements" but really its more non-research based impovements.} \CFA is a developing programming language. As such, there are partially or unimplemented features of the language (including several broken components) that I had to workaround while building an exception handling system largely in the \CFA language (some C components).  The following are a few of these issues, and once implemented/fixed, how this would affect the exception system. issues, and once implemented/fixed, how they would affect the exception system. \begin{itemize} \item The implementation of termination is not portable because it includes hand-crafted assembly statements. These sections must be ported by hand to hand-crafted assembly statements. The existing compilers cannot translate that for other platforms and those sections must be ported by hand to support more hardware architectures, such as the ARM processor. \item reference instead of a pointer. Since \CFA has a very general reference capability, programmers will want to use it. Once fixed, this capability should result in little or no change in the exception system. result in little or no change in the exception system but simplify usage. \item Termination handlers cannot use local control-flow transfers, \eg by @break@, The virtual system should be completed. It was not supposed to be part of this project, but was thrust upon it to do exception inheritance; hence, only minimal work was done. A draft for a complete virtual system is available but minimal work is done. A draft for a complete virtual system is available but it is not finalized.  A future \CFA project is to complete that work and then update the exception system that uses the current version. bad software engineering. 
Non-local/concurrent raise requires more coordination between the concurrency system
Non-local/concurrent raise requires more coordination between the concurrency system
and the exception system.
Many of the interesting design decisions centre around masking (controlling
which exceptions may be thrown at a stack). It
around masking, \ie controlling which exceptions may be thrown at a stack. It
would likely require more of the virtual system and would also affect how
default handlers are set.
\section{Checked Exceptions}
Checked exceptions make exceptions part of a function's type by adding the
Checked exceptions make exceptions part of a function's type by adding an
exception signature.
An exception signature must declare all checked
exceptions that could propagate from the function (either because they were
exceptions that could propagate from the function (either because they were
raised inside the function or came from a sub-function).
This improves safety by making sure every checked exception is either
handled or consciously
However checked exceptions were never seriously considered for this project
for two reasons.
The first is due to time constraints,
even copying an existing checked exception system would be pushing the
remaining time and trying to address
the second problem would take even longer.
The second problem is that checked exceptions have some real usability
trade-offs in
because they have significant trade-offs in usability
and code reuse in exchange for the increased safety.
These trade-offs are most problematic when trying to pass exceptions through
higher-order functions from the functions the user passed into the
higher-order function.
There are no well known solutions to this problem
that were satisfactory for \CFA (which carries some of C's
flexibility over safety design) so one would have to be researched
and developed.
that were satisfactory for \CFA (which carries some of C's flexibility
over safety design) so additional research is needed.
Follow-up work might add checked exceptions to \CFA,
possibly using polymorphic exception signatures,
a form of tunneling\cite{Zhang19} or
Follow-up work might add some form of checked exceptions to \CFA,
possibly using polymorphic exception signatures,
a form of tunneling\cite{Zhang19} or
checked and unchecked raises.
For instance, resumption could be extended to cover this use by allowing
local control flow out of it.
This approach would require an unwind as part of the
transition as there are stack frames that have to be removed.
This approach means there is no notify raise, but because \CFA does
not have exception signatures, a termination can be thrown from within any
resumption handler so there is already a way to mimic this in existing \CFA.
transition as there are stack frames that have to be removed between where
the resumption handler is installed and where it is defined.
This approach would not require, but might benefit from, a special statement
to leave the handler.
Currently, mimicking this behaviour in \CFA is possible by throwing a
termination inside a resumption handler.
% Maybe talk about the escape; and escape CONTROL_STMT; statements or how
• ## doc/theses/andrew_beach_MMath/implement.tex

 r5a46e09 \label{c:implement}
The implementation work for this thesis covers two components: the virtual
% Local Helpers:
\newcommand\transformline[1][becomes...]{
\hrulefill#1\hrulefill
\medskip
}
The implementation work for this thesis covers the two components: virtual
system and exceptions.
Each component is discussed in detail.
\todo{Talk about constructors for virtual types (after they are working).}
This is what binds an instance of a virtual type to a virtual table. This
pointer can be used as an identity check. It can also be used to access the
The virtual table pointer binds an instance of a virtual type to a
virtual table.
The pointer is also the table's id and how the system accesses the
virtual table and the virtual members there.
\subsection{Type Id}
Every virtual type has a unique id.
Type ids can be compared for equality (the types represented are the same)
Type ids can be compared for equality,
which checks if the types represented are the same,
or used to access the type's type information.
The type information currently is only the parent's type id or, if the
type has no parent, zero.
type has no parent, the null pointer.
The ids are implemented as pointers to the type's type information instance.
Dereferencing the pointer gets the type information.
By going back-and-forth between the type id and the type info one can find
every ancestor of a virtual type.
It also pushes the issue of creating a unique value (for
Dereferencing the pointer gets the type information.
The ancestors of a virtual type are found by traversing type ids through
the type information.
The information pushes the issue of creating a unique value (for
the type id) to the problem of creating a unique instance (for type
information) which the linker can solve.
Advanced linker support is required because there is no place that appears
only once to attach the type information to.
There should be one structure definition but it is included in multiple
translation units.
Each virtual table definition should be unique
but there are an arbitrary number of those.
So the special section prefix \texttt{.gnu.linkonce} is used.
With a unique suffix (making the entire section name unique) the linker will
remove multiple definitions making sure only one version exists after linking.
Then it is just a matter of making sure there is a unique name for each type.
This is done in three phases.
The first phase is to generate a new structure definition to store the type
information), which the linker can solve.
The advanced linker support is used here to avoid having to create
a new declaration to attach this data to.
With C/\CFA's header/implementation file divide for something to appear
exactly once it must come from a declaration that appears in exactly one
implementation file; the declarations in header files may exist only once
they can be included in many different translation units.
Therefore, the structure's declaration will not work.
Neither will attaching the type information to the virtual table -- although
vtable declarations are in implementation files they are not unique, see
\autoref{ss:VirtualTable}.
Instead the same type information is generated multiple times and then
the new attribute \snake{cfa_linkonce} is used to remove duplicates.
Type information is constructed as follows:
\begin{enumerate}
\item
Use the type's name to generate a name for the type information structure.
This is saved so it may be reused.
\item
Generate a new structure definition to store the type
information. The layout is the same in each case, just the parent's type id,
but the types are changed.
The structure's name is changed, it is based off the virtual type's name,
and the type of the parent's type id.
but the types used change from instance to instance.
The generated name is used for both this structure and, if relevant, the
parent pointer.
If the virtual type is polymorphic then the type information structure is
polymorphic as well, with the same polymorphic arguments.
The second phase is to generate an instance of the type information with an
almost unique name, generated by mangling the virtual type name.
The third phase is implicit with \CFA's overloading scheme.
\CFA mangles names with type information so that all of the symbols
exported to the linker are unique even if in \CFA code they are the same.
Having two declarations with the same name and same type is forbidden
because it is impossible for overload resolution to pick between them.
This is why a unique type is generated for each virtual type.
Polymorphic information is included in this mangling so polymorphic
types will have separate instances for each set of polymorphic arguments.
\begin{cfa}
struct TYPE_ID_TYPE {
PARENT_ID_TYPE const * parent;
\item
A separate name for instances is generated from the type's name.
\item
The definition is generated and initialised.
The parent id is set to the null pointer or to the address of the parent's
type information instance. Name resolution handles the rest.
\item
\CFA's name mangler does its regular name mangling encoding the type
of the declaration into the instance name.
This gives a completely unique name including different instances of the
same polymorphic type.
\end{enumerate}
\todo{The list is making me realise, some of this isn't ordered.}
Writing that code manually, with helper macros for the early name mangling,
would look like this:
\begin{cfa}
struct INFO_TYPE(TYPE) {
INFO_TYPE(PARENT) const * parent;
};
__attribute__((cfa_linkonce))
TYPE_ID_TYPE const TYPE_ID_NAME = {
&PARENT_ID_NAME,
INFO_TYPE(TYPE) const INFO_NAME(TYPE) = {
&INFO_NAME(PARENT),
};
\end{cfa}
\subsubsection{cfa\_linkonce Attribute}
\subsubsection{\lstinline{cfa\_linkonce} Attribute}
% I just realised: This is an extension of the inline keyword.
% An extension of C's at least, it is very similar to C++'s.
Another feature added to \CFA is a new attribute: \texttt{cfa\_linkonce}.
This attribute can be put on an object or function definition
(any global declaration with a name and a type).
This allows you to define that object or function multiple times.
All definitions should have the link-once attribute on them and all
should be identical.
The simplest way to use it is to put a definition in a header
where the forward declaration would usually go.
This is how it is used for type-id instances.
There is no unique location associated with a type except for the type
definition which is in a header.
This allows the unique type-id object to be generated there.
Internally @cfa_linkonce@ removes all @section@ attributes
from the declaration (as well as itself) and replaces them with
This attribute is attached to an object or function definition
(any global declaration with a name and a type)
allowing it to be defined multiple times.
All matching definitions must have the link-once attribute
and their implementations should be identical as well.
A single definition with the attribute can be included in a header
file as if it was a forward declaration, except no definition is required.
This technique is used for type-id instances.
A link-once definition is generated each time the structure is seen.
This will result in multiple copies but the link-once attribute
ensures all but one are removed for a unique instance.
Internally, @cfa_linkonce@ is replaced with
@section(".gnu.linkonce.NAME")@
where \texttt{NAME} is replaced by the mangled name of the object.
Any other @section@ attributes are removed from the declaration.
The prefix \texttt{.gnu.linkonce} in section names is recognized by the
linker.
If two of these sections appear with the same name, including everything
that comes after the special prefix, then only one will be used and the
other will be discarded.
linker.
If two of these sections appear with the same name, including everything that comes after the special prefix, then only one is used and the other is discarded. \subsection{Virtual Table} \label{ss:VirtualTable} Each virtual type has a virtual table type that stores its type id and virtual members. The layout always comes in three parts. \todo{Add labels to the virtual table layout figure.} The first section is just the type id at the head of the table. It is always there to ensure that there to ensure that it can be found even when the accessing code does not know which virtual type it has. The second section are all the virtual members of the parent, in the same order as they appear in the parent's virtual table. Note that the type may prefix that has the same layout and types as its parent virtual table. This, combined with the fixed offset to the virtual table pointer, means that for any virtual type it doesn't matter if we have it or any of its descendants, it is still always safe to access the virtual table through the virtual table pointer. From there it is safe to check the type id to identify the exact type of the for any virtual type, it is always safe to access its virtual table and, from there, it is safe to check the type id to identify the exact type of the underlying object, access any of the virtual members and pass the object to any of the method-like virtual members. When a virtual table is declared the user decides where to declare it and its When a virtual table is declared, the user decides where to declare it and its name. The initialization of the virtual table is entirely automatic based on the context of the declaration. The type id is always fixed, each virtual table type will always have one The type id is always fixed; with each virtual table type having exactly one possible type id. The virtual members are usually filled in by resolution. The best match for a given name and type at the declaration site is filled in. 
There are two exceptions to that rule: the @size@ field is the type's size and is set to the result of a @sizeof@ expression, the @align@ field is the type's alignment and similarly uses an @alignof@ expression. The virtual members are usually filled in by type resolution. The best match for a given name and type at the declaration site is used. There are two exceptions to that rule: the @size@ field, the type's size, is set using a @sizeof@ expression and the @align@ field, the type's alignment, is set using an @alignof@ expression. \subsubsection{Concurrency Integration} Coroutines and threads need instances of @CoroutineCancelled@ and @ThreadCancelled@ respectively to use all of their functionality. When a new data type is declared with @coroutine@ or @thread@ the forward declaration for data type is declared with @coroutine@ or @thread@, a forward declaration for the instance is created as well. The definition of the virtual table is created at the definition of the main function. This is showned through code re-writing in \autoref{f:ConcurrencyTypeTransformation} and \autoref{f:ConcurrencyMainTransformation}. In both cases the original declaration is not modified, only new ones are added. \begin{figure} }; \end{cfa} \transformline[appends...] \begin{cfa} extern CoroutineCancelled_vtable & _default_vtable; \end{cfa} \caption{Concurrency Type Transformation} \label{f:ConcurrencyTypeTransformation} \end{figure} \begin{figure} \begin{cfa} void main(Example & this) { } \end{cfa} \transformline[appends...] \begin{cfa} &_default_vtable_object_declaration; \end{cfa} \caption{Concurrency Transformations} \label{f:ConcurrencyTransformations} \caption{Concurrency Main Transformation} \label{f:ConcurrencyMainTransformation} \end{figure} \todo{Improve Concurrency Transformations figure.} \subsection{Virtual Cast} the cast target is passed in as @child@. For C generation both arguments and the result are wrapped with type casts. 
There is also an internal store inside the compiler to make sure that the For generated C code wraps both arguments and the result with type casts. There is also an internal check inside the compiler to make sure that the target type is a virtual type. % It also checks for conflicting definitions. The virtual cast either returns the original pointer as a new type or null. So the function just does the parent check and returns the approprate value. The virtual cast either returns the original pointer or the null pointer as the new type. So the function does the parent check and returns the appropriate value. The parent check is a simple linear search of child's ancestors using the type information. % resumption doesn't as well. % Many modern languages work with an interal stack that function push and pop % Many modern languages work with an internal stack that function push and pop % their local data to. Stack unwinding removes large sections of the stack, % often across functions. stack. On function entry and return, unwinding is handled directly by the call/return code embedded in the function. In many cases the position of the instruction pointer (relative to parameter In many cases, the position of the instruction pointer (relative to parameter and local declarations) is enough to know the current size of the stack frame. Usually, the stack-frame size is known statically based on parameter and local variable declarations. Even with dynamic stack-size the information to determain how much of the stack has to be removed is still contained local variable declarations. Even with dynamic stack-size, the information to determine how much of the stack has to be removed is still contained within the function. Allocating/deallocating stack space is usually an $O(1)$ operation achieved by bumping the hardware stack-pointer up or down as needed. 
Constructing/destructing values on the stack takes longer put in terms of figuring out what needs to be done is of similar complexity. Constructing/destructing values within a stack frame has a similar complexity but can add additional work and take longer. Unwinding across multiple stack frames is more complex because that reseting to a snap-shot of an arbitrary but existing function frame on the stack. It is up to the programmer to ensure the snap-shot is valid when it is reset and that all required clean-up from the unwound stacks is preformed. This approach is fragile and forces a work onto the surounding code. With respect to that work forced onto the surounding code, reset and that all required clean-up from the unwound stacks is performed. This approach is fragile and requires extra work in the surrounding code. With respect to the extra work in the surounding code, many languages define clean-up actions that must be taken when certain sections of the stack are removed. Such as when the storage for a variable is removed from the stack or when a try statement with a finally clause is (conceptually) popped from the stack. None of these should be handled by the user, that would contradict the intention of these features, so they need to be handled automatically. To safely remove sections of the stack the language must be able to find and None of these should be handled by the user --- that would contradict the intention of these features --- so they need to be handled automatically. To safely remove sections of the stack, the language must be able to find and run these clean-up actions even when removing multiple functions unknown at the beginning of the unwinding. current stack frame, and what handlers should be checked. 
Theoretically, the LSDA can contain any information but conventionally it is a table with entries representing regions of the function and what has to be done there during representing regions of a function and what has to be done there during unwinding. These regions are bracketed by instruction addresses. If the instruction pointer is within a region's start/end, then execution is currently int avar __attribute__(( cleanup(clean_up) )); \end{cfa} The attribue is used on a variable and specifies a function, The attribute is used on a variable and specifies a function, in this case @clean_up@, run when the variable goes out of scope. This is enough to mimic destructors, but not try statements which can effect This feature is enough to mimic destructors, but not try statements which can effect the unwinding. To get full unwinding support all of this has to be done directly with assembly and assembler directives. Partiularly the cfi directives To get full unwinding support, all of these features must be handled directly in assembly and assembler directives; partiularly the cfi directives \snake{.cfi_lsda} and \snake{.cfi_personality}. section covers some of the important parts of the interface. A personality function can preform different actions depending on how it is A personality function can perform different actions depending on how it is called. \begin{lstlisting} The @exception_class@ argument is a copy of the \code{C}{exception}'s @exception_class@ field. This a number that identifies the exception handling mechanism that created the The \code{C}{exception} argument is a pointer to the user \code{C}{exception}'s @exception_class@ field, which is a number that identifies the exception handling mechanism that created the exception. The \code{C}{exception} argument is a pointer to a user provided storage object. It has two public fields: the @exception_class@, which is described above, and the @exception_cleanup@ function. 
The clean-up function is used by the EHM to clean-up the exception if it The clean-up function is used by the EHM to clean-up the exception, if it should need to be freed at an unusual time, it takes an argument that says why it had to be cleaned up. messages for special cases (some of which should never be used by the personality function) and error codes. However, unless otherwise noted, the personality function should always return @_URC_CONTINUE_UNWIND@. personality function always returns @_URC_CONTINUE_UNWIND@. \subsection{Raise Exception} Raising an exception is the central function of libunwind and it performs a Raising an exception is the central function of libunwind and it performs two-staged unwinding. \begin{cfa} % catches. Talk about GCC nested functions. \CFA termination exceptions use libunwind heavily because they match \Cpp \CFA termination exceptions use libunwind heavily because they match \Cpp exceptions closely. The main complication for \CFA is that the compiler generates C code, making it very difficult to generate the assembly to \begin{figure} \centering \input{exception-layout} \caption{Exception Layout} \label{f:ExceptionLayout} \end{figure} \todo*{Convert the exception layout to an actual diagram.} Exceptions are stored in variable-sized blocks (see \vref{f:ExceptionLayout}). Exceptions are stored in variable-sized blocks (see \autoref{f:ExceptionLayout}). The first component is a fixed-sized data structure that contains the information for libunwind and the exception system. The second component is an @_Unwind_Exception@ to the entire node. Multipe exceptions can exist at the same time because exceptions can be Multiple exceptions can exist at the same time because exceptions can be raised inside handlers, destructors and finally blocks. Figure~\vref{f:MultipleExceptions} shows a program that has multiple exceptions active at one time. Each time an exception is thrown and caught the stack unwinds and the finally clause runs. 
This will throw another exception (until @num_exceptions@ gets high enough) which must be allocated. The previous exceptions may not be clause runs. This handler throws another exception (until @num_exceptions@ gets high enough), which must be allocated. The previous exceptions may not be freed because the handler/catch clause has not been run. So the EHM must keep them alive while it allocates exceptions for new throws. Therefore, the EHM must keep all unhandled exceptions alive while it allocates exceptions for new throws. \begin{figure} \todo*{Work on multiple exceptions code sample.} All exceptions are stored in nodes which are then linked together in lists, All exceptions are stored in nodes, which are then linked together in lists one list per stack, with the list head stored in the exception context. Within each linked list, the most exception is being handled. The exception at the head of the list is currently being handled, while other exceptions wait for the exceptions before them to be removed. handled and removed. The virtual members in the exception's virtual table provide the size of the exception into managed memory. After the exception is handled, the free function is used to clean up the exception and then the entire node is passed to free so the memory can be given back to the heap. passed to free, returning the memory back to the heap. \subsection{Try Statements and Catch Clauses} The try statement with termination handlers is complex because it must compensate for the lack of assembly-code generated from \CFA. Libunwind compensate for the C code-generation versus assembly-code generated from \CFA. Libunwind requires an LSDA and personality function for control to unwind across a function. The LSDA in particular is hard to mimic in generated C code. embedded assembly. This assembly code is handcrafted using C @asm@ statements and contains enough information for the single try statement the function repersents. 
enough information for a single try statement the function represents.
Resumption simpler to implement than termination Resumption is simpler to implement than termination because there is no stack unwinding. Instead of storing the data in a special area using assembly, The nodes are stored in order, with the more recent try statements closer to the head of the list. Instead of traversing the stack resumption handling traverses the list. At each node the EHM checks to see if the try statement the node repersents Instead of traversing the stack, resumption handling traverses the list. At each node, the EHM checks to see if the try statement the node repersents can handle the exception. If it can, then the exception is handled and the operation finishes, otherwise the search continues to the next node. If the search reaches the end of the list without finding a try statement that can handle the exception the default handler is executed and the that can handle the exception, the default handler is executed and the operation finishes. In each node is a handler function which does most of the work there. The handler function is passed the raised the exception and returns true if the exception is handled and false if it cannot be handled here. For each @catchResume@ clause the handler function will: check to see if the raised exception is a descendant type of the declared exception type, if it is and there is a conditional expression then it will run the test, if both checks pass the handling code for the clause is run and the function returns true, otherwise it moves onto the next clause. If this is the last @catchResume@ clause then instead of moving onto the next clause the function returns false as no handler could be found. Each node has a handler function that does most of the work. The handler function is passed the raised exception and returns true if the exception is handled and false otherwise. The handler function checks each of its internal handlers in order, top-to-bottom, until it funds a match. 
If a match is found that handler is run, after which the function returns true, ignoring all remaining handlers. If no match is found the function returns false. The match is performed in two steps, first a virtual cast is used to see if the thrown exception is an instance of the declared exception or one of its descendant type, then check to see if passes the custom predicate if one is defined. This ordering gives the type guarantee used in the predicate. \autoref{f:ResumptionTransformation} shows the pattern used to transform a \CFA try statement with catch clauses into the approprate C functions. \todo{Explain the Resumption Transformation figure.} \begin{figure} } \end{cfa} \transformline \begin{cfa} % Recursive Resumption Stuff: Search skipping (see \vpageref{s:ResumptionMarking}), which ignores parts of \autoref{f:ResumptionMarking} shows search skipping (see \vpageref{s:ResumptionMarking}), which ignores parts of the stack already examined, is accomplished by updating the front of the list as the is updated to the next node of the current node. After the search is complete, successful or not, the head of the list is reset. % No paragraph? This mechanism means the current handler and every handler that has already been checked are not on the list while a handler is run. If a resumption is thrown during the handling of another resumption the active handlers and all thrown during the handling of another resumption, the active handlers and all the other handler checked up to this point are not checked again. This structure also supports new handler added while the resumption is being % No paragraph? This structure also supports new handlers added while the resumption is being handled. These are added to the front of the list, pointing back along the stack -- the first one points over all the checked handlers -- and the ordering is maintained. stack --- the first one points over all the checked handlers --- and the ordering is maintained. 
\begin{figure} \caption{Resumption Marking} \label{f:ResumptionMarking} \todo*{Convert Resumption Marking into a line figure.} \todo*{Label Resumption Marking to aid clarity.} \end{figure} \label{p:zero-cost} Note, the resumption implementation has a cost for entering/exiting a @try@ statement with @catchResume@ clauses, whereas a @try@ statement with @catch@ Finally, the resumption implementation has a cost for entering/exiting a try statement with @catchResume@ clauses, whereas a try statement with @catch@ clauses has zero-cost entry/exit. While resumption does not need the stack unwinding and cleanup provided by libunwind, it could use the search phase to The first step of cancellation is to find the cancelled stack and its type: coroutine or thread. Fortunately, the thread library stores the main thread pointer and the current thread pointer, and every thread stores a pointer to its main coroutine and the coroutine it is currently executing. \todo*{Consider adding a description of how threads are coroutines.} If a the current thread's main and current coroutines are the same then the current stack is a thread stack. Furthermore it is easy to compare the current thread to the main thread to see if they are the same. And if this is not a thread stack then it must be a coroutine stack. coroutine, thread or main thread. In \CFA, a thread (the construct the user works with) is a user-level thread (point of execution) paired with a coroutine, the thread's main coroutine. The thread library also stores pointers to the main thread and the current thread. If the current thread's main and current coroutines are the same then the current stack is a thread stack, otherwise it is a coroutine stack. If the current stack is a thread stack, it is also the main thread stack if and only if the main and current threads are the same. However, if the threading library is not linked, the sequential execution is on the main stack. 
Hence, the entire check is skipped because the weak-symbol function is loaded. Therefore, a main thread cancellation is unconditionally function is loaded. Therefore, main thread cancellation is unconditionally performed. Regardless of how the stack is chosen, the stop function and parameter are passed to the forced-unwind function. The general pattern of all three stop functions is the same: they continue unwinding until the end of stack and then preform their transfer. functions is the same: continue unwinding until the end of stack and then preform the appropriate transfer. For main stack cancellation, the transfer is just a program abort. For coroutine cancellation, the exception is stored on the coroutine's stack, and the coroutine context switches to its last resumer. The rest is handled on the backside of the resume, which check if the resumed coroutine is the backside of the resume, which checks if the resumed coroutine is cancelled. If cancelled, the exception is retrieved from the resumed coroutine, and a @CoroutineCancelled@ exception is constructed and loaded with the
• ## doc/theses/andrew_beach_MMath/intro.tex

 r5a46e09 \chapter{Introduction} \PAB{Stay in the present tense. \newline \url{https://plg.uwaterloo.ca/~pabuhr/technicalWriting.shtml}} \newline \PAB{Note, \lstinline{lstlisting} normally bolds keywords. None of the keywords in your thesis are bolded.} % Talk about Cforall and exceptions generally. %This thesis goes over the design and implementation of the exception handling %mechanism (EHM) of %\CFA (pernounced sea-for-all and may be written Cforall or CFA). Exception handling provides alternative dynamic inter-function control flow. % The highest level overview of Cforall and EHMs. Get this done right away. This thesis goes over the design and implementation of the exception handling mechanism (EHM) of \CFA (pronounced sea-for-all and may be written Cforall or CFA). \CFA is a new programming language that extends C, that maintains backwards-compatibility while introducing modern programming features. Adding exception handling to \CFA gives it new ways to handle errors and make other large control-flow jumps. % Now take a step back and explain what exceptions are generally. Exception handling provides dynamic inter-function control flow. There are two forms of exception handling covered in this thesis: termination, which acts as a multi-level return, and resumption, which is a dynamic function call. Note, termination exception handling is so common it is often assumed to be the only form. Lesser know derivations of inter-function control flow are continuation passing in Lisp~\cite{CommonLisp}. Termination handling is much more common, to the extent that it is often seen This seperation is uncommon because termination exception handling is so much more common that it is often assumed. % WHY: Mention other forms of continuation and \cite{CommonLisp} here? A language's EHM is the combination of language syntax and run-time components that are used to construct, raise and handle exceptions, including all control flow. 
Termination exception handling allows control to return to any previous \end{center} Resumption exception handling calls a function, but asks the functions on the stack what function that is. Resumption exception handling seaches the stack for a handler and then calls it without adding or removing any other stack frames. \todo{Add a diagram showing control flow for resumption.} most of the cost only when the error actually occurs. % Overview of exceptions in Cforall. \PAB{You need section titles here. Don't take them out.} \section{Thesis Overview} This thesis goes over the design and implementation of the exception handling mechanism (EHM) of \CFA (pernounced sea-for-all and may be written Cforall or CFA). %This thesis describes the design and implementation of the \CFA EHM. This work describes the design and implementation of the \CFA EHM. The \CFA EHM implements all of the common exception features (or an equivalent) found in most other EHMs and adds some features of its own. harder to replicate in other programming languages. \section{Background} % Talk about other programming languages. Some existing programming languages that include EHMs/exception handling exceptions which unwind the stack as part of the Exceptions also can replace return codes and return unions. In functional languages will also sometimes fold exceptions into monads. \PAB{You must demonstrate knowledge of background material here. It should be at least a full page.} \section{Contributions} The contributions of this work are: \end{enumerate} \todo{I can't figure out a good lead-in to the overview.} Covering the existing \CFA features in \autoref{c:existing}. Then the new features are introduce in \autoref{c:features}, explaining their usage and design. \todo{I can't figure out a good lead-in to the roadmap.} The next section covers the existing state of exceptions. The existing state of \CFA is also covered in \autoref{c:existing}. 
The new features are introduced in \autoref{c:features}, which explains their usage and design. That is followed by the implementation of those features in \autoref{c:implement}. % Future Work \autoref{c:future} The performance results are examined in \autoref{c:performance}. Possibilities to extend this project are discussed in \autoref{c:future}. \section{Background} \label{s:background} Exception handling is not a new concept, with papers on the subject dating back 70s. Their were popularised by \Cpp, which added them in its first major wave of non-object-orientated features in 1990. % https://en.cppreference.com/w/cpp/language/history Java was the next popular language to use exceptions. It is also the most popular language with checked exceptions. Checked exceptions are part of the function interface they are raised from. This includes functions they propogate through, until a handler for that type of exception is found. This makes exception information explicit, which can improve clarity and safety, but can slow down programming. Some of these, such as dealing with high-order methods or an overly specified throws clause, are technical. However some of the issues are much more human, in that writing/updating all the exception signatures can be enough of a burden people will hack the system to avoid them. Including the catch-and-ignore" pattern where a catch block is used without anything to repair or recover from the exception. %\subsection Resumption exceptions have been much less popular. Although resumption has a history as old as termination's, very few programming languages have implement them. % http://bitsavers.informatik.uni-stuttgart.de/pdf/xerox/parc/techReports/ %   CSL-79-3_Mesa_Language_Manual_Version_5.0.pdf Mesa is one programming languages that did and experiance with that languages is quoted as being one of the reasons resumptions were not included in the \Cpp standard. 
% https://en.wikipedia.org/wiki/Exception_handling \todo{A comment about why we did include them when they are so unpopular might be approprate.} %\subsection Functional languages, tend to use solutions like the return union, but some exception-like constructs still appear. For instance Haskell's built in error mechanism can make the result of any expression, including function calls. Any expression that examines an error value will in-turn produce an error. This continues until the main function produces an error or until it is handled by one of the catch functions. %\subsection More recently exceptions seem to be vanishing from newer programming languages. Rust and Go reduce this feature to panics. Panicing is somewhere between a termination exception and a program abort. Notably in Rust a panic can trigger either, a panic may unwind the stack or simply kill the process. % https://doc.rust-lang.org/std/panic/fn.catch_unwind.html Go's panic is much more similar to a termination exception but there is only a catch-all function with \code{Go}{recover()}. So exceptions still are appearing, just in reduced forms. %\subsection Exception handling's most common use cases are in error handling. Here are some other ways to handle errors and comparisons with exceptions. \begin{itemize} \item\emph{Error Codes}: This pattern uses an enumeration (or just a set of fixed values) to indicate that an error has occured and which error it was. There are some issues if a function wants to return an error code and another value. The main issue is that it can be easy to forget checking the error code, which can lead to an error being quitely and implicitly ignored. Some new languages have tools that raise warnings if the return value is discarded to avoid this. It also puts more code on the main execution path. 
\item\emph{Special Return with Global Store}: A function that encounters an error returns some value indicating that it encountered a value but store which error occured in a fixed global location. Perhaps the C standard @errno@ is the most famous example of this, where some standard library functions will return some non-value (often a NULL pointer) and set @errno@. This avoids the multiple results issue encountered with straight error codes but otherwise many of the same advantages and disadvantages. It does however introduce one other major disadvantage: Everything that uses that global location must agree on all possible errors. \item\emph{Return Union}: Replaces error codes with a tagged union. Success is one tag and the errors are another. It is also possible to make each possible error its own tag and carry its own additional information, but the two branch format is easy to make generic so that one type can be used everywhere in error handling code. This pattern is very popular in functional or semi-functional language, anything with primitive support for tagged unions (or algebraic data types). % We need listing Rust/rust to format code snipits from it. % Rust's \code{rust}{Result} The main disadvantage is again it puts code on the main execution path. This is also the first technique that allows for more information about an error, other than one of a fix-set of ids, to be sent. They can be missed but some languages can force that they are checked. It is also implicitly forced in any languages with checked union access. \item\emph{Handler Functions}: On error the function that produced the error calls another function to handle it. The handler function can be provided locally (passed in as an argument, either directly as as a field of a structure/object) or globally (a global variable). C++ uses this as its fallback system if exception handling fails. 
\snake{std::terminate_handler} and for a time \snake{std::unexpected_handler} Handler functions work a lot like resumption exceptions. The difference is they are more expencive to set up but cheaper to use, and so are more suited to more fequent errors. The exception being global handlers if they are rarely change as the time in both cases strinks towards zero. \end{itemize} %\subsection Because of their cost exceptions are rarely used for hot paths of execution. There is an element of self-fulfilling prophocy here as implementation techniques have been designed to make exceptions cheap to set-up at the cost of making them expencive to use. Still, use of exceptions for other tasks is more common in higher-level scripting languages. An iconic example is Python's StopIteration exception which is thrown by an iterator to indicate that it is exausted. Combined with Python's heavy use of the iterator based for-loop. % https://docs.python.org/3/library/exceptions.html#StopIteration
• ## doc/theses/andrew_beach_MMath/uw-ethesis.tex

 r5a46e09 \input{features} \input{implement} \input{performance} \input{future}
• ## doc/theses/mubeen_zulfiqar_MMath/.gitignore

 r5a46e09 # Intermediate Results: out/ build/ # Final Files:
• ## doc/theses/mubeen_zulfiqar_MMath/allocator.tex

 r5a46e09 \begin{itemize} \item Objective of @uHeapLmmm@. Objective of uHeapLmmm. \item Design philosophy. \item Background and previous design of @uHeapLmmm@. Background and previous design of uHeapLmmm. \item Distributed design of @uHeapLmmm@. Distributed design of uHeapLmmm. ----- SHOULD WE GIVE IMPLEMENTATION DETAILS HERE? ----- \end{itemize} The new features added to @uHeapLmmm@ (incl. @malloc_size@ routine) The new features added to uHeapLmmm (incl. @malloc_size@ routine) \CFA alloc interface with examples. \begin{itemize} \end{itemize} ----- SHOULD WE GIVE PERFORMANCE AND USABILITY COMPARISON OF DIFFERENT INTERFACES THAT WE TRIED? ----- \PAB{Often Performance is its own chapter. I added one for now.} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% uHeapLmmm Design %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Performance evaluation using u-benchmark suite. \section{Objective of uHeapLmmm} UHeapLmmm is a lightweight memory allocator. The objective behind uHeapLmmm is to design a minimal concurrent memory allocator that has new features and also fulfills GNU C Library requirements (FIX ME: cite requirements). 
\subsection{Design philosophy} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Background and previous design of uHeapLmmm} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Distributed design of uHeapLmmm} \subsection{Advantages of distributed design} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Added Features} \subsection{Methods} Why did we need it? The added benefits. \subsection{Alloc Interface} Why did we need it? The added benefits. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Following is added by Peter \noindent

• ## doc/theses/mubeen_zulfiqar_MMath/uw-ethesis.tex

 r5a46e09 % cfa macros used in the document \input{common} %\usepackageinput{common} \CFAStyle                                               % CFA code-style for all languages \lstset{language=CFA,basicstyle=\linespread{0.9}\tt}    % CFA default language \lstset{basicstyle=\linespread{0.9}\tt}                 % CFA typewriter font \newcommand{\PAB}[1]{{\color{red}PAB: #1}}
• ## libcfa/configure.ac

 r5a46e09 #io_uring 5.5 uses enum values #io_uring 5.6 and later uses probes AH_TEMPLATE([CFA_HAVE_LINUX_RSEQ_H],[Defined if rseq support is present when compiling libcfathread.]) AC_CHECK_HEADERS([linux/rseq.h], [AC_DEFINE(CFA_HAVE_LINUX_RSEQ_H)]) AH_TEMPLATE([CFA_HAVE_LINUX_LIBRSEQ],[Defined if librseq support is present when compiling libcfathread.]) AC_CHECK_LIB([rseq], [rseq_available], [AC_DEFINE(CFA_HAVE_LINUX_RSEQ_H)], []) AH_TEMPLATE([CFA_HAVE_LINUX_IO_URING_H],[Defined if io_uring support is present when compiling libcfathread.])
• ## libcfa/prelude/defines.hfa.in

 r5a46e09 #undef CFA_HAVE_LINUX_IO_URING_H /* Defined if librseq support is present when compiling libcfathread. */ #undef CFA_HAVE_LINUX_LIBRSEQ /* Defined if rseq support is present when compiling libcfathread. */ #undef CFA_HAVE_LINUX_RSEQ_H /* Defined if openat2 support is present when compiling libcfathread. */ #undef CFA_HAVE_OPENAT2 #undef HAVE_LINUX_IO_URING_H /* Define to 1 if you have the header file. */ #undef HAVE_LINUX_RSEQ_H /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H
• ## libcfa/src/Makefile.am

 r5a46e09 containers/queueLockFree.hfa \ containers/stackLockFree.hfa \ containers/vector2.hfa \ vec/vec.hfa \ vec/vec2.hfa \ common.hfa \ fstream.hfa \ strstream.hfa \ heap.hfa \ iostream.hfa \ rational.hfa \ stdlib.hfa \ strstream.hfa \ time.hfa \ bits/weakso_locks.hfa \ containers/pair.hfa \ containers/result.hfa \ containers/vector.hfa containers/vector.hfa \ device/cpu.hfa libsrc = ${inst_headers_src}${inst_headers_src:.hfa=.cfa} \
• ## libcfa/src/bits/signal.hfa

 r5a46e09 #include #define __USE_GNU #include #undef __USE_GNU #include #include
• ## libcfa/src/concurrency/coroutine.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE #include "coroutine.hfa"
• ## libcfa/src/concurrency/io.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE #if defined(__CFA_DEBUG__) #if defined(CFA_HAVE_LINUX_IO_URING_H) #define _GNU_SOURCE         /* See feature_test_macros(7) */ #include #include
• ## libcfa/src/concurrency/io/setup.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE         /* See feature_test_macros(7) */ #define _GNU_SOURCE #if defined(__CFA_DEBUG__)
• ## libcfa/src/concurrency/kernel.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__ // Spin a little on I/O, just in case for(5) { for(5) { __maybe_io_drain( this ); readyThread = pop_fast( this->cltr ); // no luck, try stealing a few times for(5) { for(5) { if( __maybe_io_drain( this ) ) { readyThread = pop_fast( this->cltr ); __cfactx_switch( &proc_cor->context, &thrd_dst->context ); // when __cfactx_switch returns we are back in the processor coroutine /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary ); /* paranoid */ verify( ! __preemption_enabled() ); /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src ); /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning$thread %p has been corrupted.\n StackPointer too large.\n", thrd_src ); /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src ); /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning$thread %p has been corrupted.\n StackPointer too large.\n", thrd_src ); }
• ## libcfa/src/concurrency/kernel.hfa

 r5a46e09 unsigned id; unsigned target; unsigned last; unsigned long long int cutoff; } rdq;
• ## libcfa/src/concurrency/kernel/startup.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE // C Includes #include               // errno #include #include              // strerror #include              // sysconf extern "C" { #include        // PTHREAD_STACK_MIN #include        // syscall #include   // eventfd #include      // mprotect }; #if   defined(CFA_HAVE_LINUX_LIBRSEQ) // No data needed #elif defined(CFA_HAVE_LINUX_RSEQ_H) extern "Cforall" { __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq @= { .cpu_id : RSEQ_CPU_ID_UNINITIALIZED, }; } #else // No data needed #endif //----------------------------------------------------------------------------- // Struct to steal stack self_mon_p = &self_mon; link.next = 0p; link.ts   = 0; link.ts   = -1llu; preferred = -1u; last_proc = 0p; this.rdq.id  = -1u; this.rdq.target = -1u; this.rdq.last = -1u; this.rdq.cutoff = 0ull; do_terminate = false;
• ## libcfa/src/concurrency/kernel_private.hfa

 r5a46e09 #pragma once #if !defined(__cforall_thread__) #error kernel_private.hfa should only be included in libcfathread source #endif #include "kernel.hfa" #include "thread.hfa" #include "stats.hfa" extern "C" { #if   defined(CFA_HAVE_LINUX_LIBRSEQ) #include #elif defined(CFA_HAVE_LINUX_RSEQ_H) #include #else #ifndef _GNU_SOURCE #error kernel_private requires gnu_source #endif #include #endif } //----------------------------------------------------------------------------- // Scheduler extern "C" { void disable_interrupts() OPTIONAL_THREAD; //----------------------------------------------------------------------------- // Hardware #if   defined(CFA_HAVE_LINUX_LIBRSEQ) // No data needed #elif defined(CFA_HAVE_LINUX_RSEQ_H) extern "Cforall" { extern __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq; } #else // No data needed #endif static inline int __kernel_getcpu() { /* paranoid */ verify( ! __preemption_enabled() ); #if   defined(CFA_HAVE_LINUX_LIBRSEQ) return rseq_current_cpu(); #elif defined(CFA_HAVE_LINUX_RSEQ_H) int r = __cfaabi_rseq.cpu_id; /* paranoid */ verify( r >= 0 ); return r; #else return sched_getcpu(); #endif } //----------------------------------------------------------------------------- // Processor void main(processorCtx_t *); void * __create_pthread( pthread_t *, void * (*)(void *), void * ); void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ); extern cluster * mainCluster;
• ## libcfa/src/concurrency/locks.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE #include "locks.hfa"
• ## libcfa/src/concurrency/locks.hfa

 r5a46e09 #include "containers/list.hfa" #include "limits.hfa" #include "thread.hfa" bool tryP(BinaryBenaphore & this) { ssize_t c = this.counter; /* paranoid */ verify( c > MIN ); return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); } ssize_t c = 0; for () { /* paranoid */ verify( this.counter < MAX ); if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { if (c == 0) return true; ThreadBenaphore sem; }; static inline void ?{}(fast_lock & this) { this.owner = 0p; } static inline bool $try_lock(fast_lock & this,$thread * thrd) {
• ## libcfa/src/concurrency/monitor.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE #include "monitor.hfa"
• ## libcfa/src/concurrency/mutex.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE #include "mutex.hfa"
• ## libcfa/src/concurrency/preemption.cfa

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE // #define __CFA_DEBUG_PRINT_PREEMPTION__

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE // #define __CFA_DEBUG_PRINT_READY_QUEUE__ #define USE_RELAXED_FIFO // #define USE_WORK_STEALING // #define USE_CPU_WORK_STEALING #include "bits/defs.hfa" #include "device/cpu.hfa" #include "kernel_private.hfa" #define _GNU_SOURCE #include "stdlib.hfa" #include "math.hfa" #include #include extern "C" { #include   // __NR_xxx } #include "ready_subqueue.hfa" #endif #if   defined(USE_RELAXED_FIFO) #if   defined(USE_CPU_WORK_STEALING) #define READYQ_SHARD_FACTOR 2 #elif defined(USE_RELAXED_FIFO) #define BIAS 4 #define READYQ_SHARD_FACTOR 4 } #if   defined(CFA_HAVE_LINUX_LIBRSEQ) // No forward declaration needed #define __kernel_rseq_register rseq_register_current_thread #define __kernel_rseq_unregister rseq_unregister_current_thread #elif defined(CFA_HAVE_LINUX_RSEQ_H) void __kernel_raw_rseq_register  (void); void __kernel_raw_rseq_unregister(void); #define __kernel_rseq_register __kernel_raw_rseq_register #define __kernel_rseq_unregister __kernel_raw_rseq_unregister #else // No forward declaration needed // No initialization needed static inline void noop(void) {} #define __kernel_rseq_register noop #define __kernel_rseq_unregister noop #endif //======================================================================= // Cluster wide reader-writer lock // Lock-Free registering/unregistering of threads unsigned register_proc_id( void ) with(*__scheduler_lock) { __kernel_rseq_register(); __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc); bool * handle = (bool *)&kernelTLS().sched_lock; __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc); __kernel_rseq_unregister(); } //======================================================================= void ?{}(__ready_queue_t & this) with (this) { lanes.data  = 0p; lanes.tscs  = 0p; lanes.count = 0; #if defined(USE_CPU_WORK_STEALING) lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR; lanes.data = alloc( lanes.count 
); lanes.tscs = alloc( lanes.count ); for( idx; (size_t)lanes.count ) { (lanes.data[idx]){}; lanes.tscs[idx].tv = rdtscl(); } #else lanes.data  = 0p; lanes.tscs  = 0p; lanes.count = 0; #endif } void ^?{}(__ready_queue_t & this) with (this) { verify( SEQUENTIAL_SHARD == lanes.count ); #if !defined(USE_CPU_WORK_STEALING) verify( SEQUENTIAL_SHARD == lanes.count ); #endif free(lanes.data); free(lanes.tscs); //----------------------------------------------------------------------- #if defined(USE_CPU_WORK_STEALING) __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) { __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); processor * const proc = kernelTLS().this_processor; const bool external = !push_local || (!proc) || (cltr != proc->cltr); const int cpu = __kernel_getcpu(); /* paranoid */ verify(cpu >= 0); /* paranoid */ verify(cpu < cpu_info.hthrd_count); /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); const int start = map.self * READYQ_SHARD_FACTOR; unsigned i; do { unsigned r; if(unlikely(external)) { r = __tls_rand(); } else { r = proc->rdq.its++; } i = start + (r % READYQ_SHARD_FACTOR); // If we can't lock it retry } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); // Actually push it push(lanes.data[i], thrd); // Unlock and return __atomic_unlock( &lanes.data[i].lock ); #if !defined(__CFA_NO_STATISTICS__) if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); else __tls_stats()->ready.push.local.success++; #endif 
__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); } // Pop from the ready queue from a given cluster __attribute__((hot))$thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { /* paranoid */ verify( lanes.count > 0 ); /* paranoid */ verify( kernelTLS().this_processor ); const int cpu = __kernel_getcpu(); /* paranoid */ verify(cpu >= 0); /* paranoid */ verify(cpu < cpu_info.hthrd_count); /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); processor * const proc = kernelTLS().this_processor; const int start = map.self * READYQ_SHARD_FACTOR; // Did we already have a help target if(proc->rdq.target == -1u) { // if We don't have a unsigned long long min = ts(lanes.data[start]); for(i; READYQ_SHARD_FACTOR) { unsigned long long tsc = ts(lanes.data[start + i]); if(tsc < min) min = tsc; } proc->rdq.cutoff = min; /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores. /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores. 
uint64_t chaos = __tls_rand(); uint64_t high_chaos = (chaos >> 32); uint64_t  mid_chaos = (chaos >> 16) & 0xffff; uint64_t  low_chaos = chaos & 0xffff; unsigned me = map.self; unsigned cpu_chaos = map.start + (mid_chaos % map.count); bool global = cpu_chaos == me; if(global) { proc->rdq.target = high_chaos % lanes.count; } else { proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (low_chaos % READYQ_SHARD_FACTOR); /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR)); /* paranoid */ verify(proc->rdq.target <  ((map.start + map.count) * READYQ_SHARD_FACTOR)); } /* paranoid */ verify(proc->rdq.target != -1u); } else { const unsigned long long bias = 0; //2_500_000_000; const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff; { unsigned target = proc->rdq.target; proc->rdq.target = -1u; if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); proc->rdq.last = target; if(t) return t; } } unsigned last = proc->rdq.last; if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {$thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help)); if(t) return t; } else { proc->rdq.last = -1u; } } for(READYQ_SHARD_FACTOR) { unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; } // All lanes where empty return 0p return 0p; } __attribute__((hot)) struct$thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { processor * const proc = kernelTLS().this_processor; unsigned last = proc->rdq.last; if(last != -1u) { struct $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal)); if(t) return t; proc->rdq.last = -1u; } unsigned i = __tls_rand() % lanes.count; return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); } __attribute__((hot)) struct$thread 
* pop_search(struct cluster * cltr) { return search(cltr); } #endif #if defined(USE_RELAXED_FIFO) //----------------------------------------------------------------------- if(is_empty(sl)) { assert( sl.anchor.next == 0p ); assert( sl.anchor.ts   == 0 ); assert( sl.anchor.ts   == -1llu ); assert( mock_head(sl)  == sl.prev ); } else { assert( sl.anchor.next != 0p ); assert( sl.anchor.ts   != 0 ); assert( sl.anchor.ts   != -1llu ); assert( mock_head(sl)  != sl.prev ); } lanes.tscs = alloc(lanes.count, lanes.tscsrealloc); for(i; lanes.count) { unsigned long long tsc = ts(lanes.data[i]); lanes.tscs[i].tv = tsc != 0 ? tsc : rdtscl(); unsigned long long tsc1 = ts(lanes.data[i]); unsigned long long tsc2 = rdtscl(); lanes.tscs[i].tv = min(tsc1, tsc2); } #endif } // Grow the ready queue void ready_queue_grow(struct cluster * cltr) { size_t ncount; int target = cltr->procs.total; /* paranoid */ verify( ready_mutate_islocked() ); __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n"); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); // grow the ready queue with( cltr->ready_queue ) { // Find new count // Make sure we always have atleast 1 list if(target >= 2) { ncount = target * READYQ_SHARD_FACTOR; } else { ncount = SEQUENTIAL_SHARD; } // Allocate new array (uses realloc and memcpies the data) lanes.data = alloc( ncount, lanes.datarealloc ); // Fix the moved data for( idx; (size_t)lanes.count ) { fix(lanes.data[idx]); } // Construct new data for( idx; (size_t)lanes.count ~ ncount) { (lanes.data[idx]){}; } // Update original lanes.count = ncount; } fix_times(cltr); reassign_cltr_id(cltr); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n"); /* paranoid */ verify( ready_mutate_islocked() ); } // Shrink the ready queue void ready_queue_shrink(struct cluster * cltr) { /* paranoid */ verify( ready_mutate_islocked() ); 
__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); int target = cltr->procs.total; with( cltr->ready_queue ) { // Remember old count size_t ocount = lanes.count; // Find new count // Make sure we always have atleast 1 list lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; /* paranoid */ verify( ocount >= lanes.count ); /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); // for printing count the number of displaced threads #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) __attribute__((unused)) size_t displaced = 0; #endif // redistribute old data for( idx; (size_t)lanes.count ~ ocount) { // Lock is not strictly needed but makes checking invariants much easier __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); verify(locked); // As long as we can pop from this lane to push the threads somewhere else in the queue while(!is_empty(lanes.data[idx])) { struct $thread * thrd; unsigned long long _; [thrd, _] = pop(lanes.data[idx]); push(cltr, thrd, true); // for printing count the number of displaced threads #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) displaced++; #endif } // Unlock the lane __atomic_unlock(&lanes.data[idx].lock); // TODO print the queue statistics here ^(lanes.data[idx]){}; } __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced); // Allocate new array (uses realloc and memcpies the data) lanes.data = alloc( lanes.count, lanes.datarealloc ); // Fix the moved data for( idx; (size_t)lanes.count ) { fix(lanes.data[idx]); } } fix_times(cltr); reassign_cltr_id(cltr); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n"); /* paranoid */ verify( 
ready_mutate_islocked() ); } #if defined(USE_CPU_WORK_STEALING) // ready_queue size is fixed in this case void ready_queue_grow(struct cluster * cltr) {} void ready_queue_shrink(struct cluster * cltr) {} #else // Grow the ready queue void ready_queue_grow(struct cluster * cltr) { size_t ncount; int target = cltr->procs.total; /* paranoid */ verify( ready_mutate_islocked() ); __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n"); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); // grow the ready queue with( cltr->ready_queue ) { // Find new count // Make sure we always have atleast 1 list if(target >= 2) { ncount = target * READYQ_SHARD_FACTOR; } else { ncount = SEQUENTIAL_SHARD; } // Allocate new array (uses realloc and memcpies the data) lanes.data = alloc( ncount, lanes.datarealloc ); // Fix the moved data for( idx; (size_t)lanes.count ) { fix(lanes.data[idx]); } // Construct new data for( idx; (size_t)lanes.count ~ ncount) { (lanes.data[idx]){}; } // Update original lanes.count = ncount; } fix_times(cltr); reassign_cltr_id(cltr); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n"); /* paranoid */ verify( ready_mutate_islocked() ); } // Shrink the ready queue void ready_queue_shrink(struct cluster * cltr) { /* paranoid */ verify( ready_mutate_islocked() ); __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); int target = cltr->procs.total; with( cltr->ready_queue ) { // Remember old count size_t ocount = lanes.count; // Find new count // Make sure we always have atleast 1 list lanes.count = target >= 2 ? 
target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; /* paranoid */ verify( ocount >= lanes.count ); /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); // for printing count the number of displaced threads #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) __attribute__((unused)) size_t displaced = 0; #endif // redistribute old data for( idx; (size_t)lanes.count ~ ocount) { // Lock is not strictly needed but makes checking invariants much easier __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); verify(locked); // As long as we can pop from this lane to push the threads somewhere else in the queue while(!is_empty(lanes.data[idx])) { struct$thread * thrd; unsigned long long _; [thrd, _] = pop(lanes.data[idx]); push(cltr, thrd, true); // for printing count the number of displaced threads #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) displaced++; #endif } // Unlock the lane __atomic_unlock(&lanes.data[idx].lock); // TODO print the queue statistics here ^(lanes.data[idx]){}; } __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced); // Allocate new array (uses realloc and memcpies the data) lanes.data = alloc( lanes.count, lanes.datarealloc ); // Fix the moved data for( idx; (size_t)lanes.count ) { fix(lanes.data[idx]); } } fix_times(cltr); reassign_cltr_id(cltr); // Make sure that everything is consistent /* paranoid */ check( cltr->ready_queue ); __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n"); /* paranoid */ verify( ready_mutate_islocked() ); } #endif #if !defined(__CFA_NO_STATISTICS__) } #endif #if   defined(CFA_HAVE_LINUX_LIBRSEQ) // No definition needed #elif defined(CFA_HAVE_LINUX_RSEQ_H) #if defined( __x86_64 ) || defined( __i386 ) #define RSEQ_SIG        0x53053053 #elif defined( __ARM_ARCH ) #ifdef __ARMEB__ #define RSEQ_SIG    0xf3def5e7      /* udf    #24035    ; 0x5de3 
(ARMv6+) */ #else #define RSEQ_SIG    0xe7f5def3      /* udf    #24035    ; 0x5de3 */ #endif #endif extern void __disable_interrupts_hard(); extern void __enable_interrupts_hard(); void __kernel_raw_rseq_register  (void) { /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED ); // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8); int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG); if(ret != 0) { int e = errno; switch(e) { case EINVAL: abort("KERNEL ERROR: rseq register invalid argument"); case ENOSYS: abort("KERNEL ERROR: rseq register no supported"); case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument"); case EBUSY : abort("KERNEL ERROR: rseq register already registered"); case EPERM : abort("KERNEL ERROR: rseq register sig  argument  on unregistration does not match the signature received on registration"); default: abort("KERNEL ERROR: rseq register unexpected return %d", e); } } } void __kernel_raw_rseq_unregister(void) { /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 ); // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8); int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG); if(ret != 0) { int e = errno; switch(e) { case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument"); case ENOSYS: abort("KERNEL ERROR: rseq unregister no supported"); case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument"); case EBUSY : abort("KERNEL ERROR: rseq unregister already registered"); case EPERM : abort("KERNEL ERROR: rseq unregister sig  argument  on unregistration does not match the signature received on registration"); default: abort("KERNEL ERROR: rseq unregisteunexpected return %d", e); } } } #else // No definition needed #endif

 r5a46e09 #define __cforall_thread__ #define _GNU_SOURCE #include "thread.hfa" curr_cluster = &cl; link.next = 0p; link.ts   = 0; link.ts   = -1llu; preferred = -1u; last_proc = 0p;
• ## libcfa/src/containers/array.hfa

 r5a46e09 // a type whose size is n #define Z(n) char[n] // the inverse of Z(-) #define z(N) sizeof(N) forall( T & ) struct tag {}; forall( __CFA_tysys_id_only_X & ) struct tag {}; #define ttag(T) ((tag(T)){}) #define ztag(n) ttag(Z(n)) #define ztag(n) ttag(n) forall( [N], S & | sized(S), Timmed &, Tbase & ) { struct arpk { S strides[z(N)]; S strides[N]; }; static inline size_t ?len( arpk(N, S, Timmed, Tbase) & a ) { return z(N); return N; } // workaround #226 (and array relevance thereof demonstrated in mike102/otype-slow-ndims.cfa) static inline void ?{}( arpk(N, S, Timmed, Tbase) & this ) { void ?{}( S (&inner)[z(N)] ) {} void ?{}( S (&inner)[N] ) {} ?{}(this.strides); } static inline void ^?{}( arpk(N, S, Timmed, Tbase) & this ) { void ^?{}( S (&inner)[z(N)] ) {} void ^?{}( S (&inner)[N] ) {} ^?{}(this.strides); } // Base forall( [Nq], Sq & | sized(Sq), Tbase & ) static inline tag(arpk(Nq, Sq, Tbase, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(Tbase) ) {} static inline tag(arpk(Nq, Sq, Tbase, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(Tbase) ) { tag(arpk(Nq, Sq, Tbase, Tbase)) ret; return ret; } // Rec forall( [Nq], Sq & | sized(Sq), [N], S & | sized(S), recq &, recr &, Tbase & | { tag(recr) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(recq) ); } ) static inline tag(arpk(N, S, recr, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(arpk(N, S, recq, Tbase)) ) {} static inline tag(arpk(N, S, recr, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(arpk(N, S, recq, Tbase)) ) { tag(arpk(N, S, recr, Tbase)) ret; return ret; } // Wrapper
• ## libcfa/src/exception.c

 r5a46e09 #include "stdhdr/assert.h" #include "virtual.h" #if defined( __ARM_ARCH ) #warning FIX ME: temporary hack to keep ARM build working #ifndef _URC_FATAL_PHASE1_ERROR #define _URC_FATAL_PHASE1_ERROR 3 #endif // ! _URC_FATAL_PHASE1_ERROR #ifndef _URC_FATAL_PHASE2_ERROR #define _URC_FATAL_PHASE2_ERROR 2 #endif // ! _URC_FATAL_PHASE2_ERROR #endif // __ARM_ARCH #include "lsda.h" // the whole stack. #if defined( __x86_64 ) || defined( __i386 ) // We did not simply reach the end of the stack without finding a handler. This is an error. if ( ret != _URC_END_OF_STACK ) { #else // defined( __ARM_ARCH ) // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack. // This workaround tries to keep default exception handling working. if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) { #endif printf("UNWIND ERROR %d after raise exception\n", ret); abort(); } #if defined( __x86_64 ) || defined( __i386 ) #if defined( __x86_64 ) || defined( __i386 ) || defined( __ARM_ARCH ) // This is our personality routine. For every stack frame annotated with // ".cfi_personality 0x3,__gcfa_personality_v0" this function will be called twice when unwinding. 
_Unwind_GetCFA(unwind_context) + 24; #                               elif defined( __ARM_ARCH ) #                                   warning FIX ME: check if anything needed for ARM 42; _Unwind_GetCFA(unwind_context) + 40; #                               endif int (*matcher)(exception_t *) = *(int(**)(exception_t *))match_pos; // HEADER ".LFECFA1:\n" #if defined( __x86_64 ) || defined( __i386 ) "       .globl  __gcfa_personality_v0\n" #else // defined( __ARM_ARCH ) "       .global __gcfa_personality_v0\n" #endif "       .section        .gcc_except_table,\"a\",@progbits\n" // TABLE HEADER (important field is the BODY length at the end) // No clue what this does specifically "       .section        .data.rel.local.CFA.ref.__gcfa_personality_v0,\"awG\",@progbits,CFA.ref.__gcfa_personality_v0,comdat\n" #if defined( __x86_64 ) || defined( __i386 ) "       .align 8\n" #else // defined( __ARM_ARCH ) "       .align 3\n" #endif "       .type CFA.ref.__gcfa_personality_v0, @object\n" "       .size CFA.ref.__gcfa_personality_v0, 8\n" #if defined( __x86_64 ) "       .quad __gcfa_personality_v0\n" #else // then __i386 #elif defined( __i386 ) "       .long __gcfa_personality_v0\n" #else // defined( __ARM_ARCH ) "       .xword __gcfa_personality_v0\n" #endif ); // HEADER ".LFECFA1:\n" #if defined( __x86_64 ) || defined( __i386 ) "       .globl  __gcfa_personality_v0\n" #else // defined( __ARM_ARCH ) "       .global __gcfa_personality_v0\n" #endif "       .section        .gcc_except_table,\"a\",@progbits\n" // TABLE HEADER (important field is the BODY length at the end) #pragma GCC pop_options #elif defined( __ARM_ARCH ) _Unwind_Reason_Code __gcfa_personality_v0( int version, _Unwind_Action actions, unsigned long long exception_class, struct _Unwind_Exception * unwind_exception, struct _Unwind_Context * unwind_context) { return _URC_CONTINUE_UNWIND; } __attribute__((noinline)) void __cfaehm_try_terminate(void (*try_block)(), void (*catch_block)(int index, exception_t * except), 
__attribute__((unused)) int (*match_block)(exception_t * except)) { } #else #error unsupported hardware architecture #endif // __x86_64 || __i386 #endif // __x86_64 || __i386 || __ARM_ARCH
• ## libcfa/src/interpose.cfa

 r5a46e09 extern "C" { void __cfaabi_interpose_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_CORE ) )); void __cfaabi_interpose_startup( void ) { const char *version = 0p;

• ## src/AST/Convert.cpp

 r5a46e09 } virtual void visit( const DimensionExpr * old ) override final { // DimensionExpr gets desugared away in Validate. // As long as new-AST passes don't use it, this cheap-cheerful error // detection helps ensure that these occurrences have been compiled // away, as expected.  To move the DimensionExpr boundary downstream // or move the new-AST translation boundary upstream, implement // DimensionExpr in the new AST and implement a conversion. (void) old; assert(false && "DimensionExpr should not be present at new-AST boundary"); } virtual void visit( const AsmExpr * old ) override final { this->node = visitBaseExpr( old,
• ## src/AST/Decl.cpp

 r5a46e09 const char * TypeDecl::typeString() const { static const char * kindNames[] = { "sized data type", "sized data type", "sized object type", "sized function type", "sized tuple type", "sized array length type" }; static const char * kindNames[] = { "sized data type", "sized data type", "sized object type", "sized function type", "sized tuple type", "sized length value" }; static_assert( sizeof(kindNames) / sizeof(kindNames[0]) == TypeDecl::NUMBER_OF_KINDS, "typeString: kindNames is out of sync." ); assertf( kind < TypeDecl::NUMBER_OF_KINDS, "TypeDecl kind is out of bounds." );
• ## src/AST/Decl.hpp

 r5a46e09 class TypeDecl final : public NamedTypeDecl { public: enum Kind { Dtype, DStype, Otype, Ftype, Ttype, ALtype, NUMBER_OF_KINDS }; enum Kind { Dtype, DStype, Otype, Ftype, Ttype, Dimension, NUMBER_OF_KINDS }; Kind kind;
• ## src/AST/Pass.impl.hpp

 r5a46e09 guard_symtab guard { *this }; // implicit add __func__ identifier as specified in the C manual 6.4.2.2 static ast::ptr< ast::ObjectDecl > func{ new ast::ObjectDecl{ static ast::ptr< ast::ObjectDecl > func{ new ast::ObjectDecl{ CodeLocation{}, "__func__", new ast::ArrayType{ VISIT({ guard_symtab guard { * this }; maybe_accept( node, &StructDecl::params  ); maybe_accept( node, &StructDecl::members ); maybe_accept( node, &StructDecl::params     ); maybe_accept( node, &StructDecl::members    ); maybe_accept( node, &StructDecl::attributes ); }) VISIT({ guard_symtab guard { * this }; maybe_accept( node, &UnionDecl::params  ); maybe_accept( node, &UnionDecl::members ); maybe_accept( node, &UnionDecl::params     ); maybe_accept( node, &UnionDecl::members    ); maybe_accept( node, &UnionDecl::attributes ); }) VISIT( // unlike structs, traits, and unions, enums inject their members into the global scope maybe_accept( node, &EnumDecl::params  ); maybe_accept( node, &EnumDecl::members ); maybe_accept( node, &EnumDecl::params     ); maybe_accept( node, &EnumDecl::members    ); maybe_accept( node, &EnumDecl::attributes ); ) VISIT({ guard_symtab guard { *this }; maybe_accept( node, &TraitDecl::params  ); maybe_accept( node, &TraitDecl::members ); maybe_accept( node, &TraitDecl::params     ); maybe_accept( node, &TraitDecl::members    ); maybe_accept( node, &TraitDecl::attributes ); })
• ## src/CodeGen/CodeGenerator.cc

 r5a46e09 output << nameExpr->get_name(); } // if } void CodeGenerator::postvisit( DimensionExpr * dimensionExpr ) { extension( dimensionExpr ); output << "/*non-type*/" << dimensionExpr->get_name(); }
• ## src/CodeGen/CodeGenerator.h

 r5a46e09 void postvisit( TupleIndexExpr * tupleExpr ); void postvisit( TypeExpr *typeExpr ); void postvisit( DimensionExpr *dimensionExpr ); void postvisit( AsmExpr * ); void postvisit( StmtExpr * );
• ## src/Common/PassVisitor.h

 r5a46e09 virtual void visit( TypeExpr * typeExpr ) override final; virtual void visit( const TypeExpr * typeExpr ) override final; virtual void visit( DimensionExpr * dimensionExpr ) override final; virtual void visit( const DimensionExpr * dimensionExpr ) override final; virtual void visit( AsmExpr * asmExpr ) override final; virtual void visit( const AsmExpr * asmExpr ) override final; virtual Expression * mutate( CommaExpr * commaExpr ) override final; virtual Expression * mutate( TypeExpr * typeExpr ) override final; virtual Expression * mutate( DimensionExpr * dimensionExpr ) override final; virtual Expression * mutate( AsmExpr * asmExpr ) override final; virtual Expression * mutate( ImplicitCopyCtorExpr * impCpCtorExpr ) override final; class WithIndexer { protected: WithIndexer() {} WithIndexer( bool trackIdentifiers = true ) : indexer(trackIdentifiers) {} ~WithIndexer() {}
• ## src/Common/PassVisitor.impl.h

 r5a46e09 maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); } maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); } maybeMutate_impl( node->parameters, *this ); maybeMutate_impl( node->members   , *this ); maybeMutate_impl( node->attributes, *this ); } maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); } maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); } maybeMutate_impl( node->parameters, *this ); maybeMutate_impl( node->members   , *this ); maybeMutate_impl( node->attributes, *this ); } maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); VISIT_END( node ); maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); VISIT_END( node ); maybeMutate_impl( node->parameters, *this ); maybeMutate_impl( node->members   , *this ); maybeMutate_impl( node->attributes, *this ); MUTATE_END( Declaration, node ); maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); } maybeAccept_impl( node->parameters, *this ); maybeAccept_impl( node->members   , *this ); maybeAccept_impl( node->attributes, *this ); } maybeMutate_impl( node->parameters, *this ); maybeMutate_impl( node->members   , *this ); maybeMutate_impl( node->attributes, *this ); } //-------------------------------------------------------------------------- // DimensionExpr template< typename pass_type > void PassVisitor< pass_type >::visit( DimensionExpr * node ) { VISIT_START( node ); indexerScopedAccept( node->result, *this ); VISIT_END( node ); } 
template< typename pass_type > void PassVisitor< pass_type >::visit( const DimensionExpr * node ) { VISIT_START( node ); indexerScopedAccept( node->result, *this ); VISIT_END( node ); } template< typename pass_type > Expression * PassVisitor< pass_type >::mutate( DimensionExpr * node ) { MUTATE_START( node ); indexerScopedMutate( node->env   , *this ); indexerScopedMutate( node->result, *this ); MUTATE_END( Expression, node ); } //-------------------------------------------------------------------------- // AsmExpr template< typename pass_type > maybeAccept_impl( node->forall, *this ); // xxx - should PointerType visit/mutate dimension? maybeAccept_impl( node->dimension, *this ); maybeAccept_impl( node->base, *this ); maybeAccept_impl( node->forall, *this ); // xxx - should PointerType visit/mutate dimension? maybeAccept_impl( node->dimension, *this ); maybeAccept_impl( node->base, *this ); maybeMutate_impl( node->forall, *this ); // xxx - should PointerType visit/mutate dimension? maybeMutate_impl( node->dimension, *this ); maybeMutate_impl( node->base, *this ); //-------------------------------------------------------------------------- // Attribute // Constant template< typename pass_type > void PassVisitor< pass_type >::visit( Constant * node ) {
• ## src/InitTweak/InitTweak.cc

 r5a46e09 // Created On       : Fri May 13 11:26:36 2016 // Last Modified By : Peter A. Buhr // Last Modified On : Fri Dec 13 23:15:52 2019 // Update Count     : 8 // Last Modified On : Wed Jun 16 20:57:22 2021 // Update Count     : 18 // void addDataSectonAttribute( ObjectDecl * objDecl ) { objDecl->attributes.push_back(new Attribute("section", { new ConstantExpr( Constant::from_string(".data#") ), })); new ConstantExpr( Constant::from_string(".data" #if defined( __x86_64 ) || defined( __i386 ) // assembler comment to prevent assembler warning message "#" #else // defined( __ARM_ARCH ) "//" #endif ))})); } void addDataSectionAttribute( ast::ObjectDecl * objDecl ) { objDecl->attributes.push_back(new ast::Attribute("section", { ast::ConstantExpr::from_string(objDecl->location, ".data#"), })); ast::ConstantExpr::from_string(objDecl->location, ".data" #if defined( __x86_64 ) || defined( __i386 ) // assembler comment to prevent assembler warning message "#" #else // defined( __ARM_ARCH ) "//" #endif )})); }
• ## src/Parser/DeclarationNode.cc

 r5a46e09 if ( variable.tyClass != TypeDecl::NUMBER_OF_KINDS ) { // otype is internally converted to dtype + otype parameters static const TypeDecl::Kind kindMap[] = { TypeDecl::Dtype, TypeDecl::DStype, TypeDecl::Dtype, TypeDecl::Ftype, TypeDecl::Ttype, TypeDecl::Dtype }; static const TypeDecl::Kind kindMap[] = { TypeDecl::Dtype, TypeDecl::Dtype, TypeDecl::Dtype, TypeDecl::Ftype, TypeDecl::Ttype, TypeDecl::Dimension }; static_assert( sizeof(kindMap) / sizeof(kindMap[0]) == TypeDecl::NUMBER_OF_KINDS, "DeclarationNode::build: kindMap is out of sync." ); assertf( variable.tyClass < sizeof(kindMap)/sizeof(kindMap[0]), "Variable's tyClass is out of bounds." ); TypeDecl * ret = new TypeDecl( *name, Type::StorageClasses(), nullptr, kindMap[ variable.tyClass ], variable.tyClass == TypeDecl::Otype || variable.tyClass == TypeDecl::ALtype, variable.initializer ? variable.initializer->buildType() : nullptr ); TypeDecl * ret = new TypeDecl( *name, Type::StorageClasses(), nullptr, kindMap[ variable.tyClass ], variable.tyClass == TypeDecl::Otype || variable.tyClass == TypeDecl::DStype, variable.initializer ? variable.initializer->buildType() : nullptr ); buildList( variable.assertions, ret->get_assertions() ); return ret;
• ## src/Parser/ExpressionNode.cc

 r5a46e09 } // build_varref DimensionExpr * build_dimensionref( const string * name ) { DimensionExpr * expr = new DimensionExpr( *name ); delete name; return expr; } // build_varref // TODO: get rid of this and OperKinds and reuse code from OperatorTable static const char * OperName[] = {                                              // must harmonize with OperKinds
• ## src/Parser/ParseNode.h

 r5a46e09 NameExpr * build_varref( const std::string * name ); DimensionExpr * build_dimensionref( const std::string * name ); Expression * build_cast( DeclarationNode * decl_node, ExpressionNode * expr_node );
• ## src/Parser/TypedefTable.cc

 r5a46e09 // Created On       : Sat May 16 15:20:13 2015 // Last Modified By : Peter A. Buhr // Last Modified On : Mon Mar 15 20:56:47 2021 // Update Count     : 260 // Last Modified On : Wed May 19 08:30:14 2021 // Update Count     : 262 // switch ( kind ) { case IDENTIFIER: return "identifier"; case TYPEDIMname: return "typedim"; case TYPEDEFname: return "typedef"; case TYPEGENname: return "typegen";
• ## src/Parser/lex.ll

 r5a46e09 * Created On       : Sat Sep 22 08:58:10 2001 * Last Modified By : Peter A. Buhr * Last Modified On : Thu Apr  1 13:22:31 2021 * Update Count     : 754 * Last Modified On : Sun Jun 20 18:41:09 2021 * Update Count     : 759 */ hex_constant {hex_prefix}{hex_digits}{integer_suffix_opt} // GCC: D (double) and iI (imaginary) suffixes, and DL (long double) // GCC: floating D (double), imaginary iI, and decimal floating DF, DD, DL exponent "_"?[eE]"_"?[+-]?{decimal_digits} floating_size 16|32|32x|64|64x|80|128|128x floating_length ([fFdDlLwWqQ]|[fF]{floating_size}) floating_suffix ({floating_length}?[iI]?)|([iI]{floating_length}) floating_suffix_opt ("_"?({floating_suffix}|"DL"))? decimal_floating_suffix [dD][fFdDlL] floating_suffix_opt ("_"?({floating_suffix}|{decimal_floating_suffix}))? decimal_digits ({decimal})|({decimal}({decimal}|"_")*{decimal}) floating_decimal {decimal_digits}"."{exponent}?{floating_suffix_opt} continue                { KEYWORD_RETURN(CONTINUE); } coroutine               { KEYWORD_RETURN(COROUTINE); }                  // CFA _Decimal32              { KEYWORD_RETURN(DECIMAL32); }                  // GCC _Decimal64              { KEYWORD_RETURN(DECIMAL64); }                  // GCC _Decimal128             { KEYWORD_RETURN(DECIMAL128); }                 // GCC default                 { KEYWORD_RETURN(DEFAULT); } disable                 { KEYWORD_RETURN(DISABLE); }                    // CFA
• ## src/Parser/parser.yy

 r5a46e09 // Created On       : Sat Sep  1 20:22:55 2001 // Last Modified By : Peter A. Buhr // Last Modified On : Mon Apr 26 18:41:54 2021 // Update Count     : 4990 // Last Modified On : Tue Jun 29 09:12:47 2021 // Update Count     : 5027 // // The root language for this grammar is ANSI99/11 C. All of ANSI99/11 is parsed, except for: // // 1. designation with '=' (use ':' instead) // // Most of the syntactic extensions from ANSI90 to ANSI11 C are marked with the comment "C99/C11". This grammar also has // two levels of extensions. The first extensions cover most of the GCC C extensions, except for: // // 1. designation with and without '=' (use ':' instead) // // All of the syntactic extensions for GCC C are marked with the comment "GCC". The second extensions are for Cforall // (CFA), which fixes several of C's outstanding problems and extends C with many modern language concepts. All of the // syntactic extensions for CFA C are marked with the comment "CFA". As noted above, there is one unreconcileable // parsing problem between C99 and CFA with respect to designators; this is discussed in detail before the "designation" // grammar rule. //   designation with '=' (use ':' instead) // // This incompatibility is discussed in detail before the "designation" grammar rule.  Most of the syntactic extensions // from ANSI90 to ANSI11 C are marked with the comment "C99/C11". // This grammar also has two levels of extensions. The first extensions cover most of the GCC C extensions All of the // syntactic extensions for GCC C are marked with the comment "GCC". The second extensions are for Cforall (CFA), which // fixes several of C's outstanding problems and extends C with many modern language concepts. All of the syntactic // extensions for CFA C are marked with the comment "CFA". 
%{ %token INT128 UINT128 uuFLOAT80 uuFLOAT128                              // GCC %token uFLOAT16 uFLOAT32 uFLOAT32X uFLOAT64 uFLOAT64X uFLOAT128 // GCC %token DECIMAL32 DECIMAL64 DECIMAL128                                   // GCC %token ZERO_T ONE_T                                                                             // CFA %token SIZEOF TYPEOF VALIST AUTO_TYPE                                   // GCC // names and constants: lexer differentiates between identifier and typedef names %token IDENTIFIER          QUOTED_IDENTIFIER       TYPEDEFname             TYPEGENname %token IDENTIFIER          QUOTED_IDENTIFIER       TYPEDIMname             TYPEDEFname             TYPEGENname %token TIMEOUT                     WOR                                     CATCH                   RECOVER                 CATCHRESUME             FIXUP           FINALLY         // CFA %token INTEGERconstant     CHARACTERconstant       STRINGliteral | quasi_keyword { $$= new ExpressionNode( build_varref( 1 ) ); } | TYPEDIMname // CFA, generic length argument // {$$ = new ExpressionNode( new TypeExpr( maybeMoveBuildType( DeclarationNode::newFromTypedef( $1 ) ) ) ); } // { $$= new ExpressionNode( build_varref( 1 ) ); } {$$ = new ExpressionNode( build_dimensionref($1 ) ); } | tuple | '(' comma_expression ')' postfix_expression: primary_expression | postfix_expression '[' assignment_expression ',' comma_expression ']' // { $$= new ExpressionNode( build_binary_val( OperKinds::Index, 1, new ExpressionNode( build_binary_val( OperKinds::Index, 3, 5 ) ) ) ); } { SemanticError( yylloc, "New array subscript is currently unimplemented." );$$ = nullptr; } | postfix_expression '[' assignment_expression ',' tuple_expression_list ']' // Historic, transitional: Disallow commas in subscripts. // Switching to this behaviour may help check if a C compatibilty case uses comma-exprs in subscripts. // { SemanticError( yylloc, "New array subscript is currently unimplemented." 
); $$= nullptr; } // Current: Commas in subscripts make tuples. {$$ = new ExpressionNode( build_binary_val( OperKinds::Index, $1, new ExpressionNode( build_tuple( (ExpressionNode *)($3->set_last( $5 ) ) )) ) ); } | postfix_expression '[' assignment_expression ']' // CFA, comma_expression disallowed in this context because it results in a common user error: subscripting a | uFLOAT128 { $$= DeclarationNode::newBasicType( DeclarationNode::uFloat128 ); } | DECIMAL32 { SemanticError( yylloc, "_Decimal32 is currently unimplemented." );$$ = nullptr; } | DECIMAL64 { SemanticError( yylloc, "_Decimal64 is currently unimplemented." ); $$= nullptr; } | DECIMAL128 { SemanticError( yylloc, "_Decimal128 is currently unimplemented." );$$ = nullptr; } | COMPLEX // C99 { $$= DeclarationNode::newComplexType( DeclarationNode::Complex ); } // empty {$$ = nullptr; } | vtable; | vtable ; | '[' identifier_or_type_name ']' { typedefTable.addToScope( *$2, TYPEDEFname, "9" ); $$= DeclarationNode::newTypeParam( TypeDecl::ALtype, 2 ); typedefTable.addToScope( *2, TYPEDIMname, "9" );$$ = DeclarationNode::newTypeParam( TypeDecl::Dimension, $2 ); } // | type_specifier identifier_parameter_declarator | '*' { $$= TypeDecl::DStype; } // dtype + sized // | '(' '*' ')' // {$$ = TypeDecl::Ftype; } | ELLIPSIS { $$= TypeDecl::Ttype; } {$$ = new ExpressionNode( new TypeExpr( maybeMoveBuildType($1 ) ) ); } | assignment_expression { SemanticError( yylloc, toString("Expression generic parameters are currently unimplemented: ", $1->build()) ); $$= nullptr; } | type_list ',' type {$$ = (ExpressionNode *)($1->set_last( new ExpressionNode( new TypeExpr( maybeMoveBuildType( $3 ) ) ) )); } | type_list ',' assignment_expression { SemanticError( yylloc, toString("Expression generic parameters are currently unimplemented: ",$3->build()) ); $$= nullptr; } // {$$ = (ExpressionNode *)( $1->set_last($3 )); } {  = (ExpressionNode *)( $1->set_last($3 )); } ;
• ## src/SymTab/Indexer.cc

 r5a46e09 } Indexer::Indexer() Indexer::Indexer( bool trackIdentifiers ) : idTable(), typeTable(), structTable(), enumTable(), unionTable(), traitTable(), prevScope(), scope( 0 ), repScope( 0 ) { ++* stats().count; } prevScope(), scope( 0 ), repScope( 0 ), trackIdentifiers( trackIdentifiers ) { ++* stats().count; } Indexer::~Indexer() { void Indexer::lookupId( const std::string & id, std::list< IdData > &out ) const { assert( trackIdentifiers ); ++* stats().lookup_calls; if ( ! idTable ) return; const Declaration * deleteStmt ) { ++* stats().add_calls; if ( ! trackIdentifiers ) return; const std::string &name = decl->name; if ( name == "" ) return;
• ## src/SymTab/Indexer.h

 r5a46e09 class Indexer : public std::enable_shared_from_this { public: explicit Indexer(); explicit Indexer( bool trackIdentifiers = true ); virtual ~Indexer(); /// returns true if there exists a declaration with C linkage and the given name with a different mangled name bool hasIncompatibleCDecl( const std::string & id, const std::string & mangleName ) const; bool trackIdentifiers; }; } // namespace SymTab
• ## src/SymTab/Validate.cc

 r5a46e09 struct FixQualifiedTypes final : public WithIndexer { FixQualifiedTypes() : WithIndexer(false) {} Type * postmutate( QualifiedType * ); }; }; /// Does early resolution on the expressions that give enumeration constants their values struct ResolveEnumInitializers final : public WithIndexer, public WithGuards, public WithVisitorRef, public WithShortCircuiting { ResolveEnumInitializers( const Indexer * indexer ); void postvisit( EnumDecl * enumDecl ); private: const Indexer * local_indexer;