# Changeset 9c35431

Timestamp:
Nov 29, 2017, 2:00:32 PM
Branches:
arm-eh, cleanup-dtors, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr
Children:
c13e8dc8
Parents:
c6e2c18 (diff), 389528b0 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into cleanup-dtors

Files:
1 deleted
46 edited

• ## .gitignore

 rc6e2c18
    include
    share
    *.class
    # src executables, for lib and bin
• ## doc/proposals/concurrency/.gitignore

 rc6e2c18
    build/*.ind
    build/*.ist
    build/*.lof
    build/*.log
    build/*.lol
    build/*.lot
    build/*.out
    build/*.ps
• ## doc/proposals/concurrency/Makefile

 rc6e2c18
    style/cfa-format \
    annex/glossary \
    text/frontpgs \
    text/intro \
    text/basics \

• ## doc/proposals/concurrency/annex/local.bib

 rc6e2c18
    keywords     = {Intel, TBB},
    title        = {Intel Thread Building Blocks},
    note         = "\url{https://www.threadingbuildingblocks.org/}"
    }
    title        = {TwoHardThings},
    author       = {Martin Fowler},
    howpublished = "\url{https://martinfowler.com/bliki/TwoHardThings.html}",
    year         = 2009
    }
    @book{Herlihy93,
    title        = {Transactional memory: Architectural support for lock-free data structures},
    author       = {Herlihy, Maurice and Moss, J Eliot B},
    volume       = {21},
    number       = {2},
    year         = {1993},
    publisher    = {ACM}
    }
    @manual{affinityLinux,
    title        = "{Linux man page - sched\_setaffinity(2)}"
    }
    @manual{affinityWindows,
    title        = "{Windows (vs.85) - SetThreadAffinityMask function}"
    }
    @manual{switchToWindows,
    title        = "{Windows (vs.85) - SwitchToFiber function}"
    }
    @manual{affinityFreebsd,
    title        = "{FreeBSD General Commands Manual - CPUSET(1)}"
    }
    @manual{affinityNetbsd,
    title        = "{NetBSD Library Functions Manual - AFFINITY(3)}"
    }
    @manual{affinityMacosx,
    title        = "{Affinity API Release Notes for OS X v10.5}"
    }
    @misc{NodeJs,
    title        = "{Node.js}",
    howpublished = "\url{https://nodejs.org/en/}",
    }
    @misc{SpringMVC,
    title        = "{Spring Web MVC}",
    howpublished = "\url{https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html}",
    }
    @misc{Django,
    title        = "{Django}",
    howpublished = "\url{https://www.djangoproject.com/}",
    }
• ## doc/proposals/concurrency/figures/ext_monitor.fig

 rc6e2c18 [xfig coordinate data; recoverable text labels: "variables", "Acceptables", and the condition names "W", "X", "Z", "Y" (repositioned).]
• ## doc/proposals/concurrency/figures/int_monitor.fig

 rc6e2c18 [xfig coordinate data; recoverable text labels: "entry", "Condition", "routine ptrs", "active thread", "blocked thread".]
• ## doc/proposals/concurrency/style/cfa-format.tex

 rc6e2c18
    language = C, style=defaultStyle, captionpos=b, #1 }
    language = CFA, style=cfaStyle, captionpos=b, #1 }
    language = pseudo, style=pseudoStyle, captionpos=b, #1 }
    language = c++, style=defaultStyle, captionpos=b, #1 }
    language = java, style=defaultStyle, captionpos=b, #1 }
    language = scala, style=defaultStyle, captionpos=b, #1 }
    language = sml, style=defaultStyle, captionpos=b, #1 }
    language = D, style=defaultStyle, captionpos=b, #1 }
    language = rust, style=defaultStyle, captionpos=b, #1 }
    language = Golang, style=defaultStyle, captionpos=b, #1 }

• ## doc/proposals/concurrency/text/cforall.tex

 rc6e2c18
% ======================================================================
\chapter{\CFA Overview}
% ======================================================================

The following is a quick introduction to the \CFA language, specifically tailored to the features needed to support concurrency.

\CFA is an extension of ISO-C and therefore supports all of the same paradigms as C. It is a non-object-oriented system-language, meaning most of the major abstractions have either no runtime overhead or can be opted out of easily. Like C, the basics of \CFA revolve around structures and routines, which are thin abstractions over machine code. The vast majority of the code produced by the \CFA translator respects the memory layouts and calling conventions laid out by C. Interestingly, while \CFA is not an object-oriented language, lacking the concept of a receiver (e.g., {\tt this}), it does have some notion of objects\footnote{C defines the term objects as: ``region of data storage in the execution environment, the contents of which can represent values''~\cite[3.15]{C11}}, most importantly construction and destruction of objects. Most of the following code examples can be found on the \CFA website~\cite{www-cfa}.

% ======================================================================
\section{References}

Like \CC, \CFA introduces rebindable references providing multiple dereferencing as an alternative to pointers.
In regards to concurrency, the semantic difference between pointers and references is not particularly relevant, but since this document mostly uses references, here is a quick overview of the semantics:

\begin{cfacode}
int x, *p1 = &x, **p2 = &p1, ***p3 = &p2,
*p3 = ...;                              //change p2
int y, z, & ar[3] = {x, y, z};          //initialize array of references
typeof( ar[1]) p;                       //is int, referenced object type
typeof(&ar[1]) q;                       //is int &, reference type
sizeof( ar[1]) == sizeof(int);          //is true, referenced object size
sizeof(&ar[1]) == sizeof(int *);        //is true, reference size
\end{cfacode}

The important take-away from this code example is that a reference offers a handle to an object, much like a pointer, but which is automatically dereferenced for convenience.

% ======================================================================
\section{Overloading}

Another important feature of \CFA is function overloading as in Java and \CC, where routines with the same name are selected based on the number and type of the arguments. As well, \CFA uses the return type as part of the selection criteria, as in Ada~\cite{Ada}. For routines with multiple parameters and returns, the selection is complex.

\begin{cfacode}
//selection based on type and number of parameters
...
\end{cfacode}

This feature is particularly important for concurrency since the runtime system relies on creating different types to represent concurrency objects. Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions that prevent name clashes. As seen in chapter \ref{basics}, routine \code{main} is an example that benefits from overloading.
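The body of the overloading example is elided in this hunk; a minimal sketch of the selection rules described above, using hypothetical routines, might look like:

\begin{cfacode}
//sketch only: hypothetical routines, not the original example
void f(void);        //(1)
void f(char);        //(2)
void f(int, double); //(3)

f();                 //selects (1): no arguments
f('a');              //selects (2): one char argument
f(3, 5.2);           //selects (3): two arguments, int and double

char g(int);         //(4)
double g(int);       //(5)
char   c = g(3);     //selects (4): char return type
double d = g(3);     //selects (5): double return type
\end{cfacode}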
% ======================================================================
\section{Operators}

Overloading also extends to operators. The syntax for denoting operator-overloading is to name a routine with the symbol of the operator and question marks where the arguments of the operation occur, e.g.:

...

While concurrency does not use operator overloading directly, this feature is more important as an introduction to the syntax of constructors.

% ======================================================================
\section{Constructors/Destructors}

Object lifetime is often a challenge in concurrency. \CFA uses the approach of giving concurrent meaning to object lifetime as a means of synchronization and/or mutual exclusion. Since \CFA relies heavily on the lifetime of objects, constructors and destructors are a core feature required for concurrency and parallelism. \CFA uses the following syntax for constructors and destructors:

\begin{cfacode}
...
int main() {
    S x = {10}, y = {100};  //implicit calls: ?{}(x, 10), ?{}(y, 100)
    ...                     //use x and y
    ^x{};  ^y{};            //explicit calls to de-initialize
    x{20};  y{200};         //explicit calls to reinitialize
    ...                     //reuse x and y
}                           //implicit calls: ^?{}(y), ^?{}(x)
\end{cfacode}

The language guarantees that every object and all their fields are constructed. Like \CC, construction of an object is automatically done on allocation and destruction of the object is done on deallocation. Allocation and deallocation can occur on the stack or on the heap.

\begin{cfacode}
...
delete(s);                              //deallocation, call destructor
\end{cfacode}

Note that like \CC, \CFA introduces \code{new} and \code{delete}, which behave like \code{malloc} and \code{free} in addition to constructing and destructing objects, after calling \code{malloc} and before calling \code{free}, respectively.

% ======================================================================
\section{Parametric Polymorphism}

Routines in \CFA can also be reused for multiple types. This capability is done using the \code{forall} clause, which gives \CFA its name. \code{forall} clauses allow separately compiled routines to support generic usage over multiple types. For example, the following sum function works for any type that supports construction from 0 and addition:

\begin{cfacode}
//constraint type, 0 and +
...
\end{cfacode}

Note that the type used for assertions can be either an \code{otype} or a \code{dtype}. Types declared as \code{otype} refer to ``complete'' objects, i.e., objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator. Using \code{dtype}, on the other hand, has none of these assumptions but is extremely restrictive: it only guarantees the object is addressable.
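The sum routine itself is elided above; based on the stated constraints (construction from 0 and addition), a sketch could look as follows, though the exact assertion syntax here is an assumption:

\begin{cfacode}
//sketch: a type T usable with sum needs construction from 0 and +
forall(otype T | { void ?{}(T &, zero_t); T ?+?(T, T); })
T sum(size_t n, T a[]) {
    T total = 0;                 //construction from 0
    for (size_t i = 0; i < n; i += 1)
        total = total + a[i];    //select the appropriate +
    return total;
}
\end{cfacode}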
% ======================================================================
\section{with Clause/Statement}

Since \CFA lacks the concept of a receiver, certain functions end up needing to repeat variable names often. To remove this inconvenience, \CFA provides the \code{with} statement, which opens an aggregate scope making its fields directly accessible (like Pascal).

\begin{cfacode}
struct S { int i, j; };

int mem(S & this) with (this) {         //with clause
    i = 1;                              //this->i
    j = 2;                              //this->j
}

int foo() {
    struct S1 { ... } s1;
    struct S2 { ... } s2;
    with (s1) {                         //with statement
        //access fields of s1 without qualification
        with (s2) {                     //nesting
            //access fields of s1 and s2 without qualification
        }
    }
    with (s1, s2) {                     //scopes open in parallel
        //access fields of s1 and s2 without qualification
    }
}
\end{cfacode}
• ## doc/proposals/concurrency/text/concurrency.tex

 rc6e2c18
% ======================================================================

Several tools can be used to solve concurrency challenges. Since many of these challenges appear with the use of mutable shared state, some languages and libraries simply disallow mutable shared state (Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka (Scala)~\cite{Akka}). In these paradigms, interaction among concurrent objects relies on message passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely related to networking concepts (channels~\cite{CSP,Go} for example). However, in languages that use routine calls as their core abstraction mechanism, these approaches force a clear distinction between concurrent and non-concurrent paradigms (i.e., message passing versus routine call). This distinction in turn means that, in order to be effective, programmers need to learn two sets of design patterns. While this distinction can be hidden away in library code, effective use of the library still has to take both paradigms into account.

Approaches based on shared memory are more closely related to non-concurrent paradigms since they often rely on basic constructs like routine calls and shared objects. At the lowest level, concurrent paradigms are implemented as atomic operations and locks. Many such mechanisms have been proposed, including semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}. However, for productivity reasons it is desirable to have a higher-level construct be the core concurrency paradigm~\cite{HPP:Study}.
An approach that is worth mentioning because it is gaining in popularity is transactional memory~\cite{Herlihy93}. While this approach is even pursued by system languages like \CC~\cite{Cpp-Transactions}, the performance and feature set is currently too restrictive to be the main concurrency paradigm for systems languages, which is why it was rejected as the core paradigm for concurrency in \CFA.

One of the most natural, elegant, and efficient mechanisms for synchronization and communication, especially for shared-memory systems, is the \emph{monitor}. Monitors were first proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}. Many programming languages---e.g., Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}---provide monitors as explicit language constructs. In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as semaphores or locks to simulate monitors. For these reasons, this project proposes monitors as the core concurrency construct.

\section{Basics}

Non-determinism requires concurrent systems to offer support for mutual exclusion and synchronization. Mutual exclusion is the concept that only a fixed number of threads can access a critical section at any given time, where a critical section is a group of instructions on an associated portion of data that requires the restricted access. On the other hand, synchronization enforces relative ordering of execution, and synchronization tools provide numerous mechanisms to establish timing relationships among threads.

\subsection{Mutual Exclusion}
As mentioned above, mutual exclusion is the guarantee that only a fixed number of threads can enter a critical section at once. However, many solutions exist for mutual exclusion, which vary in terms of performance, flexibility and ease of use. Methods range from low-level locks, which are fast and flexible but require significant attention to be correct, to higher-level mutual-exclusion methods, which sacrifice some performance in order to improve ease of use. Ease of use comes by either guaranteeing some problems cannot occur (e.g., being deadlock free) or by offering a more explicit coupling between data and the corresponding critical section. For example, the \CC \code{std::atomic} template offers an easy way to express mutual exclusion on a restricted set of operations (e.g., reading/writing large types atomically). Another challenge with low-level locks is composability. Locks have restricted composability because it takes careful organizing for multiple locks to be used while preventing deadlocks. Easing composability is another feature higher-level mutual-exclusion mechanisms often offer.

\subsection{Synchronization}

As for mutual exclusion, low-level synchronization primitives often offer good performance and good flexibility at the cost of ease of use. Again, higher-level mechanisms often simplify usage by adding better coupling between synchronization and data (e.g., message passing) or by offering a simpler solution to otherwise involved challenges. As mentioned above, synchronization can be expressed as guaranteeing that event \textit{X} always happens before \textit{Y}. Most of the time, synchronization happens within a critical section, where threads must acquire mutual exclusion in a certain order. However, it may also be desirable to guarantee that event \textit{Z} does not occur between \textit{X} and \textit{Y}. Not satisfying this property is called barging.
For example, where event \textit{X} tries to effect event \textit{Y} but another thread acquires the critical section and emits \textit{Z} before \textit{Y}. The classic example is the thread that finishes using a resource and unblocks a thread waiting to use the resource, but the unblocked thread must compete again to acquire the resource. Preventing or detecting barging is an involved challenge with low-level locks, which can be made much easier by higher-level constructs. This challenge is often split into two different methods: barging avoidance and barging prevention. Algorithms that use flag variables to detect barging threads are said to be using barging avoidance, while algorithms that baton-pass locks~\cite{Andrews89} between threads instead of releasing the locks are said to be using barging prevention.

% ======================================================================

\begin{cfacode}
typedef /*some monitor type*/ monitor;
int f(monitor& m);

int main() {
...
\end{cfacode}

% ======================================================================

The above monitor example displays some of the intrinsic characteristics. First, it is necessary to use pass-by-reference over pass-by-value for monitor routines. This semantics is important because, at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied. Therefore, monitors are implicitly non-copyable objects (\code{dtype}).

Another aspect to consider is when a monitor acquires its mutual exclusion. For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor's mutual exclusion on entry. Pass-through can occur for generic helper routines (\code{swap}, \code{sort}, etc.) or specific helper routines like the following to implement an atomic counter:

...
\end{tabular}
\end{center}
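The counter implementation is elided from this hunk; going by the description that follows, the interface is presumably along these lines (a sketch, not the original code):

\begin{cfacode}
monitor counter_t { size_t value; };             //counter data protected by the monitor

void ?{}(counter_t & nomutex this);              //constructor: no lock, object not yet shared
size_t ++?(counter_t & mutex this);              //prefix increment: needs mutual exclusion
void ?{}(size_t * this, counter_t & mutex cnt);  //conversion to size_t; mutex need is
                                                 //platform dependent
\end{cfacode}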
Notice how the counter is used without any explicit synchronization and yet supports thread-safe semantics for both reading and writing, which is similar in usage to the \CC \code{atomic} template. Here, the constructor (\code{?\{\}}) uses the \code{nomutex} keyword to signify that it does not acquire the monitor mutual-exclusion when constructing. This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion. Furthermore, it allows the implementation greater freedom when it initializes the monitor locking. The prefix increment operator uses \code{mutex} to protect the incrementing process from race conditions. Finally, there is a conversion operator from \code{counter_t} to \code{size_t}. This conversion may or may not require the \code{mutex} keyword depending on whether or not reading a \code{size_t} is an atomic operation.

For maximum usability, monitors use \gls{multi-acq} semantics, which means a single thread can acquire the same monitor multiple times without deadlock. For example, listing \ref{fig:search} uses recursion and \gls{multi-acq} to print values inside a binary tree.

\begin{figure}
\begin{cfacode}[caption={Recursive printing algorithm using \gls{multi-acq}.},label={fig:search}]
monitor printer { ... };
struct tree {
...
}
\end{cfacode}
\end{figure}

Having both \code{mutex} and \code{nomutex} keywords is redundant based on the meaning of a routine having neither of these keywords. For example, it is reasonable that it should default to the safest option (\code{mutex}) when given a routine without qualifiers \code{void foo(counter_t & this)}, whereas assuming \code{nomutex} is unsafe and may cause subtle errors. On the other hand, \code{nomutex} is the ``normal'' parameter behaviour; it effectively states explicitly that this routine is not ``special''. Another alternative is making exactly one of these keywords mandatory, which provides the same semantics but without the ambiguity of supporting routines with neither keyword. Mandatory keywords would also have the added benefit of being self-documenting but at the cost of extra typing.
While there are several benefits to mandatory keywords, they do bring a few challenges. Mandatory keywords in \CFA would imply that the compiler must know without doubt whether or not a parameter is a monitor. Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred. For this reason, \CFA only has the \code{mutex} keyword and uses no keyword to mean \code{nomutex}.

The next semantic decision is to establish when \code{mutex} may be used as a type qualifier. Consider the following declarations:

\begin{cfacode}
int f1(monitor& mutex m);
int f2(const monitor & mutex m);
int f3(monitor** mutex m);
int f4(monitor* mutex m []);
int f5(graph(monitor*) & mutex m);
\end{cfacode}

The problem is to identify which object(s) should be acquired. Furthermore, each object needs to be acquired only once. In the case of simple routines like \code{f1} and \code{f2} it is easy to identify an exhaustive list of objects to acquire on entry. Adding indirections (\code{f3}) still allows the compiler and programmer to identify which object is acquired. However, adding in arrays (\code{f4}) makes it much harder. Array lengths are not necessarily known in C, and even then, making sure objects are only acquired once becomes non-trivial. This problem can be extended to absurd limits like \code{f5}, which uses a graph of monitors. To make the issue tractable, this project imposes the requirement that a routine may only acquire one monitor per parameter and it must be the type of the parameter with at most one level of indirection (ignoring potential qualifiers).
Also note that while routine \code{f3} can be supported, meaning that monitor \code{**m} is acquired, passing an array to this routine would be type safe and yet result in undefined behaviour because only the first element of the array is acquired. However, this ambiguity is part of the C type-system with respect to arrays. For this reason, \code{mutex} is disallowed in the context where arrays may be passed:

\begin{cfacode}
int f1(monitor& mutex m);     //Okay : recommended case
int f2(monitor* mutex m);     //Okay : could be an array but probably not
int f3(monitor mutex m []);   //Not Okay : Array of unknown length
int f4(monitor** mutex m);    //Not Okay : Could be an array
int f5(monitor* mutex m []);  //Not Okay : Array of unknown length
\end{cfacode}

Note that not all array functions are actually distinct in the type system. However, even if the code generation could tell the difference, the extra information is still not sufficient to extend the monitor call semantics meaningfully.

...
f(a,b);
\end{cfacode}

While OO monitors could be extended with a mutex qualifier for multiple-monitor calls, no example of this feature could be found. The capability to acquire multiple locks before entering a critical section is called \emph{\gls{bulk-acq}}. In practice, writing multi-locking routines that do not lead to deadlocks is tricky. Having language support for such a feature is therefore a significant asset for \CFA. In the case presented above, \CFA guarantees that the order of acquisition is consistent across calls to different routines using the same monitors as arguments. This consistent ordering means acquiring multiple monitors is safe from deadlock when using \gls{bulk-acq}. However, users can still force the acquiring order. For example, notice which routines use \code{mutex}/\code{nomutex} and how this affects acquiring order:

\begin{cfacode}
void foo(A& mutex a, B& mutex b) { //acquire a & b
    ...
}

void bar(A& mutex a, B& /*nomutex*/ b) { //acquire a
    ... foo(a, b); ... //acquire b
}

void baz(A& /*nomutex*/ a, B& mutex b) { //acquire b
    ... foo(a, b); ... //acquire a
}
\end{cfacode}

The \gls{multi-acq} monitor lock allows a monitor lock to be acquired by both \code{bar} or \code{baz} and acquired again in \code{foo}. In the calls to \code{bar} and \code{baz} the monitors are acquired in opposite order.
This subtle mistake means that calling these routines concurrently may lead to deadlock and is therefore undefined behavior. As shown\cite{Lister77}, solving this problem requires: However, such use leads to the lock acquiring order problem. In the example above, the user uses implicit ordering in the case of function \code{foo} but explicit ordering in the case of \code{bar} and \code{baz}. This subtle difference means that calling these routines concurrently may lead to deadlock and is therefore Undefined Behavior. As shown~\cite{Lister77}, solving this problem requires: \begin{enumerate} \item Dynamically tracking of the monitor-call order. \item Implement rollback semantics. \end{enumerate} While the first requirement is already a significant constraint on the system, implementing a general rollback semantics in a C-like language is still prohibitively complex \cite{Dice10}. In \CFA, users simply need to be carefull when acquiring multiple monitors at the same time or only use \gls{bulk-acq} of all the monitors. While \CFA provides only a partial solution, many system provide no solution and the \CFA partial solution handles many useful cases. While the first requirement is already a significant constraint on the system, implementing a general rollback semantics in a C-like language is still prohibitively complex~\cite{Dice10}. In \CFA, users simply need to be careful when acquiring multiple monitors at the same time or only use \gls{bulk-acq} of all the monitors. While \CFA provides only a partial solution, most systems provide no solution and the \CFA partial solution handles many useful cases. For example, \gls{multi-acq} and \gls{bulk-acq} can be used together in interesting ways: } \end{cfacode} This example shows a trivial solution to the bank-account transfer-problem\cite{BankTransfer}. Without \gls{multi-acq} and \gls{bulk-acq}, the solution to this problem is much more involved and requires carefull engineering. This example shows a trivial solution to the bank-account transfer-problem~\cite{BankTransfer}. Without \gls{multi-acq} and \gls{bulk-acq}, the solution to this problem is much more involved and requires careful engineering. \subsection{\code{mutex} statement} \label{mutex-stmt} The call semantics discussed aboved have one software engineering issue, only a named routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the \code{mutex} statement to workaround the need for unnecessary names, avoiding a major software engineering problem\cite{2FTwoHardThings}. Listing \ref{lst:mutex-stmt} shows an example of the \code{mutex} statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired. Beyond naming, the \code{mutex} statement has no semantic difference from a routine call with \code{mutex} parameters. \begin{figure} The call semantics discussed above have one software engineering issue, only a named routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the \code{mutex} statement to workaround the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}. Table \ref{lst:mutex-stmt} shows an example of the \code{mutex} statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired. Beyond naming, the \code{mutex} statement has no semantic difference from a routine call with \code{mutex} parameters. 
\subsection{\code{mutex} statement} \label{mutex-stmt}

The call semantics discussed above have one software engineering issue: only a named routine can acquire the mutual exclusion of a set of monitors. \CFA offers the \code{mutex} statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}. Table \ref{lst:mutex-stmt} shows an example of the \code{mutex} statement, which introduces a new scope in which the mutual exclusion of a set of monitors is acquired. Beyond naming, the \code{mutex} statement has no semantic difference from a routine call with \code{mutex} parameters.

\begin{table}
\begin{center}
\begin{tabular}{|c|c|}
\begin{cfacode}[tabsize=3]
monitor M {};

void foo( M & mutex m1, M & mutex m2 ) {
    //critical section
}

void bar( M & m1, M & m2 ) {
    foo( m1, m2 );
}
\end{cfacode}&\begin{cfacode}[tabsize=3]
monitor M {};

void bar( M & m1, M & m2 ) {
    mutex(m1, m2) {
        //critical section
    }
}
\end{cfacode}
...
\end{tabular}
\end{center}
\caption{Regular call semantics vs. \code{mutex} statement}
\label{lst:mutex-stmt}
\end{table}

% ======================================================================

\begin{cfacode}
...
};
\end{cfacode}

Note that the destructor of a monitor must be a \code{mutex} routine to prevent deallocation while a thread is accessing the monitor. As with any object, calls to a monitor, using \code{mutex} or otherwise, are Undefined Behaviour after the destructor has run.
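As a small illustration of this rule, a monitor destructor could be declared as follows (a sketch):

\begin{cfacode}
monitor M { ... };

void ^?{}(M & mutex this) {  //destructor is a mutex routine: it must wait
    ...                      //for any thread inside the monitor before
}                            //the storage can be reclaimed
\end{cfacode}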
Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering. An important aspect of the implementation is that \CFA does not allow barging, which means that once function \code{bar} releases the monitor, \code{foo} is guaranteed to resume immediately after (unless some other thread waited on the same condition). This guarantees offers the benefit of not having to loop arount waits in order to guarantee that a condition is still met. The main reason \CFA offers this guarantee is that users can easily introduce barging if it becomes a necessity but adding barging prevention or barging avoidance is more involved without language support. Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design of \CFA concurrency. signal(a1.e); } \end{cfacode} There are two details to note here. First, the \code{signal} is a delayed operation, it only unblocks the waiting thread when it reaches the end of the critical section. This semantic is needed to respect mutual-exclusion, i.e., the signaller and signalled thread cannot be in the monitor simultaneously. The alternative is to return immediately after the call to \code{signal}, which is significantly more restrictive. Second, in \CFA, while it is common to store a \code{condition} as a field of the monitor, a \code{condition} variable can be stored/created independently of a monitor. Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering. An important aspect of the implementation is that \CFA does not allow barging, which means that once function \code{bar} releases the monitor, \code{foo} is guaranteed to resume immediately after (unless some other thread waited on the same condition). This guarantee offers the benefit of not having to loop around waits to recheck that a condition is met. The main reason \CFA offers this guarantee is that users can easily introduce barging if it becomes a necessity but adding barging prevention or barging avoidance is more involved without language support. Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design of \CFA concurrency. % ====================================================================== % ====================================================================== % ====================================================================== It is easier to understand the problem of multi-monitor scheduling using a series of pseudo-code. Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors. Indeed, \code{wait} statements always use the implicit condition as paremeter and explicitly names the monitors (A and B) associated with the condition. Note that in \CFA, condition variables are tied to a set of monitors on first use (called branding) which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors. It is easier to understand the problem of multi-monitor scheduling using a series of pseudo-code examples. Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors. 
% ======================================================================

It is easier to understand the problem of multi-monitor scheduling using a series of pseudo-code examples. Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors. Indeed, \code{wait} statements always use the implicit condition variable as parameter and explicitly name the monitors (A and B) associated with the condition. Note that in \CFA, condition variables are tied to a \emph{group} of monitors on first use (called branding), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors. The example below shows the simple case of having two threads (one for each column) and a single monitor A.

\begin{multicols}{2}
...
\end{pseudo}
\end{multicols}

One thread acquires before waiting (atomically blocking and releasing A) and the other acquires before signalling. It is important to note here that both \code{wait} and \code{signal} must be called with the proper monitor(s) already acquired. This semantic is a logical requirement for barging prevention.

A direct extension of the previous example is a \gls{bulk-acq} version:

\begin{multicols}{2}
\begin{pseudo}
...
release A & B
\end{pseudo}
\columnbreak
\begin{pseudo}
acquire A & B
...
\end{pseudo}
\end{multicols}

This version uses \gls{bulk-acq} (denoted using the {\sf\&} symbol), but the presence of multiple monitors does not add a particularly new meaning. Synchronization happens between the two threads in exactly the same way and order. The only difference is that mutual exclusion covers more monitors. On the implementation side, handling multiple monitors does add a degree of complexity as the next few examples demonstrate.

While deadlock issues can occur when nesting monitors, these issues are only a symptom of the fact that locks, and by extension monitors, are not perfectly composable. For monitors, a well-known deadlock problem is the Nested Monitor Problem~\cite{Lister77}, which occurs when a \code{wait} is made by a thread that holds more than one monitor. For example, the following pseudo-code runs into the nested-monitor problem:

\begin{multicols}{2}
\begin{pseudo}
...
\end{pseudo}
\end{multicols}
The \code{wait} only releases monitor \code{B}, so the signalling thread cannot acquire monitor \code{A} to get to the \code{signal}. Attempting release of all acquired monitors at the \code{wait} introduces a different set of problems, such as releasing monitor \code{C}, which has nothing to do with the \code{signal}. However, for monitors as for locks, it is possible to write a program using nesting without encountering any problems if nesting is done correctly. For example, the next pseudo-code snippet acquires monitors {\sf A} then {\sf B} before waiting, while only acquiring {\sf B} when signalling, effectively avoiding the Nested Monitor Problem~\cite{Lister77}.

\begin{multicols}{2}
...
\end{multicols}

This simple refactoring may not be possible, forcing more complex restructuring.

% ======================================================================

A larger example is presented to show complex issues for \gls{bulk-acq} and all the implementation options are analyzed. Listing \ref{lst:int-bulk-pseudo} shows an example where \gls{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{lst:int-bulk-cfa} shows the corresponding \CFA code to implement the pseudo-code in listing \ref{lst:int-bulk-pseudo}. For the purpose of translating the given pseudo-code into \CFA code, any method of introducing a monitor is acceptable, e.g., \code{mutex} parameters, global variables, pointer parameters or using locals with the \code{mutex} statement.

\begin{figure}[!t]
\begin{multicols}{2}
Waiting thread
\begin{pseudo}[numbers=left]
...
release A
\end{pseudo}
\columnbreak
Signalling thread
\begin{pseudo}[numbers=left, firstnumber=10, escapechar=|]
acquire A
//Code Section 5
acquire A & B
//Code Section 6
|\label{line:signal1}|signal A & B
//Code Section 7
release A & B
//Code Section 8
|\label{line:lastRelease}|release A
\end{pseudo}
\end{multicols}
\caption{Internal scheduling with \gls{bulk-acq}}
\label{lst:int-bulk-pseudo}
\end{figure}

\begin{figure}[!b]
\begin{cfacode}[caption={Equivalent \CFA code for listing \ref{lst:int-bulk-pseudo}},label={lst:int-bulk-cfa}]
...
\end{cfacode}
\end{figure}

The complexity begins at code sections 4 and 8, which are where the existing semantics of internal scheduling need to be extended for multiple monitors.
The complexity begins at code sections 4 and 8, which are where the existing semantics of internal scheduling need to be extended for multiple monitors. The root of the problem is that \gls{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous pseudo-code. When the signaller thread reaches the location where it should release ``\code{A & B}'' (listing \ref{lst:int-bulk-pseudo} line \ref{line:signal1}), it must actually transfer ownership of monitor \code{B} to the waiting thread. This ownership transfer is required in order to prevent barging into \code{B} by another thread, since both the signalling and signalled threads still need monitor \code{A}. There are three options.

\subsubsection{Delaying signals}
The obvious solution to the problem of multi-monitor scheduling is to keep ownership of all locks until the last lock is ready to be transferred. It can be argued that that moment is when the last lock is no longer needed, because this semantics fits most closely to the behaviour of single-monitor scheduling. This solution has the main benefit of transferring ownership of groups of monitors, which simplifies the semantics from multiple objects to a single group of objects, effectively making the existing single-monitor semantic viable by simply changing monitors to monitor groups. The naive approach to this solution is to only release monitors once every monitor in a group can be released.

\begin{figure}
\begin{multicols}{2}
Waiter
\columnbreak
Signaller
\begin{pseudo}[numbers=left, firstnumber=6,escapechar=|]
acquire A
acquire A & B
signal A & B
release A & B
|\label{line:secret}|//Secretly keep B here
release A
//Wakeup waiter and transfer A & B
\end{pseudo}
\end{multicols}
\begin{cfacode}[caption={Listing \ref{lst:int-bulk-pseudo}, with delayed signalling comments},label={lst:int-secret}]
\end{cfacode}
\end{figure}
However, since some monitors are never released (i.e., the monitor of a thread), this interpretation means groups can grow but may never shrink. A more interesting interpretation is to transfer groups as one but to recreate the groups on every operation, i.e., limit ownership transfer to one per \code{signal}/\code{release}.

However, this solution can become much more complicated depending on what is executed while secretly holding B (listing \ref{lst:int-secret} line \ref{line:secret}). The goal in this solution is to avoid the need to transfer ownership of a subset of the condition monitors. However, listing \ref{lst:dependency} shows a slightly different example where a third thread is waiting on monitor \code{A}, using a different condition variable. Because the third thread is signalled when secretly holding \code{B}, the goal becomes unreachable. Depending on the order of signals (listing \ref{lst:dependency} lines \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen:

\paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor \code{A} needs to be passed to thread $\beta$ when thread $\alpha$ is done with it.
\paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor \code{B} needs to be retained and passed to thread $\alpha$ along with monitor \code{A}, which can be done directly or possibly using thread $\beta$ as an intermediate.
\\

Note that ordering is not determined by a race condition but by whether signalled threads are enqueued in FIFO or FILO order. However, regardless of the answer, users can move line \ref{line:signal-a} before line \ref{line:signal-ab} and get the reverse effect for listing \ref{lst:dependency}. In both cases, the threads need to be able to distinguish, on a per-monitor basis, which ones need to be released and which ones need to be transferred, which means monitors cannot be handled as a single homogeneous group and therefore effectively precludes this approach.

\subsubsection{Dependency graphs}

\begin{figure}
\begin{multicols}{3}
release A
\end{pseudo}
\columnbreak
Thread $\gamma$
\begin{pseudo}[numbers=left, firstnumber=6, escapechar=|]
acquire A
acquire A & B
|\label{line:signal-ab}|signal A & B
|\label{line:release-ab}|release A & B
|\label{line:signal-a}|signal A
|\label{line:release-a}|release A
\end{pseudo}
\columnbreak
Thread $\beta$
\begin{pseudo}[numbers=left, firstnumber=12, escapechar=|]
acquire A
wait A
|\label{line:release-aa}|release A
\end{pseudo}
\end{multicols}
\caption{Dependency graph}
\label{lst:dependency}
\end{figure}
In the listing \ref{lst:int-bulk-pseudo} pseudo-code, there is a solution that satisfies both barging prevention and mutual exclusion. If ownership of both monitors is transferred to the waiter when the signaller releases \code{A & B}, and the waiter then transfers ownership of \code{A} back to the signaller when it releases it, then the problem is solved (\code{B} is no longer in use at this point). Dynamically finding the correct order is therefore the second possible solution. The problem is effectively resolving a dependency graph of ownership requirements. Here even the simplest of code snippets requires two transfers, and the number of transfers seems to grow at a close to polynomial rate. This complexity explosion can be seen in listing \ref{lst:explosion}, which is just a direct extension to three monitors, yet requires at least three ownership transfers and has multiple solutions. Furthermore, the presence of multiple solutions for ownership transfer can cause deadlock problems if a specific solution is not consistently picked, in the same way that multiple lock-acquisition orders can cause deadlocks.

\begin{figure}
\begin{center}
\input{dependency}
\end{center}
\end{figure}
\begin{figure}
\begin{multicols}{2}
\begin{pseudo}
acquire A
acquire B
acquire C
wait A & B & C
release C
release B
release A
\end{pseudo}
\columnbreak
\begin{pseudo}
acquire A
acquire B
acquire C
signal A & B & C
release C
release B
release A
\end{pseudo}
\end{multicols}
\begin{cfacode}[caption={Extension to three monitors of listing \ref{lst:int-bulk-pseudo}},label={lst:explosion}]
\end{cfacode}
\end{figure}

Listing \ref{lst:dependency} is the three-thread example used in the delayed-signals solution. Figure \ref{fig:dependency} shows the corresponding dependency graph that results, where every node is a statement of one of the three threads, and the arrows are the dependencies of that statement (e.g., $\alpha1$ must happen before $\alpha2$). The extra challenge is that this dependency graph is effectively post-mortem, but the runtime system needs to be able to build and solve these graphs as the dependencies unfold. Since resolving dependency graphs is a complex and expensive endeavour, this solution is not the preferred one.

\subsubsection{Partial signalling} \label{partial-sig}
Finally, the solution that is chosen for \CFA is to use partial signalling. Again using listing \ref{lst:int-bulk-pseudo}, the partial-signalling solution transfers ownership of monitor \code{B} at line \ref{line:signal1} to the waiter but does not wake the waiting thread, since it is still using monitor \code{A}. Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread. This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met (a sketch of this bookkeeping follows the example below). This solution has a much simpler implementation than a dependency-graph solving algorithm, which is why it was chosen. Furthermore, after being fully implemented, this solution does not appear to have any significant downsides.

While listing \ref{lst:dependency} is a complicated problem for the previous solutions, it can be solved easily with partial signalling:
\begin{itemize}
	\item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor \code{B} to thread $\alpha$ and continues to hold monitor \code{A}.
	\item When thread $\gamma$ reaches line \ref{line:release-a} it transfers monitor \code{A} to thread $\beta$ and wakes it up.
	\item When thread $\beta$ reaches line \ref{line:release-aa} it transfers monitor \code{A} to thread $\alpha$ and wakes it up.
	\item Problem solved!
\end{itemize}
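In outline, partial signalling only requires a small amount of bookkeeping on each release. The following sketch illustrates the two actions; the data structures (per-monitor owner, per-thread pending count) are assumptions for illustration, not the actual \CFA runtime code:

\begin{pseudo}
release( monitor m ):
	if a signalled thread t is waiting for m:
		m.owner = t                    //pass the monitor to its next owner
		t.pending = t.pending - 1
		if t.pending == 0:             //t now owns all of its monitors
			wake( t )                  //conditionally wake the thread
	else:
		m.owner = pop( m.entry-queue ) //or mark m as free
\end{pseudo}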
% ======================================================================
% ======================================================================
% ======================================================================

\begin{table}
\begin{tabular}{|c|c|}
\code{signal} & \code{signal_block} \\
\end{tabular}
\caption{Dating service example using \code{signal} and \code{signal_block}.}
\label{tbl:datingservice}
\end{table}

An important note is that, until now, signalling a monitor was a delayed operation. The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the \code{signal} statement. However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the \code{signal_block} routine. The example in table \ref{tbl:datingservice} highlights the difference in behaviour. As mentioned, \code{signal} only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed. To avoid this explicit synchronization, the \code{condition} type offers the \code{signal_block} routine, which handles the two-way handshake as shown in the example. This feature removes the need for a second condition variable and simplifies programming. Like every other monitor semantic, \code{signal_block} uses barging prevention, which means mutual exclusion is baton-passed both on the front end and the back end of the call to \code{signal_block}, meaning no other thread can acquire the monitor either before or after the call.

% ======================================================================
\end{tabular}
\end{center}
This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency. Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring. External scheduling can generally be done either in terms of control flow (e.g., Ada with \code{accept}, \uC with \code{_Accept}) or in terms of data (e.g., Go with channels). Of course, both of these paradigms have their own strengths and weaknesses, but for this project control-flow semantics were chosen to stay consistent with the rest of the language's semantics. Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines. The previous example shows a simple use of \code{_Accept} versus \code{wait}/\code{signal} and its advantages. Note that while other languages often use \code{accept}/\code{select} as the core external scheduling keyword, \CFA uses \code{waitfor} to prevent name collisions with existing socket \acrshort{api}s.

For the \code{P} member above using internal scheduling, the call to \code{wait} only guarantees that \code{V} is the last routine to access the monitor, allowing a third routine, say \code{isInUse()}, to acquire mutual exclusion several times while routine \code{P} is waiting. On the other hand, external scheduling guarantees that while routine \code{P} is waiting, no routine other than \code{V} can acquire the monitor.

% ======================================================================
% ======================================================================

In \uC, a monitor class declaration includes an exhaustive list of monitor operations. Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user:

\begin{cfacode}
\end{cfacode}

Furthermore, external scheduling is an example where implementation constraints become visible from the interface. Here is the pseudo-code for the entering phase of a monitor:

\begin{center}
\begin{tabular}{l}
\end{tabular}
\end{center}

For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions. However, a fast check for \pscode{monitor accepts me} is much harder to implement, depending on the constraints put on the monitors.
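In summary, the entering phase performs a sequence of checks along the following lines; this is a sketch, and the exact details are implementation dependent:

\begin{pseudo}
if monitor is free
	enter
elif I already own the monitor
	continue                //monitor acquisition is recursive
elif monitor accepts me
	enter                   //an accepted (waitfor'ed) routine may barge in
else
	block on the entry queue
\end{pseudo}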
Indeed, monitors are often expressed as an entry queue and some acceptor queue, as in the following figure:

\end{figure}

There are other alternatives to these pictures, but in the case of this picture, implementing a fast accept check is relatively easy. Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (e.g., 128) of mutex members. This approach requires a dense unique ordering of routines with an upper bound, and that ordering must be consistent across translation units. For OO languages these constraints are common, since objects only offer the ability to add member routines consistently across translation units via inheritance. However, in \CFA users can extend objects with mutex routines that are only visible in certain translation units. This means that establishing a program-wide dense ordering among mutex routines can only be done in the program linking phase, and could still have issues when using dynamically shared objects. The alternative is to alter the implementation like this:

\begin{center}
\end{center}

Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separately. Generating a mask dynamically means that the storage for the mask information can vary between calls to \code{waitfor}, allowing for more flexibility and extensions. Storing an array of accepted function pointers replaces the single-instruction bitmask comparison with dereferencing a pointer followed by a linear search. Furthermore, supporting nested external scheduling (e.g., listing \ref{lst:nest-ext}) may now require additional searches for the \code{waitfor} statement to check if a routine is already queued.
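A sketch of such a dynamically generated mask might look as follows; the names and layout here are assumptions for illustration, not the actual runtime structures:

\begin{cfacode}
//One acceptable call: a routine and the monitors it must acquire
struct __acceptable_t {
	void (* func)(void);       //accepted routine, stored as a generic pointer
	struct monitor_desc ** monitors; //monitors associated with this clause
	unsigned short count;      //number of monitors in the clause
};

//Mask built on the waiting thread's stack for one waitfor statement
struct __waitfor_mask_t {
	struct __acceptable_t * clauses; //array of accepted routines
	unsigned short size;             //bound for the linear search
};
\end{cfacode}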
\begin{figure}
\begin{cfacode}[caption={Example of nested external scheduling},label={lst:nest-ext}]
monitor M {};
void foo( M & mutex a ) {}
\end{cfacode}
\end{figure}

Note that in the second picture, tasks always need to keep track of the monitors associated with mutex routines, and the routine mask needs to have both a function pointer and a set of monitors, as is discussed in the next section. These details are omitted from the picture for the sake of simplicity.

At this point, a decision must be made between flexibility and performance. Many design decisions in \CFA achieve both flexibility and performance, for example polymorphic routines add significant flexibility but inlining them means the optimizer can easily remove any runtime cost. Here however, the cost of flexibility cannot be trivially removed. In the end, the most flexible approach has been chosen, since it allows users to write programs that would otherwise be hard to write. This decision is based on the assumption that writing fast but inflexible locks is closer to a solved problem than writing locks that are as flexible as external scheduling in \CFA.

% ======================================================================

void g(M & mutex b, M & mutex c) {
	waitfor(f); //two monitors M => unknown which to pass to f(M & mutex)
}
\end{cfacode}

The obvious solution is to specify the correct monitor as follows:

void g(M & mutex a, M & mutex b) {
	//wait for call to f with argument b
	waitfor(f, b);
}
\end{cfacode}

This syntax is unambiguous. Both locks are acquired and kept by \code{g}. When routine \code{f} is called, the lock for monitor \code{b} is temporarily transferred from \code{g} to \code{f} (while \code{g} still holds lock \code{a}). This behaviour can be extended to the multi-monitor \code{waitfor} statement as follows.
\begin{cfacode}
void g(M & mutex a, M & mutex b) {
	//wait for call to f with arguments a and b
	waitfor(f, a, b);
}
\end{cfacode}

Note that the set of monitors passed to the \code{waitfor} statement must be entirely contained in the set of monitors already acquired in the routine. \code{waitfor} used in any other context is Undefined Behaviour.

An important behaviour to note is when a set of monitors only matches partially:

\begin{cfacode}
void bar() {
	f(a2, b); //fulfill cooperation
}
\end{cfacode}

While the equivalent can happen when using internal scheduling, the fact that conditions are specific to a set of monitors means that users have to use two different condition variables. In both cases, partially matching monitor sets does not wake up the waiting thread. It is also important to note that in the case of external scheduling the order of parameters is irrelevant; \code{waitfor(f,a,b)} and \code{waitfor(f,b,a)} are indistinguishable waiting conditions.

% ======================================================================
% ======================================================================

Syntactically, the \code{waitfor} statement takes a function identifier and a set of monitors. While the set of monitors can be any list of expressions, the function name is more restricted, because the compiler validates at compile time the validity of the function type and the parameters used with the \code{waitfor} statement. It checks that the set of monitors passed in matches the requirements for a function call. Listing \ref{lst:waitfor} shows various usages of the waitfor statement and which are acceptable. The choice of the function type is made ignoring any non-\code{mutex} parameter. One limitation of the current implementation is that it does not handle overloading, although supporting overloading is possible.
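Returning to the partial-match behaviour above, the following sketch makes the case concrete; the monitor declarations and routine names here are hypothetical, chosen only for illustration:

\begin{cfacode}
monitor M {};
M a1, a2, b;

void f( M & mutex m1, M & mutex m2 ) {}

void waiter( M & mutex m1, M & mutex m2 ) {
	waitfor( f, m1, m2 );      //wait for f on the two monitors held here
}

void do_wait() { waiter( a1, b ); } //waits for a call to f(a1, b)
void do_call() { f( a2, b ); }      //partial match {a2, b} vs {a1, b}:
                                    //the waiter is not woken
\end{cfacode}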
\begin{figure}
\begin{cfacode}[caption={Various correct and incorrect uses of the waitfor statement},label={lst:waitfor}]
monitor A{};
monitor B{};

	waitfor(f4, a1);     //Incorrect : f4 ambiguous
	waitfor(f2, a1, b2); //Undefined Behaviour : b2 not mutex
}
\end{cfacode}
\end{figure}

Finally, for added flexibility, \CFA supports constructing a complex \code{waitfor} statement using \code{or}, \code{timeout} and \code{else}. Indeed, multiple \code{waitfor} clauses can be chained together using \code{or}; this chain forms a single statement that uses baton-passing to any one function that fits one of the function+monitor sets passed in. To enable users to tell which accepted function executed, \code{waitfor}s are followed by a statement (including the null statement \code{;}) or a compound statement, which is executed after the clause is triggered. A \code{waitfor} chain can also be followed by a \code{timeout}, to signify an upper bound on the wait, or an \code{else}, to signify that the call should be non-blocking, i.e., it checks whether a matching function call has already arrived and otherwise continues immediately. Any and all of these clauses can be preceded by a \code{when} condition to dynamically toggle the accept clauses on or off based on some current state. Listing \ref{lst:waitfor2} demonstrates several complex masks and some incorrect ones.

\begin{figure}
\begin{cfacode}[caption={Various correct and incorrect uses of the or, else, and timeout clause around a waitfor statement},label={lst:waitfor2}]
monitor A{};

}
\end{cfacode}
\end{figure}

An interesting use for the \code{waitfor} statement is destructor semantics. Indeed, the \code{waitfor} statement can accept any \code{mutex} routine, which includes the destructor (see section \ref{data}). However, with the semantics discussed until now, waiting for the destructor does not make any sense, since using an object after its destructor is called is undefined behaviour. The simplest approach is to disallow \code{waitfor} on a destructor.
However, a more expressive approach is to flip the execution ordering when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current \code{mutex} routine, similarly to how a condition is signalled.

\begin{figure}
\begin{cfacode}[caption={Example of an executor which executes actions in series until the destructor is called.},label={lst:dtor-order}]
monitor Executer {};
struct  Action;

}
\end{cfacode}
\end{figure}

For example, listing \ref{lst:dtor-order} shows an example of an executor with an infinite loop, which waits for the destructor to break out of this loop. Switching the semantic meaning introduces an idiomatic way to terminate a task and/or wait for its termination via destruction.
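Concretely, such an executor loop could look as follows; this is a sketch only, and the routine names \code{execute} and \code{run} are assumptions for illustration:

\begin{cfacode}
monitor Executer {};
struct Action;

void execute( Executer & mutex this, const Action & action ); //run one action
void run( Executer & mutex this ) {
	while( true ) {
		   waitfor( execute, this );   //accept the next action
		or waitfor( ^?{}, this ) {     //destructor called: flipped ordering
			break;                     //exit the loop, then destruct
		}
	}
}
\end{cfacode}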
• ## doc/proposals/concurrency/text/future.tex

 rc6e2c18
\chapter{Conclusion}
As mentioned in the introduction, this thesis provides a minimal concurrency \acrshort{api} that is simple, efficient and usable as the basis for higher-level features. The approach presented is based on a lightweight thread system for parallelism, which sits on top of clusters of processors. This M:N model is judged to be both more efficient and to allow more flexibility for users. Furthermore, this document introduces monitors as the main concurrency tool for users. This thesis also offers a novel approach which allows using multiple monitors at once without running into the Nested Monitor Problem~\cite{Lister77}. It also offers a full implementation of the concurrency runtime written entirely in \CFA, effectively the largest \CFA code base to date.

% ======================================================================
% ======================================================================
\section{Future Work}
% ======================================================================
% ======================================================================

\subsection{Performance} \label{futur:perf}
This thesis presents a first implementation of the \CFA runtime. Therefore, there is still significant work to do to improve performance. Many of the data structures and algorithms may change in the future to more efficient versions. For example, in \CFA the number of monitors in a single \gls{bulk-acq} is only bound by the stack size, which is probably unnecessarily generous. It may be possible that limiting the number helps increase performance. However, it is not obvious that the benefit would be significant.

\subsection{Flexible Scheduling} \label{futur:sched}
An important part of concurrency is scheduling. Different scheduling algorithms can affect performance (both in terms of average and variation).
However, no single scheduler is optimal for all workloads, and therefore there is value in being able to change the scheduler for given programs. One solution is to offer various tweaking options to users, allowing the scheduler to be adjusted to the requirements of the workload. However, in order to be truly flexible, it would be interesting to allow users to add arbitrary data and arbitrary scheduling algorithms to the scheduler. For example, a web server could attach Type-of-Service information to threads and have a ``ToS aware'' scheduling algorithm tailored to this specific web server. This path of flexible schedulers will be explored for \CFA.

\subsection{Non-Blocking IO} \label{futur:nbio}
While most of the parallelism tools are aimed at data parallelism and control-flow parallelism, many modern workloads are not bound on computation but on IO operations, a common case being web servers and XaaS (anything as a service). These types of workloads often require significant engineering around amortizing the costs of blocking IO operations. At its core, Non-Blocking IO is an operating-system-level feature that allows queuing IO operations (e.g., network operations) and registering for notifications instead of waiting for requests to complete. In this context, the role of the language is to make Non-Blocking IO easily available and with low overhead. The current trend is to use asynchronous programming with tools like callbacks and/or futures and promises, which can be seen in frameworks like Node.js~\cite{NodeJs} for JavaScript, Spring MVC~\cite{SpringMVC} for Java and Django~\cite{Django} for Python. However, while these are valid solutions, they lead to code that is harder to read and maintain because it is much less linear.

\subsection{Other concurrency tools} \label{futur:tools}
While monitors offer a flexible and powerful concurrent core for \CFA, other concurrency tools are also necessary for a complete multi-paradigm concurrency package. Examples of such tools can include simple locks and condition variables, futures and promises~\cite{promises}, executors and actors. These additional features are useful when monitors offer a level of abstraction that is inadequate for certain tasks.
\subsection{Implicit threading} \label{futur:implcit}
Simpler applications can benefit greatly from having implicit parallelism, that is, parallelism that does not rely on the user to write concurrency. This type of parallelism can be achieved both at the language level and at the library level. The canonical example of implicit parallelism is parallel for loops, which are the simplest example of a divide-and-conquer algorithm~\cite{uC++book}. Table \ref{lst:parfor} shows three different code examples that accomplish point-wise sums of large arrays. Note that none of these examples explicitly declare any concurrency or parallelism objects.

\begin{table}
\begin{center}
\begin{tabular}[t]{|c|c|c|}
\caption{For loop to sum numbers: Sequential, using library parallelism and language parallelism.}
\label{lst:parfor}
\end{table}

Implicit parallelism is a restrictive solution and therefore has its limitations. However, it is a quick and simple approach to parallelism, which may very well be sufficient for smaller applications and reduces the amount of boiler-plate needed to start benefiting from parallelism in modern CPUs.
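As a sketch of what the library-parallelism column in table \ref{lst:parfor} could look like, the following divides the sum across \CFA threads; the \code{Adder} type and its layout are assumptions for illustration:

\begin{cfacode}
thread Adder {
	int * data;      //slice of the array to sum
	int len;
	long long sum;   //per-thread result
};
void ?{}( Adder & this, int data[], int len ) {
	this.data = data; this.len = len; this.sum = 0;
}
void main( Adder & this ) {  //thread body, runs once the Adder is constructed
	for( int i = 0; i < this.len; i += 1 ) {
		this.sum += this.data[i];
	}
}
\end{cfacode}

Creating an array of \code{Adder}s in a block forks one thread per slice; the block's closing brace joins them, after which the per-thread sums can be combined sequentially.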
• ## doc/proposals/concurrency/text/internals.tex

 rc6e2c18 \chapter{Behind the scene} There are several challenges specific to \CFA when implementing concurrency. These challenges are a direct result of \gls{bulk-acq} and loose object-definitions. These two constraints are the root cause of most design decisions in the implementation. Furthermore, to avoid contention from dynamically allocating memory in a concurrent environment, the internal-scheduling design is (almost) entirely free of mallocs. This is to avoid the chicken and egg problem \cite{Chicken} of having a memory allocator that relies on the threading system and a threading system that relies on the runtime. This extra goal, means that memory management is a constant concern in the design of the system. The main memory concern for concurrency is queues. All blocking operations are made by parking threads onto queues. The queue design needs to be intrusive\cite{IntrusiveData} to avoid the need for memory allocation, which entails that all the nodes need specific fields to keep track of all needed information. Since many concurrency operations can use an unbound amount of memory (depending on \gls{bulk-acq}), statically defining information in the intrusive fields of threads is insufficient. The only variable sized container that does not require memory allocation is the callstack, which is heavily used in the implementation of internal scheduling. Particularly variable length arrays, which are used extensively. Since stack allocation is based around scope, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable length. The threads and the condition both allow a fixed amount of memory to be stored, while mutex-routines and the actual blocking call allow for an unbound amount (though the later is preferable in terms of performance). Note that since the major contributions of this thesis are extending monitor semantics to \gls{bulk-acq} and loose object definitions, any challenges that are not resulting of these characteristiques of \CFA are considered as solved problems and therefore not discussed further. There are several challenges specific to \CFA when implementing concurrency. These challenges are a direct result of \gls{bulk-acq} and loose object-definitions. These two constraints are the root cause of most design decisions in the implementation. Furthermore, to avoid contention from dynamically allocating memory in a concurrent environment, the internal-scheduling design is (almost) entirely free of mallocs. This approach avoids the chicken and egg problem~\cite{Chicken} of having a memory allocator that relies on the threading system and a threading system that relies on the runtime. This extra goal means that memory management is a constant concern in the design of the system. The main memory concern for concurrency is queues. All blocking operations are made by parking threads onto queues and all queues are designed with intrusive nodes, where each not has pre-allocated link fields for chaining, to avoid the need for memory allocation. Since several concurrency operations can use an unbound amount of memory (depending on \gls{bulk-acq}), statically defining information in the intrusive fields of threads is insufficient.The only way to use a variable amount of memory without requiring memory allocation is to pre-allocate large buffers of memory eagerly and store the information in these buffers. 
Conveniently, the callstack fits that description and is easy to use, which is why it is used heavily in the implementation of internal scheduling, particularly variable-length arrays. Since stack allocation is based around scope, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable-length array. The threads and the condition both have a fixed amount of memory, while mutex-routines and the actual blocking call allow for an unbound amount, within the stack size. Note that since the major contributions of this thesis are extending monitor semantics to \gls{bulk-acq} and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed. % ====================================================================== % ====================================================================== The first step towards the monitor implementation is simple mutex-routines using monitors. In the single monitor case, this is done using the entry/exit procedure highlighted in listing \ref{lst:entry1}. This entry/exit procedure does not actually have to be extended to support multiple monitors, indeed it is sufficient to enter/leave monitors one-by-one as long as the order is correct to prevent deadlocks\cite{Havender68}. In \CFA, ordering of monitor relies on memory ordering, this is sufficient because all objects are guaranteed to have distinct non-overlaping memory layouts and mutual-exclusion for a monitor is only defined for its lifetime, meaning that destroying a monitor while it is acquired is undefined behavior. When a mutex call is made, the concerned monitors are agregated into a variable-length pointer array and sorted based on pointer values. This array presists for the entire duration of the mutual-exclusion and its ordering reused extensively. The first step towards the monitor implementation is simple mutex-routines. In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{lst:entry1}. The entry/exit procedures do not have to be extended to support multiple monitors. Indeed it is sufficient to enter/leave monitors one-by-one as long as the order is correct to prevent deadlock~\cite{Havender68}. In \CFA, ordering of monitor acquisition relies on memory ordering. This approach is sufficient because all objects are guaranteed to have distinct non-overlapping memory layouts and mutual-exclusion for a monitor is only defined for its lifetime, meaning that destroying a monitor while it is acquired is Undefined Behavior. When a mutex call is made, the concerned monitors are aggregated into a variable-length pointer-array and sorted based on pointer values. This array persists for the entire duration of the mutual-exclusion and its ordering reused extensively. \begin{figure} \begin{multicols}{2} \end{pseudo} \end{multicols} \caption{Initial entry and exit routine for monitors} \label{lst:entry1} \begin{pseudo}[caption={Initial entry and exit routine for monitors},label={lst:entry1}] \end{pseudo} \end{figure} Depending on the choice of semantics for when monitor locks are acquired, interaction between monitors and \CFA's concept of polymorphism can be more complex to support. However, it is shown that entry-point locking solves most of the issues. First of all, interaction between \code{otype} polymorphism and monitors is impossible since monitors do not support copying. 
Therefore, the main question is how to support \code{dtype} polymorphism. It is important to present the difference between the two acquiring options : callsite and entry-point locking, i.e. acquiring the monitors before making a mutex routine call or as the first operation of the mutex routine-call. For example: \begin{figure}[H] First of all, interaction between \code{otype} polymorphism and monitors is impossible since monitors do not support copying. Therefore, the main question is how to support \code{dtype} polymorphism. It is important to present the difference between the two acquiring options : \glspl{callsite-locking} and entry-point locking, i.e., acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call. For example: \begin{table}[H] \begin{center} \begin{tabular}{|c|c|c|} \end{center} \caption{Call-site vs entry-point locking for mutex calls} \label{fig:locking-site} \end{figure} Note the \code{mutex} keyword relies on the type system, which means that in cases where a generic monitor routine is desired, writing the mutex routine is possible with the proper trait, for example: \label{tbl:locking-site} \end{table} Note the \code{mutex} keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, e.g.: \begin{cfacode} //Incorrect: T may not be monitor \end{cfacode} Both entry-point and callsite locking are feasible implementations. The current \CFA implementations uses entry-point locking because it requires less work when using \gls{raii}, effectively transferring the burden of implementation to object construction/destruction. The same could be said of callsite locking, the difference being that the later does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, i.e.: the function body. Furthermore, entry-point locking requires less code generation since any useful routine is called at least as often as it is define, there can be only one entry-point but many callsites. Both entry-point and \gls{callsite-locking} are feasible implementations. The current \CFA implementations uses entry-point locking because it requires less work when using \gls{raii}, effectively transferring the burden of implementation to object construction/destruction. It is harder to use \gls{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, i.e.: the function body. For example, the monitor call can appear in the middle of an expression. Furthermore, entry-point locking requires less code generation since any useful routine multiple times, but there is only one entry-point for many call-sites. % ====================================================================== % ====================================================================== Figure \ref{fig:system1} shows a high-level picture if the \CFA runtime system in regards to concurrency. Each component of the picture is explained in details in the fllowing sections. Figure \ref{fig:system1} shows a high-level picture if the \CFA runtime system in regards to concurrency. Each component of the picture is explained in details in the flowing sections. \begin{figure} \subsection{Context Switching} As mentionned in section \ref{coroutine}, coroutines are a stepping stone for implementing threading. 
This is because they share the same mechanism for context-switching between different stacks. To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific function call. This assumption means that the context-switch only has to copy the callee-saved registers onto the stack and then switch the stack registers with the ones of the target coroutine/thread. Note that the instruction pointer can be left untouched since the context-switch is always inside the same function. Threads however do not context-switch between each other directly. They context-switch to the scheduler. This method is called a 2-step context-switch and has the advantage of having a clear distinction between user code and the kernel where scheduling and other system operation happen. Obiously, this has the cost of doubling the context-switch cost because threads must context-switch to an intermediate stack. However, the performance of the 2-step context-switch is still superior to a \code{pthread_yield}(see section \ref{results}). additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch to use manually (or as part of monitors). This option is not currently present in \CFA but the changes required to add it are strictly additive. As mentioned in section \ref{coroutine}, coroutines are a stepping stone for implementing threading, because they share the same mechanism for context-switching between different stacks. To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific function call. This assumption means that the context-switch only has to copy the callee-saved registers onto the stack and then switch the stack registers with the ones of the target coroutine/thread. Note that the instruction pointer can be left untouched since the context-switch is always inside the same function. Threads however do not context-switch between each other directly. They context-switch to the scheduler. This method is called a 2-step context-switch and has the advantage of having a clear distinction between user code and the kernel where scheduling and other system operation happen. Obviously, this doubles the context-switch cost because threads must context-switch to an intermediate stack. The alternative 1-step context-switch uses the stack of the from'' thread to schedule and then context-switches directly to the to'' thread. However, the performance of the 2-step context-switch is still superior to a \code{pthread_yield} (see section \ref{results}). Additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft \code{SwitchToFiber}~\cite{switchToWindows} routine). This option is not currently present in \CFA but the changes required to add it are strictly additive. \subsection{Processors} Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically pthreads in the current implementation of \CFA. Indeed, any parallelism must go through operating-system librairies. 
\subsection{Processors}
Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically pthreads in the current implementation of \CFA. Indeed, any parallelism must go through operating-system libraries. However, \glspl{uthread} are still the main source of concurrency; processors are simply the underlying source of parallelism. Indeed, processor \glspl{kthread} simply fetch a \gls{uthread} from the scheduler and run it; they are effectively executors for user-threads. The main benefit of this approach is that it offers a well-defined boundary between kernel code and user code, for example, kernel thread quiescing, scheduling and interrupt handling. Processors internally use coroutines to take advantage of the existing context-switching semantics.
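Concretely, each processor kernel-thread runs a loop of roughly the following shape, where the helper names are assumptions for illustration:
\begin{cfacode}
// sketch of the main loop of a processor kernel-thread
void processor_main( processor * this ) {
	while ( ! this->terminated ) {
		thread * next = fetch_ready_thread();        // scheduling decision
		if ( next ) {
			// step 2 of the 2-step context-switch: jump to the chosen user thread
			CtxSwitch( &this->context, &next->context );
		} else {
			idle( this );                            // no ready threads: quiesce
		}
	}
}
\end{cfacode}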
\subsection{Stack management}
One of the challenges of this system is to reduce the footprint as much as possible. Specifically, all pthreads created also have a stack created with them, which should be used as much as possible. Normally, coroutines also create their own stack to run on; however, in the case of the coroutines used for processors, these coroutines run directly on the \gls{kthread} stack, effectively stealing the processor stack. The exception to this rule is the Main Processor, i.e., the initial \gls{kthread} that is given to any program. In order to respect C user-expectations, the stack of the initial kernel thread (the main stack of the program, which can grow very large) is used by the main user thread rather than the main processor.

\subsection{Preemption} \label{preemption}
Finally, an important aspect for any complete threading system is preemption. As mentioned in chapter \ref{basics}, preemption introduces an extra degree of uncertainty, which enables users to have multiple threads interleave transparently, rather than having to cooperate among threads for proper scheduling and CPU distribution. Indeed, preemption is desirable because it adds a degree of isolation among threads. In a fully cooperative system, any thread that runs a long loop can starve other threads, while in a preemptive system, starvation can still occur but it does not rely on every thread having to yield or block on a regular basis, which significantly reduces the programmer's burden. Obviously, preemption is not optimal for every workload; however, any preemptive system can become a cooperative system by making the time-slices extremely large. Therefore, \CFA uses a preemptive threading system.

Preemption in \CFA is based on kernel timers, which are used to run a discrete-event simulation. Every processor keeps track of the current time and registers an expiration time with the preemption system. When the preemption system receives a change in preemption, it inserts the time into a sorted list and sets a kernel timer for the closest one, effectively stepping through preemption events on each signal sent by the timer.
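The registration path can be sketched as follows, assuming a list of alarm nodes sorted by deadline and a hypothetical helper \code{time_until} that computes the relative timeout:
\begin{cfacode}
#include <sys/time.h>

// register a new expiration time and re-arm the kernel timer if needed
void register_alarm( alarm_node * node ) {
	insert_sorted( &alarm_list, node );              // earliest deadline first (assumed helpers)
	if ( head( &alarm_list ) == node ) {             // new earliest deadline?
		struct itimerval val = { 0 };
		val.it_value = time_until( node->deadline ); // relative timeout (hypothetical helper)
		setitimer( ITIMER_REAL, &val, NULL );        // deliver SIGALRM on expiry
	}
}
\end{cfacode}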
These timers use the Linux signal {\tt SIGALRM}, which is delivered to the process rather than the kernel-thread. This results in an implementation problem, because when delivering signals to a process, the kernel can deliver the signal to any kernel thread for which the signal is not blocked, i.e.:
\begin{quote}
A process-directed signal may be delivered to any one of the threads that does not currently have the signal blocked. If more than one of the threads has the signal unblocked, then the kernel chooses an arbitrary thread to which to deliver the signal.

SIGNAL(7) - Linux Programmer's Manual
\end{quote}
For the sake of simplicity, and in order to prevent the case of having two threads receiving alarms simultaneously, \CFA programs block the {\tt SIGALRM} signal on every kernel thread except one. Now, because of how involuntary context-switches are handled, the kernel thread handling {\tt SIGALRM} cannot also be a processor thread. Involuntary context-switching is done by sending signal {\tt SIGUSR1} to the corresponding processor and having the thread yield from inside the signal handler. This approach effectively context-switches away from the signal-handler back to the kernel, and the signal-handler frame is eventually unwound when the thread is scheduled again. As a result, a signal-handler can start on one kernel thread and terminate on a second kernel thread (but the same user thread). It is important to note that signal-handlers save and restore signal masks because user-thread migration can cause a signal mask to migrate from one kernel thread to another. This behaviour is only a problem if the kernel threads among which a user thread can migrate differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' functions from other functions.}. However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks. For this reason, the alarm thread is in a tight loop around a system call to \code{sigwaitinfo}, requiring very little CPU time for preemption. One final detail about the alarm thread is how to wake it when additional communication is required (e.g., on thread termination). This unblocking is also done using {\tt SIGALRM}, but sent through \code{pthread_sigqueue}. Indeed, \code{sigwait} can differentiate signals sent from \code{pthread_sigqueue} from signals sent from alarms or the kernel.
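The alarm thread and its wake-up path can be sketched as follows, assuming Linux and the GNU extension \code{pthread_sigqueue}; the helper \code{tick_preemption} is a hypothetical name for running expired preemption events:
\begin{cfacode}
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stddef.h>

void tick_preemption( void );                    // hypothetical: runs expired preemption events

void * alarm_thread_main( void * arg ) {
	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, SIGALRM );
	pthread_sigmask( SIG_BLOCK, &mask, NULL );   // block SIGALRM, then wait for it synchronously
	siginfo_t info;
	for ( ;; ) {
		sigwaitinfo( &mask, &info );             // tight loop: sleep until SIGALRM arrives
		if ( info.si_code == SI_QUEUE ) break;   // queued by pthread_sigqueue: wake-up request
		tick_preemption();                       // timer expiry: step the event simulation
	}
	return NULL;
}

// waking the alarm thread from another kernel thread
void wake_alarm_thread( pthread_t alarm_thread ) {
	union sigval val = { .sival_int = 0 };
	pthread_sigqueue( alarm_thread, SIGALRM, val );
}
\end{cfacode}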
\subsection{Scheduler}
Finally, an aspect that has not been mentioned yet is the scheduling algorithm. Currently, the \CFA scheduler uses a single ready queue for all processors, which is the simplest approach to scheduling. Further discussion on scheduling is present in section \ref{futur:sched}.
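A minimal sketch of such a shared ready-queue is shown below; the lock and queue types, and the helper routines, are assumptions for illustration:
\begin{cfacode}
typedef struct thread thread;                // opaque runtime types (assumed)
typedef struct spinlock spinlock;
typedef struct thread_queue thread_queue;

// single ready-queue shared by all processors, protected by one lock
struct scheduler {
	spinlock * lock;                         // assumed lock type
	thread_queue * ready;                    // FIFO of ready user-threads
};

thread * fetch_ready_thread( struct scheduler * this ) {
	lock( this->lock );                      // hypothetical lock/unlock helpers
	thread * t = pop_head( this->ready );    // NULL when the queue is empty
	unlock( this->lock );
	return t;
}
\end{cfacode}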
% ======================================================================
% ======================================================================
\begin{figure}[H]
\begin{center}
	% elided: traditional monitor diagram
\end{center}
\caption{Traditional illustration of a monitor}
\label{fig:monitor}
\end{figure}

This picture has several components, the two most important being the entry-queue and the AS-stack. The entry-queue is an (almost) FIFO list where threads waiting to enter are parked, while the acceptor-signaler (AS) stack is a FILO list used for threads that have been signalled or otherwise marked as running next. For \CFA, this picture does not have support for blocking multiple monitors on a single condition. To support \gls{bulk-acq}, two changes to this picture are required. First, it is no longer helpful to attach the condition to \emph{a single} monitor. Secondly, the thread waiting on the condition has to be separated across multiple monitors, as seen in figure \ref{fig:monitor_cfa}.

\begin{figure}[H]
	% elided: illustration of multi-monitor conditions (fig:monitor_cfa)
\end{figure}

This picture and the proper entry and leave algorithms (see listing \ref{lst:entry2}) are the fundamental implementation of internal scheduling. Note that when a thread is moved from the condition to the AS-stack, it is conceptually split into N pieces, where N is the number of monitors specified in the parameter list. The thread is woken up when all the pieces have been popped from the AS-stacks and made active. In this picture, the threads are split into halves only because there are two monitors; for a specific signalling operation, every monitor needs a piece of thread on its AS-stack.

\begin{figure}[b]
\begin{pseudo}[caption={Entry and exit routine for monitors with internal scheduling},label={lst:entry2}]
\end{pseudo}
\end{figure}

There are some important things to notice about the exit routine. The solution discussed in \ref{intsched} can be seen in the exit routine of listing \ref{lst:entry2}. Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors, but only unblocking the thread when the last monitor has transferred ownership. This solution is deadlock safe and prevents any potential barging. The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call-stack of the \code{wait} and \code{signal_block} routines.

\begin{figure}[H]
	% elided: high-level representation of the internal-scheduling data structures (fig:structs)
\end{figure}

Figure \ref{fig:structs} shows a high-level representation of these data-structures. The main idea behind them is that a thread cannot contain an arbitrary number of intrusive stacks for linking onto monitors. The \code{condition node} is the data structure that is queued onto a condition variable and, when signaled, the condition queue is popped and each \code{condition criterion} is moved to the AS-stack. Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{lst:entry2}.
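A possible layout for these two data structures, with field and type names assumed for illustration, is:
\begin{cfacode}
#include <stdbool.h>
typedef struct monitor monitor;          // opaque runtime types (assumed)
typedef struct thread thread;

// one criterion per monitor involved in the wait
typedef struct condition_criterion {
	monitor * target;                    // monitor whose AS-stack this criterion is pushed onto
	bool ready;                          // has this criterion been popped yet?
	struct condition_node * owner;       // node this criterion belongs to
} condition_criterion;

// one node per waiting thread, queued onto the condition variable
typedef struct condition_node {
	thread * waiting_thread;             // thread blocked on the condition
	condition_criterion * criteria;      // one criterion per monitor in the wait
	int criterion_count;                 // number of monitors
} condition_node;
\end{cfacode}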
% ======================================================================
% ======================================================================
Similarly to internal scheduling, external scheduling for multiple monitors relies on the idea that waiting-thread queues are no longer specific to a single monitor, as mentioned in section \ref{extsched}. For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (e.g., no signal statement uses multiple conditions). However, in the case of external scheduling, there is no equivalent object associated with \code{waitfor} statements. This absence means the queues holding the waiting threads must be stored inside at least one of the monitors that is acquired, the monitors being the only objects that have sufficient lifetime and are available on both sides of the \code{waitfor} statement. This requires an algorithm to choose which monitor holds the relevant queue. It is also important that said algorithm be independent of the order in which users list parameters. The proposed algorithm is to fall back on monitor lock ordering (sorting by address, as sketched below) and specify that the monitor that is acquired first is the one with the relevant waiting queue. This assumes that the lock-acquiring order is static for the lifetime of all concerned objects, but that is a reasonable constraint. This algorithm choice has two consequences:
\begin{itemize}
\item The queue of the monitor with the lowest address is no longer a true FIFO queue because threads can be moved to the front of the queue. These queues need to contain a set of monitors for each of the waiting threads. Therefore, another thread whose set contains the same lowest-address monitor but different additional monitors may arrive first but enter the critical section after a thread with the correct pairing.
\item The queue of the lowest-address monitor is both required and potentially unused. Indeed, since it is not known at compile time which monitor has the lowest address, every monitor needs to have the correct queues, even though it is possible that some queues go unused for the entire duration of the program, for example if a monitor is only used in a specific pair.
\end{itemize}
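The canonical ordering can be obtained by sorting the monitor set by address before acquisition, as in the following plain-C sketch:
\begin{cfacode}
#include <stdint.h>
#include <stdlib.h>
typedef struct monitor monitor;          // opaque monitor type (assumed)

static int monitor_cmp( const void * a, const void * b ) {
	uintptr_t x = (uintptr_t)*(monitor * const *)a;
	uintptr_t y = (uintptr_t)*(monitor * const *)b;
	return (x > y) - (x < y);            // ascending address order
}

// establish the canonical acquisition order for a monitor set
void sort_monitors( monitor * monitors[], size_t count ) {
	qsort( monitors, count, sizeof(monitor *), monitor_cmp );
}
\end{cfacode}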
Therefore, the following modifications need to be made to support external scheduling:
\begin{itemize}
\item Each thread waiting on the entry-queue needs to keep track of which routine it is trying to enter, and with which set of monitors. The \code{mutex} routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information.
\item The monitors need to keep a mask of acceptable routines. This mask contains, for each acceptable routine, a routine pointer and an array of monitors to go with it (a possible layout is sketched after this list). It also needs storage to keep track of which routine was accepted. Since this information is not specific to any monitor, the monitors actually contain a pointer to an integer on the stack of the waiting thread. Note that if a thread has acquired two monitors but executes a \code{waitfor} with only one monitor as a parameter, setting the mask of acceptable routines on both monitors does not cause any problems, since the extra monitor does not change ownership regardless. This becomes relevant when \code{when} clauses affect the number of monitors passed to a \code{waitfor} statement.
\item The entry/exit routines need to be updated as shown in listing \ref{lst:entry3}.
\end{itemize}
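A possible layout for this mask, with names assumed for illustration, is:
\begin{cfacode}
typedef struct monitor monitor;          // opaque monitor type (assumed)

// one entry per clause of a waitfor statement
typedef struct acceptable {
	void (* routine)(void);              // accepted mutex routine
	monitor ** monitors;                 // monitor set that must accompany the call
	int count;                           // size of the monitor set
} acceptable;

// mask installed on every monitor involved in the waitfor
typedef struct acceptance_mask {
	acceptable * clauses;                // one entry per waitfor clause
	int size;                            // number of clauses
	int * accepted;                      // index of the accepted clause, stored on
	                                     // the waiting thread's stack
} acceptance_mask;
\end{cfacode}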
\subsection{External scheduling - destructors}
Finally, to support the ordering inversion of destructors, the code generation needs to be modified to use a special entry routine. This routine is needed because of the storage requirements of the call-order inversion. Indeed, when waiting for the destructors, storage is needed for the waiting context, and the lifetime of said storage needs to outlive the waiting operation it is needed for. For regular \code{waitfor} statements, the call-stack of the routine itself matches this requirement, but that is no longer the case when waiting for the destructor, since the request is pushed onto the AS-stack for later. The waitfor semantics can then be adjusted correspondingly, as seen in listing \ref{lst:entry-dtor}.

\begin{figure}
\begin{pseudo}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={lst:entry3}]
\end{pseudo}
\end{figure}

\begin{figure}
\begin{pseudo}[caption={Pseudo code for the \code{waitfor} routine and the \code{mutex} entry routine for destructors},label={lst:entry-dtor}]
\end{pseudo}
\end{figure}

• ## doc/proposals/concurrency/thesis.tex

 rc6e2c18 
\rfoot{v\input{version}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%======================================================================
%   L O G I C A L    D O C U M E N T -- the content of your thesis
%======================================================================
\begin{document}
% \linenumbers

% For a large document, it is a good idea to divide your thesis
% into several files, each one containing one chapter.
% To illustrate this idea, the "front pages" (i.e., title page,
% declaration, borrowers' page, abstract, acknowledgements,
% dedication, table of contents, list of tables, list of figures,
% nomenclature) are contained within the file "thesis-frontpgs.tex" which is
% included into the document by the following statement.

%----------------------------------------------------------------------
% FRONT MATERIAL
%----------------------------------------------------------------------
\input{frontpgs}

%----------------------------------------------------------------------
% MAIN BODY
%----------------------------------------------------------------------
\input{intro}
\input{future}

\chapter{Conclusion}

\clearpage
\printglossary[type=\acronymtype]
• ## doc/proposals/concurrency/version

 rc6e2c18 
0.11.280
• ## doc/refrat/refrat.tex

 rc6e2c18 
%% Created On       : Wed Apr  6 14:52:25 2016
%% Last Modified By : Peter A. Buhr
%% Last Modified On : Tue Aug 15 18:46:31 2017
%% Update Count     : 106
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{rationale}
The use of ``©?©'' in identifiers means that some C programs are not \CFA programs.
For instance, the sequence of characters ``©(i < 0)?--i:i©'' is legal in a C program, but a \CFA compiler detects a syntax error because it treats ``©?--©'' as an identifier, not as the two tokens ``©?©'' and ``©--©''.
\end{rationale}
• ## doc/user/user.tex

 rc6e2c18 
%% Created On       : Wed Apr  6 14:53:29 2016
%% Last Modified By : Peter A. Buhr
%% Last Modified On : Mon Nov 27 18:09:59 2017
%% Update Count     : 3143
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\CFAStyle                                                                                               % use default CFA format-style
\lstnewenvironment{C++}[1][]                            % use C++ style
{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{®}{®},#1}}
{}

case 7:
	...
	®break®                                         §\C{// explicit end of switch (redundant)}§
default:
	j = 3;

®int k = 0;®                    §\C{// allowed at different nesting levels}§
	...
	®case 2:®                                     §\C{// disallow case in nested statements}§
}
...
\end{cfa}

The components in the "with" clause
	with a, b, c { ... }
serve two purposes: each component provides a type and object.
The type must be a structure type.
Enumerations are already opened, and I think a union is opened to some extent, too. (Or is that just unnamed unions?)
The object is the target that the naked structure-fields apply to.
The components are open in "parallel" at the scope of the "with" clause/statement, so opening "a" does not affect opening "b", etc.
This semantic is different from Pascal, which nests the openings.

Having said the above, it seems reasonable to allow a "with" component to be an expression.
The type is the static expression-type and the object is the result of the expression.
Again, the type must be an aggregate.
Expressions require parenthesis around the components.
	with( a, b, c ) { ... }
Does this now make sense?

Having written more CFA code, it is becoming clear to me that I *really* want the "with" to be implemented, because I hate having to type all those object names for fields.
It's a great way to drive people away from the language.

\section{Exception Handling}
try {
	f(...);
} catch( E e ; §boolean-predicate§ ) {                  §\C[8cm]{// termination handler}§
	// recover and continue
} catchResume( E e ; §boolean-predicate§ ) {    §\C{// resumption handler}\CRT§
	// repair and return
} finally {
\end{cfa}

This syntax allows a prototype declaration to be created by cutting and pasting source text from the routine definition header (or vice versa).
Like C, it is possible to declare multiple routine-prototypes in a single declaration, where the return type is distributed across \emph{all} routine names in the declaration list (see~\VRef{s:Declarations}), \eg:
\begin{cfa}
C :             const double bar1(), bar2( int ), bar3( double );
§\CFA§: [const double] foo(), foo( int ), foo( double ) { return 3.0; }
\end{cfa}
\CFA allows the last routine in the list to define its body.

Declaration qualifiers can only appear at the start of a \CFA routine declaration,\footref{StorageClassSpecifier} \eg:
\begin{cfa}
\end{cfa}

\label{s:MRV_Functions}
In C and most programming languages, functions return at most one value;
however, many operations have multiple outcomes, some exceptional (see~\VRef{s:ExceptionHandling}).
To emulate functions with multiple return values, \emph{\Index{aggregation}} and/or \emph{\Index{aliasing}} is used.
In the former approach, a record type is created combining all of the return values.
For example, consider C's \Indexc{div} function, which returns the quotient and remainder for a division of an integer value.
\begin{cfa}
typedef struct { int quot, rem; } div_t;        §\C[7cm]{// from include stdlib.h}§
div_t div( int num, int den );
div_t qr = div( 13, 5 );                                        §\C{// return quotient/remainder aggregate}§
printf( "%d %d\n", qr.quot, qr.rem );           §\C{// print quotient/remainder}§
\end{cfa}
This approach requires a name for the return type and fields, where \Index{naming} is a common programming-language issue.
That is, naming creates an association that must be managed when reading and writing code.
While effective when used sparingly, this approach does not scale when functions need to return multiple combinations of types.

In the latter approach, additional return values are passed as pointer parameters.
A pointer parameter is assigned inside the routine to emulate a return.
For example, consider C's \Indexc{modf} function, which returns the integral and fractional part of a floating-point value.
\begin{cfa}
double modf( double x, double * i );            §\C{// from include math.h}§
double intp, frac = modf( 13.5, &intp );        §\C{// return integral and fractional components}§
printf( "%g %g\n", intp, frac );                        §\C{// print integral/fractional components}§
\end{cfa}
This approach requires allocating storage for the return values, which complicates the call site with a sequence of variable declarations leading to the call.
Also, while a disciplined use of ©const© can give clues about whether a pointer parameter is used as an \Index{out parameter}, it is not obvious from the routine signature whether the callee expects such a parameter to be initialized before the call.
Furthermore, while many C routines that accept pointers are safe for a ©NULL© argument, there are many C routines that are not null-safe.
Finally, C does not provide a mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler.

\CFA allows functions to return multiple values by extending the function declaration syntax.
Multiple return values are declared as a comma-separated list of types in square brackets in the same location that the return type appears in standard C function declarations.
\begin{cfa}
[ char, int, double ] f( ... );
\end{cfa}
The ability to return multiple values from a function requires a new syntax for the return statement.
For consistency, the return statement in \CFA accepts a comma-separated list of expressions in square brackets.
\begin{cfa}
return [ c, i, d ];
\end{cfa}
The expression resolution ensures the correct form is used depending on the values being returned and the return type of the current function.
A multiple-returning function with return type ©T© can return any expression that is implicitly convertible to ©T©.
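For completeness, a multiple-returning routine is defined like a normal routine, with the return statement listing one expression per return value; the following ©div2© sketch is illustrative rather than the library definition:
\begin{cfa}
[ int, int ] div2( int num, int den ) {
	return [ num / den, num % den ];                §\C{// one expression per return value}§
}
\end{cfa}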
The addition of multiple-return-value functions necessitates a syntax for accepting multiple values at the call-site.
A common use of a function's output is input to another function.
\CFA allows this case, without any new syntax; a multiple-returning function can be used in any of the contexts where an expression is allowed.
When a function call is passed as an argument to another call, the best match of actual arguments to formal parameters is evaluated given all possible expression interpretations in the current scope.
\begin{cfa}
void g( int, int );                                                     §\C{// 1}§
void g( double, double );                                       §\C{// 2}§
g( div( 13, 5 ) );                                                      §\C{// select 1}§
g( modf( 13.5 ) );                                                      §\C{// select 2}§
\end{cfa}
In this case, there are two overloaded ©g© routines.
Both calls to ©g© expect two arguments that are matched by the two return values from ©div© and ©modf©, respectively, which are fed directly to the first and second parameters of ©g©.
As well, both calls to ©g© have exact type matches for the two different versions of ©g©, so these exact matches are chosen.
When type matches are not exact, conversions are used to find a best match.

The previous examples can be rewritten passing the multiple returned-values directly to the ©printf© function call.
\begin{cfa}
[ int, int ] div( int x, int y );                       §\C{// from include stdlib}§
printf( "%d %d\n", div( 13, 5 ) );                      §\C{// print quotient/remainder}§

[ double, double ] modf( double x );            §\C{// from include math}§
printf( "%g %g\n", modf( 13.5 ) );                      §\C{// print integral/fractional components}§
\end{cfa}
This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type.

Finally, the addition of multiple-return-value functions necessitates a syntax for retaining the multiple values at the call-site, versus their temporary existence during a call.
The simplest mechanism for retaining a return value in C is variable assignment.
By assigning the multiple return-values into multiple variables, the values can be retrieved later.
As such, \CFA allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions on the left side.
\begin{cfa}
int quot, rem;
[ quot, rem ] = div( 13, 5 );                           §\C{// assign multiple variables}§
printf( "%d %d\n", quot, rem );                         §\C{// print quotient/remainder}\CRT§
\end{cfa}
Here, the multiple return-values are matched in much the same way as passing multiple return-values to multiple parameters in a call.

\subsection{Expressions}
Multiple-return-value functions provide \CFA with a new syntax for expressing a combination of expressions in the return statement and a combination of types in a function signature.
These notions are generalized to provide \CFA with \newterm{tuple expression}s and \newterm{tuple type}s.
A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types.
The type of a tuple expression is the tuple of the subexpression types, or a tuple type.
In \CFA, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets.
For example, the expression ©[5, 'x', 10.5]© has type ©[int, char, double]©.
The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization.
It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used.

\begin{sloppypar}
Multiple-return-value functions can equivalently be called \newterm{tuple-returning functions}.

\subsection{Variables}
The previous call of ©div© still requires the preallocation of multiple return-variables in a manner similar to the aliasing example.
In \CFA, it is possible to overcome this restriction by declaring a \newterm{tuple variable}.
\begin{cfa}
[ int, int ] ®qr® = div( 13, 5 );                       §\C{// initialize tuple variable}§
printf( "%d %d\n", ®qr® );                              §\C{// print quotient/remainder}§
\end{cfa}
It is now possible to match the multiple return-values to a single variable, in much the same way as \Index{aggregation}.
As well, the components of the tuple value are passed as separate parameters to ©printf©, allowing direct printing of tuple variables.
One way to access the individual components of a tuple variable is with assignment.
\begin{cfa}
[ quot, rem ] = qr;                                             §\C{// assign multiple variables}§
\end{cfa}
In addition to variables of tuple type, it is also possible to have pointers to tuples, and arrays of tuples.
Tuple types can be composed of any types, except for array types, since array assignment is disallowed, which makes tuple assignment difficult when a tuple contains an array.
\begin{cfa}
[ double, int ] di;
[ double, int ] * pdi;
[ double, int ] adi[10];
\end{cfa}
This example declares a variable of type ©[double, int]©, a variable of type pointer to ©[double, int]©, and an array of ten ©[double, int]©.
\end{sloppypar}

\subsection{Indexing}
It is also possible to access a single component of a tuple-valued expression without creating temporary variables.
Given a tuple-valued expression $e$ and a compile-time constant integer $i$ where $0 \leq i < n$, where $n$ is the number of components in $e$, $e.i$ accesses the $i^{\:th}$ component of $e$, \eg:
\begin{cfa}
[ int, double ] x;
[ int, double ] * p = &x;                               §\C{// assumed declaration, elided in the original example}§
p->0 = 5;                                                               §\C{// access int component of tuple pointed-to by p}§
g( x.1, x.0 );                                                  §\C{// rearrange x to pass to g}§
double z = [ x, f() ].0.1;                              §\C{// access second component of first component of tuple expression}§
\end{cfa}
Tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple.
This feature was proposed for \KWC but never implemented \cite[p.~45]{Till89}.

\subsection{Flattening and Structuring}
As evident in previous examples, tuples in \CFA do not have a rigid structure.
In function call contexts, tuples support implicit flattening and restructuring conversions.
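The walk-through below assumes a declaration of ©f© and a call of roughly the following shape; the declaration itself is not part of the excerpt above:
\begin{cfa}
void f( int, [ double, int ] );
f( [ 5, 10.2 ], 4 );                                    §\C{// flattened, then restructured to f( 5, [ 10.2, 4 ] )}§
\end{cfa}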
There is only a single definition of ©f©, and three arguments, each with a single interpretation.
First, the argument alternative list ©[5, 10.2], 4© is flattened to produce the argument list ©5, 10.2, 4©.
Next, the parameter matching algorithm begins, with $P =$ ©int© and $A =$ ©int©, which unifies exactly.
Moving to the next parameter and argument, $P =$ ©[double, int]© and $A =$ ©double©.
This time, the parameter is a tuple type, so the algorithm applies recursively with $P' =$ ©double© and $A =$ ©double©, which unifies exactly.
Then $P' =$ ©int© and $A =$ ©int©, which again unifies exactly.
At this point, the end of $P'$ has been reached, so the arguments ©10.2, 4© are structured into the tuple expression ©[10.2, 4]©.
Finally, the end of the parameter list $P$ has also been reached, so the final expression is ©f(5, [10.2, 4])©.

\subsection{Assignment}
\label{s:TupleAssignment}
An assignment where the left side of the assignment operator has a tuple type is called \newterm{tuple assignment}.
There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a non-tuple or tuple type, called \newterm[mass assignment]{mass} and \newterm[multiple assignment]{multiple} assignment, respectively.
\begin{cfa}
int x;
double y;
[ int, double ] z;

[ y, x ] = 3.14;                                                §\C{// mass assignment}§
[ x, y ] = z;                                                   §\C{// multiple assignment}§
z = 10;                                                         §\C{// mass assignment}§
z = [ x, y ];                                                   §\C{// multiple assignment}§
\end{cfa}
Let $L_i$ for $i$ in $[0, n)$ represent each component of the flattened left side, $R_i$ represent each component of the flattened right side of a multiple assignment, and $R$ represent the right side of a mass assignment.
For example, the following is invalid because the number of components on the left does not match the number of components on the right.
\begin{cfa}
[ int, int ] x, y, z;
[ x, y ] = z;                                              §\C{// multiple assignment, invalid 4 != 2}§
\end{cfa}
Multiple assignment assigns $R_i$ to $L_i$ for each $i$.
\begin{cfa}
int x = 10, y = 20;
[ x, y ] = [ y, x ];
\end{cfa}
After executing this code, ©x© has the value ©20© and ©y© has the value ©10©.
\begin{cfa}
int a, b;
double c, d;
[ void ] f( [ int, int ] );
f( [ c, a ] = [ b, d ] = 1.5 );                         §\C{// assignments in parameter list}§
\end{cfa}
The tuple expression begins with a mass assignment of ©1.5© into ©[b, d]©, which assigns ©1.5© into ©b©, which is truncated to ©1©, and ©1.5© into ©d©, producing the tuple ©[1, 1.5]© as a result.
That tuple is then used as the right side of the multiple assignment into ©[c, a]©, assigning ©1© (converted to ©1.0©) into ©c© and ©1.5©, truncated to ©1©, into ©a©, producing the tuple ©[1, 1]©.
Finally, the tuple ©[1, 1]© is used as an expression in the call to ©f©.

\subsection{Construction}
Tuple construction and destruction follow the same rules and semantics as tuple assignment, except that in the case where there is no right side, the default constructor or destructor is called on each component of the tuple.
As constructors and destructors did not exist in previous versions of \CFA or in \KWC, this is a primary contribution of this thesis to the design of tuples.
The initialization of ©s© with ©t© works by default because ©t© is flattened into its components, which satisfies the generated field constructor ©?{}(S *, int, double)© to initialize the first two values.

\subsection{Member-Access Expression}
\label{s:MemberAccessTuple}
Tuples may be used to select multiple fields of a record by field name.
The result is a single tuple-valued expression whose type is the tuple of the types of the members.
For example,
\begin{cfa}
struct S { char x; int y; double z; } s;
s.[ x, y, z ];
\end{cfa}
Since tuple-index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member-access expressions to restructure a tuple (\eg, rearrange components, drop components, duplicate components, etc.).
\begin{cfa}
[ int, int, long, double ] x;
void f( double, long );
f( x.[ 0, 3 ] );                                                §\C{// f( x.0, x.3 )}§
x.[ 0, 1 ] = x.[ 1, 0 ];                                §\C{// [ x.0, x.1 ] = [ x.1, x.0 ]}§
[ long, int, long ] y = x.[ 2, 0, 2 ];
\end{cfa}
It is possible for a member tuple expression to contain other member access expressions, \eg:
\begin{cfa}
struct A { double i; int j; };
struct B { int * k; short l; };
struct C { int x; A y; B z; } v;
v.[x, y.[i, j], z.k];
\end{cfa}
This expression is equivalent to ©[v.x, [v.y.i, v.y.j], v.z.k]©.
That is, the aggregate expression is effectively distributed across the tuple, which allows simple and easy access to multiple components in an aggregate, without repetition.
v.[ x, y.[ i, j ], z.k ];
\end{cfa}
This expression is equivalent to ©[ v.x, [ v.y.i, v.y.j ], v.z.k ]©.
That is, the aggregate expression is effectively distributed across the tuple allowing simple and easy access to multiple components in an aggregate without repetition.
It is guaranteed that the aggregate expression to the left of the ©.© in a member tuple expression is evaluated exactly once.
As such, it is safe to use member tuple expressions on the result of a side-effecting function.
\begin{cfa}
[int, float, double] f();
[double, float] x = f().[2, 1];
As such, it is safe to use member tuple expressions on the result of a function with side-effects.
\begin{cfa}
[ int, float, double ] f();
[ double, float ] x = f().[ 2, 1 ];             §\C{// f() called once}§
\end{cfa}
Since \CFA permits these tuple-access expressions using structures, unions, and tuples, the term \emph{member tuple expression} or \emph{field tuple expression} is more appropriate.
It is possible to extend member-access expressions further.
Currently, a member-access expression whose member is a name requires that the aggregate is a structure or union, while a constant integer member requires the aggregate to be a tuple.
In the interest of orthogonal design, \CFA could apply some meaning to the remaining combinations as well.
For example,
\begin{cfa}
struct S { int x, y; } s;
[S, S] z;
s.x;  // access member
z.0;  // access component
s.1;  // ???
z.y;  // ???
\end{cfa}
One possibility is for ©s.1© to select the second member of ©s©.
Under this interpretation, it becomes possible to access members of a struct not only by name but also by position.
Likewise, it seems natural to open this mechanism to enumerations as well, wherein the left side would be a type, rather than an expression.
One benefit of this interpretation is familiarity, since it is extremely reminiscent of tuple-index expressions.
On the other hand, it could be argued that this interpretation is brittle, in that changing the order of members or adding new members to a structure silently changes the meaning of every positional access.
This problem is less of a concern with tuples, since modifying a tuple affects only the code that directly uses the tuple, whereas modifying a structure has far-reaching consequences for every instance of the structure.
As for ©z.y©, one interpretation is to extend the meaning of member tuple expressions.
That is, currently the tuple must occur as the member, \ie to the right of the dot.
Allowing tuples to the left of the dot could distribute the member across the elements of the tuple, in much the same way that member tuple expressions distribute the aggregate across the member tuple.
In this example, ©z.y© expands to ©[z.0.y, z.1.y]©, allowing what is effectively a very limited compile-time field-sections map operation, where the argument must be a tuple containing only aggregates having a member named ©y©.
It is questionable how useful this would actually be in practice, since structures often do not have names in common with other structures, and further this could cause maintainability issues in that it encourages programmers to adopt very simple naming conventions to maximize the amount of overlap between different types.
Perhaps more useful would be to allow arrays on the left side of the dot, which would likewise allow mapping a field access across the entire array, producing an array of the contained fields.
The immediate problem with this idea is that C arrays do not carry around their size, which would make it impossible to use this extension for anything other than a simple stack-allocated array.
Supposing this feature works as described, it would be necessary to specify an ordering for the expansion of member-access expressions versus member-tuple expressions.
\begin{cfa}
struct S { int x, y; };
[S, S] z;
z.[x, y];  // ???
// => [z.0, z.1].[x, y]
// => [z.0.x, z.0.y, z.1.x, z.1.y]
// or
// => [z.x, z.y]
// => [[z.0, z.1].x, [z.0, z.1].y]
// => [z.0.x, z.1.x, z.0.y, z.1.y]
\end{cfa}
Depending on exactly how the two tuples are combined, different results can be achieved.
As such, a specific ordering would need to be imposed to make this feature useful.
Furthermore, this addition moves a member-tuple expression's meaning from being clear statically to needing resolver support, since the member name needs to be distributed appropriately over each member of the tuple, which could itself be a tuple.
A second possibility is for \CFA to have named tuples, as they exist in Swift and D.
\begin{cfa}
typedef [int x, int y] Point2D;
Point2D p1, p2;
p1.x + p1.y + p2.x + p2.y;
p1.0 + p1.1 + p2.0 + p2.1;  // equivalent
\end{cfa}
In this simpler interpretation, a tuple type carries with it a list of possibly empty identifiers.
This approach fits naturally with the named return-value feature, and would likely go a long way towards implementing it.
Ultimately, the first two extensions introduce complexity into the model, with relatively little perceived benefit, and so were dropped from consideration.
Named tuples are a potentially useful addition to the language, provided they can be parsed with a reasonable syntax.
\section{Casting}
\subsection{Casting}
In C, the cast operator is used to explicitly convert between types.
In \CFA, the cast operator has a secondary use, which is type ascription, since it forces the expression resolution algorithm to choose the lowest cost conversion to the target type.
That is, it is invalid to cast ©[int, int]© to ©[int, int, int]©.
\section{Polymorphism}
\subsection{Polymorphism}
Due to the implicit flattening and structuring conversions involved in argument passing, ©otype© and ©dtype© parameters are restricted to matching only with non-tuple types.
The integration of polymorphism, type assertions, and monomorphic specialization of tuple-assertions is a primary contribution of this thesis to the design of tuples.
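As an illustrative sketch of this restriction (the routine ©g© is hypothetical), implicit flattening means a polymorphic parameter only ever matches the components of a tuple argument, never the tuple itself:
\begin{cfa}
forall(otype T, otype U) void g( T, U );
g( 5, 1.5 );                                                    §\C{// T bound to int, U bound to double}§
g( [5, 1.5] );                                                  §\C{// tuple argument is flattened: same bindings as above}§
\end{cfa}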
Until this point, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie, no implicit conversions are applied to assertion arguments).
This decision presents a conflict with the flexibility of tuples.
\subsection{Assertion Inference}
\subsubsection{Assertion Inference}
\begin{cfa}
int f([int, double], double);
Unfortunately, C's syntax for subscripts precluded treating them as tuples.
The C subscript list has the form ©[i][j]...© and not ©[i, j, ...]©.
Therefore, there is no syntactic way for a routine returning multiple values to specify the different subscript values, \eg ©f[g()]© always means a single subscript value because there is only one set of brackets.
Therefore, there is no syntactic way for a routine returning multiple values to specify the different subscript values, \eg ©f[ g() ]© always means a single subscript value because there is only one set of brackets.
Fixing this requires a major change to C because the syntactic form ©M[i, j, k]© already has a particular meaning: ©i, j, k© is a comma expression.
\end{rationale}
\section{Mass Assignment}
\subsection{Mass Assignment}
\CFA permits assignment to several variables at once using mass assignment~\cite{CLU}.
\section{Multiple Assignment}
\subsection{Multiple Assignment}
\CFA also supports the assignment of several values at once, known as multiple assignment~\cite{CLU,Galletly96}.
\section{Cascade Assignment}
\subsection{Cascade Assignment}
As in C, \CFA mass and multiple assignments can be cascaded, producing cascade assignment.
\end{cfa}
As in C, the rightmost assignment is performed first, \ie assignment parses right to left.
\section{Field Tuples}
Tuples may be used to select multiple fields of a record by field name.
Its general form is:
\begin{cfa}
§\emph{expr}§ . [ §\emph{fieldlist}§ ]
§\emph{expr}§ -> [ §\emph{fieldlist}§ ]
\end{cfa}
\emph{expr} is any expression yielding a value of type record, \eg ©struct©, ©union©.
Each element of \emph{fieldlist} is an element of the record specified by \emph{expr}.
A record-field tuple may be used anywhere a tuple can be used.
An example of the use of a record-field tuple is the following:
\begin{cfa}
struct s { int f1, f2; char f3; double f4; } v;
v.[ f3, f1, f2 ] = [ 'x', 11, 17 ];      §\C{// equivalent to v.f3 = 'x', v.f1 = 11, v.f2 = 17}§
f( v.[ f3, f1, f2 ] );                          §\C{// equivalent to f( v.f3, v.f1, v.f2 )}§
\end{cfa}
Note, the fields appearing in a record-field tuple may be specified in any order; also, it is unnecessary to specify all the fields of a struct in a multiple record-field tuple.
If a field of a ©struct© is itself another ©struct©, multiple fields of this subrecord can be specified using a nested record-field tuple, as in the following example:
\begin{cfa}
struct inner { int f2, f3; };
struct outer { int f1; struct inner i; double f4; } o;
o.[ f1, i.[ f2, f3 ], f4 ] = [ 11, 12, 13, 3.14159 ];
\end{cfa}
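Reusing the ©inner©/©outer© declarations above, a nested record-field tuple can also appear as an rvalue, where it distributes in the same way (the routine ©f© is hypothetical):
\begin{cfa}
f( o.[ f1, i.[ f2, f3 ] ] );                    §\C{// equivalent to f( o.f1, o.i.f2, o.i.f3 )}§
\end{cfa}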
• ## src/Concurrency/Waitfor.cc

 rc6e2c18 makeAccStatement( acceptables, index, "is_dtor", detectIsDtor( clause.target.function )                                    , indexer ), makeAccStatement( acceptables, index, "func"   , new CastExpr( clause.target.function, fptr_t )                            , indexer ), makeAccStatement( acceptables, index, "list"   , new VariableExpr( monitors )                                              , indexer ), makeAccStatement( acceptables, index, "data"   , new VariableExpr( monitors )                                              , indexer ), makeAccStatement( acceptables, index, "size"   , new ConstantExpr( Constant::from_ulong( clause.target.arguments.size() ) ), indexer ), setter->clone()

• ## src/Parser/ParseNode.h

 rc6e2c18 // Created On       : Sat May 16 13:28:16 2015 // Last Modified By : Peter A. Buhr // Last Modified On : Sat Sep 23 18:11:22 2017 // Update Count     : 821 // Last Modified On : Mon Nov 27 17:33:35 2017 // Update Count     : 824 // DeclarationNode * set_extension( bool exten ) { extension = exten; return this; } public: DeclarationNode * get_last() { return (DeclarationNode *)ParseNode::get_last(); } struct Variable_t { //              const std::string * name;

• ## src/libcfa/bits/containers.h

 rc6e2c18 #pragma once #include #include "bits/defs.h" #include "libhdr.h" #include "libhdr.h" //----------------------------------------------------------------------------- // Array //----------------------------------------------------------------------------- #ifdef __cforall forall(dtype T) #else #define T void #endif struct __small_array { T *           data; __lock_size_t size; }; #undef T #ifdef __cforall #define __small_array_t(T) __small_array(T) #else #define __small_array_t(T) struct __small_array #endif #ifdef __cforall // forall(otype T | sized(T)) // static inline void ?{}(__small_array(T) & this) {} forall(dtype T | sized(T)) static inline T& ?[?]( __small_array(T) & this, __lock_size_t idx) { return ((typeof(this.data))this.data)[idx]; } forall(dtype T | sized(T)) static inline T& ?[?]( const __small_array(T) & this, __lock_size_t idx) { return ((typeof(this.data))this.data)[idx]; } forall(dtype T | sized(T)) static inline T* begin( const __small_array(T) & this ) { return ((typeof(this.data))this.data); } forall(dtype T | sized(T)) static inline T* end( const __small_array(T) & this ) { return ((typeof(this.data))this.data) + this.size; } #endif //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- #ifdef __CFORALL__ #ifdef __cforall trait is_node(dtype T) { T*& get_next( T& ); // Stack //----------------------------------------------------------------------------- #ifdef __CFORALL__ #ifdef __cforall forall(dtype TYPE | is_node(TYPE)) #define T TYPE T * top; }; #undef T #ifdef __CFORALL__ #ifdef __cforall #define __stack_t(T) __stack(T) #else #endif #ifdef __CFORALL__ #ifdef __cforall forall(dtype T | is_node(T)) void ?{}( __stack(T) & this ) { this.top = NULL; static inline void ?{}( __stack(T) & this ) { (this.top){ NULL }; } forall(dtype T | is_node(T) | sized(T)) void push( __stack(T) & this, T * val ) { static inline void push( __stack(T) & this, T * val ) { verify( !get_next( *val ) ); get_next( *val ) = this.top; forall(dtype T | is_node(T) | sized(T)) T * pop( __stack(T) & this ) { static inline T * pop( __stack(T) & this ) { T * top = this.top; if( top ) { // Queue //----------------------------------------------------------------------------- #ifdef __CFORALL__ forall(dtype T | is_node(T)) #ifdef __cforall forall(dtype TYPE | is_node(TYPE)) #define T TYPE #else T ** tail; }; #undef T #ifdef __CFORALL__ #ifdef __cforall #define __queue_t(T) __queue(T) #else #define __queue_t(T) struct __queue #endif #ifdef __cforall forall(dtype T | is_node(T)) void ?{}( __queue(T) & this ) { this.head = NULL; this.tail = &this.head; static inline void ?{}( __queue(T) & this ) { (this.head){ NULL }; (this.tail){ &this.head }; } forall(dtype T | is_node(T) | sized(T)) void append( __queue(T) & this, T * val ) { static inline void append( __queue(T) & this, T * val ) { verify(this.tail != NULL); *this.tail = val; forall(dtype T | is_node(T) | sized(T)) T * pop_head( __queue(T) & this ) { static inline T * pop_head( __queue(T) & this ) { T * head = this.head; if( head ) { forall(dtype T | is_node(T) | sized(T)) T * remove( __queue(T) & this, T ** it ) { static inline T * remove( __queue(T) & this, T ** it ) { T * val = *it; verify( val );
• ## src/libcfa/bits/defs.h

 rc6e2c18
#include
#include
#include
#define likely(x)    __builtin_expect(!!(x), 1)
#define thread_local _Thread_local
typedef void (*fptr_t)();
typedef int_fast16_t __lock_size_t;
#ifdef __cforall
#define __cfa_anonymous_object
#else
#define __cfa_anonymous_object __cfa_anonymous_object
#endif
• ## src/libcfa/bits/locks.h

 rc6e2c18 } __ALIGN__; #ifdef __CFORALL__ #ifdef __cforall extern void yield( unsigned int ); extern thread_local struct thread_desc *    volatile this_thread;

• ## src/libcfa/exception.h

 rc6e2c18 #ifdef __CFORALL__ #ifdef __cforall extern "C" { #endif struct __cfaehm__cleanup_hook {}; #ifdef __CFORALL__ #ifdef __cforall } #endif
• ## src/libcfa/stdhdr/assert.h

 rc6e2c18 // The contents of this file are covered under the licence agreement in the // file "LICENCE" distributed with Cforall. // // assert.h -- // // // assert.h -- // // Author           : Peter A. Buhr // Created On       : Mon Jul  4 23:25:26 2016 // Last Modified On : Mon Jul 31 23:09:32 2017 // Update Count     : 13 // // #ifdef __CFORALL__ #ifdef __cforall extern "C" { #endif //__CFORALL__ #endif //__cforall #include_next #endif #ifdef __CFORALL__ #ifdef __cforall } // extern "C" #endif //__CFORALL__ #endif //__cforall // Local Variables: //
• ## src/libcfa/virtual.h

 rc6e2c18 #pragma once #ifdef __CFORALL__ #ifdef __cforall extern "C" { #endif struct __cfa__parent_vtable const * const * child ); #ifdef __CFORALL__ #ifdef __cforall } #endif

• ## src/tests/designations.c

 rc6e2c18 // In particular, since the syntax for designations in Cforall differs from that of C, preprocessor substitution // is used for the designation syntax #ifdef __CFORALL__ #ifdef __cforall #define DES : #else
• ## src/tests/functions.c

 rc6e2c18 // Created On       : Wed Aug 17 08:39:58 2016 // Last Modified By : Peter A. Buhr // Last Modified On : Wed Aug 17 08:40:52 2016 // Update Count     : 1 // Last Modified On : Mon Nov 27 18:08:54 2017 // Update Count     : 11 // // Cforall extensions [] f( ); // [] f( ); [int] f( ); [] f(int); // [] f(int); [int] f(int); [] f( ) {} // [] f( ) {} [int] f( ) {} [] f(int) {} // [] f(int) {} [int] f(int) {} [int x] f( ); [] f(int x); [int x] f(int x); [int x] f( ) {} [] f(int x) {} [int x] f(int x) {} // [] f(int x); //[int x] f(int x); //[int x] f( ) {} // [] f(int x) {} //[int x] f(int x) {} [int, int x] f( ); [] f(int, int x); // [] f(int, int x); [int, int x] f(int, int x); [int, int x] f( ) {} [] f(int, int x) {} // [] f(int, int x) {} [int, int x] f(int, int x) {} [int, int x, int] f( ); [] f(int, int x, int); // [] f(int, int x, int); [int, int x, int] f(int, int x, int); [int, int x, int] f( ) {} [] f(int, int x, int) {} // [] f(int, int x, int) {} [int, int x, int] f(int, int x, int) {} [int, int x, * int y] f( ); [] f(int, int x, * int y); // [] f(int, int x, * int y); [int, int x, * int y] f(int, int x, * int y); [int, int x, * int y] f( ) {} [] f(int, int x, * int y) {} // [] f(int, int x, * int y) {} [int, int x, * int y] f(int, int x, * int y) {} [ int ] f11( int ), f12;  // => int f11( int ), f12( int ); // function prototypes [ int ] f11( int ), f12();  // => int f11( int ), f12( void ); const double bar1(), bar2( int ), bar3( double );               // C version [const double] foo(), foo( int ), foo( double ) { return 3.0; } // CFA version struct S { int i; }; [S] rtn( int ) {} [int] f( [int](int) ) { int (*(*p)[][10])[][3]; int (*(*pc)[][10])[][3]; * [][10] * [][3] int p; * [] * [int](int) p;