Changeset 436c0de
- Timestamp: Jun 18, 2017, 9:22:22 AM (8 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: f1e80d8
- Parents: ade20d0 (diff), 42b0d73 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
  - 20 added
  - 6 deleted
  - 64 edited
  - 1 moved
.gitignore
ade20d0 → 436c0de
  libcfa/Makefile
  src/Makefile
- version
+ /version

  # genereted by premake
configure
ade20d0 → 436c0de
- ac_config_files="$ac_config_files Makefile src/driver/Makefile src/Makefile src/benchmark/Makefile src/examples/Makefile src/tests/Makefile src/prelude/Makefile src/libcfa/Makefile"
+ ac_config_files="$ac_config_files Makefile src/driver/Makefile src/Makefile src/benchmark/Makefile src/examples/Makefile src/tests/Makefile src/tests/preempt_longrun/Makefile src/prelude/Makefile src/libcfa/Makefile"
  …
  "src/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/examples/Makefile" ;;
  "src/tests/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/Makefile" ;;
+ "src/tests/preempt_longrun/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/preempt_longrun/Makefile" ;;
  "src/prelude/Makefile") CONFIG_FILES="$CONFIG_FILES src/prelude/Makefile" ;;
  "src/libcfa/Makefile") CONFIG_FILES="$CONFIG_FILES src/libcfa/Makefile" ;;
configure.ac
ade20d0 → 436c0de
  src/examples/Makefile
  src/tests/Makefile
+ src/tests/preempt_longrun/Makefile
  src/prelude/Makefile
  src/libcfa/Makefile
doc/proposals/concurrency/Makefile
ade20d0 → 436c0de
  annex/glossary \
  text/intro \
+ text/cforall \
  text/basics \
  text/concurrency \
doc/proposals/concurrency/build/bump_ver.sh
ade20d0 → 436c0de
  #!/bin/bash
- if [ ! -f build/version ]; then
-     echo "0.0.0" > build/version
+ if [ ! -f version ]; then
+     echo "0.0.0" > version
  fi

- sed -r 's/([0-9]+\.[0-9]+.)([0-9]+)/echo "\1\$((\2+1))" > version/ge' build/version > /dev/null
+ sed -r 's/([0-9]+\.[0-9]+.)([0-9]+)/echo "\1\$((\2+1))" > version/ge' version > /dev/null
doc/proposals/concurrency/text/basics.tex
ade20d0 → 436c0de

  \section{Basics of concurrency}
- At its core, concurrency is based on having multiple call stacks and potentially multiple threads of execution for these stacks. Concurrency alone without parallelism only requires having multiple call stacks (or contexts) for a single thread of execution and switching between these call stacks on a regular basis. A minimal concurrency product can be achieved by creating coroutines which instead of context switching between each other, always ask an oracle where to context switch next. While coroutines do not technically require a stack, stackfull coroutines are the closest abstraction to a practical "naked"" call stack. When writing concurrency in terms of coroutines, the oracle effectively becomes a scheduler and the whole system now follows a cooperative threading model \cit. The oracle/scheduler can either be a stackless or stackfull entity and correspondingly require one or two context switches to run a different coroutine but in any case a subset of concurrency related challenges start to appear. For the complete set of concurrency challenges to be present, the only feature missing is preemption. Indeed, concurrency challenges appear with the lack of determinism. Guaranteeing mutual-exclusion or synchronisation are simply ways of limiting the lack of determinism in the system. A scheduler introduces order of execution uncertainty while preemption introduces incertainty about when context-switches occur. Now it is important to understand that uncertainty is not necessarily undesireable, uncertainty can often be used by systems to significantly increase performance and is often the basis of giving the user the illusion that hundred of tasks are running in parallel. Optimal performance in concurrent applications is often obtained by having as little determinism as correctness will allow\cit.
+ At its core, concurrency is based on having call-stacks and potentially multiple threads of execution for these stacks. Concurrency without parallelism only requires having multiple call stacks (or contexts) for a single thread of execution, and switching between these call stacks on a regular basis. A minimal concurrency product can be achieved by creating coroutines, which instead of context switching between each other, always ask an oracle where to context switch next. While coroutines do not technically require a stack, stackfull coroutines are the closest abstraction to a practical "naked"" call stack. When writing concurrency in terms of coroutines, the oracle effectively becomes a scheduler and the whole system now follows a cooperative threading-model \cit. The oracle/scheduler can either be a stackless or stackfull entity and correspondingly require one or two context switches to run a different coroutine. In any case, a subset of concurrency related challenges start to appear. For the complete set of concurrency challenges to occur, the only feature missing is preemption. Indeed, concurrency challenges appear with non-determinism. Guaranteeing mutual-exclusion or synchronisation are simply ways of limiting the lack of determinism in a system. A scheduler introduces order of execution uncertainty, while preemption introduces incertainty about where context-switches occur. Now it is important to understand that uncertainty is not necessarily undesireable; uncertainty can often be used by systems to significantly increase performance and is often the basis of giving a user the illusion that tasks are running in parallel.
+ Optimal performance in concurrent applications is often obtained by having as much non-determinism as correctness allows\cit.

  \section{\protect\CFA 's Thread Building Blocks}
- % As a system-level language, \CFA should offer both performance and flexibilty as its primary goals, simplicity and user-friendliness being a secondary concern. Therefore, the core of parallelism in \CFA should prioritize power and efficiency. With this said, deconstructing popular paradigms in order to get simple building blocks yields \glspl{uthread} as the core parallelism block. \Glspl{pool} and other parallelism paradigms can then be built on top of the underlying threading model.
- One of the important features that is missing to C is threading. On modern architectures, the lack of threading is becoming less and less forgivable\cite{Sutter05, Sutter05b} and therefore any modern programming language should have the proper tools to allow users to write performant concurrent and/or parallel programs. As an extension of C, \CFA needs to express these concepts an a way that is as natural as possible to programmers used to imperative languages. And being a system level language means programmers will expect to be able to choose precisely which features they need and which cost they are willing to pay.
-
- \section{Coroutines A stepping stone}\label{coroutine}
- While the main focus of this proposal is concurrency and parallelism, as mentionned above it is important to adress coroutines which are actually a significant underlying aspect of the concurrency system. Indeed, while having nothing todo with parallelism and arguably little to do with concurrency, coroutines need to deal with context-switchs and and other context management operations. Therefore, this proposal includes coroutines both as an intermediate step for the implementation of threads and a first class feature of \CFA. Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant. The core API of coroutines revolve around two features independent call stacks and \code{suspend}/\code{resume}.
+ One of the important features that is missing in C is threading. On modern architectures, a lack of threading is becoming less and less forgivable\cite{Sutter05, Sutter05b}, and therefore modern programming languages must have the proper tools to allow users to write performant concurrent and/or parallel programs. As an extension of C, \CFA needs to express these concepts in a way that is as natural as possible to programmers used to imperative languages. And being a system-level language means programmers expect to choose precisely which features they need and which cost they are willing to pay.
+
+ \section{Coroutines: A stepping stone}\label{coroutine}
+ While the main focus of this proposal is concurrency and parallelism, as mentionned above it is important to adress coroutines, which are actually a significant underlying aspect of a concurrency system. Indeed, while having nothing todo with parallelism and arguably little to do with concurrency, coroutines need to deal with context-switchs and and other context-management operations. Therefore, this proposal includes coroutines both as an intermediate step for the implementation of threads, and a first class feature of \CFA. Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant.
+ The core API of coroutines revolve around two features: independent call stacks and \code{suspend}/\code{resume}.

  Here is an example of a solution to the fibonnaci problem using \CFA coroutines:
  …
  }

+ // main automacically called on first resume
  void main(Fibonacci* this) {
  int fn1, fn2; // retained between resumes
  …

  \subsection{Construction}
- One important design challenge for coroutines and threads (shown in section \ref{threads}) is that the runtime system needs to run some code after the user-constructor runs. In the case of the coroutines this challenge is simpler since there is no loss of determinism brough by preemption or scheduling, however, the underlying challenge remains the same for coroutines and threads.
-
- The runtime system needs to create the coroutine's stack and more importantly prepare it for the first resumption. The timing of the creation is non trivial since users both expect to have fully constructed objects once execution enters the coroutine main and to be able to resume the coroutine from the constructor (Obviously we only solve cases where these two statements don't conflict). There are several solutions to this problem but the chosen options effectively forces the design of the coroutine.
-
- Furthermore, \CFA faces an extra challenge which is that polymorphique routines rely on invisible thunks when casted to non-polymorphic routines and these thunks have function scope. For example, the following code, while looking benign, can run into undefined behaviour because of thunks:
+ One important design challenge for coroutines and threads (shown in section \ref{threads}) is that the runtime system needs to run code after the user-constructor runs. In the case of coroutines, this challenge is simpler since there is no non-determinism from preemption or scheduling. However, the underlying challenge remains the same for coroutines and threads.
+
+ The runtime system needs to create the coroutine's stack and more importantly prepare it for the first resumption. The timing of the creation is non-trivial since users both expect to have fully constructed objects once execution enters the coroutine main and to be able to resume the coroutine from the constructor. Like for regular objects, constructors can still leak coroutines before they are ready. There are several solutions to this problem but the chosen options effectively forces the design of the coroutine.
+
+ Furthermore, \CFA faces an extra challenge as polymorphic routines create invisible thunks when casted to non-polymorphic routines and these thunks have function scope. For example, the following code, while looking benign, can run into undefined behaviour because of thunks:

  \begin{cfacode}
  …
  }
  \end{cfacode}
- Indeed, the generated C code\footnote{Code trimmed down for brevity} shows that a local thunk is created in order to hold type information:
+ The generated C code\footnote{Code trimmed down for brevity} creates a local thunk to hold type information:

  \begin{ccode}
  …
  }
  \end{ccode}
- The problem in the this example is that there is a race condition between the start of the execution of \code{noop} on the other thread and the stack frame of \code{bar} being destroyed. This extra challenge limits which solutions are viable because storing the function pointer for too long only increases the chances that the race will end in undefined behavior; i.e. the stack based thunk being destroyed before it was used.
+ The problem in this example is a race condition between the start of the execution of \code{noop} on the other thread and the stack frame of \code{bar} being destroyed. This extra challenge limits which solutions are viable because storing the function pointer for too long only increases the chances that the race will end in undefined behavior; i.e. the stack based thunk being destroyed before it was used. This challenge is an extension of challenges that come with second-class routines. Indeed, GCC nested routines also have the limitation that the routines cannot be passed outside of the scope of the functions these were declared in. The case of coroutines and threads is simply an extension of this problem to multiple call-stacks.

  \subsection{Alternative: Composition}
- One solution to this challenge would be to use inheritence,
+ One solution to this challenge would be to use composition/containement,

  \begin{cfacode}
  struct Fibonacci {
  int fn; // used for communication
- coroutine c;
+ coroutine c; //composition
  };
  …
  }
  \end{cfacode}
- There are two downsides to this approach. The first, which is relatively minor, is that the base class needs to be made aware of the main routine pointer, regardless of whether we use a parameter or a virtual pointer, this means the coroutine data must be made larger to store a value that is actually a compile time constant (The address of the main routine). The second problem which is both subtle but significant, is that now users can get the initialisation order of there coroutines wrong. Indeed, every field of a \CFA struct will be constructed but in the order of declaration, unless users explicitly write otherwise. This means that users who forget to initialize a the coroutine at the right time may resume the coroutine with an uninitilized object. For coroutines, this is unlikely to be a problem, for threads however, this is a significant problem.
+ There are two downsides to this approach. The first, which is relatively minor, is that the base class needs to be made aware of the main routine pointer, regardless of whether a parameter or a virtual pointer is used, this means the coroutine data must be made larger to store a value that is actually a compile time constant (address of the main routine). The second problem, which is both subtle and significant, is that now users can get the initialisation order of there coroutines wrong. Indeed, every field of a \CFA struct is constructed but in declaration order, unless users explicitly write otherwise. This semantics means that users who forget to initialize a the coroutine may resume the coroutine with an uninitilized object. For coroutines, this is unlikely to be a problem, for threads however, this is a significant problem.

  \subsection{Alternative: Reserved keyword}
  …
  };
  \end{cfacode}
  This mean the compiler can solve problems by injecting code where needed. The downside of this approach is that it makes coroutine a special case in the language. Users who would want to extend coroutines or build their own for various reasons can only do so in ways offered by the language. Furthermore, implementing coroutines without language supports also displays the power of \CFA.
  While this is ultimately the option used for idiomatic \CFA code, coroutines and threads can both be constructed by users without using the language support. The reserved keywords are only present to improve ease of use for the common cases.
  …
  \subsection{Alternative: Lamda Objects}

- For coroutines as for threads, many implementations are based on routine pointers or function objects\cit. For example, Boost implements coroutines in terms of four functor object types \code{asymmetric_coroutine<>::pull_type}, \code{asymmetric_coroutine<>::push_type}, \code{symmetric_coroutine<>::call_type}, \code{symmetric_coroutine<>::yield_type}. Often, the canonical threading paradigm in languages is based on function pointers, pthread being one of the most well known example. The main problem of these approach is that the thread usage is limited to a generic handle that must otherwise be wrapped in a custom type. Since the custom type is simple to write and \CFA and solves several issues, added support for routine/lambda based coroutines adds very little.
-
- \subsection{Trait based coroutines}
-
- Finally the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines. This approach defines a coroutine as \say{anything that \say{satisfies the trait \code{is_coroutine} and is used as a coroutine} is a coroutine}.
+ For coroutines as for threads, many implementations are based on routine pointers or function objects\cit. For example, Boost implements coroutines in terms of four functor object types:
+ \begin{cfacode}
+ asymmetric_coroutine<>::pull_type
+ asymmetric_coroutine<>::push_type
+ symmetric_coroutine<>::call_type
+ symmetric_coroutine<>::yield_type
+ \end{cfacode}
+ Often, the canonical threading paradigm in languages is based on function pointers, pthread being one of the most well known examples. The main problem of this approach is that the thread usage is limited to a generic handle that must otherwise be wrapped in a custom type. Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda based coroutines adds very little.
+
+ A variation of this would be to use an simple function pointer in the same way pthread does for threads :
+ \begin{cfacode}
+ void foo( coroutine_t cid, void * arg ) {
+ int * value = (int *)arg;
+ //Coroutine body
+ }
+
+ int main() {
+ int value = 0;
+ coroutine_t cid = coroutine_create( &foo, (void*)&value );
+ coroutine_resume( &cid );
+ }
+ \end{cfacode}
+ This semantic is more common for thread interfaces than coroutines but would work equally well. As discussed in section \ref{threads}, this approach is superseeded by static approaches in terms of expressivity.
+
+ \subsection{Alternative: Trait-based coroutines}
+
+ Finally the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines. This approach defines a coroutine as anything that satisfies the trait \code{is_coroutine} and is used as a coroutine is a coroutine.

  \begin{cfacode}
  …
  };
  \end{cfacode}
- This entails that an object is not a coroutine until \code{resume} (or \code{prime}) is called on the object. Correspondingly, any object that is passed to \code{resume} is a coroutine since it must satisfy the \code{is_coroutine} trait to compile.
- The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory foot print of a coroutine is trivial when implementing the \code{get_coroutine} routine. The \CFA keyword \code{coroutine} only has the effect of implementing the getter and forward declarations required for users to only have to implement the main routine.
+ This ensures an object is not a coroutine until \code{resume} (or \code{prime}) is called on the object. Correspondingly, any object that is passed to \code{resume} is a coroutine since it must satisfy the \code{is_coroutine} trait to compile. The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory foot print of a coroutine is trivial when implementing the \code{get_coroutine} routine. The \CFA keyword \code{coroutine} only has the effect of implementing the getter and forward declarations required for users to only have to implement the main routine.
+
+ \begin{center}
+ \begin{tabular}{c c c}
+ \begin{cfacode}[tabsize=3]
+ coroutine MyCoroutine {
+ int someValue;
+ };
+ \end{cfacode} & == & \begin{cfacode}[tabsize=3]
+ struct MyCoroutine {
+ int someValue;
+ coroutine_desc __cor;
+ };
+
+ static inline
+ coroutine_desc * get_coroutine(
+ struct MyCoroutine * this
+ ) {
+ return &this->__cor;
+ }
+
+ void main(struct MyCoroutine * this);
+ \end{cfacode}
+ \end{tabular}
+ \end{center}

  \section{Thread Interface}\label{threads}
- The basic building blocks of multi-threading in \CFA are \glspl{cfathread}. By default these are implemented as \glspl{uthread}, and as such, offer a flexible and lightweight threading interface (lightweight compared to \glspl{kthread}). A thread can be declared using a SUE declaration \code{thread} as follows:
+ The basic building blocks of multi-threading in \CFA are \glspl{cfathread}. Both use and kernel threads are supported, where user threads are the concurrency mechanism and kernel threads are the parallel mechanism. User threads offer a flexible and lightweight interface. A thread can be declared using a struct declaration \code{thread} as follows:

  \begin{cfacode}
  …
  \end{cfacode}

- Like for coroutines, the keyword is a thin wrapper arount a \CFA trait:
+ As for coroutines, the keyword is a thin wrapper arount a \CFA trait:

  \begin{cfacode}
  …
  \end{cfacode}

- In this example, threads of type \code{foo} will start there execution in the \code{void main(foo*)} routine which in this case prints \code{"Hello World!"}. While this proposoal encourages this approach which enforces strongly type programming, users may prefer to use the routine based thread semantics for the sake of simplicity. With these semantics it is trivial to write a thread type that takes a function pointer as parameter and executes it on its stack asynchronously
+ In this example, threads of type \code{foo} start execution in the \code{void main(foo*)} routine which prints \code{"Hello World!"}. While this proposoal encourages this approach to enforce strongly-typed programming, users may prefer to use the routine based thread semantics for the sake of simplicity.
+ With these semantics it is trivial to write a thread type that takes a function pointer as parameter and executes it on its stack asynchronously
  \begin{cfacode}
  typedef void (*voidFunc)(void);
  …
  void main() {
  World w;
- //Thread run here
-
- //Printing "Hello " and "World!" will be run concurrently
+ //Thread forks here
+
+ //Printing "Hello " and "World!" are run concurrently
  sout | "Hello " | endl;
  …
  \end{cfacode}

- This semantic has several advantages over explicit semantics typesafety is guaranteed, a thread is always started and stopped exaclty once and users cannot make any progamming errors. However, one of the apparent drawbacks of this system is that threads now always form a lattice, that is they are always destroyed in opposite order of construction. While this seems like a significant limitation, existing \CFA semantics can solve this problem. Indeed, using dynamic allocation to create threads will naturally let threads outlive the scope in which the thread was created much like dynamically allocating memory will let objects outlive the scope in which thy were created
+ This semantic has several advantages over explicit semantics typesafety is guaranteed, a thread is always started and stopped exaclty once and users cannot make any progamming errors. Another advantage of this semantic is that it naturally scale to multiple threads meaning basic synchronisation is very simple
+
+ \begin{cfacode}
+ thread MyThread {
+ //...
+ };
+
+ //main
+ void main(MyThread* this) {
+ //...
+ }
+
+ void foo() {
+ MyThread thrds[10];
+ //Start 10 threads at the beginning of the scope
+
+ DoStuff();
+
+ //Wait for the 10 threads to finish
+ }
+ \end{cfacode}
+
+ However, one of the apparent drawbacks of this system is that threads now always form a lattice, that is they are always destroyed in opposite order of construction because of block structure. However, storage allocation os not limited to blocks; dynamic allocation can create threads that outlive the scope in which the thread is created much like dynamically allocating memory lets objects outlive the scope in which they are created

  \begin{cfacode}
  …
  }
  \end{cfacode}
-
- Another advantage of this semantic is that it naturally scale to multiple threads meaning basic synchronisation is very simple
-
- \begin{cfacode}
- thread MyThread {
- //...
- };
-
- //main
- void main(MyThread* this) {
- //...
- }
-
- void foo() {
- MyThread thrds[10];
- //Start 10 threads at the beginning of the scope
-
- DoStuff();
-
- //Wait for the 10 threads to finish
- }
- \end{cfacode}
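For readers unfamiliar with the function-pointer style discussed in the added text (the \code{coroutine_create}/\code{coroutine_resume} variation above), the pthread analogue it refers to looks roughly like the following C sketch. This is illustrative only and not part of the changeset; it uses the standard pthreads API to show the "generic handle plus void-pointer argument" pattern the text contrasts with the declarative \CFA thread types.

```c
#include <pthread.h>
#include <stdio.h>

// Thread body: receives an untyped argument and must cast it back, i.e. the
// "generic handle that must otherwise be wrapped in a custom type" drawback
// mentioned in the text.
void * worker( void * arg ) {
    int * value = (int *)arg;
    printf( "value = %d\n", *value );
    return NULL;
}

int main( void ) {
    int value = 42;
    pthread_t tid;
    pthread_create( &tid, NULL, worker, &value );  // function-pointer based creation
    pthread_join( tid, NULL );                     // explicit join; no implicit start/stop as with declarative CFA threads
    return 0;
}
```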
doc/proposals/concurrency/text/concurrency.tex
ade20d0 → 436c0de

  % ======================================================================
  % ======================================================================
- Several tool can be used to solve concurrency challenges. Since many of these challenges appear with the use of mutable shared-state, some languages and libraries simply disallow mutable shared-state (Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka (Scala)~\cite{Akka}). In these paradigms, interaction among concurrent objects relies on message passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms that closely relate to networking concepts (channels\cit for example). However, in languages that use routine calls as their core abstraction-mechanism, these approaches force a clear distinction between concurrent and non-concurrent paradigms (i.e., message passing versus routine call). This distinction in turn means that, in order to be effective, programmers need to learn two sets of designs patterns. This distinction can be hidden away in library code, effective use of the librairy still has to take both paradigms into account. Approaches based on shared memory are more closely related to non-concurrent paradigms since they often rely on basic constructs like routine calls and shared objects. At a lower level, non-concurrent paradigms are often implemented as locks and atomic operations. Many such mechanisms have been proposed, including semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}. However, for productivity reasons it is desireable to have a higher-level construct be the core concurrency paradigm~\cite{HPP:Study}. An approach that is worth mentionning because it is gaining in popularity is transactionnal memory~\cite{Dice10}[Check citation]. While this approach is even pursued by system languages like \CC\cit, the performance and feature set is currently too restrictive to add such a paradigm to a language like C or \CC\cit, which is why it was rejected as the core paradigm for concurrency in \CFA. One of the most natural, elegant, and efficient mechanisms for synchronization and communication, especially for shared memory systems, is the \emph{monitor}. Monitors were first proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}. Many programming languages---e.g., Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}---provide monitors as explicit language constructs. In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as semaphores or locks to simulate monitors. For these reasons, this project proposes monitors as the core concurrency-construct.
+ Several tool can be used to solve concurrency challenges. Since many of these challenges appear with the use of mutable shared-state, some languages and libraries simply disallow mutable shared-state (Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka (Scala)~\cite{Akka}). In these paradigms, interaction among concurrent objects relies on message passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms that closely relate to networking concepts (channels\cit for example).
+ However, in languages that use routine calls as their core abstraction-mechanism, these approaches force a clear distinction between concurrent and non-concurrent paradigms (i.e., message passing versus routine call). This distinction in turn means that, in order to be effective, programmers need to learn two sets of designs patterns. While this distinction can be hidden away in library code, effective use of the librairy still has to take both paradigms into account.
+
+ Approaches based on shared memory are more closely related to non-concurrent paradigms since they often rely on basic constructs like routine calls and shared objects. At the lowest level, concurrent paradigms are implemented as atomic operations and locks. Many such mechanisms have been proposed, including semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}. However, for productivity reasons it is desireable to have a higher-level construct be the core concurrency paradigm~\cite{HPP:Study}.
+
+ An approach that is worth mentionning because it is gaining in popularity is transactionnal memory~\cite{Dice10}[Check citation]. While this approach is even pursued by system languages like \CC\cit, the performance and feature set is currently too restrictive to be the main concurrency paradigm for general purpose language, which is why it was rejected as the core paradigm for concurrency in \CFA.
+
+ One of the most natural, elegant, and efficient mechanisms for synchronization and communication, especially for shared memory systems, is the \emph{monitor}. Monitors were first proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}. Many programming languages---e.g., Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}---provide monitors as explicit language constructs. In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as semaphores or locks to simulate monitors. For these reasons, this project proposes monitors as the core concurrency-construct.

  \section{Basics}
- The basic features that concurrency tools neet to offer is support for mutual-exclusion and synchronisation. Mutual-exclusion is the concept that only a fixed number of threads can access a critical section at any given time, where a critical section is the group of instructions on an associated portion of data that requires the limited access. On the other hand, synchronization enforces relative ordering of execution and synchronization tools are used to guarantee that event \textit{X} always happens before \textit{Y}.
+ Non-determinism requires concurrent systems to offer support for mutual-exclusion and synchronisation. Mutual-exclusion is the concept that only a fixed number of threads can access a critical section at any given time, where a critical section is a group of instructions on an associated portion of data that requires the restricted access. On the other hand, synchronization enforces relative ordering of execution and synchronization tools numerous mechanisms to establish timing relationships among threads.

  \subsection{Mutual-Exclusion}
- As mentionned above, mutual-exclusion is the guarantee that only a fix number of threads can enter a critical section at once. However, many solution exists for mutual exclusion which vary in terms of performance, flexibility and ease of use. Methods range from low level locks, which are fast and flexible but require significant attention to be correct, to higher level mutual-exclusion methods, which sacrifice some performance in order to improve ease of use. Often by either guaranteeing some problems cannot occur (e.g. being deadlock free) or by offering a more explicit coupling between data and corresponding critical section. For example, the \CC \code{std::atomic<T>} which offer an easy way to express mutual-exclusion on a restricted set of features (.e.g: reading/writing large types atomically). Another challenge with low level locks is composability. Locks are said to not be composable because it takes careful organising for multiple locks to be used and once while preventing deadlocks. Easing composability is another feature higher-level mutual-exclusion mechanisms often offer.
+ As mentionned above, mutual-exclusion is the guarantee that only a fix number of threads can enter a critical section at once. However, many solution exists for mutual exclusion which vary in terms of performance, flexibility and ease of use. Methods range from low-level locks, which are fast and flexible but require significant attention to be correct, to higher-level mutual-exclusion methods, which sacrifice some performance in order to improve ease of use. Ease of use comes by either guaranteeing some problems cannot occur (e.g., being deadlock free) or by offering a more explicit coupling between data and corresponding critical section. For example, the \CC \code{std::atomic<T>} which offer an easy way to express mutual-exclusion on a restricted set of operations (.e.g: reading/writing large types atomically). Another challenge with low-level locks is composability. Locks are not composable because it takes careful organising for multiple locks to be used while preventing deadlocks. Easing composability is another feature higher-level mutual-exclusion mechanisms often offer.

  \subsection{Synchronization}
- As for mutual-exclusion, low level synchronisation primitive often offer great performance and good flexibility at the cost of ease of use. Again, higher-level mechanism often simplify usage by adding better coupling between synchronization and data, for example message passing, or offering simple solution to otherwise involved challenges. An example of this is barging. As mentionned above synchronization can be expressed as guaranteeing that event \textit{X} always happens before \textit{Y}. Most of the time synchronisation happens around a critical section, where threads most acquire said critical section in a certain order. However, it may also be desired to be able to guarantee that event \textit{Z} does not occur between \textit{X} and \textit{Y}. This is called barging, where event \textit{X} tries to effect event \textit{Y} but anoter thread races to grab the critical section and emits \textit{Z} before \textit{Y}. Preventing or detecting barging is an involved challenge with low-level locks, which can be made much easier by higher-level constructs.
+ As for mutual-exclusion, low level synchronisation primitive often offer good performance and good flexibility at the cost of ease of use. Again, higher-level mechanism often simplify usage by adding better coupling between synchronization and data, .eg., message passing, or offering simple solution to otherwise involved challenges.
+ An example of this is barging. As mentionned above synchronization can be expressed as guaranteeing that event \textit{X} always happens before \textit{Y}. Most of the time synchronisation happens around a critical section, where threads most acquire said critical section in a certain order. However, it may also be desired to be able to guarantee that event \textit{Z} does not occur between \textit{X} and \textit{Y}. This is called barging, where event \textit{X} tries to effect event \textit{Y} but anoter thread races to grab the critical section and emits \textit{Z} before \textit{Y}. Preventing or detecting barging is an involved challenge with low-level locks, which can be made much easier by higher-level constructs.

  % ======================================================================
  …
  % ======================================================================
  % ======================================================================
- A monitor is a set of routines that ensure mutual exclusion when accessing shared state. This concept is generally associated with Object-Oriented Languages like Java~\cite{Java} or \uC~\cite{uC++book} but does not strictly require OOP semantics. The only requirements is the ability to declare a handle to a shared object and a set of routines that act on it :
+ A monitor is a set of routines that ensure mutual exclusion when accessing shared state. This concept is generally associated with Object-Oriented Languages like Java~\cite{Java} or \uC~\cite{uC++book} but does not strictly require OO semantics. The only requirements is the ability to declare a handle to a shared object and a set of routines that act on it :
  \begin{cfacode}
  typedef /*some monitor type*/ monitor;
  …
  % ======================================================================
  % ======================================================================
- The above monitor example displays some of the intrinsic characteristics. Indeed, it is necessary to use pass-by-reference over pass-by-value for monitor routines. This semantics is important because at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied. Therefore, monitors are implicitly non-copyable.
-
- Another aspect to consider is when a monitor acquires its mutual exclusion. For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry. Pass through can be both generic helper routines (\code{swap}, \code{sort}, etc.) or specific helper routines like the following to implement an atomic counter :
+ The above monitor example displays some of the intrinsic characteristics. First, it is necessary to use pass-by-reference over pass-by-value for monitor routines. This semantics is important because at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied. Therefore, monitors are implicitly non-copyable objects.
+
+ Another aspect to consider is when a monitor acquires its mutual exclusion. For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry. Pass through can occur for generic helper routines (\code{swap}, \code{sort}, etc.)
+ or specific helper routines like the following to implement an atomic counter :

  \begin{cfacode}
  …
  size_t ++?(counter_t & mutex this); //increment

- //need for mutex is platform dependent here
+ //need for mutex is platform dependent
  void ?{}(size_t * this, counter_t & mutex cnt); //conversion
  \end{cfacode}
  …
  Here, the constructor(\code{?\{\}}) uses the \code{nomutex} keyword to signify that it does not acquire the monitor mutual-exclusion when constructing. This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion. The prefix increment operator uses \code{mutex} to protect the incrementing process from race conditions. Finally, there is a conversion operator from \code{counter_t} to \code{size_t}. This conversion may or may not require the \code{mutex} keyword depending on whether or not reading an \code{size_t} is an atomic operation.

- Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without qualifiers \code{void foo(counter_t & this)} then it is reasonable that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. In fact \code{nomutex} is the "normal" parameter behaviour, with the \code{nomutex} keyword effectively stating explicitly that "this routine is not special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routines neither keyword. Mandatory keywords would also have the added benefit of being self-documented but at the cost of extra typing. While there are several benefits to mandatory keywords, they do bring a few challenges. Mandatory keywords in \CFA would imply that the compiler must know without a doubt wheter or not a parameter is a monitor or not. Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred. For this reason, \CFA only has the \code{mutex} keyword.
+ Having both \code{mutex} and \code{nomutex} keywords is redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without qualifiers \code{void foo(counter_t & this)}, then it is reasonable that it should default to the safest option \code{mutex}, whereas assuming \code{nomutex} is unsafe and may cause subtle errors. In fact, \code{nomutex} is the "normal" parameter behaviour, with the \code{nomutex} keyword effectively stating explicitly that "this routine is not special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routines neither keyword. Mandatory keywords would also have the added benefit of being self-documented but at the cost of extra typing. While there are several benefits to mandatory keywords, they do bring a few challenges. Mandatory keywords in \CFA would imply that the compiler must know without a doubt wheter or not a parameter is a monitor or not.
+ Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred. For this reason, \CFA only has the \code{mutex} keyword.

  …
  int f2(const monitor & mutex m);
  int f3(monitor ** mutex m);
- int f4(monitor * [] mutex m);
+ int f4(monitor * mutex m []);
  int f5(graph(monitor*) & mutex m);
  \end{cfacode}
  …
  int f1(monitor & mutex m); //Okay : recommanded case
  int f2(monitor * mutex m); //Okay : could be an array but probably not
- int f3(monitor [] mutex m); //Not Okay : Array of unkown length
+ int f3(monitor mutex m []); //Not Okay : Array of unkown length
  int f4(monitor ** mutex m); //Not Okay : Could be an array
- int f5(monitor * [] mutex m); //Not Okay : Array of unkown length
+ int f5(monitor * mutex m []); //Not Okay : Array of unkown length
  \end{cfacode}
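To make the contrast with low-level locks concrete, the \code{counter_t} monitor in the hunk above corresponds roughly to the following hand-written C version using a pthread mutex. This is an illustrative sketch, not part of the changeset; it assumes a plain-C setting and shows the explicit lock management that the monitor's implicit mutual exclusion removes.

```c
#include <pthread.h>
#include <stddef.h>

// Hand-written counter: the lock, the data, and every critical section are
// managed explicitly by the programmer.
typedef struct {
    pthread_mutex_t lock;
    size_t value;
} counter_t;

void counter_init( counter_t * c ) {
    pthread_mutex_init( &c->lock, NULL );
    c->value = 0;
}

size_t counter_inc( counter_t * c ) {
    pthread_mutex_lock( &c->lock );    // explicit acquire on entry
    size_t v = ++c->value;
    pthread_mutex_unlock( &c->lock );  // explicit release on every exit path
    return v;
}
```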
doc/proposals/concurrency/text/intro.tex
ade20d0 → 436c0de
  % ======================================================================

- This proposal provides a minimal concurrency API that is simple, efficient and can be reused to build higher-level features. The simplest possible concurrency core is a thread and a lock but this low-level approach is hard to master. An easier approach for users is to support higher-level constructs as the basis of the concurrency in \CFA. Indeed, for highly productive parallel programming, high-level approaches are much more popular~\cite{HPP:Study}. Examples are task based, message passing and implicit threading.
+ This proposal provides a minimal concurrency API that is simple, efficient and can be reused to build higher-level features. The simplest possible concurrency system is a thread and a lock but this low-level approach is hard to master. An easier approach for users is to support higher-level constructs as the basis of the concurrency, in \CFA. Indeed, for highly productive parallel programming, high-level approaches are much more popular~\cite{HPP:Study}. Examples are task based, message passing and implicit threading. Therefore a high-level approach is adapted in \CFA

- There are actually two problems that need to be solved in the design of concurrency for a programming language: which concurrency tools are available to the users and which parallelism tools are available. While these two concepts are often seen together, they are in fact distinct concepts that require different sorts of tools~\cite{Buhr05a}. Concurrency tools need to handle mutual exclusion and synchronization, while parallelism tools are about performance, cost and resource utilization.
+ There are actually two problems that need to be solved in the design of concurrency for a programming language: which concurrency and which parallelism tools are available to the users. While these two concepts are often combined, they are in fact distinct concepts that require different tools~\cite{Buhr05a}. Concurrency tools need to handle mutual exclusion and synchronization, while parallelism tools are about performance, cost and resource utilization.
doc/proposals/concurrency/thesis.tex
ade20d0 → 436c0de
  \fancyhf{}
  \cfoot{\thepage}
- \rfoot{v\input{build/version}}
+ \rfoot{v\input{version}}
  …
  \input{intro}
+
+ \input{cforall}

  \input{basics}
doc/user/user.tex
ade20d0 → 436c0de
  %% Created On       : Wed Apr  6 14:53:29 2016
  %% Last Modified By : Peter A. Buhr
- %% Last Modified On : Fri Jun  2 10:07:51 2017
- %% Update Count     : 2128
+ %% Last Modified On : Fri Jun 16 12:00:01 2017
+ %% Update Count     : 2433
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  …
  \usepackage[pagewise]{lineno}
  \renewcommand{\linenumberfont}{\scriptsize\sffamily}
- \input{common}		% bespoke macros used in the document
+ \input{common}		% common CFA document macros
  \usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
  \usepackage{breakurl}
  …
  \renewcommand{\subsectionmark}[1]{\markboth{\thesubsection\quad #1}{\thesubsection\quad #1}}
  \pagenumbering{roman}
- \linenumbers		% comment out to turn off line numbering
+ %\linenumbers		% comment out to turn off line numbering

  \maketitle
  …
  the type suffixes ©U©, ©L©, etc. may start with an underscore ©1_U©, ©1_ll© or ©1.0E10_f©.
  \end{enumerate}
- It is significantly easier to read and enter long constants when they are broken up into smaller groupings (most cultures use comma or period among digits for the same purpose).
+ It is significantly easier to read and enter long constants when they are broken up into smaller groupings (many cultures use comma and/or period among digits for the same purpose).
  This extension is backwards compatible, matches with the use of underscore in variable names, and appears in \Index*{Ada} and \Index*{Java} 8.
  …
  \begin{cfa}
  int ®`®otype®`® = 3;		§\C{// make keyword an identifier}§
- double ®`®choose®`® = 3.5;
+ double ®`®forall®`® = 3.5;
  \end{cfa}
- Programs can be converted easily by enclosing keyword identifiers in backquotes, and the backquotes can be removed later when the identifier name is changed to a non-keyword name.
+ Existing C programs with keyword clashes can be converted by enclosing keyword identifiers in backquotes, and eventually the identifier name can be changed to a non-keyword name.
  \VRef[Figure]{f:InterpositionHeaderFile} shows how clashes in C header files (see~\VRef{s:StandardHeaders}) can be handled using preprocessor \newterm{interposition}: ©#include_next© and ©-I filename©:
  …
  // include file uses the CFA keyword "otype".
  #if ! defined( otype )		§\C{// nesting ?}§
- #define otype `otype`
+ #define otype ®`®otype®`®		§\C{// make keyword an identifier}§
  #define __CFA_BFD_H__
  #endif // ! otype
  …
  \begin{tabular}{@{}ll@{}}
  \begin{cfa}
  int * x[5]
  \end{cfa}
  &
  …
  For example, a routine returning a \Index{pointer} to an array of integers is defined and used in the following way:
  \begin{cfa}
- int (*f())[5] {...};		§\C{}§
- ... (*f())[3] += 1;
+ int ®(*®f®())[®5®]® {...};		§\C{definition}§
+ ... ®(*®f®())[®3®]® += 1;		§\C{usage}§
  \end{cfa}
  Essentially, the return type is wrapped around the routine name in successive layers (like an \Index{onion}).
  …
  \CFA provides its own type, variable and routine declarations, using a different syntax.
  The new declarations place qualifiers to the left of the base type, while C declarations place qualifiers to the right of the base type.
- In the following example, \R{red} is for the base type and \B{blue} is for the qualifiers.
+ In the following example, \R{red} is the base type and \B{blue} is qualifiers.
  The \CFA declarations move the qualifiers to the left of the base type, \ie move the blue to the left of the red, while the qualifiers have the same meaning but are ordered left to right to specify a variable's type.
  \begin{quote2}
  …
  \end{tabular}
  \end{quote2}
- The only exception is bit field specification, which always appear to the right of the base type.
+ The only exception is \Index{bit field} specification, which always appear to the right of the base type.
  % Specifically, the character ©*© is used to indicate a pointer, square brackets ©[©\,©]© are used to represent an array or function return value, and parentheses ©()© are used to indicate a routine parameter.
  However, unlike C, \CFA type declaration tokens are distributed across all variables in the declaration list.
  …
  \begin{cfa}
  int z[ 5 ];
  char * w[ 5 ];
  double (* v)[ 5 ];
  struct s {
  int f0:3;
  int * f1;
  int * f2[ 5 ]
  };
  \end{cfa}
  …
  \begin{cfa}
  int extern x[ 5 ];
  const int static * y;
  \end{cfa}
  &
  …
  \begin{cfa}
  y = (®int *®)x;
  i = sizeof(®int * [ 5 ]®);
  \end{cfa}
  \end{tabular}
  …
  C provides a \newterm{pointer type};
  \CFA adds a \newterm{reference type}.
- These types may be derived from a object or routine type, called the \newterm{referenced type}.
+ These types may be derived from an object or routine type, called the \newterm{referenced type}.
  Objects of these types contain an \newterm{address}, which is normally a location in memory, but may also address memory-mapped registers in hardware devices.
  An integer constant expression with the value 0, or such an expression cast to type ©void *©, is called a \newterm{null-pointer constant}.\footnote{
  …
  A \Index{pointer}/\Index{reference} object is a generalization of an object variable-name, \ie a mutable address that can point to more than one memory location during its lifetime.
- (Similarly, an integer variable can contain multiple integer literals during its lifetime versus an integer constant representing a single literal during its lifetime, and like a variable name, may not occupy storage as the literal is embedded directly into instructions.)
+ (Similarly, an integer variable can contain multiple integer literals during its lifetime versus an integer constant representing a single literal during its lifetime, and like a variable name, may not occupy storage if the literal is embedded directly into instructions.)
  Hence, a pointer occupies memory to store its current address, and the pointer's value is loaded by dereferencing, \eg:
  \begin{quote2}
  …
  \begin{cfa}
  p1 = p2;		§\C{// p1 = p2\ \ rather than\ \ *p1 = *p2}§
- p2 = p1 + x;		§\C{// p2 = p1 + x\ \ rather than\ \ *p1 = *p1 + x}§
+ p2 = p1 + x;		§\C{// p2 = p1 + x\ \ rather than\ \ *p2 = *p1 + x}§
  \end{cfa}
  even though the assignment to ©p2© is likely incorrect, and the programmer probably meant:
  …
  ®*®p2 = ®*®p1 + x;		§\C{// pointed-to value assignment / operation}§
  \end{cfa}
- The C semantics works well for situations where manipulation of addresses is the primary meaning and data is rarely accessed, such as storage management (©malloc©/©free©).
+ The C semantics work well for situations where manipulation of addresses is the primary meaning and data is rarely accessed, such as storage management (©malloc©/©free©).

  However, in most other situations, the pointed-to value is requested more often than the pointer address.
  …
  For a \CFA reference type, the cancellation on the left-hand side of assignment leaves the reference as an address (\Index{lvalue}):
  \begin{cfa}
- (&®*®)r1 = &x;		§\C{// (\&*) cancel giving address of r1 not variable pointed-to by r1}§
+ (&®*®)r1 = &x;		§\C{// (\&*) cancel giving address in r1 not variable pointed-to by r1}§
  \end{cfa}
  Similarly, the address of a reference can be obtained for assignment or computation (\Index{rvalue}):
  \begin{cfa}
- (&(&®*®)®*®)r3 = &(&®*®)r2;		§\C{// (\&*) cancel giving address of r2, (\&(\&*)*) cancel giving address of r3}§
+ (&(&®*®)®*®)r3 = &(&®*®)r2;		§\C{// (\&*) cancel giving address in r2, (\&(\&*)*) cancel giving address in r3}§
  \end{cfa}
  Cancellation\index{cancellation!pointer/reference}\index{pointer!cancellation} works to arbitrary depth.
  …
  As for a pointer type, a reference type may have qualifiers:
  \begin{cfa}
- const int cx = 5;		§\C{// cannot change cx;}§
- const int & cr = cx;		§\C{// cannot change what cr points to}§
- ®&®cr = &cx;		§\C{// can change cr}§
- cr = 7;		§\C{// error, cannot change cx}§
- int & const rc = x;		§\C{// must be initialized}§
- ®&®rc = &x;		§\C{// error, cannot change rc}§
- const int & const crc = cx;		§\C{// must be initialized}§
- crc = 7;		§\C{// error, cannot change cx}§
- ®&®crc = &cx;		§\C{// error, cannot change crc}§
- \end{cfa}
- Hence, for type ©& const©, there is no pointer assignment, so ©&rc = &x© is disallowed, and \emph{the address value cannot be the null pointer unless an arbitrary pointer is coerced into the reference}:
- \begin{cfa}
- int & const cr = *0;		§\C{// where 0 is the int * zero}§
- \end{cfa}
- Note, constant reference-types do not prevent addressing errors because of explicit storage-management:
+ const int cx = 5;		§\C{// cannot change cx;}§
+ const int & cr = cx;		§\C{// cannot change what cr points to}§
+ ®&®cr = &cx;		§\C{// can change cr}§
+ cr = 7;		§\C{// error, cannot change cx}§
+ int & const rc = x;		§\C{// must be initialized}§
+ ®&®rc = &x;		§\C{// error, cannot change rc}§
+ const int & const crc = cx;		§\C{// must be initialized}§
+ crc = 7;		§\C{// error, cannot change cx}§
+ ®&®crc = &cx;		§\C{// error, cannot change crc}§
+ \end{cfa}
+ Hence, for type ©& const©, there is no pointer assignment, so ©&rc = &x© is disallowed, and \emph{the address value cannot be the null pointer unless an arbitrary pointer is coerced\index{coercion} into the reference}:
+ \begin{cfa}
+ int & const cr = *0;		§\C{// where 0 is the int * zero}§
+ \end{cfa}
+ Note, constant reference-types do not prevent \Index{addressing errors} because of explicit storage-management:
  int & const cr = *malloc();
  cr = 5;
- delete &cr;
- cr = 7;		§\C{// unsound pointer dereference}§
- \end{cfa}
-
- Finally, the position of the ©const© qualifier \emph{after} the pointer/reference qualifier causes confuse for C programmers.
+ free( &cr );
+ cr = 7;		§\C{// unsound pointer dereference}§
+ \end{cfa}
+
+ The position of the ©const© qualifier \emph{after} the pointer/reference qualifier causes confuse for C programmers.
  The ©const© qualifier cannot be moved before the pointer/reference qualifier for C style-declarations;
- \CFA-style declarations attempt to address this issue:
+ \CFA-style declarations (see \VRef{s:Declarations}) attempt to address this issue:
  \begin{quote2}
  …
  \end{tabular}
  \end{quote2}
- where the \CFA declaration is read left-to-right (see \VRef{s:Declarations}).
+ where the \CFA declaration is read left-to-right.
+
+ Finally, like pointers, references are usable and composable with other type operators and generators.
+ \begin{cfa}
+ int w, x, y, z, & ar[3] = { x, y, z };		§\C{// initialize array of references}§
+ &ar[1] = &w;		§\C{// change reference array element}§
+ typeof( ar[1] ) p;		§\C{// (gcc) is int, i.e., the type of referenced object}§
+ typeof( &ar[1] ) q;		§\C{// (gcc) is int \&, i.e., the type of reference}§
+ sizeof( ar[1] ) == sizeof( int );		§\C{// is true, i.e., the size of referenced object}§
+ sizeof( &ar[1] ) == sizeof( int *)		§\C{// is true, i.e., the size of a reference}§
+ \end{cfa}

  In contrast to \CFA reference types, \Index*[C++]{\CC{}}'s reference types are all ©const© references, preventing changes to the reference address, so only value assignment is possible, which eliminates half of the \Index{address duality}.
+ Also, \CC does not allow \Index{array}s\index{array!reference} of reference\footnote{
+ The reason for disallowing arrays of reference is unknown, but possibly comes from references being ethereal (like a textual macro), and hence, replaceable by the referant object.}
  \Index*{Java}'s reference types to objects (all Java objects are on the heap) are like C pointers, which always manipulate the address, and there is no (bit-wise) object assignment, so objects are explicitly cloned by shallow or deep copying, which eliminates half of the address duality.
+
+
+ \subsection{Initialization}

  \Index{Initialization} is different than \Index{assignment} because initialization occurs on the empty (uninitialized) storage on an object, while assignment occurs on possibly initialized storage of an object.
  …
  Because the object being initialized has no value, there is only one meaningful semantics with respect to address duality: it must mean address as there is no pointed-to value.
  In contrast, the left-hand side of assignment has an address that has a duality.
- Therefore, for pointer/reference initialization, the initializing value must be an address (\Index{lvalue}) not a value (\Index{rvalue}).
- \begin{cfa}
- int * p = &x;		§\C{// must have address of x}§
- int & r = x;		§\C{// must have address of x}§
- \end{cfa}
- Therefore, it is superfluous to require explicitly taking the address of the initialization object, even though the type is incorrect.
- Hence, \CFA allows ©r© to be assigned ©x© because it infers a reference for ©x©, by implicitly inserting a address-of operator, ©&©, and it is an error to put an ©&© because the types no longer match.
- Unfortunately, C allows ©p© to be assigned with ©&x© or ©x©, by value, but most compilers warn about the latter assignment as being potentially incorrect.
- (\CFA extends pointer initialization so a variable name is automatically referenced, eliminating the unsafe assignment.)
+ Therefore, for pointer/reference initialization, the initializing value must be an address not a value.
+ \begin{cfa}
+ int * p = &x;		§\C{// assign address of x}§
+ ®int * p = x;®		§\C{// assign value of x}§
+ int & r = x;		§\C{// must have address of x}§
+ \end{cfa}
+ Like the previous example with C pointer-arithmetic, it is unlikely assigning the value of ©x© into a pointer is meaningful (again, a warning is usually given).
+ Therefore, for safety, this context requires an address, so it is superfluous to require explicitly taking the address of the initialization object, even though the type is incorrect.
+ Note, this is strictly a convenience and safety feature for a programmer.
+ Hence, \CFA allows ©r© to be assigned ©x© because it infers a reference for ©x©, by implicitly inserting a address-of operator, ©&©, and it is an error to put an ©&© because the types no longer match due to the implicit dereference.
+ Unfortunately, C allows ©p© to be assigned with ©&x© (address) or ©x© (value), but most compilers warn about the latter assignment as being potentially incorrect.
  Similarly, when a reference type is used for a parameter/return type, the call-site argument does not require a reference operator for the same reason.
  \begin{cfa}
  int & f( int & r );		§\C{// reference parameter and return}§
  z = f( x ) + f( y );		§\C{// reference operator added, temporaries needed for call results}§
  \end{cfa}
  Within routine ©f©, it is possible to change the argument by changing the corresponding parameter, and parameter ©r© can be locally reassigned within ©f©.
  …
  z = temp1 + temp2;
  \end{cfa}
- This implicit referencing is crucial for reducing the syntactic burden for programmers when using references;
+ This \Index{implicit referencing} is crucial for reducing the syntactic burden for programmers when using references;
  otherwise references have the same syntactic burden as pointers in these contexts.
  …
  void f( ®const® int & cr );
  void g( ®const® int * cp );
- f( 3 );  g( &3 );
- f( x + y );  g( &(x + y) );
+ f( 3 );  g( ®&®3 );
+ f( x + y );  g( ®&®(x + y) );
  \end{cfa}
  Here, the compiler passes the address to the literal 3 or the temporary for the expression ©x + y©, knowing the argument cannot be changed through the parameter.
- (The ©&© is necessary for the pointer-type parameter to make the types match, and is a common requirement for a C programmer.)
+ The ©&© before the constant/expression for the pointer-type parameter (©g©) is a \CFA extension necessary to type match and is a common requirement before a variable in C (\eg ©scanf©).
+ Importantly, ©&3© may not be equal to ©&3©, where the references occur across calls because the temporaries maybe different on each call.
+
  \CFA \emph{extends} this semantics to a mutable pointer/reference parameter, and the compiler implicitly creates the necessary temporary (copying the argument), which is subsequently pointed-to by the reference parameter and can be changed.\footnote{
  If whole program analysis is possible, and shows the parameter is not assigned, \ie it is ©const©, the temporary is unnecessary.}
  …
  void f( int & r );
  void g( int * p );
- f( 3 );  g( &3 );		§\C{// compiler implicit generates temporaries}§
- f( x + y );  g( &(x + y) );		§\C{// compiler implicit generates temporaries}§
+ f( 3 );  g( ®&®3 );		§\C{// compiler implicit generates temporaries}§
+ f( x + y );  g( ®&®(x + y) );		§\C{// compiler implicit generates temporaries}§
  \end{cfa}
  Essentially, there is an implicit \Index{rvalue} to \Index{lvalue} conversion in this case.\footnote{
  …
  %\CFA attempts to handle pointers and references in a uniform, symmetric manner.
- However, C handles routine objects in an inconsistent way.
- A routine object is both a pointer and a reference (particle and wave).
+ Finally, C handles \Index{routine object}s in an inconsistent way.
939 A routine object is both a pointer and a reference (\Index{particle and wave}). 921 940 \begin{cfa} 922 941 void f( int i ); 923 void (*fp)( int ); 924 fp = f; §\C{// reference initialization}§ 925 fp = &f; §\C{// pointer initialization}§ 926 fp = *f; §\C{// reference initialization}§ 927 fp(3); §\C{// reference invocation}§ 928 (*fp)(3); §\C{// pointer invocation}§ 929 \end{cfa} 930 A routine object is best described by a ©const© reference: 931 \begin{cfa} 932 const void (&fr)( int ) = f; 933 fr = ... §\C{// error, cannot change code}§ 934 &fr = ...; §\C{// changing routine reference}§ 935 fr( 3 ); §\C{// reference call to f}§ 936 (*fr)(3); §\C{// error, incorrect type}§ 942 void (*fp)( int ); §\C{// routine pointer}§ 943 fp = f; §\C{// reference initialization}§ 944 fp = &f; §\C{// pointer initialization}§ 945 fp = *f; §\C{// reference initialization}§ 946 fp(3); §\C{// reference invocation}§ 947 (*fp)(3); §\C{// pointer invocation}§ 948 \end{cfa} 949 While C's treatment of routine objects has similarity to inferring a reference type in initialization contexts, the examples are assignment not initialization, and all possible forms of assignment are possible (©f©, ©&f©, ©*f©) without regard for type. 950 Instead, a routine object should be referenced by a ©const© reference: 951 \begin{cfa} 952 ®const® void (®&® fr)( int ) = f; §\C{// routine reference}§ 953 fr = ... §\C{// error, cannot change code}§ 954 &fr = ...; §\C{// changing routine reference}§ 955 fr( 3 ); §\C{// reference call to f}§ 956 (*fr)(3); §\C{// error, incorrect type}§ 937 957 \end{cfa} 938 958 because the value of the routine object is a routine literal, \ie the routine code is normally immutable during execution.\footnote{ … … 940 960 \CFA allows this additional use of references for routine objects in an attempt to give a more consistent meaning for them. 941 961 942 This situation is different from inferring with reference type being used ... 943 962 963 \subsection{Address-of Semantics} 964 965 In C, ©&E© is an rvalue for any expression ©E©. 966 \CFA extends the ©&© (address-of) operator as follows: 967 \begin{itemize} 968 \item 969 if ©R© is an \Index{rvalue} of type ©T &$_1$...&$_r$© where $r \ge 1$ references (©&© symbols) than ©&R© has type ©T ®*®&$_{\color{red}2}$...&$_{\color{red}r}$©, \ie ©T© pointer with $r-1$ references (©&© symbols). 970 971 \item 972 if ©L© is an \Index{lvalue} of type ©T &$_1$...&$_l$© where $l \ge 0$ references (©&© symbols) then ©&L© has type ©T ®*®&$_{\color{red}1}$...&$_{\color{red}l}$©, \ie ©T© pointer with $l$ references (©&© symbols). 
973 \end{itemize} 974 The following example shows the first rule applied to different \Index{rvalue} contexts: 975 \begin{cfa} 976 int x, * px, ** ppx, *** pppx, **** ppppx; 977 int & rx = x, && rrx = rx, &&& rrrx = rrx ; 978 x = rrrx; // rrrx is an lvalue with type int &&& (equivalent to x) 979 px = &rrrx; // starting from rrrx, &rrrx is an rvalue with type int *&&& (&x) 980 ppx = &&rrrx; // starting from &rrrx, &&rrrx is an rvalue with type int **&& (&rx) 981 pppx = &&&rrrx; // starting from &&rrrx, &&&rrrx is an rvalue with type int ***& (&rrx) 982 ppppx = &&&&rrrx; // starting from &&&rrrx, &&&&rrrx is an rvalue with type int **** (&rrrx) 983 \end{cfa} 984 The following example shows the second rule applied to different \Index{lvalue} contexts: 985 \begin{cfa} 986 int x, * px, ** ppx, *** pppx; 987 int & rx = x, && rrx = rx, &&& rrrx = rrx ; 988 rrrx = 2; // rrrx is an lvalue with type int &&& (equivalent to x) 989 &rrrx = px; // starting from rrrx, &rrrx is an rvalue with type int *&&& (rx) 990 &&rrrx = ppx; // starting from &rrrx, &&rrrx is an rvalue with type int **&& (rrx) 991 &&&rrrx = pppx; // starting from &&rrrx, &&&rrrx is an rvalue with type int ***& (rrrx) 992 \end{cfa} 993 994 995 \subsection{Conversions} 996 997 C provides a basic implicit conversion to simplify variable usage: 998 \begin{enumerate} 999 \setcounter{enumi}{-1} 1000 \item 1001 lvalue to rvalue conversion: ©cv T© converts to ©T©, which allows implicit variable dereferencing. 1002 \begin{cfa} 1003 int x; 1004 x + 1; // lvalue variable (int) converts to rvalue for expression 1005 \end{cfa} 1006 An rvalue has no type qualifiers (©cv©), so the lvalue qualifiers are dropped. 1007 \end{enumerate} 1008 \CFA provides three new implicit conversion for reference types to simplify reference usage. 1009 \begin{enumerate} 1010 \item 1011 reference to rvalue conversion: ©cv T &© converts to ©T©, which allows implicit reference dereferencing. 1012 \begin{cfa} 1013 int x, &r = x, f( int p ); 1014 x = ®r® + f( ®r® ); // lvalue reference converts to rvalue 1015 \end{cfa} 1016 An rvalue has no type qualifiers (©cv©), so the reference qualifiers are dropped. 1017 1018 \item 1019 lvalue to reference conversion: \lstinline[deletekeywords={lvalue}]@lvalue-type cv1 T@ converts to ©cv2 T &©, which allows implicitly converting variables to references. 1020 \begin{cfa} 1021 int x, &r = ®x®, f( int & p ); // lvalue variable (int) convert to reference (int &) 1022 f( ®x® ); // lvalue variable (int) convert to reference (int &) 1023 \end{cfa} 1024 Conversion can restrict a type, where ©cv1© $\le$ ©cv2©, \eg passing an ©int© to a ©const volatile int &©, which has low cost. 1025 Conversion can expand a type, where ©cv1© $>$ ©cv2©, \eg passing a ©const volatile int© to an ©int &©, which has high cost (\Index{warning}); 1026 furthermore, if ©cv1© has ©const© but not ©cv2©, a temporary variable is created to preserve the immutable lvalue. 1027 1028 \item 1029 rvalue to reference conversion: ©T© converts to ©cv T &©, which allows binding references to temporaries. 1030 \begin{cfa} 1031 int x, & f( int & p ); 1032 f( ®x + 3® ); // rvalue parameter (int) implicitly converts to lvalue temporary reference (int &) 1033 ®&f®(...) = &x; // rvalue result (int &) implicitly converts to lvalue temporary reference (int &) 1034 \end{cfa} 1035 In both case, modifications to the temporary are inaccessible (\Index{warning}). 1036 Conversion expands the temporary-type with ©cv©, which is low cost since the temporary is inaccessible. 
1037 \end{enumerate} 944 1038 945 1039 946 1040 \begin{comment} 947 \section{References}948 949 By introducing references in parameter types, users are given an easy way to pass a value by reference, without the need for NULL pointer checks.950 In structures, a reference can replace a pointer to an object that should always have a valid value.951 When a structure contains a reference, all of its constructors must initialize the reference and all instances of this structure must initialize it upon definition.952 953 The syntax for using references in \CFA is the same as \CC with the exception of reference initialization.954 Use ©&© to specify a reference, and access references just like regular objects, not like pointers (use dot notation to access fields).955 When initializing a reference, \CFA uses a different syntax which differentiates reference initialization from assignment to a reference.956 The ©&© is used on both sides of the expression to clarify that the address of the reference is being set to the address of the variable to which it refers.957 958 959 1041 From: Richard Bilson <rcbilson@gmail.com> 960 1042 Date: Wed, 13 Jul 2016 01:58:58 +0000 … … 1118 1200 \section{Routine Definition} 1119 1201 1120 \CFA also supports a new syntax for routine definition, as well as ISO Cand K\&R routine syntax.1202 \CFA also supports a new syntax for routine definition, as well as \Celeven and K\&R routine syntax. 1121 1203 The point of the new syntax is to allow returning multiple values from a routine~\cite{Galletly96,CLU}, \eg: 1122 1204 \begin{cfa} … … 1138 1220 in both cases the type is assumed to be void as opposed to old style C defaults of int return type and unknown parameter types, respectively, as in: 1139 1221 \begin{cfa} 1140 [§\,§] g(); §\C{// no input or output parameters}§1141 [ void ] g( void ); §\C{// no input or output parameters}§1222 [§\,§] g(); §\C{// no input or output parameters}§ 1223 [ void ] g( void ); §\C{// no input or output parameters}§ 1142 1224 \end{cfa} 1143 1225 … … 1157 1239 \begin{cfa} 1158 1240 typedef int foo; 1159 int f( int (* foo) ); §\C{// foo is redefined as a parameter name}§1241 int f( int (* foo) ); §\C{// foo is redefined as a parameter name}§ 1160 1242 \end{cfa} 1161 1243 The string ``©int (* foo)©'' declares a C-style named-parameter of type pointer to an integer (the parenthesis are superfluous), while the same string declares a \CFA style unnamed parameter of type routine returning integer with unnamed parameter of type pointer to foo. … … 1165 1247 C-style declarations can be used to declare parameters for \CFA style routine definitions, \eg: 1166 1248 \begin{cfa} 1167 [ int ] f( * int, int * ); §\C{// returns an integer, accepts 2 pointers to integers}§1168 [ * int, int * ] f( int ); §\C{// returns 2 pointers to integers, accepts an integer}§1249 [ int ] f( * int, int * ); §\C{// returns an integer, accepts 2 pointers to integers}§ 1250 [ * int, int * ] f( int ); §\C{// returns 2 pointers to integers, accepts an integer}§ 1169 1251 \end{cfa} 1170 1252 The reason for allowing both declaration styles in the new context is for backwards compatibility with existing preprocessor macros that generate C-style declaration-syntax, as in: 1171 1253 \begin{cfa} 1172 1254 #define ptoa( n, d ) int (*n)[ d ] 1173 int f( ptoa( p, 5 ) ) ... §\C{// expands to int f( int (*p)[ 5 ] )}§1174 [ int ] f( ptoa( p, 5 ) ) ... §\C{// expands to [ int ] f( int (*p)[ 5 ] )}§1255 int f( ptoa( p, 5 ) ) ... 
§\C{// expands to int f( int (*p)[ 5 ] )}§ 1256 [ int ] f( ptoa( p, 5 ) ) ... §\C{// expands to [ int ] f( int (*p)[ 5 ] )}§ 1175 1257 \end{cfa} 1176 1258 Again, programmers are highly encouraged to use one declaration form or the other, rather than mixing the forms. … … 1194 1276 int z; 1195 1277 ... x = 0; ... y = z; ... 1196 ®return;® 1278 ®return;® §\C{// implicitly return x, y}§ 1197 1279 } 1198 1280 \end{cfa} … … 1204 1286 [ int x, int y ] f() { 1205 1287 ... 1206 } 1288 } §\C{// implicitly return x, y}§ 1207 1289 \end{cfa} 1208 1290 In this case, the current values of ©x© and ©y© are returned to the calling routine just as if a ©return© had been encountered. 1291 1292 Named return values may be used in conjunction with named parameter values; 1293 specifically, a return and parameter can have the same name. 1294 \begin{cfa} 1295 [ int x, int y ] f( int, x, int y ) { 1296 ... 1297 } §\C{// implicitly return x, y}§ 1298 \end{cfa} 1299 This notation allows the compiler to eliminate temporary variables in nested routine calls. 1300 \begin{cfa} 1301 [ int x, int y ] f( int, x, int y ); §\C{// prototype declaration}§ 1302 int a, b; 1303 [a, b] = f( f( f( a, b ) ) ); 1304 \end{cfa} 1305 While the compiler normally ignores parameters names in prototype declarations, here they are used to eliminate temporary return-values by inferring that the results of each call are the inputs of the next call, and ultimately, the left-hand side of the assignment. 1306 Hence, even without the body of routine ©f© (separate compilation), it is possible to perform a global optimization across routine calls. 1307 The compiler warns about naming inconsistencies between routine prototype and definition in this case, and behaviour is \Index{undefined} if the programmer is inconsistent. 1209 1308 1210 1309 … … 1214 1313 as well, parameter names are optional, \eg: 1215 1314 \begin{cfa} 1216 [ int x ] f (); §\C{// returning int with no parameters}§1217 [ * int ] g (int y); §\C{// returning pointer to int with int parameter}§1218 [ ] h ( int,char);§\C{// returning no result with int and char parameters}§1219 [ * int, int ] j (int);§\C{// returning pointer to int and int, with int parameter}§1315 [ int x ] f (); §\C{// returning int with no parameters}§ 1316 [ * int ] g (int y); §\C{// returning pointer to int with int parameter}§ 1317 [ ] h ( int, char ); §\C{// returning no result with int and char parameters}§ 1318 [ * int, int ] j ( int ); §\C{// returning pointer to int and int, with int parameter}§ 1220 1319 \end{cfa} 1221 1320 This syntax allows a prototype declaration to be created by cutting and pasting source text from the routine definition header (or vice versa). 
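As a combined illustration of named return values and cut-and-paste prototypes (the routine ©divmod© is hypothetical, not one of the manual's examples), consider:
\begin{cfa}
[ int q, int r ] divmod( int num, int denom );	§\C{// prototype copied from the definition header}§
[ int q, int r ] divmod( int num, int denom ) {
	q = num / denom;
	r = num % denom;			§\C{// implicitly return q, r}§
}
int q, r;
[ q, r ] = divmod( 13, 5 );			§\C{// q == 2, r == 3}§
\end{cfa}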
… … 1225 1324 \multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}} & \multicolumn{1}{c}{\textbf{C}} \\ 1226 1325 \begin{cfa} 1227 [ int ] f( int), g;1326 [ int ] f( int ), g; 1228 1327 \end{cfa} 1229 1328 & 1230 1329 \begin{cfa} 1231 int f( int), g(int);1330 int f( int ), g( int ); 1232 1331 \end{cfa} 1233 1332 \end{tabular} … … 1235 1334 Declaration qualifiers can only appear at the start of a \CFA routine declaration,\footref{StorageClassSpecifier} \eg: 1236 1335 \begin{cfa} 1237 extern [ int ] f ( int);1238 static [ int ] g ( int);1336 extern [ int ] f ( int ); 1337 static [ int ] g ( int ); 1239 1338 \end{cfa} 1240 1339 … … 1244 1343 The syntax for pointers to \CFA routines specifies the pointer name on the right, \eg: 1245 1344 \begin{cfa} 1246 * [ int x ] () fp; §\C{// pointer to routine returning int with no parameters}§1247 * [ * int ] (int y) gp; §\C{// pointer to routine returning pointer to int with int parameter}§1248 * [ ] (int,char) hp; §\C{// pointer to routine returning no result with int and char parameters}§1249 * [ * int,int ] ( int) jp;§\C{// pointer to routine returning pointer to int and int, with int parameter}§1345 * [ int x ] () fp; §\C{// pointer to routine returning int with no parameters}§ 1346 * [ * int ] (int y) gp; §\C{// pointer to routine returning pointer to int with int parameter}§ 1347 * [ ] (int,char) hp; §\C{// pointer to routine returning no result with int and char parameters}§ 1348 * [ * int,int ] ( int ) jp; §\C{// pointer to routine returning pointer to int and int, with int parameter}§ 1250 1349 \end{cfa} 1251 1350 While parameter names are optional, \emph{a routine name cannot be specified}; 1252 1351 for example, the following is incorrect: 1253 1352 \begin{cfa} 1254 * [ int x ] f () fp; §\C{// routine name "f" is not allowed}§1353 * [ int x ] f () fp; §\C{// routine name "f" is not allowed}§ 1255 1354 \end{cfa} 1256 1355 … … 1258 1357 \section{Named and Default Arguments} 1259 1358 1260 Named and defaultarguments~\cite{Hardgrave76}\footnote{1359 Named\index{named arguments}\index{arguments!named} and default\index{default arguments}\index{arguments!default} arguments~\cite{Hardgrave76}\footnote{ 1261 1360 Francez~\cite{Francez77} proposed a further extension to the named-parameter passing style, which specifies what type of communication (by value, by reference, by name) the argument is passed to the routine.} 1262 1361 are two mechanisms to simplify routine call. … … 1439 1538 int ; §\C{// disallowed, unnamed field}§ 1440 1539 int *; §\C{// disallowed, unnamed field}§ 1441 int (*)( int); §\C{// disallowed, unnamed field}§1540 int (*)( int ); §\C{// disallowed, unnamed field}§ 1442 1541 }; 1443 1542 \end{cfa} … … 1562 1661 } 1563 1662 int main() { 1564 * [int]( int) fp = foo(); §\C{// int (*fp)(int)}§1663 * [int]( int ) fp = foo(); §\C{// int (*fp)( int )}§ 1565 1664 sout | fp( 3 ) | endl; 1566 1665 } … … 2683 2782 2684 2783 2685 \s ubsection{Constructors and Destructors}2784 \section{Constructors and Destructors} 2686 2785 2687 2786 \CFA supports C initialization of structures, but it also adds constructors for more advanced initialization. 
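For instance, a minimal sketch (consistent with the two-parameter ©Complex© constructor used in the allocation example later in this section, but not copied from the manual) is:
\begin{cfa}
struct Complex { double re, im; };
void ?{}( Complex * this ) {			§\C{// default constructor}§
	this->re = 0.0; this->im = 0.0;
}
void ?{}( Complex * this, double re, double im ) {	§\C{// two-parameter constructor}§
	this->re = re; this->im = im;
}
Complex c1;					§\C{// implicitly calls the default constructor}§
Complex c2 = { 0.5, 1.0 };			§\C{// implicitly calls the two-parameter constructor}§
\end{cfa}
A matching destructor, declared with the ©^?{}© operator, runs implicitly when the object goes out of scope.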
… … 3014 3113 3015 3114 3115 \begin{comment} 3016 3116 \section{Generics} 3017 3117 … … 3220 3320 } 3221 3321 \end{cfa} 3322 \end{comment} 3222 3323 3223 3324 … … 3279 3380 Complex *p3 = new(0.5, 1.0); // allocate + 2 param constructor 3280 3381 } 3281 3282 3382 \end{cfa} 3283 3383 … … 3291 3391 3292 3392 3393 \begin{comment} 3293 3394 \subsection{Unsafe C Constructs} 3294 3395 … … 3301 3402 The exact set of unsafe C constructs that will be disallowed in \CFA has not yet been decided, but is sure to include pointer arithmetic, pointer casting, etc. 3302 3403 Once the full set is decided, the rules will be listed here. 3404 \end{comment} 3303 3405 3304 3406 3305 3407 \section{Concurrency} 3306 3307 Today's processors for nearly all use cases, ranging from embedded systems to large cloud computing servers, are composed of multiple cores, often heterogeneous.3308 As machines grow in complexity, it becomes more difficult for a program to make the most use of the hardware available.3309 \CFA includes built-in concurrency features to enable high performance and improve programmer productivity on these multi-/many-core machines.3310 3408 3311 3409 Concurrency support in \CFA is implemented on top of a highly efficient runtime system of light-weight, M:N, user level threads. … … 3314 3412 This enables a very familiar interface to all programmers, even those with no parallel programming experience. 3315 3413 It also allows the compiler to do static type checking of all communication, a very important safety feature. 3316 This controlled communication with type safety has some similarities with channels in \Index*{Go}, and can actually implement 3317 channels exactly, as well as create additional communication patterns that channels cannot. 3414 This controlled communication with type safety has some similarities with channels in \Index*{Go}, and can actually implement channels exactly, as well as create additional communication patterns that channels cannot. 3318 3415 Mutex objects, monitors, are used to contain mutual exclusion within an object and synchronization across concurrent threads. 
3319 3416 3320 Three new keywords are added to support these features: 3321 3322 monitor creates a structure with implicit locking when accessing fields 3323 3324 mutex implies use of a monitor requiring the implicit locking 3325 3326 task creates a type with implicit locking, separate stack, and a thread 3417 \begin{figure} 3418 \begin{cfa} 3419 #include <fstream> 3420 #include <coroutine> 3421 3422 coroutine Fibonacci { 3423 int fn; §\C{// used for communication}§ 3424 }; 3425 void ?{}( Fibonacci * this ) { 3426 this->fn = 0; 3427 } 3428 void main( Fibonacci * this ) { 3429 int fn1, fn2; §\C{// retained between resumes}§ 3430 this->fn = 0; §\C{// case 0}§ 3431 fn1 = this->fn; 3432 suspend(); §\C{// return to last resume}§ 3433 3434 this->fn = 1; §\C{// case 1}§ 3435 fn2 = fn1; 3436 fn1 = this->fn; 3437 suspend(); §\C{// return to last resume}§ 3438 3439 for ( ;; ) { §\C{// general case}§ 3440 this->fn = fn1 + fn2; 3441 fn2 = fn1; 3442 fn1 = this->fn; 3443 suspend(); §\C{// return to last resume}§ 3444 } // for 3445 } 3446 int next( Fibonacci * this ) { 3447 resume( this ); §\C{// transfer to last suspend}§ 3448 return this->fn; 3449 } 3450 int main() { 3451 Fibonacci f1, f2; 3452 for ( int i = 1; i <= 10; i += 1 ) { 3453 sout | next( &f1 ) | ' ' | next( &f2 ) | endl; 3454 } // for 3455 } 3456 \end{cfa} 3457 \caption{Fibonacci Coroutine} 3458 \label{f:FibonacciCoroutine} 3459 \end{figure} 3460 3461 3462 \subsection{Coroutine} 3463 3464 \Index{Coroutines} are the precursor to tasks. 3465 \VRef[Figure]{f:FibonacciCoroutine} shows a coroutine that computes the \Index*{Fibonacci} numbers. 3327 3466 3328 3467 … … 3339 3478 \end{cfa} 3340 3479 3480 \begin{figure} 3481 \begin{cfa} 3482 #include <fstream> 3483 #include <kernel> 3484 #include <monitor> 3485 #include <thread> 3486 3487 monitor global_t { 3488 int value; 3489 }; 3490 3491 void ?{}(global_t * this) { 3492 this->value = 0; 3493 } 3494 3495 static global_t global; 3496 3497 void increment3( global_t * mutex this ) { 3498 this->value += 1; 3499 } 3500 void increment2( global_t * mutex this ) { 3501 increment3( this ); 3502 } 3503 void increment( global_t * mutex this ) { 3504 increment2( this ); 3505 } 3506 3507 thread MyThread {}; 3508 3509 void main( MyThread* this ) { 3510 for(int i = 0; i < 1_000_000; i++) { 3511 increment( &global ); 3512 } 3513 } 3514 int main(int argc, char* argv[]) { 3515 processor p; 3516 { 3517 MyThread f[4]; 3518 } 3519 sout | global.value | endl; 3520 } 3521 \end{cfa} 3522 \caption{Atomic-Counter Monitor} 3523 \caption{f:AtomicCounterMonitor} 3524 \end{figure} 3525 3526 \begin{comment} 3341 3527 Since a monitor structure includes an implicit locking mechanism, it does not make sense to copy a monitor; 3342 3528 it is always passed by reference. … … 3385 3571 } 3386 3572 \end{cfa} 3573 \end{comment} 3387 3574 3388 3575 … … 3392 3579 A task provides mutual exclusion like a monitor, and also has its own execution state and a thread of control. 3393 3580 Similar to a monitor, a task is defined like a structure: 3581 3582 \begin{figure} 3583 \begin{cfa} 3584 #include <fstream> 3585 #include <kernel> 3586 #include <stdlib> 3587 #include <thread> 3588 3589 thread First { signal_once * lock; }; 3590 thread Second { signal_once * lock; }; 3591 3592 void ?{}( First * this, signal_once* lock ) { this->lock = lock; } 3593 void ?{}( Second * this, signal_once* lock ) { this->lock = lock; } 3594 3595 void main( First * this ) { 3596 for ( int i = 0; i < 10; i += 1 ) { 3597 sout | "First : Suspend No." 
| i + 1 | endl; 3598 yield(); 3599 } 3600 signal( this->lock ); 3601 } 3602 3603 void main( Second * this ) { 3604 wait( this->lock ); 3605 for ( int i = 0; i < 10; i += 1 ) { 3606 sout | "Second : Suspend No." | i + 1 | endl; 3607 yield(); 3608 } 3609 } 3610 3611 int main( void ) { 3612 signal_once lock; 3613 sout | "User main begin" | endl; 3614 { 3615 processor p; 3616 { 3617 First f = { &lock }; 3618 Second s = { &lock }; 3619 } 3620 } 3621 sout | "User main end" | endl; 3622 } 3623 \end{cfa} 3624 \caption{Simple Tasks} 3625 \label{f:SimpleTasks} 3626 \end{figure} 3627 3628 3629 \begin{comment} 3394 3630 \begin{cfa} 3395 3631 type Adder = task { … … 3445 3681 \end{cfa} 3446 3682 3447 3448 3683 \subsection{Cooperative Scheduling} 3449 3684 … … 3558 3793 } 3559 3794 \end{cfa} 3560 3561 3795 \end{comment} 3796 3797 3798 \begin{comment} 3562 3799 \section{Modules and Packages } 3563 3800 3564 \begin{comment}3565 3801 High-level encapsulation is useful for organizing code into reusable units, and accelerating compilation speed. 3566 3802 \CFA provides a convenient mechanism for creating, building and sharing groups of functionality that enhances productivity and improves compile time. … … 4226 4462 4227 4463 4464 \begin{comment} 4228 4465 \subsection[Comparing Key Features of CFA]{Comparing Key Features of \CFA} 4229 4466 … … 4603 4840 4604 4841 4605 \begin{comment}4606 4842 \subsubsection{Modules / Packages} 4607 4843 … … 4683 4919 } 4684 4920 \end{cfa} 4685 \end{comment}4686 4921 4687 4922 … … 4844 5079 4845 5080 \subsection{Summary of Language Comparison} 4846 4847 4848 \subsubsection[C++]{\CC} 5081 \end{comment} 5082 5083 5084 \subsection[C++]{\CC} 4849 5085 4850 5086 \Index*[C++]{\CC{}} is a general-purpose programming language. … … 4867 5103 4868 5104 4869 \subs ubsection{Go}5105 \subsection{Go} 4870 5106 4871 5107 \Index*{Go}, also commonly referred to as golang, is a programming language developed at Google in 2007 [.]. … … 4883 5119 4884 5120 4885 \subs ubsection{Rust}5121 \subsection{Rust} 4886 5122 4887 5123 \Index*{Rust} is a general-purpose, multi-paradigm, compiled programming language developed by Mozilla Research. … … 4897 5133 4898 5134 4899 \subs ubsection{D}5135 \subsection{D} 4900 5136 4901 5137 The \Index*{D} programming language is an object-oriented, imperative, multi-paradigm system programming … … 5009 5245 \item[Rationale:] keywords added to implement new semantics of \CFA. 5010 5246 \item[Effect on original feature:] change to semantics of well-defined feature. \\ 5011 Any ISO Cprograms using these keywords as identifiers are invalid \CFA programs.5247 Any \Celeven programs using these keywords as identifiers are invalid \CFA programs. 5012 5248 \item[Difficulty of converting:] keyword clashes are accommodated by syntactic transformations using the \CFA backquote escape-mechanism (see~\VRef{s:BackquoteIdentifiers}). 5013 5249 \item[How widely used:] clashes among new \CFA keywords and existing identifiers are rare. … … 5229 5465 hence, names in these include files are not mangled\index{mangling!name} (see~\VRef{s:Interoperability}). 5230 5466 All other C header files must be explicitly wrapped in ©extern "C"© to prevent name mangling. 5467 For \Index*[C++]{\CC{}}, the name-mangling issue is handled implicitly because most C header-files are augmented with checks for preprocessor variable ©__cplusplus©, which adds appropriate ©extern "C"© qualifiers. 
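As an illustration (the header name is arbitrary), a C header that lacks such guards can be wrapped explicitly at the include site:
\begin{cfa}
extern "C" {
#include "driver.h"				§\C{// C header without extern guards}§
}						§\C{// names declared inside are not mangled}§
\end{cfa}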
5231 5468 5232 5469 … … 5311 5548 } 5312 5549 5313 // §\CFA§ safe initialization/copy 5550 // §\CFA§ safe initialization/copy, i.e., implicit size specification 5314 5551 forall( dtype T | sized(T) ) T * memset( T * dest, char c );§\indexc{memset}§ 5315 5552 forall( dtype T | sized(T) ) T * memcpy( T * dest, const T * src );§\indexc{memcpy}§ … … 5421 5658 \leavevmode 5422 5659 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 5423 forall( otype T | { int ?<?( T, T ); } ) 5424 T min( T t1, T t2 );§\indexc{min}§ 5425 5426 forall( otype T | { int ?>?( T, T ); } ) 5427 T max( T t1, T t2 );§\indexc{max}§ 5428 5429 forall( otype T | { T min( T, T ); T max( T, T ); } ) 5430 T clamp( T value, T min_val, T max_val );§\indexc{clamp}§ 5431 5432 forall( otype T ) 5433 void swap( T * t1, T * t2 );§\indexc{swap}§ 5660 forall( otype T | { int ?<?( T, T ); } ) T min( T t1, T t2 );§\indexc{min}§ 5661 forall( otype T | { int ?>?( T, T ); } ) T max( T t1, T t2 );§\indexc{max}§ 5662 forall( otype T | { T min( T, T ); T max( T, T ); } ) T clamp( T value, T min_val, T max_val );§\indexc{clamp}§ 5663 forall( otype T ) void swap( T * t1, T * t2 );§\indexc{swap}§ 5434 5664 \end{cfa} 5435 5665 -
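The generic routines above apply to any type satisfying their assertions; a brief usage sketch (not from the manual) is:
\begin{cfa}
int a = 3, b = 7;
int lo = min( a, b );				§\C{// 3, uses int ?<?}§
double hi = max( 2.5, 10. );			§\C{// 10., uses double ?>?}§
int c = clamp( 12, 0, 10 );			§\C{// 10, clamped to the range 0..10}§
swap( &a, &b );					§\C{// a == 7, b == 3}§
\end{cfa}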
doc/working/exception/design.txt
rade20d0 r436c0de 1 1 Design of Exceptions and EHM in Cforall: 2 2 3 Currently this is a combination of ideas and big questions that still have to 4 be addressed. It also includes some other error handling options, how they 5 interact and compare to exceptions. 3 4 Exception Instances: 5 Currently, exceptions are integers (like errno). 6 7 They are planned to be the new "tagged structures", which allows them to 8 exist in a simple hierarchy which shared functionality throughout. However 9 the tagged structures are not yet implemented so that will wait. 10 11 A single built in exception is at the top of the hierarchy and all other 12 exceptions are its children. When you match against an exception, you match 13 for an exception and its children, so the top of the hierarchy is used as a 14 catch-all option. 15 16 The shared functionality across exceptions has not been finalized, but will 17 probably include things like human readable descriptions and default handlers. 6 18 7 19 8 What is an Exception: 20 Throwing: 21 There are currently two kinds of throws, "throw" for termination and 22 "throwResume" for resumption. Both keywords can be used to create a throw 23 statement. The kind of throw decides what handlers may catch the exception 24 and weither control flow can return to the throw site. 9 25 10 In other words what do we throw? What is matched against, how does it carry 11 data with it? A very important question that has not been answered. 26 Syntax 27 "throw" exception ";" 28 "throwResume" exception ";" 12 29 13 Option 1: Strutures 30 Non-local throws are allowed for resumption only. A target is an object with 31 a stack, with which it may propagate and handle the exception. 14 32 15 Considering the current state of Cforall the most natural form of the 16 exception would likely be a struture, implementing a trait repersenting the 17 minimum features of an exception. This has many advantages, including arbitray 18 fields, some polymorphism and it matches exceptations of many current systems. 33 Syntax 34 "throwResume" exception "_At" target ";" 19 35 20 The main issue with this is matching, without OOP inheritance there is no 21 exception hierarchy. Meaning all handling has to happen on the exact exception 22 without the ease of grouping parents. There are several ways to attempt to 23 recover this. 24 25 The first is with conditional matching (a check after the type has been 26 matched) which allows for matching particular values of a known type. However 27 this does not dynamically expand and requires an extra step (as opposed to 28 mearly allowing one). I would not recomend this as the primary method. 29 30 Second is to try and use type casts/conversions to create an implicate 31 hierachy, so that a catch clause catches anything of the given type or 32 anything that converts to the given type. 33 34 Plan9 (from what I know of it) would be a powerful tool here. Even with it, 35 creating a hierarchy of types at runtime might be too expencive. Esecially 36 since they are less likely to be tree like at that point. 37 38 Option 2: Tags 39 40 The other option is to introduce a new construct into the language. A tag 41 repersents a type of exception, it is not a structure or variable (or even 42 a normal type). It may be usable in some of those contexts. 43 44 Tags can declare an existing tag as its parent. Tags can be caught by handlers 45 that catch their parents. (There is a single base_exception that all other 46 exceptions are children of eventually.) 
This allows for grouping of exceptions 47 that repersent similar errors. 48 49 Tags should also have some assotiated data, where and on what did the error 50 occur. Keeping with the otherness of exception tags and allowing them to be 51 expanded, using a parameter list. Each exception can have a list of paramters 52 given to it on a throw. Each tag would have a declared list of parameters 53 (which could be treated more like a set of fields as well). Child tags must 54 use their parent's list as a prefix to their own, so that the parameters can 55 be accessed when the child tag is matched against the parent. 56 57 Option N: ... 58 59 This list is not complete. 36 Termination throws unwind the stack until a handler is reached, control moves 37 onwards from the end of the handler. Resumption throws do not unwind, if a 38 handler is found and control will return to the throw after the exception is 39 handled. 60 40 61 41 62 Seperating Termination and Resumption: 42 Catching: 43 The catch and handle of an exception is preformed with a try statement, which 44 also can have finally clauses to exceute on exit from the scope. 63 45 64 Differentating the types of exceptions based on exception would be hard with 65 exceptions as structures. It is possible with exceptions as tags by having 66 two base exceptions, one for each type of throw. However recompining them 67 to dual types would be harder. 46 Syntax 47 "try" 48 try-block 49 ( ("catch" | "catchResume") 50 "(" exception_type [identifier] [";" conditional_expression] ")" 51 catch-block 52 )* 53 ("finally" 54 finally-block 55 )? 68 56 69 Reguardless, using different keywords would also be useful for clarity, even 70 if it does not add functality. Using the C++ like keywords would be a good 71 base. Resumption exceptions could use a different set (ex. raise->handle) or 72 use resume as a qualifier on the existing statements. 57 Either at least 1 handler clause or the finally clasue must be given on each 58 try block. Each handler clause handles 1 of the two types of throws. Each 59 handler also specifies a type of exception it handles, and will handle all 60 children exceptions as well. In addition, a conditional expression which, if 61 included, must be true for the handler to catch the exception. 62 63 The two types of handlers may be intermixed. Multiple handlers catching the 64 same type may also be used, to allow for fallbacks on false conditionals. 73 65 74 66 75 Conditional Matching:67 Implementation Overview: 76 68 77 A possible useful feature, it allows for arbitrary checks on a catch block 78 instead of merely matching a type. However there are few use cases that 79 cannot be avoided with proper use of a type hierarchy, and this shrinks even 80 further with a good use of re-throws.69 The implementation has two main parts. The first is just a collection of the 70 support definitions we need, the data types and functions used within the 71 exception handling code. Second is a translation from Cforall code to C code 72 that uses those definitions to throw, catch and handle exceptions. 81 73 82 Also it assumes one sweep, that might also be a problem. But might also give 83 it an advantage over re-throws. 74 Termination handlers call a specially annotated function, passing it inner 75 functions that act as the varius sub-blocks. Termination throws use the 76 unwind library that checks the underlying code for those annotations. 
Each 77 time one is found some magic is used to check for a matching handler, if one 78 is found control goes to the special function which excecutes the handler and 79 returns. 80 81 Resumption handlers maintain a linked list of stack allocated nodes that have 82 the handler functions attached. Throwing a resumption exception traverses this 83 list, and calls each handler, the handlers handle the exception if they can 84 and return if they did or not. 85 86 Finally clauses just use stack cleanup to force a nested function, which has 87 the code from the finally clause, to execute when we leave that section. 84 88 85 89 86 Alternative s: Implicate Handlers &Return Unions90 Alternative Error Handling: Return Unions 87 91 88 Both act as a kind of local version of an exception. Implicate handlers act as 89 resumption exceptions and return unions like termination exceptions. By local 90 I mean they work well at one or two levels of calls, but do not cover N levels 91 as cleanly. 92 Return unions (Maybe and Result), are types that can encode a success or 93 other result in a single value. Maybe stores a value or nothing, Result stores 94 a value or an error. 92 95 93 Implicate handles require a growing number of function pointers (which should 94 not be used very often) to be passed to functions, creating and additional 95 preformance cost. Return unions have to be checked and handled at every level, 96 which has some preformance cost, but also can significantly clutter code. 97 Proper tools can help with the latter. 96 For errors that are usually handled quite close to where they occur, these 97 can replace exceptions. 98 98 99 However, they may work better at that local level as they do not require stack 100 walking or unwinding. In addition they are closer to regular control flow and 101 are easier to staticly check. So even if they can't replace exceptions 102 (directly) they may still be worth using together. 103 104 For instance, consider the Python iterator interface. It uses a single 105 function, __next__, to access the next value and to signal the end of the 106 sequence. If a value is returned, it is the next value, if the StopIteration 107 exception is thrown the sequence has finished. 108 109 However almost every use of an iterator will add a try-except block around the 110 call site (possibly through for or next) to catch and handle the exception 111 immediately, ignoring the advantages of more distant exception handling. 112 113 In this case it may be cleaner to use a Maybe for both cases (as in Rust) 114 which gives similar results without having to jump to the exception handler. 115 This will likely handle the error case more efficiently and the success case a 116 bit less so. 117 118 It also mixes the error and regular control flow, which can hurt readablity, 119 but very little if the handling is simple, for instance providing a default 120 value. Similarly, if the error (or alternate outcome) is common enough 121 encoding it in the function signature may be good commuication. 99 They tend to be faster and require similar or less amounts of code to handle. 100 However they can slow down the normal path with some extra conditionals and 101 can mix the normal and exceptional control flow path. If handling the error 102 is simple, and happens relatively frequently, this might be prefered but in 103 other cases it just hurts speed and readability. 
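For illustration (sketch only: parse_int, parse_error and input are hypothetical,
while has_value/get_value match the helper shown below), handling the alternate
outcome locally could look like:

    Result(int, parse_error) r = parse_int( input );
    int value;
    if ( has_value( &r ) ) {
        value = get_value( &r );    // normal path
    } else {
        value = 0;                  // handle the error locally with a default
    }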
122 104 123 105 In short, these errors seem to be more effective when errors are likely and … … 125 107 be handled locally, might be better off using these instead of exceptions. 126 108 127 Also the implicate handlers and return unions could use exceptions as well. 128 For instance, a useful default might handler might be to throw an exception, 129 seaching up the stack for a solution if one is not locally provided. 130 131 Or here is a possible helper for unpacking a Result value: 109 Also the return unions could use exceptions as well. Getting the improper 110 side of a return union might throw an exception. Or we can provide helpers 111 for results withe exceptions as in: 132 112 forall(otype T, otype E | exception(E)) 133 113 T get_or_throw (Result(T, E) * this) { 134 if ( is_success(this)) {135 return get_ success(this);114 if (has_value(this)) { 115 return get_value(this); 136 116 } else { 137 throw get_ failure(this);117 throw get_error(this); 138 118 } 139 119 } 140 So they can feed off of each other. -
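To make the try-statement grammar described above concrete, here is a sketch
(the exception types io_error and its child eof_error, and the routine
read_file, are hypothetical):

    try {
        read_file( name );
    } catch ( eof_error e ) {
        // termination handler for eof_error and its children
    } catch ( io_error e ; e.code != 0 ) {
        // conditional handler, only matches when the expression is true
    } catchResume ( io_error e ) {
        // resumption handler, control returns after the throwResume
    } finally {
        // always executed on exit from the try block
    }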
src/CodeGen/CodeGenerator.cc
rade20d0 r436c0de 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed May 10 14:45:00 201713 // Update Count : 48 412 // Last Modified On : Thu Jun 8 16:00:00 2017 13 // Update Count : 485 14 14 // 15 15 … … 112 112 113 113 CodeGenerator::CodeGenerator( std::ostream & os, bool pretty, bool genC, bool lineMarks ) : indent( *this), cur_indent( 0 ), insideFunction( false ), output( os ), printLabels( *this ), pretty( pretty ), genC( genC ), lineMarks( lineMarks ) {} 114 115 CodeGenerator::CodeGenerator( std::ostream & os, std::string init, int indentation, bool infunp )116 : indent( *this), cur_indent( indentation ), insideFunction( infunp ), output( os ), printLabels( *this ) {117 //output << std::string( init );118 }119 120 CodeGenerator::CodeGenerator( std::ostream & os, char * init, int indentation, bool infunp )121 : indent( *this ), cur_indent( indentation ), insideFunction( infunp ), output( os ), printLabels( *this ) {122 //output << std::string( init );123 }124 114 125 115 string CodeGenerator::mangleName( DeclarationWithType * decl ) { … … 918 908 } 919 909 910 void CodeGenerator::visit( ThrowStmt * throwStmt ) { 911 assertf( ! genC, "Throw statements should not reach code generation." ); 912 913 output << ((throwStmt->get_kind() == ThrowStmt::Terminate) ? 914 "throw" : "throwResume"); 915 if (throwStmt->get_expr()) { 916 output << " "; 917 throwStmt->get_expr()->accept( *this ); 918 } 919 if (throwStmt->get_target()) { 920 output << " _At "; 921 throwStmt->get_target()->accept( *this ); 922 } 923 output << ";"; 924 } 925 920 926 void CodeGenerator::visit( WhileStmt * whileStmt ) { 921 927 if ( whileStmt->get_isDoWhile() ) { -
src/CodeGen/CodeGenerator.h
rade20d0 r436c0de 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed May 10 10:57:00 201713 // Update Count : 5 112 // Last Modified On : Thu Jun 8 15:48:00 2017 13 // Update Count : 52 14 14 // 15 15 … … 91 91 virtual void visit( BranchStmt * ); 92 92 virtual void visit( ReturnStmt * ); 93 virtual void visit( ThrowStmt * ); 93 94 virtual void visit( WhileStmt * ); 94 95 virtual void visit( ForStmt * ); -
src/Common/PassVisitor.h
rade20d0 r436c0de 54 54 virtual void visit( BranchStmt *branchStmt ) override final; 55 55 virtual void visit( ReturnStmt *returnStmt ) override final; 56 virtual void visit( ThrowStmt *throwStmt ) override final; 56 57 virtual void visit( TryStmt *tryStmt ) override final; 57 58 virtual void visit( CatchStmt *catchStmt ) override final; … … 90 91 virtual void visit( TupleExpr *tupleExpr ) override final; 91 92 virtual void visit( TupleIndexExpr *tupleExpr ) override final; 92 virtual void visit( MemberTupleExpr *tupleExpr ) override final;93 93 virtual void visit( TupleAssignExpr *assignExpr ) override final; 94 94 virtual void visit( StmtExpr * stmtExpr ) override final; … … 140 140 virtual Statement* mutate( BranchStmt *branchStmt ) override final; 141 141 virtual Statement* mutate( ReturnStmt *returnStmt ) override final; 142 virtual Statement* mutate( ThrowStmt *throwStmt ) override final; 142 143 virtual Statement* mutate( TryStmt *returnStmt ) override final; 143 144 virtual Statement* mutate( CatchStmt *catchStmt ) override final; … … 176 177 virtual Expression* mutate( TupleExpr *tupleExpr ) override final; 177 178 virtual Expression* mutate( TupleIndexExpr *tupleExpr ) override final; 178 virtual Expression* mutate( MemberTupleExpr *tupleExpr ) override final;179 179 virtual Expression* mutate( TupleAssignExpr *assignExpr ) override final; 180 180 virtual Expression* mutate( StmtExpr * stmtExpr ) override final; … … 232 232 std::list< Statement* > * get_afterStmts () { return stmtsToAddAfter_impl ( pass, 0); } 233 233 bool visit_children() { bool* skip = skip_children_impl(pass, 0); return ! (skip && *skip); } 234 }; 234 void reset_visit() { bool* skip = skip_children_impl(pass, 0); if(skip) *skip = false; } 235 236 guard_value_impl init_guard() { 237 guard_value_impl guard; 238 auto at_cleanup = at_cleanup_impl(pass, 0); 239 if( at_cleanup ) { 240 *at_cleanup = [&guard]( cleanup_func_t && func, void* val ) { 241 guard.push( std::move( func ), val ); 242 }; 243 } 244 return guard; 245 } 246 }; 247 248 template<typename pass_type, typename T> 249 void GuardValue( pass_type * pass, T& val ) { 250 pass->at_cleanup( [ val ]( void * newVal ) { 251 * static_cast< T * >( newVal ) = val; 252 }, static_cast< void * >( & val ) ); 253 } 254 255 class WithTypeSubstitution { 256 protected: 257 WithTypeSubstitution() = default; 258 ~WithTypeSubstitution() = default; 259 260 public: 261 TypeSubstitution * env; 262 }; 263 264 class WithStmtsToAdd { 265 protected: 266 WithStmtsToAdd() = default; 267 ~WithStmtsToAdd() = default; 268 269 public: 270 std::list< Statement* > stmtsToAddBefore; 271 std::list< Statement* > stmtsToAddAfter; 272 }; 273 274 class WithShortCircuiting { 275 protected: 276 WithShortCircuiting() = default; 277 ~WithShortCircuiting() = default; 278 279 public: 280 bool skip_children; 281 }; 282 283 class WithScopes { 284 protected: 285 WithScopes() = default; 286 ~WithScopes() = default; 287 288 public: 289 at_cleanup_t at_cleanup; 290 291 template< typename T > 292 void GuardValue( T& val ) { 293 at_cleanup( [ val ]( void * newVal ) { 294 * static_cast< T * >( newVal ) = val; 295 }, static_cast< void * >( & val ) ); 296 } 297 }; 298 235 299 236 300 #include "PassVisitor.impl.h" -
src/Common/PassVisitor.impl.h
rade20d0 r436c0de 1 1 #pragma once 2 2 3 #define VISIT_START( node ) \ 4 call_previsit( node ); \ 5 if( visit_children() ) { \ 6 7 #define VISIT_END( node ) \ 8 } \ 9 return call_postvisit( node ); \ 10 11 #define MUTATE_START( node ) \ 12 call_premutate( node ); \ 13 if( visit_children() ) { \ 3 #define VISIT_START( node ) \ 4 __attribute__((unused)) \ 5 const auto & guard = init_guard(); \ 6 call_previsit( node ); \ 7 if( visit_children() ) { \ 8 reset_visit(); \ 9 10 #define VISIT_END( node ) \ 11 } \ 12 call_postvisit( node ); \ 13 14 #define MUTATE_START( node ) \ 15 __attribute__((unused)) \ 16 const auto & guard = init_guard(); \ 17 call_premutate( node ); \ 18 if( visit_children() ) { \ 19 reset_visit(); \ 14 20 15 21 #define MUTATE_END( type, node ) \ … … 18 24 19 25 20 #define VISIT_BODY( node ) \21 VISIT_START( node ); \22 Visitor::visit( node ); \23 VISIT_END( node ); \26 #define VISIT_BODY( node ) \ 27 VISIT_START( node ); \ 28 Visitor::visit( node ); \ 29 VISIT_END( node ); \ 24 30 25 31 … … 389 395 390 396 //-------------------------------------------------------------------------- 397 // ThrowStmt 398 399 template< typename pass_type > 400 void PassVisitor< pass_type >::visit( ThrowStmt * node ) { 401 VISIT_BODY( node ); 402 } 403 404 template< typename pass_type > 405 Statement * PassVisitor< pass_type >::mutate( ThrowStmt * node ) { 406 MUTATE_BODY( Statement, node ); 407 } 408 409 //-------------------------------------------------------------------------- 391 410 // TryStmt 392 411 template< typename pass_type > … … 617 636 618 637 template< typename pass_type > 619 void PassVisitor< pass_type >::visit( MemberTupleExpr * node ) {620 VISIT_BODY( node );621 }622 623 template< typename pass_type >624 638 void PassVisitor< pass_type >::visit( TupleAssignExpr * node ) { 625 639 VISIT_BODY( node ); … … 999 1013 1000 1014 template< typename pass_type > 1001 Expression * PassVisitor< pass_type >::mutate( MemberTupleExpr * node ) {1002 MUTATE_BODY( Expression, node );1003 }1004 1005 template< typename pass_type >1006 1015 Expression * PassVisitor< pass_type >::mutate( TupleAssignExpr * node ) { 1007 1016 MUTATE_BODY( Expression, node ); -
src/Common/PassVisitor.proto.h
rade20d0 r436c0de 1 1 #pragma once 2 3 typedef std::function<void( void * )> cleanup_func_t; 4 5 class guard_value_impl { 6 public: 7 guard_value_impl() = default; 8 9 ~guard_value_impl() { 10 while( !cleanups.empty() ) { 11 auto& cleanup = cleanups.top(); 12 cleanup.func( cleanup.val ); 13 cleanups.pop(); 14 } 15 } 16 17 void push( cleanup_func_t && func, void* val ) { 18 cleanups.emplace( std::move(func), val ); 19 } 20 21 private: 22 struct cleanup_t { 23 cleanup_func_t func; 24 void * val; 25 26 cleanup_t( cleanup_func_t&& func, void * val ) : func(func), val(val) {} 27 }; 28 29 std::stack< cleanup_t > cleanups; 30 }; 31 32 typedef std::function< void( cleanup_func_t, void * ) > at_cleanup_t; 2 33 3 34 //------------------------------------------------------------------------------------------------------------------------------------------------------------------------- … … 18 49 // Visit 19 50 template<typename pass_type, typename node_type> 20 static inline auto previsit_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.previsit( node ), void() ) {51 static inline auto previsit_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.previsit( node ), void() ) { 21 52 pass.previsit( node ); 22 53 } … … 27 58 28 59 template<typename pass_type, typename node_type> 29 static inline auto postvisit_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.postvisit( node ), void() ) {60 static inline auto postvisit_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.postvisit( node ), void() ) { 30 61 pass.postvisit( node ); 31 62 } … … 36 67 // Mutate 37 68 template<typename pass_type, typename node_type> 38 static inline auto premutate_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.premutate( node ), void() ) {69 static inline auto premutate_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.premutate( node ), void() ) { 39 70 return pass.premutate( node ); 40 71 } … … 45 76 46 77 template<typename return_type, typename pass_type, typename node_type> 47 static inline auto postmutate_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.postmutate( node ) ) {78 static inline auto postmutate_impl( pass_type& pass, node_type * node, __attribute__((unused)) int unused ) -> decltype( pass.postmutate( node ) ) { 48 79 return pass.postmutate( node ); 49 80 } … … 54 85 // Begin/End scope 55 86 template<typename pass_type> 56 static inline auto begin_scope_impl( pass_type& pass, __attribute__((unused)) int unused ) -> decltype( pass.beginScope(), void() ) {87 static inline auto begin_scope_impl( pass_type& pass, __attribute__((unused)) int unused ) -> decltype( pass.beginScope(), void() ) { 57 88 pass.beginScope(); 58 89 } … … 63 94 64 95 template<typename pass_type> 65 static inline auto end_scope_impl( pass_type& pass, __attribute__((unused)) int unused ) -> decltype( pass.endScope(), void() ) {96 static inline auto end_scope_impl( pass_type& pass, __attribute__((unused)) int unused ) -> decltype( pass.endScope(), void() ) { 66 97 pass.endScope(); 67 98 } … … 73 104 #define FIELD_PTR( type, name ) \ 74 105 template<typename pass_type> \ 75 static inline auto name##_impl( pass_type& pass, __attribute__((unused)) int unused ) -> decltype( &pass.name ) { return &pass.name; }\106 static inline auto 
name##_impl( pass_type& pass, __attribute__((unused)) int unused ) -> decltype( &pass.name ) { return &pass.name; } \ 76 107 \ 77 108 template<typename pass_type> \ … … 82 113 FIELD_PTR( std::list< Statement* >, stmtsToAddAfter ) 83 114 FIELD_PTR( bool, skip_children ) 115 FIELD_PTR( at_cleanup_t, at_cleanup ) -
src/GenPoly/Box.cc
rade20d0 r436c0de 108 108 Type *replaceWithConcrete( ApplicationExpr *appExpr, Type *type, bool doClone = true ); 109 109 /// wraps a function application returning a polymorphic type with a new temporary for the out-parameter return value 110 Expression *addDynRetParam( ApplicationExpr *appExpr, FunctionType *function,Type *polyType, std::list< Expression *>::iterator &arg );110 Expression *addDynRetParam( ApplicationExpr *appExpr, Type *polyType, std::list< Expression *>::iterator &arg ); 111 111 Expression *applyAdapter( ApplicationExpr *appExpr, FunctionType *function, std::list< Expression *>::iterator &arg, const TyVarMap &exprTyVars ); 112 112 void boxParam( Type *formal, Expression *&arg, const TyVarMap &exprTyVars ); … … 726 726 } 727 727 728 Expression *Pass1::addDynRetParam( ApplicationExpr *appExpr, FunctionType *function,Type *dynType, std::list< Expression *>::iterator &arg ) {728 Expression *Pass1::addDynRetParam( ApplicationExpr *appExpr, Type *dynType, std::list< Expression *>::iterator &arg ) { 729 729 assert( env ); 730 730 Type *concrete = replaceWithConcrete( appExpr, dynType ); … … 1146 1146 if ( dynRetType ) { 1147 1147 Type *concRetType = appExpr->get_result()->isVoid() ? nullptr : appExpr->get_result(); 1148 ret = addDynRetParam( appExpr, function,concRetType, arg ); // xxx - used to use dynRetType instead of concRetType1148 ret = addDynRetParam( appExpr, concRetType, arg ); // xxx - used to use dynRetType instead of concRetType 1149 1149 } else if ( needsAdapter( function, scopeTyVars ) && ! needsAdapter( function, exprTyVars) ) { // xxx - exprTyVars is used above...? 1150 1150 // xxx - the ! needsAdapter check may be incorrect. It seems there is some situation where an adapter is applied where it shouldn't be, and this fixes it for some cases. More investigation is needed. -
src/GenPoly/Specialize.cc
rade20d0 r436c0de 93 93 } 94 94 95 bool needsTupleSpecialization( Type *formalType, Type *actualType , TypeSubstitution *env) {95 bool needsTupleSpecialization( Type *formalType, Type *actualType ) { 96 96 // Needs tuple specialization if the structure of the formal type and actual type do not match. 97 97 // This is the case if the formal type has ttype polymorphism, or if the structure of tuple types … … 112 112 113 113 bool needsSpecialization( Type *formalType, Type *actualType, TypeSubstitution *env ) { 114 return needsPolySpecialization( formalType, actualType, env ) || needsTupleSpecialization( formalType, actualType , env);114 return needsPolySpecialization( formalType, actualType, env ) || needsTupleSpecialization( formalType, actualType ); 115 115 } 116 116 -
src/InitTweak/FixInit.cc
rade20d0 r436c0de 902 902 } 903 903 904 void InsertDtors::visit( ReturnStmt * returnStmt ) {904 void InsertDtors::visit( __attribute((unused)) ReturnStmt * returnStmt ) { 905 905 // return exits all scopes, so dump destructors for all scopes 906 906 for ( OrderedDecls & od : reverseDeclOrder ) { -
src/InitTweak/GenInit.cc
rade20d0 r436c0de 39 39 40 40 namespace InitTweak { 41 class ReturnFixer final : public GenPoly::PolyMutator { 41 namespace { 42 const std::list<Label> noLabels; 43 const std::list<Expression *> noDesignators; 44 } 45 46 class ReturnFixer : public WithStmtsToAdd, public WithScopes { 42 47 public: 43 48 /// consistently allocates a temporary variable for the return value … … 46 51 static void makeReturnTemp( std::list< Declaration * > &translationUnit ); 47 52 48 typedef GenPoly::PolyMutator Parent; 49 using Parent::mutate; 50 virtual DeclarationWithType * mutate( FunctionDecl *functionDecl ) override; 51 virtual Statement * mutate( ReturnStmt * returnStmt ) override; 53 void premutate( FunctionDecl *functionDecl ); 54 void premutate( ReturnStmt * returnStmt ); 52 55 53 56 protected: … … 131 134 132 135 void ReturnFixer::makeReturnTemp( std::list< Declaration * > & translationUnit ) { 133 ReturnFixerfixer;136 PassVisitor<ReturnFixer> fixer; 134 137 mutateAll( translationUnit, fixer ); 135 138 } 136 139 137 Statement *ReturnFixer::mutate( ReturnStmt *returnStmt ) {140 void ReturnFixer::premutate( ReturnStmt *returnStmt ) { 138 141 std::list< DeclarationWithType * > & returnVals = ftype->get_returnVals(); 139 142 assert( returnVals.size() == 0 || returnVals.size() == 1 ); … … 146 149 construct->get_args().push_back( new AddressExpr( new VariableExpr( returnVals.front() ) ) ); 147 150 construct->get_args().push_back( returnStmt->get_expr() ); 148 stmtsToAdd .push_back(new ExprStmt(noLabels, construct));151 stmtsToAddBefore.push_back(new ExprStmt(noLabels, construct)); 149 152 150 153 // return the retVal object 151 154 returnStmt->set_expr( new VariableExpr( returnVals.front() ) ); 152 155 } // if 153 return returnStmt; 154 } 155 156 DeclarationWithType* ReturnFixer::mutate( FunctionDecl *functionDecl ) { 157 ValueGuard< FunctionType * > oldFtype( ftype ); 158 ValueGuard< std::string > oldFuncName( funcName ); 156 } 157 158 void ReturnFixer::premutate( FunctionDecl *functionDecl ) { 159 GuardValue( ftype ); 160 GuardValue( funcName ); 159 161 160 162 ftype = functionDecl->get_functionType(); 161 163 funcName = functionDecl->get_name(); 162 return Parent::mutate( functionDecl );163 164 } 164 165 -
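The rewritten ReturnFixer above replaces explicit ValueGuard members with GuardValue calls: the current value of a field such as ftype or funcName is saved when premutate enters a function declaration and restored automatically when the visitor leaves that scope. The following standalone sketch shows the underlying save-and-restore pattern under assumed names (ScopeGuards, guard_value); it is not the PassVisitor implementation itself.

// A scope-bound undo stack: each guarded variable is restored to its saved
// value, in reverse order, when the guard object is destroyed.
#include <functional>
#include <iostream>
#include <stack>
#include <string>

class ScopeGuards {
        std::stack< std::function<void()> > undo;
  public:
        template<typename T>
        void guard_value( T & var ) {                            // remember the current value
                T saved = var;
                undo.push( [&var, saved]() { var = saved; } );   // restore it on destruction
        }
        ~ScopeGuards() {
                while ( ! undo.empty() ) { undo.top()(); undo.pop(); }
        }
};

int main() {
        std::string funcName = "outer";
        {
                ScopeGuards guards;
                guards.guard_value( funcName );
                funcName = "inner";                              // temporary change inside the nested "pass"
                std::cout << funcName << std::endl;              // prints inner
        }
        std::cout << funcName << std::endl;                      // restored: prints outer
}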
src/Parser/ExpressionNode.cc
rade20d0 r436c0de 223 223 } // build_field_name_REALDECIMALconstant 224 224 225 NameExpr * build_varref( const string *name , bool labelp) {225 NameExpr * build_varref( const string *name ) { 226 226 NameExpr *expr = new NameExpr( *name, nullptr ); 227 227 delete name; -
src/Parser/ParseNode.h
rade20d0 r436c0de 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Sat May 16 13:28:16 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Fri Mar 17 15:42:18201713 // Update Count : 77 711 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Jun 12 13:00:00 2017 13 // Update Count : 779 14 14 // 15 15 … … 166 166 Expression * build_field_name_REALDECIMALconstant( const std::string & str ); 167 167 168 NameExpr * build_varref( const std::string * name , bool labelp = false);168 NameExpr * build_varref( const std::string * name ); 169 169 Expression * build_typevalue( DeclarationNode * decl ); 170 170 … … 393 393 Statement * build_return( ExpressionNode * ctl ); 394 394 Statement * build_throw( ExpressionNode * ctl ); 395 Statement * build_resume( ExpressionNode * ctl ); 396 Statement * build_resume_at( ExpressionNode * ctl , ExpressionNode * target ); 395 397 Statement * build_try( StatementNode * try_stmt, StatementNode * catch_stmt, StatementNode * finally_stmt ); 396 Statement * build_catch( DeclarationNode * decl, StatementNode * stmt, bool catchAny = false);398 Statement * build_catch( CatchStmt::Kind kind, DeclarationNode *decl, ExpressionNode *cond, StatementNode *body ); 397 399 Statement * build_finally( StatementNode * stmt ); 398 400 Statement * build_compound( StatementNode * first ); -
src/Parser/StatementNode.cc
rade20d0 r436c0de 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Sat May 16 14:59:41 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Feb 2 22:16:40 201713 // Update Count : 32 711 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Jun 12 13:03:00 2017 13 // Update Count : 329 14 14 // 15 15 … … 152 152 return new ReturnStmt( noLabels, exps.size() > 0 ? exps.back() : nullptr ); 153 153 } 154 154 155 Statement *build_throw( ExpressionNode *ctl ) { 155 156 std::list< Expression * > exps; 156 157 buildMoveList( ctl, exps ); 157 158 assertf( exps.size() < 2, "This means we are leaking memory"); 158 return new ReturnStmt( noLabels, !exps.empty() ? exps.back() : nullptr, true ); 159 return new ThrowStmt( noLabels, ThrowStmt::Terminate, !exps.empty() ? exps.back() : nullptr ); 160 } 161 162 Statement *build_resume( ExpressionNode *ctl ) { 163 std::list< Expression * > exps; 164 buildMoveList( ctl, exps ); 165 assertf( exps.size() < 2, "This means we are leaking memory"); 166 return new ThrowStmt( noLabels, ThrowStmt::Resume, !exps.empty() ? exps.back() : nullptr ); 167 } 168 169 Statement *build_resume_at( ExpressionNode *ctl, ExpressionNode *target ) { 170 std::list< Expression * > exps; 171 buildMoveList( ctl, exps ); 172 assertf( exps.size() < 2, "This means we are leaking memory"); 173 return new ThrowStmt( noLabels, ThrowStmt::Resume, !exps.empty() ? exps.back() : nullptr ); 159 174 } 160 175 … … 166 181 return new TryStmt( noLabels, tryBlock, branches, finallyBlock ); 167 182 } 168 Statement *build_catch( DeclarationNode *decl, StatementNode *stmt, bool catchAny ) {169 std::list< Statement * > branches; 170 buildMoveList< Statement, StatementNode >( stmt, branches );171 assert( branches.size() == 1 ); 172 return new CatchStmt( noLabels, maybeMoveBuild< Declaration >(decl), branches.front(), catchAny);183 Statement *build_catch( CatchStmt::Kind kind, DeclarationNode *decl, ExpressionNode *cond, StatementNode *body ) { 184 std::list< Statement * > branches; 185 buildMoveList< Statement, StatementNode >( body, branches ); 186 assert( branches.size() == 1 ); 187 return new CatchStmt( noLabels, kind, maybeMoveBuild< Declaration >(decl), maybeMoveBuild< Expression >(cond), branches.front() ); 173 188 } 174 189 Statement *build_finally( StatementNode *stmt ) { -
src/Parser/parser.yy
rade20d0 r436c0de 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu May 25 15:21:59201713 // Update Count : 2 39812 // Last Modified On : Mon Jun 12 12:59:00 2017 13 // Update Count : 2402 14 14 // 15 15 … … 193 193 %type<sn> case_value_list case_label case_label_list 194 194 %type<sn> switch_clause_list_opt switch_clause_list choose_clause_list_opt choose_clause_list 195 %type<sn> handler_listhandler_clause finally_clause195 %type<sn> /* handler_list */ handler_clause finally_clause 196 196 197 197 // declarations … … 547 547 { $$ = new ExpressionNode( build_attrtype( build_varref( $1 ), $3 ) ); } 548 548 // | ANDAND IDENTIFIER // GCC, address of label 549 // { $$ = new ExpressionNode( new OperatorNode( OperKinds::LabelAddress ), new ExpressionNode( build_varref( $2 , true) ); }549 // { $$ = new ExpressionNode( new OperatorNode( OperKinds::LabelAddress ), new ExpressionNode( build_varref( $2 ) ); } 550 550 ; 551 551 … … 931 931 { $$ = new StatementNode( build_throw( $2 ) ); } 932 932 | THROWRESUME assignment_expression_opt ';' // handles reresume 933 { $$ = new StatementNode( build_ throw( $2 ) ); }933 { $$ = new StatementNode( build_resume( $2 ) ); } 934 934 | THROWRESUME assignment_expression_opt AT assignment_expression ';' // handles reresume 935 { $$ = new StatementNode( build_ throw( $2) ); }935 { $$ = new StatementNode( build_resume_at( $2, $4 ) ); } 936 936 ; 937 937 938 938 exception_statement: 939 TRY compound_statement handler_ list939 TRY compound_statement handler_clause 940 940 { $$ = new StatementNode( build_try( $2, $3, 0 ) ); } 941 941 | TRY compound_statement finally_clause 942 942 { $$ = new StatementNode( build_try( $2, 0, $3 ) ); } 943 | TRY compound_statement handler_ listfinally_clause943 | TRY compound_statement handler_clause finally_clause 944 944 { $$ = new StatementNode( build_try( $2, $3, $4 ) ); } 945 945 ; 946 946 947 handler_list:948 handler_clause949 // ISO/IEC 9899:1999 Section 15.3(6 ) If present, a "..." handler shall be the last handler for its try block.950 | CATCH '(' ELLIPSIS ')' compound_statement951 { $$ = new StatementNode( build_catch( 0, $5, true ) ); }952 | handler_clause CATCH '(' ELLIPSIS ')' compound_statement953 { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( 0, $6, true ) ) ); }954 | CATCHRESUME '(' ELLIPSIS ')' compound_statement955 { $$ = new StatementNode( build_catch( 0, $5, true ) ); }956 | handler_clause CATCHRESUME '(' ELLIPSIS ')' compound_statement957 { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( 0, $6, true ) ) ); }958 ;947 //handler_list: 948 // handler_clause 949 // // ISO/IEC 9899:1999 Section 15.3(6 ) If present, a "..." handler shall be the last handler for its try block. 
950 // | CATCH '(' ELLIPSIS ')' compound_statement 951 // { $$ = new StatementNode( build_catch( 0, $5, true ) ); } 952 // | handler_clause CATCH '(' ELLIPSIS ')' compound_statement 953 // { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( 0, $6, true ) ) ); } 954 // | CATCHRESUME '(' ELLIPSIS ')' compound_statement 955 // { $$ = new StatementNode( build_catch( 0, $5, true ) ); } 956 // | handler_clause CATCHRESUME '(' ELLIPSIS ')' compound_statement 957 // { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( 0, $6, true ) ) ); } 958 // ; 959 959 960 960 handler_clause: 961 961 CATCH '(' push push exception_declaration pop ')' compound_statement pop 962 { $$ = new StatementNode( build_catch( $5, $8 ) ); }962 { $$ = new StatementNode( build_catch( CatchStmt::Terminate, $5, nullptr, $8 ) ); } 963 963 | handler_clause CATCH '(' push push exception_declaration pop ')' compound_statement pop 964 { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( $6, $9 ) ) ); }964 { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( CatchStmt::Terminate, $6, nullptr, $9 ) ) ); } 965 965 | CATCHRESUME '(' push push exception_declaration pop ')' compound_statement pop 966 { $$ = new StatementNode( build_catch( $5, $8 ) ); }966 { $$ = new StatementNode( build_catch( CatchStmt::Resume, $5, nullptr, $8 ) ); } 967 967 | handler_clause CATCHRESUME '(' push push exception_declaration pop ')' compound_statement pop 968 { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( $6, $9 ) ) ); }968 { $$ = (StatementNode *)$1->set_last( new StatementNode( build_catch( CatchStmt::Resume, $6, nullptr, $9 ) ) ); } 969 969 ; 970 970 -
src/ResolvExpr/AlternativeFinder.cc
rade20d0 r436c0de 97 97 /// Prunes a list of alternatives down to those that have the minimum conversion cost for a given return type; skips ambiguous interpretations 98 98 template< typename InputIterator, typename OutputIterator > 99 void pruneAlternatives( InputIterator begin, InputIterator end, OutputIterator out , const SymTab::Indexer &indexer) {99 void pruneAlternatives( InputIterator begin, InputIterator end, OutputIterator out ) { 100 100 // select the alternatives that have the minimum conversion cost for a particular set of result types 101 101 std::map< std::string, PruneStruct > selected; … … 183 183 ) 184 184 AltList::iterator oldBegin = alternatives.begin(); 185 pruneAlternatives( alternatives.begin(), alternatives.end(), front_inserter( alternatives ) , indexer);185 pruneAlternatives( alternatives.begin(), alternatives.end(), front_inserter( alternatives ) ); 186 186 if ( alternatives.begin() == oldBegin ) { 187 187 std::ostringstream stream; -
src/ResolvExpr/CommonType.cc
rade20d0 r436c0de 157 157 void CommonType::visit( PointerType *pointerType ) { 158 158 if ( PointerType *otherPointer = dynamic_cast< PointerType* >( type2 ) ) { 159 if ( widenFirst && dynamic_cast< VoidType* >( otherPointer->get_base() ) && ! isFtype(pointerType->get_base() , indexer) ) {159 if ( widenFirst && dynamic_cast< VoidType* >( otherPointer->get_base() ) && ! isFtype(pointerType->get_base()) ) { 160 160 getCommonWithVoidPointer( otherPointer, pointerType ); 161 } else if ( widenSecond && dynamic_cast< VoidType* >( pointerType->get_base() ) && ! isFtype(otherPointer->get_base() , indexer) ) {161 } else if ( widenSecond && dynamic_cast< VoidType* >( pointerType->get_base() ) && ! isFtype(otherPointer->get_base()) ) { 162 162 getCommonWithVoidPointer( pointerType, otherPointer ); 163 163 } else if ( ( pointerType->get_base()->get_qualifiers() >= otherPointer->get_base()->get_qualifiers() || widenFirst ) -
src/ResolvExpr/PtrsCastable.cc
rade20d0 r436c0de 135 135 } 136 136 137 void PtrsCastable::visit(TraitInstType *inst) { 138 // I definitely don't think we should be doing anything here 139 } 137 void PtrsCastable::visit( __attribute__((unused)) TraitInstType *inst ) {} 140 138 141 139 void PtrsCastable::visit(TypeInstType *inst) { -
src/ResolvExpr/Unify.cc
rade20d0 r436c0de 114 114 } 115 115 116 bool isFtype( Type *type , const SymTab::Indexer &indexer) {116 bool isFtype( Type *type ) { 117 117 if ( dynamic_cast< FunctionType* >( type ) ) { 118 118 return true; … … 123 123 } 124 124 125 bool tyVarCompatible( const TypeDecl::Data & data, Type *type , const SymTab::Indexer &indexer) {125 bool tyVarCompatible( const TypeDecl::Data & data, Type *type ) { 126 126 switch ( data.kind ) { 127 127 case TypeDecl::Any: … … 131 131 // type must also be complete 132 132 // xxx - should this also check that type is not a tuple type and that it's not a ttype? 133 return ! isFtype( type , indexer) && (! data.isComplete || type->isComplete() );133 return ! isFtype( type ) && (! data.isComplete || type->isComplete() ); 134 134 case TypeDecl::Ftype: 135 return isFtype( type , indexer);135 return isFtype( type ); 136 136 case TypeDecl::Ttype: 137 137 // ttype unifies with any tuple type … … 144 144 OpenVarSet::const_iterator tyvar = openVars.find( typeInst->get_name() ); 145 145 assert( tyvar != openVars.end() ); 146 if ( ! tyVarCompatible( tyvar->second, other , indexer) ) {146 if ( ! tyVarCompatible( tyvar->second, other ) ) { 147 147 return false; 148 148 } // if … … 388 388 } 389 389 390 void Unify::visit( VoidType *voidType) {390 void Unify::visit( __attribute__((unused)) VoidType *voidType) { 391 391 result = dynamic_cast< VoidType* >( type2 ); 392 392 } … … 683 683 684 684 template< typename Iterator1, typename Iterator2 > 685 bool unifyList( Iterator1 list1Begin, Iterator1 list1End, Iterator2 list2Begin, Iterator2 list2End, TypeEnvironment &env, AssertionSet &needAssertions, AssertionSet &haveAssertions, const OpenVarSet &openVars, WidenMode widenMode,const SymTab::Indexer &indexer ) {685 bool unifyList( Iterator1 list1Begin, Iterator1 list1End, Iterator2 list2Begin, Iterator2 list2End, TypeEnvironment &env, AssertionSet &needAssertions, AssertionSet &haveAssertions, const OpenVarSet &openVars, const SymTab::Indexer &indexer ) { 686 686 auto get_type = [](Type * t) { return t; }; 687 687 for ( ; list1Begin != list1End && list2Begin != list2End; ++list1Begin, ++list2Begin ) { … … 733 733 flatten( flat2.get(), back_inserter( types2 ) ); 734 734 735 result = unifyList( types1.begin(), types1.end(), types2.begin(), types2.end(), env, needAssertions, haveAssertions, openVars, widenMode,indexer );736 } // if 737 } 738 739 void Unify::visit( VarArgsType *varArgsType) {735 result = unifyList( types1.begin(), types1.end(), types2.begin(), types2.end(), env, needAssertions, haveAssertions, openVars, indexer ); 736 } // if 737 } 738 739 void Unify::visit( __attribute__((unused)) VarArgsType *varArgsType ) { 740 740 result = dynamic_cast< VarArgsType* >( type2 ); 741 741 } 742 742 743 void Unify::visit( ZeroType *zeroType) {743 void Unify::visit( __attribute__((unused)) ZeroType *zeroType ) { 744 744 result = dynamic_cast< ZeroType* >( type2 ); 745 745 } 746 746 747 void Unify::visit( OneType *oneType) {747 void Unify::visit( __attribute__((unused)) OneType *oneType ) { 748 748 result = dynamic_cast< OneType* >( type2 ); 749 749 } -
src/ResolvExpr/typeops.h
rade20d0 r436c0de 118 118 119 119 // in Unify.cc 120 bool isFtype( Type *type , const SymTab::Indexer &indexer);120 bool isFtype( Type *type ); 121 121 bool typesCompatible( Type *, Type *, const SymTab::Indexer &indexer, const TypeEnvironment &env ); 122 122 bool typesCompatibleIgnoreQualifiers( Type *, Type *, const SymTab::Indexer &indexer, const TypeEnvironment &env ); -
src/SymTab/Autogen.cc
rade20d0 r436c0de 262 262 // E ?=?(E volatile*, int), 263 263 // ?=?(E _Atomic volatile*, int); 264 void makeEnumFunctions( Enum Decl *enumDecl, EnumInstType *refType, unsigned int functionNesting, std::list< Declaration * > &declsToAdd ) {264 void makeEnumFunctions( EnumInstType *refType, unsigned int functionNesting, std::list< Declaration * > &declsToAdd ) { 265 265 266 266 // T ?=?(E *, E); … … 486 486 487 487 /// generates the body of a union assignment/copy constructor/field constructor 488 void makeUnionAssignBody( FunctionDecl * funcDecl , bool isDynamicLayout) {488 void makeUnionAssignBody( FunctionDecl * funcDecl ) { 489 489 FunctionType * ftype = funcDecl->get_functionType(); 490 490 assert( ftype->get_parameters().size() == 2 ); … … 506 506 // Make function polymorphic in same parameters as generic union, if applicable 507 507 const std::list< TypeDecl* > & typeParams = aggregateDecl->get_parameters(); // List of type variables to be placed on the generated functions 508 bool isDynamicLayout = hasDynamicLayout( aggregateDecl ); // NOTE this flag is an incredibly ugly kludge; we should fix the assignment signature instead (ditto for struct) 509 508 510 509 // default ctor/dtor need only first parameter 511 510 // void ?{}(T *); void ^?{}(T *); … … 533 532 FunctionDecl *dtorDecl = genFunc( "^?{}", dtorType, functionNesting ); 534 533 535 makeUnionAssignBody( assignDecl , isDynamicLayout);534 makeUnionAssignBody( assignDecl ); 536 535 537 536 // body of assignment and copy ctor is the same 538 makeUnionAssignBody( copyCtorDecl , isDynamicLayout);537 makeUnionAssignBody( copyCtorDecl ); 539 538 540 539 // create a constructor which takes the first member type as a parameter. … … 551 550 FunctionDecl * ctor = genFunc( "?{}", memCtorType, functionNesting ); 552 551 553 makeUnionAssignBody( ctor , isDynamicLayout);552 makeUnionAssignBody( ctor ); 554 553 memCtors.push_back( ctor ); 555 554 // only generate a ctor for the first field … … 578 577 EnumInstType *enumInst = new EnumInstType( Type::Qualifiers(), enumDecl->get_name() ); 579 578 // enumInst->set_baseEnum( enumDecl ); 580 makeEnumFunctions( enum Decl, enumInst, functionNesting, declsToAddAfter );579 makeEnumFunctions( enumInst, functionNesting, declsToAddAfter ); 581 580 } 582 581 } -
src/SymTab/ImplementationType.cc
rade20d0 r436c0de 76 76 } 77 77 78 void ImplementationType::visit(FunctionType *functionType) { 79 /// FunctionType *newType = functionType->clone(); 80 /// for ( std::list< DeclarationWithType* >::iterator i = newType->get_parameters().begin(); i != newType->get_parameters().end(); ++i ) { 81 /// i->set_type( implementationType( i->get_type(), indexer ) ); 82 /// } 83 /// for ( std::list< DeclarationWithType* >::iterator i = newType->get_parameters().begin(); i != newType->get_parameters().end(); ++i ) { 84 /// i->set_type( implementationType( i->get_type(), indexer ) ); 85 /// } 86 } 87 78 void ImplementationType::visit( __attribute__((unused)) FunctionType *functionType ) {} 88 79 void ImplementationType::visit( __attribute__((unused)) StructInstType * aggregateUseType ) {} 89 80 void ImplementationType::visit( __attribute__((unused)) UnionInstType * aggregateUseType ) {} -
src/SymTab/Indexer.cc
rade20d0 r436c0de 518 518 acceptNewScope( tupleExpr->get_result(), *this ); 519 519 maybeAccept( tupleExpr->get_tuple(), *this ); 520 }521 522 void Indexer::visit( MemberTupleExpr *tupleExpr ) {523 acceptNewScope( tupleExpr->get_result(), *this );524 maybeAccept( tupleExpr->get_member(), *this );525 maybeAccept( tupleExpr->get_aggregate(), *this );526 520 } 527 521 -
src/SymTab/Indexer.h
rade20d0 r436c0de 74 74 virtual void visit( TupleExpr *tupleExpr ); 75 75 virtual void visit( TupleIndexExpr *tupleExpr ); 76 virtual void visit( MemberTupleExpr *tupleExpr );77 76 virtual void visit( TupleAssignExpr *tupleExpr ); 78 77 virtual void visit( StmtExpr * stmtExpr ); -
src/SymTab/Mangler.cc
rade20d0 r436c0de 236 236 } 237 237 238 void Mangler::visit( ZeroType *zeroType ) {238 void Mangler::visit( __attribute__((unused)) ZeroType *zeroType ) { 239 239 mangleName << "Z"; 240 240 } 241 241 242 void Mangler::visit( OneType *oneType ) {242 void Mangler::visit( __attribute__((unused)) OneType *oneType ) { 243 243 mangleName << "O"; 244 244 } -
src/SymTab/Validate.cc
rade20d0 r436c0de 611 611 returnVals = functionDecl->get_functionType()->get_returnVals(); 612 612 } 613 void ReturnChecker::postvisit( FunctionDecl * functionDecl ) {613 void ReturnChecker::postvisit( __attribute__((unused)) FunctionDecl * functionDecl ) { 614 614 returnVals = returnValsStack.top(); 615 615 returnValsStack.pop(); -
src/SynTree/Expression.h
rade20d0 r436c0de 690 690 }; 691 691 692 /// MemberTupleExpr represents a tuple member selection operation on a struct type, e.g. s.[a, b, c] after processing by the expression analyzer693 class MemberTupleExpr : public Expression {694 public:695 MemberTupleExpr( Expression * member, Expression * aggregate, Expression * _aname = nullptr );696 MemberTupleExpr( const MemberTupleExpr & other );697 virtual ~MemberTupleExpr();698 699 Expression * get_member() const { return member; }700 Expression * get_aggregate() const { return aggregate; }701 MemberTupleExpr * set_member( Expression * newValue ) { member = newValue; return this; }702 MemberTupleExpr * set_aggregate( Expression * newValue ) { aggregate = newValue; return this; }703 704 virtual MemberTupleExpr * clone() const { return new MemberTupleExpr( * this ); }705 virtual void accept( Visitor & v ) { v.visit( this ); }706 virtual Expression * acceptMutator( Mutator & m ) { return m.mutate( this ); }707 virtual void print( std::ostream & os, int indent = 0 ) const;708 private:709 Expression * member;710 Expression * aggregate;711 };712 713 692 /// TupleAssignExpr represents a multiple assignment operation, where both sides of the assignment have tuple type, e.g. [a, b, c] = [d, e, f];, a mass assignment operation, where the left hand side has tuple type and the right hand side does not, e.g. [a, b, c] = 5.0;, or a tuple ctor/dtor expression 714 693 class TupleAssignExpr : public Expression { -
src/SynTree/Initializer.cc
rade20d0 r436c0de 33 33 } 34 34 35 void Initializer::print( std::ostream &os,int indent ) {}35 // void Initializer::print( __attribute__((unused)) std::ostream &os, __attribute__((unused)) int indent ) {} 36 36 37 37 SingleInit::SingleInit( Expression *v, const std::list< Expression *> &_designators, bool maybeConstructed ) : Initializer( maybeConstructed ), value ( v ), designators( _designators ) { -
src/SynTree/Initializer.h
rade20d0 r436c0de 53 53 virtual void accept( Visitor &v ) = 0; 54 54 virtual Initializer *acceptMutator( Mutator &m ) = 0; 55 virtual void print( std::ostream &os, int indent = 0 ) ;55 virtual void print( std::ostream &os, int indent = 0 ) = 0; 56 56 private: 57 57 // std::string name; -
src/SynTree/Mutator.cc
rade20d0 r436c0de 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Mar 30 16:45:19201713 // Update Count : 2 211 // Last Modified By : Andrew Beach 12 // Last Modified On : Thu Mar 8 16:36:00 2017 13 // Update Count : 23 14 14 // 15 15 … … 153 153 } 154 154 155 Statement *Mutator::mutate( ThrowStmt *throwStmt ) { 156 throwStmt->set_expr( maybeMutate( throwStmt->get_expr(), *this ) ); 157 throwStmt->set_target( maybeMutate( throwStmt->get_target(), *this ) ); 158 return throwStmt; 159 } 160 155 161 Statement *Mutator::mutate( TryStmt *tryStmt ) { 156 162 tryStmt->set_block( maybeMutate( tryStmt->get_block(), *this ) ); … … 408 414 } 409 415 410 Expression *Mutator::mutate( MemberTupleExpr *tupleExpr ) {411 tupleExpr->set_env( maybeMutate( tupleExpr->get_env(), *this ) );412 tupleExpr->set_result( maybeMutate( tupleExpr->get_result(), *this ) );413 tupleExpr->set_member( maybeMutate( tupleExpr->get_member(), *this ) );414 tupleExpr->set_aggregate( maybeMutate( tupleExpr->get_aggregate(), *this ) );415 return tupleExpr;416 }417 418 416 Expression *Mutator::mutate( TupleAssignExpr *assignExpr ) { 419 417 assignExpr->set_env( maybeMutate( assignExpr->get_env(), *this ) ); -
src/SynTree/Mutator.h
rade20d0 r436c0de 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Feb 9 14:23:23201713 // Update Count : 1 311 // Last Modified By : Andrew Beach 12 // Last Modified On : Thu Jun 8 15:45:00 2017 13 // Update Count : 14 14 14 // 15 15 #include <cassert> … … 46 46 virtual Statement* mutate( BranchStmt *branchStmt ); 47 47 virtual Statement* mutate( ReturnStmt *returnStmt ); 48 virtual Statement* mutate( TryStmt *returnStmt ); 48 virtual Statement* mutate( ThrowStmt *throwStmt ); 49 virtual Statement* mutate( TryStmt *tryStmt ); 49 50 virtual Statement* mutate( CatchStmt *catchStmt ); 50 51 virtual Statement* mutate( FinallyStmt *catchStmt ); … … 82 83 virtual Expression* mutate( TupleExpr *tupleExpr ); 83 84 virtual Expression* mutate( TupleIndexExpr *tupleExpr ); 84 virtual Expression* mutate( MemberTupleExpr *tupleExpr );85 85 virtual Expression* mutate( TupleAssignExpr *assignExpr ); 86 86 virtual Expression* mutate( StmtExpr * stmtExpr ); -
src/SynTree/Statement.cc
rade20d0 r436c0de 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Fri Aug 12 13:58:48 201613 // Update Count : 6 211 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Jun 12 10:37:00 2017 13 // Update Count : 64 14 14 // 15 15 … … 101 101 } 102 102 103 ReturnStmt::ReturnStmt( std::list<Label> labels, Expression *_expr , bool throwP ) : Statement( labels ), expr( _expr ), isThrow( throwP) {}104 105 ReturnStmt::ReturnStmt( const ReturnStmt & other ) : Statement( other ), expr( maybeClone( other.expr ) ) , isThrow( other.isThrow ){}103 ReturnStmt::ReturnStmt( std::list<Label> labels, Expression *_expr ) : Statement( labels ), expr( _expr ) {} 104 105 ReturnStmt::ReturnStmt( const ReturnStmt & other ) : Statement( other ), expr( maybeClone( other.expr ) ) {} 106 106 107 107 ReturnStmt::~ReturnStmt() { … … 110 110 111 111 void ReturnStmt::print( std::ostream &os, int indent ) const { 112 os << string ( isThrow? "Throw":"Return" ) << "Statement, returning: ";112 os << "Return Statement, returning: "; 113 113 if ( expr != 0 ) { 114 114 os << endl << string( indent+2, ' ' ); … … 287 287 } 288 288 289 ThrowStmt::ThrowStmt( std::list<Label> labels, Kind kind, Expression * expr, Expression * target ) : 290 Statement( labels ), kind(kind), expr(expr), target(target) { 291 assertf(Resume == kind || nullptr == target, "Non-local termination throw is not accepted." ); 292 } 293 294 ThrowStmt::ThrowStmt( const ThrowStmt &other ) : 295 Statement ( other ), kind( other.kind ), expr( maybeClone( other.expr ) ), target( maybeClone( other.target ) ) { 296 } 297 298 ThrowStmt::~ThrowStmt() { 299 delete expr; 300 delete target; 301 } 302 303 void ThrowStmt::print( std::ostream &os, int indent) const { 304 if ( target ) { 305 os << "Non-Local "; 306 } 307 os << "Throw Statement, raising: "; 308 expr->print(os, indent + 4); 309 if ( target ) { 310 os << "At: "; 311 target->print(os, indent + 4); 312 } 313 } 314 289 315 TryStmt::TryStmt( std::list<Label> labels, CompoundStmt *tryBlock, std::list<Statement *> &_handlers, FinallyStmt *_finallyBlock ) : 290 316 Statement( labels ), block( tryBlock ), handlers( _handlers ), finallyBlock( _finallyBlock ) { … … 318 344 } 319 345 320 CatchStmt::CatchStmt( std::list<Label> labels, Declaration *_decl, Statement *_body, bool catchAny ) :321 Statement( labels ), decl ( _decl ), body( _body ), catchRest ( catchAny ) {346 CatchStmt::CatchStmt( std::list<Label> labels, Kind _kind, Declaration *_decl, Expression *_cond, Statement *_body ) : 347 Statement( labels ), kind ( _kind ), decl ( _decl ), cond ( _cond ), body( _body ) { 322 348 } 323 349 324 350 CatchStmt::CatchStmt( const CatchStmt & other ) : 325 Statement( other ), decl ( maybeClone( other.decl ) ), body( maybeClone( other.body ) ), catchRest ( other.catchRest) {351 Statement( other ), kind ( other.kind ), decl ( maybeClone( other.decl ) ), cond ( maybeClone( other.cond ) ), body( maybeClone( other.body ) ) { 326 352 } 327 353 … … 332 358 333 359 void CatchStmt::print( std::ostream &os, int indent ) const { 334 os << "Catch Statement" << endl;360 os << "Catch " << ((Terminate == kind) ? "Terminate" : "Resume") << " Statement" << endl; 335 361 336 362 os << string( indent, ' ' ) << "... 
catching" << endl; … … 338 364 decl->printShort( os, indent + 4 ); 339 365 os << endl; 340 } else if ( catchRest ) 341 os << string( indent + 4 , ' ' ) << "the rest" << endl; 366 } 342 367 else 343 368 os << string( indent + 4 , ' ' ) << ">>> Error: this catch clause must have a declaration <<<" << endl; -
src/SynTree/Statement.h
rade20d0 r436c0de 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Fri Aug 12 13:57:46 201613 // Update Count : 6 511 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Jun 12 13:35:00 2017 13 // Update Count : 67 14 14 // 15 15 … … 57 57 private: 58 58 std::list<Statement*> kids; 59 }; 60 61 class NullStmt : public CompoundStmt { 62 public: 63 NullStmt(); 64 NullStmt( std::list<Label> labels ); 65 66 virtual NullStmt *clone() const { return new NullStmt( *this ); } 67 virtual void accept( Visitor &v ) { v.visit( this ); } 68 virtual NullStmt *acceptMutator( Mutator &m ) { return m.mutate( this ); } 69 virtual void print( std::ostream &os, int indent = 0 ) const; 70 71 private: 59 72 }; 60 73 … … 261 274 class ReturnStmt : public Statement { 262 275 public: 263 ReturnStmt( std::list<Label> labels, Expression *expr , bool throwP = false);276 ReturnStmt( std::list<Label> labels, Expression *expr ); 264 277 ReturnStmt( const ReturnStmt &other ); 265 278 virtual ~ReturnStmt(); … … 274 287 private: 275 288 Expression *expr; 276 bool isThrow; 277 }; 278 279 280 class NullStmt : public CompoundStmt { 281 public: 282 NullStmt(); 283 NullStmt( std::list<Label> labels ); 284 285 virtual NullStmt *clone() const { return new NullStmt( *this ); } 286 virtual void accept( Visitor &v ) { v.visit( this ); } 287 virtual NullStmt *acceptMutator( Mutator &m ) { return m.mutate( this ); } 288 virtual void print( std::ostream &os, int indent = 0 ) const; 289 290 private: 289 }; 290 291 class ThrowStmt : public Statement { 292 public: 293 enum Kind { Terminate, Resume }; 294 295 ThrowStmt( std::list<Label> labels, Kind kind, Expression * expr, Expression * target = nullptr ); 296 ThrowStmt( const ThrowStmt &other ); 297 virtual ~ThrowStmt(); 298 299 Kind get_kind() { return kind; } 300 Expression * get_expr() { return expr; } 301 void set_expr( Expression * newExpr ) { expr = newExpr; } 302 Expression * get_target() { return target; } 303 void set_target( Expression * newTarget ) { target = newTarget; } 304 305 virtual ThrowStmt *clone() const { return new ThrowStmt( *this ); } 306 virtual void accept( Visitor &v ) { v.visit( this ); } 307 virtual Statement *acceptMutator( Mutator &m ) { return m.mutate( this ); } 308 virtual void print( std::ostream &os, int indent = 0 ) const; 309 private: 310 Kind kind; 311 Expression * expr; 312 Expression * target; 291 313 }; 292 314 … … 317 339 class CatchStmt : public Statement { 318 340 public: 319 CatchStmt( std::list<Label> labels, Declaration *decl, Statement *body, bool catchAny = false ); 341 enum Kind { Terminate, Resume }; 342 343 CatchStmt( std::list<Label> labels, Kind kind, Declaration *decl, 344 Expression *cond, Statement *body ); 320 345 CatchStmt( const CatchStmt &other ); 321 346 virtual ~CatchStmt(); 322 347 348 Kind get_kind() { return kind; } 323 349 Declaration *get_decl() { return decl; } 324 350 void set_decl( Declaration *newValue ) { decl = newValue; } 325 351 Expression *get_cond() { return cond; } 352 void set_cond( Expression *newCond ) { cond = newCond; } 326 353 Statement *get_body() { return body; } 327 354 void set_body( Statement *newValue ) { body = newValue; } … … 333 360 334 361 private: 362 Kind kind; 335 363 Declaration *decl; 364 Expression *cond; 336 365 Statement *body; 337 bool catchRest;338 366 }; 339 367 -
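The new ThrowStmt class above follows the tree's ownership convention: a node owns its child expressions, clone() makes a deep copy, and optional children (here the non-local target) may be null and are copied through maybeClone. The sketch below shows that convention in isolation; ThrowLikeStmt is a hypothetical stand-in, not the actual AST class.

// Deep-copy ownership with a null-safe clone helper.
#include <iostream>

struct Expression {
        int value;
        explicit Expression( int v ) : value( v ) {}
        Expression * clone() const { return new Expression( *this ); }
};

template<typename T>
T * maybeClone( const T * p ) { return p ? p->clone() : nullptr; }   // tolerate optional children

struct ThrowLikeStmt {
        Expression * expr;                                       // required child
        Expression * target;                                     // optional child (non-local resumption only)

        ThrowLikeStmt( Expression * e, Expression * t = nullptr ) : expr( e ), target( t ) {}
        ThrowLikeStmt( const ThrowLikeStmt & o )
                : expr( maybeClone( o.expr ) ), target( maybeClone( o.target ) ) {}
        ~ThrowLikeStmt() { delete expr; delete target; }         // delete of nullptr is a no-op
};

int main() {
        ThrowLikeStmt original( new Expression( 7 ) );           // local throw: no target
        ThrowLikeStmt copy( original );                          // deep copy; target stays null
        std::cout << copy.expr->value << " " << ( copy.target == nullptr ) << std::endl;
}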
src/SynTree/SynTree.h
rade20d0 r436c0de 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Feb 9 14:23:49201713 // Update Count : 811 // Last Modified By : Andrew Beach 12 // Last Modified On : Thu Jun 8 17:00:00 2017 13 // Update Count : 9 14 14 // 15 15 … … 51 51 class BranchStmt; 52 52 class ReturnStmt; 53 class ThrowStmt; 53 54 class TryStmt; 54 55 class CatchStmt; … … 89 90 class TupleExpr; 90 91 class TupleIndexExpr; 91 class MemberTupleExpr;92 92 class TupleAssignExpr; 93 93 class StmtExpr; -
src/SynTree/TupleExpr.cc
rade20d0 r436c0de 78 78 } 79 79 80 MemberTupleExpr::MemberTupleExpr( Expression * member, Expression * aggregate, Expression * _aname ) : Expression( _aname ) {81 set_result( maybeClone( member->get_result() ) ); // xxx - ???82 }83 84 MemberTupleExpr::MemberTupleExpr( const MemberTupleExpr &other ) : Expression( other ), member( other.member->clone() ), aggregate( other.aggregate->clone() ) {85 }86 87 MemberTupleExpr::~MemberTupleExpr() {88 delete member;89 delete aggregate;90 }91 92 void MemberTupleExpr::print( std::ostream &os, int indent ) const {93 os << "Member Tuple Expression, with aggregate:" << std::endl;94 os << std::string( indent+2, ' ' );95 aggregate->print( os, indent+2 );96 os << std::string( indent+2, ' ' ) << "with member: " << std::endl;97 os << std::string( indent+2, ' ' );98 member->print( os, indent+2 );99 Expression::print( os, indent );100 }101 102 80 TupleAssignExpr::TupleAssignExpr( const std::list< Expression * > & assigns, const std::list< ObjectDecl * > & tempDecls, Expression * _aname ) : Expression( _aname ) { 103 81 // convert internally into a StmtExpr which contains the declarations and produces the tuple of the assignments -
src/SynTree/Visitor.cc
rade20d0 r436c0de 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Mar 30 16:45:25201713 // Update Count : 2 411 // Last Modified By : Andrew Beach 12 // Last Modified On : Thu Jun 8 16:31:00 2017 13 // Update Count : 25 14 14 // 15 15 … … 129 129 } 130 130 131 void Visitor::visit( ThrowStmt * throwStmt ) { 132 maybeAccept( throwStmt->get_expr(), *this ); 133 maybeAccept( throwStmt->get_target(), *this ); 134 } 135 131 136 void Visitor::visit( TryStmt *tryStmt ) { 132 137 maybeAccept( tryStmt->get_block(), *this ); … … 319 324 maybeAccept( tupleExpr->get_result(), *this ); 320 325 maybeAccept( tupleExpr->get_tuple(), *this ); 321 }322 323 void Visitor::visit( MemberTupleExpr *tupleExpr ) {324 maybeAccept( tupleExpr->get_result(), *this );325 maybeAccept( tupleExpr->get_member(), *this );326 maybeAccept( tupleExpr->get_aggregate(), *this );327 326 } 328 327 -
src/SynTree/Visitor.h
rade20d0 r436c0de 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed May 3 08:58:00 201713 // Update Count : 1 012 // Last Modified On : Thr Jun 08 15:45:00 2017 13 // Update Count : 11 14 14 // 15 15 … … 49 49 virtual void visit( BranchStmt *branchStmt ); 50 50 virtual void visit( ReturnStmt *returnStmt ); 51 virtual void visit( ThrowStmt *throwStmt ); 51 52 virtual void visit( TryStmt *tryStmt ); 52 53 virtual void visit( CatchStmt *catchStmt ); … … 85 86 virtual void visit( TupleExpr *tupleExpr ); 86 87 virtual void visit( TupleIndexExpr *tupleExpr ); 87 virtual void visit( MemberTupleExpr *tupleExpr );88 88 virtual void visit( TupleAssignExpr *assignExpr ); 89 89 virtual void visit( StmtExpr * stmtExpr ); -
src/SynTree/ZeroOneType.cc
rade20d0 r436c0de 20 20 ZeroType::ZeroType( Type::Qualifiers tq, const std::list< Attribute * > & attributes ) : Type( tq, attributes ) {} 21 21 22 void ZeroType::print( std::ostream &os, int indent ) const {22 void ZeroType::print( std::ostream &os, __attribute__((unused)) int indent ) const { 23 23 os << "zero_t"; 24 24 } … … 28 28 OneType::OneType( Type::Qualifiers tq, const std::list< Attribute * > & attributes ) : Type( tq, attributes ) {} 29 29 30 void OneType::print( std::ostream &os, int indent ) const {30 void OneType::print( std::ostream &os, __attribute__((unused)) int indent ) const { 31 31 os << "one_t"; 32 32 } -
src/Tuples/TupleExpansion.cc
rade20d0 r436c0de 354 354 maybeImpure = true; 355 355 } 356 virtual void visit( UntypedExpr * untypedExpr ) { maybeImpure = true; }356 virtual void visit( __attribute__((unused)) UntypedExpr * untypedExpr ) { maybeImpure = true; } 357 357 bool maybeImpure = false; 358 358 }; -
src/libcfa/concurrency/alarm.c
rade20d0 r436c0de 104 104 105 105 static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) { 106 assert( it );107 assert( (*it)->next == n );106 verify( it ); 107 verify( (*it)->next == n ); 108 108 109 109 (*it)->next = n->next; -
src/libcfa/concurrency/coroutine
rade20d0 r436c0de 71 71 // Suspend implementation inlined for performance 72 72 static inline void suspend() { 73 73 coroutine_desc * src = this_coroutine(); // optimization 74 74 75 75 assertf( src->last != 0, … … 91 91 coroutine_desc * dst = get_coroutine(cor); 92 92 93 93 if( unlikely(!dst->stack.base) ) { 94 94 create_stack(&dst->stack, dst->stack.size); 95 95 CtxStart(cor, CtxInvokeCoroutine); 96 96 } 97 97 98 98 // not resuming self ? 99 99 if ( src != dst ) { 100 100 assertf( dst->state != Halted , … … 103 103 src->name, src, dst->name, dst ); 104 104 105 105 // set last resumer 106 106 dst->last = src; 107 107 } // if 108 108 109 109 // always done for performance testing 110 110 CoroutineCtxSwitch( src, dst ); 111 111 } … … 114 114 coroutine_desc * src = this_coroutine(); // optimization 115 115 116 116 // not resuming self ? 117 117 if ( src != dst ) { 118 118 assertf( dst->state != Halted , … … 121 121 src->name, src, dst->name, dst ); 122 122 123 123 // set last resumer 124 124 dst->last = src; 125 125 } // if 126 126 127 127 // always done for performance testing 128 128 CoroutineCtxSwitch( src, dst ); 129 129 } -
src/libcfa/concurrency/kernel.c
rade20d0 r436c0de 311 311 // appropriate stack. 312 312 proc_cor_storage.__cor.state = Active; 313 314 313 main( &proc_cor_storage ); 314 proc_cor_storage.__cor.state = Halted; 315 315 316 316 // Main routine of the core returned, the core is now fully terminated … … 333 333 if( !thrd ) return; 334 334 335 assertf( thrd->next == NULL, "Expected null got %p", thrd->next );335 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 336 336 337 337 lock( &systemProcessor->proc.cltr->lock ); … … 577 577 578 578 void append( __thread_queue_t * this, thread_desc * t ) { 579 assert(this->tail != NULL);579 verify(this->tail != NULL); 580 580 *this->tail = t; 581 581 this->tail = &t->next; … … 599 599 600 600 void push( __condition_stack_t * this, __condition_criterion_t * t ) { 601 assert( !t->next );601 verify( !t->next ); 602 602 t->next = this->top; 603 603 this->top = t; -
src/libcfa/concurrency/kernel_private.h
rade20d0 r436c0de 22 22 23 23 #include "alarm.h" 24 25 #include "libhdr.h" 24 26 25 27 //----------------------------------------------------------------------------- … … 66 68 67 69 static inline void enable_interrupts_noRF() { 68 unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );69 assert( prev != (unsigned short) 0 );70 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 71 verify( prev != (unsigned short) 0 ); 70 72 } 71 73 72 74 static inline void enable_interrupts() { 73 unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );74 assert( prev != (unsigned short) 0 );75 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 76 verify( prev != (unsigned short) 0 ); 75 77 if( prev == 1 && this_processor->pending_preemption ) { 76 78 ScheduleInternal( this_processor->current_thread ); -
src/libcfa/concurrency/monitor
rade20d0 r436c0de 26 26 static inline void ?{}(monitor_desc * this) { 27 27 this->owner = NULL; 28 28 this->stack_owner = NULL; 29 29 this->recursion = 0; 30 30 } … … 33 33 monitor_desc ** m; 34 34 int count; 35 36 35 monitor_desc ** prev_mntrs; 36 unsigned short prev_count; 37 37 }; 38 38 -
src/libcfa/concurrency/monitor.c
rade20d0 r436c0de 56 56 else if( this->owner == thrd) { 57 57 //We already have the monitor, just not how many times we took it 58 assert( this->recursion > 0 );58 verify( this->recursion > 0 ); 59 59 this->recursion += 1; 60 60 } … … 78 78 lock( &this->lock ); 79 79 80 thread_desc * thrd = this_thread();81 82 80 LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion); 83 assertf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );81 verifyf( this_thread() == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion ); 84 82 85 83 //Leaving a recursion level, decrement the counter … … 167 165 //Check that everything is as expected 168 166 assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors ); 169 assertf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );170 assertf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );167 verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count ); 168 verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count ); 171 169 172 170 unsigned short count = this->monitor_count; … … 229 227 230 228 //Check that everything is as expected 231 assert( this->monitors );232 assert( this->monitor_count != 0 );229 verify( this->monitors ); 230 verify( this->monitor_count != 0 ); 233 231 234 232 unsigned short count = this->monitor_count; … … 278 276 279 277 //Check that everything is as expected 280 assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );281 assertf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );278 verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors ); 279 verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count ); 282 280 283 281 unsigned short count = this->monitor_count; … … 327 325 328 326 uintptr_t front( condition * this ) { 329 LIB_DEBUG_DO( 330 if( is_empty(this) ) { 331 abortf( "Attempt to access user data on an empty condition.\n" 332 "Possible cause is not checking if the condition is empty before reading stored data." ); 333 } 327 verifyf( !is_empty(this), 328 "Attempt to access user data on an empty condition.\n" 329 "Possible cause is not checking if the condition is empty before reading stored data." 334 330 ); 335 331 return this->blocked.head->user_info; … … 491 487 492 488 void append( __condition_blocked_queue_t * this, __condition_node_t * c ) { 493 assert(this->tail != NULL);489 verify(this->tail != NULL); 494 490 *this->tail = c; 495 491 this->tail = &c->next; -
src/libcfa/containers/maybe
rade20d0 r436c0de 10 10 // Created On : Wed May 24 14:43:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr May 25 16:36:00 201713 // Update Count : 112 // Last Modified On : Fri Jun 16 15:42:00 2017 13 // Update Count : 2 14 14 // 15 15 … … 46 46 bool ?!=?(maybe(T) this, zero_t); 47 47 48 /* Waiting for bug#11 to be fixed. 48 49 forall(otype T) 49 50 maybe(T) maybe_value(T value); … … 51 52 forall(otype T) 52 53 maybe(T) maybe_none(); 54 */ 53 55 54 56 forall(otype T) -
src/libcfa/containers/result
rade20d0 r436c0de 10 10 // Created On : Wed May 24 14:45:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr May 25 16:39:00 201713 // Update Count : 112 // Last Modified On : Fri Jun 16 15:41:00 2017 13 // Update Count : 2 14 14 // 15 15 … … 55 55 bool ?!=?(result(T, E) this, zero_t); 56 56 57 /* Wating for bug#11 to be fixed. 57 58 forall(otype T, otype E) 58 59 result(T, E) result_value(T value); … … 60 61 forall(otype T, otype E) 61 62 result(T, E) result_error(E error); 63 */ 62 64 63 65 forall(otype T, otype E) -
src/libcfa/containers/result.c
rade20d0 r436c0de 74 74 forall(otype T, otype E) 75 75 bool ?!=?(result(T, E) this, zero_t) { 76 return !this.has_value;76 return this.has_value; 77 77 } 78 78 … … 100 100 forall(otype T, otype E) 101 101 E get_error(result(T, E) * this) { 102 assertf( this->has_value, "attempt to get from result without error");102 assertf(!this->has_value, "attempt to get from result without error"); 103 103 return this->error; 104 104 } -
src/libcfa/libhdr/libdebug.h
rade20d0 r436c0de 18 18 19 19 #ifdef __CFA_DEBUG__ 20 21 20 #define LIB_DEBUG_DO(x) x 21 #define LIB_NO_DEBUG_DO(x) ((void)0) 22 22 #else 23 24 23 #define LIB_DEBUG_DO(x) ((void)0) 24 #define LIB_NO_DEBUG_DO(x) x 25 25 #endif 26 27 #if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__)) 28 #define verify(x) assert(x) 29 #define verifyf(x, ...) assertf(x, __VA_ARGS__) 30 #else 31 #define verify(x) 32 #define verifyf(x, ...) 33 #endif 34 26 35 27 36 #ifdef __cforall -
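The verify/verifyf macros introduced above give the runtime assertions that are active only in debug or verify builds and vanish otherwise, so the hot paths in kernel.c and monitor.c pay nothing in release builds. In the library they forward to assert and CFA's printf-style assertf; the fprintf/abort body used for verifyf in this standalone sketch is a stand-in assumption.

// Checked-build-only assertions: active when a debug/verify flag is defined
// and NDEBUG is not, otherwise they expand to nothing.
#include <cassert>
#include <cstdio>
#include <cstdlib>

#if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
        #define verify(x)       assert(x)
        #define verifyf(x, ...) do { if ( !(x) ) { std::fprintf( stderr, __VA_ARGS__ ); std::abort(); } } while (0)
#else
        #define verify(x)       ((void)0)
        #define verifyf(x, ...) ((void)0)
#endif

int main() {
        int count = 1;
        verify( count > 0 );                                     // checked only in verify/debug builds
        verifyf( count < 10, "count %d out of range\n", count );
        std::printf( "count = %d ok\n", count );
}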
src/tests/.expect/io.txt
rade20d0 r436c0de 4 4 123 5 5 6 x (1 x [2 x {3 x =4 x $5 x £6 x ¥7 x ¡8 x ¿9 x «10 7 1, x 2. x 3; x 4! x 5? x 6% x 7¢ x 8» x 9) x 10] x 11} x 8 x`1`x'2'x"3"x:4:x 5 x 6 x 9 7 10 x 11 8 12 x 13 9 14 x 15 10 16 x 17 x ( 1 ) x 2 , x 3 :x: 4 6 18 A 7 19 1 2 3 4 5 6 7 8 … … 18 30 abc, $xyz 19 31 20 v(27 v[27 v{27 $27 =27 £27 ¥27 ¡27 ¿27 «27 21 25, 25. 25: 25; 25! 25? 25% 25¢ 25» 25) 25] 25} 22 25'27 25`27 25"27 25 27 25 23 27 25 24 27 25 25 27 25 27 25 26 27 32 1, 2, 3, 4 33 1, $2, $3 ", $" 34 1 2 3 " " 35 1 2 3 36 12 3 37 123 38 1 23 39 1 2 3 40 1 2 3 4 " " 41 1, 2, 3, 4 ", " 42 1, 2, 3, 4 27 43 3, 4, a, 7.2 28 44 3, 4, a, 7.2 29 45 3 4 a 7.2 30 3 4 a 7.234a7.2 46 3 4 a 7.234a7.23 4 a 7.2 31 47 3-4-a-7.2^3^4-3-4-a-7.2 -
src/tests/Makefile.am
rade20d0 r436c0de 11 11 ## Created On : Sun May 31 09:08:15 2015 12 12 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Thu May 25 14:39:15201714 ## Update Count : 4 313 ## Last Modified On : Thu Jun 8 07:41:43 2017 14 ## Update Count : 44 15 15 ############################################################################### 16 16 … … 20 20 21 21 if BUILD_CONCURRENCY 22 concurrent =yes23 quick_test += coroutine thread monitor24 concurrent_test =coroutine thread monitor multi-monitor sched-int-barge sched-int-block sched-int-disjoint sched-int-wait sched-ext sched-ext-multi preempt22 concurrent = yes 23 quick_test += coroutine thread monitor 24 concurrent_test = coroutine thread monitor multi-monitor sched-int-barge sched-int-block sched-int-disjoint sched-int-wait sched-ext sched-ext-multi preempt 25 25 else 26 26 concurrent=no … … 57 57 @+python test.py --debug=${debug} --concurrent=${concurrent} ${concurrent_test} 58 58 59 .dummy : .dummy.c 59 .dummy : .dummy.c @CFA_BINDIR@/@CFA_NAME@ 60 60 ${CC} ${BUILD_FLAGS} -XCFA -n ${<} -o ${@} #don't use CFLAGS, this rule is not a real test 61 61 62 dtor-early-exit-ERR1: dtor-early-exit.c 62 63 % : %.c @CFA_BINDIR@/@CFA_NAME@ 64 ${CC} ${CFLAGS} ${<} -o ${@} 65 66 dtor-early-exit-ERR1: dtor-early-exit.c @CFA_BINDIR@/@CFA_NAME@ 63 67 ${CC} ${CFLAGS} -DERR1 ${<} -o ${@} 64 68 65 dtor-early-exit-ERR2: dtor-early-exit.c 69 dtor-early-exit-ERR2: dtor-early-exit.c @CFA_BINDIR@/@CFA_NAME@ 66 70 ${CC} ${CFLAGS} -DERR2 ${<} -o ${@} 67 71 68 declarationSpecifier: declarationSpecifier.c 72 declarationSpecifier: declarationSpecifier.c @CFA_BINDIR@/@CFA_NAME@ 69 73 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 70 74 71 gccExtensions : gccExtensions.c 75 gccExtensions : gccExtensions.c @CFA_BINDIR@/@CFA_NAME@ 72 76 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 73 77 74 extension : extension.c 78 extension : extension.c @CFA_BINDIR@/@CFA_NAME@ 75 79 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 76 80 77 attributes : attributes.c 81 attributes : attributes.c @CFA_BINDIR@/@CFA_NAME@ 78 82 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 79 83 80 KRfunctions : KRfunctions.c 84 KRfunctions : KRfunctions.c @CFA_BINDIR@/@CFA_NAME@ 81 85 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 82 86 83 gmp : gmp.c 87 gmp : gmp.c @CFA_BINDIR@/@CFA_NAME@ 84 88 ${CC} ${CFLAGS} -lgmp ${<} -o ${@} 85 89 86 memberCtors-ERR1: memberCtors.c 90 memberCtors-ERR1: memberCtors.c @CFA_BINDIR@/@CFA_NAME@ 87 91 ${CC} ${CFLAGS} -DERR1 ${<} -o ${@} 88 92 89 completeTypeError : completeTypeError.c 93 completeTypeError : completeTypeError.c @CFA_BINDIR@/@CFA_NAME@ 90 94 ${CC} ${CFLAGS} -DERR1 ${<} -o ${@} -
src/tests/Makefile.in
rade20d0 r436c0de 661 661 @+python test.py --debug=${debug} --concurrent=${concurrent} ${concurrent_test} 662 662 663 .dummy : .dummy.c 663 .dummy : .dummy.c @CFA_BINDIR@/@CFA_NAME@ 664 664 ${CC} ${BUILD_FLAGS} -XCFA -n ${<} -o ${@} #don't use CFLAGS, this rule is not a real test 665 665 666 dtor-early-exit-ERR1: dtor-early-exit.c 666 % : %.c @CFA_BINDIR@/@CFA_NAME@ 667 ${CC} ${CFLAGS} ${<} -o ${@} 668 669 dtor-early-exit-ERR1: dtor-early-exit.c @CFA_BINDIR@/@CFA_NAME@ 667 670 ${CC} ${CFLAGS} -DERR1 ${<} -o ${@} 668 671 669 dtor-early-exit-ERR2: dtor-early-exit.c 672 dtor-early-exit-ERR2: dtor-early-exit.c @CFA_BINDIR@/@CFA_NAME@ 670 673 ${CC} ${CFLAGS} -DERR2 ${<} -o ${@} 671 674 672 declarationSpecifier: declarationSpecifier.c 675 declarationSpecifier: declarationSpecifier.c @CFA_BINDIR@/@CFA_NAME@ 673 676 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 674 677 675 gccExtensions : gccExtensions.c 678 gccExtensions : gccExtensions.c @CFA_BINDIR@/@CFA_NAME@ 676 679 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 677 680 678 extension : extension.c 681 extension : extension.c @CFA_BINDIR@/@CFA_NAME@ 679 682 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 680 683 681 attributes : attributes.c 684 attributes : attributes.c @CFA_BINDIR@/@CFA_NAME@ 682 685 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 683 686 684 KRfunctions : KRfunctions.c 687 KRfunctions : KRfunctions.c @CFA_BINDIR@/@CFA_NAME@ 685 688 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 686 689 687 gmp : gmp.c 690 gmp : gmp.c @CFA_BINDIR@/@CFA_NAME@ 688 691 ${CC} ${CFLAGS} -lgmp ${<} -o ${@} 689 692 690 memberCtors-ERR1: memberCtors.c 693 memberCtors-ERR1: memberCtors.c @CFA_BINDIR@/@CFA_NAME@ 691 694 ${CC} ${CFLAGS} -DERR1 ${<} -o ${@} 692 695 693 completeTypeError : completeTypeError.c 696 completeTypeError : completeTypeError.c @CFA_BINDIR@/@CFA_NAME@ 694 697 ${CC} ${CFLAGS} -DERR1 ${<} -o ${@} 695 698 -
src/tests/coroutine.c
rade20d0 r436c0de 1 // 2 // Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo 3 // 4 // The contents of this file are covered under the licence agreement in the 5 // file "LICENCE" distributed with Cforall. 6 // 7 // fibonacci.c -- 8 // 9 // Author : Thierry Delisle 10 // Created On : Thu Jun 8 07:29:37 2017 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jun 8 07:37:12 2017 13 // Update Count : 5 14 // 15 1 16 #include <fstream> 2 17 #include <coroutine> 3 18 4 19 coroutine Fibonacci { 5 int fn;// used for communication20 int fn; // used for communication 6 21 }; 7 22 8 void ?{}( Fibonacci* this) {9 23 void ?{}( Fibonacci * this ) { 24 this->fn = 0; 10 25 } 11 26 12 void main( Fibonacci* this) {13 int fn1, fn2;// retained between resumes14 this->fn = 0; 15 16 suspend();// return to last resume27 void main( Fibonacci * this ) { 28 int fn1, fn2; // retained between resumes 29 this->fn = 0; // case 0 30 fn1 = this->fn; 31 suspend(); // return to last resume 17 32 18 this->fn = 1; 19 20 21 suspend();// return to last resume33 this->fn = 1; // case 1 34 fn2 = fn1; 35 fn1 = this->fn; 36 suspend(); // return to last resume 22 37 23 for ( ;; ) { 24 25 26 27 suspend();// return to last resume28 } 38 for ( ;; ) { // general case 39 this->fn = fn1 + fn2; 40 fn2 = fn1; 41 fn1 = this->fn; 42 suspend(); // return to last resume 43 } // for 29 44 } 30 45 31 int next( Fibonacci* this) {32 resume(this);// transfer to last suspend33 46 int next( Fibonacci * this ) { 47 resume( this ); // transfer to last suspend 48 return this->fn; 34 49 } 35 50 36 51 int main() { 37 Fibonacci f1, f2; 38 for ( int i = 1; i <= 10; i += 1 ) { 39 sout | next(&f1) | ' ' | next(&f2) | endl; 40 } 52 Fibonacci f1, f2; 53 for ( int i = 1; i <= 10; i += 1 ) { 54 sout | next( &f1 ) | ' ' | next( &f2 ) | endl; 55 } // for 56 } 41 57 42 return 0; 43 } 58 // Local Variables: // 59 // tab-width: 4 // 60 // compile-command: "cfa fibonacci.c" // 61 // End: // -
src/tests/identity.c
rade20d0 r436c0de 7 7 // identity.c -- 8 8 // 9 // Author : Richard C. Bilson9 // Author : Peter A. Buhr 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Mar 8 22:15:08 201613 // Update Count : 1 312 // Last Modified On : Thu Jun 8 08:21:32 2017 13 // Update Count : 18 14 14 // 15 15 … … 32 32 sout | "double\t\t\t" | identity( 4.1 ) | endl; 33 33 sout | "long double\t\t" | identity( 4.1l ) | endl; 34 sout | "float _Complex\t\t" | identity( -4.1F-2.0iF ) | endl; 35 sout | "double _Complex\t\t" | identity( -4.1D-2.0iD ) | endl; 36 sout | "long double _Complex\t" | identity( -4.1L-2.0iL ) | endl; 34 37 } 35 38 -
src/tests/io.c
rade20d0 r436c0de 10 10 // Created On : Wed Mar 2 16:56:02 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Mar 21 22:36:06201713 // Update Count : 4812 // Last Modified On : Thu Jun 8 09:52:10 2017 13 // Update Count : 51 14 14 // 15 15 … … 17 17 18 18 int main() { 19 char c; 19 char c; // basic types 20 20 short int si; 21 21 unsigned short int usi; … … 32 32 double _Complex dc; 33 33 long double _Complex ldc; 34 char s1[10], s2[10]; 34 enum { size = 10 }; 35 char s1[size], s2[size]; 35 36 36 37 int x = 3, y = 5, z = 7; … … 41 42 sout | endl; 42 43 43 ifstream in; // create / open file 44 sout 45 // opening delimiters 46 | "x (" | 1 47 | "x [" | 2 48 | "x {" | 3 49 | "x =" | 4 50 | "x $" | 5 51 | "x £" | 6 52 | "x ¥" | 7 53 | "x ¡" | 8 54 | "x ¿" | 9 55 | "x «" | 10 56 | endl; 57 sout 58 // closing delimiters 59 | 1 | ", x" 60 | 2 | ". x" 61 | 3 | "; x" 62 | 4 | "! x" 63 | 5 | "? x" 64 | 6 | "% x" 65 | 7 | "¢ x" 66 | 8 | "» x" 67 | 9 | ") x" 68 | 10 | "] x" 69 | 11 | "} x" 70 | endl; 71 sout 72 // opening-closing delimiters 73 | "x`" | 1 | "`x'" | 2 74 | "'x\"" | 3 | "\"x:" | 4 75 | ":x " | 5 | " x\t" | 6 76 | "\tx\f" | 7 | "\fx\v" | 8 77 | "\vx\n" | 9 | "\nx\r" | 10 78 | "\rx" | 79 endl; 80 sout | "x ( " | 1 | " ) x" | 2 | " , x" | 3 | " :x: " | 4 | endl; 81 82 ifstream in; // create / open file 44 83 open( &in, "io.data", "r" ); 45 84 46 &in | &c 47 | &si | &usi | &i | &ui | &li | &uli | &lli | &ulli 48 | &f | &d | &ld 49 | &fc | &dc | &ldc 50 | cstr( s1 ) | cstr( s2, 10 );// C string, length unchecked and checked85 &in | &c // character 86 | &si | &usi | &i | &ui | &li | &uli | &lli | &ulli // integral 87 | &f | &d | &ld // floating point 88 | &fc | &dc | &ldc // floating-point complex 89 | cstr( s1 ) | cstr( s2, size ); // C string, length unchecked and checked 51 90 52 sout | c | ' ' | endl 53 | si | usi | i | ui | li | uli | lli | ulli | endl// integral54 | f | d | ld | endl// floating point55 | fc | dc | ldc | endl;// complex91 sout | c | ' ' | endl // character 92 | si | usi | i | ui | li | uli | lli | ulli | endl // integral 93 | f | d | ld | endl // floating point 94 | fc | dc | ldc | endl; // complex 56 95 sout | endl; 57 sout | f | "" | d | "" | ld | endl 58 | sepDisable | fc | dc | ldc | sepEnable | endl// complex without separator59 | sepOn | s1 | sepOff | s2 | endl// local separator removal60 | s1 | "" | s2 | endl;// C string without separator96 sout | f | "" | d | "" | ld | endl // floating point without separator 97 | sepDisable | fc | dc | ldc | sepEnable | endl // complex without separator 98 | sepOn | s1 | sepOff | s2 | endl // local separator removal 99 | s1 | "" | s2 | endl; // C string without separator 61 100 sout | endl; 62 101 63 sepSet( sout, ", $" ); 102 sepSet( sout, ", $" ); // change separator, maximum of 15 characters 64 103 sout | f | d | ld | endl 65 66 104 | fc | dc | ldc | endl 105 | s1 | s2 | endl; 67 106 sout | endl; 107 108 [int, int] t1 = [1, 2], t2 = [3, 4]; 109 sout | t1 | t2 | endl; // print tuple 110 68 111 sepSet( sout, " " ); 112 sepSet( sout, ", $" ); // set separator from " " to ", $" 113 sout | 1 | 2 | 3 | " \"" | sepGet( sout ) | "\"" | endl; 114 sepSet( sout, " " ); // reset separator to " " 115 sout | 1 | 2 | 3 | " \"" | sepGet( sout ) | "\"" | endl; 69 116 70 sout 71 // opening delimiters 72 | "v(" | 27 73 | "v[" | 27 74 | "v{" | 27 75 | "$" | 27 76 | "=" | 27 77 | "£" | 27 78 | "¥" | 27 79 | "¡" | 27 80 | "¿" | 27 81 | "«" | 27 82 | endl 83 // closing delimiters 84 | 25 | "," 85 | 25 | "." 
86 | 25 | ":" 87 | 25 | ";" 88 | 25 | "!" 89 | 25 | "?" 90 | 25 | "%" 91 | 25 | "¢" 92 | 25 | "»" 93 | 25 | ")" 94 | 25 | "]" 95 | 25 | "}" 96 | endl 97 // opening-closing delimiters 98 | 25 | "'" | 27 99 | 25 | "`" | 27 100 | 25 | "\"" | 27 101 | 25 | " " | 27 102 | 25 | "\f" | 27 103 | 25 | "\n" | 27 104 | 25 | "\r" | 27 105 | 25 | "\t" | 27 106 | 25 | "\v" | 27 107 | endl; 117 sout | sepOn | 1 | 2 | 3 | sepOn | endl; // separator at start of line 118 sout | 1 | sepOff | 2 | 3 | endl; // locally turn off implicit separator 108 119 109 [int, int, const char *, double] t = { 3, 4, "a", 7.2 }; 120 sout | sepDisable | 1 | 2 | 3 | endl; // globally turn off implicit separation 121 sout | 1 | sepOn | 2 | 3 | endl; // locally turn on implicit separator 122 sout | sepEnable | 1 | 2 | 3 | endl; // globally turn on implicit separation 123 124 sepSetTuple( sout, " " ); // set tuple separator from ", " to " " 125 sout | t1 | t2 | " \"" | sepGetTuple( sout ) | "\"" | endl; 126 sepSetTuple( sout, ", " ); // reset tuple separator to ", " 127 sout | t1 | t2 | " \"" | sepGetTuple( sout ) | "\"" | endl; 128 129 sout | t1 | t2 | endl; // print tuple 130 131 [int, int, const char *, double] t3 = { 3, 4, "a", 7.2 }; 110 132 sout | [ 3, 4, "a", 7.2 ] | endl; 111 sout | t | endl;133 sout | t3 | endl; 112 134 sepSetTuple( sout, " " ); 113 sout | t | endl;114 sout | sepOn | t | sepDisable | t | sepEnable | t| endl;135 sout | t3 | endl; 136 sout | sepOn | t3 | sepDisable | t3 | sepEnable | t3 | endl; 115 137 sepSet( sout, "^" ); 116 138 sepSetTuple( sout, "-" ); 117 sout | t | 3 | 4 | t| endl;139 sout | t3 | 3 | 4 | t3 | endl; 118 140 } 119 141 -
tools/cfa.nanorc
rade20d0 r436c0de 33 33 ## Update/Redistribute 34 34 # GCC builtins 35 ##color cyan "__attribute__[[:space:]]*\(\([^)]*\)\)" "__(aligned|asm|builtin|hidden|inline|packed|restrict|section|typeof|weak)__" 35 color cyan "__attribute__[[:space:]]*\(\([^()]*(\([^()]*\)[^()]*)*\)\)" 36 ##color cyan "__(aligned|asm|builtin|hidden|inline|packed|restrict|section|typeof|weak)__" 36 37 37 38 # Preprocesser Directives