Changeset 1ed33fed
- Timestamp:
- May 11, 2017, 2:49:34 PM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 3476a0d
- Parents:
- 6ac2ada (diff), 6a4f3d4 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 4 added
- 2 deleted
- 36 edited
Legend:
- Unmodified
- Added
- Removed
-
Jenkinsfile
r6ac2ada r1ed33fed 274 274 //Run the tests from the tests directory 275 275 if ( do_alltests ) { 276 sh 'make -C src/tests all-tests debug=yes '277 sh 'make -C src/tests all-tests debug=no '276 sh 'make -C src/tests all-tests debug=yes --no-print-directory' 277 sh 'make -C src/tests all-tests debug=no --no-print-directory' 278 278 } 279 279 else { 280 sh 'make -C src/tests '280 sh 'make -C src/tests --no-print-directory' 281 281 } 282 282 } -
doc/LaTeXmacros/common.tex
r6ac2ada r1ed33fed 251 251 \lstdefinelanguage{CFA}[ANSI]{C}{ 252 252 morekeywords={_Alignas,_Alignof,__alignof,__alignof__,asm,__asm,__asm__,_At,_Atomic,__attribute,__attribute__,auto, 253 _Bool,catch,catchResume,choose,_Complex,__complex,__complex__,__const,__const__, disable,dtype,enable,__extension__,254 fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue, _Noreturn,one_t,otype,restrict,_Static_assert,255 _Thread_local,throw,throwResume,trait,try,ttype,typeof,__typeof,__typeof__,zero_t},253 _Bool,catch,catchResume,choose,_Complex,__complex,__complex__,__const,__const__,coroutine,disable,dtype,enable,__extension__, 254 fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue,monitor,mutex,_Noreturn,one_t,otype,restrict,_Static_assert, 255 thread,_Thread_local,throw,throwResume,trait,try,ttype,typeof,__typeof,__typeof__,zero_t}, 256 256 }% 257 257 -
doc/proposals/concurrency/Makefile
r6ac2ada r1ed33fed 10 10 concurrency \ 11 11 style \ 12 cfa-format \ 12 13 glossary \ 13 14 } -
doc/proposals/concurrency/concurrency.tex
r6ac2ada r1ed33fed 9 9 % math escape $...$ (dollar symbol) 10 10 11 \documentclass[ twoside,11pt]{article}11 \documentclass[letterpaper,12pt,titlepage,oneside,final]{book} 12 12 13 13 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% … … 24 24 \usepackage{graphicx} 25 25 \usepackage{tabularx} 26 \usepackage{multicol} 26 27 \usepackage[acronym]{glossaries} 27 \usepackage{varioref} % extended references 28 \usepackage{inconsolata} 28 \usepackage{varioref} 29 29 \usepackage{listings} % format program code 30 30 \usepackage[flushmargin]{footmisc} % support label/reference in footnote … … 62 62 \newcommand{\cit}{\textsuperscript{[Citation Needed]}\xspace} 63 63 \newcommand{\code}[1]{\lstinline[language=CFA]{#1}} 64 \newcommand{\pseudo}[1]{\lstinline[language=Pseudo]{#1}} 64 \newcommand{\pscode}[1]{\lstinline[language=pseudo]{#1}} 65 \newcommand{\TODO}{{\Textbf{TODO}}} 65 66 66 67 \input{glossary} … … 99 100 % ### # # # # # ####### 100 101 101 \ section{Introduction}102 \chapter{Introduction} 102 103 This proposal provides a minimal core concurrency API that is both simple, efficient and can be reused to build higher-level features. The simplest possible concurrency core is a thread and a lock but this low-level approach is hard to master. An easier approach for users is to support higher-level constructs as the basis of the concurrency in \CFA. Indeed, for highly productive parallel programming, high-level approaches are much more popular~\cite{HPP:Study}. Examples are task based, message passing and implicit threading. 103 104 … … 112 113 % ##### ####### # # ##### ##### # # # # ####### # # ##### # 113 114 114 \ section{Concurrency}115 \chapter{Concurrency} 115 116 Several tool can be used to solve concurrency challenges. Since these challenges always appear with the use of mutable shared-state, some languages and libraries simply disallow mutable shared-state (Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka (Scala)~\cite{Akka}). 
In these paradigms, interaction among concurrent objects relies on message passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms that closely relate to networking concepts (channels\cit for example). However, in languages that use routine calls as their core abstraction mechanism, these approaches force a clear distinction between concurrent and non-concurrent paradigms (i.e., message passing versus routine call), which in turn means that, in order to be effective, programmers need to learn two sets of design patterns. This distinction can be hidden away in library code, but effective use of the library still has to take both paradigms into account. Approaches based on shared memory are more closely related to non-concurrent paradigms since they often rely on basic constructs like routine calls and objects. At a lower level these can be implemented as locks and atomic operations. Many such mechanisms have been proposed, including semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}. However, for productivity reasons it is desirable to have a higher-level construct be the core concurrency paradigm~\cite{HPP:Study}. An approach that is worth mentioning because it is gaining in popularity is transactional memory~\cite{Dice10}[Check citation]. While this approach is even pursued by system languages like \CC\cit, the performance and feature set is currently too restrictive to add such a paradigm to a language like C or \CC\cit, which is why it was rejected as the core paradigm for concurrency in \CFA. One of the most natural, elegant, and efficient mechanisms for synchronization and communication, especially for shared memory systems, is the \emph{monitor}. Monitors were first proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}. 
Many programming languages---e.g., Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}---provide monitors as explicit language constructs. In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as semaphores or locks to simulate monitors. For these reasons, this project proposes monitors as the core concurrency construct. 116 117 … … 123 124 % # # ####### # # ### # ####### # # ##### 124 125 125 \s ubsection{Monitors}126 \section{Monitors} 126 127 A monitor is a set of routines that ensure mutual exclusion when accessing shared state. This concept is generally associated with Object-Oriented Languages like Java~\cite{Java} or \uC~\cite{uC++book} but does not strictly require OOP semantics. The only requirements is the ability to declare a handle to a shared object and a set of routines that act on it : 127 \begin{ lstlisting}128 \begin{cfacode} 128 129 typedef /*some monitor type*/ monitor; 129 130 int f(monitor & m); … … 133 134 f(m); 134 135 } 135 \end{ lstlisting}136 \end{cfacode} 136 137 137 138 % ##### # # # … … 143 144 % ##### # # ####### ####### 144 145 145 \subs ubsection{Call semantics} \label{call}146 \subsection{Call semantics} \label{call} 146 147 The above monitor example displays some of the intrinsic characteristics. Indeed, it is necessary to use pass-by-reference over pass-by-value for monitor routines. This semantics is important because at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied. Therefore, monitors are implicitly non-copyable. 147 148 148 149 Another aspect to consider is when a monitor acquires its mutual exclusion. 
For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry. Pass through can be both generic helper routines (\code{swap}, \code{sort}, etc.) or specific helper routines like the following to implement an atomic counter : 149 150 150 \begin{ lstlisting}151 m utex struct counter_t { /*...see section §\ref{data}§...*/ };151 \begin{cfacode} 152 monitor counter_t { /*...see section $\ref{data}$...*/ }; 152 153 153 154 void ?{}(counter_t & nomutex this); //constructor … … 156 157 //need for mutex is platform dependent here 157 158 void ?{}(size_t * this, counter_t & mutex cnt); //conversion 158 \end{ lstlisting}159 \end{cfacode} 159 160 160 161 Here, the constructor(\code{?\{\}}) uses the \code{nomutex} keyword to signify that it does not acquire the monitor mutual exclusion when constructing. This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion. The prefix increment operator uses \code{mutex} to protect the incrementing process from race conditions. Finally, there is a conversion operator from \code{counter_t} to \code{size_t}. This conversion may or may not require the \code{mutex} key word depending on whether or not reading an \code{size_t} is an atomic operation or not. 161 162 162 Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without quualifiers \code{void foo(counter_t & this)} then one could argue that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. It can be argued that \code{nomutex} is the more "normal" behaviour, the \code{nomutex} keyword effectively stating explicitly that "this routine has nothing special". 
Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routine \code{void foo(counter_t & this)}. Mandatory keywords would also have the added benefice of being self-documented but at the cost of extra typing. In the end, which solution should be picked is still up for debate. For the reminder of this proposal, the explicit approach is used for clarity. 163 164 The next semantic decision is to establish when mutex/nomutex may be used as a type qualifier. Consider the following declarations: 165 \begin{lstlisting} 163 Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without quualifiers \code{void foo(counter_t & this)} then one could argue that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. It can be argued that \code{nomutex} is the more "normal" behaviour, the \code{nomutex} keyword effectively stating explicitly that "this routine has nothing special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routine \code{void foo(counter_t & this)}. Mandatory keywords would also have the added benefice of being self-documented but at the cost of extra typing. However, since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred. For this reason, \CFA only has the \code{mutex} keyword. 164 165 166 The next semantic decision is to establish when \code{mutex} may be used as a type qualifier. 
Consider the following declarations: 167 \begin{cfacode} 166 168 int f1(monitor & mutex m); 167 169 int f2(const monitor & mutex m); … … 169 171 int f4(monitor *[] mutex m); 170 172 int f5(graph(monitor*) & mutex m); 171 \end{lstlisting} 172 The problem is to indentify which object(s) should be acquired. Furthermore, each object needs to be acquired only once. In the case of simple routines like \code{f1} and \code{f2} it is easy to identify an exhaustive list of objects to acquire on entry. Adding indirections (\code{f3}) still allows the compiler and programmer to indentify which object is acquired. However, adding in arrays (\code{f4}) makes it much harder. Array lengths are not necessarily known in C and even then making sure we only acquire objects once becomes also none trivial. This can be extended to absurd limits like \code{f5}, which uses a graph of monitors. To keep everyone as sane as possible~\cite{Chicken}, this projects imposes the requirement that a routine may only acquire one monitor per parameter and it must be the type of the parameter (ignoring potential qualifiers and indirections). Also note that while routine \code{f3} can be supported, meaning that monitor \code{**m} is be acquired, passing an array to this routine would be type safe and yet result in undefined behavior because only the first element of the array is acquired. However, this ambiguity is part of the C type system with respects to arrays. For this reason, it would also be reasonnable to disallow mutex in the context where arrays may be passed. 173 \end{cfacode} 174 The problem is to indentify which object(s) should be acquired. Furthermore, each object needs to be acquired only once. In the case of simple routines like \code{f1} and \code{f2} it is easy to identify an exhaustive list of objects to acquire on entry. Adding indirections (\code{f3}) still allows the compiler and programmer to indentify which object is acquired. 
However, adding in arrays (\code{f4}) makes it much harder. Array lengths are not necessarily known in C and even then making sure we only acquire objects once becomes also none trivial. This can be extended to absurd limits like \code{f5}, which uses a graph of monitors. To keep everyone as sane as possible~\cite{Chicken}, this projects imposes the requirement that a routine may only acquire one monitor per parameter and it must be the type of the parameter with one level of indirection (ignoring potential qualifiers). Also note that while routine \code{f3} can be supported, meaning that monitor \code{**m} is be acquired, passing an array to this routine would be type safe and yet result in undefined behavior because only the first element of the array is acquired. However, this ambiguity is part of the C type system with respects to arrays. For this reason, \code{mutex} is disallowed in the context where arrays may be passed. 175 176 Finally, for convenience, monitors support multiple acquireing, that is acquireing a monitor while already holding it does not cause a deadlock. It simply increments an internal counter which is then used to release the monitor after the number of acquires and releases match up. 173 177 174 178 % ###### # ####### # … … 180 184 % ###### # # # # # 181 185 182 \subs ubsection{Data semantics} \label{data}186 \subsection{Data semantics} \label{data} 183 187 Once the call semantics are established, the next step is to establish data semantics. Indeed, until now a monitor is used simply as a generic handle but in most cases monitors contian shared data. This data should be intrinsic to the monitor declaration to prevent any accidental use of data without its appropriate protection. 
For example, here is a complete version of the counter showed in section \ref{call}: 184 \begin{ lstlisting}185 m utex structcounter_t {188 \begin{cfacode} 189 monitor counter_t { 186 190 int value; 187 191 }; 188 192 189 void ?{}(counter_t & nomutexthis) {193 void ?{}(counter_t & this) { 190 194 this.cnt = 0; 191 195 } … … 199 203 *this = (int)cnt; 200 204 } 201 \end{ lstlisting}205 \end{cfacode} 202 206 203 207 This simple counter is used as follows: 204 208 \begin{center} 205 209 \begin{tabular}{c @{\hskip 0.35in} c @{\hskip 0.35in} c} 206 \begin{ lstlisting}210 \begin{cfacode} 207 211 //shared counter 208 212 counter_t cnt; … … 214 218 ... 215 219 thread N : cnt++; 216 \end{ lstlisting}220 \end{cfacode} 217 221 \end{tabular} 218 222 \end{center} 219 223 220 224 Notice how the counter is used without any explicit synchronisation and yet supports thread-safe semantics for both reading and writting. Unlike object-oriented monitors, where calling a mutex member \emph{implicitly} acquires mutual-exclusion, \CFA uses an explicit mechanism to acquire mutual-exclusion. A consequence of this approach is that it extends to multi-monitor calls. 221 \begin{ lstlisting}225 \begin{cfacode} 222 226 int f(MonitorA & mutex a, MonitorB & mutex b); 223 227 … … 225 229 MonitorB b; 226 230 f(a,b); 227 \end{ lstlisting}231 \end{cfacode} 228 232 This code acquires both locks before entering the critical section, called \emph{\gls{group-acquire}}. In practice, writing multi-locking routines that do not lead to deadlocks is tricky. Having language support for such a feature is therefore a significant asset for \CFA. In the case presented above, \CFA guarantees that the order of aquisition is consistent across calls to routines using the same monitors as arguments. However, since \CFA monitors use multi-acquisition locks, users can effectively force the acquiring order. 
For example, notice which routines use \code{mutex}/\code{nomutex} and how this affects aquiring order : 229 \begin{ lstlisting}233 \begin{cfacode} 230 234 void foo(A & mutex a, B & mutex b) { //acquire a & b 231 235 //... 232 236 } 233 237 234 void bar(A & mutex a, B & nomutexb) { //acquire a238 void bar(A & mutex a, B & /*nomutex*/ b) { //acquire a 235 239 //... 236 240 foo(a, b); //acquire b … … 238 242 } 239 243 240 void baz(A & nomutexa, B & mutex b) { //acquire b244 void baz(A & /*nomutex*/ a, B & mutex b) { //acquire b 241 245 //... 242 246 foo(a, b); //acquire a 243 247 //... 244 248 } 245 \end{ lstlisting}246 247 The multi-acquisition monitor lock allows a monitor lock to be acquired by both \code{bar} or \code{baz} and acquired again in \code{foo}. In the calls to \code{bar} and \code{baz} the monitors are acquired in opposite order. such use leads to nested monitor call problems~\cite{Lister77}, which is a specific implementation of the lock acquiring order problem. In the example above, the user uses implicit ordering in the case of function \code{foo} but explicit ordering in the case of \code{bar} and \code{baz}. This subtle mistake means that calling these routines concurrently may lead to deadlock and is therefore undefined behavior. As shown on several occasion\cit, solving this problem requires :249 \end{cfacode} 250 251 The multi-acquisition monitor lock allows a monitor lock to be acquired by both \code{bar} or \code{baz} and acquired again in \code{foo}. In the calls to \code{bar} and \code{baz} the monitors are acquired in opposite order. such use leads to nested monitor call problems~\cite{Lister77}, which is a more specific variation of the lock acquiring order problem. In the example above, the user uses implicit ordering in the case of function \code{foo} but explicit ordering in the case of \code{bar} and \code{baz}. 
This subtle mistake means that calling these routines concurrently may lead to deadlock and is therefore undefined behavior. As shown on several occasion\cit, solving this problem requires : 248 252 \begin{enumerate} 249 253 \item Dynamically tracking of the monitor-call order. … … 269 273 % # ####### ####### # # # ####### # # # # 270 274 271 \subs ubsection{Implementation Details: Interaction with polymorphism}275 \subsection{Implementation Details: Interaction with polymorphism} 272 276 At first glance, interaction between monitors and \CFA's concept of polymorphism seems complex to support. However, it is shown that entry-point locking can solve most of the issues. 273 277 … … 279 283 \CFA & pseudo-code & pseudo-code \\ 280 284 \hline 281 \begin{ lstlisting}282 void foo(monitor & mutex a){285 \begin{cfacode}[tabsize=3] 286 void foo(monitor& mutex a){ 283 287 284 288 … … 297 301 298 302 } 299 \end{ lstlisting} &\begin{lstlisting}303 \end{cfacode} & \begin{pseudo}[tabsize=3] 300 304 foo(& a) { 301 305 … … 315 319 release(a); 316 320 } 317 \end{ lstlisting} &\begin{lstlisting}321 \end{pseudo} & \begin{pseudo}[tabsize=3] 318 322 foo(& a) { 319 323 //called routine … … 333 337 334 338 } 335 \end{ lstlisting}339 \end{pseudo} 336 340 \end{tabular} 337 341 \end{center} 338 342 339 First of all, interaction between \code{otype} polymorphism and monitors is impossible since monitors do not support copying. Therefore, the main question is how to support \code{dtype} polymorphism. Since a monitor's main purpose is to ensure mutual exclusion when accessing shared data, this implies that mutual exclusion is only required for routines that do in fact access shared data. However, since \code{dtype} polymorphism always handles incomplete types (by definition), no \code{dtype} polymorphic routine can access shared data since the data requires knowledge about the type. 
Therefore, the only concern when combining \code{dtype} polymorphism and monitors is to protect access to routines. \Gls{callsite-locking} would require a significant amount of work, since any \code{dtype} routine may have to obtain some lock before calling a routine, depending on whether or not the type passed is a monitor. However, with \gls{entry-point-locking} calling a monitor routine becomes exactly the same as calling it from anywhere else. 340 343 First of all, interaction between \code{otype} polymorphism and monitors is impossible since monitors do not support copying. Therefore, the main question is how to support \code{dtype} polymorphism. Since a monitor's main purpose is to ensure mutual exclusion when accessing shared data, this implies that mutual exclusion is only required for routines that do in fact access shared data. However, since \code{dtype} polymorphism always handles incomplete types (by definition), no \code{dtype} polymorphic routine can access shared data since the data requires knowledge about the type. Therefore, the only concern when combining \code{dtype} polymorphism and monitors is to protect access to routines. \Gls{callsite-locking} would require a significant amount of work, since any \code{dtype} routine may have to obtain some lock before calling a routine, depending on whether or not the type passed is a monitor. However, with \gls{entry-point-locking} calling a monitor routine becomes exactly the same as calling it from anywhere else. Note that the \code{mutex} keyword relies on the resolver, which mean that in cases where generic monitor routines is actually desired, writing mutex routine is possible with the proper trait. 
341 344 342 345 … … 349 352 % ### # # # ### ##### ##### # # ####### ###### 350 353 351 \subsection{Internal scheduling} \label{insched} 352 353 \begin{center} 354 \begin{tabular}{ c @{\hskip 0.65in} c } 355 \begin{lstlisting}[language=Pseudo] 354 \section{Internal scheduling} \label{insched} 355 In addition to mutual exclusion, the monitors at the core of \CFA's concurrency can also be used to achieve synchronisation. With monitors, this is generally achieved with internal or external scheduling as in\cit. Since internal scheduling of single monitors is mostly a solved problem, this proposal concentraits on extending internal scheduling to multiple monitors at once. Indeed, like the \gls{group-acquire} semantics, internal scheduling extends to multiple monitors at once in a way that is natural to the user but requires additional complexity on the implementation side. 356 357 First, Here is a simple example of such a technique : 358 359 \begin{cfacode} 360 monitor A { 361 condition e; 362 } 363 364 void foo(A & mutex a) { 365 // ... 366 // We need someone else to do something now 367 wait(a.e); 368 // ... 369 } 370 371 void bar(A & mutex a) { 372 // Do the thing foo is waiting on 373 // ... 374 // Signal foo it's done 375 signal(a.e); 376 } 377 \end{cfacode} 378 379 Note that in \CFA, \code{condition} have no particular need to be stored inside a monitor, beyond any software engineering reasons. Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering. An important aspect to take into account here is that \CFA does not allow barging, which means that once function \code{bar} releases the monitor, foo is guaranteed to resume immediately after (unless some other function waited on the same condition). This guarantees offers the benefit of not having to loop arount waits in order to guarantee that a condition is still met. 
The main reason \CFA offers this guarantee is that users can easily introduce barging if it becomes a necessity but adding barging prevention or barging avoidance is more involved without language support. 380 381 Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design of \CFA concurrency. 382 383 \subsection{Internal Scheduling - multi monitor} 384 It is easier to understand the problem of multi-monitor scheduling using a series of pseudo-code thought experiments. Note that in the following snippets of pseudo-code, waiting and signalling is done without the use of a condition variable. While \CFA requires condition variables to use signalling, the variable itself only really holds the data needed for the implementation of internal scheduling. Some languages like Java\cit simply define an implicit condition variable for every monitor while other languages like \uC use explicit condition variables. Since the following pseudo-codes are simple and focused experiments, all condition variables are implicit. 385 386 \begin{multicols}{2} 387 \begin{pseudo} 356 388 acquire A 357 389 wait A 358 390 release A 359 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 391 \end{pseudo} 392 393 \columnbreak 394 395 \begin{pseudo} 360 396 acquire A 361 397 signal A 362 398 release A 363 \end{lstlisting} 399 \end{pseudo} 400 \end{multicols} 401 402 The previous example shows the simple case of having two threads (one for each column) and a single monitor A. One thread acquires before waiting and the other acquires before signalling. There are a few important things to note here. First, both \code{wait} and \code{signal} must be called with the proper monitor(s) already acquired. 
This can be hidden on the user side but is a logical requirement for barging prevention. Secondly, as stated above, while it is argued that not all problems regarding single monitors are solved, this paper only regards challenges of \gls{group-acquire} and considers other problems related to monitors as solved. 403 404 An important note about this example is that signalling a monitor is a delayed operation. The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the \code{signal} statement. 405 406 A direct extension of the previous example is the \gls{group-acquire} version : 407 408 \begin{multicols}{2} 409 \begin{pseudo} 410 acquire A & B 411 wait A & B 412 release A & B 413 \end{pseudo} 414 415 \columnbreak 416 417 \begin{pseudo} 418 acquire A & B 419 signal A & B 420 release A & B 421 \end{pseudo} 422 \end{multicols} 423 424 This version uses \gls{group-acquire} (denoted using the \& symbol), but the presence of multiple monitors does not add a particularly new meaning. Synchronization will happen between the two threads in exactly the same way and order. The only difference is that mutual exclusion will cover more monitors. On the implementation side, handling multiple monitors at once does add a degree of complexity but it is not significant compared to the next few examples. 425 426 For the sake of completeness, here is another example of the single-monitor case, this time with nesting. 
427 428 \begin{multicols}{2} 429 \begin{pseudo} 372 430 acquire A 373 431 acquire B … … 375 433 release B 376 434 release A 377 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 378 acquire A 379 acquire B 380 signal B 381 release B 382 release A 383 \end{lstlisting} 384 \end{tabular} 385 \end{center} 386 387 Also easy : like uC++ 388 389 \begin{center} 390 \begin{tabular}{ c @{\hskip 0.65in} c } 391 \begin{lstlisting}[language=Pseudo] 392 acquire A & B 393 wait A & B 394 release A & B 395 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 396 acquire A & B 397 signal A & B 398 release A & B 399 \end{lstlisting} 400 \end{tabular} 401 \end{center} 402 403 Simplest extension : can be made like uC++ by tying B to A 404 405 \begin{center} 406 \begin{tabular}{ c @{\hskip 0.65in} c } 407 \begin{lstlisting}[language=Pseudo] 435 \end{pseudo} 436 437 \columnbreak 438 439 \begin{pseudo} 440 441 acquire B 442 signal B 443 release B 444 445 \end{pseudo} 446 \end{multicols} 447 448 While these cases can cause some deadlock issues, we consider that these issues are only a symptom of the fact that locks, and by extension monitors, are not perfectly composable. However, for monitors as for locks, it is possible to write program that using nesting without encountering any problems if they are nested carefully. 449 450 The next example is where \gls{group-acquire} adds a significant layer of complexity to the internal signalling semantics. 
451 452 \begin{multicols}{2} 453 \begin{pseudo} 408 454 acquire A 409 455 // Code Section 1 410 acquire B456 acquire A & B 411 457 // Code Section 2 412 458 wait A & B 413 459 // Code Section 3 414 release B460 release A & B 415 461 // Code Section 4 416 462 release A 417 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 463 \end{pseudo} 464 465 \columnbreak 466 467 \begin{pseudo} 418 468 acquire A 419 469 // Code Section 5 420 acquire B470 acquire A & B 421 471 // Code Section 6 422 472 signal A & B 423 473 // Code Section 7 424 release B474 release A & B 425 475 // Code Section 8 426 476 release A 427 \end{lstlisting} 428 \end{tabular} 429 \end{center} 430 431 Hard extension : 432 433 Incorrect options for the signal : 434 435 \begin{description} 436 \item[-] Release B and baton pass after Code Section 8 : Passing b without having it 437 \item[-] Keep B during Code Section 8 : Can lead to deadlocks since we secretly keep a lock longer than specified by the user 438 \item[-] Instead of release B transfer A and B to waiter then try to reacquire A before running Code Section 8 : This allows barging 439 \end{description} 440 441 Since we don't want barging we need to pass A \& B and somehow block and get A back. 442 443 \begin{center} 444 \begin{tabular}{ c @{\hskip 0.65in} c } 445 \begin{lstlisting}[language=Pseudo] 477 \end{pseudo} 478 \end{multicols} 479 480 It is particularly important to pay attention to code sections 8 and 3 which are where the existing semantics of internal scheduling are undefined. The root of the problem is that \gls{group-acquire} is used in a context where one of the monitors is already acquired. As mentionned in previous sections, monitors support multiple acquiring which means the that nesting \gls{group-acquire} can be done safely. However, in the context of internal scheduling it is important to define the behaviour of the previous pseudo-code. 
When the signaller thread reaches the location where it should "release A \& B", it actually only needs to release the monitor B. Since the other thread is waiting on monitor B, the signaller thread cannot simply release the monitor into the wild. This would mean that the waiting thread would have to reacquire the monitor and would therefore open the door to barging threads. Since the signalling thread still needs the monitor A, simply transferring ownership to the waiting thread is not an option because it would potentially violate mutual exclusion. We are therefore left with three options : 481 482 \subsubsection{Delaying signals} 483 The first and most obvious solution to the problem of multi-monitor scheduling is to keep ownership of all locks until the last lock is ready to be transferred. It can be argued that that moment, when the last lock is no longer needed, is the correct time to transfer ownership, since it fits most closely the behaviour of single-monitor scheduling. However, this solution can become much more complicated depending on the content of code section 8. Indeed, nothing prevents a user from signalling monitor A on a different condition variable. In that case, if monitor B is transferred with monitor A, then it means the system needs to handle threads having ownership on more monitors than expected and how to tie monitors together. On the other hand, if the signalling thread only transfers monitor A, then somehow both monitors A and B have to be transferred to the waiting thread from two different threads. While this solution may work, it was not fully explored because there is no apparent upper bound on the complexity of ownership transfer. 484 485 \subsubsection{Dependency graphs} 486 In the previous pseudo-code, there is a solution which would satisfy both barging prevention and mutual exclusion. 
If ownership of both monitors is transferred to the waiter when the signaller releases A and then the waiter transfers back ownership of A when it releases it then the problem is solved. This is the second solution. The problem it encounters is that it effectively boils down to resolving a dependency graph of ownership requirements. Here even the simplest of code snippets requires two transfers and it seems to increase in a manner closer to polynomial. For example the following code which is just a direct extension to three monitors requires at least three ownership transfer and has multiple solutions. 487 488 \begin{multicols}{2} 489 \begin{pseudo} 446 490 acquire A 447 491 acquire B 448 492 acquire C 449 493 wait A & B & C 450 1: release C 451 2: release B 452 3: release A 453 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 494 release C 495 release B 496 release A 497 \end{pseudo} 498 499 \columnbreak 500 501 \begin{pseudo} 454 502 acquire A 455 503 acquire B 456 504 acquire C 457 505 signal A & B & C 458 4: release C 459 5: release B 460 6: release A 461 \end{lstlisting} 462 \end{tabular} 463 \end{center} 464 465 To prevent barging : 466 467 \begin{description} 468 \item[-] When the signaller hits 4 : pass A, B, C to waiter 469 \item[-] When the waiter hits 2 : pass A, B to signaller 470 \item[-] When the signaller hits 5 : pass A to waiter 471 \end{description} 472 473 474 \begin{center} 475 \begin{tabular}{ c @{\hskip 0.65in} c } 476 \begin{lstlisting}[language=Pseudo] 506 release C 507 release B 508 release A 509 \end{pseudo} 510 \end{multicols} 511 512 \subsubsection{Partial signalling} 513 Finally, the solution that was chosen for \CFA is to use partial signalling. 
Consider the following case : 514 515 \begin{multicols}{2} 516 \begin{pseudo}[numbers=left] 477 517 acquire A 478 acquire C 479 acquire B 480 wait A & B & C 481 1: release B 482 2: release C 483 3: release A 484 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 485 acquire B 486 acquire A 487 acquire C 488 signal A & B & C 489 4: release C 490 5: release A 491 6: release B 492 \end{lstlisting} 493 \end{tabular} 494 \end{center} 495 496 To prevent barging : When the signaller hits 4 : pass A, B, C to waiter. When the waiter hits 1 it must release B, 497 498 \begin{description} 499 \item[-] 500 \item[-] When the waiter hits 1 : pass A, B to signaller 501 \item[-] When the signaller hits 5 : pass A, B to waiter 502 \item[-] When the waiter hits 2 : pass A to signaller 503 \end{description} 518 acquire A & B 519 wait A & B 520 release A & B 521 release A 522 \end{pseudo} 523 524 \columnbreak 525 526 \begin{pseudo}[numbers=left, firstnumber=6] 527 acquire A 528 acquire A & B 529 signal A & B 530 release A & B 531 // ... More code 532 release A 533 \end{pseudo} 534 \end{multicols} 535 536 The partial signalling solution transfers ownership of monitor B at lines 10 but does not wake the waiting thread since it is still using monitor A. Only when it reaches line 11 does it actually wakeup the waiting thread. This solution has the benefit that complexity is encapsulated in to only two actions, passing monitors to the next owner when they should be release and conditionnaly waking threads if all conditions are met. Contrary to the other solutions, this solution quickly hits an upper bound on complexity of implementation. 
537 538 % Hard extension : 539 540 % Incorrect options for the signal : 541 542 % \begin{description} 543 % \item[-] Release B and baton pass after Code Section 8 : Passing b without having it 544 % \item[-] Keep B during Code Section 8 : Can lead to deadlocks since we secretly keep a lock longer than specified by the user 545 % \item[-] Instead of release B transfer A and B to waiter then try to reacquire A before running Code Section 8 : This allows barging 546 % \end{description} 547 548 % Since we don't want barging we need to pass A \& B and somehow block and get A back. 549 550 % \begin{center} 551 % \begin{tabular}{ c @{\hskip 0.65in} c } 552 % \begin{lstlisting}[language=Pseudo] 553 % acquire A 554 % acquire B 555 % acquire C 556 % wait A & B & C 557 % 1: release C 558 % 2: release B 559 % 3: release A 560 % \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 561 % acquire A 562 % acquire B 563 % acquire C 564 % signal A & B & C 565 % 4: release C 566 % 5: release B 567 % 6: release A 568 % \end{lstlisting} 569 % \end{tabular} 570 % \end{center} 571 572 % To prevent barging : 573 574 % \begin{description} 575 % \item[-] When the signaller hits 4 : pass A, B, C to waiter 576 % \item[-] When the waiter hits 2 : pass A, B to signaller 577 % \item[-] When the signaller hits 5 : pass A to waiter 578 % \end{description} 579 580 581 % \begin{center} 582 % \begin{tabular}{ c @{\hskip 0.65in} c } 583 % \begin{lstlisting}[language=Pseudo] 584 % acquire A 585 % acquire C 586 % acquire B 587 % wait A & B & C 588 % 1: release B 589 % 2: release C 590 % 3: release A 591 % \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 592 % acquire B 593 % acquire A 594 % acquire C 595 % signal A & B & C 596 % 4: release C 597 % 5: release A 598 % 6: release B 599 % \end{lstlisting} 600 % \end{tabular} 601 % \end{center} 602 603 % To prevent barging : When the signaller hits 4 : pass A, B, C to waiter. 
When the waiter hits 1 it must release B, 604 605 % \begin{description} 606 % \item[-] 607 % \item[-] When the waiter hits 1 : pass A, B to signaller 608 % \item[-] When the signaller hits 5 : pass A, B to waiter 609 % \item[-] When the waiter hits 2 : pass A to signaller 610 % \end{description} 504 611 505 612 % Monitors also need to schedule waiting threads internally as a mean of synchronization. Internal scheduling is one of the simple examples of such a feature. It allows users to declare condition variables and have threads wait and signaled from them. Here is a simple example of such a technique : … … 920 1027 % # # # # ### # # # # # # # # # 921 1028 % ####### # # # ### ##### ##### # # ####### ###### 922 \newpage 923 \subsection{External scheduling} \label{extsched} 1029 \section{External scheduling} \label{extsched} 924 1030 An alternative to internal scheduling is to use external scheduling instead. This method is more constrained and explicit which may help users tone down the undeterministic nature of concurrency. Indeed, as the following examples demonstrates, external scheduling allows users to wait for events from other threads without the concern of unrelated events occuring. External scheduling can generally be done either in terms of control flow (ex: \uC) or in terms of data (ex: Go). Of course, both of these paradigms have their own strenghts and weaknesses but for this project control flow semantics where chosen to stay consistent with the rest of the languages semantics. Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multi-monitor routines. The following example shows a simple use \code{accept} versus \code{wait}/\code{signal} and its advantages. 
925 1031 … … 959 1065 % ####### ####### ####### ##### ####### ####### ###### ##### ##### 960 1066 961 \subs ubsection{Loose object definitions}1067 \subsection{Loose object definitions} 962 1068 In \uC, monitor declarations include an exhaustive list of monitor operations. Since \CFA is not object oriented it becomes both more difficult to implement but also less clear for the user : 963 1069 … … 984 1090 \end{center} 985 1091 986 For the \ps eudo{monitor is free} condition it is easy to implement a check that can evaluate the condition in a few instruction. However, a fast check for \pseudo{monitor accepts me} is much harder to implement depending on the constraints put on the monitors. Indeed, monitors are often expressed as an entry queue and some acceptor queue as in the following figure :1092 For the \pscode{monitor is free} condition it is easy to implement a check that can evaluate the condition in a few instruction. However, a fast check for \pscode{monitor accepts me} is much harder to implement depending on the constraints put on the monitors. Indeed, monitors are often expressed as an entry queue and some acceptor queue as in the following figure : 987 1093 988 1094 \begin{center} … … 1057 1163 % # # ##### ####### # ### # # ####### # # 1058 1164 1059 \subs ubsection{Multi-monitor scheduling}1165 \subsection{Multi-monitor scheduling} 1060 1166 1061 1167 External scheduling, like internal scheduling, becomes orders of magnitude more complex when we start introducing multi-monitor syntax. Even in the simplest possible case some new semantics need to be established : … … 1116 1222 1117 1223 1118 \subs ubsection{Implementation Details: External scheduling queues}1224 \subsection{Implementation Details: External scheduling queues} 1119 1225 To support multi-monitor external scheduling means that some kind of entry-queues must be used that is aware of both monitors. 
However, acceptable routines must be aware of the entry queues which means they must be stored inside at least one of the monitors that will be acquired. This in turn adds the requirement a systematic algorithm of disambiguating which queue is relavant regardless of user ordering. The proposed algorithm is to fall back on monitors lock ordering and specify that the monitor that is acquired first is the lock with the relevant entry queue. This assumes that the lock acquiring order is static for the lifetime of all concerned objects but that is a reasonnable constraint. This algorithm choice has two consequences, the entry queue of the highest priority monitor is no longer a true FIFO queue and the queue of the lowest priority monitor is both required and probably unused. The queue can no longer be a FIFO queue because instead of simply containing the waiting threads in order arrival, they also contain the second mutex. Therefore, another thread with the same highest priority monitor but a different lowest priority monitor may arrive first but enter the critical section after a thread with the correct pairing. Secondly, since it may not be known at compile time which monitor will be the lowest priority monitor, every monitor needs to have the correct queues even though it is probable that half the multi-monitor queues will go unused for the entire duration of the program. 1120 1226 1121 \s ubsection{Other concurrency tools}1227 \section{Other concurrency tools} 1122 1228 TO BE CONTINUED... 1123 1229 … … 1131 1237 1132 1238 1133 \newpage1134 1239 % ###### # ###### # # # ####### # ### ##### # # 1135 1240 % # # # # # # # # # # # # # # # ## ## … … 1139 1244 % # # # # # # # # # # # # # # # # 1140 1245 % # # # # # # # ####### ####### ####### ####### ### ##### # # 1141 \ section{Parallelism}1246 \chapter{Parallelism} 1142 1247 Historically, computer performance was about processor speeds and instructions count. 
However, with heat dissipation being a direct consequence of speed increase, parallelism has become the new source for increased performance~\cite{Sutter05, Sutter05b}. In this decade, it is not longer reasonnable to create a high-performance application without caring about parallelism. Indeed, parallelism is an important aspect of performance and more specifically throughput and hardware utilization. The lowest-level approach of parallelism is to use \glspl{kthread} in combination with semantics like \code{fork}, \code{join}, etc. However, since these have significant costs and limitations, \glspl{kthread} are now mostly used as an implementation tool rather than a user oriented one. There are several alternatives to solve these issues that all have strengths and weaknesses. While there are many variations of the presented paradigms, most of these variations do not actually change the guarantees or the semantics, they simply move costs in order to achieve better performance for certain workloads. 1143 1248 1249 \section{Paradigm} 1144 1250 \subsection{User-level threads} 1145 1251 A direct improvement on the \gls{kthread} approach is to use \glspl{uthread}. These threads offer most of the same features that the operating system already provide but can be used on a much larger scale. This approach is the most powerfull solution as it allows all the features of multi-threading, while removing several of the more expensives costs of using kernel threads. The down side is that almost none of the low-level threading problems are hidden, users still have to think about data races, deadlocks and synchronization issues. These issues can be somewhat alleviated by a concurrency toolkit with strong garantees but the parallelism toolkit offers very little to reduce complexity in itself. … … 1147 1253 Examples of languages that support \glspl{uthread} are Erlang~\cite{Erlang} and \uC~\cite{uC++book}. 
1148 1254 1149 \subs ubsection{Fibers : user-level threads without preemption}1255 \subsection{Fibers : user-level threads without preemption} 1150 1256 A popular varient of \glspl{uthread} is what is often reffered to as \glspl{fiber}. However, \glspl{fiber} do not present meaningful semantical differences with \glspl{uthread}. Advocates of \glspl{fiber} list their high performance and ease of implementation as majors strenghts of \glspl{fiber} but the performance difference between \glspl{uthread} and \glspl{fiber} is controversial and the ease of implementation, while true, is a weak argument in the context of language design. Therefore this proposal largely ignore fibers. 1151 1257 … … 1494 1600 % # # # # 1495 1601 % # # ####### ####### 1496 \section{Putting it all together} 1497 1498 1499 1500 1602 \chapter{Putting it all together} 1603 1604 1605 1606 1607 1608 \chapter{Conclusion} 1501 1609 1502 1610 … … 1512 1620 % # # # # # # # # # 1513 1621 % # ##### # ##### # # ###### 1514 \ section{Future work}1622 \chapter{Future work} 1515 1623 Concurrency and parallelism is still a very active field that strongly benefits from hardware advances. As such certain features that aren't necessarily mature enough in their current state could become relevant in the lifetime of \CFA. 1516 1624 \subsection{Transactions} -
doc/proposals/concurrency/style.tex
r6ac2ada r1ed33fed 1 1 \input{common} % bespoke macros used in the document 2 \input{cfa-format} 2 3 3 4 % \CFADefaultStyle -
doc/proposals/concurrency/version
r6ac2ada r1ed33fed 1 0. 7.1411 0.8.2 -
src/CodeGen/CodeGenerator.cc
r6ac2ada r1ed33fed 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tus May 9 14:32:00 201713 // Update Count : 48 312 // Last Modified On : Wed May 10 14:45:00 2017 13 // Update Count : 484 14 14 // 15 15 … … 41 41 namespace CodeGen { 42 42 int CodeGenerator::tabsize = 4; 43 44 // Pseudo Function: output << lineDirective(*currentNode);45 struct lineDirective {46 CodeLocation const & loc;47 lineDirective(CodeLocation const & location) : loc(location) {}48 lineDirective(BaseSyntaxNode const * node) : loc(node->location) {}49 };50 std::ostream & operator<<(std::ostream & out, lineDirective const & ld) {51 if (ld.loc.isSet())52 return out << "\n# " << ld.loc.linenumber << " \""53 << ld.loc.filename << "\"\n";54 return out << "\n// Unset Location\n";55 }56 43 57 44 // the kinds of statements that would ideally be followed by whitespace … … 102 89 } 103 90 104 CodeGenerator::CodeGenerator( std::ostream & os, bool pretty, bool genC ) : indent( *this), cur_indent( 0 ), insideFunction( false ), output( os ), printLabels( *this ), pretty( pretty ), genC( genC ) {} 91 CodeGenerator::LineMarker::LineMarker( 92 CodeLocation const & loc, bool toPrint) : 93 loc(loc), toPrint(toPrint) 94 {} 95 96 CodeGenerator::LineMarker CodeGenerator::lineDirective( 97 BaseSyntaxNode const * node) { 98 return LineMarker(node->location, lineMarks); 99 } 100 101 std::ostream & operator<<(std::ostream & out, 102 CodeGenerator::LineMarker const & marker) { 103 if (marker.toPrint && marker.loc.isSet()) { 104 return out << "\n# " << marker.loc.linenumber << " \"" 105 << marker.loc.filename << "\"\n"; 106 } else if (marker.toPrint) { 107 return out << "\n/* Missing CodeLocation */\n"; 108 } else { 109 return out; 110 } 111 } 112 113 CodeGenerator::CodeGenerator( std::ostream & os, bool pretty, bool genC, bool lineMarks ) : indent( *this), cur_indent( 0 ), insideFunction( false ), output( os ), printLabels( *this ), pretty( pretty ), genC( 
genC ), lineMarks( lineMarks ) {} 105 114 106 115 CodeGenerator::CodeGenerator( std::ostream & os, std::string init, int indentation, bool infunp ) … … 195 204 } 196 205 197 output << lineDirective( aggDecl ) <<kind;206 output << kind; 198 207 if ( aggDecl->get_name() != "" ) 199 208 output << aggDecl->get_name(); … … 700 709 void CodeGenerator::visit( UntypedTupleExpr * tupleExpr ) { 701 710 assertf( ! genC, "UntypedTupleExpr should not reach code generation." ); 711 extension( tupleExpr ); 702 712 output << "["; 703 713 genCommaList( tupleExpr->get_exprs().begin(), tupleExpr->get_exprs().end() ); … … 707 717 void CodeGenerator::visit( TupleExpr * tupleExpr ) { 708 718 assertf( ! genC, "TupleExpr should not reach code generation." ); 719 extension( tupleExpr ); 709 720 output << "["; 710 721 genCommaList( tupleExpr->get_exprs().begin(), tupleExpr->get_exprs().end() ); 711 722 output << "]"; 723 } 724 725 void CodeGenerator::visit( TupleIndexExpr * tupleExpr ) { 726 assertf( ! genC, "TupleIndexExpr should not reach code generation." ); 727 extension( tupleExpr ); 728 tupleExpr->get_tuple()->accept( *this ); 729 output << "." << tupleExpr->get_index(); 712 730 } 713 731 -
src/CodeGen/CodeGenerator.h
r6ac2ada r1ed33fed 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Ma r 1 16:20:04201713 // Update Count : 5 011 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed May 10 10:57:00 2017 13 // Update Count : 51 14 14 // 15 15 … … 25 25 #include "SymTab/Indexer.h" 26 26 27 #include "Common/utility.h" 28 27 29 namespace CodeGen { 28 30 class CodeGenerator : public Visitor { … … 30 32 static int tabsize; 31 33 32 CodeGenerator( std::ostream &os, bool pretty = false, bool genC = false );34 CodeGenerator( std::ostream &os, bool pretty = false, bool genC = false, bool lineMarks = false ); 33 35 CodeGenerator( std::ostream &os, std::string, int indent = 0, bool infun = false ); 34 36 CodeGenerator( std::ostream &os, char *, int indent = 0, bool infun = false ); … … 74 76 virtual void visit( UntypedTupleExpr *tupleExpr ); 75 77 virtual void visit( TupleExpr *tupleExpr ); 78 virtual void visit( TupleIndexExpr * tupleExpr ); 76 79 virtual void visit( TypeExpr *typeExpr ); 77 80 virtual void visit( AsmExpr * ); … … 110 113 }; 111 114 115 struct LineMarker { 116 CodeLocation const & loc; 117 bool toPrint; 118 119 LineMarker(CodeLocation const & loc, bool toPrint); 120 }; 121 122 LineMarker lineDirective(BaseSyntaxNode const * node); 123 112 124 void asmName( DeclarationWithType *decl ); 113 125 … … 122 134 bool pretty = false; // pretty print 123 135 bool genC = false; // true if output has to be C code 136 bool lineMarks = false; 124 137 125 138 void printDesignators( std::list< Expression * > & ); … … 149 162 /// returns C-compatible name of declaration 150 163 std::string genName( DeclarationWithType * decl ); 164 165 std::ostream & operator<<(std::ostream &, 166 CodeGenerator::LineMarker const &); 151 167 } // namespace CodeGen 152 168 -
src/CodeGen/GenType.cc
r6ac2ada r1ed33fed 28 28 class GenType : public Visitor { 29 29 public: 30 GenType( const std::string &typeString, bool pretty = false, bool genC = false );30 GenType( const std::string &typeString, bool pretty = false, bool genC = false, bool lineMarks = false ); 31 31 std::string get_typeString() const { return typeString; } 32 32 void set_typeString( const std::string &newValue ) { typeString = newValue; } … … 54 54 bool pretty = false; // pretty print 55 55 bool genC = false; // generating C code? 56 bool lineMarks = false; 56 57 }; 57 58 58 std::string genType( Type *type, const std::string &baseString, bool pretty, bool genC ) {59 GenType gt( baseString, pretty, genC );59 std::string genType( Type *type, const std::string &baseString, bool pretty, bool genC , bool lineMarks ) { 60 GenType gt( baseString, pretty, genC, lineMarks ); 60 61 std::ostringstream os; 61 62 62 63 if ( ! type->get_attributes().empty() ) { 63 CodeGenerator cg( os, pretty, genC );64 CodeGenerator cg( os, pretty, genC, lineMarks ); 64 65 cg.genAttributes( type->get_attributes() ); 65 66 } // if … … 73 74 } 74 75 75 GenType::GenType( const std::string &typeString, bool pretty, bool genC ) : typeString( typeString ), pretty( pretty ), genC( genC) {}76 GenType::GenType( const std::string &typeString, bool pretty, bool genC, bool lineMarks ) : typeString( typeString ), pretty( pretty ), genC( genC ), lineMarks( lineMarks ) {} 76 77 77 78 void GenType::visit( VoidType *voidType ) { … … 114 115 } // if 115 116 if ( dimension != 0 ) { 116 CodeGenerator cg( os, pretty, genC );117 CodeGenerator cg( os, pretty, genC, lineMarks ); 117 118 dimension->accept( cg ); 118 119 } else if ( isVarLen ) { … … 168 169 } // if 169 170 } else { 170 CodeGenerator cg( os, pretty, genC );171 CodeGenerator cg( os, pretty, genC, lineMarks ); 171 172 os << "(" ; 172 173 … … 191 192 // assertf( ! genC, "Aggregate type parameters should not reach code generation." 
); 192 193 std::ostringstream os; 193 CodeGenerator cg( os, pretty, genC );194 CodeGenerator cg( os, pretty, genC, lineMarks ); 194 195 os << "forall("; 195 196 cg.genCommaList( funcType->get_forall().begin(), funcType->get_forall().end() ); … … 202 203 if ( ! refType->get_parameters().empty() ) { 203 204 std::ostringstream os; 204 CodeGenerator cg( os, pretty, genC );205 CodeGenerator cg( os, pretty, genC, lineMarks ); 205 206 os << "("; 206 207 cg.genCommaList( refType->get_parameters().begin(), refType->get_parameters().end() ); … … 242 243 for ( Type * t : *tupleType ) { 243 244 i++; 244 os << genType( t, "", pretty, genC ) << (i == tupleType->size() ? "" : ", ");245 os << genType( t, "", pretty, genC, lineMarks ) << (i == tupleType->size() ? "" : ", "); 245 246 } 246 247 os << "]"; -
src/CodeGen/GenType.h
r6ac2ada r1ed33fed 21 21 22 22 namespace CodeGen { 23 std::string genType( Type *type, const std::string &baseString, bool pretty = false, bool genC = false );23 std::string genType( Type *type, const std::string &baseString, bool pretty = false, bool genC = false, bool lineMarks = false ); 24 24 std::string genPrettyType( Type * type, const std::string & baseString ); 25 25 } // namespace CodeGen -
src/CodeGen/Generate.cc
r6ac2ada r1ed33fed 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Thu Jun 4 14:04:25 201513 // Update Count : 511 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed May 19 13:05:00 2017 13 // Update Count : 6 14 14 // 15 15 … … 31 31 32 32 namespace CodeGen { 33 void generate( std::list< Declaration* > translationUnit, std::ostream &os, bool doIntrinsics, bool pretty, bool generateC ) {34 CodeGen::CodeGenerator cgv( os, pretty, generateC );33 void generate( std::list< Declaration* > translationUnit, std::ostream &os, bool doIntrinsics, bool pretty, bool generateC, bool lineMarks ) { 34 CodeGen::CodeGenerator cgv( os, pretty, generateC, lineMarks ); 35 35 for ( auto & dcl : translationUnit ) { 36 36 if ( LinkageSpec::isGeneratable( dcl->get_linkage() ) && (doIntrinsics || ! LinkageSpec::isBuiltin( dcl->get_linkage() ) ) ) { 37 os << cgv.lineDirective(dcl); 37 38 dcl->accept(cgv); 38 39 if ( doSemicolon( dcl ) ) { … … 48 49 os << CodeGen::genPrettyType( type, "" ); 49 50 } else { 50 CodeGen::CodeGenerator cgv( os, true, false );51 CodeGen::CodeGenerator cgv( os, true, false, false ); 51 52 node->accept( cgv ); 52 53 } -
src/CodeGen/Generate.h
r6ac2ada r1ed33fed 24 24 namespace CodeGen { 25 25 /// Generates code. doIntrinsics determines if intrinsic functions are printed, pretty formats output nicely (e.g., uses unmangled names, etc.), generateC is true when the output must consist only of C code (allows some assertions, etc.) 26 void generate( std::list< Declaration* > translationUnit, std::ostream &os, bool doIntrinsics, bool pretty, bool generateC = false );26 void generate( std::list< Declaration* > translationUnit, std::ostream &os, bool doIntrinsics, bool pretty, bool generateC = false , bool lineMarks = false ); 27 27 28 28 /// Generate code for a single node -- helpful for debugging in gdb -
src/Common/utility.h
r6ac2ada r1ed33fed 322 322 std::string filename; 323 323 324 324 /// Create a new unset CodeLocation. 325 325 CodeLocation() 326 326 : linenumber( -1 ) … … 328 328 {} 329 329 330 330 /// Create a new CodeLocation with the given values. 331 331 CodeLocation( const char* filename, int lineno ) 332 332 : linenumber( lineno ) … … 334 334 {} 335 335 336 337 338 339 340 341 342 336 bool isSet () const { 337 return -1 != linenumber; 338 } 339 340 bool isUnset () const { 341 return !isSet(); 342 } 343 343 344 344 void unset () { … … 353 353 return location.isSet() ? location.filename + ":" + std::to_string(location.linenumber) + " " : ""; 354 354 } 355 355 356 #endif // _UTILITY_H 356 357 -
src/GenPoly/Box.cc
r6ac2ada r1ed33fed 765 765 arg = new AddressExpr( arg ); 766 766 } 767 if ( ! ResolvExpr::typesCompatible( param, arg->get_result(), SymTab::Indexer() ) ) { 768 // silence warnings by casting boxed parameters when the actual type does not match up with the formal type. 769 arg = new CastExpr( arg, param->clone() ); 770 } 767 771 } else { 768 772 // use type computed in unification to declare boxed variables … … 902 906 } // if 903 907 UntypedExpr *assign = new UntypedExpr( new NameExpr( "?=?" ) ); 904 UntypedExpr *deref = new UntypedExpr( new NameExpr( "*?" ) ); 905 deref->get_args().push_back( new CastExpr( new VariableExpr( *param++ ), new PointerType( Type::Qualifiers(), realType->get_returnVals().front()->get_type()->clone() ) ) ); 908 UntypedExpr *deref = UntypedExpr::createDeref( new CastExpr( new VariableExpr( *param++ ), new PointerType( Type::Qualifiers(), realType->get_returnVals().front()->get_type()->clone() ) ) ); 906 909 assign->get_args().push_back( deref ); 907 910 addAdapterParams( adapteeApp, arg, param, adapterType->get_parameters().end(), realParam, tyVars ); … … 1217 1220 1218 1221 Statement * Pass1::mutate( ReturnStmt *returnStmt ) { 1219 // maybe need access to the env when mutating the expr1220 if ( Expression * expr = returnStmt->get_expr() ) {1221 if ( expr->get_env() ) {1222 env = expr->get_env();1223 }1224 }1225 1226 1222 if ( retval && returnStmt->get_expr() ) { 1227 1223 assert( returnStmt->get_expr()->has_result() && ! returnStmt->get_expr()->get_result()->isVoid() ); … … 1302 1298 FunctionType * ftype = functionDecl->get_functionType(); 1303 1299 if ( ! ftype->get_returnVals().empty() && functionDecl->get_statements() ) { 1304 if ( functionDecl->get_name() != "?=?" && ! isPrefix( functionDecl->get_name(), "_thunk" ) && ! isPrefix( functionDecl->get_name(), "_adapter" ) ) { // xxx - remove check for ?=? once reference types are in;remove check for prefix once thunks properly use ctor/dtors1300 if ( ! 
isPrefix( functionDecl->get_name(), "_thunk" ) && ! isPrefix( functionDecl->get_name(), "_adapter" ) ) { // xxx - remove check for prefix once thunks properly use ctor/dtors 1305 1301 assert( ftype->get_returnVals().size() == 1 ); 1306 1302 DeclarationWithType * retval = ftype->get_returnVals().front(); … … 1539 1535 Type *declType = objectDecl->get_type(); 1540 1536 std::string bufName = bufNamer.newName(); 1541 ObjectDecl *newBuf = new ObjectDecl( bufName, Type::StorageClasses(), LinkageSpec::C, 0, 1542 new ArrayType( Type::Qualifiers(), new BasicType( Type::Qualifiers(), BasicType::Kind::Char), new NameExpr( sizeofName( mangleType(declType) ) ), 1537 ObjectDecl *newBuf = new ObjectDecl( bufName, Type::StorageClasses(), LinkageSpec::C, 0, 1538 new ArrayType( Type::Qualifiers(), new BasicType( Type::Qualifiers(), BasicType::Kind::Char), new NameExpr( sizeofName( mangleType(declType) ) ), 1543 1539 true, false, std::list<Attribute*>{ new Attribute( std::string{"aligned"}, std::list<Expression*>{ new ConstantExpr( Constant::from_int(8) ) } ) } ), 0 ); 1544 1540 stmtsToAdd.push_back( new DeclStmt( noLabels, newBuf ) ); … … 1578 1574 } 1579 1575 1580 /// Returns an expression dereferenced n times1581 Expression *makeDerefdVar( Expression *derefdVar, long n ) {1582 for ( int i = 1; i < n; ++i ) {1583 UntypedExpr *derefExpr = new UntypedExpr( new NameExpr( "*?" ) );1584 derefExpr->get_args().push_back( derefdVar );1585 // xxx - should set results on derefExpr1586 derefdVar = derefExpr;1587 }1588 return derefdVar;1589 }1590 1591 1576 Expression *PolyGenericCalculator::mutate( MemberExpr *memberExpr ) { 1592 1577 // mutate, exiting early if no longer MemberExpr … … 1595 1580 if ( ! memberExpr ) return expr; 1596 1581 1597 // get declaration for base struct, exiting early if not found1598 int varDepth;1599 VariableExpr *varExpr = getBaseVar( memberExpr->get_aggregate(), &varDepth );1600 if ( ! 
varExpr ) return memberExpr;1601 ObjectDecl *objectDecl = dynamic_cast< ObjectDecl* >( varExpr->get_var() );1602 if ( ! objectDecl ) return memberExpr;1603 1604 1582 // only mutate member expressions for polymorphic types 1605 1583 int tyDepth; 1606 Type *objectType = hasPolyBase( objectDecl->get_type(), scopeTyVars, &tyDepth );1584 Type *objectType = hasPolyBase( memberExpr->get_aggregate()->get_result(), scopeTyVars, &tyDepth ); 1607 1585 if ( ! objectType ) return memberExpr; 1608 1586 findGeneric( objectType ); // ensure layout for this type is available … … 1622 1600 fieldLoc->get_args().push_back( aggr ); 1623 1601 fieldLoc->get_args().push_back( makeOffsetIndex( objectType, i ) ); 1602 fieldLoc->set_result( memberExpr->get_result()->clone() ); 1624 1603 newMemberExpr = fieldLoc; 1625 1604 } else if ( dynamic_cast< UnionInstType* >( objectType ) ) { 1626 // union members are all at offset zero, so build appropriately-dereferenced variable 1627 newMemberExpr = makeDerefdVar( varExpr->clone(), varDepth ); 1605 // union members are all at offset zero, so just use the aggregate expr 1606 Expression * aggr = memberExpr->get_aggregate()->clone(); 1607 delete aggr->get_env(); // xxx - there's a problem with keeping the env for some reason, so for now just get rid of it 1608 aggr->set_env( nullptr ); 1609 newMemberExpr = aggr; 1610 newMemberExpr->set_result( memberExpr->get_result()->clone() ); 1628 1611 } else return memberExpr; 1629 1612 assert( newMemberExpr ); … … 1633 1616 // Not all members of a polymorphic type are themselves of polymorphic type; in this case the member expression should be wrapped and dereferenced to form an lvalue 1634 1617 CastExpr *ptrCastExpr = new CastExpr( newMemberExpr, new PointerType( Type::Qualifiers(), memberType->clone() ) ); 1635 UntypedExpr *derefExpr = new UntypedExpr( new NameExpr( "*?" 
) ); 1636 derefExpr->get_args().push_back( ptrCastExpr ); 1618 UntypedExpr *derefExpr = UntypedExpr::createDeref( ptrCastExpr ); 1637 1619 newMemberExpr = derefExpr; 1638 1620 } -
src/InitTweak/FixInit.cc
r6ac2ada r1ed33fed 361 361 FunctionType * ftype = dynamic_cast< FunctionType * >( GenPoly::getFunctionType( funcDecl->get_type() ) ); 362 362 assert( ftype ); 363 if ( (isConstructor( funcDecl->get_name() ) || funcDecl->get_name() == "?=?") && ftype->get_parameters().size() == 2 ) {363 if ( isConstructor( funcDecl->get_name() ) && ftype->get_parameters().size() == 2 ) { 364 364 Type * t1 = ftype->get_parameters().front()->get_type(); 365 365 Type * t2 = ftype->get_parameters().back()->get_type(); … … 367 367 368 368 if ( ResolvExpr::typesCompatible( ptrType->get_base(), t2, SymTab::Indexer() ) ) { 369 // optimization: don't need to copy construct in order to call a copy constructor or 370 // assignment operator 369 // optimization: don't need to copy construct in order to call a copy constructor 371 370 return appExpr; 372 371 } // if … … 636 635 assert( ! stmtExpr->get_returnDecls().empty() ); 637 636 body->get_kids().push_back( new ExprStmt( noLabels, new VariableExpr( stmtExpr->get_returnDecls().front() ) ) ); 638 } 639 stmtExpr->get_returnDecls().clear(); 640 stmtExpr->get_dtors().clear(); 637 stmtExpr->get_returnDecls().clear(); 638 stmtExpr->get_dtors().clear(); 639 } 640 assert( stmtExpr->get_returnDecls().empty() ); 641 assert( stmtExpr->get_dtors().empty() ); 641 642 return stmtExpr; 642 643 } … … 667 668 stmtsToAdd.splice( stmtsToAdd.end(), fixer.stmtsToAdd ); 668 669 unqMap[unqExpr->get_id()] = unqExpr; 670 if ( unqCount[ unqExpr->get_id() ] == 0 ) { // insert destructor after the last use of the unique expression 671 stmtsToAdd.splice( stmtsToAddAfter.end(), dtors[ unqExpr->get_id() ] ); 672 } else { // remember dtors for last instance of unique expr 673 dtors[ unqExpr->get_id() ] = fixer.stmtsToAddAfter; 674 } 669 675 if ( UntypedExpr * deref = dynamic_cast< UntypedExpr * >( unqExpr->get_expr() ) ) { 670 676 // unique expression is now a dereference, because the inner expression is an lvalue returning function call. 
… … 675 681 getCallArg( deref, 0 ) = unqExpr; 676 682 addDeref.insert( unqExpr->get_id() ); 677 if ( unqCount[ unqExpr->get_id() ] == 0 ) { // insert destructor after the last use of the unique expression678 stmtsToAdd.splice( stmtsToAddAfter.end(), dtors[ unqExpr->get_id() ] );679 } else { // remember dtors for last instance of unique expr680 dtors[ unqExpr->get_id() ] = fixer.stmtsToAddAfter;681 }682 683 return deref; 683 684 } -
src/InitTweak/GenInit.cc
r6ac2ada r1ed33fed 142 142 // hands off if the function returns an lvalue - we don't want to allocate a temporary if a variable's address 143 143 // is being returned 144 // Note: under the assumption that assignments return *this, checking for ?=? here is an optimization, since it shouldn't be necessary to copy construct `this`. This is a temporary optimization until reference types are added, at which point this should be removed, along with the analogous optimization in copy constructor generation. 145 if ( returnStmt->get_expr() && returnVals.size() == 1 && funcName != "?=?" && ! returnVals.front()->get_type()->get_lvalue() ) { 144 if ( returnStmt->get_expr() && returnVals.size() == 1 && ! returnVals.front()->get_type()->get_lvalue() ) { 146 145 // explicitly construct the return value using the return expression and the retVal object 147 146 assertf( returnVals.front()->get_name() != "", "Function %s has unnamed return value\n", funcName.c_str() ); -
src/main.cc
r6ac2ada r1ed33fed 1 1 2 // 2 3 // Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo … … 9 10 // Author : Richard C. Bilson 10 11 // Created On : Fri May 15 23:12:02 2015 11 // Last Modified By : Peter A. Buhr12 // Last Modified On : Wed Dec 14 14:35:54 201613 // Update Count : 43 612 // Last Modified By : Andrew Beach 13 // Last Modified On : Wed May 10 14:45:00 2017 14 // Update Count : 437 14 15 // 15 16 … … 79 80 errorp = false, 80 81 codegenp = false, 81 prettycodegenp = false; 82 prettycodegenp = false, 83 nolinemarks = false; 82 84 83 85 static void parse_cmdline( int argc, char *argv[], const char *& filename ); … … 310 312 311 313 CodeTools::fillLocations( translationUnit ); 312 CodeGen::generate( translationUnit, *output, ! noprotop, prettycodegenp, true );314 CodeGen::generate( translationUnit, *output, ! noprotop, prettycodegenp, true, ! nolinemarks ); 313 315 314 316 CodeGen::FixMain::fix( *output, treep ? "../prelude/bootloader.c" : CFA_LIBDIR "/bootloader.c" ); … … 336 338 } catch ( CompilerError &e ) { 337 339 cerr << "Compiler Error: " << e.get_what() << endl; 338 cerr << "(please report bugs to " << endl;340 cerr << "(please report bugs to [REDACTED])" << endl; 339 341 if ( output != &cout ) { 340 342 delete output; … … 375 377 376 378 int c; 377 while ( (c = getopt_long( argc, argv, "abBcdefgl mnpqrstTvyzZD:F:", long_opts, &long_index )) != -1 ) {379 while ( (c = getopt_long( argc, argv, "abBcdefglLmnpqrstTvyzZD:F:", long_opts, &long_index )) != -1 ) { 378 380 switch ( c ) { 379 381 case Ast: … … 411 413 case 'l': // generate libcfa.c 412 414 libcfap = true; 415 break; 416 case 'L': // surpress lines marks 417 nolinemarks = true; 413 418 break; 414 419 case Nopreamble: -
src/prelude/Makefile.am
r6ac2ada r1ed33fed 42 42 43 43 bootloader.c : bootloader.cf prelude.cf extras.cf builtins.cf ${abs_top_srcdir}/src/driver/cfa-cpp 44 ${AM_V_GEN}${abs_top_srcdir}/src/driver/cfa-cpp -tpm bootloader.cf $@ # use src/cfa-cpp as not in lib until after install44 ${AM_V_GEN}${abs_top_srcdir}/src/driver/cfa-cpp -tpmL bootloader.cf $@ # use src/cfa-cpp as not in lib until after install 45 45 46 46 MAINTAINERCLEANFILES = builtins.c builtins.cf extras.cf bootloader.c ${addprefix ${libdir}/,${cfalib_DATA}} ${addprefix ${libdir}/,${lib_LIBRARIES}} -
src/prelude/Makefile.in
r6ac2ada r1ed33fed 439 439 440 440 bootloader.c : bootloader.cf prelude.cf extras.cf builtins.cf ${abs_top_srcdir}/src/driver/cfa-cpp 441 ${AM_V_GEN}${abs_top_srcdir}/src/driver/cfa-cpp -tpm bootloader.cf $@ # use src/cfa-cpp as not in lib until after install441 ${AM_V_GEN}${abs_top_srcdir}/src/driver/cfa-cpp -tpmL bootloader.cf $@ # use src/cfa-cpp as not in lib until after install 442 442 443 443 # Tell versions [3.59,3.63) of GNU make to not export all variables. -
src/tests/.expect/32/KRfunctions.txt
r6ac2ada r1ed33fed 31 31 } 32 32 static inline struct S ___operator_assign__F2sS_P2sS2sS_autogen___1(struct S *___dst__P2sS_1, struct S ___src__2sS_1){ 33 struct S ___ret__2sS_1; 33 34 ((void)((*___dst__P2sS_1).__i__i_1=___src__2sS_1.__i__i_1)); 34 return ((struct S )___src__2sS_1); 35 ((void)___constructor__F_P2sS2sS_autogen___1((&___ret__2sS_1), ___src__2sS_1)); 36 return ((struct S )___ret__2sS_1); 35 37 } 36 38 static inline void ___constructor__F_P2sSi_autogen___1(struct S *___dst__P2sS_1, int __i__i_1){ -
src/tests/.expect/32/attributes.txt
r6ac2ada r1ed33fed 22 22 } 23 23 static inline struct __anonymous0 ___operator_assign__F13s__anonymous0_P13s__anonymous013s__anonymous0_autogen___1(struct __anonymous0 *___dst__P13s__anonymous0_1, struct __anonymous0 ___src__13s__anonymous0_1){ 24 return ((struct __anonymous0 )___src__13s__anonymous0_1); 24 struct __anonymous0 ___ret__13s__anonymous0_1; 25 ((void)___constructor__F_P13s__anonymous013s__anonymous0_autogen___1((&___ret__13s__anonymous0_1), ___src__13s__anonymous0_1)); 26 return ((struct __anonymous0 )___ret__13s__anonymous0_1); 25 27 } 26 28 __attribute__ ((unused)) struct Agn1; … … 38 40 } 39 41 static inline struct Agn2 ___operator_assign__F5sAgn2_P5sAgn25sAgn2_autogen___1(struct Agn2 *___dst__P5sAgn2_1, struct Agn2 ___src__5sAgn2_1){ 40 return ((struct Agn2 )___src__5sAgn2_1); 42 struct Agn2 ___ret__5sAgn2_1; 43 ((void)___constructor__F_P5sAgn25sAgn2_autogen___1((&___ret__5sAgn2_1), ___src__5sAgn2_1)); 44 return ((struct Agn2 )___ret__5sAgn2_1); 41 45 } 42 46 enum __attribute__ ((unused)) __anonymous1 { … … 99 103 } 100 104 static inline struct Fdl ___operator_assign__F4sFdl_P4sFdl4sFdl_autogen___1(struct Fdl *___dst__P4sFdl_1, struct Fdl ___src__4sFdl_1){ 105 struct Fdl ___ret__4sFdl_1; 101 106 ((void)((*___dst__P4sFdl_1).__f1__i_1=___src__4sFdl_1.__f1__i_1)); 102 107 ((void)((*___dst__P4sFdl_1).__f2__i_1=___src__4sFdl_1.__f2__i_1)); … … 108 113 ((void)((*___dst__P4sFdl_1).__f8__i_1=___src__4sFdl_1.__f8__i_1)); 109 114 ((void)((*___dst__P4sFdl_1).__f9__Pi_1=___src__4sFdl_1.__f9__Pi_1)); 110 return ((struct Fdl )___src__4sFdl_1); 115 ((void)___constructor__F_P4sFdl4sFdl_autogen___1((&___ret__4sFdl_1), ___src__4sFdl_1)); 116 return ((struct Fdl )___ret__4sFdl_1); 111 117 } 112 118 static inline void ___constructor__F_P4sFdli_autogen___1(struct Fdl *___dst__P4sFdl_1, int __f1__i_1){ … … 292 298 } 293 299 inline struct __anonymous4 ___operator_assign__F13s__anonymous4_P13s__anonymous413s__anonymous4_autogen___2(struct __anonymous4 
*___dst__P13s__anonymous4_2, struct __anonymous4 ___src__13s__anonymous4_2){ 300 struct __anonymous4 ___ret__13s__anonymous4_2; 294 301 ((void)((*___dst__P13s__anonymous4_2).__i__i_2=___src__13s__anonymous4_2.__i__i_2)); 295 return ((struct __anonymous4 )___src__13s__anonymous4_2); 302 ((void)___constructor__F_P13s__anonymous413s__anonymous4_autogen___2((&___ret__13s__anonymous4_2), ___src__13s__anonymous4_2)); 303 return ((struct __anonymous4 )___ret__13s__anonymous4_2); 296 304 } 297 305 inline void ___constructor__F_P13s__anonymous4i_autogen___2(struct __anonymous4 *___dst__P13s__anonymous4_2, int __i__i_2){ … … 310 318 } 311 319 inline enum __anonymous5 ___operator_assign__F13e__anonymous5_P13e__anonymous513e__anonymous5_intrinsic___2(enum __anonymous5 *___dst__P13e__anonymous5_2, enum __anonymous5 ___src__13e__anonymous5_2){ 312 return ((enum __anonymous5 )((*___dst__P13e__anonymous5_2)=___src__13e__anonymous5_2)); 320 enum __anonymous5 ___ret__13e__anonymous5_2; 321 ((void)(___ret__13e__anonymous5_2=((*___dst__P13e__anonymous5_2)=___src__13e__anonymous5_2)) /* ?{} */); 322 return ((enum __anonymous5 )___ret__13e__anonymous5_2); 313 323 } 314 324 ((void)sizeof(enum __anonymous5 )); … … 338 348 } 339 349 static inline struct Vad ___operator_assign__F4sVad_P4sVad4sVad_autogen___1(struct Vad *___dst__P4sVad_1, struct Vad ___src__4sVad_1){ 340 return ((struct Vad )___src__4sVad_1); 341 } 350 struct Vad ___ret__4sVad_1; 351 ((void)___constructor__F_P4sVad4sVad_autogen___1((&___ret__4sVad_1), ___src__4sVad_1)); 352 return ((struct Vad )___ret__4sVad_1); 353 } -
src/tests/.expect/32/declarationSpecifier.txt
r6ac2ada r1ed33fed 30 30 } 31 31 static inline struct __anonymous0 ___operator_assign__F13s__anonymous0_P13s__anonymous013s__anonymous0_autogen___1(struct __anonymous0 *___dst__P13s__anonymous0_1, struct __anonymous0 ___src__13s__anonymous0_1){ 32 struct __anonymous0 ___ret__13s__anonymous0_1; 32 33 ((void)((*___dst__P13s__anonymous0_1).__i__i_1=___src__13s__anonymous0_1.__i__i_1)); 33 return ((struct __anonymous0 )___src__13s__anonymous0_1); 34 ((void)___constructor__F_P13s__anonymous013s__anonymous0_autogen___1((&___ret__13s__anonymous0_1), ___src__13s__anonymous0_1)); 35 return ((struct __anonymous0 )___ret__13s__anonymous0_1); 34 36 } 35 37 static inline void ___constructor__F_P13s__anonymous0i_autogen___1(struct __anonymous0 *___dst__P13s__anonymous0_1, int __i__i_1){ … … 54 56 } 55 57 static inline struct __anonymous1 ___operator_assign__F13s__anonymous1_P13s__anonymous113s__anonymous1_autogen___1(struct __anonymous1 *___dst__P13s__anonymous1_1, struct __anonymous1 ___src__13s__anonymous1_1){ 58 struct __anonymous1 ___ret__13s__anonymous1_1; 56 59 ((void)((*___dst__P13s__anonymous1_1).__i__i_1=___src__13s__anonymous1_1.__i__i_1)); 57 return ((struct __anonymous1 )___src__13s__anonymous1_1); 60 ((void)___constructor__F_P13s__anonymous113s__anonymous1_autogen___1((&___ret__13s__anonymous1_1), ___src__13s__anonymous1_1)); 61 return ((struct __anonymous1 )___ret__13s__anonymous1_1); 58 62 } 59 63 static inline void ___constructor__F_P13s__anonymous1i_autogen___1(struct __anonymous1 *___dst__P13s__anonymous1_1, int __i__i_1){ … … 78 82 } 79 83 static inline struct __anonymous2 ___operator_assign__F13s__anonymous2_P13s__anonymous213s__anonymous2_autogen___1(struct __anonymous2 *___dst__P13s__anonymous2_1, struct __anonymous2 ___src__13s__anonymous2_1){ 84 struct __anonymous2 ___ret__13s__anonymous2_1; 80 85 ((void)((*___dst__P13s__anonymous2_1).__i__i_1=___src__13s__anonymous2_1.__i__i_1)); 81 return ((struct __anonymous2 )___src__13s__anonymous2_1); 86 
((void)___constructor__F_P13s__anonymous213s__anonymous2_autogen___1((&___ret__13s__anonymous2_1), ___src__13s__anonymous2_1)); 87 return ((struct __anonymous2 )___ret__13s__anonymous2_1); 82 88 } 83 89 static inline void ___constructor__F_P13s__anonymous2i_autogen___1(struct __anonymous2 *___dst__P13s__anonymous2_1, int __i__i_1){ … … 102 108 } 103 109 static inline struct __anonymous3 ___operator_assign__F13s__anonymous3_P13s__anonymous313s__anonymous3_autogen___1(struct __anonymous3 *___dst__P13s__anonymous3_1, struct __anonymous3 ___src__13s__anonymous3_1){ 110 struct __anonymous3 ___ret__13s__anonymous3_1; 104 111 ((void)((*___dst__P13s__anonymous3_1).__i__i_1=___src__13s__anonymous3_1.__i__i_1)); 105 return ((struct __anonymous3 )___src__13s__anonymous3_1); 112 ((void)___constructor__F_P13s__anonymous313s__anonymous3_autogen___1((&___ret__13s__anonymous3_1), ___src__13s__anonymous3_1)); 113 return ((struct __anonymous3 )___ret__13s__anonymous3_1); 106 114 } 107 115 static inline void ___constructor__F_P13s__anonymous3i_autogen___1(struct __anonymous3 *___dst__P13s__anonymous3_1, int __i__i_1){ … … 126 134 } 127 135 static inline struct __anonymous4 ___operator_assign__F13s__anonymous4_P13s__anonymous413s__anonymous4_autogen___1(struct __anonymous4 *___dst__P13s__anonymous4_1, struct __anonymous4 ___src__13s__anonymous4_1){ 136 struct __anonymous4 ___ret__13s__anonymous4_1; 128 137 ((void)((*___dst__P13s__anonymous4_1).__i__i_1=___src__13s__anonymous4_1.__i__i_1)); 129 return ((struct __anonymous4 )___src__13s__anonymous4_1); 138 ((void)___constructor__F_P13s__anonymous413s__anonymous4_autogen___1((&___ret__13s__anonymous4_1), ___src__13s__anonymous4_1)); 139 return ((struct __anonymous4 )___ret__13s__anonymous4_1); 130 140 } 131 141 static inline void ___constructor__F_P13s__anonymous4i_autogen___1(struct __anonymous4 *___dst__P13s__anonymous4_1, int __i__i_1){ … … 150 160 } 151 161 static inline struct __anonymous5 
___operator_assign__F13s__anonymous5_P13s__anonymous513s__anonymous5_autogen___1(struct __anonymous5 *___dst__P13s__anonymous5_1, struct __anonymous5 ___src__13s__anonymous5_1){ 162 struct __anonymous5 ___ret__13s__anonymous5_1; 152 163 ((void)((*___dst__P13s__anonymous5_1).__i__i_1=___src__13s__anonymous5_1.__i__i_1)); 153 return ((struct __anonymous5 )___src__13s__anonymous5_1); 164 ((void)___constructor__F_P13s__anonymous513s__anonymous5_autogen___1((&___ret__13s__anonymous5_1), ___src__13s__anonymous5_1)); 165 return ((struct __anonymous5 )___ret__13s__anonymous5_1); 154 166 } 155 167 static inline void ___constructor__F_P13s__anonymous5i_autogen___1(struct __anonymous5 *___dst__P13s__anonymous5_1, int __i__i_1){ … … 174 186 } 175 187 static inline struct __anonymous6 ___operator_assign__F13s__anonymous6_P13s__anonymous613s__anonymous6_autogen___1(struct __anonymous6 *___dst__P13s__anonymous6_1, struct __anonymous6 ___src__13s__anonymous6_1){ 188 struct __anonymous6 ___ret__13s__anonymous6_1; 176 189 ((void)((*___dst__P13s__anonymous6_1).__i__i_1=___src__13s__anonymous6_1.__i__i_1)); 177 return ((struct __anonymous6 )___src__13s__anonymous6_1); 190 ((void)___constructor__F_P13s__anonymous613s__anonymous6_autogen___1((&___ret__13s__anonymous6_1), ___src__13s__anonymous6_1)); 191 return ((struct __anonymous6 )___ret__13s__anonymous6_1); 178 192 } 179 193 static inline void ___constructor__F_P13s__anonymous6i_autogen___1(struct __anonymous6 *___dst__P13s__anonymous6_1, int __i__i_1){ … … 198 212 } 199 213 static inline struct __anonymous7 ___operator_assign__F13s__anonymous7_P13s__anonymous713s__anonymous7_autogen___1(struct __anonymous7 *___dst__P13s__anonymous7_1, struct __anonymous7 ___src__13s__anonymous7_1){ 214 struct __anonymous7 ___ret__13s__anonymous7_1; 200 215 ((void)((*___dst__P13s__anonymous7_1).__i__i_1=___src__13s__anonymous7_1.__i__i_1)); 201 return ((struct __anonymous7 )___src__13s__anonymous7_1); 216 
((void)___constructor__F_P13s__anonymous713s__anonymous7_autogen___1((&___ret__13s__anonymous7_1), ___src__13s__anonymous7_1)); 217 return ((struct __anonymous7 )___ret__13s__anonymous7_1); 202 218 } 203 219 static inline void ___constructor__F_P13s__anonymous7i_autogen___1(struct __anonymous7 *___dst__P13s__anonymous7_1, int __i__i_1){ … … 230 246 } 231 247 static inline struct __anonymous8 ___operator_assign__F13s__anonymous8_P13s__anonymous813s__anonymous8_autogen___1(struct __anonymous8 *___dst__P13s__anonymous8_1, struct __anonymous8 ___src__13s__anonymous8_1){ 248 struct __anonymous8 ___ret__13s__anonymous8_1; 232 249 ((void)((*___dst__P13s__anonymous8_1).__i__s_1=___src__13s__anonymous8_1.__i__s_1)); 233 return ((struct __anonymous8 )___src__13s__anonymous8_1); 250 ((void)___constructor__F_P13s__anonymous813s__anonymous8_autogen___1((&___ret__13s__anonymous8_1), ___src__13s__anonymous8_1)); 251 return ((struct __anonymous8 )___ret__13s__anonymous8_1); 234 252 } 235 253 static inline void ___constructor__F_P13s__anonymous8s_autogen___1(struct __anonymous8 *___dst__P13s__anonymous8_1, short __i__s_1){ … … 254 272 } 255 273 static inline struct __anonymous9 ___operator_assign__F13s__anonymous9_P13s__anonymous913s__anonymous9_autogen___1(struct __anonymous9 *___dst__P13s__anonymous9_1, struct __anonymous9 ___src__13s__anonymous9_1){ 274 struct __anonymous9 ___ret__13s__anonymous9_1; 256 275 ((void)((*___dst__P13s__anonymous9_1).__i__s_1=___src__13s__anonymous9_1.__i__s_1)); 257 return ((struct __anonymous9 )___src__13s__anonymous9_1); 276 ((void)___constructor__F_P13s__anonymous913s__anonymous9_autogen___1((&___ret__13s__anonymous9_1), ___src__13s__anonymous9_1)); 277 return ((struct __anonymous9 )___ret__13s__anonymous9_1); 258 278 } 259 279 static inline void ___constructor__F_P13s__anonymous9s_autogen___1(struct __anonymous9 *___dst__P13s__anonymous9_1, short __i__s_1){ … … 278 298 } 279 299 static inline struct __anonymous10 
___operator_assign__F14s__anonymous10_P14s__anonymous1014s__anonymous10_autogen___1(struct __anonymous10 *___dst__P14s__anonymous10_1, struct __anonymous10 ___src__14s__anonymous10_1){ 300 struct __anonymous10 ___ret__14s__anonymous10_1; 280 301 ((void)((*___dst__P14s__anonymous10_1).__i__s_1=___src__14s__anonymous10_1.__i__s_1)); 281 return ((struct __anonymous10 )___src__14s__anonymous10_1); 302 ((void)___constructor__F_P14s__anonymous1014s__anonymous10_autogen___1((&___ret__14s__anonymous10_1), ___src__14s__anonymous10_1)); 303 return ((struct __anonymous10 )___ret__14s__anonymous10_1); 282 304 } 283 305 static inline void ___constructor__F_P14s__anonymous10s_autogen___1(struct __anonymous10 *___dst__P14s__anonymous10_1, short __i__s_1){ … … 302 324 } 303 325 static inline struct __anonymous11 ___operator_assign__F14s__anonymous11_P14s__anonymous1114s__anonymous11_autogen___1(struct __anonymous11 *___dst__P14s__anonymous11_1, struct __anonymous11 ___src__14s__anonymous11_1){ 326 struct __anonymous11 ___ret__14s__anonymous11_1; 304 327 ((void)((*___dst__P14s__anonymous11_1).__i__s_1=___src__14s__anonymous11_1.__i__s_1)); 305 return ((struct __anonymous11 )___src__14s__anonymous11_1); 328 ((void)___constructor__F_P14s__anonymous1114s__anonymous11_autogen___1((&___ret__14s__anonymous11_1), ___src__14s__anonymous11_1)); 329 return ((struct __anonymous11 )___ret__14s__anonymous11_1); 306 330 } 307 331 static inline void ___constructor__F_P14s__anonymous11s_autogen___1(struct __anonymous11 *___dst__P14s__anonymous11_1, short __i__s_1){ … … 326 350 } 327 351 static inline struct __anonymous12 ___operator_assign__F14s__anonymous12_P14s__anonymous1214s__anonymous12_autogen___1(struct __anonymous12 *___dst__P14s__anonymous12_1, struct __anonymous12 ___src__14s__anonymous12_1){ 352 struct __anonymous12 ___ret__14s__anonymous12_1; 328 353 ((void)((*___dst__P14s__anonymous12_1).__i__s_1=___src__14s__anonymous12_1.__i__s_1)); 329 return ((struct __anonymous12 
)___src__14s__anonymous12_1); 354 ((void)___constructor__F_P14s__anonymous1214s__anonymous12_autogen___1((&___ret__14s__anonymous12_1), ___src__14s__anonymous12_1)); 355 return ((struct __anonymous12 )___ret__14s__anonymous12_1); 330 356 } 331 357 static inline void ___constructor__F_P14s__anonymous12s_autogen___1(struct __anonymous12 *___dst__P14s__anonymous12_1, short __i__s_1){ … … 350 376 } 351 377 static inline struct __anonymous13 ___operator_assign__F14s__anonymous13_P14s__anonymous1314s__anonymous13_autogen___1(struct __anonymous13 *___dst__P14s__anonymous13_1, struct __anonymous13 ___src__14s__anonymous13_1){ 378 struct __anonymous13 ___ret__14s__anonymous13_1; 352 379 ((void)((*___dst__P14s__anonymous13_1).__i__s_1=___src__14s__anonymous13_1.__i__s_1)); 353 return ((struct __anonymous13 )___src__14s__anonymous13_1); 380 ((void)___constructor__F_P14s__anonymous1314s__anonymous13_autogen___1((&___ret__14s__anonymous13_1), ___src__14s__anonymous13_1)); 381 return ((struct __anonymous13 )___ret__14s__anonymous13_1); 354 382 } 355 383 static inline void ___constructor__F_P14s__anonymous13s_autogen___1(struct __anonymous13 *___dst__P14s__anonymous13_1, short __i__s_1){ … … 374 402 } 375 403 static inline struct __anonymous14 ___operator_assign__F14s__anonymous14_P14s__anonymous1414s__anonymous14_autogen___1(struct __anonymous14 *___dst__P14s__anonymous14_1, struct __anonymous14 ___src__14s__anonymous14_1){ 404 struct __anonymous14 ___ret__14s__anonymous14_1; 376 405 ((void)((*___dst__P14s__anonymous14_1).__i__s_1=___src__14s__anonymous14_1.__i__s_1)); 377 return ((struct __anonymous14 )___src__14s__anonymous14_1); 406 ((void)___constructor__F_P14s__anonymous1414s__anonymous14_autogen___1((&___ret__14s__anonymous14_1), ___src__14s__anonymous14_1)); 407 return ((struct __anonymous14 )___ret__14s__anonymous14_1); 378 408 } 379 409 static inline void ___constructor__F_P14s__anonymous14s_autogen___1(struct __anonymous14 *___dst__P14s__anonymous14_1, short __i__s_1){ 
… … 398 428 } 399 429 static inline struct __anonymous15 ___operator_assign__F14s__anonymous15_P14s__anonymous1514s__anonymous15_autogen___1(struct __anonymous15 *___dst__P14s__anonymous15_1, struct __anonymous15 ___src__14s__anonymous15_1){ 430 struct __anonymous15 ___ret__14s__anonymous15_1; 400 431 ((void)((*___dst__P14s__anonymous15_1).__i__s_1=___src__14s__anonymous15_1.__i__s_1)); 401 return ((struct __anonymous15 )___src__14s__anonymous15_1); 432 ((void)___constructor__F_P14s__anonymous1514s__anonymous15_autogen___1((&___ret__14s__anonymous15_1), ___src__14s__anonymous15_1)); 433 return ((struct __anonymous15 )___ret__14s__anonymous15_1); 402 434 } 403 435 static inline void ___constructor__F_P14s__anonymous15s_autogen___1(struct __anonymous15 *___dst__P14s__anonymous15_1, short __i__s_1){ … … 438 470 } 439 471 static inline struct __anonymous16 ___operator_assign__F14s__anonymous16_P14s__anonymous1614s__anonymous16_autogen___1(struct __anonymous16 *___dst__P14s__anonymous16_1, struct __anonymous16 ___src__14s__anonymous16_1){ 472 struct __anonymous16 ___ret__14s__anonymous16_1; 440 473 ((void)((*___dst__P14s__anonymous16_1).__i__i_1=___src__14s__anonymous16_1.__i__i_1)); 441 return ((struct __anonymous16 )___src__14s__anonymous16_1); 474 ((void)___constructor__F_P14s__anonymous1614s__anonymous16_autogen___1((&___ret__14s__anonymous16_1), ___src__14s__anonymous16_1)); 475 return ((struct __anonymous16 )___ret__14s__anonymous16_1); 442 476 } 443 477 static inline void ___constructor__F_P14s__anonymous16i_autogen___1(struct __anonymous16 *___dst__P14s__anonymous16_1, int __i__i_1){ … … 462 496 } 463 497 static inline struct __anonymous17 ___operator_assign__F14s__anonymous17_P14s__anonymous1714s__anonymous17_autogen___1(struct __anonymous17 *___dst__P14s__anonymous17_1, struct __anonymous17 ___src__14s__anonymous17_1){ 498 struct __anonymous17 ___ret__14s__anonymous17_1; 464 499 
((void)((*___dst__P14s__anonymous17_1).__i__i_1=___src__14s__anonymous17_1.__i__i_1)); 465 return ((struct __anonymous17 )___src__14s__anonymous17_1); 500 ((void)___constructor__F_P14s__anonymous1714s__anonymous17_autogen___1((&___ret__14s__anonymous17_1), ___src__14s__anonymous17_1)); 501 return ((struct __anonymous17 )___ret__14s__anonymous17_1); 466 502 } 467 503 static inline void ___constructor__F_P14s__anonymous17i_autogen___1(struct __anonymous17 *___dst__P14s__anonymous17_1, int __i__i_1){ … … 486 522 } 487 523 static inline struct __anonymous18 ___operator_assign__F14s__anonymous18_P14s__anonymous1814s__anonymous18_autogen___1(struct __anonymous18 *___dst__P14s__anonymous18_1, struct __anonymous18 ___src__14s__anonymous18_1){ 524 struct __anonymous18 ___ret__14s__anonymous18_1; 488 525 ((void)((*___dst__P14s__anonymous18_1).__i__i_1=___src__14s__anonymous18_1.__i__i_1)); 489 return ((struct __anonymous18 )___src__14s__anonymous18_1); 526 ((void)___constructor__F_P14s__anonymous1814s__anonymous18_autogen___1((&___ret__14s__anonymous18_1), ___src__14s__anonymous18_1)); 527 return ((struct __anonymous18 )___ret__14s__anonymous18_1); 490 528 } 491 529 static inline void ___constructor__F_P14s__anonymous18i_autogen___1(struct __anonymous18 *___dst__P14s__anonymous18_1, int __i__i_1){ … … 510 548 } 511 549 static inline struct __anonymous19 ___operator_assign__F14s__anonymous19_P14s__anonymous1914s__anonymous19_autogen___1(struct __anonymous19 *___dst__P14s__anonymous19_1, struct __anonymous19 ___src__14s__anonymous19_1){ 550 struct __anonymous19 ___ret__14s__anonymous19_1; 512 551 ((void)((*___dst__P14s__anonymous19_1).__i__i_1=___src__14s__anonymous19_1.__i__i_1)); 513 return ((struct __anonymous19 )___src__14s__anonymous19_1); 552 ((void)___constructor__F_P14s__anonymous1914s__anonymous19_autogen___1((&___ret__14s__anonymous19_1), ___src__14s__anonymous19_1)); 553 return ((struct __anonymous19 )___ret__14s__anonymous19_1); 514 554 } 515 555 static inline void 
___constructor__F_P14s__anonymous19i_autogen___1(struct __anonymous19 *___dst__P14s__anonymous19_1, int __i__i_1){ … … 534 574 } 535 575 static inline struct __anonymous20 ___operator_assign__F14s__anonymous20_P14s__anonymous2014s__anonymous20_autogen___1(struct __anonymous20 *___dst__P14s__anonymous20_1, struct __anonymous20 ___src__14s__anonymous20_1){ 576 struct __anonymous20 ___ret__14s__anonymous20_1; 536 577 ((void)((*___dst__P14s__anonymous20_1).__i__i_1=___src__14s__anonymous20_1.__i__i_1)); 537 return ((struct __anonymous20 )___src__14s__anonymous20_1); 578 ((void)___constructor__F_P14s__anonymous2014s__anonymous20_autogen___1((&___ret__14s__anonymous20_1), ___src__14s__anonymous20_1)); 579 return ((struct __anonymous20 )___ret__14s__anonymous20_1); 538 580 } 539 581 static inline void ___constructor__F_P14s__anonymous20i_autogen___1(struct __anonymous20 *___dst__P14s__anonymous20_1, int __i__i_1){ … … 558 600 } 559 601 static inline struct __anonymous21 ___operator_assign__F14s__anonymous21_P14s__anonymous2114s__anonymous21_autogen___1(struct __anonymous21 *___dst__P14s__anonymous21_1, struct __anonymous21 ___src__14s__anonymous21_1){ 602 struct __anonymous21 ___ret__14s__anonymous21_1; 560 603 ((void)((*___dst__P14s__anonymous21_1).__i__i_1=___src__14s__anonymous21_1.__i__i_1)); 561 return ((struct __anonymous21 )___src__14s__anonymous21_1); 604 ((void)___constructor__F_P14s__anonymous2114s__anonymous21_autogen___1((&___ret__14s__anonymous21_1), ___src__14s__anonymous21_1)); 605 return ((struct __anonymous21 )___ret__14s__anonymous21_1); 562 606 } 563 607 static inline void ___constructor__F_P14s__anonymous21i_autogen___1(struct __anonymous21 *___dst__P14s__anonymous21_1, int __i__i_1){ … … 582 626 } 583 627 static inline struct __anonymous22 ___operator_assign__F14s__anonymous22_P14s__anonymous2214s__anonymous22_autogen___1(struct __anonymous22 *___dst__P14s__anonymous22_1, struct __anonymous22 ___src__14s__anonymous22_1){ 628 struct __anonymous22 
___ret__14s__anonymous22_1; 584 629 ((void)((*___dst__P14s__anonymous22_1).__i__i_1=___src__14s__anonymous22_1.__i__i_1)); 585 return ((struct __anonymous22 )___src__14s__anonymous22_1); 630 ((void)___constructor__F_P14s__anonymous2214s__anonymous22_autogen___1((&___ret__14s__anonymous22_1), ___src__14s__anonymous22_1)); 631 return ((struct __anonymous22 )___ret__14s__anonymous22_1); 586 632 } 587 633 static inline void ___constructor__F_P14s__anonymous22i_autogen___1(struct __anonymous22 *___dst__P14s__anonymous22_1, int __i__i_1){ … … 606 652 } 607 653 static inline struct __anonymous23 ___operator_assign__F14s__anonymous23_P14s__anonymous2314s__anonymous23_autogen___1(struct __anonymous23 *___dst__P14s__anonymous23_1, struct __anonymous23 ___src__14s__anonymous23_1){ 654 struct __anonymous23 ___ret__14s__anonymous23_1; 608 655 ((void)((*___dst__P14s__anonymous23_1).__i__i_1=___src__14s__anonymous23_1.__i__i_1)); 609 return ((struct __anonymous23 )___src__14s__anonymous23_1); 656 ((void)___constructor__F_P14s__anonymous2314s__anonymous23_autogen___1((&___ret__14s__anonymous23_1), ___src__14s__anonymous23_1)); 657 return ((struct __anonymous23 )___ret__14s__anonymous23_1); 610 658 } 611 659 static inline void ___constructor__F_P14s__anonymous23i_autogen___1(struct __anonymous23 *___dst__P14s__anonymous23_1, int __i__i_1){ -
src/tests/.expect/32/extension.txt
r6ac2ada r1ed33fed 33 33 } 34 34 static inline struct S ___operator_assign__F2sS_P2sS2sS_autogen___1(struct S *___dst__P2sS_1, struct S ___src__2sS_1){ 35 struct S ___ret__2sS_1; 35 36 ((void)((*___dst__P2sS_1).__a__i_1=___src__2sS_1.__a__i_1)); 36 37 ((void)((*___dst__P2sS_1).__b__i_1=___src__2sS_1.__b__i_1)); 37 38 ((void)((*___dst__P2sS_1).__c__i_1=___src__2sS_1.__c__i_1)); 38 return ((struct S )___src__2sS_1); 39 ((void)___constructor__F_P2sS2sS_autogen___1((&___ret__2sS_1), ___src__2sS_1)); 40 return ((struct S )___ret__2sS_1); 39 41 } 40 42 static inline void ___constructor__F_P2sSi_autogen___1(struct S *___dst__P2sS_1, int __a__i_1){ … … 66 68 } 67 69 static inline union U ___operator_assign__F2uU_P2uU2uU_autogen___1(union U *___dst__P2uU_1, union U ___src__2uU_1){ 70 union U ___ret__2uU_1; 68 71 ((void)__builtin_memcpy(((void *)___dst__P2uU_1), ((const void *)(&___src__2uU_1)), sizeof(union U ))); 69 return ((union U )___src__2uU_1); 72 ((void)___constructor__F_P2uU2uU_autogen___1((&___ret__2uU_1), ___src__2uU_1)); 73 return ((union U )___ret__2uU_1); 70 74 } 71 75 static inline void ___constructor__F_P2uUi_autogen___1(union U *___dst__P2uU_1, int __src__i_1){ -
src/tests/.expect/32/gccExtensions.txt
r6ac2ada r1ed33fed 59 59 } 60 60 inline struct S ___operator_assign__F2sS_P2sS2sS_autogen___2(struct S *___dst__P2sS_2, struct S ___src__2sS_2){ 61 struct S ___ret__2sS_2; 61 62 ((void)((*___dst__P2sS_2).__a__i_2=___src__2sS_2.__a__i_2)); 62 63 ((void)((*___dst__P2sS_2).__b__i_2=___src__2sS_2.__b__i_2)); 63 64 ((void)((*___dst__P2sS_2).__c__i_2=___src__2sS_2.__c__i_2)); 64 return ((struct S )___src__2sS_2); 65 ((void)___constructor__F_P2sS2sS_autogen___2((&___ret__2sS_2), ___src__2sS_2)); 66 return ((struct S )___ret__2sS_2); 65 67 } 66 68 inline void ___constructor__F_P2sSi_autogen___2(struct S *___dst__P2sS_2, int __a__i_2){ … … 109 111 } 110 112 inline struct s2 ___operator_assign__F3ss2_P3ss23ss2_autogen___2(struct s2 *___dst__P3ss2_2, struct s2 ___src__3ss2_2){ 113 struct s2 ___ret__3ss2_2; 111 114 ((void)((*___dst__P3ss2_2).__i__i_2=___src__3ss2_2.__i__i_2)); 112 return ((struct s2 )___src__3ss2_2); 115 ((void)___constructor__F_P3ss23ss2_autogen___2((&___ret__3ss2_2), ___src__3ss2_2)); 116 return ((struct s2 )___ret__3ss2_2); 113 117 } 114 118 inline void ___constructor__F_P3ss2i_autogen___2(struct s2 *___dst__P3ss2_2, int __i__i_2){ … … 128 132 } 129 133 inline struct s3 ___operator_assign__F3ss3_P3ss33ss3_autogen___2(struct s3 *___dst__P3ss3_2, struct s3 ___src__3ss3_2){ 134 struct s3 ___ret__3ss3_2; 130 135 ((void)((*___dst__P3ss3_2).__i__i_2=___src__3ss3_2.__i__i_2)); 131 return ((struct s3 )___src__3ss3_2); 136 ((void)___constructor__F_P3ss33ss3_autogen___2((&___ret__3ss3_2), ___src__3ss3_2)); 137 return ((struct s3 )___ret__3ss3_2); 132 138 } 133 139 inline void ___constructor__F_P3ss3i_autogen___2(struct s3 *___dst__P3ss3_2, int __i__i_2){ … … 149 155 } 150 156 inline struct s4 ___operator_assign__F3ss4_P3ss43ss4_autogen___2(struct s4 *___dst__P3ss4_2, struct s4 ___src__3ss4_2){ 157 struct s4 ___ret__3ss4_2; 151 158 ((void)((*___dst__P3ss4_2).__i__i_2=___src__3ss4_2.__i__i_2)); 152 return ((struct s4 )___src__3ss4_2); 159 
((void)___constructor__F_P3ss43ss4_autogen___2((&___ret__3ss4_2), ___src__3ss4_2)); 160 return ((struct s4 )___ret__3ss4_2); 153 161 } 154 162 inline void ___constructor__F_P3ss4i_autogen___2(struct s4 *___dst__P3ss4_2, int __i__i_2){ -
src/tests/.expect/64/KRfunctions.txt
r6ac2ada r1ed33fed 31 31 } 32 32 static inline struct S ___operator_assign__F2sS_P2sS2sS_autogen___1(struct S *___dst__P2sS_1, struct S ___src__2sS_1){ 33 struct S ___ret__2sS_1; 33 34 ((void)((*___dst__P2sS_1).__i__i_1=___src__2sS_1.__i__i_1)); 34 return ((struct S )___src__2sS_1); 35 ((void)___constructor__F_P2sS2sS_autogen___1((&___ret__2sS_1), ___src__2sS_1)); 36 return ((struct S )___ret__2sS_1); 35 37 } 36 38 static inline void ___constructor__F_P2sSi_autogen___1(struct S *___dst__P2sS_1, int __i__i_1){ -
src/tests/.expect/64/attributes.txt
r6ac2ada r1ed33fed 22 22 } 23 23 static inline struct __anonymous0 ___operator_assign__F13s__anonymous0_P13s__anonymous013s__anonymous0_autogen___1(struct __anonymous0 *___dst__P13s__anonymous0_1, struct __anonymous0 ___src__13s__anonymous0_1){ 24 return ((struct __anonymous0 )___src__13s__anonymous0_1); 24 struct __anonymous0 ___ret__13s__anonymous0_1; 25 ((void)___constructor__F_P13s__anonymous013s__anonymous0_autogen___1((&___ret__13s__anonymous0_1), ___src__13s__anonymous0_1)); 26 return ((struct __anonymous0 )___ret__13s__anonymous0_1); 25 27 } 26 28 __attribute__ ((unused)) struct Agn1; … … 38 40 } 39 41 static inline struct Agn2 ___operator_assign__F5sAgn2_P5sAgn25sAgn2_autogen___1(struct Agn2 *___dst__P5sAgn2_1, struct Agn2 ___src__5sAgn2_1){ 40 return ((struct Agn2 )___src__5sAgn2_1); 42 struct Agn2 ___ret__5sAgn2_1; 43 ((void)___constructor__F_P5sAgn25sAgn2_autogen___1((&___ret__5sAgn2_1), ___src__5sAgn2_1)); 44 return ((struct Agn2 )___ret__5sAgn2_1); 41 45 } 42 46 enum __attribute__ ((unused)) __anonymous1 { … … 99 103 } 100 104 static inline struct Fdl ___operator_assign__F4sFdl_P4sFdl4sFdl_autogen___1(struct Fdl *___dst__P4sFdl_1, struct Fdl ___src__4sFdl_1){ 105 struct Fdl ___ret__4sFdl_1; 101 106 ((void)((*___dst__P4sFdl_1).__f1__i_1=___src__4sFdl_1.__f1__i_1)); 102 107 ((void)((*___dst__P4sFdl_1).__f2__i_1=___src__4sFdl_1.__f2__i_1)); … … 108 113 ((void)((*___dst__P4sFdl_1).__f8__i_1=___src__4sFdl_1.__f8__i_1)); 109 114 ((void)((*___dst__P4sFdl_1).__f9__Pi_1=___src__4sFdl_1.__f9__Pi_1)); 110 return ((struct Fdl )___src__4sFdl_1); 115 ((void)___constructor__F_P4sFdl4sFdl_autogen___1((&___ret__4sFdl_1), ___src__4sFdl_1)); 116 return ((struct Fdl )___ret__4sFdl_1); 111 117 } 112 118 static inline void ___constructor__F_P4sFdli_autogen___1(struct Fdl *___dst__P4sFdl_1, int __f1__i_1){ … … 292 298 } 293 299 inline struct __anonymous4 ___operator_assign__F13s__anonymous4_P13s__anonymous413s__anonymous4_autogen___2(struct __anonymous4 
*___dst__P13s__anonymous4_2, struct __anonymous4 ___src__13s__anonymous4_2){ 300 struct __anonymous4 ___ret__13s__anonymous4_2; 294 301 ((void)((*___dst__P13s__anonymous4_2).__i__i_2=___src__13s__anonymous4_2.__i__i_2)); 295 return ((struct __anonymous4 )___src__13s__anonymous4_2); 302 ((void)___constructor__F_P13s__anonymous413s__anonymous4_autogen___2((&___ret__13s__anonymous4_2), ___src__13s__anonymous4_2)); 303 return ((struct __anonymous4 )___ret__13s__anonymous4_2); 296 304 } 297 305 inline void ___constructor__F_P13s__anonymous4i_autogen___2(struct __anonymous4 *___dst__P13s__anonymous4_2, int __i__i_2){ … … 310 318 } 311 319 inline enum __anonymous5 ___operator_assign__F13e__anonymous5_P13e__anonymous513e__anonymous5_intrinsic___2(enum __anonymous5 *___dst__P13e__anonymous5_2, enum __anonymous5 ___src__13e__anonymous5_2){ 312 return ((enum __anonymous5 )((*___dst__P13e__anonymous5_2)=___src__13e__anonymous5_2)); 320 enum __anonymous5 ___ret__13e__anonymous5_2; 321 ((void)(___ret__13e__anonymous5_2=((*___dst__P13e__anonymous5_2)=___src__13e__anonymous5_2)) /* ?{} */); 322 return ((enum __anonymous5 )___ret__13e__anonymous5_2); 313 323 } 314 324 ((void)sizeof(enum __anonymous5 )); … … 338 348 } 339 349 static inline struct Vad ___operator_assign__F4sVad_P4sVad4sVad_autogen___1(struct Vad *___dst__P4sVad_1, struct Vad ___src__4sVad_1){ 340 return ((struct Vad )___src__4sVad_1); 341 } 350 struct Vad ___ret__4sVad_1; 351 ((void)___constructor__F_P4sVad4sVad_autogen___1((&___ret__4sVad_1), ___src__4sVad_1)); 352 return ((struct Vad )___ret__4sVad_1); 353 } -
src/tests/.expect/64/declarationSpecifier.txt
r6ac2ada r1ed33fed 30 30 } 31 31 static inline struct __anonymous0 ___operator_assign__F13s__anonymous0_P13s__anonymous013s__anonymous0_autogen___1(struct __anonymous0 *___dst__P13s__anonymous0_1, struct __anonymous0 ___src__13s__anonymous0_1){ 32 struct __anonymous0 ___ret__13s__anonymous0_1; 32 33 ((void)((*___dst__P13s__anonymous0_1).__i__i_1=___src__13s__anonymous0_1.__i__i_1)); 33 return ((struct __anonymous0 )___src__13s__anonymous0_1); 34 ((void)___constructor__F_P13s__anonymous013s__anonymous0_autogen___1((&___ret__13s__anonymous0_1), ___src__13s__anonymous0_1)); 35 return ((struct __anonymous0 )___ret__13s__anonymous0_1); 34 36 } 35 37 static inline void ___constructor__F_P13s__anonymous0i_autogen___1(struct __anonymous0 *___dst__P13s__anonymous0_1, int __i__i_1){ … … 54 56 } 55 57 static inline struct __anonymous1 ___operator_assign__F13s__anonymous1_P13s__anonymous113s__anonymous1_autogen___1(struct __anonymous1 *___dst__P13s__anonymous1_1, struct __anonymous1 ___src__13s__anonymous1_1){ 58 struct __anonymous1 ___ret__13s__anonymous1_1; 56 59 ((void)((*___dst__P13s__anonymous1_1).__i__i_1=___src__13s__anonymous1_1.__i__i_1)); 57 return ((struct __anonymous1 )___src__13s__anonymous1_1); 60 ((void)___constructor__F_P13s__anonymous113s__anonymous1_autogen___1((&___ret__13s__anonymous1_1), ___src__13s__anonymous1_1)); 61 return ((struct __anonymous1 )___ret__13s__anonymous1_1); 58 62 } 59 63 static inline void ___constructor__F_P13s__anonymous1i_autogen___1(struct __anonymous1 *___dst__P13s__anonymous1_1, int __i__i_1){ … … 78 82 } 79 83 static inline struct __anonymous2 ___operator_assign__F13s__anonymous2_P13s__anonymous213s__anonymous2_autogen___1(struct __anonymous2 *___dst__P13s__anonymous2_1, struct __anonymous2 ___src__13s__anonymous2_1){ 84 struct __anonymous2 ___ret__13s__anonymous2_1; 80 85 ((void)((*___dst__P13s__anonymous2_1).__i__i_1=___src__13s__anonymous2_1.__i__i_1)); 81 return ((struct __anonymous2 )___src__13s__anonymous2_1); 86 
((void)___constructor__F_P13s__anonymous213s__anonymous2_autogen___1((&___ret__13s__anonymous2_1), ___src__13s__anonymous2_1)); 87 return ((struct __anonymous2 )___ret__13s__anonymous2_1); 82 88 } 83 89 static inline void ___constructor__F_P13s__anonymous2i_autogen___1(struct __anonymous2 *___dst__P13s__anonymous2_1, int __i__i_1){ … … 102 108 } 103 109 static inline struct __anonymous3 ___operator_assign__F13s__anonymous3_P13s__anonymous313s__anonymous3_autogen___1(struct __anonymous3 *___dst__P13s__anonymous3_1, struct __anonymous3 ___src__13s__anonymous3_1){ 110 struct __anonymous3 ___ret__13s__anonymous3_1; 104 111 ((void)((*___dst__P13s__anonymous3_1).__i__i_1=___src__13s__anonymous3_1.__i__i_1)); 105 return ((struct __anonymous3 )___src__13s__anonymous3_1); 112 ((void)___constructor__F_P13s__anonymous313s__anonymous3_autogen___1((&___ret__13s__anonymous3_1), ___src__13s__anonymous3_1)); 113 return ((struct __anonymous3 )___ret__13s__anonymous3_1); 106 114 } 107 115 static inline void ___constructor__F_P13s__anonymous3i_autogen___1(struct __anonymous3 *___dst__P13s__anonymous3_1, int __i__i_1){ … … 126 134 } 127 135 static inline struct __anonymous4 ___operator_assign__F13s__anonymous4_P13s__anonymous413s__anonymous4_autogen___1(struct __anonymous4 *___dst__P13s__anonymous4_1, struct __anonymous4 ___src__13s__anonymous4_1){ 136 struct __anonymous4 ___ret__13s__anonymous4_1; 128 137 ((void)((*___dst__P13s__anonymous4_1).__i__i_1=___src__13s__anonymous4_1.__i__i_1)); 129 return ((struct __anonymous4 )___src__13s__anonymous4_1); 138 ((void)___constructor__F_P13s__anonymous413s__anonymous4_autogen___1((&___ret__13s__anonymous4_1), ___src__13s__anonymous4_1)); 139 return ((struct __anonymous4 )___ret__13s__anonymous4_1); 130 140 } 131 141 static inline void ___constructor__F_P13s__anonymous4i_autogen___1(struct __anonymous4 *___dst__P13s__anonymous4_1, int __i__i_1){ … … 150 160 } 151 161 static inline struct __anonymous5 
___operator_assign__F13s__anonymous5_P13s__anonymous513s__anonymous5_autogen___1(struct __anonymous5 *___dst__P13s__anonymous5_1, struct __anonymous5 ___src__13s__anonymous5_1){ 162 struct __anonymous5 ___ret__13s__anonymous5_1; 152 163 ((void)((*___dst__P13s__anonymous5_1).__i__i_1=___src__13s__anonymous5_1.__i__i_1)); 153 return ((struct __anonymous5 )___src__13s__anonymous5_1); 164 ((void)___constructor__F_P13s__anonymous513s__anonymous5_autogen___1((&___ret__13s__anonymous5_1), ___src__13s__anonymous5_1)); 165 return ((struct __anonymous5 )___ret__13s__anonymous5_1); 154 166 } 155 167 static inline void ___constructor__F_P13s__anonymous5i_autogen___1(struct __anonymous5 *___dst__P13s__anonymous5_1, int __i__i_1){ … … 174 186 } 175 187 static inline struct __anonymous6 ___operator_assign__F13s__anonymous6_P13s__anonymous613s__anonymous6_autogen___1(struct __anonymous6 *___dst__P13s__anonymous6_1, struct __anonymous6 ___src__13s__anonymous6_1){ 188 struct __anonymous6 ___ret__13s__anonymous6_1; 176 189 ((void)((*___dst__P13s__anonymous6_1).__i__i_1=___src__13s__anonymous6_1.__i__i_1)); 177 return ((struct __anonymous6 )___src__13s__anonymous6_1); 190 ((void)___constructor__F_P13s__anonymous613s__anonymous6_autogen___1((&___ret__13s__anonymous6_1), ___src__13s__anonymous6_1)); 191 return ((struct __anonymous6 )___ret__13s__anonymous6_1); 178 192 } 179 193 static inline void ___constructor__F_P13s__anonymous6i_autogen___1(struct __anonymous6 *___dst__P13s__anonymous6_1, int __i__i_1){ … … 198 212 } 199 213 static inline struct __anonymous7 ___operator_assign__F13s__anonymous7_P13s__anonymous713s__anonymous7_autogen___1(struct __anonymous7 *___dst__P13s__anonymous7_1, struct __anonymous7 ___src__13s__anonymous7_1){ 214 struct __anonymous7 ___ret__13s__anonymous7_1; 200 215 ((void)((*___dst__P13s__anonymous7_1).__i__i_1=___src__13s__anonymous7_1.__i__i_1)); 201 return ((struct __anonymous7 )___src__13s__anonymous7_1); 216 
((void)___constructor__F_P13s__anonymous713s__anonymous7_autogen___1((&___ret__13s__anonymous7_1), ___src__13s__anonymous7_1)); 217 return ((struct __anonymous7 )___ret__13s__anonymous7_1); 202 218 } 203 219 static inline void ___constructor__F_P13s__anonymous7i_autogen___1(struct __anonymous7 *___dst__P13s__anonymous7_1, int __i__i_1){ … … 230 246 } 231 247 static inline struct __anonymous8 ___operator_assign__F13s__anonymous8_P13s__anonymous813s__anonymous8_autogen___1(struct __anonymous8 *___dst__P13s__anonymous8_1, struct __anonymous8 ___src__13s__anonymous8_1){ 248 struct __anonymous8 ___ret__13s__anonymous8_1; 232 249 ((void)((*___dst__P13s__anonymous8_1).__i__s_1=___src__13s__anonymous8_1.__i__s_1)); 233 return ((struct __anonymous8 )___src__13s__anonymous8_1); 250 ((void)___constructor__F_P13s__anonymous813s__anonymous8_autogen___1((&___ret__13s__anonymous8_1), ___src__13s__anonymous8_1)); 251 return ((struct __anonymous8 )___ret__13s__anonymous8_1); 234 252 } 235 253 static inline void ___constructor__F_P13s__anonymous8s_autogen___1(struct __anonymous8 *___dst__P13s__anonymous8_1, short __i__s_1){ … … 254 272 } 255 273 static inline struct __anonymous9 ___operator_assign__F13s__anonymous9_P13s__anonymous913s__anonymous9_autogen___1(struct __anonymous9 *___dst__P13s__anonymous9_1, struct __anonymous9 ___src__13s__anonymous9_1){ 274 struct __anonymous9 ___ret__13s__anonymous9_1; 256 275 ((void)((*___dst__P13s__anonymous9_1).__i__s_1=___src__13s__anonymous9_1.__i__s_1)); 257 return ((struct __anonymous9 )___src__13s__anonymous9_1); 276 ((void)___constructor__F_P13s__anonymous913s__anonymous9_autogen___1((&___ret__13s__anonymous9_1), ___src__13s__anonymous9_1)); 277 return ((struct __anonymous9 )___ret__13s__anonymous9_1); 258 278 } 259 279 static inline void ___constructor__F_P13s__anonymous9s_autogen___1(struct __anonymous9 *___dst__P13s__anonymous9_1, short __i__s_1){ … … 278 298 } 279 299 static inline struct __anonymous10 
___operator_assign__F14s__anonymous10_P14s__anonymous1014s__anonymous10_autogen___1(struct __anonymous10 *___dst__P14s__anonymous10_1, struct __anonymous10 ___src__14s__anonymous10_1){ 300 struct __anonymous10 ___ret__14s__anonymous10_1; 280 301 ((void)((*___dst__P14s__anonymous10_1).__i__s_1=___src__14s__anonymous10_1.__i__s_1)); 281 return ((struct __anonymous10 )___src__14s__anonymous10_1); 302 ((void)___constructor__F_P14s__anonymous1014s__anonymous10_autogen___1((&___ret__14s__anonymous10_1), ___src__14s__anonymous10_1)); 303 return ((struct __anonymous10 )___ret__14s__anonymous10_1); 282 304 } 283 305 static inline void ___constructor__F_P14s__anonymous10s_autogen___1(struct __anonymous10 *___dst__P14s__anonymous10_1, short __i__s_1){ … … 302 324 } 303 325 static inline struct __anonymous11 ___operator_assign__F14s__anonymous11_P14s__anonymous1114s__anonymous11_autogen___1(struct __anonymous11 *___dst__P14s__anonymous11_1, struct __anonymous11 ___src__14s__anonymous11_1){ 326 struct __anonymous11 ___ret__14s__anonymous11_1; 304 327 ((void)((*___dst__P14s__anonymous11_1).__i__s_1=___src__14s__anonymous11_1.__i__s_1)); 305 return ((struct __anonymous11 )___src__14s__anonymous11_1); 328 ((void)___constructor__F_P14s__anonymous1114s__anonymous11_autogen___1((&___ret__14s__anonymous11_1), ___src__14s__anonymous11_1)); 329 return ((struct __anonymous11 )___ret__14s__anonymous11_1); 306 330 } 307 331 static inline void ___constructor__F_P14s__anonymous11s_autogen___1(struct __anonymous11 *___dst__P14s__anonymous11_1, short __i__s_1){ … … 326 350 } 327 351 static inline struct __anonymous12 ___operator_assign__F14s__anonymous12_P14s__anonymous1214s__anonymous12_autogen___1(struct __anonymous12 *___dst__P14s__anonymous12_1, struct __anonymous12 ___src__14s__anonymous12_1){ 352 struct __anonymous12 ___ret__14s__anonymous12_1; 328 353 ((void)((*___dst__P14s__anonymous12_1).__i__s_1=___src__14s__anonymous12_1.__i__s_1)); 329 return ((struct __anonymous12 
)___src__14s__anonymous12_1); 354 ((void)___constructor__F_P14s__anonymous1214s__anonymous12_autogen___1((&___ret__14s__anonymous12_1), ___src__14s__anonymous12_1)); 355 return ((struct __anonymous12 )___ret__14s__anonymous12_1); 330 356 } 331 357 static inline void ___constructor__F_P14s__anonymous12s_autogen___1(struct __anonymous12 *___dst__P14s__anonymous12_1, short __i__s_1){ … … 350 376 } 351 377 static inline struct __anonymous13 ___operator_assign__F14s__anonymous13_P14s__anonymous1314s__anonymous13_autogen___1(struct __anonymous13 *___dst__P14s__anonymous13_1, struct __anonymous13 ___src__14s__anonymous13_1){ 378 struct __anonymous13 ___ret__14s__anonymous13_1; 352 379 ((void)((*___dst__P14s__anonymous13_1).__i__s_1=___src__14s__anonymous13_1.__i__s_1)); 353 return ((struct __anonymous13 )___src__14s__anonymous13_1); 380 ((void)___constructor__F_P14s__anonymous1314s__anonymous13_autogen___1((&___ret__14s__anonymous13_1), ___src__14s__anonymous13_1)); 381 return ((struct __anonymous13 )___ret__14s__anonymous13_1); 354 382 } 355 383 static inline void ___constructor__F_P14s__anonymous13s_autogen___1(struct __anonymous13 *___dst__P14s__anonymous13_1, short __i__s_1){ … … 374 402 } 375 403 static inline struct __anonymous14 ___operator_assign__F14s__anonymous14_P14s__anonymous1414s__anonymous14_autogen___1(struct __anonymous14 *___dst__P14s__anonymous14_1, struct __anonymous14 ___src__14s__anonymous14_1){ 404 struct __anonymous14 ___ret__14s__anonymous14_1; 376 405 ((void)((*___dst__P14s__anonymous14_1).__i__s_1=___src__14s__anonymous14_1.__i__s_1)); 377 return ((struct __anonymous14 )___src__14s__anonymous14_1); 406 ((void)___constructor__F_P14s__anonymous1414s__anonymous14_autogen___1((&___ret__14s__anonymous14_1), ___src__14s__anonymous14_1)); 407 return ((struct __anonymous14 )___ret__14s__anonymous14_1); 378 408 } 379 409 static inline void ___constructor__F_P14s__anonymous14s_autogen___1(struct __anonymous14 *___dst__P14s__anonymous14_1, short __i__s_1){ 
… … 398 428 } 399 429 static inline struct __anonymous15 ___operator_assign__F14s__anonymous15_P14s__anonymous1514s__anonymous15_autogen___1(struct __anonymous15 *___dst__P14s__anonymous15_1, struct __anonymous15 ___src__14s__anonymous15_1){ 430 struct __anonymous15 ___ret__14s__anonymous15_1; 400 431 ((void)((*___dst__P14s__anonymous15_1).__i__s_1=___src__14s__anonymous15_1.__i__s_1)); 401 return ((struct __anonymous15 )___src__14s__anonymous15_1); 432 ((void)___constructor__F_P14s__anonymous1514s__anonymous15_autogen___1((&___ret__14s__anonymous15_1), ___src__14s__anonymous15_1)); 433 return ((struct __anonymous15 )___ret__14s__anonymous15_1); 402 434 } 403 435 static inline void ___constructor__F_P14s__anonymous15s_autogen___1(struct __anonymous15 *___dst__P14s__anonymous15_1, short __i__s_1){ … … 438 470 } 439 471 static inline struct __anonymous16 ___operator_assign__F14s__anonymous16_P14s__anonymous1614s__anonymous16_autogen___1(struct __anonymous16 *___dst__P14s__anonymous16_1, struct __anonymous16 ___src__14s__anonymous16_1){ 472 struct __anonymous16 ___ret__14s__anonymous16_1; 440 473 ((void)((*___dst__P14s__anonymous16_1).__i__i_1=___src__14s__anonymous16_1.__i__i_1)); 441 return ((struct __anonymous16 )___src__14s__anonymous16_1); 474 ((void)___constructor__F_P14s__anonymous1614s__anonymous16_autogen___1((&___ret__14s__anonymous16_1), ___src__14s__anonymous16_1)); 475 return ((struct __anonymous16 )___ret__14s__anonymous16_1); 442 476 } 443 477 static inline void ___constructor__F_P14s__anonymous16i_autogen___1(struct __anonymous16 *___dst__P14s__anonymous16_1, int __i__i_1){ … … 462 496 } 463 497 static inline struct __anonymous17 ___operator_assign__F14s__anonymous17_P14s__anonymous1714s__anonymous17_autogen___1(struct __anonymous17 *___dst__P14s__anonymous17_1, struct __anonymous17 ___src__14s__anonymous17_1){ 498 struct __anonymous17 ___ret__14s__anonymous17_1; 464 499 
((void)((*___dst__P14s__anonymous17_1).__i__i_1=___src__14s__anonymous17_1.__i__i_1)); 465 return ((struct __anonymous17 )___src__14s__anonymous17_1); 500 ((void)___constructor__F_P14s__anonymous1714s__anonymous17_autogen___1((&___ret__14s__anonymous17_1), ___src__14s__anonymous17_1)); 501 return ((struct __anonymous17 )___ret__14s__anonymous17_1); 466 502 } 467 503 static inline void ___constructor__F_P14s__anonymous17i_autogen___1(struct __anonymous17 *___dst__P14s__anonymous17_1, int __i__i_1){ … … 486 522 } 487 523 static inline struct __anonymous18 ___operator_assign__F14s__anonymous18_P14s__anonymous1814s__anonymous18_autogen___1(struct __anonymous18 *___dst__P14s__anonymous18_1, struct __anonymous18 ___src__14s__anonymous18_1){ 524 struct __anonymous18 ___ret__14s__anonymous18_1; 488 525 ((void)((*___dst__P14s__anonymous18_1).__i__i_1=___src__14s__anonymous18_1.__i__i_1)); 489 return ((struct __anonymous18 )___src__14s__anonymous18_1); 526 ((void)___constructor__F_P14s__anonymous1814s__anonymous18_autogen___1((&___ret__14s__anonymous18_1), ___src__14s__anonymous18_1)); 527 return ((struct __anonymous18 )___ret__14s__anonymous18_1); 490 528 } 491 529 static inline void ___constructor__F_P14s__anonymous18i_autogen___1(struct __anonymous18 *___dst__P14s__anonymous18_1, int __i__i_1){ … … 510 548 } 511 549 static inline struct __anonymous19 ___operator_assign__F14s__anonymous19_P14s__anonymous1914s__anonymous19_autogen___1(struct __anonymous19 *___dst__P14s__anonymous19_1, struct __anonymous19 ___src__14s__anonymous19_1){ 550 struct __anonymous19 ___ret__14s__anonymous19_1; 512 551 ((void)((*___dst__P14s__anonymous19_1).__i__i_1=___src__14s__anonymous19_1.__i__i_1)); 513 return ((struct __anonymous19 )___src__14s__anonymous19_1); 552 ((void)___constructor__F_P14s__anonymous1914s__anonymous19_autogen___1((&___ret__14s__anonymous19_1), ___src__14s__anonymous19_1)); 553 return ((struct __anonymous19 )___ret__14s__anonymous19_1); 514 554 } 515 555 static inline void 
___constructor__F_P14s__anonymous19i_autogen___1(struct __anonymous19 *___dst__P14s__anonymous19_1, int __i__i_1){ … … 534 574 } 535 575 static inline struct __anonymous20 ___operator_assign__F14s__anonymous20_P14s__anonymous2014s__anonymous20_autogen___1(struct __anonymous20 *___dst__P14s__anonymous20_1, struct __anonymous20 ___src__14s__anonymous20_1){ 576 struct __anonymous20 ___ret__14s__anonymous20_1; 536 577 ((void)((*___dst__P14s__anonymous20_1).__i__i_1=___src__14s__anonymous20_1.__i__i_1)); 537 return ((struct __anonymous20 )___src__14s__anonymous20_1); 578 ((void)___constructor__F_P14s__anonymous2014s__anonymous20_autogen___1((&___ret__14s__anonymous20_1), ___src__14s__anonymous20_1)); 579 return ((struct __anonymous20 )___ret__14s__anonymous20_1); 538 580 } 539 581 static inline void ___constructor__F_P14s__anonymous20i_autogen___1(struct __anonymous20 *___dst__P14s__anonymous20_1, int __i__i_1){ … … 558 600 } 559 601 static inline struct __anonymous21 ___operator_assign__F14s__anonymous21_P14s__anonymous2114s__anonymous21_autogen___1(struct __anonymous21 *___dst__P14s__anonymous21_1, struct __anonymous21 ___src__14s__anonymous21_1){ 602 struct __anonymous21 ___ret__14s__anonymous21_1; 560 603 ((void)((*___dst__P14s__anonymous21_1).__i__i_1=___src__14s__anonymous21_1.__i__i_1)); 561 return ((struct __anonymous21 )___src__14s__anonymous21_1); 604 ((void)___constructor__F_P14s__anonymous2114s__anonymous21_autogen___1((&___ret__14s__anonymous21_1), ___src__14s__anonymous21_1)); 605 return ((struct __anonymous21 )___ret__14s__anonymous21_1); 562 606 } 563 607 static inline void ___constructor__F_P14s__anonymous21i_autogen___1(struct __anonymous21 *___dst__P14s__anonymous21_1, int __i__i_1){ … … 582 626 } 583 627 static inline struct __anonymous22 ___operator_assign__F14s__anonymous22_P14s__anonymous2214s__anonymous22_autogen___1(struct __anonymous22 *___dst__P14s__anonymous22_1, struct __anonymous22 ___src__14s__anonymous22_1){ 628 struct __anonymous22 
___ret__14s__anonymous22_1; 584 629 ((void)((*___dst__P14s__anonymous22_1).__i__i_1=___src__14s__anonymous22_1.__i__i_1)); 585 return ((struct __anonymous22 )___src__14s__anonymous22_1); 630 ((void)___constructor__F_P14s__anonymous2214s__anonymous22_autogen___1((&___ret__14s__anonymous22_1), ___src__14s__anonymous22_1)); 631 return ((struct __anonymous22 )___ret__14s__anonymous22_1); 586 632 } 587 633 static inline void ___constructor__F_P14s__anonymous22i_autogen___1(struct __anonymous22 *___dst__P14s__anonymous22_1, int __i__i_1){ … … 606 652 } 607 653 static inline struct __anonymous23 ___operator_assign__F14s__anonymous23_P14s__anonymous2314s__anonymous23_autogen___1(struct __anonymous23 *___dst__P14s__anonymous23_1, struct __anonymous23 ___src__14s__anonymous23_1){ 654 struct __anonymous23 ___ret__14s__anonymous23_1; 608 655 ((void)((*___dst__P14s__anonymous23_1).__i__i_1=___src__14s__anonymous23_1.__i__i_1)); 609 return ((struct __anonymous23 )___src__14s__anonymous23_1); 656 ((void)___constructor__F_P14s__anonymous2314s__anonymous23_autogen___1((&___ret__14s__anonymous23_1), ___src__14s__anonymous23_1)); 657 return ((struct __anonymous23 )___ret__14s__anonymous23_1); 610 658 } 611 659 static inline void ___constructor__F_P14s__anonymous23i_autogen___1(struct __anonymous23 *___dst__P14s__anonymous23_1, int __i__i_1){ -
src/tests/.expect/64/extension.txt
r6ac2ada r1ed33fed 33 33 } 34 34 static inline struct S ___operator_assign__F2sS_P2sS2sS_autogen___1(struct S *___dst__P2sS_1, struct S ___src__2sS_1){ 35 struct S ___ret__2sS_1; 35 36 ((void)((*___dst__P2sS_1).__a__i_1=___src__2sS_1.__a__i_1)); 36 37 ((void)((*___dst__P2sS_1).__b__i_1=___src__2sS_1.__b__i_1)); 37 38 ((void)((*___dst__P2sS_1).__c__i_1=___src__2sS_1.__c__i_1)); 38 return ((struct S )___src__2sS_1); 39 ((void)___constructor__F_P2sS2sS_autogen___1((&___ret__2sS_1), ___src__2sS_1)); 40 return ((struct S )___ret__2sS_1); 39 41 } 40 42 static inline void ___constructor__F_P2sSi_autogen___1(struct S *___dst__P2sS_1, int __a__i_1){ … … 66 68 } 67 69 static inline union U ___operator_assign__F2uU_P2uU2uU_autogen___1(union U *___dst__P2uU_1, union U ___src__2uU_1){ 70 union U ___ret__2uU_1; 68 71 ((void)__builtin_memcpy(((void *)___dst__P2uU_1), ((const void *)(&___src__2uU_1)), sizeof(union U ))); 69 return ((union U )___src__2uU_1); 72 ((void)___constructor__F_P2uU2uU_autogen___1((&___ret__2uU_1), ___src__2uU_1)); 73 return ((union U )___ret__2uU_1); 70 74 } 71 75 static inline void ___constructor__F_P2uUi_autogen___1(union U *___dst__P2uU_1, int __src__i_1){ -
src/tests/.expect/64/gccExtensions.txt
r6ac2ada r1ed33fed 59 59 } 60 60 inline struct S ___operator_assign__F2sS_P2sS2sS_autogen___2(struct S *___dst__P2sS_2, struct S ___src__2sS_2){ 61 struct S ___ret__2sS_2; 61 62 ((void)((*___dst__P2sS_2).__a__i_2=___src__2sS_2.__a__i_2)); 62 63 ((void)((*___dst__P2sS_2).__b__i_2=___src__2sS_2.__b__i_2)); 63 64 ((void)((*___dst__P2sS_2).__c__i_2=___src__2sS_2.__c__i_2)); 64 return ((struct S )___src__2sS_2); 65 ((void)___constructor__F_P2sS2sS_autogen___2((&___ret__2sS_2), ___src__2sS_2)); 66 return ((struct S )___ret__2sS_2); 65 67 } 66 68 inline void ___constructor__F_P2sSi_autogen___2(struct S *___dst__P2sS_2, int __a__i_2){ … … 109 111 } 110 112 inline struct s2 ___operator_assign__F3ss2_P3ss23ss2_autogen___2(struct s2 *___dst__P3ss2_2, struct s2 ___src__3ss2_2){ 113 struct s2 ___ret__3ss2_2; 111 114 ((void)((*___dst__P3ss2_2).__i__i_2=___src__3ss2_2.__i__i_2)); 112 return ((struct s2 )___src__3ss2_2); 115 ((void)___constructor__F_P3ss23ss2_autogen___2((&___ret__3ss2_2), ___src__3ss2_2)); 116 return ((struct s2 )___ret__3ss2_2); 113 117 } 114 118 inline void ___constructor__F_P3ss2i_autogen___2(struct s2 *___dst__P3ss2_2, int __i__i_2){ … … 128 132 } 129 133 inline struct s3 ___operator_assign__F3ss3_P3ss33ss3_autogen___2(struct s3 *___dst__P3ss3_2, struct s3 ___src__3ss3_2){ 134 struct s3 ___ret__3ss3_2; 130 135 ((void)((*___dst__P3ss3_2).__i__i_2=___src__3ss3_2.__i__i_2)); 131 return ((struct s3 )___src__3ss3_2); 136 ((void)___constructor__F_P3ss33ss3_autogen___2((&___ret__3ss3_2), ___src__3ss3_2)); 137 return ((struct s3 )___ret__3ss3_2); 132 138 } 133 139 inline void ___constructor__F_P3ss3i_autogen___2(struct s3 *___dst__P3ss3_2, int __i__i_2){ … … 149 155 } 150 156 inline struct s4 ___operator_assign__F3ss4_P3ss43ss4_autogen___2(struct s4 *___dst__P3ss4_2, struct s4 ___src__3ss4_2){ 157 struct s4 ___ret__3ss4_2; 151 158 ((void)((*___dst__P3ss4_2).__i__i_2=___src__3ss4_2.__i__i_2)); 152 return ((struct s4 )___src__3ss4_2); 159 
((void)___constructor__F_P3ss43ss4_autogen___2((&___ret__3ss4_2), ___src__3ss4_2)); 160 return ((struct s4 )___ret__3ss4_2); 153 161 } 154 162 inline void ___constructor__F_P3ss4i_autogen___2(struct s4 *___dst__P3ss4_2, int __i__i_2){ -
src/tests/.expect/memberCtors-ERR1.txt
r6ac2ada r1ed33fed 1 memberCtors.c: 62error: in void ?{}(B *b), field a2 used before being constructed1 memberCtors.c:71 error: in void ?{}(B *b), field a2 used before being constructed -
src/tests/.expect/memberCtors.txt
r6ac2ada r1ed33fed 16 16 assigning int: 0 0 17 17 end construct A 18 copy constructing int: 0 19 copy constructing int: 0 20 begin copy construct A 21 copy construct this->x 22 copy constructing int: 1001 23 assign this->y 24 copy constructing int: 0 25 destructing int: 0 26 destructing int: 0 27 end copy construct A 28 begin ?=? A 29 copy constructing int: 1001 30 destructing int: 1001 31 destructing int: 1001 32 copy constructing int: 0 33 destructing int: 0 34 destructing int: 0 35 copy constructing int: 0 36 destructing int: 0 37 destructing int: 0 38 end ?=? A 39 copy constructing int: 0 40 copy constructing int: 0 41 begin copy construct A 42 copy construct this->x 43 copy constructing int: 1001 44 assign this->y 45 copy constructing int: 0 46 destructing int: 0 47 destructing int: 0 48 end copy construct A 49 destructing int: 0 50 destructing int: 0 51 destructing int: 1001 52 destructing int: 0 53 destructing int: 0 54 destructing int: 1001 18 55 construct b->a1 19 56 constructing int … … 36 73 copy constructing int: 1000 37 74 assign this->y 38 end copy construct A 39 copy constructing int: 0 40 copy constructing int: 0 41 begin copy construct A 42 copy construct this->x 43 copy constructing int: 1001 44 assign this->y 45 end copy construct A 46 copy constructing int: 0 47 copy constructing int: 0 48 begin copy construct A 49 copy construct this->x 50 copy constructing int: 0 51 assign this->y 75 copy constructing int: 0 76 destructing int: 0 77 destructing int: 0 78 end copy construct A 79 copy constructing int: 0 80 copy constructing int: 0 81 begin copy construct A 82 copy construct this->x 83 copy constructing int: 1001 84 assign this->y 85 copy constructing int: 0 86 destructing int: 0 87 destructing int: 0 88 end copy construct A 89 copy constructing int: 0 90 copy constructing int: 0 91 begin copy construct A 92 copy construct this->x 93 copy constructing int: 0 94 assign this->y 95 copy constructing int: 0 96 destructing int: 0 97 destructing int: 
0 52 98 end copy construct A 53 99 End of main … … 60 106 assigning int: 0 0 61 107 end construct A 108 copy constructing int: 0 109 copy constructing int: 0 110 begin copy construct A 111 copy construct this->x 112 copy constructing int: 999 113 assign this->y 114 copy constructing int: 0 115 destructing int: 0 116 destructing int: 0 117 end copy construct A 118 begin ?=? A 119 copy constructing int: 999 120 destructing int: 999 121 destructing int: 999 122 copy constructing int: 0 123 destructing int: 0 124 destructing int: 0 125 copy constructing int: 0 126 destructing int: 0 127 destructing int: 0 128 end ?=? A 129 copy constructing int: 0 130 copy constructing int: 0 131 begin copy construct A 132 copy construct this->x 133 copy constructing int: 999 134 assign this->y 135 copy constructing int: 0 136 destructing int: 0 137 destructing int: 0 138 end copy construct A 139 destructing int: 0 140 destructing int: 0 141 destructing int: 999 142 destructing int: 0 143 destructing int: 0 144 destructing int: 999 62 145 destructing int: 0 63 146 destructing int: 0 … … 80 163 assigning int: 0 0 81 164 end construct A 165 copy constructing int: 0 166 copy constructing int: 0 167 begin copy construct A 168 copy construct this->x 169 copy constructing int: 999 170 assign this->y 171 copy constructing int: 0 172 destructing int: 0 173 destructing int: 0 174 end copy construct A 175 begin ?=? A 176 copy constructing int: 999 177 destructing int: 999 178 destructing int: 999 179 copy constructing int: 0 180 destructing int: 0 181 destructing int: 0 182 copy constructing int: 0 183 destructing int: 0 184 destructing int: 0 185 end ?=? 
A 186 copy constructing int: 0 187 copy constructing int: 0 188 begin copy construct A 189 copy construct this->x 190 copy constructing int: 999 191 assign this->y 192 copy constructing int: 0 193 destructing int: 0 194 destructing int: 0 195 end copy construct A 196 destructing int: 0 197 destructing int: 0 198 destructing int: 999 199 destructing int: 0 200 destructing int: 0 201 destructing int: 999 82 202 destructing int: 0 83 203 destructing int: 0 -
src/tests/Makefile.am
r6ac2ada r1ed33fed 17 17 debug=yes 18 18 19 quick_test=vector_test avl_test operators numericConstants expression enum array typeof cast dtor-early-exit init_once 19 quick_test=vector_test avl_test operators numericConstants expression enum array typeof cast dtor-early-exit init_once attributes 20 20 21 21 if BUILD_CONCURRENCY … … 30 30 # applies to both programs 31 31 EXTRA_FLAGS = 32 BUILD_FLAGS = -g -Wall -Wno-unused-function @CFA_FLAGS@ ${EXTRA_FLAGS}32 BUILD_FLAGS = -g -Wall -Wno-unused-function -quiet @CFA_FLAGS@ ${EXTRA_FLAGS} 33 33 TEST_FLAGS = $(if $(test), 2> .err/${@}.log, ) 34 34 CFLAGS = ${TEST_FLAGS} ${BUILD_FLAGS} … … 76 76 77 77 declarationSpecifier: declarationSpecifier.c 78 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}78 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 79 79 80 80 gccExtensions : gccExtensions.c 81 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}81 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 82 82 83 83 extension : extension.c 84 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}84 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 85 85 86 86 attributes : attributes.c 87 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}87 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 88 88 89 89 KRfunctions : KRfunctions.c 90 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}90 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 91 91 92 92 memberCtors-ERR1: memberCtors.c -
src/tests/Makefile.in
r6ac2ada r1ed33fed 226 226 quick_test = vector_test avl_test operators numericConstants \ 227 227 expression enum array typeof cast dtor-early-exit init_once \ 228 $(am__append_1)228 attributes $(am__append_1) 229 229 @BUILD_CONCURRENCY_FALSE@concurrent = no 230 230 @BUILD_CONCURRENCY_TRUE@concurrent = yes … … 234 234 # applies to both programs 235 235 EXTRA_FLAGS = 236 BUILD_FLAGS = -g -Wall -Wno-unused-function @CFA_FLAGS@ ${EXTRA_FLAGS}236 BUILD_FLAGS = -g -Wall -Wno-unused-function -quiet @CFA_FLAGS@ ${EXTRA_FLAGS} 237 237 TEST_FLAGS = $(if $(test), 2> .err/${@}.log, ) 238 238 fstream_test_SOURCES = fstream_test.c … … 695 695 696 696 declarationSpecifier: declarationSpecifier.c 697 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}697 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 698 698 699 699 gccExtensions : gccExtensions.c 700 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}700 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 701 701 702 702 extension : extension.c 703 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}703 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 704 704 705 705 attributes : attributes.c 706 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}706 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 707 707 708 708 KRfunctions : KRfunctions.c 709 ${CC} ${CFLAGS} -CFA -XCFA -p ${<} -o ${@}709 ${CC} ${CFLAGS} -CFA -XCFA -p -XCFA -L ${<} -o ${@} 710 710 711 711 memberCtors-ERR1: memberCtors.c -
src/tests/memberCtors.c
r6ac2ada r1ed33fed 53 53 } // z never constructed - will be automatically copy constructed 54 54 55 A ?=?(A * this, A other) { 56 printf("begin ?=? A\n"); 57 this->x = other.x; 58 this->y = other.y; 59 this->z = other.z; 60 printf("end ?=? A\n"); 61 return *this; 62 } 63 55 64 struct B { 56 65 A a1, a2, a3; -
src/tests/sched-int-wait.c
r6ac2ada r1ed33fed 4 4 #include <stdlib> 5 5 #include <thread> 6 7 static const int N = 10_000; 6 8 7 9 monitor global_t {}; … … 13 15 condition condAB, condAC, condBC, condABC; 14 16 15 thread Signaler { 16 int signals[4]; 17 }; 18 19 void ?{}( Signaler * this ){ 20 this->signals[0] = 0; 21 this->signals[1] = 0; 22 this->signals[2] = 0; 23 this->signals[3] = 0; 24 } 25 17 thread Signaler {}; 26 18 thread WaiterAB {}; 27 19 thread WaiterAC {}; … … 29 21 thread WaiterABC{}; 30 22 31 volatile bool done;23 volatile int waiter_left; 32 24 33 25 //---------------------------------------------------------------------------------------------------- … … 53 45 void main( Signaler* this ) { 54 46 55 while( true ) { 56 int action = (unsigned)rand48() % 4; 57 bool finished = true; 58 59 for(int i = 0; i < 4; i++) { 60 if( this->signals[action] < 10_000 ) { 61 finished = false; 62 break; 63 } 64 else { 65 action = (action + 1) % 4; 66 } 67 } 68 69 this->signals[action]++; 70 if( finished ) break; 71 72 //sout | action | this->signals[0] | this->signals[1] | this->signals[2] | this->signals[3] | endl; 73 47 while( waiter_left != 0 ) { 48 unsigned action = (unsigned)rand48() % 4; 74 49 switch( action ) { 75 50 case 0: … … 89 64 abort(); 90 65 } 66 yield(); 91 67 } 92 68 } … … 95 71 // Waiter ABC 96 72 void main( WaiterABC* this ) { 97 while( !done) {73 for( int i = 0; i < N; i++ ) { 98 74 wait( &condABC, &globalA, &globalB, &globalC ); 99 75 } 76 77 __sync_fetch_and_sub_4( &waiter_left, 1); 100 78 } 101 79 … … 103 81 // Waiter AB 104 82 void main( WaiterAB* this ) { 105 while( !done) {83 for( int i = 0; i < N; i++ ) { 106 84 wait( &condAB , &globalA, &globalB ); 107 85 } 86 87 __sync_fetch_and_sub_4( &waiter_left, 1); 108 88 } 109 89 … … 111 91 // Waiter AC 112 92 void main( WaiterAC* this ) { 113 while( !done) {93 for( int i = 0; i < N; i++ ) { 114 94 wait( &condAC , &globalA, &globalC ); 115 95 } 96 97 __sync_fetch_and_sub_4( &waiter_left, 1); 116 98 } 117 99 … … 119 101 // 
Waiter BC 120 102 void main( WaiterBC* this ) { 121 while( !done) {103 for( int i = 0; i < N; i++ ) { 122 104 wait( &condBC , &globalB, &globalC ); 123 105 } 106 107 __sync_fetch_and_sub_4( &waiter_left, 1); 124 108 } 125 109 … … 127 111 // Main 128 112 int main(int argc, char* argv[]) { 129 done = false;113 waiter_left = 4; 130 114 processor p; 131 115 { 132 WaiterABC a; 133 WaiterAB b; 134 WaiterBC c; 135 WaiterAC d; 116 Signaler e; 136 117 { 137 Signaler e; 118 WaiterABC a; 119 WaiterAB b; 120 WaiterBC c; 121 WaiterAC d; 138 122 } 139 done = true;140 signal( &condABC, &globalA, &globalB, &globalC );141 signal( &condAB , &globalA, &globalB );142 signal( &condBC , &globalB, &globalC );143 signal( &condAC , &globalA, &globalC );144 123 } 145 124 } -
src/tests/test.py
r6ac2ada r1ed33fed 28 28 sh('echo "void ?{}(int*a,int b){}int main(){return 0;}" > .dummy.c') 29 29 ret, out = sh("make .dummy -s", print2stdout=True) 30 30 31 31 if ret != 0: 32 32 print("Failed to identify architecture:") … … 161 161 162 162 # build, skipping to next test on error 163 make_ret, _ = sh("""%s test=yes EXTRA_FLAGS=" -quiet%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run)163 make_ret, _ = sh("""%s test=yes EXTRA_FLAGS="%s" %s 2> %s 1> /dev/null""" % (make_cmd, options, test.name, out_file), dry_run) 164 164 165 165 # if the make command succeds continue otherwise skip to diff … … 192 192 # fetch return code and error from the diff command 193 193 retcode, error = diff(".expect/%s.txt" % test.path, ".out/%s.log" % test.name, dry_run) 194 194 195 195 # clean the executable 196 196 sh("rm -f %s > /dev/null 2>&1" % test.name, dry_run) … … 269 269 if __name__ == "__main__": 270 270 #always run from same folder 271 chdir() 272 271 chdir() 272 273 273 # parse the command line arguments 274 274 options = getOptions()
Note:
See TracChangeset
for help on using the changeset viewer.