Changeset 695e00d
- Timestamp: Sep 19, 2017, 2:14:39 PM (8 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: 8f98b78
- Parents: e149f77 (diff), e06be49 (diff)
- Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 5 added, 23 edited
-
doc/proposals/concurrency/Makefile
re149f77 r695e00d 22 22 monitor \ 23 23 ext_monitor \ 24 int_monitor \ 24 25 }} 25 26 -
doc/proposals/concurrency/annex/local.bib
re149f77 r695e00d 39 39 title = {Intel Thread Building Blocks}, 40 40 } 41 42 @manual{www-cfa, 43 keywords = {Cforall}, 44 title = {Cforall Programmming Language}, 45 address = {https://plg.uwaterloo.ca/~cforall/} 46 } 47 48 @article{rob-thesis, 49 keywords = {Constructors, Destructors, Tuples}, 50 author = {Rob Schluntz}, 51 title = {Resource Management and Tuples in Cforall}, 52 year = 2017 53 } -
doc/proposals/concurrency/style/cfa-format.tex
re149f77 r695e00d 166 166 xleftmargin=\parindentlnth, % indent code to paragraph indentation 167 167 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red highlighting 168 morekeywords=[2]{accept, signal, signal_block, wait },168 morekeywords=[2]{accept, signal, signal_block, wait, waitfor}, 169 169 } 170 170 -
doc/proposals/concurrency/text/basics.tex
re149f77 r695e00d 1 1 % ====================================================================== 2 2 % ====================================================================== 3 \chapter{Basics} 3 \chapter{Basics}\label{basics} 4 4 % ====================================================================== 5 5 % ====================================================================== … … 13 13 14 14 \section{Coroutines: A stepping stone}\label{coroutine} 15 While the main focus of this proposal is concurrency and parallelism, as mentionned above it is important to adress coroutines, which are actually a significant underlying aspect of a concurrency system. Indeed, while having nothing to do with parallelism and arguably little to do with concurrency, coroutines need to deal with context-switchs andand other context-management operations. Therefore, this proposal includes coroutines both as an intermediate step for the implementation of threads, and a first class feature of \CFA. Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant. The core API of coroutines revolve around two features: independent call stacks and \code{suspend}/\code{resume}.15 While the main focus of this proposal is concurrency and parallelism, as mentionned above it is important to adress coroutines, which are actually a significant underlying aspect of a concurrency system. Indeed, while having nothing to do with parallelism and arguably little to do with concurrency, coroutines need to deal with context-switchs and other context-management operations. Therefore, this proposal includes coroutines both as an intermediate step for the implementation of threads, and a first class feature of \CFA. Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant. The core API of coroutines revolve around two features: independent call stacks and \code{suspend}/\code{resume}. 
16 16 17 17 Here is an example of a solution to the fibonnaci problem using \CFA coroutines: … … 21 21 }; 22 22 23 void ?{}(Fibonacci *this) { // constructor24 this ->fn = 0;23 void ?{}(Fibonacci & this) { // constructor 24 this.fn = 0; 25 25 } 26 26 27 27 // main automacically called on first resume 28 void main(Fibonacci *this) {28 void main(Fibonacci & this) { 29 29 int fn1, fn2; // retained between resumes 30 this ->fn = 0;31 fn1 = this ->fn;30 this.fn = 0; 31 fn1 = this.fn; 32 32 suspend(this); // return to last resume 33 33 34 this ->fn = 1;34 this.fn = 1; 35 35 fn2 = fn1; 36 fn1 = this ->fn;36 fn1 = this.fn; 37 37 suspend(this); // return to last resume 38 38 39 39 for ( ;; ) { 40 this ->fn = fn1 + fn2;40 this.fn = fn1 + fn2; 41 41 fn2 = fn1; 42 fn1 = this ->fn;42 fn1 = this.fn; 43 43 suspend(this); // return to last resume 44 44 } 45 45 } 46 46 47 int next(Fibonacci *this) {47 int next(Fibonacci & this) { 48 48 resume(this); // transfer to last suspend 49 49 return this.fn; … … 53 53 Fibonacci f1, f2; 54 54 for ( int i = 1; i <= 10; i += 1 ) { 55 sout | next( &f1) | next(&f2) | endl;55 sout | next( f1 ) | next( f2 ) | endl; 56 56 } 57 57 } … … 106 106 }; 107 107 108 void ?{}(Fibonacci *this) {109 this ->fn = 0;110 ( &this->c){};108 void ?{}(Fibonacci & this) { 109 this.fn = 0; 110 (this.c){}; 111 111 } 112 112 \end{cfacode} … … 126 126 \subsection{Alternative: Lamda Objects} 127 127 128 For coroutines as for threads, many implementations are based on routine pointers or function objects\cit. For example, Boost implements coroutines in terms of four functor object types: 128 For coroutines as for threads, many implementations are based on routine pointers or function objects\cit. For example, Boost implements coroutines in terms of four functor object types: 129 129 \begin{cfacode} 130 130 asymmetric_coroutine<>::pull_type … … 132 132 symmetric_coroutine<>::call_type 133 133 symmetric_coroutine<>::yield_type 134 \end{cfacode} 135 Often, the canonical threading paradigm in languages is based on function pointers, pthread being one of the most well known examples. The main problem of this approach is that the thread usage is limited to a generic handle that must otherwise be wrapped in a custom type. Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda based coroutines adds very little. 136 137 A variation of this would be to use an simple function pointer in the same way pthread does for threads : 134 \end{cfacode} 135 Often, the canonical threading paradigm in languages is based on function pointers, pthread being one of the most well known examples. The main problem of this approach is that the thread usage is limited to a generic handle that must otherwise be wrapped in a custom type. Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda based coroutines adds very little. 136 137 A variation of this would be to use an simple function pointer in the same way pthread does for threads : 138 138 \begin{cfacode} 139 139 void foo( coroutine_t cid, void * arg ) { … … 152 152 \subsection{Alternative: Trait-based coroutines} 153 153 154 Finally the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines. 
This approach defines a coroutine as anything that satisfies the trait \code{is_coroutine} and is used as a coroutine is a coroutine.154 Finally the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines. This approach defines a coroutine as anything that satisfies the trait \code{is_coroutine} and is used as a coroutine. 155 155 156 156 \begin{cfacode} 157 157 trait is_coroutine(dtype T) { 158 void main(T *this);159 coroutine_desc * get_coroutine(T *this);160 }; 161 \end{cfacode} 162 This ensures an object is not a coroutine until \code{resume} (or \code{prime}) is called on the object. Correspondingly, any object that is passed to \code{resume} is a coroutine since it must satisfy the \code{is_coroutine} trait to compile. The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory foot print of a coroutine is trivial when implementing the \code{get_coroutine} routine. The \CFA keyword \code{coroutine} only has the effect of implementing the getter and forward declarations required for users to only have to implement the main routine. 158 void main(T & this); 159 coroutine_desc * get_coroutine(T & this); 160 }; 161 \end{cfacode} 162 This ensures an object is not a coroutine until \code{resume} (or \code{prime}) is called on the object. Correspondingly, any object that is passed to \code{resume} is a coroutine since it must satisfy the \code{is_coroutine} trait to compile. The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory foot print of a coroutine is trivial when implementing the \code{get_coroutine} routine. The \CFA keyword \code{coroutine} only has the effect of implementing the getter and forward declarations required for users to only have to implement the main routine. 163 163 164 164 \begin{center} … … 174 174 }; 175 175 176 static inline 177 coroutine_desc * get_coroutine( 178 struct MyCoroutine * this176 static inline 177 coroutine_desc * get_coroutine( 178 struct MyCoroutine & this 179 179 ) { 180 return &this ->__cor;180 return &this.__cor; 181 181 } 182 182 … … 186 186 \end{center} 187 187 188 188 The combination of these two approaches allows users new to concurrency to have a easy and concise method while more advanced users can expose themselves to otherwise hidden pitfalls at the benefit of tighter control on memory layout and initialization. 189 189 190 190 \section{Thread Interface}\label{threads} 191 The basic building blocks of multi-threading in \CFA are \glspl{cfathread}. Both use and kernel threads are supported, where user threads are the concurrency mechanism and kernel threads are the parallel mechanism. User threads offer a flexible and lightweight interface. A thread can be declared using a struct declaration \code{thread} as follows:191 The basic building blocks of multi-threading in \CFA are \glspl{cfathread}. Both user and kernel threads are supported, where user threads are the concurrency mechanism and kernel threads are the parallel mechanism. User threads offer a flexible and lightweight interface. 
A thread can be declared using a struct declaration \code{thread} as follows: 192 192 193 193 \begin{cfacode} … … 199 199 \begin{cfacode} 200 200 trait is_thread(dtype T) { 201 void ^?{}(T *mutex this);202 void main(T *this);203 thread_desc* get_thread(T *this);201 void ^?{}(T & mutex this); 202 void main(T & this); 203 thread_desc* get_thread(T & this); 204 204 }; 205 205 \end{cfacode} … … 209 209 thread foo {}; 210 210 211 void main(foo *this) {211 void main(foo & this) { 212 212 sout | "Hello World!" | endl; 213 213 } … … 223 223 224 224 //ctor 225 void ?{}(FuncRunner *this, voidFunc inFunc) {226 func = inFunc;225 void ?{}(FuncRunner & this, voidFunc inFunc) { 226 this.func = inFunc; 227 227 } 228 228 229 229 //main 230 void main(FuncRunner *this) {231 this ->func();230 void main(FuncRunner & this) { 231 this.func(); 232 232 } 233 233 \end{cfacode} … … 239 239 thread World; 240 240 241 void main( thread World*this) {241 void main(World & this) { 242 242 sout | "World!" | endl; 243 243 } … … 257 257 258 258 \begin{cfacode} 259 thread MyThread { 260 //... 261 }; 262 263 //main 264 void main(MyThread* this) { 265 //... 266 } 267 268 void foo() { 269 MyThread thrds[10]; 270 //Start 10 threads at the beginning of the scope 259 thread MyThread { 260 //... 261 }; 262 263 //main 264 void main(MyThread & this) { 265 //... 266 } 267 268 void foo() { 269 MyThread thrds[10]; 270 //Start 10 threads at the beginning of the scope 271 272 DoStuff(); 273 274 //Wait for the 10 threads to finish 275 } 276 \end{cfacode} 277 278 However, one of the apparent drawbacks of this system is that threads now always form a lattice, that is they are always destroyed in opposite order of construction because of block structure. However, storage allocation is not limited to blocks; dynamic allocation can create threads that outlive the scope in which the thread is created much like dynamically allocating memory lets objects outlive the scope in which they are created 279 280 \begin{cfacode} 281 thread MyThread { 282 //... 283 }; 284 285 //main 286 void main(MyThread & this) { 287 //... 288 } 289 290 void foo() { 291 MyThread * long_lived; 292 { 293 MyThread short_lived; 294 //Start a thread at the beginning of the scope 271 295 272 296 DoStuff(); 273 297 274 //Wait for the 10 threads to finish 275 } 276 \end{cfacode} 277 278 However, one of the apparent drawbacks of this system is that threads now always form a lattice, that is they are always destroyed in opposite order of construction because of block structure. However, storage allocation os not limited to blocks; dynamic allocation can create threads that outlive the scope in which the thread is created much like dynamically allocating memory lets objects outlive the scope in which they are created 279 280 \begin{cfacode} 281 thread MyThread { 282 //... 283 }; 284 285 //main 286 void main(MyThread* this) { 287 //... 
288 } 289 290 void foo() { 291 MyThread* long_lived; 292 { 293 MyThread short_lived; 294 //Start a thread at the beginning of the scope 295 296 DoStuff(); 297 298 //create another thread that will outlive the thread in this scope 299 long_lived = new MyThread; 300 301 //Wait for the thread short_lived to finish 302 } 303 DoMoreStuff(); 304 305 //Now wait for the short_lived to finish 306 delete long_lived; 307 } 308 \end{cfacode} 298 //create another thread that will outlive the thread in this scope 299 long_lived = new MyThread; 300 301 //Wait for the thread short_lived to finish 302 } 303 DoMoreStuff(); 304 305 //Now wait for the short_lived to finish 306 delete long_lived; 307 } 308 \end{cfacode} -
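The basics.tex text above contrasts CFA's object-based threads with the canonical function-pointer model it attributes to pthread. As a point of comparison only, here is a minimal pthread rendering of the "Hello World!" thread; the Payload struct and worker names are invented for this sketch, and it is not generated or proposed CFA output:

    #include <pthread.h>
    #include <cstdio>

    struct Payload { const char * msg; };              // state must be wrapped by hand

    static void * worker( void * arg ) {                // generic void* handle
        Payload * p = static_cast<Payload *>( arg );
        std::printf( "%s\n", p->msg );
        return nullptr;
    }

    int main() {
        Payload p = { "Hello World!" };
        pthread_t tid;
        pthread_create( &tid, nullptr, worker, &p );    // explicit start
        pthread_join( tid, nullptr );                   // explicit join, no scope-based lattice
        return 0;
    }

Compared with the thread/main/foo examples in the diff, the start and join points here are explicit calls rather than falling out of construction and destruction at scope boundaries, which is the drawback the proposal's declaration-based threads are meant to remove.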
doc/proposals/concurrency/text/cforall.tex
re149f77 r695e00d 4 4 % ====================================================================== 5 5 % ====================================================================== 6 7 As mentionned in the introduction, the document presents the design for the concurrency features in \CFA. Since it is a new language here is a quick review of the language specifically tailored to the features needed to support concurrency. 8 9 \CFA is a extension of ISO C and therefore supports much of the same paradigms as C. It is a non-object oriented system level language, meaning it has very most of the major abstractions have either no runtime cost or can be opt-out easily. Like C, the basics of \CFA revolve around structures and routines, which are thin abstractions over assembly. The vast majority of the code produced by a \CFA compiler respects memory-layouts and calling-conventions laid out by C. However, while \CFA is not an object-oriented language according to a strict definition. It does have some notion of objects, most importantly construction and destruction of objects. Most of the following pieces of code can be found as is on the \CFA website : \cite{www-cfa} 10 11 \section{References} 12 13 Like \CC, \CFA introduces references as an alternative to pointers. In regards to concurrency, the semantics difference between pointers and references aren't particularly relevant but since this document uses mostly references here is a quick overview of the semantics : 14 \begin{cfacode} 15 int x, *p1 = &x, **p2 = &p1, ***p3 = &p2, 16 &r1 = x, &&r2 = r1, &&&r3 = r2; 17 ***p3 = 3; // change x 18 r3 = 3; // change x, ***r3 19 **p3 = ...; // change p1 20 &r3 = ...; // change r1, (&*)**r3 21 *p3 = ...; // change p2 22 &&r3 = ...; // change r2, (&(&*)*)*r3 23 &&&r3 = p3; // change r3 to p3, (&(&(&*)*)*)r3 24 int y, z, & ar[3] = { x, y, z }; // initialize array of references 25 &ar[1] = &z; // change reference array element 26 typeof( ar[1] ) p; // is int, i.e., the type of referenced object 27 typeof( &ar[1] ) q; // is int &, i.e., the type of reference 28 sizeof( ar[1] ) == sizeof( int ); // is true, i.e., the size of referenced object 29 sizeof( &ar[1] ) == sizeof( int *); // is true, i.e., the size of a reference 30 \end{cfacode} 31 The important thing to take away from this code snippet is that references offer a handle to an object much like pointers but which is automatically derefferenced when convinient. 32 33 \section{Overloading} 34 35 Another important feature \CFA has in common with \CC is function overloading : 36 \begin{cfacode} 37 // selection based on type and number of parameters 38 void f( void ); // (1) 39 void f( char ); // (2) 40 void f( int, double ); // (3) 41 f(); // select (1) 42 f( 'a' ); // select (2) 43 f( 3, 5.2 ); // select (3) 44 45 // selection based on type and number of returns 46 char f( int ); // (1) 47 double f( int ); // (2) 48 [ int, double ] f( int ); // (3) 49 char c = f( 3 ); // select (1) 50 double d = f( 4 ); // select (2) 51 [ int, double ] t = f( 5 ); // select (3) 52 \end{cfacode} 53 This feature is particularly important for concurrency since the runtime system relies on creating different types do represent concurrency objects. Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions that prevent clashes. As seen in chapter \ref{basics}, the main is an example of routine that benefits from overloading when concurrency in introduced. 54 55 \section{Operators} 56 Overloading also extends to operators. 
The syntax for denoting operator-overloading is to name a routine with the symbol of the operator and question marks where the arguments of the operation would be, like so : 57 \begin{cfacode} 58 int ++?( int op ); // unary prefix increment 59 int ?++( int op ); // unary postfix increment 60 int ?+?( int op1, int op2 ); // binary plus 61 int ?<=?( int op1, int op2 ); // binary less than 62 int ?=?( int & op1, int op2 ); // binary assignment 63 int ?+=?( int & op1, int op2 ); // binary plus-assignment 64 65 struct S { int i, j; }; 66 S ?+?( S op1, S op2 ) { // add two structures 67 return (S){ op1.i + op2.i, op1.j + op2.j }; 68 } 69 S s1 = { 1, 2 }, s2 = { 2, 3 }, s3; 70 s3 = s1 + s2; // compute sum: s3 == { 2, 5 } 71 \end{cfacode} 72 73 Since concurrency does not use operator overloading, this feature is more important as an introduction for the syntax of constructors. 74 75 \section{Constructors/Destructors} 76 \CFA uses the following syntax for constructors and destructors : 77 \begin{cfacode} 78 struct S { 79 size_t size; 80 int * ia; 81 }; 82 void ?{}( S & s, int asize ) with s { // constructor operator 83 size = asize; // initialize fields 84 ia = calloc( size, sizeof( S ) ); 85 } 86 void ^?{}( S & s ) with s { // destructor operator 87 free( ia ); // de-initialization fields 88 } 89 int main() { 90 S x = { 10 }, y = { 100 }; // implict calls: ?{}( x, 10 ), ?{}( y, 100 ) 91 ... // use x and y 92 ^x{}; ^y{}; // explicit calls to de-initialize 93 x{ 20 }; y{ 200 }; // explicit calls to reinitialize 94 ... // reuse x and y 95 } // implict calls: ^?{}( y ), ^?{}( x ) 96 \end{cfacode} 97 The language guarantees that every object and all their fields are constructed. Like \CC construction is automatically done on declaration and destruction done when the declared variables reach the end of its scope. 98 99 For more information see \cite{cforall-ug,rob-thesis,www-cfa}. -
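The constructor/destructor section of cforall.tex above leans on the C++ analogy ("Like \CC construction is automatically done on declaration and destruction done when the declared variables reach the end of its scope"). For readers coming from plain C, a rough C++ analogue of the S example is sketched below; it illustrates the lifetime rules only, not the code a CFA compiler emits:

    #include <cstdlib>

    struct S {
        size_t size;
        int *  ia;
        S( size_t asize )
            : size( asize ),
              ia( static_cast<int *>( std::calloc( size, sizeof(int) ) ) ) {}  // like ?{}( S &, int )
        ~S() { std::free( ia ); }                                              // like ^?{}( S & )
    };

    int main() {
        S x( 10 ), y( 100 );   // implicit construction on declaration
        // ... use x and y ...
    }                          // destruction in reverse order: y, then x

The explicit ^x{}; x{ 20 }; re-initialization shown in the CFA example has no direct C++ spelling; the closest idiom is an explicit destructor call followed by placement new, which is exactly the kind of boilerplate the ?{} and ^?{} operators are meant to subsume.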
doc/proposals/concurrency/text/concurrency.tex
re149f77 r695e00d 300 300 % ====================================================================== 301 301 % ====================================================================== 302 It easier to understand the problem of multi-monitor scheduling using a series of pseudo-code. Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors.302 It is easier to understand the problem of multi-monitor scheduling using a series of pseudo-code. Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors. 303 303 304 304 \begin{multicols}{2} … … 397 397 \end{center} 398 398 399 It is particularly important to pay attention to code sections 8 and 3, which are where the existing semantics of internal scheduling need to be extended for multiple monitors. The root of the problem is that \gls{group-acquire} is used in a context where one of the monitors is already acquired and is why it is important to define the behaviour of the previous pseudo-code. When the signaller thread reaches the location where it should "release A \& B" (line 16), it must actually transfer ownership of monitor B to the waiting thread. This ownership trasnfer is required in order to prevent barging. Since the signalling thread still needs the monitor A, simply waking up the waiting thread is not an option because it would violate mutual exclusion. There are three options:399 It is particularly important to pay attention to code sections 8 and 4, which are where the existing semantics of internal scheduling need to be extended for multiple monitors. The root of the problem is that \gls{group-acquire} is used in a context where one of the monitors is already acquired and is why it is important to define the behaviour of the previous pseudo-code. When the signaller thread reaches the location where it should "release A \& B" (line 16), it must actually transfer ownership of monitor B to the waiting thread. This ownership trasnfer is required in order to prevent barging. Since the signalling thread still needs the monitor A, simply waking up the waiting thread is not an option because it would violate mutual exclusion. There are three options: 400 400 401 401 \subsubsection{Delaying signals} … … 467 467 Note that ordering is not determined by a race condition but by whether signalled threads are enqueued in FIFO or FILO order. However, regardless of the answer, users can move line 15 before line 11 and get the reverse effect. 468 468 469 In both cases however, the threads need to be able to distinguish on a per monitor basis which ones need to be released and which ones need to be transferred. Which means monitors cannot be handled as a single homogenous group.469 In both cases, the threads need to be able to distinguish on a per monitor basis which ones need to be released and which ones need to be transferred. Which means monitors cannot be handled as a single homogenous group. 470 470 471 471 \subsubsection{Dependency graphs} … … 497 497 Resolving dependency graph being a complex and expensive endeavour, this solution is not the preffered one. 498 498 499 \subsubsection{Partial signalling} 499 \subsubsection{Partial signalling} \label{partial-sig} 500 500 Finally, the solution that is chosen for \CFA is to use partial signalling. 
Consider the following case: 501 501 … … 605 605 % ====================================================================== 606 606 % ====================================================================== 607 \subsection{Internal scheduling: Implementation} \label{insched-impl} 608 % ====================================================================== 609 % ====================================================================== 610 \TODO 611 607 \subsection{Internal scheduling: Implementation} \label{inschedimpl} 608 % ====================================================================== 609 % ====================================================================== 610 There are several challenges specific to \CFA when implementing internal scheduling. These challenges are direct results of \gls{group-acquire} and loose object definitions. These two constraints are to root cause of most design decisions in the implementation of internal scheduling. Furthermore, to avoid the head-aches of dynamically allocating memory in a concurrent environment, the internal-scheduling design is entirely free of mallocs and other dynamic memory allocation scheme. This is to avoid the chicken and egg problem of having a memory allocator that relies on the threading system and a threading system that relies on the runtime. This extra goal, means that memory management is a constant concern in the design of the system. 611 612 The main memory concern for concurrency is queues. All blocking operations are made by parking threads onto queues. These queues need to be intrinsic\cit to avoid the need memory allocation. This entails that all the fields needed to keep track of all needed information. Since internal scheduling can use an unbound amount of memory (depending on \gls{group-acquire}) statically defining information information in the intrusive fields of threads is insufficient. The only variable sized container that does not require memory allocation is the callstack, which is heavily used in the implementation of internal scheduling. Particularly the GCC extension variable length arrays which is used extensively. 613 614 Since stack allocation is based around scope, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable length. In the case of external scheduling, the threads and the condition both allow a fixed amount of memory to be stored, while mutex-routines and the actual blocking call allow for an unbound amount (though adding too much to the mutex routine stack size can become expansive faster). 615 616 The following figure is the traditionnal illustration of a monitor : 617 618 \begin{center} 619 {\resizebox{0.4\textwidth}{!}{\input{monitor}}} 620 \end{center} 621 622 For \CFA, the previous picture does not have support for blocking multiple monitors on a single condition. To support \gls{group-acquire} two changes to this picture are required. First, it doesn't make sense to tie the condition to a single monitor since blocking two monitors as one would require arbitrarily picking a monitor to hold the condition. Secondly, the object waiting on the conditions and AS-stack cannot simply contain the waiting thread since a single thread can potentially wait on multiple monitors. As mentionned in section \ref{inschedimpl}, the handling in multiple monitors is done by partially passing, which entails that each concerned monitor needs to have a node object. 
However, for waiting on the condition, since all threads need to wait together, a single object needs to be queued in the condition. Moving out the condition and updating the node types yields : 623 624 \begin{center} 625 {\resizebox{0.8\textwidth}{!}{\input{int_monitor}}} 626 \end{center} 627 628 \newpage 629 630 This picture and the proper entry and leave algorithms is the fundamental implementation of internal scheduling. 631 632 \begin{multicols}{2} 633 Entry 634 \begin{pseudo}[numbers=left] 635 if monitor is free 636 enter 637 elif I already own the monitor 638 continue 639 else 640 block 641 increment recursion 642 643 \end{pseudo} 644 \columnbreak 645 Exit 646 \begin{pseudo}[numbers=left, firstnumber=8] 647 decrement recursion 648 if recursion == 0 649 if signal_stack not empty 650 set_owner to thread 651 if all monitors ready 652 wake-up thread 653 654 if entry queue not empty 655 wake-up thread 656 \end{pseudo} 657 \end{multicols} 658 659 Some important things to notice about the exit routine. The solution discussed in \ref{inschedimpl} can be seen on line 11 of the previous pseudo code. Basically, the solution boils down to having a seperate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has trasnferred ownership. This solution is safe as well as preventing any potential barging. 612 660 613 661 % ====================================================================== … … 644 692 inUse = true; 645 693 } 646 void g() {694 void V() { 647 695 inUse = false; 648 696 … … 652 700 \end{tabular} 653 701 \end{center} 654 This method is more constrained and explicit, which may help users tone down the undeterministic nature of concurrency. Indeed, as the following examples demonstrates, external scheduling allows users to wait for events from other threads without the concern of unrelated events occuring. External scheduling can generally be done either in terms of control flow (e.g. \uC) or in terms of data (e.g. Go). Of course, both of these paradigms have their own strenghts and weaknesses but for this project control flow semantics where chosen to stay consistent with the rest of the languages semantics. Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multi-monitor routines. The following example shows a simple use \code{accept} versus \code{wait}/\code{signal} and its advantages.655 656 In the case of internal scheduling, the call to \code{wait} only guarantees that \code{ g} is the last routine to access the monitor. This entails that the routine \code{f} may have acquired mutual exclusion several times while routine \code{h} was waiting. On the other hand, external scheduling guarantees that while routine \code{h} was waiting, no routine other than \code{g} could acquire the monitor.702 This method is more constrained and explicit, which may help users tone down the undeterministic nature of concurrency. Indeed, as the following examples demonstrates, external scheduling allows users to wait for events from other threads without the concern of unrelated events occuring. External scheduling can generally be done either in terms of control flow (e.g., \uC) or in terms of data (e.g. Go). Of course, both of these paradigms have their own strenghts and weaknesses but for this project control-flow semantics were chosen to stay consistent with the rest of the languages semantics. 
Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multi-monitor routines. The previous example shows a simple use \code{_Accept} versus \code{wait}/\code{signal} and its advantages. Note that while other languages often use \code{accept} as the core external scheduling keyword, \CFA uses \code{waitfor} to prevent name collisions with existing socket APIs. 703 704 In the case of internal scheduling, the call to \code{wait} only guarantees that \code{V} is the last routine to access the monitor. This entails that the routine \code{V} may have acquired mutual exclusion several times while routine \code{P} was waiting. On the other hand, external scheduling guarantees that while routine \code{P} was waiting, no routine other than \code{V} could acquire the monitor. 657 705 658 706 % ====================================================================== … … 667 715 668 716 void f(A & mutex a); 669 void g(A & mutex a) { accept(f); } 670 \end{cfacode} 671 672 However, external scheduling is an example where implementation constraints become visible from the interface. Indeed, since there is no hard limit to the number of threads trying to acquire a monitor concurrently, performance is a significant concern. Here is the pseudo code for the entering phase of a monitor: 717 void f(int a, float b); 718 void g(A & mutex a) { 719 waitfor(f); // Less obvious which f() to wait for 720 } 721 \end{cfacode} 722 723 Furthermore, external scheduling is an example where implementation constraints become visible from the interface. Indeed, since there is no hard limit to the number of threads trying to acquire a monitor concurrently, performance is a significant concern. Here is the pseudo code for the entering phase of a monitor: 673 724 674 725 \begin{center} … … 677 728 if monitor is free 678 729 enter 730 elif I already own the monitor 731 continue 679 732 elif monitor accepts me 680 733 enter … … 685 738 \end{center} 686 739 687 For the \pscode{monitor is free} conditionit is easy to implement a check that can evaluate the condition in a few instruction. However, a fast check for \pscode{monitor accepts me} is much harder to implement depending on the constraints put on the monitors. Indeed, monitors are often expressed as an entry queue and some acceptor queue as in the following figure:740 For the fist two conditions, it is easy to implement a check that can evaluate the condition in a few instruction. However, a fast check for \pscode{monitor accepts me} is much harder to implement depending on the constraints put on the monitors. Indeed, monitors are often expressed as an entry queue and some acceptor queue as in the following figure: 688 741 689 742 \begin{center} … … 691 744 \end{center} 692 745 693 There are other alternatives to these pictures but in the case of this picture implementing a fast accept check is relatively easy. Indeed simply updating a bitmask when the acceptor queue changes is enough to have a check that executes in a single instruction, even with a fairly large number (e.g. 128) of mutex members. However, this relies on the fact that all the acceptable routines are declared with the monitor type. For OO languages this does not compromise much since monitors already have an exhaustive list of member routines. However, for \CFA this is not the case; routines can be added to a type anywhere after its declaration. 
Its important to note that the bitmask approach does not actually require an exhaustive list of routines, but it requires a dense unique ordering of routines with an upper-bound and that ordering must be consistent across translation units.746 There are other alternatives to these pictures but in the case of this picture implementing a fast accept check is relatively easy. Indeed simply updating a bitmask when the acceptor queue changes is enough to have a check that executes in a single instruction, even with a fairly large number (e.g. 128) of mutex members. This technique cannot be used in \CFA because it relies on the fact that the monitor type declares all the acceptable routines. For OO languages this does not compromise much since monitors already have an exhaustive list of member routines. However, for \CFA this is not the case; routines can be added to a type anywhere after its declaration. Its important to note that the bitmask approach does not actually require an exhaustive list of routines, but it requires a dense unique ordering of routines with an upper-bound and that ordering must be consistent across translation units. 694 747 The alternative would be to have a picture more like this one: 695 748 … … 698 751 \end{center} 699 752 700 Not storing the queues inside the monitor means that the storage can vary between routines, allowing for more flexibility and extensions. Storing an array of function-pointers would solve the issue of uniquely identifying acceptable routines. However, the single instruction bitmask compare has been replaced by dereferencing a pointer followed by a linear search. Furthermore, supporting nested external scheduling may now require additionnal searches on calls to accept to check if a routine is already queued in. 701 702 At this point we must make a decision between flexibility and performance. Many design decisions in \CFA achieve both flexibility and performance, for example polymorphic routines add significant flexibility but inlining them means the optimizer can easily remove any runtime cost. Here however, the cost of flexibility cannot be trivially removed. 703 704 In either cases here are a few alternatives for the different syntaxes this syntax : \\ 705 \begin{center} 706 {\renewcommand{\arraystretch}{1.5} 707 \begin{tabular}[t]{l @{\hskip 0.35in} l} 708 \hline 709 \multicolumn{2}{ c }{\code{accept} on type}\\ 710 \hline 711 Alternative 1 & Alternative 2 \\ 712 \begin{lstlisting} 713 mutex struct A 714 accept( void f(A & mutex a) ) 715 {}; 716 \end{lstlisting} &\begin{lstlisting} 717 mutex struct A {} 718 accept( void f(A & mutex a) ); 719 720 \end{lstlisting} \\ 721 Alternative 3 & Alternative 4 \\ 722 \begin{lstlisting} 723 mutex struct A { 724 accept( void f(A & mutex a) ) 725 }; 726 727 \end{lstlisting} &\begin{lstlisting} 728 mutex struct A { 729 accept : 730 void f(A & mutex a) ); 731 }; 732 \end{lstlisting}\\ 733 \hline 734 \multicolumn{2}{ c }{\code{accept} on routine}\\ 735 \hline 736 \begin{lstlisting} 737 mutex struct A {}; 738 739 void f(A & mutex a) 740 741 accept( void f(A & mutex a) ) 742 void g(A & mutex a) { 743 /*...*/ 744 } 745 \end{lstlisting}&\\ 746 \end{tabular} 747 } 748 \end{center} 749 750 Another aspect to consider is what happens if multiple overloads of the same routine are used. For the time being it is assumed that multiple overloads of the same routine should be scheduled regardless of the overload used. However, this could easily be extended in the future. 
753 Not storing the queues inside the monitor means that the storage can vary between routines, allowing for more flexibility and extensions. Storing an array of function-pointers would solve the issue of uniquely identifying acceptable routines. However, the single instruction bitmask compare has been replaced by dereferencing a pointer followed by a linear search. Furthermore, supporting nested external scheduling may now require additionnal searches on calls to waitfor to check if a routine is already queued in. 754 755 At this point we must make a decision between flexibility and performance. Many design decisions in \CFA achieve both flexibility and performance, for example polymorphic routines add significant flexibility but inlining them means the optimizer can easily remove any runtime cost. Here however, the cost of flexibility cannot be trivially removed. In the end, the most flexible approach has been chosen since it allows users to write programs that would otherwise be prohibitively hard to write. This is based on the assumption that writing fast but inflexible locks is closer to a solved problems than writing locks that are as flexible as external scheduling in \CFA. 756 757 Another aspect to consider is what happens if multiple overloads of the same routine are used. For the time being it is assumed that multiple overloads of the same routine are considered as distinct routines. However, this could easily be extended in the future. 751 758 752 759 % ====================================================================== … … 758 765 External scheduling, like internal scheduling, becomes orders of magnitude more complex when we start introducing multi-monitor syntax. Even in the simplest possible case some new semantics need to be established: 759 766 \begin{cfacode} 760 accept( void f(mutex struct A & mutex this))761 767 mutex struct A {}; 762 768 … … 764 770 765 771 void g(A & mutex a, B & mutex b) { 766 accept(f); //ambiguous, which monitor772 waitfor(f); //ambiguous, which monitor 767 773 } 768 774 \end{cfacode} … … 771 777 772 778 \begin{cfacode} 773 accept( void f(mutex struct A & mutex this))774 779 mutex struct A {}; 775 780 … … 777 782 778 783 void g(A & mutex a, B & mutex b) { 779 accept( b, f ); 780 } 781 \end{cfacode} 782 783 This is unambiguous. Both locks will be acquired and kept, when routine \code{f} is called the lock for monitor \code{a} will be temporarily transferred from \code{g} to \code{f} (while \code{g} still holds lock \code{b}). This behavior can be extended to multi-monitor accept statment as follows. 784 785 \begin{cfacode} 786 accept( void f(mutex struct A & mutex, mutex struct A & mutex)) 784 waitfor( f, b ); 785 } 786 \end{cfacode} 787 788 This is unambiguous. Both locks will be acquired and kept, when routine \code{f} is called the lock for monitor \code{b} will be temporarily transferred from \code{g} to \code{f} (while \code{g} still holds lock \code{a}). This behavior can be extended to multi-monitor waitfor statment as follows. 789 790 \begin{cfacode} 787 791 mutex struct A {}; 788 792 … … 790 794 791 795 void g(A & mutex a, B & mutex b) { 792 accept( b, a, f); 793 } 794 \end{cfacode} 795 796 Note that the set of monitors passed to the \code{accept} statement must be entirely contained in the set of monitor already acquired in the routine. \code{accept} used in any other context is Undefined Behaviour. 
796 waitfor( f, a, b); 797 } 798 \end{cfacode} 799 800 Note that the set of monitors passed to the \code{waitfor} statement must be entirely contained in the set of monitor already acquired in the routine. \code{waitfor} used in any other context is Undefined Behaviour. 801 802 An important behavior to note is that what happens when set of monitors only match partially : 803 804 \begin{cfacode} 805 mutex struct A {}; 806 807 mutex struct B {}; 808 809 void g(A & mutex a, B & mutex b) { 810 waitfor(f, a, b); 811 } 812 813 A a1, a2; 814 B b; 815 816 void foo() { 817 g(a1, b); 818 } 819 820 void bar() { 821 f(a2, b); 822 } 823 \end{cfacode} 824 825 While the equivalent can happen when using internal scheduling, the fact that conditions are branded on first use means that users have to use two different condition variables. In both cases, partially matching monitor sets will not wake-up the waiting thread. It is also important to note that in the case of external scheduling, as for routine calls, the order of parameters is important; \code{waitfor(f,a,b)} and \code{waitfor(f,b,a)} are to distinct waiting condition. 797 826 798 827 % ====================================================================== -
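The internal-scheduling discussion in concurrency.tex above is anchored on the small P/V monitor (a bool inUse with wait/signal on a condition). For readers more familiar with library-level threading, a minimal C++ rendering of that same example using std::mutex and std::condition_variable is sketched below; the class name is invented here and, unlike a CFA monitor, this version gives no barging-prevention guarantee between the notify and the wake-up:

    #include <mutex>
    #include <condition_variable>

    class Resource {                        // plays the role of monitor A
        std::mutex              m;          // the monitor lock
        std::condition_variable c;          // the (explicit) condition
        bool                    inUse = false;
    public:
        void P() {                          // "if(inUse) wait(c); inUse = true;"
            std::unique_lock<std::mutex> lk( m );
            c.wait( lk, [this]{ return !inUse; } );
            inUse = true;
        }
        void V() {                          // "inUse = false; signal(c);"
            std::lock_guard<std::mutex> lk( m );
            inUse = false;
            c.notify_one();
        }
    };

The predicate passed to wait() is what a barging thread can race against: another caller of P() may grab the lock between notify_one() and the woken thread's reacquisition. The monitor semantics described in the text (signal as ownership transfer, partial signalling for group acquire) are precisely about closing that window.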
doc/proposals/concurrency/version
re149f77 r695e00d 1 0.9.122 1 0.9.180 -
src/Common/PassVisitor.impl.h
re149f77 r695e00d 183 183 184 184 } catch ( SemanticError &e ) { 185 e.set_location( (*i)->location ); 185 186 errors.append( e ); 186 187 } … … 292 293 //------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 293 294 295 // A NOTE ON THE ORDER OF TRAVERSAL 296 // 297 // Types and typedefs have their base types visited before they are added to the type table. This is ok, since there is 298 // no such thing as a recursive type or typedef. 299 // 300 // typedef struct { T *x; } T; // never allowed 301 // 302 // for structs/unions, it is possible to have recursion, so the decl should be added as if it's incomplete to begin, the 303 // members are traversed, and then the complete type should be added (assuming the type is completed by this particular 304 // declaration). 305 // 306 // struct T { struct T *x; }; // allowed 307 // 308 // It is important to add the complete type to the symbol table *after* the members/base has been traversed, since that 309 // traversal may modify the definition of the type and these modifications should be visible when the symbol table is 310 // queried later in this pass. 311 // 312 // TODO: figure out whether recursive contexts are sensible/possible/reasonable. 294 313 295 314 //-------------------------------------------------------------------------- … … 449 468 indexerAddEnum( node ); 450 469 451 // unlike structs, contexts, and unions, enums inject their members into the global scope470 // unlike structs, traits, and unions, enums inject their members into the global scope 452 471 maybeAccept( node->parameters, *this ); 453 472 maybeAccept( node->members , *this ); … … 513 532 } 514 533 534 // see A NOTE ON THE ORDER OF TRAVERSAL, above 535 // note that assertions come after the type is added to the symtab, since they are not part of the type proper 536 // and may depend on the type itself 515 537 indexerAddType( node ); 516 538 … … 532 554 } 533 555 556 // see A NOTE ON THE ORDER OF TRAVERSAL, above 557 // note that assertions come after the type is added to the symtab, since they are not part of the type proper 558 // and may depend on the type itself 534 559 indexerAddType( node ); 535 560 … … 658 683 void PassVisitor< pass_type >::visit( IfStmt * node ) { 659 684 VISIT_START( node ); 660 661 visitExpression( node->condition ); 662 node->thenPart = visitStatement( node->thenPart ); 663 node->elsePart = visitStatement( node->elsePart ); 664 685 { 686 // if statements introduce a level of scope (for the initialization) 687 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 688 acceptAll( node->get_initialization(), *this ); 689 visitExpression( node->condition ); 690 node->thenPart = visitStatement( node->thenPart ); 691 node->elsePart = visitStatement( node->elsePart ); 692 } 665 693 VISIT_END( node ); 666 694 } … … 670 698 MUTATE_START( node ); 671 699 { 672 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 700 // if statements introduce a level of scope (for the initialization) 701 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 702 maybeMutateRef( node->get_initialization(), *this ); 673 703 node->condition = mutateExpression( node->condition ); 674 704 node->thenPart = mutateStatement ( node->thenPart ); … … 706 736 VISIT_START( node ); 707 737 { 738 // for statements introduce a level of 
scope (for the initialization) 708 739 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 709 740 maybeAccept( node->initialization, *this ); … … 719 750 MUTATE_START( node ); 720 751 { 752 // for statements introduce a level of scope (for the initialization) 721 753 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 722 754 maybeMutateRef( node->initialization, *this ); … … 847 879 VISIT_START( node ); 848 880 { 881 // catch statements introduce a level of scope (for the caught exception) 849 882 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 850 883 maybeAccept( node->decl, *this ); … … 859 892 MUTATE_START( node ); 860 893 { 894 // catch statements introduce a level of scope (for the caught exception) 861 895 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 862 896 maybeMutateRef( node->decl, *this ); -
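The PassVisitor changes above repeatedly open and close an indexer scope with auto guard = makeFuncGuard( enter, leave ), so that the leave action runs on every exit path, including a SemanticError thrown by a nested visit. The project's actual helper lives elsewhere in src/ and may differ in detail; the following is a generic sketch of that function-pair guard idiom:

    #include <utility>

    template< typename Enter, typename Leave >
    struct FuncGuard {
        Leave leave;
        bool  armed = true;
        FuncGuard( Enter enter, Leave leave_ ) : leave( std::move( leave_ ) ) { enter(); }
        FuncGuard( FuncGuard && o ) : leave( std::move( o.leave ) ) { o.armed = false; }
        ~FuncGuard() { if ( armed ) leave(); }   // paired action on scope exit
    };

    template< typename Enter, typename Leave >
    FuncGuard< Enter, Leave > makeFuncGuard( Enter e, Leave l ) {
        return FuncGuard< Enter, Leave >( std::move( e ), std::move( l ) );
    }

    // usage, mirroring the pass code:
    //   auto guard = makeFuncGuard( [this](){ indexerScopeEnter(); },
    //                               [this](){ indexerScopeLeave(); } );

Tying the leave call to a destructor is what lets the if/for/catch visits above return early or rethrow without leaking an indexer scope.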
src/GenPoly/Box.cc
re149f77 r695e00d 628 628 } else { 629 629 // xxx - should this be an assertion? 630 std::string x = env ? toString( *env ) : "missing env"; 631 throw SemanticError( x + "\n" + "unbound type variable: " + tyParm->first + " in application ", appExpr ); 630 throw SemanticError( toString( *env, "\nunbound type variable: ", tyParm->first, " in application " ), appExpr ); 632 631 } // if 633 632 } // if … … 706 705 if ( concrete == 0 ) { 707 706 return typeInst; 708 // xxx - should this be an assertion?709 // std::string x = env ? toString( *env ) : "missing env";710 // throw SemanticError( x + "\n" + "Unbound type variable " + typeInst->get_name() + " in ", appExpr );711 707 } // if 712 708 return concrete; -
src/InitTweak/FixInit.cc
re149f77 r695e00d 70 70 namespace InitTweak { 71 71 namespace { 72 typedef std::unordered_map< Expression *, TypeSubstitution * > EnvMap;73 72 typedef std::unordered_map< int, int > UnqCount; 74 73 75 class InsertImplicitCalls : public WithTypeSubstitution { 76 public: 74 struct InsertImplicitCalls : public WithTypeSubstitution { 77 75 /// wrap function application expressions as ImplicitCopyCtorExpr nodes so that it is easy to identify which 78 76 /// function calls need their parameters to be copy constructed 79 static void insert( std::list< Declaration * > & translationUnit, EnvMap & envMap ); 80 81 InsertImplicitCalls( EnvMap & envMap ) : envMap( envMap ) {} 77 static void insert( std::list< Declaration * > & translationUnit ); 82 78 83 79 Expression * postmutate( ApplicationExpr * appExpr ); 84 void premutate( StmtExpr * stmtExpr );85 86 // collects environments for relevant nodes87 EnvMap & envMap;88 80 }; 89 81 90 class ResolveCopyCtors final : public SymTab::Indexer { 91 public: 82 struct ResolveCopyCtors final : public WithIndexer, public WithShortCircuiting, public WithTypeSubstitution { 92 83 /// generate temporary ObjectDecls for each argument and return value of each ImplicitCopyCtorExpr, 93 84 /// generate/resolve copy construction expressions for each, and generate/resolve destructors for both 94 85 /// arguments and return value temporaries 95 static void resolveImplicitCalls( std::list< Declaration * > & translationUnit, const EnvMap & envMap, UnqCount & unqCount ); 96 97 typedef SymTab::Indexer Parent; 98 using Parent::visit; 99 100 ResolveCopyCtors( const EnvMap & envMap, UnqCount & unqCount ) : envMap( envMap ), unqCount( unqCount ) {} 101 102 virtual void visit( ImplicitCopyCtorExpr * impCpCtorExpr ) override; 103 virtual void visit( UniqueExpr * unqExpr ) override; 104 virtual void visit( StmtExpr * stmtExpr ) override; 86 static void resolveImplicitCalls( std::list< Declaration * > & translationUnit, UnqCount & unqCount ); 87 88 ResolveCopyCtors( UnqCount & unqCount ) : unqCount( unqCount ) {} 89 90 void postvisit( ImplicitCopyCtorExpr * impCpCtorExpr ); 91 void postvisit( StmtExpr * stmtExpr ); 92 void previsit( UniqueExpr * unqExpr ); 93 void postvisit( UniqueExpr * unqExpr ); 105 94 106 95 /// create and resolve ctor/dtor expression: fname(var, [cpArg]) … … 111 100 void destructRet( ObjectDecl * ret, ImplicitCopyCtorExpr * impCpCtorExpr ); 112 101 113 TypeSubstitution * env;114 const EnvMap & envMap;115 102 UnqCount & unqCount; // count the number of times each unique expr ID appears 103 std::unordered_set< int > vars; 116 104 }; 117 105 … … 238 226 }; 239 227 240 class GenStructMemberCalls final : public SymTab::Indexer { 241 public: 242 typedef Indexer Parent; 228 struct GenStructMemberCalls final : public WithGuards, public WithShortCircuiting, public WithIndexer { 243 229 /// generate default/copy ctor and dtor calls for user-defined struct ctor/dtors 244 230 /// for any member that is missing a corresponding ctor/dtor call. 
… … 246 232 static void generate( std::list< Declaration * > & translationUnit ); 247 233 248 using Parent::visit; 249 250 virtual void visit( FunctionDecl * funcDecl ) override; 251 252 virtual void visit( MemberExpr * memberExpr ) override; 253 virtual void visit( ApplicationExpr * appExpr ) override; 234 void previsit( FunctionDecl * funcDecl ); 235 void postvisit( FunctionDecl * funcDecl ); 236 237 void previsit( MemberExpr * memberExpr ); 238 void previsit( ApplicationExpr * appExpr ); 254 239 255 240 SemanticError errors; … … 295 280 InitTweak::fixGlobalInit( translationUnit, filename, inLibrary ); 296 281 297 EnvMap envMap;298 282 UnqCount unqCount; 299 283 300 InsertImplicitCalls::insert( translationUnit , envMap);301 ResolveCopyCtors::resolveImplicitCalls( translationUnit, envMap,unqCount );284 InsertImplicitCalls::insert( translationUnit ); 285 ResolveCopyCtors::resolveImplicitCalls( translationUnit, unqCount ); 302 286 InsertDtors::insert( translationUnit ); 303 287 FixInit::fixInitializers( translationUnit ); … … 318 302 319 303 namespace { 320 void InsertImplicitCalls::insert( std::list< Declaration * > & translationUnit , EnvMap & envMap) {321 PassVisitor<InsertImplicitCalls> inserter ( envMap );304 void InsertImplicitCalls::insert( std::list< Declaration * > & translationUnit ) { 305 PassVisitor<InsertImplicitCalls> inserter; 322 306 mutateAll( translationUnit, inserter ); 323 307 } 324 308 325 void ResolveCopyCtors::resolveImplicitCalls( std::list< Declaration * > & translationUnit, const EnvMap & envMap,UnqCount & unqCount ) {326 ResolveCopyCtors resolver( envMap,unqCount );309 void ResolveCopyCtors::resolveImplicitCalls( std::list< Declaration * > & translationUnit, UnqCount & unqCount ) { 310 PassVisitor<ResolveCopyCtors> resolver( unqCount ); 327 311 acceptAll( translationUnit, resolver ); 328 312 } … … 360 344 361 345 void GenStructMemberCalls::generate( std::list< Declaration * > & translationUnit ) { 362 GenStructMemberCallswarner;346 PassVisitor<GenStructMemberCalls> warner; 363 347 acceptAll( translationUnit, warner ); 364 348 } … … 398 382 // wrap each function call so that it is easy to identify nodes that have to be copy constructed 399 383 ImplicitCopyCtorExpr * expr = new ImplicitCopyCtorExpr( appExpr ); 400 // save the type substitution into the envMap so that it is easy to find.384 // Move the type substitution to the new top-level, if it is attached to the appExpr. 401 385 // Ensure it is not deleted with the ImplicitCopyCtorExpr by removing it before deletion. 402 386 // The substitution is needed to obtain the type of temporary variables so that copy constructor 403 // calls can be resolved. Normally this is what PolyMutator is for, but the pass that resolves 404 // copy constructor calls must be an Indexer. We could alternatively make a PolyIndexer which 405 // saves the environment, or compute the types of temporaries here, but it's much simpler to 406 // save the environment here, and more cohesive to compute temporary variables and resolve copy 407 // constructor calls together. 387 // calls can be resolved. 
408 388 assert( env ); 409 envMap[expr] = env;389 std::swap( expr->env, appExpr->env ); 410 390 return expr; 411 }412 413 void InsertImplicitCalls::premutate( StmtExpr * stmtExpr ) {414 assert( env );415 envMap[stmtExpr] = env;416 391 } 417 392 … … 431 406 // (VariableExpr and already resolved expression) 432 407 CP_CTOR_PRINT( std::cerr << "ResolvingCtorDtor " << untyped << std::endl; ) 433 Expression * resolved = ResolvExpr::findVoidExpression( untyped, *this);408 Expression * resolved = ResolvExpr::findVoidExpression( untyped, indexer ); 434 409 assert( resolved ); 435 410 if ( resolved->get_env() ) { … … 480 455 } 481 456 482 void ResolveCopyCtors:: visit( ImplicitCopyCtorExpr *impCpCtorExpr ) {457 void ResolveCopyCtors::postvisit( ImplicitCopyCtorExpr *impCpCtorExpr ) { 483 458 CP_CTOR_PRINT( std::cerr << "ResolveCopyCtors: " << impCpCtorExpr << std::endl; ) 484 Parent::visit( impCpCtorExpr );485 env = envMap.at(impCpCtorExpr);486 assert( env );487 459 488 460 ApplicationExpr * appExpr = impCpCtorExpr->get_callExpr(); … … 513 485 } 514 486 515 void ResolveCopyCtors::visit( StmtExpr * stmtExpr ) { 516 Parent::visit( stmtExpr ); 517 env = envMap.at(stmtExpr); 487 void ResolveCopyCtors::postvisit( StmtExpr * stmtExpr ) { 488 assert( env ); 518 489 assert( stmtExpr->get_result() ); 519 490 Type * result = stmtExpr->get_result(); … … 537 508 stmtExpr->get_dtors().push_front( makeCtorDtor( "^?{}", ret ) ); 538 509 } // if 539 540 } 541 542 void ResolveCopyCtors::visit( UniqueExpr * unqExpr ) { 543 static std::unordered_set< int > vars; 510 } 511 512 void ResolveCopyCtors::previsit( UniqueExpr * unqExpr ) { 544 513 unqCount[ unqExpr->get_id() ]++; // count the number of unique expressions for each ID 545 514 if ( vars.count( unqExpr->get_id() ) ) { 546 515 // xxx - hack to prevent double-handling of unique exprs, otherwise too many temporary variables and destructors are generated 516 visit_children = false; 517 } 518 } 519 520 void ResolveCopyCtors::postvisit( UniqueExpr * unqExpr ) { 521 if ( vars.count( unqExpr->get_id() ) ) { 522 // xxx - hack to prevent double-handling of unique exprs, otherwise too many temporary variables and destructors are generated 547 523 return; 548 524 } 549 525 550 Parent::visit( unqExpr );551 526 // it should never be necessary to wrap a void-returning expression in a UniqueExpr - if this assumption changes, this needs to be rethought 552 527 assert( unqExpr->get_result() ); … … 585 560 586 561 // xxx - update to work with multiple return values 587 ObjectDecl * returnDecl = returnDecls.empty() ? NULL: returnDecls.front();562 ObjectDecl * returnDecl = returnDecls.empty() ? 
nullptr : returnDecls.front(); 588 563 Expression * callExpr = impCpCtorExpr->get_callExpr(); 589 564 … … 594 569 tempDecls.clear(); 595 570 returnDecls.clear(); 596 impCpCtorExpr->set_callExpr( NULL ); 597 impCpCtorExpr->set_env( NULL ); 571 impCpCtorExpr->set_callExpr( nullptr ); 572 std::swap( impCpCtorExpr->env, callExpr->env ); 573 assert( impCpCtorExpr->env == nullptr ); 598 574 delete impCpCtorExpr; 599 575 … … 978 954 } 979 955 980 void GenStructMemberCalls:: visit( FunctionDecl * funcDecl ) {981 ValueGuard< FunctionDecl * > oldFunction( funcDecl );982 ValueGuard< std::set< DeclarationWithType * > > oldUnhandled( unhandled );983 ValueGuard< std::map< DeclarationWithType *, CodeLocation > > oldUsedUninit( usedUninit );984 ValueGuard< ObjectDecl * > oldThisParam( thisParam );985 ValueGuard< bool > oldIsCtor( isCtor );986 ValueGuard< StructDecl * > oldStructDecl( structDecl );956 void GenStructMemberCalls::previsit( FunctionDecl * funcDecl ) { 957 GuardValue( funcDecl ); 958 GuardValue( unhandled ); 959 GuardValue( usedUninit ); 960 GuardValue( thisParam ); 961 GuardValue( isCtor ); 962 GuardValue( structDecl ); 987 963 errors = SemanticError(); // clear previous errors 988 964 … … 1010 986 } 1011 987 } 1012 Parent::visit( function ); 1013 988 } 989 990 void addIds( SymTab::Indexer & indexer, const std::list< DeclarationWithType * > & decls ) { 991 for ( auto d : decls ) { 992 indexer.addId( d ); 993 } 994 } 995 996 void addTypes( SymTab::Indexer & indexer, const std::list< TypeDecl * > & tds ) { 997 for ( auto td : tds ) { 998 indexer.addType( td ); 999 addIds( indexer, td->assertions ); 1000 } 1001 } 1002 1003 void GenStructMemberCalls::postvisit( FunctionDecl * funcDecl ) { 1014 1004 // remove the unhandled objects from usedUninit, because a call is inserted 1015 1005 // to handle them - only objects that are later constructed are used uninitialized. … … 1032 1022 if ( ! unhandled.empty() ) { 1033 1023 // need to explicitly re-add function parameters to the indexer in order to resolve copy constructors 1034 enterScope(); 1035 maybeAccept( function->get_functionType(), *this ); 1024 auto guard = makeFuncGuard( [this]() { indexer.enterScope(); }, [this]() { indexer.leaveScope(); } ); 1025 addTypes( indexer, function->type->forall ); 1026 addIds( indexer, function->type->returnVals ); 1027 addIds( indexer, function->type->parameters ); 1036 1028 1037 1029 // need to iterate through members in reverse in order for … … 1063 1055 Statement * callStmt = stmt.front(); 1064 1056 1065 MutatingResolver resolver( *this);1057 MutatingResolver resolver( indexer ); 1066 1058 try { 1067 1059 callStmt->acceptMutator( resolver ); … … 1077 1069 } 1078 1070 } 1079 leaveScope();1080 1071 } 1081 1072 if (! errors.isEmpty()) { … … 1107 1098 } 1108 1099 1109 void GenStructMemberCalls::visit( ApplicationExpr * appExpr ) { 1110 if ( ! checkWarnings( function ) ) return; 1100 void GenStructMemberCalls::previsit( ApplicationExpr * appExpr ) { 1101 if ( ! checkWarnings( function ) ) { 1102 visit_children = false; 1103 return; 1104 } 1111 1105 1112 1106 std::string fname = getFunctionName( appExpr ); … … 1127 1121 } 1128 1122 } 1129 Parent::visit( appExpr ); 1130 } 1131 1132 void GenStructMemberCalls::visit( MemberExpr * memberExpr ) { 1133 if ( ! checkWarnings( function ) ) return; 1134 if ( ! isCtor ) return; 1123 } 1124 1125 void GenStructMemberCalls::previsit( MemberExpr * memberExpr ) { 1126 if ( ! checkWarnings( function ) || ! 
isCtor ) { 1127 visit_children = false; 1128 return; 1129 } 1135 1130 1136 1131 if ( isThisExpression( memberExpr->get_aggregate(), thisParam ) ) { … … 1140 1135 } 1141 1136 } 1142 Parent::visit( memberExpr );1143 1137 } 1144 1138 … … 1161 1155 // in generated code. If this changes, add mutate methods for entities with 1162 1156 // scope and call {enter,leave}Scope explicitly. 1163 objectDecl->accept( indexer);1157 indexer.addId( objectDecl ); 1164 1158 return objectDecl; 1165 1159 } -
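Most hunks in this changeset move passes such as InsertImplicitCalls, ResolveCopyCtors and GenStructMemberCalls from the old virtual Visitor/Indexer style onto PassVisitor: visit() overrides become previsit()/postvisit() pairs, ValueGuard becomes GuardValue, and explicit Parent::visit() calls are replaced by the visit_children flag. The stand-alone C++ sketch below only illustrates the general shape of that previsit/postvisit protocol; the Node type, the acceptAll driver and the CountDepth pass are hypothetical and are not the Common/PassVisitor.h API.

    #include <iostream>
    #include <vector>

    // hypothetical AST node, standing in for Expression/Statement nodes
    struct Node {
            int id;
            std::vector< Node * > kids;
    };

    // minimal driver mimicking the previsit / children / postvisit protocol
    template< typename Pass >
    void acceptAll( Node * n, Pass & pass ) {
            pass.visit_children = true;            // reset per node, like the short-circuiting flag
            pass.previsit( n );
            if ( pass.visit_children ) {
                    for ( Node * kid : n->kids ) acceptAll( kid, pass );
            }
            pass.postvisit( n );
    }

    // a pass in the new style: previsit saves state, postvisit restores and reports
    struct CountDepth {
            bool visit_children = true;
            int depth = 0;

            void previsit( Node * n ) {
                    ++depth;                                   // plays the role of GuardValue( depth )
                    if ( n->id < 0 ) visit_children = false;   // prune the subtree, like visit_children = false
            }
            void postvisit( Node * n ) {
                    std::cout << "node " << n->id << " at depth " << depth << "\n";
                    --depth;
            }
    };

    int main() {
            Node leaf{ 2, {} }, pruned{ -1, {} }, root{ 1, { &leaf, &pruned } };
            CountDepth pass;
            acceptAll( &root, pass );
    }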
src/Parser/ExpressionNode.cc
re149f77 r695e00d 10 10 // Created On : Sat May 16 13:17:07 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Sep 13 14:54:19201713 // Update Count : 6 8312 // Last Modified On : Thu Sep 14 23:09:34 2017 13 // Update Count : 690 14 14 // 15 15 … … 363 363 } // build_pfieldSel 364 364 365 Expression * build_addressOf( ExpressionNode * expr_node ) {366 return new AddressExpr( maybeMoveBuild< Expression >(expr_node) );367 } // build_addressOf368 369 Expression * build_sizeOfexpr( ExpressionNode * expr_node ) {370 return new SizeofExpr( maybeMoveBuild< Expression >(expr_node) );371 } // build_sizeOfexpr372 373 Expression * build_sizeOftype( DeclarationNode * decl_node ) {374 return new SizeofExpr( maybeMoveBuildType( decl_node ) );375 } // build_sizeOftype376 377 Expression * build_alignOfexpr( ExpressionNode * expr_node ) {378 return new AlignofExpr( maybeMoveBuild< Expression >(expr_node) );379 } // build_alignOfexpr380 381 Expression * build_alignOftype( DeclarationNode * decl_node ) {382 return new AlignofExpr( maybeMoveBuildType( decl_node) );383 } // build_alignOftype384 385 365 Expression * build_offsetOf( DeclarationNode * decl_node, NameExpr * member ) { 386 366 Expression * ret = new UntypedOffsetofExpr( maybeMoveBuildType( decl_node ), member->get_name() ); … … 423 403 } // build_cond 424 404 425 Expression * build_attrexpr( NameExpr * var, ExpressionNode * expr_node ) {426 return new AttrExpr( var, maybeMoveBuild< Expression >(expr_node) );427 } // build_attrexpr428 429 Expression * build_attrtype( NameExpr * var, DeclarationNode * decl_node ) {430 return new AttrExpr( var, maybeMoveBuildType( decl_node ) );431 } // build_attrtype432 433 405 Expression * build_tuple( ExpressionNode * expr_node ) { 434 406 list< Expression * > exprs; … … 442 414 return new UntypedExpr( maybeMoveBuild< Expression >(function), args, nullptr ); 443 415 } // build_func 444 445 Expression * build_range( ExpressionNode * low, ExpressionNode * high ) {446 return new RangeExpr( maybeMoveBuild< Expression >( low ), maybeMoveBuild< Expression >( high ) );447 } // build_range448 416 449 417 Expression * build_compoundLiteral( DeclarationNode * decl_node, InitializerNode * kids ) { -
src/Parser/ParseNode.h
re149f77 r695e00d 10 10 // Created On : Sat May 16 13:28:16 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Sep 13 12:35:10201713 // Update Count : 8 0712 // Last Modified On : Thu Sep 14 23:09:39 2017 13 // Update Count : 815 14 14 // 15 15 … … 175 175 Expression * build_fieldSel( ExpressionNode * expr_node, Expression * member ); 176 176 Expression * build_pfieldSel( ExpressionNode * expr_node, Expression * member ); 177 Expression * build_addressOf( ExpressionNode * expr_node );178 Expression * build_sizeOfexpr( ExpressionNode * expr_node );179 Expression * build_sizeOftype( DeclarationNode * decl_node );180 Expression * build_alignOfexpr( ExpressionNode * expr_node );181 Expression * build_alignOftype( DeclarationNode * decl_node );182 177 Expression * build_offsetOf( DeclarationNode * decl_node, NameExpr * member ); 183 178 Expression * build_and( ExpressionNode * expr_node1, ExpressionNode * expr_node2 ); … … 188 183 Expression * build_binary_ptr( OperKinds op, ExpressionNode * expr_node1, ExpressionNode * expr_node2 ); 189 184 Expression * build_cond( ExpressionNode * expr_node1, ExpressionNode * expr_node2, ExpressionNode * expr_node3 ); 190 Expression * build_attrexpr( NameExpr * var, ExpressionNode * expr_node );191 Expression * build_attrtype( NameExpr * var, DeclarationNode * decl_node );192 185 Expression * build_tuple( ExpressionNode * expr_node = nullptr ); 193 186 Expression * build_func( ExpressionNode * function, ExpressionNode * expr_node ); 194 Expression * build_range( ExpressionNode * low, ExpressionNode * high );195 187 Expression * build_compoundLiteral( DeclarationNode * decl_node, InitializerNode * kids ); 196 188 -
src/Parser/parser.yy
re149f77 r695e00d 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Sep 13 11:01:20201713 // Update Count : 28 0312 // Last Modified On : Thu Sep 14 23:07:12 2017 13 // Update Count : 2815 14 14 // 15 15 … … 563 563 switch ( $1 ) { 564 564 case OperKinds::AddressOf: 565 $$ = new ExpressionNode( build_addressOf( $2) );565 $$ = new ExpressionNode( new AddressExpr( maybeMoveBuild< Expression >( $2 ) ) ); 566 566 break; 567 567 case OperKinds::PointTo: … … 569 569 break; 570 570 case OperKinds::And: 571 $$ = new ExpressionNode( new AddressExpr( build_addressOf( $2) ) );571 $$ = new ExpressionNode( new AddressExpr( new AddressExpr( maybeMoveBuild< Expression >( $2 ) ) ) ); 572 572 break; 573 573 default: … … 582 582 { $$ = new ExpressionNode( build_unary_ptr( OperKinds::Decr, $2 ) ); } 583 583 | SIZEOF unary_expression 584 { $$ = new ExpressionNode( build_sizeOfexpr( $2) ); }584 { $$ = new ExpressionNode( new SizeofExpr( maybeMoveBuild< Expression >( $2 ) ) ); } 585 585 | SIZEOF '(' type_no_function ')' 586 { $$ = new ExpressionNode( build_sizeOftype( $3) ); }586 { $$ = new ExpressionNode( new SizeofExpr( maybeMoveBuildType( $3 ) ) ); } 587 587 | ALIGNOF unary_expression // GCC, variable alignment 588 { $$ = new ExpressionNode( build_alignOfexpr( $2) ); }588 { $$ = new ExpressionNode( new AlignofExpr( maybeMoveBuild< Expression >( $2 ) ) ); } 589 589 | ALIGNOF '(' type_no_function ')' // GCC, type alignment 590 { $$ = new ExpressionNode( build_alignOftype( $3) ); }590 { $$ = new ExpressionNode( new AlignofExpr( maybeMoveBuildType( $3 ) ) ); } 591 591 | OFFSETOF '(' type_no_function ',' no_attr_identifier ')' 592 592 { $$ = new ExpressionNode( build_offsetOf( $3, build_varref( $5 ) ) ); } 593 593 | ATTR_IDENTIFIER 594 { $$ = new ExpressionNode( build_attrexpr( build_varref( $1 ), nullptr) ); }594 { $$ = new ExpressionNode( new AttrExpr( build_varref( $1 ), maybeMoveBuild< Expression >( (ExpressionNode *)nullptr ) ) ); } 595 595 | ATTR_IDENTIFIER '(' argument_expression ')' 596 { $$ = new ExpressionNode( build_attrexpr( build_varref( $1 ), $3) ); }596 { $$ = new ExpressionNode( new AttrExpr( build_varref( $1 ), maybeMoveBuild< Expression >( $3 ) ) ); } 597 597 | ATTR_IDENTIFIER '(' type ')' 598 { $$ = new ExpressionNode( build_attrtype( build_varref( $1 ), $3) ); }598 { $$ = new ExpressionNode( new AttrExpr( build_varref( $1 ), maybeMoveBuildType( $3 ) ) ); } 599 599 ; 600 600 … … 895 895 constant_expression { $$ = $1; } 896 896 | constant_expression ELLIPSIS constant_expression // GCC, subrange 897 { $$ = new ExpressionNode( build_range( $1, $3) ); }897 { $$ = new ExpressionNode( new RangeExpr( maybeMoveBuild< Expression >( $1 ), maybeMoveBuild< Expression >( $3 ) ) ); } 898 898 | subrange // CFA, subrange 899 899 ; … … 2089 2089 { $$ = $3; } 2090 2090 | '[' push constant_expression ELLIPSIS constant_expression pop ']' // GCC, multiple array elements 2091 { $$ = new ExpressionNode( build_range( $3, $5) ); }2091 { $$ = new ExpressionNode( new RangeExpr( maybeMoveBuild< Expression >( $3 ), maybeMoveBuild< Expression >( $5 ) ) ); } 2092 2092 | '.' '[' push field_list pop ']' // CFA, tuple field selector 2093 2093 { $$ = $4; } … … 2407 2407 subrange: 2408 2408 constant_expression '~' constant_expression // CFA, integer subrange 2409 { $$ = new ExpressionNode( build_range( $1, $3) ); }2409 { $$ = new ExpressionNode( new RangeExpr( maybeMoveBuild< Expression >( $1 ), maybeMoveBuild< Expression >( $3 ) ) ); } 2410 2410 ; 2411 2411 -
src/ResolvExpr/AlternativeFinder.cc
re149f77 r695e00d 523 523 for ( AssertionSet::iterator i = assertSet.begin(); i != assertSet.end(); ++i ) { 524 524 if ( i->second.isUsed ) { 525 i ->first->accept( indexer);525 indexer.addId( i->first ); 526 526 } 527 527 } -
src/ResolvExpr/AlternativePrinter.cc
re149f77 r695e00d 26 26 27 27 namespace ResolvExpr { 28 AlternativePrinter::AlternativePrinter( std::ostream &os ) : SymTab::Indexer( false ),os( os ) {}28 AlternativePrinter::AlternativePrinter( std::ostream &os ) : os( os ) {} 29 29 30 void AlternativePrinter:: visit( ExprStmt *exprStmt ) {30 void AlternativePrinter::postvisit( ExprStmt *exprStmt ) { 31 31 TypeEnvironment env; 32 AlternativeFinder finder( *this, env );32 AlternativeFinder finder( indexer, env ); 33 33 finder.findWithAdjustment( exprStmt->get_expr() ); 34 34 int count = 1; -
src/ResolvExpr/AlternativePrinter.h
re149f77 r695e00d 18 18 #include <iostream> // for ostream 19 19 20 #include " SymTab/Indexer.h" // for Indexer20 #include "Common/PassVisitor.h" 21 21 22 22 class ExprStmt; 23 23 24 24 namespace ResolvExpr { 25 class AlternativePrinter final : public SymTab::Indexer {25 class AlternativePrinter final : public WithIndexer { 26 26 public: 27 27 AlternativePrinter( std::ostream &os ); 28 28 29 using SymTab::Indexer::visit; 30 virtual void visit( ExprStmt *exprStmt ) override; 29 void postvisit( ExprStmt *exprStmt ); 31 30 private: 32 31 std::ostream &os; -
src/ResolvExpr/Resolver.cc
re149f77 r695e00d 21 21 #include "Alternative.h" // for Alternative, AltList 22 22 #include "AlternativeFinder.h" // for AlternativeFinder, resolveIn... 23 #include "Common/PassVisitor.h" // for PassVisitor 23 24 #include "Common/SemanticError.h" // for SemanticError 24 25 #include "Common/utility.h" // for ValueGuard, group_iterate … … 44 45 45 46 namespace ResolvExpr { 46 class Resolver final : public SymTab::Indexer { 47 public: 48 Resolver() : SymTab::Indexer( false ) {} 49 Resolver( const SymTab:: Indexer & other ) : SymTab::Indexer( other ) { 50 if ( const Resolver * res = dynamic_cast< const Resolver * >( &other ) ) { 51 functionReturn = res->functionReturn; 52 currentObject = res->currentObject; 53 inEnumDecl = res->inEnumDecl; 54 } 55 } 56 57 typedef SymTab::Indexer Parent; 58 using Parent::visit; 59 virtual void visit( FunctionDecl *functionDecl ) override; 60 virtual void visit( ObjectDecl *functionDecl ) override; 61 virtual void visit( TypeDecl *typeDecl ) override; 62 virtual void visit( EnumDecl * enumDecl ) override; 63 64 virtual void visit( ArrayType * at ) override; 65 virtual void visit( PointerType * at ) override; 66 67 virtual void visit( ExprStmt *exprStmt ) override; 68 virtual void visit( AsmExpr *asmExpr ) override; 69 virtual void visit( AsmStmt *asmStmt ) override; 70 virtual void visit( IfStmt *ifStmt ) override; 71 virtual void visit( WhileStmt *whileStmt ) override; 72 virtual void visit( ForStmt *forStmt ) override; 73 virtual void visit( SwitchStmt *switchStmt ) override; 74 virtual void visit( CaseStmt *caseStmt ) override; 75 virtual void visit( BranchStmt *branchStmt ) override; 76 virtual void visit( ReturnStmt *returnStmt ) override; 77 virtual void visit( ThrowStmt *throwStmt ) override; 78 virtual void visit( CatchStmt *catchStmt ) override; 79 virtual void visit( WaitForStmt *waitforStmt ) override; 80 81 virtual void visit( SingleInit *singleInit ) override; 82 virtual void visit( ListInit *listInit ) override; 83 virtual void visit( ConstructorInit *ctorInit ) override; 47 struct Resolver final : public WithIndexer, public WithGuards, public WithVisitorRef<Resolver>, public WithShortCircuiting { 48 Resolver() {} 49 Resolver( const SymTab::Indexer & other ) { 50 indexer = other; 51 } 52 53 void previsit( FunctionDecl *functionDecl ); 54 void postvisit( FunctionDecl *functionDecl ); 55 void previsit( ObjectDecl *functionDecl ); 56 void previsit( TypeDecl *typeDecl ); 57 void previsit( EnumDecl * enumDecl ); 58 59 void previsit( ArrayType * at ); 60 void previsit( PointerType * at ); 61 62 void previsit( ExprStmt *exprStmt ); 63 void previsit( AsmExpr *asmExpr ); 64 void previsit( AsmStmt *asmStmt ); 65 void previsit( IfStmt *ifStmt ); 66 void previsit( WhileStmt *whileStmt ); 67 void previsit( ForStmt *forStmt ); 68 void previsit( SwitchStmt *switchStmt ); 69 void previsit( CaseStmt *caseStmt ); 70 void previsit( BranchStmt *branchStmt ); 71 void previsit( ReturnStmt *returnStmt ); 72 void previsit( ThrowStmt *throwStmt ); 73 void previsit( CatchStmt *catchStmt ); 74 void previsit( WaitForStmt * stmt ); 75 76 void previsit( SingleInit *singleInit ); 77 void previsit( ListInit *listInit ); 78 void previsit( ConstructorInit *ctorInit ); 84 79 private: 85 80 typedef std::list< Initializer * >::iterator InitIterator; … … 98 93 99 94 void resolve( std::list< Declaration * > translationUnit ) { 100 Resolverresolver;95 PassVisitor<Resolver> resolver; 101 96 acceptAll( translationUnit, resolver ); 102 97 } 103 98 99 // used in resolveTypeof 104 100 
Expression *resolveInVoidContext( Expression *expr, const SymTab::Indexer &indexer ) { 105 101 TypeEnvironment env; 106 102 return resolveInVoidContext( expr, indexer, env ); 107 103 } 108 109 104 110 105 namespace { … … 192 187 } 193 188 194 void Resolver:: visit( ObjectDecl *objectDecl ) {195 Type *new_type = resolveTypeof( objectDecl->get_type(), *this);189 void Resolver::previsit( ObjectDecl *objectDecl ) { 190 Type *new_type = resolveTypeof( objectDecl->get_type(), indexer ); 196 191 objectDecl->set_type( new_type ); 197 192 // To handle initialization of routine pointers, e.g., int (*fp)(int) = foo(), means that class-variable … … 200 195 // each value of initContext is retained, so the type on the first analysis is preserved and used for selecting 201 196 // the RHS. 202 ValueGuard<CurrentObject> temp( currentObject );197 GuardValue( currentObject ); 203 198 currentObject = CurrentObject( objectDecl->get_type() ); 204 199 if ( inEnumDecl && dynamic_cast< EnumInstType * >( objectDecl->get_type() ) ) { … … 207 202 currentObject = CurrentObject( new BasicType( Type::Qualifiers(), BasicType::SignedInt ) ); 208 203 } 209 Parent::visit( objectDecl );210 if ( inEnumDecl && dynamic_cast< EnumInstType * >( objectDecl->get_type() ) ) {211 // delete newly created signed int type212 // delete currentObject.getType();213 }214 204 } 215 205 … … 218 208 if ( type->get_dimension() ) { 219 209 CastExpr *castExpr = new CastExpr( type->get_dimension(), SymTab::SizeType->clone() ); 220 Expression *newExpr = findSingleExpression( castExpr, *this);210 Expression *newExpr = findSingleExpression( castExpr, indexer ); 221 211 delete type->get_dimension(); 222 212 type->set_dimension( newExpr ); … … 224 214 } 225 215 226 void Resolver:: visit( ArrayType * at ) {216 void Resolver::previsit( ArrayType * at ) { 227 217 handlePtrType( at ); 228 Parent::visit( at ); 229 } 230 231 void Resolver::visit( PointerType * pt ) { 218 } 219 220 void Resolver::previsit( PointerType * pt ) { 232 221 handlePtrType( pt ); 233 Parent::visit( pt ); 234 } 235 236 void Resolver::visit( TypeDecl *typeDecl ) { 222 } 223 224 void Resolver::previsit( TypeDecl *typeDecl ) { 237 225 if ( typeDecl->get_base() ) { 238 Type *new_type = resolveTypeof( typeDecl->get_base(), *this);226 Type *new_type = resolveTypeof( typeDecl->get_base(), indexer ); 239 227 typeDecl->set_base( new_type ); 240 228 } // if 241 Parent::visit( typeDecl ); 242 } 243 244 void Resolver::visit( FunctionDecl *functionDecl ) { 229 } 230 231 void Resolver::previsit( FunctionDecl *functionDecl ) { 245 232 #if 0 246 std::c out<< "resolver visiting functiondecl ";247 functionDecl->print( std::c out);248 std::c out<< std::endl;233 std::cerr << "resolver visiting functiondecl "; 234 functionDecl->print( std::cerr ); 235 std::cerr << std::endl; 249 236 #endif 250 Type *new_type = resolveTypeof( functionDecl->get_type(), *this);237 Type *new_type = resolveTypeof( functionDecl->get_type(), indexer ); 251 238 functionDecl->set_type( new_type ); 252 ValueGuard< Type * > oldFunctionReturn( functionReturn );239 GuardValue( functionReturn ); 253 240 functionReturn = ResolvExpr::extractResultType( functionDecl->get_functionType() ); 254 Parent::visit( functionDecl ); 255 241 } 242 243 244 void Resolver::postvisit( FunctionDecl *functionDecl ) { 256 245 // default value expressions have an environment which shouldn't be there and trips up later passes. 
257 246 // xxx - it might be necessary to somehow keep the information from this environment, but I can't currently … … 267 256 } 268 257 269 void Resolver:: visit( EnumDecl * enumDecl) {258 void Resolver::previsit( EnumDecl * ) { 270 259 // in case we decide to allow nested enums 271 ValueGuard< bool > oldInEnumDecl( inEnumDecl );260 GuardValue( inEnumDecl ); 272 261 inEnumDecl = true; 273 Parent::visit( enumDecl );274 } 275 276 void Resolver::visit( ExprStmt *exprStmt ) {262 } 263 264 void Resolver::previsit( ExprStmt *exprStmt ) { 265 visit_children = false; 277 266 assertf( exprStmt->get_expr(), "ExprStmt has null Expression in resolver" ); 278 Expression *newExpr = findVoidExpression( exprStmt->get_expr(), *this);267 Expression *newExpr = findVoidExpression( exprStmt->get_expr(), indexer ); 279 268 delete exprStmt->get_expr(); 280 269 exprStmt->set_expr( newExpr ); 281 270 } 282 271 283 void Resolver::visit( AsmExpr *asmExpr ) { 284 Expression *newExpr = findVoidExpression( asmExpr->get_operand(), *this ); 272 void Resolver::previsit( AsmExpr *asmExpr ) { 273 visit_children = false; 274 Expression *newExpr = findVoidExpression( asmExpr->get_operand(), indexer ); 285 275 delete asmExpr->get_operand(); 286 276 asmExpr->set_operand( newExpr ); 287 277 if ( asmExpr->get_inout() ) { 288 newExpr = findVoidExpression( asmExpr->get_inout(), *this);278 newExpr = findVoidExpression( asmExpr->get_inout(), indexer ); 289 279 delete asmExpr->get_inout(); 290 280 asmExpr->set_inout( newExpr ); … … 292 282 } 293 283 294 void Resolver::visit( AsmStmt *asmStmt ) { 295 acceptAll( asmStmt->get_input(), *this); 296 acceptAll( asmStmt->get_output(), *this); 297 } 298 299 void Resolver::visit( IfStmt *ifStmt ) { 300 Expression *newExpr = findSingleExpression( ifStmt->get_condition(), *this ); 284 void Resolver::previsit( AsmStmt *asmStmt ) { 285 visit_children = false; 286 acceptAll( asmStmt->get_input(), *visitor ); 287 acceptAll( asmStmt->get_output(), *visitor ); 288 } 289 290 void Resolver::previsit( IfStmt *ifStmt ) { 291 Expression *newExpr = findSingleExpression( ifStmt->get_condition(), indexer ); 301 292 delete ifStmt->get_condition(); 302 293 ifStmt->set_condition( newExpr ); 303 Parent::visit( ifStmt ); 304 } 305 306 void Resolver::visit( WhileStmt *whileStmt ) { 307 Expression *newExpr = findSingleExpression( whileStmt->get_condition(), *this ); 294 } 295 296 void Resolver::previsit( WhileStmt *whileStmt ) { 297 Expression *newExpr = findSingleExpression( whileStmt->get_condition(), indexer ); 308 298 delete whileStmt->get_condition(); 309 299 whileStmt->set_condition( newExpr ); 310 Parent::visit( whileStmt ); 311 } 312 313 void Resolver::visit( ForStmt *forStmt ) { 314 Parent::visit( forStmt ); 315 300 } 301 302 void Resolver::previsit( ForStmt *forStmt ) { 316 303 if ( forStmt->get_condition() ) { 317 Expression * newExpr = findSingleExpression( forStmt->get_condition(), *this);304 Expression * newExpr = findSingleExpression( forStmt->get_condition(), indexer ); 318 305 delete forStmt->get_condition(); 319 306 forStmt->set_condition( newExpr ); … … 321 308 322 309 if ( forStmt->get_increment() ) { 323 Expression * newExpr = findVoidExpression( forStmt->get_increment(), *this);310 Expression * newExpr = findVoidExpression( forStmt->get_increment(), indexer ); 324 311 delete forStmt->get_increment(); 325 312 forStmt->set_increment( newExpr ); … … 327 314 } 328 315 329 void Resolver:: visit( SwitchStmt *switchStmt ) {330 ValueGuard< CurrentObject > oldCurrentObject( currentObject );316 void 
Resolver::previsit( SwitchStmt *switchStmt ) { 317 GuardValue( currentObject ); 331 318 Expression *newExpr; 332 newExpr = findIntegralExpression( switchStmt->get_condition(), *this);319 newExpr = findIntegralExpression( switchStmt->get_condition(), indexer ); 333 320 delete switchStmt->get_condition(); 334 321 switchStmt->set_condition( newExpr ); 335 322 336 323 currentObject = CurrentObject( newExpr->get_result() ); 337 Parent::visit( switchStmt ); 338 } 339 340 void Resolver::visit( CaseStmt *caseStmt ) { 324 } 325 326 void Resolver::previsit( CaseStmt *caseStmt ) { 341 327 if ( caseStmt->get_condition() ) { 342 328 std::list< InitAlternative > initAlts = currentObject.getOptions(); 343 329 assertf( initAlts.size() == 1, "SwitchStmt did not correctly resolve an integral expression." ); 344 330 CastExpr * castExpr = new CastExpr( caseStmt->get_condition(), initAlts.front().type->clone() ); 345 Expression * newExpr = findSingleExpression( castExpr, *this);331 Expression * newExpr = findSingleExpression( castExpr, indexer ); 346 332 castExpr = strict_dynamic_cast< CastExpr * >( newExpr ); 347 333 caseStmt->set_condition( castExpr->get_arg() ); … … 349 335 delete castExpr; 350 336 } 351 Parent::visit( caseStmt );352 } 353 354 void Resolver::visit( BranchStmt *branchStmt ) {337 } 338 339 void Resolver::previsit( BranchStmt *branchStmt ) { 340 visit_children = false; 355 341 // must resolve the argument for a computed goto 356 342 if ( branchStmt->get_type() == BranchStmt::Goto ) { // check for computed goto statement … … 359 345 PointerType pt( Type::Qualifiers(), v.clone() ); 360 346 CastExpr * castExpr = new CastExpr( arg, pt.clone() ); 361 Expression * newExpr = findSingleExpression( castExpr, *this); // find best expression347 Expression * newExpr = findSingleExpression( castExpr, indexer ); // find best expression 362 348 branchStmt->set_target( newExpr ); 363 349 } // if … … 365 351 } 366 352 367 void Resolver::visit( ReturnStmt *returnStmt ) { 353 void Resolver::previsit( ReturnStmt *returnStmt ) { 354 visit_children = false; 368 355 if ( returnStmt->get_expr() ) { 369 356 CastExpr *castExpr = new CastExpr( returnStmt->get_expr(), functionReturn->clone() ); 370 Expression *newExpr = findSingleExpression( castExpr, *this);357 Expression *newExpr = findSingleExpression( castExpr, indexer ); 371 358 delete castExpr; 372 359 returnStmt->set_expr( newExpr ); … … 374 361 } 375 362 376 void Resolver::visit( ThrowStmt *throwStmt ) { 363 void Resolver::previsit( ThrowStmt *throwStmt ) { 364 visit_children = false; 377 365 // TODO: Replace *exception type with &exception type. 
378 366 if ( throwStmt->get_expr() ) { 379 367 StructDecl * exception_decl = 380 lookupStruct( "__cfaehm__base_exception_t" );368 indexer.lookupStruct( "__cfaehm__base_exception_t" ); 381 369 assert( exception_decl ); 382 370 Expression * wrapped = new CastExpr( … … 390 378 ) 391 379 ); 392 Expression * newExpr = findSingleExpression( wrapped, *this);380 Expression * newExpr = findSingleExpression( wrapped, indexer ); 393 381 throwStmt->set_expr( newExpr ); 394 382 } 395 383 } 396 384 397 void Resolver::visit( CatchStmt *catchStmt ) { 398 // inline Indexer::visit so that the exception variable is still in-scope for 399 // findSingleExpression() below 400 Parent::enterScope(); 401 Visitor::visit( catchStmt ); 402 385 void Resolver::previsit( CatchStmt *catchStmt ) { 403 386 if ( catchStmt->get_cond() ) { 404 387 Expression * wrapped = new CastExpr( … … 406 389 new BasicType( noQualifiers, BasicType::Bool ) 407 390 ); 408 catchStmt->set_cond( findSingleExpression( wrapped, *this ) ); 409 } 410 411 Parent::leaveScope(); 391 catchStmt->set_cond( findSingleExpression( wrapped, indexer ) ); 392 } 412 393 } 413 394 … … 435 416 } 436 417 437 void Resolver:: visit( WaitForStmt * stmt ) {418 void Resolver::previsit( WaitForStmt * stmt ) { 438 419 439 420 // Resolve all clauses first … … 623 604 } 624 605 625 void Resolver::visit( SingleInit *singleInit ) { 606 void Resolver::previsit( SingleInit *singleInit ) { 607 visit_children = false; 626 608 // resolve initialization using the possibilities as determined by the currentObject cursor 627 609 UntypedInitExpr * untyped = new UntypedInitExpr( singleInit->get_value(), currentObject.getOptions() ); 628 Expression * newExpr = findSingleExpression( untyped, *this);610 Expression * newExpr = findSingleExpression( untyped, indexer ); 629 611 InitExpr * initExpr = strict_dynamic_cast< InitExpr * >( newExpr ); 630 612 … … 665 647 } 666 648 667 void Resolver::visit( ListInit * listInit ) { 649 void Resolver::previsit( ListInit * listInit ) { 650 visit_children = false; 668 651 // move cursor into brace-enclosed initializer-list 669 652 currentObject.enterListInit(); … … 676 659 Initializer * init = std::get<1>(p); 677 660 newDesignations.push_back( currentObject.findNext( des ) ); 678 init->accept( * this);661 init->accept( *visitor ); 679 662 } 680 663 // set the set of 'resolved' designations and leave the brace-enclosed initializer-list … … 705 688 delete ctorInit->get_dtor(); 706 689 ctorInit->set_dtor( NULL ); 707 maybeAccept( ctorInit->get_init(), * this);690 maybeAccept( ctorInit->get_init(), *visitor ); 708 691 } 709 692 … … 711 694 void resolveCtorInit( ConstructorInit * ctorInit, const SymTab::Indexer & indexer ) { 712 695 assert( ctorInit ); 713 Resolverresolver( indexer );696 PassVisitor<Resolver> resolver( indexer ); 714 697 ctorInit->accept( resolver ); 715 698 } … … 717 700 void resolveStmtExpr( StmtExpr * stmtExpr, const SymTab::Indexer & indexer ) { 718 701 assert( stmtExpr ); 719 Resolverresolver( indexer );702 PassVisitor<Resolver> resolver( indexer ); 720 703 stmtExpr->accept( resolver ); 721 704 } 722 705 723 void Resolver::visit( ConstructorInit *ctorInit ) { 706 void Resolver::previsit( ConstructorInit *ctorInit ) { 707 visit_children = false; 724 708 // xxx - fallback init has been removed => remove fallbackInit function and remove complexity from FixInit and remove C-init from ConstructorInit 725 maybeAccept( ctorInit->get_ctor(), * this);726 maybeAccept( ctorInit->get_dtor(), * this);709 maybeAccept( ctorInit->get_ctor(), *visitor 
); 710 maybeAccept( ctorInit->get_dtor(), *visitor ); 727 711 728 712 // found a constructor - can get rid of C-style initializer -
src/SymTab/Indexer.cc
re149f77 r695e00d 230 230 231 231 return *this; 232 }233 234 void Indexer::visit( ObjectDecl *objectDecl ) {235 enterScope();236 maybeAccept( objectDecl->get_type(), *this );237 leaveScope();238 maybeAccept( objectDecl->get_init(), *this );239 maybeAccept( objectDecl->get_bitfieldWidth(), *this );240 if ( objectDecl->get_name() != "" ) {241 debugPrint( "Adding object " << objectDecl->get_name() << std::endl );242 addId( objectDecl );243 } // if244 }245 246 void Indexer::visit( FunctionDecl *functionDecl ) {247 if ( functionDecl->get_name() == "" ) return;248 debugPrint( "Adding function " << functionDecl->get_name() << std::endl );249 addId( functionDecl );250 enterScope();251 maybeAccept( functionDecl->get_functionType(), *this );252 maybeAccept( functionDecl->get_statements(), *this );253 leaveScope();254 }255 256 257 // A NOTE ON THE ORDER OF TRAVERSAL258 //259 // Types and typedefs have their base types visited before they are added to the type table. This is ok, since there is260 // no such thing as a recursive type or typedef.261 //262 // typedef struct { T *x; } T; // never allowed263 //264 // for structs/unions, it is possible to have recursion, so the decl should be added as if it's incomplete to begin, the265 // members are traversed, and then the complete type should be added (assuming the type is completed by this particular266 // declaration).267 //268 // struct T { struct T *x; }; // allowed269 //270 // It is important to add the complete type to the symbol table *after* the members/base has been traversed, since that271 // traversal may modify the definition of the type and these modifications should be visible when the symbol table is272 // queried later in this pass.273 //274 // TODO: figure out whether recursive contexts are sensible/possible/reasonable.275 276 277 void Indexer::visit( TypeDecl *typeDecl ) {278 // see A NOTE ON THE ORDER OF TRAVERSAL, above279 // note that assertions come after the type is added to the symtab, since they are not part of the type proper280 // and may depend on the type itself281 enterScope();282 acceptAll( typeDecl->get_parameters(), *this );283 maybeAccept( typeDecl->get_base(), *this );284 leaveScope();285 debugPrint( "Adding type " << typeDecl->get_name() << std::endl );286 addType( typeDecl );287 acceptAll( typeDecl->get_assertions(), *this );288 acceptNewScope( typeDecl->get_init(), *this );289 }290 291 void Indexer::visit( TypedefDecl *typeDecl ) {292 enterScope();293 acceptAll( typeDecl->get_parameters(), *this );294 maybeAccept( typeDecl->get_base(), *this );295 leaveScope();296 debugPrint( "Adding typedef " << typeDecl->get_name() << std::endl );297 addType( typeDecl );298 }299 300 void Indexer::visit( StructDecl *aggregateDecl ) {301 // make up a forward declaration and add it before processing the members302 // needs to be on the heap because addStruct saves the pointer303 StructDecl &fwdDecl = *new StructDecl( aggregateDecl->get_name() );304 cloneAll( aggregateDecl->get_parameters(), fwdDecl.get_parameters() );305 debugPrint( "Adding fwd decl for struct " << fwdDecl.get_name() << std::endl );306 addStruct( &fwdDecl );307 308 enterScope();309 acceptAll( aggregateDecl->get_parameters(), *this );310 acceptAll( aggregateDecl->get_members(), *this );311 leaveScope();312 313 debugPrint( "Adding struct " << aggregateDecl->get_name() << std::endl );314 // this addition replaces the forward declaration315 addStruct( aggregateDecl );316 }317 318 void Indexer::visit( UnionDecl *aggregateDecl ) {319 // make up a forward declaration and add 
it before processing the members320 UnionDecl fwdDecl( aggregateDecl->get_name() );321 cloneAll( aggregateDecl->get_parameters(), fwdDecl.get_parameters() );322 debugPrint( "Adding fwd decl for union " << fwdDecl.get_name() << std::endl );323 addUnion( &fwdDecl );324 325 enterScope();326 acceptAll( aggregateDecl->get_parameters(), *this );327 acceptAll( aggregateDecl->get_members(), *this );328 leaveScope();329 330 debugPrint( "Adding union " << aggregateDecl->get_name() << std::endl );331 addUnion( aggregateDecl );332 }333 334 void Indexer::visit( EnumDecl *aggregateDecl ) {335 debugPrint( "Adding enum " << aggregateDecl->get_name() << std::endl );336 addEnum( aggregateDecl );337 // unlike structs, contexts, and unions, enums inject their members into the global scope338 acceptAll( aggregateDecl->get_members(), *this );339 }340 341 void Indexer::visit( TraitDecl *aggregateDecl ) {342 enterScope();343 acceptAll( aggregateDecl->get_parameters(), *this );344 acceptAll( aggregateDecl->get_members(), *this );345 leaveScope();346 347 debugPrint( "Adding trait " << aggregateDecl->get_name() << std::endl );348 addTrait( aggregateDecl );349 }350 351 void Indexer::visit( CompoundStmt *compoundStmt ) {352 enterScope();353 acceptAll( compoundStmt->get_kids(), *this );354 leaveScope();355 }356 357 void Indexer::visit( IfStmt *ifStmt ) {358 // for statements introduce a level of scope359 enterScope();360 Visitor::visit( ifStmt );361 leaveScope();362 }363 364 void Indexer::visit( ForStmt *forStmt ) {365 // for statements introduce a level of scope366 enterScope();367 Visitor::visit( forStmt );368 leaveScope();369 }370 371 void Indexer::visit( CatchStmt *catchStmt ) {372 // catch statements introduce a level of scope (for the caught exception)373 enterScope();374 Visitor::visit( catchStmt );375 leaveScope();376 }377 378 void Indexer::visit( ApplicationExpr *applicationExpr ) {379 acceptNewScope( applicationExpr->get_result(), *this );380 maybeAccept( applicationExpr->get_function(), *this );381 acceptAll( applicationExpr->get_args(), *this );382 }383 384 void Indexer::visit( UntypedExpr *untypedExpr ) {385 acceptNewScope( untypedExpr->get_result(), *this );386 acceptAll( untypedExpr->get_args(), *this );387 }388 389 void Indexer::visit( NameExpr *nameExpr ) {390 acceptNewScope( nameExpr->get_result(), *this );391 }392 393 void Indexer::visit( AddressExpr *addressExpr ) {394 acceptNewScope( addressExpr->get_result(), *this );395 maybeAccept( addressExpr->get_arg(), *this );396 }397 398 void Indexer::visit( LabelAddressExpr *labAddressExpr ) {399 acceptNewScope( labAddressExpr->get_result(), *this );400 }401 402 void Indexer::visit( CastExpr *castExpr ) {403 acceptNewScope( castExpr->get_result(), *this );404 maybeAccept( castExpr->get_arg(), *this );405 }406 407 void Indexer::visit( UntypedMemberExpr *memberExpr ) {408 acceptNewScope( memberExpr->get_result(), *this );409 maybeAccept( memberExpr->get_aggregate(), *this );410 }411 412 void Indexer::visit( MemberExpr *memberExpr ) {413 acceptNewScope( memberExpr->get_result(), *this );414 maybeAccept( memberExpr->get_aggregate(), *this );415 }416 417 void Indexer::visit( VariableExpr *variableExpr ) {418 acceptNewScope( variableExpr->get_result(), *this );419 }420 421 void Indexer::visit( ConstantExpr *constantExpr ) {422 acceptNewScope( constantExpr->get_result(), *this );423 maybeAccept( constantExpr->get_constant(), *this );424 }425 426 void Indexer::visit( SizeofExpr *sizeofExpr ) {427 acceptNewScope( sizeofExpr->get_result(), *this );428 if ( 
sizeofExpr->get_isType() ) {429 maybeAccept( sizeofExpr->get_type(), *this );430 } else {431 maybeAccept( sizeofExpr->get_expr(), *this );432 }433 }434 435 void Indexer::visit( AlignofExpr *alignofExpr ) {436 acceptNewScope( alignofExpr->get_result(), *this );437 if ( alignofExpr->get_isType() ) {438 maybeAccept( alignofExpr->get_type(), *this );439 } else {440 maybeAccept( alignofExpr->get_expr(), *this );441 }442 }443 444 void Indexer::visit( UntypedOffsetofExpr *offsetofExpr ) {445 acceptNewScope( offsetofExpr->get_result(), *this );446 maybeAccept( offsetofExpr->get_type(), *this );447 }448 449 void Indexer::visit( OffsetofExpr *offsetofExpr ) {450 acceptNewScope( offsetofExpr->get_result(), *this );451 maybeAccept( offsetofExpr->get_type(), *this );452 maybeAccept( offsetofExpr->get_member(), *this );453 }454 455 void Indexer::visit( OffsetPackExpr *offsetPackExpr ) {456 acceptNewScope( offsetPackExpr->get_result(), *this );457 maybeAccept( offsetPackExpr->get_type(), *this );458 }459 460 void Indexer::visit( AttrExpr *attrExpr ) {461 acceptNewScope( attrExpr->get_result(), *this );462 if ( attrExpr->get_isType() ) {463 maybeAccept( attrExpr->get_type(), *this );464 } else {465 maybeAccept( attrExpr->get_expr(), *this );466 }467 }468 469 void Indexer::visit( LogicalExpr *logicalExpr ) {470 acceptNewScope( logicalExpr->get_result(), *this );471 maybeAccept( logicalExpr->get_arg1(), *this );472 maybeAccept( logicalExpr->get_arg2(), *this );473 }474 475 void Indexer::visit( ConditionalExpr *conditionalExpr ) {476 acceptNewScope( conditionalExpr->get_result(), *this );477 maybeAccept( conditionalExpr->get_arg1(), *this );478 maybeAccept( conditionalExpr->get_arg2(), *this );479 maybeAccept( conditionalExpr->get_arg3(), *this );480 }481 482 void Indexer::visit( CommaExpr *commaExpr ) {483 acceptNewScope( commaExpr->get_result(), *this );484 maybeAccept( commaExpr->get_arg1(), *this );485 maybeAccept( commaExpr->get_arg2(), *this );486 }487 488 void Indexer::visit( TypeExpr *typeExpr ) {489 acceptNewScope( typeExpr->get_result(), *this );490 maybeAccept( typeExpr->get_type(), *this );491 }492 493 void Indexer::visit( AsmExpr *asmExpr ) {494 maybeAccept( asmExpr->get_inout(), *this );495 maybeAccept( asmExpr->get_constraint(), *this );496 maybeAccept( asmExpr->get_operand(), *this );497 }498 499 void Indexer::visit( ImplicitCopyCtorExpr *impCpCtorExpr ) {500 acceptNewScope( impCpCtorExpr->get_result(), *this );501 maybeAccept( impCpCtorExpr->get_callExpr(), *this );502 acceptAll( impCpCtorExpr->get_tempDecls(), *this );503 acceptAll( impCpCtorExpr->get_returnDecls(), *this );504 acceptAll( impCpCtorExpr->get_dtors(), *this );505 }506 507 void Indexer::visit( ConstructorExpr * ctorExpr ) {508 acceptNewScope( ctorExpr->get_result(), *this );509 maybeAccept( ctorExpr->get_callExpr(), *this );510 }511 512 void Indexer::visit( CompoundLiteralExpr *compLitExpr ) {513 acceptNewScope( compLitExpr->get_result(), *this );514 maybeAccept( compLitExpr->get_initializer(), *this );515 }516 517 void Indexer::visit( RangeExpr *rangeExpr ) {518 maybeAccept( rangeExpr->get_low(), *this );519 maybeAccept( rangeExpr->get_high(), *this );520 }521 522 void Indexer::visit( UntypedTupleExpr *tupleExpr ) {523 acceptNewScope( tupleExpr->get_result(), *this );524 acceptAll( tupleExpr->get_exprs(), *this );525 }526 527 void Indexer::visit( TupleExpr *tupleExpr ) {528 acceptNewScope( tupleExpr->get_result(), *this );529 acceptAll( tupleExpr->get_exprs(), *this );530 }531 532 void Indexer::visit( TupleIndexExpr 
*tupleExpr ) {533 acceptNewScope( tupleExpr->get_result(), *this );534 maybeAccept( tupleExpr->get_tuple(), *this );535 }536 537 void Indexer::visit( TupleAssignExpr *tupleExpr ) {538 acceptNewScope( tupleExpr->get_result(), *this );539 maybeAccept( tupleExpr->get_stmtExpr(), *this );540 }541 542 void Indexer::visit( StmtExpr *stmtExpr ) {543 acceptNewScope( stmtExpr->get_result(), *this );544 maybeAccept( stmtExpr->get_statements(), *this );545 acceptAll( stmtExpr->get_returnDecls(), *this );546 acceptAll( stmtExpr->get_dtors(), *this );547 }548 549 void Indexer::visit( UniqueExpr *uniqueExpr ) {550 acceptNewScope( uniqueExpr->get_result(), *this );551 maybeAccept( uniqueExpr->get_expr(), *this );552 }553 554 555 void Indexer::visit( TraitInstType *traitInst ) {556 acceptAll( traitInst->get_parameters(), *this );557 }558 559 void Indexer::visit( StructInstType *structInst ) {560 if ( ! lookupStruct( structInst->get_name() ) ) {561 debugPrint( "Adding struct " << structInst->get_name() << " from implicit forward declaration" << std::endl );562 addStruct( structInst->get_name() );563 }564 enterScope();565 acceptAll( structInst->get_parameters(), *this );566 leaveScope();567 }568 569 void Indexer::visit( UnionInstType *unionInst ) {570 if ( ! lookupUnion( unionInst->get_name() ) ) {571 debugPrint( "Adding union " << unionInst->get_name() << " from implicit forward declaration" << std::endl );572 addUnion( unionInst->get_name() );573 }574 enterScope();575 acceptAll( unionInst->get_parameters(), *this );576 leaveScope();577 232 } 578 233 … … 762 417 763 418 void Indexer::addId( DeclarationWithType *decl ) { 419 debugPrint( "Adding Id " << decl->name << std::endl ); 764 420 makeWritable(); 765 421 … … 811 467 812 468 void Indexer::addType( NamedTypeDecl *decl ) { 469 debugPrint( "Adding type " << decl->name << std::endl ); 813 470 makeWritable(); 814 471 … … 838 495 839 496 void Indexer::addStruct( const std::string &id ) { 497 debugPrint( "Adding fwd decl for struct " << id << std::endl ); 840 498 addStruct( new StructDecl( id ) ); 841 499 } 842 500 843 501 void Indexer::addStruct( StructDecl *decl ) { 502 debugPrint( "Adding struct " << decl->name << std::endl ); 844 503 makeWritable(); 845 504 … … 860 519 861 520 void Indexer::addEnum( EnumDecl *decl ) { 521 debugPrint( "Adding enum " << decl->name << std::endl ); 862 522 makeWritable(); 863 523 … … 878 538 879 539 void Indexer::addUnion( const std::string &id ) { 540 debugPrint( "Adding fwd decl for union " << id << std::endl ); 880 541 addUnion( new UnionDecl( id ) ); 881 542 } 882 543 883 544 void Indexer::addUnion( UnionDecl *decl ) { 545 debugPrint( "Adding union " << decl->name << std::endl ); 884 546 makeWritable(); 885 547 … … 900 562 901 563 void Indexer::addTrait( TraitDecl *decl ) { 564 debugPrint( "Adding trait " << decl->name << std::endl ); 902 565 makeWritable(); 903 566 -
src/SymTab/Indexer.h
re149f77 r695e00d 24 24 25 25 namespace SymTab { 26 class Indexer : public Visitor{26 class Indexer { 27 27 public: 28 28 explicit Indexer( bool useDebug = false ); … … 33 33 Indexer& operator= ( const Indexer &that ); 34 34 Indexer& operator= ( Indexer &&that ); 35 36 using Visitor::visit;37 virtual void visit( ObjectDecl *objectDecl );38 virtual void visit( FunctionDecl *functionDecl );39 virtual void visit( TypeDecl *typeDecl );40 virtual void visit( TypedefDecl *typeDecl );41 virtual void visit( StructDecl *aggregateDecl );42 virtual void visit( UnionDecl *aggregateDecl );43 virtual void visit( EnumDecl *aggregateDecl );44 virtual void visit( TraitDecl *aggregateDecl );45 46 virtual void visit( CompoundStmt *compoundStmt );47 virtual void visit( IfStmt *ifStmt );48 virtual void visit( ForStmt *forStmt );49 virtual void visit( CatchStmt *catchStmt );50 51 virtual void visit( ApplicationExpr *applicationExpr );52 virtual void visit( UntypedExpr *untypedExpr );53 virtual void visit( NameExpr *nameExpr );54 virtual void visit( CastExpr *castExpr );55 virtual void visit( AddressExpr *addressExpr );56 virtual void visit( LabelAddressExpr *labAddressExpr );57 virtual void visit( UntypedMemberExpr *memberExpr );58 virtual void visit( MemberExpr *memberExpr );59 virtual void visit( VariableExpr *variableExpr );60 virtual void visit( ConstantExpr *constantExpr );61 virtual void visit( SizeofExpr *sizeofExpr );62 virtual void visit( AlignofExpr *alignofExpr );63 virtual void visit( UntypedOffsetofExpr *offsetofExpr );64 virtual void visit( OffsetofExpr *offsetofExpr );65 virtual void visit( OffsetPackExpr *offsetPackExpr );66 virtual void visit( AttrExpr *attrExpr );67 virtual void visit( LogicalExpr *logicalExpr );68 virtual void visit( ConditionalExpr *conditionalExpr );69 virtual void visit( CommaExpr *commaExpr );70 virtual void visit( TypeExpr *typeExpr );71 virtual void visit( AsmExpr *asmExpr );72 virtual void visit( ImplicitCopyCtorExpr *impCpCtorExpr );73 virtual void visit( ConstructorExpr * ctorExpr );74 virtual void visit( CompoundLiteralExpr *compLitExpr );75 virtual void visit( RangeExpr *rangeExpr );76 virtual void visit( UntypedTupleExpr *tupleExpr );77 virtual void visit( TupleExpr *tupleExpr );78 virtual void visit( TupleIndexExpr *tupleExpr );79 virtual void visit( TupleAssignExpr *tupleExpr );80 virtual void visit( StmtExpr * stmtExpr );81 virtual void visit( UniqueExpr * uniqueExpr );82 83 virtual void visit( TraitInstType *contextInst );84 virtual void visit( StructInstType *contextInst );85 virtual void visit( UnionInstType *contextInst );86 35 87 36 // when using an indexer manually (e.g., within a mutator traversal), it is necessary to tell the indexer -
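With Indexer no longer being a Visitor, passes that need name lookup carry their own indexer (WithIndexer) and bracket additions with enterScope()/leaveScope(), as GenStructMemberCalls now does around addTypes/addIds. The toy scoped table below is a minimal sketch of why that bracketing matters; the Scope class and its string-keyed map are invented for illustration and stand in for the much richer SymTab::Indexer.

    #include <cassert>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // toy scoped symbol table; the real SymTab::Indexer keeps separate per-kind tables
    struct Scope {
            std::vector< std::unordered_map< std::string, std::string > > tables;

            void enterScope() { tables.emplace_back(); }
            void leaveScope() { tables.pop_back(); }
            void addId( const std::string & name, const std::string & type ) {
                    assert( ! tables.empty() );
                    tables.back()[ name ] = type;
            }
            const std::string * lookupId( const std::string & name ) const {
                    // innermost scope wins, as with ordinary block scoping
                    for ( auto it = tables.rbegin(); it != tables.rend(); ++it ) {
                            auto found = it->find( name );
                            if ( found != it->end() ) return &found->second;
                    }
                    return nullptr;
            }
    };

    int main() {
            Scope indexer;
            indexer.enterScope();                       // function scope
            indexer.addId( "this", "S *" );             // re-add parameters, as GenStructMemberCalls does
            indexer.enterScope();                       // block scope
            indexer.addId( "tmp", "int" );
            assert( indexer.lookupId( "tmp" ) );
            indexer.leaveScope();                       // block declarations disappear here
            assert( indexer.lookupId( "tmp" ) == nullptr );
            assert( indexer.lookupId( "this" ) );
            indexer.leaveScope();
            std::cout << "scoped lookups behave as expected\n";
    }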
src/libcfa/concurrency/coroutine.c
re149f77 r695e00d 74 74 } 75 75 76 void ^?{}(coStack_t & this) {77 if ( ! this.userStack ) {76 void ^?{}(coStack_t & this) { 77 if ( ! this.userStack && this.storage ) { 78 78 LIB_DEBUG_DO( 79 79 if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) { … … 122 122 //TEMP HACK do this on proper kernel startup 123 123 if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE ); 124 125 LIB_DEBUG_PRINT_SAFE("FRED"); 124 126 125 127 size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment -
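The coroutine destructor hunk re-enables read/write on the guard page only when the runtime itself allocated the storage (! this.userStack && this.storage). The sketch below shows the underlying guard-page technique with mmap/mprotect; the sizes and the use of mmap rather than malloc are assumptions for illustration and do not reflect the actual coStack_t layout.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
            size_t page = (size_t)sysconf( _SC_PAGESIZE );
            size_t size = page * 4;                     // one guard page plus some usable stack space
            void * storage = mmap( NULL, size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
            if ( storage == MAP_FAILED ) { perror( "mmap" ); return 1; }

            // the lowest page becomes inaccessible so a stack overflow faults instead of corrupting memory
            if ( mprotect( storage, page, PROT_NONE ) == -1 ) { perror( "mprotect" ); return 1; }

            // ... the pages above the guard page would serve as the coroutine stack ...

            // teardown mirrors ^?{}(coStack_t &): undo the protection before the memory is released or reused
            if ( mprotect( storage, page, PROT_READ | PROT_WRITE ) == -1 ) { perror( "mprotect" ); return 1; }
            munmap( storage, size );
            puts( "guard page installed and removed" );
            return 0;
    }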
src/libcfa/concurrency/preemption.c
re149f77 r695e00d 243 243 244 244 // Setup proper signal handlers 245 __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); // CtxSwitch handler245 __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler 246 246 // __kernel_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); // Failure handler 247 247 // __kernel_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); // Failure handler -
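The preemption change adds SA_RESTART to the SIGUSR1 context-switch handler, so system calls interrupted by the preemption signal are restarted rather than failing with EINTR. The following plain POSIX sketch installs a handler with SA_SIGINFO | SA_RESTART; the handler body and the use of raise() are illustrative only, and __kernel_sigaction is assumed to be a thin CFA-runtime wrapper around the same sigaction call.

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    // illustrative SA_SIGINFO handler; a real preemption handler would force a context switch
    static void sigHandler_demo( int, siginfo_t *, void * ) {
            const char msg[] = "SIGUSR1 delivered\n";
            write( STDOUT_FILENO, msg, sizeof(msg) - 1 );   // only async-signal-safe calls belong here
    }

    int main() {
            struct sigaction act = {};
            act.sa_sigaction = sigHandler_demo;
            act.sa_flags = SA_SIGINFO | SA_RESTART;          // SA_RESTART: restart interrupted syscalls
            sigemptyset( &act.sa_mask );
            if ( sigaction( SIGUSR1, &act, NULL ) == -1 ) {
                    perror( "sigaction" );
                    return 1;
            }
            raise( SIGUSR1 );                                // deliver one signal to this process
            return 0;
    }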
src/libcfa/iostream.c
re149f77 r695e00d 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Sep 11 09:21:24201713 // Update Count : 42 012 // Last Modified On : Sun Sep 17 23:24:25 2017 13 // Update Count : 422 14 14 // 15 15 … … 34 34 forall( dtype ostype | ostream( ostype ) ) 35 35 ostype * ?|?( ostype * os, signed char c ) { 36 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 36 37 fmt( os, "%hhd", c ); 37 sepOff( os );38 38 return os; 39 39 } // ?|? … … 41 41 forall( dtype ostype | ostream( ostype ) ) 42 42 ostype * ?|?( ostype * os, unsigned char c ) { 43 if ( sepPrt( os ) ) fmt( os, "%s", sepGetCur( os ) ); 43 44 fmt( os, "%hhu", c ); 44 sepOff( os );45 45 return os; 46 46 } // ?|? -
src/main.cc
re149f77 r695e00d 35 35 #include "CodeTools/DeclStats.h" // for printDeclStats 36 36 #include "CodeTools/TrackLoc.h" // for fillLocations 37 #include "Common/PassVisitor.h" 37 38 #include "Common/CompilerError.h" // for CompilerError 38 39 #include "Common/SemanticError.h" // for SemanticError … … 250 251 251 252 if ( expraltp ) { 252 ResolvExpr::AlternativePrinterprinter( cout );253 PassVisitor<ResolvExpr::AlternativePrinter> printer( cout ); 253 254 acceptAll( translationUnit, printer ); 254 255 return 0;