Changes in / [97c215f:f5a51db]
Files:
- 8 added
- 75 edited
Legend: in the diffs below, removed lines are prefixed with -, added lines with +, and unchanged context lines are unprefixed.
benchmark/io/http/Makefile.am
r97c215f rf5a51db
  include $(top_srcdir)/tools/build/cfa.make

- AM_CFLAGS = -O2 -Wall -Wextra -I$(srcdir) -lrt -pthread # -Werror
+ AM_CFLAGS = -O3 -Wall -Wextra -I$(srcdir) -lrt -pthread # -Werror
  AM_CFAFLAGS = -quiet -nodebug
  AM_LDFLAGS = -quiet -nodebug
benchmark/io/http/main.cfa
r97c215f rf5a51db
- #define __USE_GNU
+ #define _GNU_SOURCE

  #include <errno.h>
  ...
  #include <unistd.h>
  extern "C" {
+       #include <sched.h>
        #include <signal.h>
        #include <sys/socket.h>
  ...
        (this.self){ "Server Cluster", options.clopts.params };

+       cpu_set_t fullset;
+       CPU_ZERO(&fullset);
+       int ret = sched_getaffinity(getpid(), sizeof(fullset), &fullset);
+       if( ret != 0 ) abort | "sched_getaffinity failed with" | errno | strerror( errno );
+       int cnt = CPU_COUNT(&fullset);
+
        this.procs = alloc(options.clopts.nprocs);
        for(i; options.clopts.nprocs) {
                (this.procs[i]){ "Benchmark Processor", this.self };
+
+               int c = 0;
+               int n = 1 + (i % cnt);
+               for(int j = 0; j < CPU_SETSIZE; j++) {
+                       if(CPU_ISSET(j, &fullset)) n--;
+                       if(n == 0) {
+                               c = j;
+                               break;
+                       }
+               }
+               cpu_set_t localset;
+               CPU_ZERO(&localset);
+               CPU_SET(c, &localset);
+               ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
+               if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );

                #if !defined(__CFA_NO_STATISTICS__)
  ...
        int waited = 0;
        for() {
-               ret = bind( server_fd, (struct sockaddr *)&address, sizeof(address) );
+               int sockfd = server_fd;
+               __CONST_SOCKADDR_ARG addr;
+               addr.__sockaddr__ = (struct sockaddr *)&address;
+               socklen_t addrlen = sizeof(address);
+               ret = bind( sockfd, addr, addrlen );
                if(ret < 0) {
                        if(errno == EADDRINUSE) {
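The change above pins each benchmark processor, round-robin, to one CPU of the process affinity mask. Reduced to a stand-alone C program, the same technique looks roughly like the sketch below; the thread scaffolding (worker_main, NTHREADS) is invented for the example and is not part of the benchmark.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define NTHREADS 4

static void * worker_main( void * arg ) {   // placeholder worker body
        (void)arg;
        return NULL;
}

int main( void ) {
        // Ask the kernel which CPUs this process may run on.
        cpu_set_t fullset;
        CPU_ZERO( &fullset );
        if ( sched_getaffinity( getpid(), sizeof(fullset), &fullset ) != 0 ) {
                perror( "sched_getaffinity" ); exit( 1 );
        }
        int cnt = CPU_COUNT( &fullset );

        pthread_t threads[NTHREADS];
        for ( int i = 0; i < NTHREADS; i++ ) {
                pthread_create( &threads[i], NULL, worker_main, NULL );

                // Find the (i % cnt)-th set bit of the affinity mask, as in the diff.
                int c = 0, n = 1 + (i % cnt);
                for ( int j = 0; j < CPU_SETSIZE; j++ ) {
                        if ( CPU_ISSET( j, &fullset ) ) n--;
                        if ( n == 0 ) { c = j; break; }
                }

                // Bind thread i to that single CPU.
                cpu_set_t localset;
                CPU_ZERO( &localset );
                CPU_SET( c, &localset );
                int ret = pthread_setaffinity_np( threads[i], sizeof(localset), &localset );
                if ( ret != 0 ) { fprintf( stderr, "pthread_setaffinity_np: %s\n", strerror( ret ) ); exit( 1 ); }
        }
        for ( int i = 0; i < NTHREADS; i++ ) pthread_join( threads[i], NULL );
        return 0;
}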
doc/theses/thierry_delisle_PhD/thesis/Makefile
r97c215f rf5a51db
  ## Define the documents that need to be made.
  all: thesis.pdf
- thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib
+ thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty

  DOCUMENT = thesis.pdf
doc/theses/thierry_delisle_PhD/thesis/fig/base.fig
r97c215f rf5a51db
  1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6900 4200 20 20 6900 4200 6920 4200
  1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6975 4200 20 20 6975 4200 6995 4200
+ -6
+ 6 6375 5100 6675 5250
+ 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6450 5175 20 20 6450 5175 6470 5175
+ 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6525 5175 20 20 6525 5175 6545 5175
+ 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6600 5175 20 20 6600 5175 6620 5175
  -6
  1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3900 2400 300 300 3900 2400 4200 2400
  ...
  2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
        2400 2475 3000 2475
+ 2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+       3300 5210 3150 4950 2850 4950 2700 5210 2850 5470 3150 5470
+       3300 5210
+ 2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+       4500 5210 4350 4950 4050 4950 3900 5210 4050 5470 4350 5470
+       4500 5210
+ 2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+       5700 5210 5550 4950 5250 4950 5100 5210 5250 5470 5550 5470
+       5700 5210
  4 2 -1 50 -1 0 12 0.0000 2 135 630 2100 3075 Threads\001
  4 2 -1 50 -1 0 12 0.0000 2 165 450 2100 2850 Ready\001
  ...
  4 1 -1 50 -1 0 11 0.0000 2 135 180 2700 3550 TS\001
  4 1 -1 50 -1 0 11 0.0000 2 135 180 2700 2650 TS\001
+ 4 2 -1 50 -1 0 12 0.0000 2 135 900 2100 5175 Processors\001
doc/theses/thierry_delisle_PhD/thesis/glossary.tex
r97c215f rf5a51db

  \textit{Synonyms : User threads, Lightweight threads, Green threads, Virtual threads, Tasks.}
+ }
+
+ \longnewglossaryentry{rmr}
+ {name={remote memory reference}}
+ {
+
  }

doc/theses/thierry_delisle_PhD/thesis/text/core.tex
r97c215f rf5a51db

  \section{Design}
- In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.
+ In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is that adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue: create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering.

- \subsection{Sharding} \label{sec:sharding}
- An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again.
+ Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration for the scheduler in this thesis.

+ \subsection{Work-Stealing}
+
+ As I mentioned in \ref{existing:workstealing}, a popular pattern for sharding the ready-queue is work-stealing. In this pattern, each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work.
+ The interesting aspect of work-stealing appears in the easier scheduling cases, \ie enough work for everyone but no more, and no load balancing needed. In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
+ On the other hand, work-stealing schedulers only attempt load-balancing when a \gls{proc} runs out of work.
+ This means the scheduler may never balance unfairness that does not result in a \gls{proc} running out of work.
+ Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation.
+
+ Based on these observations, I conclude that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
+
+ \subsection{Relaxed-FIFO}
+ An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}.
This approach forgos any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick from. 68 \Glspl{proc} choose ready-queus at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeing the oldest element. 69 The result is a queue that has both decent scalability and sufficient fairness. 70 The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element. 71 This contrasts with work-stealing, where \emph{any} \gls{proc} busy for an extended period of time results in all the elements on its local queue to have to wait. Unless another \gls{proc} runs out of work. 72 73 An important aspects of this scheme's fairness approach is that the timestamps make it possible to evaluate how long elements have been on the queue. 74 However, another major aspect is that \glspl{proc} will eagerly search for these older elements instead of focusing on specific queues. 75 76 While the fairness, of this scheme is good, it does suffer in terms of performance. 77 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult. 78 79 \section{\CFA} 80 The \CFA is effectively attempting to merge these two approaches, keeping the best of both. 81 It is based on the 56 82 \begin{figure} 57 83 \centering 58 84 \input{base.pstex_t} 59 \caption[ Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}85 \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.} 60 86 \label{fig:base} 61 87 \end{figure} 62 88 63 \subsection{Finding threads}64 Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.65 89 66 \begin{figure}67 \centering68 \input{empty.pstex_t}69 \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}70 \label{fig:empty}71 \end{figure}72 90 73 There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information: 91 % The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance. 
74 92 75 \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done. 93 % \subsection{Sharding} \label{sec:sharding} 94 % An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again. 76 95 77 \begin{figure} 78 \centering 79 \vspace*{-5pt} 80 {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}} 81 \vspace*{-5pt} 82 \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.} 83 \label{fig:emptybit} 96 % \begin{figure} 97 % \centering 98 % \input{base.pstex_t} 99 % \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.} 100 % \label{fig:base} 101 % \end{figure} 84 102 85 \vspace*{10pt} 86 {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}} 87 \vspace*{-5pt} 88 \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.} 89 \label{fig:emptytree} 103 % \subsection{Finding threads} 104 % Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. 
Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses. 90 105 91 \vspace*{10pt}92 {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}} 93 \vspace*{-5pt}94 \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}95 \label{fig:emptytls}96 \end{figure}106 % \begin{figure} 107 % \centering 108 % \input{empty.pstex_t} 109 % \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.} 110 % \label{fig:empty} 111 % \end{figure} 97 112 98 \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow. 113 % There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information: 99 114 100 \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.115 % \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done. 
101 116 102 I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach. 117 % \begin{figure} 118 % \centering 119 % \vspace*{-5pt} 120 % {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}} 121 % \vspace*{-5pt} 122 % \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.} 123 % \label{fig:emptybit} 103 124 104 \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/} 105 In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later. 125 % \vspace*{10pt} 126 % {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}} 127 % \vspace*{-5pt} 128 % \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.} 129 % \label{fig:emptytree} 106 130 107 To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards. Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached. 
131 % \vspace*{10pt} 132 % {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}} 133 % \vspace*{-5pt} 134 % \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.} 135 % \label{fig:emptytls} 136 % \end{figure} 108 137 109 The algorithm works as follows: 110 \begin{itemize} 111 \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$. 112 \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions: 113 \begin{itemize} 114 \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$. 115 \item Pop operations use $B$ going backwards on each try. 116 \end{itemize} 117 \end{itemize} 138 % \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow. 118 139 119 The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.140 % \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries. 120 141 121 \section{Details} 142 % I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach. 143 144 % \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/} 145 % In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. 
Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later. 146 147 % To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards. Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached. 148 149 % The algorithm works as follows: 150 % \begin{itemize} 151 % \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$. 152 % \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions: 153 % \begin{itemize} 154 % \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$. 155 % \item Pop operations use $B$ going backwards on each try. 156 % \end{itemize} 157 % \end{itemize} 158 159 % The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm. 160 161 % \section{Details} -
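To make the relaxed-FIFO scheme described in this file's changes concrete, here is a small stand-alone C sketch: an array of lock-protected FIFO sub-queues with per-element timestamps, where push retries random sub-queues until one can be locked, and pop peeks at two sub-queues and dequeues the older head. The names (rq_init, rq_push, rq_pop, NQUEUES) are invented for the example and are not the \CFA runtime API; rand() is used only for brevity where a per-thread PRNG would be used in practice.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define NQUEUES 16

typedef struct rq_node {
        struct rq_node * next;
        uint64_t ts;                                  // timestamp recorded at push time
} rq_node_t;

typedef struct {
        pthread_mutex_t lock;
        rq_node_t * head, * tail;
} rq_subqueue_t;

static rq_subqueue_t queues[NQUEUES];

void rq_init( void ) {
        for ( int i = 0; i < NQUEUES; i++ ) {
                pthread_mutex_init( &queues[i].lock, NULL );
                queues[i].head = queues[i].tail = NULL;
        }
}

static uint64_t now_ns( void ) {
        struct timespec tp;
        clock_gettime( CLOCK_MONOTONIC, &tp );
        return (uint64_t)tp.tv_sec * 1000000000ull + (uint64_t)tp.tv_nsec;
}

// Push: retry random sub-queues until one can be locked, then append.
void rq_push( rq_node_t * n ) {
        n->next = NULL;
        n->ts = now_ns();
        for (;;) {
                rq_subqueue_t * q = &queues[ rand() % NQUEUES ];
                if ( pthread_mutex_trylock( &q->lock ) != 0 ) continue;   // busy: pick another
                if ( q->tail ) q->tail->next = n; else q->head = n;
                q->tail = n;
                pthread_mutex_unlock( &q->lock );
                return;
        }
}

// Pop: peek two random sub-queues and dequeue the head with the older timestamp.
rq_node_t * rq_pop( void ) {
        for (;;) {
                rq_subqueue_t * a = &queues[ rand() % NQUEUES ];
                rq_subqueue_t * b = &queues[ rand() % NQUEUES ];
                // optimistic, unlocked peek; the state is re-checked under the lock below
                rq_subqueue_t * pick;
                if ( a->head && b->head ) pick = ( a->head->ts <= b->head->ts ) ? a : b;
                else if ( a->head )       pick = a;
                else if ( b->head )       pick = b;
                else return NULL;                                         // both looked empty
                if ( pthread_mutex_trylock( &pick->lock ) != 0 ) continue;
                rq_node_t * n = pick->head;
                if ( n ) {
                        pick->head = n->next;
                        if ( ! pick->head ) pick->tail = NULL;
                }
                pthread_mutex_unlock( &pick->lock );
                if ( n ) return n;                                        // raced to empty: retry
        }
}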
doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex
r97c215f rf5a51db

  In Memory Plain Text
-
- Networked Plain Text

  Networked ZIPF
doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
r97c215f rf5a51db

  The first step of evaluation is always to test-out small controlled cases, to ensure that the basics are working properly.
- This sections presents four different experimental setup, evaluating some of the basic features of \CFA's scheduler.
+ This section presents five different experimental setups, evaluating some of the basic features of \CFA's scheduler.

  \section{Cycling latency}
  The most basic evaluation of any ready queue is to evaluate the latency needed to push and pop one element from the ready-queue.
- While these two operation also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
- However, yielding can be treated as a special case, since it also carries the information that the length of the ready queue will not change.
+ Since these two operations also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
+ However, yielding can be treated as a special case, since it also carries the information that the number of ready \glspl{at} will not change.
  Not all systems use this information, but those which do may appear to have better performance than they would for disconnected push/pop pairs.
  For this reason, I chose a different first benchmark, which I call the Cycle Benchmark.
- This benchmark arranges many threads into multiple rings of threads.
+ This benchmark arranges many \glspl{at} into multiple rings of \glspl{at}.
  Each ring is effectively a circular singly-linked list.
- At runtime, each thread unparks the next thread before parking itself.
+ At runtime, each \gls{at} unparks the next \gls{at} before parking itself.
  This corresponds to the desired pair of ready queue operations.
- Unparking the next thread requires pushing that thread onto the ready queue and the ensuing park will cause the runtime to pop a thread from the ready-queue.
+ Unparking the next \gls{at} requires pushing that \gls{at} onto the ready queue and the ensuing park will cause the runtime to pop a \gls{at} from the ready-queue.
  Figure~\ref{fig:cycle} shows a visual representation of this arrangement.

- The goal of this ring is that the underlying runtime cannot rely on the guarantee that the number of ready threads will stay constant over the duration of the experiment.
- In fact, the total number of threads waiting on the ready is expected to vary a little because of the race between the next thread unparking and the current thread parking.
- The size of the cycle is also decided based on this race: cycles that are too small may see the
- chain of unparks go full circle before the first thread can park.
+ The goal of this ring is that the underlying runtime cannot rely on the guarantee that the number of ready \glspl{at} will stay constant over the duration of the experiment.
+ In fact, the total number of \glspl{at} waiting on the ready queue is expected to vary because of the race between the next \gls{at} unparking and the current \gls{at} parking.
+ The size of the cycle is also decided based on this race: cycles that are too small may see the chain of unparks go full circle before the first \gls{at} can park.
  While this would not be a correctness problem, every runtime system must handle that race, it could lead to pushes and pops being optimized away.
- Since silently omitting ready-queue operations would throw off the measuring of these operations.
- Therefore the ring of threads must be big enough so the threads have the time to fully park before they are unparked.
+ Since silently omitting ready-queue operations would throw off the measuring of these operations, the ring of \glspl{at} must be big enough so the \glspl{at} have the time to fully park before they are unparked.
  Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system.

  ...
  \centering
  \input{cycle.pstex_t}
- \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each thread unparks the next thread in the cycle before parking itself.}
+ \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
  \label{fig:cycle}
  \end{figure}

  \todo{check term ``idle sleep handling''}
- To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of processors available.
+ To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
  Beyond this point, adding more rings serves to mitigate even more the idle sleep handling.
- This is to avoid the case where one of the worker threads runs out of work because of the variation on the number of ready threads mentionned above.
+ This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation on the number of ready \glspl{at} mentioned above.

  The actual benchmark is more complicated to handle termination, but that simply requires using a binary semphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.

- \todo{mention where to get the code.}
+ \todo{code, setup, results}
+ \begin{lstlisting}
+ Thread.main() {
+       count := 0
+       for {
+               wait()
+               this.next.wake()
+               count ++
+               if must_stop() { break }
+       }
+       global.count += count
+ }
+ \end{lstlisting}
+

  \section{Yield}
  For completion, I also include the yield benchmark.
- This benchmark is much simpler than the cycle tests, it simply creates many threads that call \texttt{yield}.
+ This benchmark is much simpler than the cycle tests, it simply creates many \glspl{at} that call \texttt{yield}.
+ As mentioned in the previous section, this benchmark may be less representative of usages that only make limited use of \texttt{yield}, due to potential shortcuts in the routine.
+ Its only interesting variable is the number of \glspl{at} per \gls{proc}, where ratios close to 1 mean the ready queue(s) could be empty.
+ This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done.
+
+ \todo{code, setup, results}
+
+ \begin{lstlisting}
+ Thread.main() {
+       count := 0
+       while !stop {
+               yield()
+               count ++
+       }
+       global.count += count
+ }
+ \end{lstlisting}
+
+
+ \section{Churn}
+ The Cycle and Yield benchmarks represent an ``easy'' scenario for a scheduler, \eg, an embarrassingly parallel application.
+ In these benchmarks, \glspl{at} can be easily partitioned over the different \glspl{proc} up-front and none of the \glspl{at} communicate with each other.
+
+ The Churn benchmark represents more chaotic usages, where there is no relation between the last \gls{proc} on which a \gls{at} ran and the \gls{proc} that unblocked it.
+ When a \gls{at} is unblocked from a different \gls{proc} than the one on which it last ran, the unblocking \gls{proc} must either ``steal'' the \gls{at} or place it on a remote queue.
+ This can result in either contention on the remote queue or \glspl{rmr} on the \gls{at} data structure.
+ In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly.
+
+ To achieve this, the benchmark uses a fixed-size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}.
+ When a \gls{at} attempts to block on the chair, it must first unblock the \gls{at} currently blocked on said chair, if any.
+ This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves.
+ For this benchmark to work, however, the number of \glspl{at} must be equal to or greater than the number of chairs plus the number of \glspl{proc}.
+
+ \todo{code, setup, results}
+ \begin{lstlisting}
+ Thread.main() {
+       count := 0
+       for {
+               r := random() % len(spots)
+               next := xchg(spots[r], this)
+               if next { next.wake() }
+               wait()
+               count ++
+               if must_stop() { break }
+       }
+       global.count += count
+ }
+ \end{lstlisting}

  \section{Locality}

+ \todo{code, setup, results}
+
  \section{Transfer}
+ The last benchmark is more exactly characterized as an experiment than a benchmark.
+ It tests the behavior of the schedulers for a particularly misbehaved workload.
+ In this workload, one of the \glspl{at} is selected at random to be the leader.
+ The leader then spins in a tight loop until it has observed that all other \glspl{at} have acknowledged its leadership.
+ The leader \gls{at} then picks a new \gls{at} to be the ``spinner'' and the cycle repeats.
+
+ The benchmark comes in two flavours for the behavior of the non-leader \glspl{at}:
+ once they have acknowledged the leader, they either block on a semaphore or yield repeatedly.
+
+ This experiment is designed to evaluate the short-term load balancing of the scheduler.
+ Indeed, schedulers where the runnable \glspl{at} are partitioned on the \glspl{proc} may need to balance the \glspl{at} for this experiment to terminate.
+ This is because the spinning \gls{at} is effectively preventing the \gls{proc} from running any other \glspl{thrd}.
+ In the semaphore flavour, the number of runnable \glspl{at} will eventually dwindle down to only the leader.
+ This is a simpler case to handle for schedulers since \glspl{proc} eventually run out of work.
+ In the yielding flavour, the number of runnable \glspl{at} stays constant.
+ This is a harder case to handle because corrective measures must be taken even if work is still available.
+ Note that languages that have mandatory preemption do circumvent this problem by forcing the spinner to yield.
+
+ \todo{code, setup, results}
+ \begin{lstlisting}
+ Thread.lead() {
+       this.idx_seen = ++lead_idx
+       if lead_idx > stop_idx {
+               done := true
+               return
+       }
+
+       // Wait for everyone to acknowledge my leadership
+       start := timeNow()
+       for t in threads {
+               while t.idx_seen != lead_idx {
+                       asm pause
+                       if (timeNow() - start) > 5 seconds { error() }
+               }
+       }
+
+       // pick next leader
+       leader := threads[ prng() % len(threads) ]
+
+       // wake every one
+       if !exhaust {
+               for t in threads {
+                       if t != me { t.wake() }
+               }
+       }
+ }
+
+ Thread.wait() {
+       this.idx_seen := lead_idx
+       if exhaust { wait() }
+       else { yield() }
+ }
+
+ Thread.main() {
+       while !done {
+               if leader == me { this.lead() }
+               else { this.wait() }
+       }
+ }
+ \end{lstlisting}
doc/theses/thierry_delisle_PhD/thesis/text/existing.tex
r97c215f rf5a51db


- \section{Work Stealing}
+ \section{Work Stealing}\label{existing:workstealing}
  One of the most popular scheduling algorithm in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduce by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each workers has queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasks and stole only among neighbours). Blumofe and Leiserson also prove worst case space and time requirements for well-structured computations.
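For readers unfamiliar with the pattern this section labels, here is a minimal, illustrative C sketch of work-stealing: one deque per worker, the owner pushes and pops at one end, and a worker that runs out of work steals from a random victim. Real implementations use lock-free deques (for example Chase-Lev); this sketch uses a mutex per deque and fixed-size arrays purely to keep the idea visible, and all names (ws_init, ws_push, ws_pop, NWORKERS) are invented for the example.

#include <pthread.h>
#include <stdlib.h>

#define NWORKERS 4
#define CAP      1024

typedef void (*task_t)(void);

typedef struct {
        pthread_mutex_t lock;
        task_t tasks[CAP];
        int top, bottom;          // owner works at bottom, thieves steal at top
} deque_t;

static deque_t deques[NWORKERS];

void ws_init( void ) {
        for ( int i = 0; i < NWORKERS; i++ ) {
                pthread_mutex_init( &deques[i].lock, NULL );
                deques[i].top = deques[i].bottom = 0;
        }
}

// owner side: push a task on the local deque (no capacity handling in this sketch)
void ws_push( int self, task_t t ) {
        deque_t * d = &deques[self];
        pthread_mutex_lock( &d->lock );
        d->tasks[ d->bottom++ ] = t;
        pthread_mutex_unlock( &d->lock );
}

// owner side: pop locally; if empty, try to steal from one random victim
task_t ws_pop( int self ) {
        deque_t * d = &deques[self];
        pthread_mutex_lock( &d->lock );
        task_t t = ( d->bottom > d->top ) ? d->tasks[ --d->bottom ] : NULL;
        pthread_mutex_unlock( &d->lock );
        if ( t ) return t;

        int victim = rand() % NWORKERS;            // callers typically retry in a loop
        if ( victim == self ) return NULL;
        deque_t * v = &deques[victim];
        pthread_mutex_lock( &v->lock );
        t = ( v->bottom > v->top ) ? v->tasks[ v->top++ ] : NULL;
        pthread_mutex_unlock( &v->lock );
        return t;
}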
libcfa/src/concurrency/io.cfa
r97c215f rf5a51db
        ctx->proc->io.pending = true;
        ctx->proc->io.dirty = true;
-       if(sq.to_submit > 30 || !lazy) {
+       if(sq.to_submit > 30) {
+               __tls_stats()->io.flush.full++;
+               __cfa_io_flush( ctx->proc, 0 );
+       }
+       if(!lazy) {
+               __tls_stats()->io.flush.eager++;
                __cfa_io_flush( ctx->proc, 0 );
        }
libcfa/src/concurrency/kernel.cfa
r97c215f rf5a51db 42 42 43 43 #if !defined(__CFA_NO_STATISTICS__) 44 #define __STATS ( ...) __VA_ARGS__44 #define __STATS_DEF( ...) __VA_ARGS__ 45 45 #else 46 #define __STATS ( ...)46 #define __STATS_DEF( ...) 47 47 #endif 48 48 … … 122 122 static thread$ * __next_thread(cluster * this); 123 123 static thread$ * __next_thread_slow(cluster * this); 124 static thread$ * __next_thread_search(cluster * this); 124 125 static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1))); 125 126 static void __run_thread(processor * this, thread$ * dst); … … 187 188 MAIN_LOOP: 188 189 for() { 189 #define OLD_MAIN 1190 #if OLD_MAIN191 190 // Check if there is pending io 192 191 __maybe_io_drain( this ); … … 196 195 197 196 if( !readyThread ) { 197 __IO_STATS__(true, io.flush.idle++; ) 198 198 __cfa_io_flush( this, 0 ); 199 199 200 readyThread = __next_thread( this->cltr ); 201 } 202 203 if( !readyThread ) for(5) { 204 __IO_STATS__(true, io.flush.idle++; ) 205 200 206 readyThread = __next_thread_slow( this->cltr ); 207 208 if( readyThread ) break; 209 210 __cfa_io_flush( this, 0 ); 201 211 } 202 212 … … 206 216 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 207 217 208 #if !defined(__CFA_NO_STATISTICS__)209 __tls_stats()->ready.sleep.halts++;210 #endif211 212 218 // Push self to idle stack 213 219 if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP; 214 220 215 221 // Confirm the ready-queue is empty 216 readyThread = __next_thread_s low( this->cltr );222 readyThread = __next_thread_search( this->cltr ); 217 223 if( readyThread ) { 218 224 // A thread was found, cancel the halt 219 225 mark_awake(this->cltr->procs, * this); 220 226 221 #if !defined(__CFA_NO_STATISTICS__) 222 __tls_stats()->ready.sleep.cancels++; 223 #endif 227 __STATS__(true, ready.sleep.cancels++; ) 224 228 225 229 // continue the mai loop … … 248 252 249 253 if(this->io.pending && !this->io.dirty) { 254 __IO_STATS__(true, io.flush.dirty++; ) 250 255 __cfa_io_flush( this, 0 ); 251 256 } 252 253 #else254 #warning new kernel loop255 SEARCH: {256 /* paranoid */ verify( ! 
__preemption_enabled() );257 258 // First, lock the scheduler since we are searching for a thread259 ready_schedule_lock();260 261 // Try to get the next thread262 readyThread = pop_fast( this->cltr );263 if(readyThread) { ready_schedule_unlock(); break SEARCH; }264 265 // If we can't find a thread, might as well flush any outstanding I/O266 if(this->io.pending) { __cfa_io_flush( this, 0 ); }267 268 // Spin a little on I/O, just in case269 for(5) {270 __maybe_io_drain( this );271 readyThread = pop_fast( this->cltr );272 if(readyThread) { ready_schedule_unlock(); break SEARCH; }273 }274 275 // no luck, try stealing a few times276 for(5) {277 if( __maybe_io_drain( this ) ) {278 readyThread = pop_fast( this->cltr );279 } else {280 readyThread = pop_slow( this->cltr );281 }282 if(readyThread) { ready_schedule_unlock(); break SEARCH; }283 }284 285 // still no luck, search for a thread286 readyThread = pop_search( this->cltr );287 if(readyThread) { ready_schedule_unlock(); break SEARCH; }288 289 // Don't block if we are done290 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {291 ready_schedule_unlock();292 break MAIN_LOOP;293 }294 295 __STATS( __tls_stats()->ready.sleep.halts++; )296 297 // Push self to idle stack298 ready_schedule_unlock();299 if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;300 ready_schedule_lock();301 302 // Confirm the ready-queue is empty303 __maybe_io_drain( this );304 readyThread = pop_search( this->cltr );305 ready_schedule_unlock();306 307 if( readyThread ) {308 // A thread was found, cancel the halt309 mark_awake(this->cltr->procs, * this);310 311 __STATS( __tls_stats()->ready.sleep.cancels++; )312 313 // continue the main loop314 break SEARCH;315 }316 317 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )318 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);319 320 {321 eventfd_t val;322 ssize_t ret = read( this->idle_fd, &val, sizeof(val) );323 if(ret < 0) {324 switch((int)errno) {325 case EAGAIN:326 #if EAGAIN != EWOULDBLOCK327 case EWOULDBLOCK:328 #endif329 case EINTR:330 // No need to do anything special here, just assume it's a legitimate wake-up331 break;332 default:333 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );334 }335 }336 }337 338 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )339 340 // We were woken up, remove self from idle341 mark_awake(this->cltr->procs, * this);342 343 // DON'T just proceed, start looking again344 continue MAIN_LOOP;345 }346 347 RUN_THREAD:348 /* paranoid */ verify( ! 
__preemption_enabled() );349 /* paranoid */ verify( readyThread );350 351 // Reset io dirty bit352 this->io.dirty = false;353 354 // We found a thread run it355 __run_thread(this, readyThread);356 357 // Are we done?358 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;359 360 if(this->io.pending && !this->io.dirty) {361 __cfa_io_flush( this, 0 );362 }363 364 ready_schedule_lock();365 __maybe_io_drain( this );366 ready_schedule_unlock();367 #endif368 257 } 369 258 … … 476 365 break RUNNING; 477 366 case TICKET_UNBLOCK: 478 #if !defined(__CFA_NO_STATISTICS__) 479 __tls_stats()->ready.threads.threads++; 480 #endif 367 __STATS__(true, ready.threads.threads++; ) 481 368 // This is case 2, the racy case, someone tried to run this thread before it finished blocking 482 369 // In this case, just run it again. … … 493 380 __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst); 494 381 495 #if !defined(__CFA_NO_STATISTICS__) 496 __tls_stats()->ready.threads.threads--; 497 #endif 382 __STATS__(true, ready.threads.threads--; ) 498 383 499 384 /* paranoid */ verify( ! __preemption_enabled() ); … … 506 391 thread$ * thrd_src = kernelTLS().this_thread; 507 392 508 __STATS ( thrd_src->last_proc = kernelTLS().this_processor; )393 __STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; ) 509 394 510 395 // Run the thread on this processor … … 558 443 // Dereference the thread now because once we push it, there is not guaranteed it's still valid. 559 444 struct cluster * cl = thrd->curr_cluster; 560 __STATS (bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )445 __STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; ) 561 446 562 447 // push the thread to the cluster ready-queue … … 609 494 610 495 ready_schedule_lock(); 611 thread$ * thrd; 612 for(25) { 613 thrd = pop_slow( this ); 614 if(thrd) goto RET; 615 } 616 thrd = pop_search( this ); 617 618 RET: 496 thread$ * thrd = pop_slow( this ); 497 ready_schedule_unlock(); 498 499 /* paranoid */ verify( ! __preemption_enabled() ); 500 return thrd; 501 } 502 503 // KERNEL ONLY 504 static inline thread$ * __next_thread_search(cluster * this) with( *this ) { 505 /* paranoid */ verify( ! __preemption_enabled() ); 506 507 ready_schedule_lock(); 508 thread$ * thrd = pop_search( this ); 619 509 ready_schedule_unlock(); 620 510 … … 732 622 // Wake a thread from the front if there are any 733 623 static void __wake_one(cluster * this) { 624 eventfd_t val; 625 734 626 /* paranoid */ verify( ! 
__preemption_enabled() ); 735 627 /* paranoid */ verify( ready_schedule_islocked() ); 736 628 737 629 // Check if there is a sleeping processor 738 // int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST); 739 int fd = 0; 740 if( __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST) != 0 ) { 741 fd = __atomic_exchange_n(&this->procs.fd, 0, __ATOMIC_RELAXED); 742 } 743 744 // If no one is sleeping, we are done 745 if( fd == 0 ) return; 746 747 // We found a processor, wake it up 748 eventfd_t val; 749 val = 1; 750 eventfd_write( fd, val ); 751 752 #if !defined(__CFA_NO_STATISTICS__) 753 if( kernelTLS().this_stats ) { 754 __tls_stats()->ready.sleep.wakes++; 755 } 756 else { 757 __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); 758 } 759 #endif 630 struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST); 631 632 // If no one is sleeping: we are done 633 if( fdp == 0p ) return; 634 635 int fd = 1; 636 if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) { 637 fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED); 638 } 639 640 switch(fd) { 641 case 0: 642 // If the processor isn't ready to sleep then the exchange will already wake it up 643 #if !defined(__CFA_NO_STATISTICS__) 644 if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++; 645 } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); } 646 #endif 647 break; 648 case 1: 649 // If someone else already said they will wake them: we are done 650 #if !defined(__CFA_NO_STATISTICS__) 651 if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++; 652 } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); } 653 #endif 654 break; 655 default: 656 // If the processor was ready to sleep, we need to wake it up with an actual write 657 val = 1; 658 eventfd_write( fd, val ); 659 660 #if !defined(__CFA_NO_STATISTICS__) 661 if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++; 662 } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); } 663 #endif 664 break; 665 } 760 666 761 667 /* paranoid */ verify( ready_schedule_islocked() ); … … 770 676 771 677 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this); 678 679 this->idle_wctx.fd = 1; 772 680 773 681 eventfd_t val; … … 779 687 780 688 static void idle_sleep(processor * this, io_future_t & future, iovec & iov) { 689 // Tell everyone we are ready to go do sleep 690 for() { 691 int expected = this->idle_wctx.fd; 692 693 // Someone already told us to wake-up! No time for a nap. 694 if(expected == 1) { return; } 695 696 // Try to mark that we are going to sleep 697 if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) { 698 // Every one agreed, taking a nap 699 break; 700 } 701 } 702 703 781 704 #if !defined(CFA_WITH_IO_URING_IDLE) 782 705 #if !defined(__CFA_NO_STATISTICS__) … … 825 748 826 749 static bool mark_idle(__cluster_proc_list & this, processor & proc) { 750 __STATS__(true, ready.sleep.halts++; ) 751 752 proc.idle_wctx.fd = 0; 753 827 754 /* paranoid */ verify( ! __preemption_enabled() ); 828 755 if(!try_lock( this )) return false; … … 832 759 insert_first(this.idles, proc); 833 760 834 __atomic_store_n(&this.fd , proc.idle_fd, __ATOMIC_SEQ_CST);761 __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST); 835 762 unlock( this ); 836 763 /* paranoid */ verify( ! 
__preemption_enabled() ); … … 848 775 849 776 { 850 int fd= 0;851 if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;852 __atomic_store_n(&this.fd , fd, __ATOMIC_SEQ_CST);777 struct __fd_waitctx * wctx = 0; 778 if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx; 779 __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST); 853 780 } 854 781 … … 914 841 unsigned tail = *ctx->cq.tail; 915 842 if(head == tail) return false; 916 #if OLD_MAIN 917 ready_schedule_lock(); 918 ret = __cfa_io_drain( proc ); 919 ready_schedule_unlock(); 920 #else 921 ret = __cfa_io_drain( proc ); 922 #endif 843 ready_schedule_lock(); 844 ret = __cfa_io_drain( proc ); 845 ready_schedule_unlock(); 923 846 #endif 924 847 return ret; -
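The kernel.cfa changes above replace a bare eventfd write with a small handshake through a per-processor idle wait-context word. A reduced, stand-alone C11 sketch of that handshake (names simplified, loops and error handling omitted) might look as follows. It assumes, as the startup.cfa change also notes, that a real eventfd is never 0 or 1, so those values can serve as "not sleeping" and "wake pending" sentinels; this is a sketch of the idea, not the runtime's actual code.

#include <stdatomic.h>
#include <sys/eventfd.h>
#include <unistd.h>

struct waitctx { atomic_int fd; };   // 0: awake, 1: wake pending, >1: eventfd of a sleeping proc

// processor side: the caller has already reset w->fd to 0 (cf. mark_idle in the real code)
static void idle_sleep( struct waitctx * w, int evfd ) {
        int expected = atomic_load( &w->fd );
        if ( expected == 1 ) return;                          // a wake-up is already pending
        if ( ! atomic_compare_exchange_strong( &w->fd, &expected, evfd ) )
                return;                                       // lost the race: stay awake
        eventfd_t val;
        eventfd_read( evfd, &val );                           // actually block until woken
}

// waker side: claim the wake by exchanging in 1; only write the eventfd if the
// processor had already published it, i.e. is really asleep.
static void wake_one( struct waitctx * w ) {
        int fd = atomic_exchange( &w->fd, 1 );
        if ( fd > 1 ) eventfd_write( fd, 1 );                 // 0 or 1: nothing more to do
}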
libcfa/src/concurrency/kernel.hfa
r97c215f rf5a51db
  coroutine processorCtx_t {
        struct processor * proc;
+ };
+
+
+ struct __fd_waitctx {
+       volatile int fd;
  };

  ...
        int idle_fd;

+       // Idle waitctx
+       struct __fd_waitctx idle_wctx;
+
        // Termination synchronisation (user semaphore)
        oneshot terminated;
  ...

        // FD to use to wake a processor
-       volatile int fd;
+       struct __fd_waitctx * volatile fdw;

        // Total number of processors
libcfa/src/concurrency/kernel/fwd.hfa
r97c215f rf5a51db
                        if( !(in_kernel) ) enable_interrupts(); \
                }
+               #if defined(CFA_HAVE_LINUX_IO_URING_H)
+                       #define __IO_STATS__(in_kernel, ...) { \
+                               if( !(in_kernel) ) disable_interrupts(); \
+                               with( *__tls_stats() ) { \
+                                       __VA_ARGS__ \
+                               } \
+                               if( !(in_kernel) ) enable_interrupts(); \
+                       }
+               #else
+                       #define __IO_STATS__(in_kernel, ...)
+               #endif
        #else
                #define __STATS__(in_kernel, ...)
+               #define __IO_STATS__(in_kernel, ...)
        #endif
  }
libcfa/src/concurrency/kernel/startup.cfa
r97c215f rf5a51db
        }

+       this.idle_wctx.fd = 0;
+
+       // I'm assuming these two are reserved for standard input and output
+       // so I'm using them as sentinels with idle_wctx.
+       /* paranoid */ verify( this.idle_fd != 0 );
+       /* paranoid */ verify( this.idle_fd != 1 );
+
        #if !defined(__CFA_NO_STATISTICS__)
                print_stats = 0;
  ...
  // Cluster
  static void ?{}(__cluster_proc_list & this) {
-       this.fd = 0;
+       this.fdw = 0p;
        this.idle = 0;
        this.total = 0;
libcfa/src/concurrency/mutex_stmt.hfa
r97c215f rf5a51db
  }

+ struct scoped_lock {
+       L * internal_lock;
+ };
+
+ static inline void ?{}( scoped_lock(L) & this, L & internal_lock ) {
+       this.internal_lock = &internal_lock;
+       lock(internal_lock);
+ }
+
+ static inline void ^?{}( scoped_lock(L) & this ) with(this) {
+       unlock(*internal_lock);
+ }
+
  static inline L * __get_ptr( L & this ) {
        return &this;
libcfa/src/concurrency/preemption.cfa
r97c215f rf5a51db 251 251 bool enabled = __cfaabi_tls.preemption_state.enabled; 252 252 253 // Check if there is a pending preemption 254 processor * proc = __cfaabi_tls.this_processor; 255 bool pending = proc ? proc->pending_preemption : false; 256 if( enabled && pending ) proc->pending_preemption = false; 257 253 258 // create a assembler label after 254 259 // marked as clobber all to avoid movement 255 260 __cfaasm_label(check, after); 261 262 // If we can preempt and there is a pending one 263 // this is a good time to yield 264 if( enabled && pending ) { 265 force_yield( __POLL_PREEMPTION ); 266 } 256 267 return enabled; 257 268 } … … 282 293 // marked as clobber all to avoid movement 283 294 __cfaasm_label(get, after); 295 296 // This is used everywhere, to avoid cost, we DO NOT poll pending preemption 284 297 return val; 285 298 } … … 358 371 if(!ready) { abort("Preemption should be ready"); } 359 372 360 __cfaasm_label(debug, before); 361 362 sigset_t oldset; 363 int ret; 364 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 365 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 366 367 ret = sigismember(&oldset, SIGUSR1); 368 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 369 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 370 371 ret = sigismember(&oldset, SIGALRM); 372 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 373 if(ret == 0) { abort("ERROR SIGALRM is enabled"); } 374 375 ret = sigismember(&oldset, SIGTERM); 376 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 377 if(ret == 1) { abort("ERROR SIGTERM is disabled"); } 378 379 __cfaasm_label(debug, after); 373 sigset_t oldset; 374 int ret; 375 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 376 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 377 378 ret = sigismember(&oldset, SIGUSR1); 379 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 380 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 381 382 ret = sigismember(&oldset, SIGALRM); 383 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 384 if(ret == 0) { abort("ERROR SIGALRM is enabled"); } 385 386 ret = sigismember(&oldset, SIGTERM); 387 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 388 if(ret == 1) { abort("ERROR SIGTERM is disabled"); } 380 389 } 381 390 … … 548 557 __cfaasm_label( check ); 549 558 __cfaasm_label( dsable ); 550 __cfaasm_label( debug );559 // __cfaasm_label( debug ); 551 560 552 561 // Check if preemption is safe … … 555 564 if( __cfaasm_in( ip, check ) ) { ready = false; goto EXIT; }; 556 565 if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; }; 557 if( __cfaasm_in( ip, debug ) ) { ready = false; goto EXIT; };566 // if( __cfaasm_in( ip, debug ) ) { ready = false; goto EXIT; }; 558 567 if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; }; 559 568 if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; }; … … 661 670 662 671 // Check if it is safe to preempt here 663 if( !preemption_ready( ip ) ) { return; } 672 if( !preemption_ready( ip ) ) { 673 #if !defined(__CFA_NO_STATISTICS__) 674 __cfaabi_tls.this_stats->ready.threads.preempt.rllfwd++; 675 #endif 676 return; 677 } 664 678 665 679 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) ); … … 680 694 681 695 
// Preemption can occur here 696 697 #if !defined(__CFA_NO_STATISTICS__) 698 __cfaabi_tls.this_stats->ready.threads.preempt.yield++; 699 #endif 682 700 683 701 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch -
libcfa/src/concurrency/ready_queue.cfa
r97c215f rf5a51db 201 201 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) { 202 202 /* paranoid */ verify( ! __preemption_enabled() ); 203 /* paranoid */ verify( ! kernelTLS().sched_lock );204 203 205 204 // Step 1 : lock global lock … … 207 206 // to simply lock their own lock and enter. 208 207 __atomic_acquire( &write_lock ); 208 209 // Make sure we won't deadlock ourself 210 // Checking before acquiring the writer lock isn't safe 211 // because someone else could have locked us. 212 /* paranoid */ verify( ! kernelTLS().sched_lock ); 209 213 210 214 // Step 2 : lock per-proc lock -
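The ready_queue.cfa hunk reorders a sanity check in ready_mutate_lock: the self-lock assertion is only meaningful once the global writer lock is held, because another processor may still be in the middle of locking this one beforehand. A rough sketch of that check placement with invented names (write_lock, sched_lock); it illustrates the ordering only, not the real reader/writer scheduler lock:

    #include <atomic>
    #include <cassert>

    std::atomic_flag write_lock = ATOMIC_FLAG_INIT;   // global writer lock (assumed shape)
    thread_local bool sched_lock = false;             // per-processor "locked" flag (assumed)

    void ready_mutate_lock_sketch() {
        // Take the global writer lock first ...
        while (write_lock.test_and_set(std::memory_order_acquire)) { /* spin */ }

        // ... and only then assert we are not already locked: before the acquire,
        // another processor could still be in the middle of locking this one.
        assert(!sched_lock);

        // (per-processor locks would be acquired next)
    }

    int main() { ready_mutate_lock_sketch(); }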
libcfa/src/concurrency/stats.cfa
r97c215f rf5a51db 29 29 stats->ready.threads.threads = 0; 30 30 stats->ready.threads.cthreads = 0; 31 stats->ready.threads.preempt.yield = 0; 32 stats->ready.threads.preempt.rllfwd = 0; 31 33 stats->ready.sleep.halts = 0; 32 34 stats->ready.sleep.cancels = 0; 35 stats->ready.sleep.early = 0; 33 36 stats->ready.sleep.wakes = 0; 37 stats->ready.sleep.seen = 0; 34 38 stats->ready.sleep.exits = 0; 35 39 … … 43 47 stats->io.submit.slow = 0; 44 48 stats->io.flush.external = 0; 49 stats->io.flush.dirty = 0; 50 stats->io.flush.full = 0; 51 stats->io.flush.idle = 0; 52 stats->io.flush.eager = 0; 45 53 stats->io.calls.flush = 0; 46 54 stats->io.calls.submitted = 0; … … 71 79 72 80 void __tally_stats( struct __stats_t * cltr, struct __stats_t * proc ) { 73 tally_one( &cltr->ready.push.local.attempt, &proc->ready.push.local.attempt ); 74 tally_one( &cltr->ready.push.local.success, &proc->ready.push.local.success ); 75 tally_one( &cltr->ready.push.share.attempt, &proc->ready.push.share.attempt ); 76 tally_one( &cltr->ready.push.share.success, &proc->ready.push.share.success ); 77 tally_one( &cltr->ready.push.extrn.attempt, &proc->ready.push.extrn.attempt ); 78 tally_one( &cltr->ready.push.extrn.success, &proc->ready.push.extrn.success ); 79 tally_one( &cltr->ready.pop.local .attempt, &proc->ready.pop.local .attempt ); 80 tally_one( &cltr->ready.pop.local .success, &proc->ready.pop.local .success ); 81 tally_one( &cltr->ready.pop.help .attempt, &proc->ready.pop.help .attempt ); 82 tally_one( &cltr->ready.pop.help .success, &proc->ready.pop.help .success ); 83 tally_one( &cltr->ready.pop.steal .attempt, &proc->ready.pop.steal .attempt ); 84 tally_one( &cltr->ready.pop.steal .success, &proc->ready.pop.steal .success ); 85 tally_one( &cltr->ready.pop.search.attempt, &proc->ready.pop.search.attempt ); 86 tally_one( &cltr->ready.pop.search.success, &proc->ready.pop.search.success ); 87 tally_one( &cltr->ready.threads.migration , &proc->ready.threads.migration ); 88 tally_one( &cltr->ready.threads.extunpark , &proc->ready.threads.extunpark ); 89 tally_one( &cltr->ready.threads.threads , &proc->ready.threads.threads ); 90 tally_one( &cltr->ready.threads.cthreads , &proc->ready.threads.cthreads ); 91 tally_one( &cltr->ready.sleep.halts , &proc->ready.sleep.halts ); 92 tally_one( &cltr->ready.sleep.cancels , &proc->ready.sleep.cancels ); 93 tally_one( &cltr->ready.sleep.wakes , &proc->ready.sleep.wakes ); 94 tally_one( &cltr->ready.sleep.exits , &proc->ready.sleep.exits ); 81 tally_one( &cltr->ready.push.local.attempt , &proc->ready.push.local.attempt ); 82 tally_one( &cltr->ready.push.local.success , &proc->ready.push.local.success ); 83 tally_one( &cltr->ready.push.share.attempt , &proc->ready.push.share.attempt ); 84 tally_one( &cltr->ready.push.share.success , &proc->ready.push.share.success ); 85 tally_one( &cltr->ready.push.extrn.attempt , &proc->ready.push.extrn.attempt ); 86 tally_one( &cltr->ready.push.extrn.success , &proc->ready.push.extrn.success ); 87 tally_one( &cltr->ready.pop.local .attempt , &proc->ready.pop.local .attempt ); 88 tally_one( &cltr->ready.pop.local .success , &proc->ready.pop.local .success ); 89 tally_one( &cltr->ready.pop.help .attempt , &proc->ready.pop.help .attempt ); 90 tally_one( &cltr->ready.pop.help .success , &proc->ready.pop.help .success ); 91 tally_one( &cltr->ready.pop.steal .attempt , &proc->ready.pop.steal .attempt ); 92 tally_one( &cltr->ready.pop.steal .success , &proc->ready.pop.steal .success ); 93 tally_one( &cltr->ready.pop.search.attempt , 
&proc->ready.pop.search.attempt ); 94 tally_one( &cltr->ready.pop.search.success , &proc->ready.pop.search.success ); 95 tally_one( &cltr->ready.threads.migration , &proc->ready.threads.migration ); 96 tally_one( &cltr->ready.threads.extunpark , &proc->ready.threads.extunpark ); 97 tally_one( &cltr->ready.threads.threads , &proc->ready.threads.threads ); 98 tally_one( &cltr->ready.threads.cthreads , &proc->ready.threads.cthreads ); 99 tally_one( &cltr->ready.threads.preempt.yield , &proc->ready.threads.preempt.yield ); 100 tally_one( &cltr->ready.threads.preempt.rllfwd, &proc->ready.threads.preempt.rllfwd ); 101 tally_one( &cltr->ready.sleep.halts , &proc->ready.sleep.halts ); 102 tally_one( &cltr->ready.sleep.cancels , &proc->ready.sleep.cancels ); 103 tally_one( &cltr->ready.sleep.early , &proc->ready.sleep.early ); 104 tally_one( &cltr->ready.sleep.wakes , &proc->ready.sleep.wakes ); 105 tally_one( &cltr->ready.sleep.seen , &proc->ready.sleep.wakes ); 106 tally_one( &cltr->ready.sleep.exits , &proc->ready.sleep.exits ); 95 107 96 108 #if defined(CFA_HAVE_LINUX_IO_URING_H) … … 103 115 tally_one( &cltr->io.submit.slow , &proc->io.submit.slow ); 104 116 tally_one( &cltr->io.flush.external , &proc->io.flush.external ); 117 tally_one( &cltr->io.flush.dirty , &proc->io.flush.dirty ); 118 tally_one( &cltr->io.flush.full , &proc->io.flush.full ); 119 tally_one( &cltr->io.flush.idle , &proc->io.flush.idle ); 120 tally_one( &cltr->io.flush.eager , &proc->io.flush.eager ); 105 121 tally_one( &cltr->io.calls.flush , &proc->io.calls.flush ); 106 122 tally_one( &cltr->io.calls.submitted , &proc->io.calls.submitted ); … … 153 169 | " (" | eng3(ready.pop.search.attempt) | " try)"; 154 170 155 sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel," | eng3(ready.sleep.wakes) | "wake," | eng3(ready.sleep.exits) | "exit"; 171 sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel," 172 | eng3(ready.sleep.wakes + ready.sleep.early) | '(' | eng3(ready.sleep.early) | ',' | eng3(ready.sleep.seen) | ')' | " wake(early, seen)," 173 | eng3(ready.sleep.exits) | "exit"; 174 sstr | "- Preemption : " | eng3(ready.threads.preempt.yield) | "yields," | eng3(ready.threads.preempt.rllfwd) | "delayed"; 156 175 sstr | nl; 157 176 } … … 178 197 if(io.alloc.fail || io.alloc.revoke || io.alloc.block) 179 198 sstr | "- failures : " | eng3(io.alloc.fail) | "oom, " | eng3(io.alloc.revoke) | "rvk, " | eng3(io.alloc.block) | "blk"; 180 if(io.flush.external)181 sstr | "- flush external : " | eng3(io.flush.external);199 // if(io.flush.external) 200 // sstr | "- flush external : " | eng3(io.flush.external); 182 201 183 202 double avgsubs = ((double)io.calls.submitted) / io.calls.flush; 184 203 double avgcomp = ((double)io.calls.completed) / io.calls.drain; 185 204 sstr | "- syscll : " 186 | " sub " | eng3(io.calls. flush) | "/" | eng3(io.calls.submitted) | "(" | ws(3, 3, avgsubs) | "/flush)"187 | " - cmp " | eng3(io.calls. 
drain) | "/" | eng3(io.calls.completed) | "(" | ws(3, 3, avgcomp) | "/drain)"205 | " sub " | eng3(io.calls.submitted) | "/" | eng3(io.calls.flush) | "(" | ws(3, 3, avgsubs) | "/flush)" 206 | " - cmp " | eng3(io.calls.completed) | "/" | eng3(io.calls.drain) | "(" | ws(3, 3, avgcomp) | "/drain)" 188 207 | " - " | eng3(io.calls.errors.busy) | " EBUSY"; 208 sstr | " - sub: " | eng3(io.flush.full) | "full, " | eng3(io.flush.dirty) | "drty, " | eng3(io.flush.idle) | "idle, " | eng3(io.flush.eager) | "eagr, " | eng3(io.flush.external) | "ext"; 189 209 sstr | "- ops blk: " 190 210 | " sk rd: " | eng3(io.ops.sockread) | "epll: " | eng3(io.ops.epllread) -
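The stats.cfa hunks zero, aggregate, and print the new counters (preempt.yield/rllfwd, sleep.early/seen, and the io.flush breakdown). The aggregation step folds each per-processor counter into the cluster-wide total; a small sketch of that pattern, assuming a relaxed atomic add (the real tally_one helper is defined elsewhere and is not part of this changeset):

    #include <cstdint>

    // Assumed shape of the aggregation helper; the real tally_one may differ.
    static inline void tally_one(volatile uint64_t * cltr, volatile uint64_t * proc) {
        __atomic_fetch_add(cltr, *proc, __ATOMIC_RELAXED);   // gcc/clang builtin
    }

    struct PreemptCounters { volatile uint64_t yield, rllfwd; };

    // Mirrors the new tally lines for the preemption counters.
    void tally_preempt(PreemptCounters * cltr, PreemptCounters * proc) {
        tally_one(&cltr->yield,  &proc->yield);
        tally_one(&cltr->rllfwd, &proc->rllfwd);
    }

    int main() {
        PreemptCounters cluster = {0, 0}, processor = {3, 1};
        tally_preempt(&cluster, &processor);
        return (int)cluster.yield;   // 3
    }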
libcfa/src/concurrency/stats.hfa
r97c215f rf5a51db 65 65 volatile int64_t threads; // number of threads in the system, includes only local change 66 66 volatile int64_t cthreads; // number of threads in the system, includes only local change 67 struct { 68 volatile uint64_t yield; 69 volatile uint64_t rllfwd; 70 } preempt; 67 71 } threads; 68 72 struct { 69 73 volatile uint64_t halts; 70 74 volatile uint64_t cancels; 75 volatile uint64_t early; 71 76 volatile uint64_t wakes; 77 volatile uint64_t seen; 72 78 volatile uint64_t exits; 73 79 } sleep; … … 89 95 struct { 90 96 volatile uint64_t external; 97 volatile uint64_t dirty; 98 volatile uint64_t full; 99 volatile uint64_t idle; 100 volatile uint64_t eager; 91 101 } flush; 92 102 struct { -
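stats.hfa only adds the storage for those counters, grouped into small nested structs and, like the rest of the statistics, compiled out when statistics are disabled. A purely illustrative sketch of that shape; NO_STATISTICS and COUNT are invented stand-ins for the runtime's __CFA_NO_STATISTICS__ guard:

    #include <cstdint>

    #if !defined(NO_STATISTICS)
    #define COUNT(field) ((field)++)
    #else
    #define COUNT(field) ((void)0)
    #endif

    struct ReadyThreadStats {
        struct { uint64_t yield = 0, rllfwd = 0; } preempt;   // shaped like the new fields
    };

    int main() {
        ReadyThreadStats s;
        COUNT(s.preempt.yield);    // a preemption that actually yielded
        COUNT(s.preempt.rllfwd);   // a preemption deferred (rolled forward)
        return (int)(s.preempt.yield + s.preempt.rllfwd);   // 2 with statistics enabled
    }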
libcfa/src/stdhdr/pthread.h
r97c215f rf5a51db 10 10 // Created On : Wed Jun 16 13:39:06 2021 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jun 16 13:39:42 202113 // Update Count : 1 12 // Last Modified On : Thu Feb 3 21:53:26 2022 13 // Update Count : 13 14 14 // 15 15 16 // pthread.h and setjmp.h cannot agree on the type of __sigsetjmp: 17 // 18 // extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __attribute__ ((__nothrow__)); 19 // extern int __sigsetjmp (struct __jmp_buf_tag __env[1], int __savemask) __attribute__ ((__nothrow__)); 20 // 21 // With -Wall, gcc-11 warns about the disagreement unless the CPP directive 22 // 23 // # 1 "/usr/include/pthread.h" 1 3 4 24 // 25 // appears, which appears to be witchcraft. Unfortunately, this directive is removed by the CFA preprocessor, so the 26 // batchtest fails because of the spurious warning message. Hence, the warning is elided. 27 16 28 extern "C" { 29 #if defined(__GNUC__) && __GNUC__ == 11 30 #pragma GCC diagnostic push 31 #pragma GCC diagnostic ignored "-Warray-parameter" 32 #endif // defined(__GNUC__) && __GNUC__ == 11 33 17 34 #include_next <pthread.h> // has internal check for multiple expansion 35 36 #if defined(__GNUC__) && __GNUC__ == 11 37 #pragma GCC diagnostic pop 38 #endif // defined(__GNUC__) && __GNUC__ == 11 18 39 } // extern "C" 19 40 20 41 // Local Variables: // 21 // tab-width: 4 //22 42 // mode: c++ // 23 // compile-command: "make install" //24 43 // End: // -
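The comment added to the pthread.h wrapper explains the gcc-11 false positive being silenced. A reduced, self-contained illustration of the same disagreement and of the push/ignored/pop pattern used around the include; sigsetjmp_like and jmp_tag are made-up names, the real conflict being between glibc's two declarations of __sigsetjmp:

    // The same function redeclared with a pointer parameter and with an array
    // parameter trips gcc-11's -Warray-parameter under -Wall; the pragmas silence
    // only that warning and only for this span, exactly as the wrapper does.
    struct jmp_tag { int x; };

    #if defined(__GNUC__) && __GNUC__ == 11
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Warray-parameter"
    #endif

    extern int sigsetjmp_like(struct jmp_tag * env, int savemask);
    extern int sigsetjmp_like(struct jmp_tag env[1], int savemask);   // would warn without the pragma

    #if defined(__GNUC__) && __GNUC__ == 11
    #pragma GCC diagnostic pop
    #endif

    int main() { return 0; }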
libcfa/src/stdhdr/setjmp.h
r97c215f rf5a51db 10 10 // Created On : Mon Jul 4 23:25:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jul 5 20:38:33 201613 // Update Count : 1 212 // Last Modified On : Thu Feb 3 21:53:28 2022 13 // Update Count : 18 14 14 // 15 15 16 // pthread.h and setjmp.h cannot agree on the type of __sigsetjmp: 17 // 18 // extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __attribute__ ((__nothrow__)); 19 // extern int __sigsetjmp (struct __jmp_buf_tag __env[1], int __savemask) __attribute__ ((__nothrow__)); 20 // 21 // With -Wall, gcc-11 warns about the disagreement unless the CPP directive 22 // 23 // # 1 "/usr/include/pthread.h" 1 3 4 24 // 25 // appears, which appears to be witchcraft. Unfortunately, this directive is removed by the CFA preprocessor, so the 26 // batchtest fails because of the spurious warning message. Hence, the warning is elided. 27 16 28 extern "C" { 29 #if defined(__GNUC__) && __GNUC__ == 11 30 #pragma GCC diagnostic push 31 #pragma GCC diagnostic ignored "-Warray-parameter" 32 #endif // defined(__GNUC__) && __GNUC__ == 11 33 17 34 #include_next <setjmp.h> // has internal check for multiple expansion 35 36 #if defined(__GNUC__) && __GNUC__ == 11 37 #pragma GCC diagnostic pop 38 #endif // defined(__GNUC__) && __GNUC__ == 11 18 39 } // extern "C" 19 40 20 41 // Local Variables: // 21 // tab-width: 4 //22 42 // mode: c++ // 23 // compile-command: "make install" //24 43 // End: // -
src/AST/Convert.cpp
r97c215f rf5a51db 9 9 // Author : Thierry Delisle 10 10 // Created On : Thu May 09 15::37::05 2019 11 // Last Modified By : Andrew Beach12 // Last Modified On : Wed Jul 14 16:15:00 202113 // Update Count : 3711 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 13:19:22 2022 13 // Update Count : 41 14 14 // 15 15 … … 393 393 auto stmt = new IfStmt( 394 394 get<Expression>().accept1( node->cond ), 395 get<Statement>().accept1( node->then Part),396 get<Statement>().accept1( node->else Part),395 get<Statement>().accept1( node->then ), 396 get<Statement>().accept1( node->else_ ), 397 397 get<Statement>().acceptL( node->inits ) 398 398 ); … … 419 419 } 420 420 421 const ast::Stmt * visit( const ast::While Stmt * node ) override final {421 const ast::Stmt * visit( const ast::WhileDoStmt * node ) override final { 422 422 if ( inCache( node ) ) return nullptr; 423 423 auto inits = get<Statement>().acceptL( node->inits ); 424 auto stmt = new While Stmt(424 auto stmt = new WhileDoStmt( 425 425 get<Expression>().accept1( node->cond ), 426 426 get<Statement>().accept1( node->body ), 427 get<Statement>().accept1( node->else_ ), 427 428 inits, 428 429 node->isDoWhile … … 437 438 get<Expression>().accept1( node->cond ), 438 439 get<Expression>().accept1( node->inc ), 439 get<Statement>().accept1( node->body ) 440 get<Statement>().accept1( node->body ), 441 get<Statement>().accept1( node->else_ ) 440 442 ); 441 443 return stmtPostamble( stmt, node ); … … 1872 1874 old->location, 1873 1875 GET_ACCEPT_1(condition, Expr), 1874 GET_ACCEPT_1(then Part, Stmt),1875 GET_ACCEPT_1(else Part, Stmt),1876 GET_ACCEPT_1(then, Stmt), 1877 GET_ACCEPT_1(else_, Stmt), 1876 1878 GET_ACCEPT_V(initialization, Stmt), 1877 1879 GET_LABELS_V(old->labels) … … 1902 1904 } 1903 1905 1904 virtual void visit( const While Stmt * old ) override final {1906 virtual void visit( const WhileDoStmt * old ) override final { 1905 1907 if ( inCache( old ) ) return; 1906 this->node = new ast::While Stmt(1908 this->node = new ast::WhileDoStmt( 1907 1909 old->location, 1908 1910 GET_ACCEPT_1(condition, Expr), 1909 1911 GET_ACCEPT_1(body, Stmt), 1912 GET_ACCEPT_1(else_, Stmt), 1910 1913 GET_ACCEPT_V(initialization, Stmt), 1911 1914 old->isDoWhile, … … 1923 1926 GET_ACCEPT_1(increment, Expr), 1924 1927 GET_ACCEPT_1(body, Stmt), 1928 GET_ACCEPT_1(else_, Stmt), 1925 1929 GET_LABELS_V(old->labels) 1926 1930 ); -
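The Convert.cpp hunks follow two renames in the new AST: IfStmt's branches are now then/else_, and WhileStmt became WhileDoStmt, which, like ForStmt, now carries an else_ clause. A toy mini-AST showing the renamed fields; these are stand-in structs, not the real ast:: classes:

    #include <memory>

    struct Expr { virtual ~Expr() = default; };
    struct Stmt { virtual ~Stmt() = default; };

    struct IfStmt : Stmt {
        std::unique_ptr<Expr> cond;
        std::unique_ptr<Stmt> then;    // was thenPart
        std::unique_ptr<Stmt> else_;   // was elsePart
    };

    struct WhileDoStmt : Stmt {        // was WhileStmt
        std::unique_ptr<Expr> cond;
        std::unique_ptr<Stmt> body;
        std::unique_ptr<Stmt> else_;   // new: loop else clause (ForStmt gains one too)
        bool isDoWhile = false;
    };

    // Conversion code now reads then / else_ rather than thenPart / elsePart.
    bool hasElse(const IfStmt & s)      { return s.else_ != nullptr; }
    bool hasElse(const WhileDoStmt & s) { return s.else_ != nullptr; }

    int main() { IfStmt s; return hasElse(s); }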
src/AST/Copy.hpp
r97c215f rf5a51db 10 10 // Created On : Wed Jul 10 16:13:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr Nov 11 9:22:00 202113 // Update Count : 212 // Last Modified On : Wed Dec 15 11:07:00 2021 13 // Update Count : 3 14 14 // 15 15 … … 52 52 Node * deepCopy<Node>( const Node * localRoot ); 53 53 54 template<typename node_t, enum Node::ref_type ref_t> 55 node_t * shallowCopy( const ptr_base<node_t, ref_t> & localRoot ) { 56 return shallowCopy( localRoot.get() ); 57 } 58 59 template<typename node_t, enum Node::ref_type ref_t> 60 node_t * deepCopy( const ptr_base<node_t, ref_t> & localRoot ) { 61 return deepCopy( localRoot.get() ); 62 } 63 54 64 } 55 65 -
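The Copy.hpp addition lets shallowCopy/deepCopy be called on a smart ast::ptr directly instead of spelling out .get() at every call site. A simplified sketch of that forwarding-overload pattern; ptr here is a bare stand-in for ptr_base, without the ref_type machinery:

    struct Node { int value = 0; };

    // Existing raw-pointer entry point (assumed to exist already).
    Node * deepCopy(const Node * n) { return n ? new Node(*n) : nullptr; }

    // Simplified stand-in for ast::ptr_base: the new overload just forwards to .get().
    template<typename node_t>
    struct ptr {
        node_t * p = nullptr;
        node_t * get() const { return p; }
    };

    template<typename node_t>
    node_t * deepCopy(const ptr<node_t> & localRoot) {
        return deepCopy(localRoot.get());
    }

    int main() {
        Node n{42};
        ptr<Node> pn{&n};
        Node * copy = deepCopy(pn);   // no explicit .get() at the call site
        delete copy;
    }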
src/AST/Fwd.hpp
r97c215f rf5a51db 10 10 // Created On : Wed May 8 16:05:00 2019 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:37:39 202113 // Update Count : 412 // Last Modified On : Tue Feb 1 09:08:33 2022 13 // Update Count : 5 14 14 // 15 15 … … 44 44 class DirectiveStmt; 45 45 class IfStmt; 46 class While Stmt;46 class WhileDoStmt; 47 47 class ForStmt; 48 48 class SwitchStmt; -
src/AST/Node.cpp
r97c215f rf5a51db 10 10 // Created On : Thu May 16 14:16:00 2019 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:25:06 202113 // Update Count : 212 // Last Modified On : Tue Feb 1 09:09:39 2022 13 // Update Count : 3 14 14 // 15 15 … … 146 146 template class ast::ptr_base< ast::IfStmt, ast::Node::ref_type::weak >; 147 147 template class ast::ptr_base< ast::IfStmt, ast::Node::ref_type::strong >; 148 template class ast::ptr_base< ast::While Stmt, ast::Node::ref_type::weak >;149 template class ast::ptr_base< ast::While Stmt, ast::Node::ref_type::strong >;148 template class ast::ptr_base< ast::WhileDoStmt, ast::Node::ref_type::weak >; 149 template class ast::ptr_base< ast::WhileDoStmt, ast::Node::ref_type::strong >; 150 150 template class ast::ptr_base< ast::ForStmt, ast::Node::ref_type::weak >; 151 151 template class ast::ptr_base< ast::ForStmt, ast::Node::ref_type::strong >; -
src/AST/Node.hpp
r97c215f rf5a51db 188 188 } 189 189 190 ptr_base & operator=( const node_t * node ) { 191 assign( node ); 192 return *this; 193 } 194 190 195 template<typename o_node_t> 191 196 ptr_base & operator=( const o_node_t * node ) { -
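Node.hpp gains a non-template operator= for the exact stored node type next to the existing templated one. A self-contained illustration with a stripped-down stand-in for ptr_base (the real class has reference counting and further overloads); in this sketch the non-template overload also makes assignment from a null pointer constant well-formed, which template deduction alone would reject:

    struct Node {};
    struct DerivedNode : Node {};

    template<typename node_t>
    struct ptr_base {
        const node_t * p = nullptr;
        void assign(const node_t * n) { p = n; }

        ptr_base & operator=(const node_t * node) {            // new: exact stored type
            assign(node);
            return *this;
        }

        template<typename o_node_t>
        ptr_base & operator=(const o_node_t * node) {          // existing: convertible types
            assign(node);
            return *this;
        }
    };

    int main() {
        ptr_base<Node> pb;
        Node n;
        DerivedNode d;
        pb = &n;        // exact type: the non-template overload wins
        pb = nullptr;   // in this sketch, only the non-template overload accepts a
                        // null pointer constant (deduction from nullptr would fail)
        pb = &d;        // convertible type: still handled by the templated overload
    }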
src/AST/Pass.hpp
r97c215f rf5a51db 146 146 const ast::Stmt * visit( const ast::DirectiveStmt * ) override final; 147 147 const ast::Stmt * visit( const ast::IfStmt * ) override final; 148 const ast::Stmt * visit( const ast::While Stmt* ) override final;148 const ast::Stmt * visit( const ast::WhileDoStmt * ) override final; 149 149 const ast::Stmt * visit( const ast::ForStmt * ) override final; 150 150 const ast::Stmt * visit( const ast::SwitchStmt * ) override final; … … 238 238 239 239 private: 240 const ast::Stmt * call_accept( const ast::Stmt * ); 241 const ast::Expr * call_accept( const ast::Expr * ); 242 243 // requests WithStmtsToAdd directly add to this statement, as if it is a compound. 244 245 const ast::Stmt * call_accept_as_compound(const ast::Stmt *); 246 240 241 // Regular nodes 247 242 template< typename node_t > 248 auto call_accept( const node_t * node ) -> typename std::enable_if< 243 struct result1 { 244 bool differs; 245 const node_t * value; 246 247 template< typename object_t, typename super_t, typename field_t > 248 void apply(object_t *, field_t super_t::* field); 249 }; 250 251 result1<ast::Stmt> call_accept( const ast::Stmt * ); 252 result1<ast::Expr> call_accept( const ast::Expr * ); 253 254 template< typename node_t > 255 auto call_accept( const node_t * node ) 256 -> typename std::enable_if< 249 257 !std::is_base_of<ast::Expr, node_t>::value && 250 258 !std::is_base_of<ast::Stmt, node_t>::value 251 , decltype( node->accept(*this) ) 259 , result1< 260 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 261 > 252 262 >::type; 253 263 264 // requests WithStmtsToAdd directly add to this statement, as if it is a compound. 265 result1<ast::Stmt> call_accept_as_compound(const ast::Stmt *); 266 267 template<typename it_t, template <class...> class container_t> 268 static inline void take_all_delta( it_t it, container_t<ast::ptr<ast::Decl>> * decls, bool * mutated = nullptr ) { 269 if(empty(decls)) return; 270 271 std::transform(decls->begin(), decls->end(), it, [](ast::ptr<ast::Decl>&& decl) -> auto { 272 auto loc = decl->location; 273 auto stmt = new DeclStmt( loc, decl.release() ); 274 return { {stmt}, -1, false }; 275 }); 276 decls->clear(); 277 if(mutated) *mutated = true; 278 } 279 280 // Container of statements 254 281 template< template <class...> class container_t > 255 container_t< ptr<Stmt> > call_accept( const container_t< ptr<Stmt> > & ); 256 282 struct resultNstmt { 283 struct delta { 284 ptr<Stmt> nval; 285 ssize_t old_idx; 286 bool is_old; 287 288 delta(const Stmt * s, ssize_t i, bool old) : nval{s}, old_idx{i}, is_old{old} {} 289 }; 290 291 bool differs; 292 container_t< delta > values; 293 294 resultNstmt() : differs(false), values{} {} 295 resultNstmt(bool diff, container_t< delta > && vals) : differs(diff), values(vals) {} 296 297 template< typename object_t, typename super_t, typename field_t > 298 void apply(object_t *, field_t super_t::* field); 299 300 template< template <class...> class incontainer_t > 301 void take_all( incontainer_t<ast::ptr<ast::Stmt>> * stmts ) { 302 if(!stmts || stmts->empty()) return; 303 304 std::transform(stmts->begin(), stmts->end(), std::back_inserter( values ), [](ast::ptr<ast::Stmt>& decl) -> delta { 305 return delta( decl.release(), -1, false ); 306 }); 307 stmts->clear(); 308 differs = true; 309 } 310 311 template< template <class...> class incontainer_t > 312 void take_all( incontainer_t<ast::ptr<ast::Decl>> * decls ) { 313 if(!decls || decls->empty()) return; 314 315 std::transform(decls->begin(), decls->end(), 
std::back_inserter( values ), [](ast::ptr<ast::Decl>& decl) -> auto { 316 auto loc = decl->location; 317 auto stmt = new DeclStmt( loc, decl.release() ); 318 return delta( stmt, -1, false ); 319 }); 320 decls->clear(); 321 differs = true; 322 } 323 }; 324 325 template< template <class...> class container_t > 326 resultNstmt<container_t> call_accept( const container_t< ptr<Stmt> > & ); 327 328 // Container of something 257 329 template< template <class...> class container_t, typename node_t > 258 container_t< ptr<node_t> > call_accept( const container_t< ptr<node_t> > & container ); 330 struct resultN { 331 bool differs; 332 container_t<ptr<node_t>> values; 333 334 template< typename object_t, typename super_t, typename field_t > 335 void apply(object_t *, field_t super_t::* field); 336 }; 337 338 template< template <class...> class container_t, typename node_t > 339 resultN< container_t, node_t > call_accept( const container_t< ptr<node_t> > & container ); 259 340 260 341 public: 261 342 /// Logic to call the accept and mutate the parent if needed, delegates call to accept 262 template<typename node_t, typename parent_t, typename child_t>263 void maybe_accept(const node_t * &, child_t parent_t::* child);264 265 template<typename node_t, typename parent_t, typename child_t>266 void maybe_accept_as_compound(const node_t * &, child_t parent_t::* child);343 template<typename node_t, typename parent_t, typename field_t> 344 void maybe_accept(const node_t * &, field_t parent_t::* field); 345 346 template<typename node_t, typename parent_t, typename field_t> 347 void maybe_accept_as_compound(const node_t * &, field_t parent_t::* field); 267 348 268 349 private: -
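The Pass.hpp rewrite changes call_accept from returning the possibly-new node to returning a small result object: a differs flag plus the new value(s), with an apply() that knows how to write the result back into a field of the (already cloned) parent. A stripped-down sketch of the single-node case; the real result1/resultNstmt/resultN carry more machinery (member pointers on the super class, old/new deltas for statement lists):

    // Minimal single-node result: the caller checks differs and, only then, clones
    // the parent and lets apply() write the new child into the given field.
    template<typename node_t>
    struct result1 {
        bool differs = false;
        const node_t * value = nullptr;

        template<typename object_t, typename field_t>
        void apply(object_t * obj, field_t object_t::* field) const {
            obj->*field = value;
        }
    };

    struct Expr { };
    struct ExprStmt { const Expr * expr = nullptr; };

    // Stand-in for call_accept on a child that the visitor did not change.
    result1<Expr> call_accept_sketch(const Expr * e) { return { false, e }; }

    int main() {
        Expr e;
        ExprStmt stmt{ &e };
        auto r = call_accept_sketch(stmt.expr);
        if (r.differs) {                      // unchanged here, so no write-back happens
            r.apply(&stmt, &ExprStmt::expr);
        }
    }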
src/AST/Pass.impl.hpp
r97c215f rf5a51db 34 34 __pass::previsit( core, node, 0 ); 35 35 36 #define VISIT( code... ) \37 /* if this node should visit its children */ \38 if ( __visit_children() ) { \39 /* visit the children */ \40 code \41 }42 43 36 #define VISIT_END( type, node ) \ 44 37 /* call the implementation of the postvisit of this pass */ \ … … 86 79 87 80 template<typename it_t, template <class...> class container_t> 88 static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * decls, bool * mutated = nullptr ) {89 if(empty( decls)) return;90 91 std::move( decls->begin(), decls->end(), it);92 decls->clear();81 static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * stmts, bool * mutated = nullptr ) { 82 if(empty(stmts)) return; 83 84 std::move(stmts->begin(), stmts->end(), it); 85 stmts->clear(); 93 86 if(mutated) *mutated = true; 94 87 } … … 130 123 return !new_val.empty(); 131 124 } 125 } 126 127 128 template< typename core_t > 129 template< typename node_t > 130 template< typename object_t, typename super_t, typename field_t > 131 void ast::Pass< core_t >::result1< node_t >::apply(object_t * object, field_t super_t::* field) { 132 object->*field = value; 132 133 } 133 134 … … 138 139 !std::is_base_of<ast::Expr, node_t>::value && 139 140 !std::is_base_of<ast::Stmt, node_t>::value 140 , decltype( node->accept(*this) ) 141 , ast::Pass< core_t >::result1< 142 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 143 > 141 144 >::type 142 145 { … … 147 150 static_assert( !std::is_base_of<ast::Stmt, node_t>::value, "ERROR"); 148 151 149 return node->accept( *this ); 152 auto nval = node->accept( *this ); 153 ast::Pass< core_t >::result1< 154 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 155 > res; 156 res.differs = nval != node; 157 res.value = nval; 158 return res; 150 159 } 151 160 152 161 template< typename core_t > 153 const ast::Expr *ast::Pass< core_t >::call_accept( const ast::Expr * expr ) {162 ast::Pass< core_t >::result1<ast::Expr> ast::Pass< core_t >::call_accept( const ast::Expr * expr ) { 154 163 __pedantic_pass_assert( __visit_children() ); 155 164 __pedantic_pass_assert( expr ); … … 160 169 } 161 170 162 return expr->accept( *this ); 171 auto nval = expr->accept( *this ); 172 return { nval != expr, nval }; 163 173 } 164 174 165 175 template< typename core_t > 166 const ast::Stmt *ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) {176 ast::Pass< core_t >::result1<ast::Stmt> ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) { 167 177 __pedantic_pass_assert( __visit_children() ); 168 178 __pedantic_pass_assert( stmt ); 169 179 170 return stmt->accept( *this ); 180 const ast::Stmt * nval = stmt->accept( *this ); 181 return { nval != stmt, nval }; 171 182 } 172 183 173 184 template< typename core_t > 174 const ast::Stmt *ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) {185 ast::Pass< core_t >::result1<ast::Stmt> ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) { 175 186 __pedantic_pass_assert( __visit_children() ); 176 187 __pedantic_pass_assert( stmt ); … … 197 208 // If the pass doesn't want to add anything then we are done 198 209 if( empty(stmts_before) && empty(stmts_after) && empty(decls_before) && empty(decls_after) ) { 199 return nstmt;210 return { nstmt != stmt, nstmt }; 200 211 } 201 212 … … 219 230 __pass::take_all( std::back_inserter( compound->kids ), stmts_after ); 220 231 221 return compound;232 return {true, compound}; 222 233 } 223 234 224 235 
template< typename core_t > 225 236 template< template <class...> class container_t > 226 container_t< ptr<Stmt> > ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) { 237 template< typename object_t, typename super_t, typename field_t > 238 void ast::Pass< core_t >::resultNstmt<container_t>::apply(object_t * object, field_t super_t::* field) { 239 auto & container = object->*field; 240 __pedantic_pass_assert( container.size() <= values.size() ); 241 242 auto cit = enumerate(container).begin(); 243 244 container_t<ptr<Stmt>> nvals; 245 for(delta & d : values) { 246 if( d.is_old ) { 247 __pedantic_pass_assert( cit.idx <= d.old_idx ); 248 std::advance( cit, d.old_idx - cit.idx ); 249 nvals.push_back( std::move( (*cit).val) ); 250 } else { 251 nvals.push_back( std::move(d.nval) ); 252 } 253 } 254 255 object->*field = std::move(nvals); 256 } 257 258 template< typename core_t > 259 template< template <class...> class container_t > 260 ast::Pass< core_t >::resultNstmt<container_t> ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) { 227 261 __pedantic_pass_assert( __visit_children() ); 228 262 if( statements.empty() ) return {}; … … 251 285 pass_visitor_stats.avg->push(pass_visitor_stats.depth); 252 286 253 bool mutated = false; 254 container_t< ptr<Stmt> > new_kids; 255 for( const Stmt * stmt : statements ) { 287 resultNstmt<container_t> new_kids; 288 for( auto value : enumerate( statements ) ) { 256 289 try { 290 size_t i = value.idx; 291 const Stmt * stmt = value.val; 257 292 __pedantic_pass_assert( stmt ); 258 293 const ast::Stmt * new_stmt = stmt->accept( *this ); 259 294 assert( new_stmt ); 260 if(new_stmt != stmt ) mutated = true;295 if(new_stmt != stmt ) { new_kids.differs = true; } 261 296 262 297 // Make sure that it is either adding statements or declartions but not both … … 268 303 269 304 // Take all the statements which should have gone after, N/A for first iteration 270 __pass::take_all( std::back_inserter( new_kids ), decls_before, &mutated);271 __pass::take_all( std::back_inserter( new_kids ), stmts_before, &mutated);305 new_kids.take_all( decls_before ); 306 new_kids.take_all( stmts_before ); 272 307 273 308 // Now add the statement if there is one 274 new_kids.emplace_back( new_stmt ); 309 if(new_stmt != stmt) { 310 new_kids.values.emplace_back( new_stmt, i, false ); 311 } else { 312 new_kids.values.emplace_back( nullptr, i, true ); 313 } 275 314 276 315 // Take all the declarations that go before 277 __pass::take_all( std::back_inserter( new_kids ), decls_after, &mutated);278 __pass::take_all( std::back_inserter( new_kids ), stmts_after, &mutated);316 new_kids.take_all( decls_after ); 317 new_kids.take_all( stmts_after ); 279 318 } 280 319 catch ( SemanticErrorException &e ) { … … 285 324 if ( !errors.isEmpty() ) { throw errors; } 286 325 287 return mutated ? 
new_kids : container_t< ptr<Stmt> >();326 return new_kids; 288 327 } 289 328 290 329 template< typename core_t > 291 330 template< template <class...> class container_t, typename node_t > 292 container_t< ast::ptr<node_t> > ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) { 331 template< typename object_t, typename super_t, typename field_t > 332 void ast::Pass< core_t >::resultN<container_t, node_t>::apply(object_t * object, field_t super_t::* field) { 333 auto & container = object->*field; 334 __pedantic_pass_assert( container.size() == values.size() ); 335 336 for(size_t i = 0; i < container.size(); i++) { 337 // Take all the elements that are different in 'values' 338 // and swap them into 'container' 339 if( values[i] != nullptr ) std::swap(container[i], values[i]); 340 } 341 342 // Now the original containers should still have the unchanged values 343 // but also contain the new values 344 } 345 346 template< typename core_t > 347 template< template <class...> class container_t, typename node_t > 348 ast::Pass< core_t >::resultN<container_t, node_t> ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) { 293 349 __pedantic_pass_assert( __visit_children() ); 294 350 if( container.empty() ) return {}; … … 300 356 301 357 bool mutated = false; 302 container_t< ast::ptr<node_t>> new_kids;358 container_t<ptr<node_t>> new_kids; 303 359 for ( const node_t * node : container ) { 304 360 try { 305 361 __pedantic_pass_assert( node ); 306 362 const node_t * new_stmt = strict_dynamic_cast< const node_t * >( node->accept( *this ) ); 307 if(new_stmt != node ) mutated = true; 308 309 new_kids.emplace_back( new_stmt ); 363 if(new_stmt != node ) { 364 mutated = true; 365 new_kids.emplace_back( new_stmt ); 366 } else { 367 new_kids.emplace_back( nullptr ); 368 } 369 310 370 } 311 371 catch( SemanticErrorException &e ) { … … 313 373 } 314 374 } 375 376 __pedantic_pass_assert( new_kids.size() == container.size() ); 315 377 pass_visitor_stats.depth--; 316 378 if ( ! errors.isEmpty() ) { throw errors; } 317 379 318 return mutated ? 
new_kids : container_t< ast::ptr<node_t> >();380 return ast::Pass< core_t >::resultN<container_t, node_t>{ mutated, new_kids }; 319 381 } 320 382 … … 334 396 auto new_val = call_accept( old_val ); 335 397 336 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR");337 338 if( __pass::differs(old_val, new_val)) {398 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR"); 399 400 if( new_val.differs ) { 339 401 auto new_parent = __pass::mutate<core_t>(parent); 340 new_ parent->*child = new_val;402 new_val.apply(new_parent, child); 341 403 parent = new_parent; 342 404 } … … 360 422 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR"); 361 423 362 if( __pass::differs(old_val, new_val)) {424 if( new_val.differs ) { 363 425 auto new_parent = __pass::mutate<core_t>(parent); 364 new_ parent->*child = new_val;426 new_val.apply( new_parent, child ); 365 427 parent = new_parent; 366 428 } … … 452 514 VISIT_START( node ); 453 515 454 VISIT(516 if ( __visit_children() ) { 455 517 { 456 518 guard_symtab guard { *this }; … … 460 522 maybe_accept( node, &ObjectDecl::bitfieldWidth ); 461 523 maybe_accept( node, &ObjectDecl::attributes ); 462 )524 } 463 525 464 526 __pass::symtab::addId( core, 0, node ); … … 475 537 __pass::symtab::addId( core, 0, node ); 476 538 477 VISIT(maybe_accept( node, &FunctionDecl::withExprs );) 539 if ( __visit_children() ) { 540 maybe_accept( node, &FunctionDecl::withExprs ); 541 } 478 542 { 479 543 // with clause introduces a level of scope (for the with expression members). … … 493 557 } }; 494 558 __pass::symtab::addId( core, 0, func ); 495 VISIT(559 if ( __visit_children() ) { 496 560 // parameter declarations 497 561 maybe_accept( node, &FunctionDecl::params ); … … 509 573 maybe_accept( node, &FunctionDecl::stmts ); 510 574 maybe_accept( node, &FunctionDecl::attributes ); 511 )575 } 512 576 } 513 577 } … … 526 590 __pass::symtab::addStructFwd( core, 0, node ); 527 591 528 VISIT({592 if ( __visit_children() ) { 529 593 guard_symtab guard { * this }; 530 594 maybe_accept( node, &StructDecl::params ); 531 595 maybe_accept( node, &StructDecl::members ); 532 596 maybe_accept( node, &StructDecl::attributes ); 533 } )597 } 534 598 535 599 // this addition replaces the forward declaration … … 548 612 __pass::symtab::addUnionFwd( core, 0, node ); 549 613 550 VISIT({614 if ( __visit_children() ) { 551 615 guard_symtab guard { * this }; 552 616 maybe_accept( node, &UnionDecl::params ); 553 617 maybe_accept( node, &UnionDecl::members ); 554 618 maybe_accept( node, &UnionDecl::attributes ); 555 } )619 } 556 620 557 621 __pass::symtab::addUnion( core, 0, node ); … … 568 632 __pass::symtab::addEnum( core, 0, node ); 569 633 570 VISIT(634 if ( __visit_children() ) { 571 635 // unlike structs, traits, and unions, enums inject their members into the global scope 572 636 maybe_accept( node, &EnumDecl::params ); 573 637 maybe_accept( node, &EnumDecl::members ); 574 638 maybe_accept( node, &EnumDecl::attributes ); 575 )639 } 576 640 577 641 VISIT_END( Decl, node ); … … 584 648 VISIT_START( node ); 585 649 586 VISIT({650 if ( __visit_children() ) { 587 651 guard_symtab guard { *this }; 588 652 maybe_accept( node, &TraitDecl::params ); 589 653 maybe_accept( node, &TraitDecl::members ); 590 654 maybe_accept( node, &TraitDecl::attributes ); 591 } )655 } 592 656 593 657 
__pass::symtab::addTrait( core, 0, node ); … … 602 666 VISIT_START( node ); 603 667 604 VISIT({668 if ( __visit_children() ) { 605 669 guard_symtab guard { *this }; 606 670 maybe_accept( node, &TypeDecl::base ); 607 } )671 } 608 672 609 673 // see A NOTE ON THE ORDER OF TRAVERSAL, above … … 612 676 __pass::symtab::addType( core, 0, node ); 613 677 614 VISIT(678 if ( __visit_children() ) { 615 679 maybe_accept( node, &TypeDecl::assertions ); 616 680 … … 619 683 maybe_accept( node, &TypeDecl::init ); 620 684 } 621 )685 } 622 686 623 687 VISIT_END( Decl, node ); … … 630 694 VISIT_START( node ); 631 695 632 VISIT({696 if ( __visit_children() ) { 633 697 guard_symtab guard { *this }; 634 698 maybe_accept( node, &TypedefDecl::base ); 635 } )699 } 636 700 637 701 __pass::symtab::addType( core, 0, node ); 638 702 639 VISIT( maybe_accept( node, &TypedefDecl::assertions ); ) 703 if ( __visit_children() ) { 704 maybe_accept( node, &TypedefDecl::assertions ); 705 } 640 706 641 707 VISIT_END( Decl, node ); … … 648 714 VISIT_START( node ); 649 715 650 VISIT(716 if ( __visit_children() ) { 651 717 maybe_accept( node, &AsmDecl::stmt ); 652 )718 } 653 719 654 720 VISIT_END( AsmDecl, node ); … … 661 727 VISIT_START( node ); 662 728 663 VISIT(729 if ( __visit_children() ) { 664 730 maybe_accept( node, &DirectiveDecl::stmt ); 665 )731 } 666 732 667 733 VISIT_END( DirectiveDecl, node ); … … 674 740 VISIT_START( node ); 675 741 676 VISIT(742 if ( __visit_children() ) { 677 743 maybe_accept( node, &StaticAssertDecl::cond ); 678 744 maybe_accept( node, &StaticAssertDecl::msg ); 679 )745 } 680 746 681 747 VISIT_END( StaticAssertDecl, node ); … … 687 753 const ast::CompoundStmt * ast::Pass< core_t >::visit( const ast::CompoundStmt * node ) { 688 754 VISIT_START( node ); 689 VISIT( 755 756 if ( __visit_children() ) { 690 757 // Do not enter (or leave) a new scope if atFunctionTop. Remember to save the result. 
691 758 auto guard1 = makeFuncGuard( [this, enterScope = !this->atFunctionTop]() { … … 704 771 guard_scope guard3 { *this }; 705 772 maybe_accept( node, &CompoundStmt::kids ); 706 ) 773 } 774 707 775 VISIT_END( CompoundStmt, node ); 708 776 } … … 714 782 VISIT_START( node ); 715 783 716 VISIT(784 if ( __visit_children() ) { 717 785 maybe_accept( node, &ExprStmt::expr ); 718 )786 } 719 787 720 788 VISIT_END( Stmt, node ); … … 727 795 VISIT_START( node ) 728 796 729 VISIT(797 if ( __visit_children() ) { 730 798 maybe_accept( node, &AsmStmt::instruction ); 731 799 maybe_accept( node, &AsmStmt::output ); 732 800 maybe_accept( node, &AsmStmt::input ); 733 801 maybe_accept( node, &AsmStmt::clobber ); 734 )802 } 735 803 736 804 VISIT_END( Stmt, node ); … … 752 820 VISIT_START( node ); 753 821 754 VISIT({822 if ( __visit_children() ) { 755 823 // if statements introduce a level of scope (for the initialization) 756 824 guard_symtab guard { *this }; 757 825 maybe_accept( node, &IfStmt::inits ); 758 826 maybe_accept( node, &IfStmt::cond ); 759 maybe_accept_as_compound( node, &IfStmt::then Part);760 maybe_accept_as_compound( node, &IfStmt::else Part);761 } )827 maybe_accept_as_compound( node, &IfStmt::then ); 828 maybe_accept_as_compound( node, &IfStmt::else_ ); 829 } 762 830 763 831 VISIT_END( Stmt, node ); … … 765 833 766 834 //-------------------------------------------------------------------------- 767 // While Stmt768 template< typename core_t > 769 const ast::Stmt * ast::Pass< core_t >::visit( const ast::While Stmt * node ) {770 VISIT_START( node ); 771 772 VISIT({835 // WhileDoStmt 836 template< typename core_t > 837 const ast::Stmt * ast::Pass< core_t >::visit( const ast::WhileDoStmt * node ) { 838 VISIT_START( node ); 839 840 if ( __visit_children() ) { 773 841 // while statements introduce a level of scope (for the initialization) 774 842 guard_symtab guard { *this }; 775 maybe_accept( node, &While Stmt::inits );776 maybe_accept( node, &While Stmt::cond );777 maybe_accept_as_compound( node, &While Stmt::body );778 } )843 maybe_accept( node, &WhileDoStmt::inits ); 844 maybe_accept( node, &WhileDoStmt::cond ); 845 maybe_accept_as_compound( node, &WhileDoStmt::body ); 846 } 779 847 780 848 VISIT_END( Stmt, node ); … … 787 855 VISIT_START( node ); 788 856 789 VISIT({857 if ( __visit_children() ) { 790 858 // for statements introduce a level of scope (for the initialization) 791 859 guard_symtab guard { *this }; … … 795 863 maybe_accept( node, &ForStmt::inc ); 796 864 maybe_accept_as_compound( node, &ForStmt::body ); 797 } )865 } 798 866 799 867 VISIT_END( Stmt, node ); … … 806 874 VISIT_START( node ); 807 875 808 VISIT(876 if ( __visit_children() ) { 809 877 maybe_accept( node, &SwitchStmt::cond ); 810 878 maybe_accept( node, &SwitchStmt::stmts ); 811 )879 } 812 880 813 881 VISIT_END( Stmt, node ); … … 820 888 VISIT_START( node ); 821 889 822 VISIT(890 if ( __visit_children() ) { 823 891 maybe_accept( node, &CaseStmt::cond ); 824 892 maybe_accept( node, &CaseStmt::stmts ); 825 )893 } 826 894 827 895 VISIT_END( Stmt, node ); … … 842 910 VISIT_START( node ); 843 911 844 VISIT(912 if ( __visit_children() ) { 845 913 maybe_accept( node, &ReturnStmt::expr ); 846 )914 } 847 915 848 916 VISIT_END( Stmt, node ); … … 855 923 VISIT_START( node ); 856 924 857 VISIT(925 if ( __visit_children() ) { 858 926 maybe_accept( node, &ThrowStmt::expr ); 859 927 maybe_accept( node, &ThrowStmt::target ); 860 )928 } 861 929 862 930 VISIT_END( Stmt, node ); … … 869 937 VISIT_START( node ); 870 938 871 VISIT(939 if ( 
__visit_children() ) { 872 940 maybe_accept( node, &TryStmt::body ); 873 941 maybe_accept( node, &TryStmt::handlers ); 874 942 maybe_accept( node, &TryStmt::finally ); 875 )943 } 876 944 877 945 VISIT_END( Stmt, node ); … … 884 952 VISIT_START( node ); 885 953 886 VISIT({954 if ( __visit_children() ) { 887 955 // catch statements introduce a level of scope (for the caught exception) 888 956 guard_symtab guard { *this }; … … 890 958 maybe_accept( node, &CatchStmt::cond ); 891 959 maybe_accept_as_compound( node, &CatchStmt::body ); 892 } )960 } 893 961 894 962 VISIT_END( Stmt, node ); … … 901 969 VISIT_START( node ); 902 970 903 VISIT(971 if ( __visit_children() ) { 904 972 maybe_accept( node, &FinallyStmt::body ); 905 )973 } 906 974 907 975 VISIT_END( Stmt, node ); … … 914 982 VISIT_START( node ); 915 983 916 VISIT(984 if ( __visit_children() ) { 917 985 maybe_accept( node, &SuspendStmt::then ); 918 )986 } 919 987 920 988 VISIT_END( Stmt, node ); … … 934 1002 // } 935 1003 936 VISIT({1004 if ( __visit_children() ) { 937 1005 std::vector<WaitForStmt::Clause> new_clauses; 938 1006 new_clauses.reserve( node->clauses.size() ); … … 942 1010 const Expr * func = clause.target.func ? clause.target.func->accept(*this) : nullptr; 943 1011 if(func != clause.target.func) mutated = true; 1012 else func = nullptr; 944 1013 945 1014 std::vector<ptr<Expr>> new_args; … … 947 1016 for( const auto & arg : clause.target.args ) { 948 1017 auto a = arg->accept(*this); 949 new_args.push_back( a ); 950 if( a != arg ) mutated = true; 1018 if( a != arg ) { 1019 mutated = true; 1020 new_args.push_back( a ); 1021 } else 1022 new_args.push_back( nullptr ); 951 1023 } 952 1024 953 1025 const Stmt * stmt = clause.stmt ? clause.stmt->accept(*this) : nullptr; 954 1026 if(stmt != clause.stmt) mutated = true; 1027 else stmt = nullptr; 955 1028 956 1029 const Expr * cond = clause.cond ? 
clause.cond->accept(*this) : nullptr; 957 1030 if(cond != clause.cond) mutated = true; 1031 else cond = nullptr; 958 1032 959 1033 new_clauses.push_back( WaitForStmt::Clause{ {func, std::move(new_args) }, stmt, cond } ); … … 962 1036 if(mutated) { 963 1037 auto n = __pass::mutate<core_t>(node); 964 n->clauses = std::move( new_clauses ); 1038 for(size_t i = 0; i < new_clauses.size(); i++) { 1039 if(new_clauses.at(i).target.func != nullptr) std::swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func); 1040 1041 for(size_t j = 0; j < new_clauses.at(i).target.args.size(); j++) { 1042 if(new_clauses.at(i).target.args.at(j) != nullptr) std::swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j)); 1043 } 1044 1045 if(new_clauses.at(i).stmt != nullptr) std::swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt); 1046 if(new_clauses.at(i).cond != nullptr) std::swap(n->clauses.at(i).cond, new_clauses.at(i).cond); 1047 } 965 1048 node = n; 966 1049 } 967 } )1050 } 968 1051 969 1052 #define maybe_accept(field) \ 970 1053 if(node->field) { \ 971 1054 auto nval = call_accept( node->field ); \ 972 if(nval != node->field) { \1055 if(nval.differs ) { \ 973 1056 auto nparent = __pass::mutate<core_t>(node); \ 974 nparent->field = nval ; \1057 nparent->field = nval.value; \ 975 1058 node = nparent; \ 976 1059 } \ 977 1060 } 978 1061 979 VISIT(1062 if ( __visit_children() ) { 980 1063 maybe_accept( timeout.time ); 981 1064 maybe_accept( timeout.stmt ); … … 983 1066 maybe_accept( orElse.stmt ); 984 1067 maybe_accept( orElse.cond ); 985 )1068 } 986 1069 987 1070 #undef maybe_accept … … 996 1079 VISIT_START( node ); 997 1080 998 VISIT(1081 if ( __visit_children() ) { 999 1082 maybe_accept( node, &WithStmt::exprs ); 1000 1083 { … … 1004 1087 maybe_accept( node, &WithStmt::stmt ); 1005 1088 } 1006 ) 1089 } 1090 1007 1091 VISIT_END( Stmt, node ); 1008 1092 } … … 1022 1106 VISIT_START( node ); 1023 1107 1024 VISIT(1108 if ( __visit_children() ) { 1025 1109 maybe_accept( node, &DeclStmt::decl ); 1026 )1110 } 1027 1111 1028 1112 VISIT_END( Stmt, node ); … … 1037 1121 // For now this isn't visited, it is unclear if this causes problem 1038 1122 // if all tests are known to pass, remove this code 1039 VISIT(1123 if ( __visit_children() ) { 1040 1124 maybe_accept( node, &ImplicitCtorDtorStmt::callStmt ); 1041 )1125 } 1042 1126 1043 1127 VISIT_END( Stmt, node ); … … 1050 1134 VISIT_START( node ); 1051 1135 1052 VISIT({1136 if ( __visit_children() ) { 1053 1137 // mutex statements introduce a level of scope (for the initialization) 1054 1138 guard_symtab guard { *this }; 1055 1139 maybe_accept( node, &MutexStmt::stmt ); 1056 1140 maybe_accept( node, &MutexStmt::mutexObjs ); 1057 } )1141 } 1058 1142 1059 1143 VISIT_END( Stmt, node ); … … 1066 1150 VISIT_START( node ); 1067 1151 1068 VISIT(1152 if ( __visit_children() ) { 1069 1153 { 1070 1154 guard_symtab guard { *this }; … … 1073 1157 maybe_accept( node, &ApplicationExpr::func ); 1074 1158 maybe_accept( node, &ApplicationExpr::args ); 1075 )1159 } 1076 1160 1077 1161 VISIT_END( Expr, node ); … … 1084 1168 VISIT_START( node ); 1085 1169 1086 VISIT(1170 if ( __visit_children() ) { 1087 1171 { 1088 1172 guard_symtab guard { *this }; … … 1091 1175 1092 1176 maybe_accept( node, &UntypedExpr::args ); 1093 )1177 } 1094 1178 1095 1179 VISIT_END( Expr, node ); … … 1102 1186 VISIT_START( node ); 1103 1187 1104 VISIT({1188 if ( __visit_children() ) { 1105 1189 guard_symtab guard { *this }; 1106 1190 maybe_accept( node, &NameExpr::result ); 1107 } )1191 } 
1108 1192 1109 1193 VISIT_END( Expr, node ); … … 1116 1200 VISIT_START( node ); 1117 1201 1118 VISIT({ 1202 if ( __visit_children() ) { 1203 { 1119 1204 guard_symtab guard { *this }; 1120 1205 maybe_accept( node, &CastExpr::result ); 1121 1206 } 1122 1207 maybe_accept( node, &CastExpr::arg ); 1123 )1208 } 1124 1209 1125 1210 VISIT_END( Expr, node ); … … 1132 1217 VISIT_START( node ); 1133 1218 1134 VISIT({ 1219 if ( __visit_children() ) { 1220 { 1135 1221 guard_symtab guard { *this }; 1136 1222 maybe_accept( node, &KeywordCastExpr::result ); 1137 1223 } 1138 1224 maybe_accept( node, &KeywordCastExpr::arg ); 1139 )1225 } 1140 1226 1141 1227 VISIT_END( Expr, node ); … … 1148 1234 VISIT_START( node ); 1149 1235 1150 VISIT({ 1236 if ( __visit_children() ) { 1237 { 1151 1238 guard_symtab guard { *this }; 1152 1239 maybe_accept( node, &VirtualCastExpr::result ); 1153 1240 } 1154 1241 maybe_accept( node, &VirtualCastExpr::arg ); 1155 )1242 } 1156 1243 1157 1244 VISIT_END( Expr, node ); … … 1164 1251 VISIT_START( node ); 1165 1252 1166 VISIT({ 1253 if ( __visit_children() ) { 1254 { 1167 1255 guard_symtab guard { *this }; 1168 1256 maybe_accept( node, &AddressExpr::result ); 1169 1257 } 1170 1258 maybe_accept( node, &AddressExpr::arg ); 1171 )1259 } 1172 1260 1173 1261 VISIT_END( Expr, node ); … … 1180 1268 VISIT_START( node ); 1181 1269 1182 VISIT({1270 if ( __visit_children() ) { 1183 1271 guard_symtab guard { *this }; 1184 1272 maybe_accept( node, &LabelAddressExpr::result ); 1185 } )1273 } 1186 1274 1187 1275 VISIT_END( Expr, node ); … … 1194 1282 VISIT_START( node ); 1195 1283 1196 VISIT({ 1284 if ( __visit_children() ) { 1285 { 1197 1286 guard_symtab guard { *this }; 1198 1287 maybe_accept( node, &UntypedMemberExpr::result ); … … 1200 1289 maybe_accept( node, &UntypedMemberExpr::aggregate ); 1201 1290 maybe_accept( node, &UntypedMemberExpr::member ); 1202 )1291 } 1203 1292 1204 1293 VISIT_END( Expr, node ); … … 1211 1300 VISIT_START( node ); 1212 1301 1213 VISIT({ 1302 if ( __visit_children() ) { 1303 { 1214 1304 guard_symtab guard { *this }; 1215 1305 maybe_accept( node, &MemberExpr::result ); 1216 1306 } 1217 1307 maybe_accept( node, &MemberExpr::aggregate ); 1218 )1308 } 1219 1309 1220 1310 VISIT_END( Expr, node ); … … 1227 1317 VISIT_START( node ); 1228 1318 1229 VISIT({1319 if ( __visit_children() ) { 1230 1320 guard_symtab guard { *this }; 1231 1321 maybe_accept( node, &VariableExpr::result ); 1232 } )1322 } 1233 1323 1234 1324 VISIT_END( Expr, node ); … … 1241 1331 VISIT_START( node ); 1242 1332 1243 VISIT({1333 if ( __visit_children() ) { 1244 1334 guard_symtab guard { *this }; 1245 1335 maybe_accept( node, &ConstantExpr::result ); 1246 } )1336 } 1247 1337 1248 1338 VISIT_END( Expr, node ); … … 1255 1345 VISIT_START( node ); 1256 1346 1257 VISIT({ 1347 if ( __visit_children() ) { 1348 { 1258 1349 guard_symtab guard { *this }; 1259 1350 maybe_accept( node, &SizeofExpr::result ); … … 1264 1355 maybe_accept( node, &SizeofExpr::expr ); 1265 1356 } 1266 )1357 } 1267 1358 1268 1359 VISIT_END( Expr, node ); … … 1275 1366 VISIT_START( node ); 1276 1367 1277 VISIT({ 1368 if ( __visit_children() ) { 1369 { 1278 1370 guard_symtab guard { *this }; 1279 1371 maybe_accept( node, &AlignofExpr::result ); … … 1284 1376 maybe_accept( node, &AlignofExpr::expr ); 1285 1377 } 1286 )1378 } 1287 1379 1288 1380 VISIT_END( Expr, node ); … … 1295 1387 VISIT_START( node ); 1296 1388 1297 VISIT({ 1389 if ( __visit_children() ) { 1390 { 1298 1391 guard_symtab guard { *this }; 1299 1392 maybe_accept( node, 
&UntypedOffsetofExpr::result ); 1300 1393 } 1301 1394 maybe_accept( node, &UntypedOffsetofExpr::type ); 1302 )1395 } 1303 1396 1304 1397 VISIT_END( Expr, node ); … … 1311 1404 VISIT_START( node ); 1312 1405 1313 VISIT({ 1406 if ( __visit_children() ) { 1407 { 1314 1408 guard_symtab guard { *this }; 1315 1409 maybe_accept( node, &OffsetofExpr::result ); 1316 1410 } 1317 1411 maybe_accept( node, &OffsetofExpr::type ); 1318 )1412 } 1319 1413 1320 1414 VISIT_END( Expr, node ); … … 1327 1421 VISIT_START( node ); 1328 1422 1329 VISIT({ 1423 if ( __visit_children() ) { 1424 { 1330 1425 guard_symtab guard { *this }; 1331 1426 maybe_accept( node, &OffsetPackExpr::result ); 1332 1427 } 1333 1428 maybe_accept( node, &OffsetPackExpr::type ); 1334 )1429 } 1335 1430 1336 1431 VISIT_END( Expr, node ); … … 1343 1438 VISIT_START( node ); 1344 1439 1345 VISIT({ 1440 if ( __visit_children() ) { 1441 { 1346 1442 guard_symtab guard { *this }; 1347 1443 maybe_accept( node, &LogicalExpr::result ); … … 1349 1445 maybe_accept( node, &LogicalExpr::arg1 ); 1350 1446 maybe_accept( node, &LogicalExpr::arg2 ); 1351 )1447 } 1352 1448 1353 1449 VISIT_END( Expr, node ); … … 1360 1456 VISIT_START( node ); 1361 1457 1362 VISIT({ 1458 if ( __visit_children() ) { 1459 { 1363 1460 guard_symtab guard { *this }; 1364 1461 maybe_accept( node, &ConditionalExpr::result ); … … 1367 1464 maybe_accept( node, &ConditionalExpr::arg2 ); 1368 1465 maybe_accept( node, &ConditionalExpr::arg3 ); 1369 )1466 } 1370 1467 1371 1468 VISIT_END( Expr, node ); … … 1378 1475 VISIT_START( node ); 1379 1476 1380 VISIT({ 1477 if ( __visit_children() ) { 1478 { 1381 1479 guard_symtab guard { *this }; 1382 1480 maybe_accept( node, &CommaExpr::result ); … … 1384 1482 maybe_accept( node, &CommaExpr::arg1 ); 1385 1483 maybe_accept( node, &CommaExpr::arg2 ); 1386 )1484 } 1387 1485 1388 1486 VISIT_END( Expr, node ); … … 1395 1493 VISIT_START( node ); 1396 1494 1397 VISIT({ 1495 if ( __visit_children() ) { 1496 { 1398 1497 guard_symtab guard { *this }; 1399 1498 maybe_accept( node, &TypeExpr::result ); 1400 1499 } 1401 1500 maybe_accept( node, &TypeExpr::type ); 1402 )1501 } 1403 1502 1404 1503 VISIT_END( Expr, node ); … … 1411 1510 VISIT_START( node ); 1412 1511 1413 VISIT({ 1512 if ( __visit_children() ) { 1513 { 1414 1514 guard_symtab guard { *this }; 1415 1515 maybe_accept( node, &AsmExpr::result ); … … 1417 1517 maybe_accept( node, &AsmExpr::constraint ); 1418 1518 maybe_accept( node, &AsmExpr::operand ); 1419 )1519 } 1420 1520 1421 1521 VISIT_END( Expr, node ); … … 1428 1528 VISIT_START( node ); 1429 1529 1430 VISIT({ 1530 if ( __visit_children() ) { 1531 { 1431 1532 guard_symtab guard { *this }; 1432 1533 maybe_accept( node, &ImplicitCopyCtorExpr::result ); 1433 1534 } 1434 1535 maybe_accept( node, &ImplicitCopyCtorExpr::callExpr ); 1435 )1536 } 1436 1537 1437 1538 VISIT_END( Expr, node ); … … 1444 1545 VISIT_START( node ); 1445 1546 1446 VISIT({ 1547 if ( __visit_children() ) { 1548 { 1447 1549 guard_symtab guard { *this }; 1448 1550 maybe_accept( node, &ConstructorExpr::result ); 1449 1551 } 1450 1552 maybe_accept( node, &ConstructorExpr::callExpr ); 1451 )1553 } 1452 1554 1453 1555 VISIT_END( Expr, node ); … … 1460 1562 VISIT_START( node ); 1461 1563 1462 VISIT({ 1564 if ( __visit_children() ) { 1565 { 1463 1566 guard_symtab guard { *this }; 1464 1567 maybe_accept( node, &CompoundLiteralExpr::result ); 1465 1568 } 1466 1569 maybe_accept( node, &CompoundLiteralExpr::init ); 1467 )1570 } 1468 1571 1469 1572 VISIT_END( Expr, node ); … … 1476 1579 
VISIT_START( node ); 1477 1580 1478 VISIT({ 1581 if ( __visit_children() ) { 1582 { 1479 1583 guard_symtab guard { *this }; 1480 1584 maybe_accept( node, &RangeExpr::result ); … … 1482 1586 maybe_accept( node, &RangeExpr::low ); 1483 1587 maybe_accept( node, &RangeExpr::high ); 1484 )1588 } 1485 1589 1486 1590 VISIT_END( Expr, node ); … … 1493 1597 VISIT_START( node ); 1494 1598 1495 VISIT({ 1599 if ( __visit_children() ) { 1600 { 1496 1601 guard_symtab guard { *this }; 1497 1602 maybe_accept( node, &UntypedTupleExpr::result ); 1498 1603 } 1499 1604 maybe_accept( node, &UntypedTupleExpr::exprs ); 1500 )1605 } 1501 1606 1502 1607 VISIT_END( Expr, node ); … … 1509 1614 VISIT_START( node ); 1510 1615 1511 VISIT({ 1616 if ( __visit_children() ) { 1617 { 1512 1618 guard_symtab guard { *this }; 1513 1619 maybe_accept( node, &TupleExpr::result ); 1514 1620 } 1515 1621 maybe_accept( node, &TupleExpr::exprs ); 1516 )1622 } 1517 1623 1518 1624 VISIT_END( Expr, node ); … … 1525 1631 VISIT_START( node ); 1526 1632 1527 VISIT({ 1633 if ( __visit_children() ) { 1634 { 1528 1635 guard_symtab guard { *this }; 1529 1636 maybe_accept( node, &TupleIndexExpr::result ); 1530 1637 } 1531 1638 maybe_accept( node, &TupleIndexExpr::tuple ); 1532 )1639 } 1533 1640 1534 1641 VISIT_END( Expr, node ); … … 1541 1648 VISIT_START( node ); 1542 1649 1543 VISIT({ 1650 if ( __visit_children() ) { 1651 { 1544 1652 guard_symtab guard { *this }; 1545 1653 maybe_accept( node, &TupleAssignExpr::result ); 1546 1654 } 1547 1655 maybe_accept( node, &TupleAssignExpr::stmtExpr ); 1548 )1656 } 1549 1657 1550 1658 VISIT_END( Expr, node ); … … 1557 1665 VISIT_START( node ); 1558 1666 1559 VISIT(// don't want statements from outer CompoundStmts to be added to this StmtExpr 1667 if ( __visit_children() ) { 1668 // don't want statements from outer CompoundStmts to be added to this StmtExpr 1560 1669 // get the stmts that will need to be spliced in 1561 1670 auto stmts_before = __pass::stmtsToAddBefore( core, 0); … … 1574 1683 maybe_accept( node, &StmtExpr::returnDecls ); 1575 1684 maybe_accept( node, &StmtExpr::dtors ); 1576 )1685 } 1577 1686 1578 1687 VISIT_END( Expr, node ); … … 1585 1694 VISIT_START( node ); 1586 1695 1587 VISIT({ 1696 if ( __visit_children() ) { 1697 { 1588 1698 guard_symtab guard { *this }; 1589 1699 maybe_accept( node, &UniqueExpr::result ); 1590 1700 } 1591 1701 maybe_accept( node, &UniqueExpr::expr ); 1592 )1702 } 1593 1703 1594 1704 VISIT_END( Expr, node ); … … 1601 1711 VISIT_START( node ); 1602 1712 1603 VISIT({ 1713 if ( __visit_children() ) { 1714 { 1604 1715 guard_symtab guard { *this }; 1605 1716 maybe_accept( node, &UntypedInitExpr::result ); … … 1607 1718 maybe_accept( node, &UntypedInitExpr::expr ); 1608 1719 // not currently visiting initAlts, but this doesn't matter since this node is only used in the resolver. 
1609 )1720 } 1610 1721 1611 1722 VISIT_END( Expr, node ); … … 1618 1729 VISIT_START( node ); 1619 1730 1620 VISIT({ 1731 if ( __visit_children() ) { 1732 { 1621 1733 guard_symtab guard { *this }; 1622 1734 maybe_accept( node, &InitExpr::result ); … … 1624 1736 maybe_accept( node, &InitExpr::expr ); 1625 1737 maybe_accept( node, &InitExpr::designation ); 1626 )1738 } 1627 1739 1628 1740 VISIT_END( Expr, node ); … … 1635 1747 VISIT_START( node ); 1636 1748 1637 VISIT({ 1749 if ( __visit_children() ) { 1750 { 1638 1751 guard_symtab guard { *this }; 1639 1752 maybe_accept( node, &DeletedExpr::result ); … … 1641 1754 maybe_accept( node, &DeletedExpr::expr ); 1642 1755 // don't visit deleteStmt, because it is a pointer to somewhere else in the tree. 1643 )1756 } 1644 1757 1645 1758 VISIT_END( Expr, node ); … … 1652 1765 VISIT_START( node ); 1653 1766 1654 VISIT({ 1767 if ( __visit_children() ) { 1768 { 1655 1769 guard_symtab guard { *this }; 1656 1770 maybe_accept( node, &DefaultArgExpr::result ); 1657 1771 } 1658 1772 maybe_accept( node, &DefaultArgExpr::expr ); 1659 )1773 } 1660 1774 1661 1775 VISIT_END( Expr, node ); … … 1668 1782 VISIT_START( node ); 1669 1783 1670 VISIT({ 1784 if ( __visit_children() ) { 1785 { 1671 1786 guard_symtab guard { *this }; 1672 1787 maybe_accept( node, &GenericExpr::result ); … … 1697 1812 node = n; 1698 1813 } 1699 )1814 } 1700 1815 1701 1816 VISIT_END( Expr, node ); … … 1726 1841 VISIT_START( node ); 1727 1842 1728 VISIT(1843 if ( __visit_children() ) { 1729 1844 // xxx - should PointerType visit/mutate dimension? 1730 1845 maybe_accept( node, &PointerType::base ); 1731 )1846 } 1732 1847 1733 1848 VISIT_END( Type, node ); … … 1740 1855 VISIT_START( node ); 1741 1856 1742 VISIT(1857 if ( __visit_children() ) { 1743 1858 maybe_accept( node, &ArrayType::dimension ); 1744 1859 maybe_accept( node, &ArrayType::base ); 1745 )1860 } 1746 1861 1747 1862 VISIT_END( Type, node ); … … 1754 1869 VISIT_START( node ); 1755 1870 1756 VISIT(1871 if ( __visit_children() ) { 1757 1872 maybe_accept( node, &ReferenceType::base ); 1758 )1873 } 1759 1874 1760 1875 VISIT_END( Type, node ); … … 1767 1882 VISIT_START( node ); 1768 1883 1769 VISIT(1884 if ( __visit_children() ) { 1770 1885 maybe_accept( node, &QualifiedType::parent ); 1771 1886 maybe_accept( node, &QualifiedType::child ); 1772 )1887 } 1773 1888 1774 1889 VISIT_END( Type, node ); … … 1781 1896 VISIT_START( node ); 1782 1897 1783 VISIT({1898 if ( __visit_children() ) { 1784 1899 // guard_forall_subs forall_guard { *this, node }; 1785 1900 // mutate_forall( node ); … … 1787 1902 maybe_accept( node, &FunctionType::returns ); 1788 1903 maybe_accept( node, &FunctionType::params ); 1789 } )1904 } 1790 1905 1791 1906 VISIT_END( Type, node ); … … 1800 1915 __pass::symtab::addStruct( core, 0, node->name ); 1801 1916 1802 VISIT({1917 if ( __visit_children() ) { 1803 1918 guard_symtab guard { *this }; 1804 1919 maybe_accept( node, &StructInstType::params ); 1805 } )1920 } 1806 1921 1807 1922 VISIT_END( Type, node ); … … 1816 1931 __pass::symtab::addUnion( core, 0, node->name ); 1817 1932 1818 VISIT({1933 if ( __visit_children() ) { 1819 1934 guard_symtab guard { *this }; 1820 1935 maybe_accept( node, &UnionInstType::params ); 1821 } )1936 } 1822 1937 1823 1938 VISIT_END( Type, node ); … … 1830 1945 VISIT_START( node ); 1831 1946 1832 VISIT({1947 if ( __visit_children() ) { 1833 1948 maybe_accept( node, &EnumInstType::params ); 1834 } )1949 } 1835 1950 1836 1951 VISIT_END( Type, node ); … … 1843 1958 VISIT_START( node ); 1844 1959 
1845 VISIT({1960 if ( __visit_children() ) { 1846 1961 maybe_accept( node, &TraitInstType::params ); 1847 } )1962 } 1848 1963 1849 1964 VISIT_END( Type, node ); … … 1856 1971 VISIT_START( node ); 1857 1972 1858 VISIT(1973 if ( __visit_children() ) { 1859 1974 { 1860 1975 maybe_accept( node, &TypeInstType::params ); … … 1862 1977 // ensure that base re-bound if doing substitution 1863 1978 __pass::forall::replace( core, 0, node ); 1864 )1979 } 1865 1980 1866 1981 VISIT_END( Type, node ); … … 1873 1988 VISIT_START( node ); 1874 1989 1875 VISIT(1990 if ( __visit_children() ) { 1876 1991 maybe_accept( node, &TupleType::types ); 1877 1992 maybe_accept( node, &TupleType::members ); 1878 )1993 } 1879 1994 1880 1995 VISIT_END( Type, node ); … … 1887 2002 VISIT_START( node ); 1888 2003 1889 VISIT(2004 if ( __visit_children() ) { 1890 2005 maybe_accept( node, &TypeofType::expr ); 1891 )2006 } 1892 2007 1893 2008 VISIT_END( Type, node ); … … 1900 2015 VISIT_START( node ); 1901 2016 1902 VISIT(2017 if ( __visit_children() ) { 1903 2018 maybe_accept( node, &VTableType::base ); 1904 )2019 } 1905 2020 1906 2021 VISIT_END( Type, node ); … … 1950 2065 VISIT_START( node ); 1951 2066 1952 VISIT( maybe_accept( node, &Designation::designators ); ) 2067 if ( __visit_children() ) { 2068 maybe_accept( node, &Designation::designators ); 2069 } 1953 2070 1954 2071 VISIT_END( Designation, node ); … … 1961 2078 VISIT_START( node ); 1962 2079 1963 VISIT(2080 if ( __visit_children() ) { 1964 2081 maybe_accept( node, &SingleInit::value ); 1965 )2082 } 1966 2083 1967 2084 VISIT_END( Init, node ); … … 1974 2091 VISIT_START( node ); 1975 2092 1976 VISIT(2093 if ( __visit_children() ) { 1977 2094 maybe_accept( node, &ListInit::designations ); 1978 2095 maybe_accept( node, &ListInit::initializers ); 1979 )2096 } 1980 2097 1981 2098 VISIT_END( Init, node ); … … 1988 2105 VISIT_START( node ); 1989 2106 1990 VISIT(2107 if ( __visit_children() ) { 1991 2108 maybe_accept( node, &ConstructorInit::ctor ); 1992 2109 maybe_accept( node, &ConstructorInit::dtor ); 1993 2110 maybe_accept( node, &ConstructorInit::init ); 1994 )2111 } 1995 2112 1996 2113 VISIT_END( Init, node ); … … 2003 2120 VISIT_START( node ); 2004 2121 2005 VISIT(2122 if ( __visit_children() ) { 2006 2123 maybe_accept( node, &Attribute::params ); 2007 )2124 } 2008 2125 2009 2126 VISIT_END( Attribute, node ); … … 2016 2133 VISIT_START( node ); 2017 2134 2018 VISIT(2135 if ( __visit_children() ) { 2019 2136 { 2020 2137 bool mutated = false; … … 2032 2149 } 2033 2150 } 2034 )2151 } 2035 2152 2036 2153 VISIT_END( TypeSubstitution, node ); … … 2038 2155 2039 2156 #undef VISIT_START 2040 #undef VISIT2041 2157 #undef VISIT_END -
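The hunk above is a mechanical conversion: every `VISIT( ... )` macro invocation in Pass.impl.hpp becomes an explicit `if ( __visit_children() ) { ... }` guard around the child traversal, and the matching `#undef VISIT` disappears. For orientation, the following standalone sketch shows the shape the generated visitors now share: children are only traversed when a pruning flag, which the pass core may clear, allows it. Everything in the sketch (`Node`, `MiniPass`, `visit_children`) is illustrative rather than CFA's real pass plumbing.

// Toy analogue of the guarded traversal now emitted in Pass.impl.hpp.
// Build with a recent compiler, e.g. g++ -std=c++17 guarded_visit.cpp
#include <iostream>
#include <vector>

struct Node { int value; std::vector<Node> kids; };

struct MiniPass {
	bool visit_children = true;              // a core clears this to prune a subtree

	void previsit( const Node & n ) {
		std::cout << "visiting " << n.value << '\n';
		if ( n.value < 0 ) visit_children = false;   // example pruning rule
	}

	void visit( const Node & n ) {
		visit_children = true;               // reset per node
		previsit( n );
		if ( visit_children ) {              // moral equivalent of __visit_children()
			for ( const Node & kid : n.kids ) visit( kid );
		}
	}
};

int main() {
	Node root{ 1, { { 2, {} }, { -3, { { 4, {} } } } } };
	MiniPass pass;
	pass.visit( root );                      // prints 1, 2 and -3; the subtree under -3 is skipped
}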
src/AST/Print.cpp
r97c215f rf5a51db 333 333 print( node->funcSpec ); 334 334 335 if ( node->type ) { 335 336 337 if ( node->type && node->isTypeFixed ) { 336 338 node->type->accept( *this ); 337 339 } else { 338 os << "untyped entity"; 340 if (!node->type_params.empty()) { 341 os << "forall" << endl; 342 ++indent; 343 printAll(node->type_params); 344 os << indent; 345 --indent; 346 347 if (!node->assertions.empty()) { 348 os << "with assertions" << endl; 349 ++indent; 350 printAll(node->assertions); 351 os << indent; 352 --indent; 353 } 354 } 355 356 os << "function" << endl; 357 if ( ! node->params.empty() ) { 358 os << indent << "... with parameters" << endl; 359 ++indent; 360 printAll( node->params ); 361 if ( node->type->isVarArgs ) { 362 os << indent << "and a variable number of other arguments" << endl; 363 } 364 --indent; 365 } else if ( node->type->isVarArgs ) { 366 os << indent+1 << "accepting unspecified arguments" << endl; 367 } 368 369 os << indent << "... returning"; 370 if ( node->returns.empty() ) { 371 os << " nothing" << endl; 372 } else { 373 os << endl; 374 ++indent; 375 printAll( node->returns ); 376 --indent; 377 } 339 378 } 340 379 … … 472 511 ++indent; 473 512 os << indent; 474 safe_print( node->then Part);475 --indent; 476 477 if ( node->else Part!= 0 ) {513 safe_print( node->then ); 514 --indent; 515 516 if ( node->else_ != 0 ) { 478 517 os << indent << "... else:" << endl; 479 518 ++indent; 480 519 os << indent; 481 node->else Part->accept( *this );520 node->else_->accept( *this ); 482 521 --indent; 483 522 } // if … … 485 524 } 486 525 487 virtual const ast::Stmt * visit( const ast::While Stmt * node ) override final {526 virtual const ast::Stmt * visit( const ast::WhileDoStmt * node ) override final { 488 527 if ( node->isDoWhile ) { os << "Do-"; } 489 528 os << "While on condition:" << endl; -
src/AST/Stmt.cpp
r97c215f rf5a51db
9 9 // Author : Aaron B. Moss
10 10 // Created On : Wed May 8 13:00:00 2019
11 // Last Modified By : Andrew Beach
12 // Last Modified On : Wed May 15 15:53:00 2019
13 // Update Count : 2
11 // Last Modified By : Peter A. Buhr
12 // Last Modified On : Wed Feb 2 19:01:20 2022
13 // Update Count : 3
14 14 //
15 15 … …
56 56
57 57 // --- BranchStmt
58 BranchStmt::BranchStmt( const CodeLocation& loc, Kind kind, Label target, std::vector<Label>&& labels )
59 : Stmt(loc, std::move(labels)), originalTarget(target), target(target), kind(kind) {
58 BranchStmt::BranchStmt( const CodeLocation& loc, Kind kind, Label target, const std::vector<Label>&& labels )
59 : Stmt(loc, std::move(labels)), originalTarget(target), target(target), kind(kind) {
60 60 // Make sure a syntax error hasn't slipped through.
61 61 assert( Goto != kind || !target.empty() );
-
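One detail of the new signature above (and of the matching declarations in Stmt.hpp below) is worth spelling out: the label vectors are now taken as `const std::vector<Label> &&`, and `std::move` on a `const` object produces a `const` rvalue that cannot bind to a move constructor, so the member initialisation falls back to a copy. The short, self-contained snippet below demonstrates that rule with plain standard-library types; it illustrates the C++ semantics only and is not CFA code.

// g++ -std=c++11 const_rvalue.cpp && ./a.out
#include <iostream>
#include <utility>
#include <vector>

struct Probe {
	Probe() = default;
	Probe( const Probe & ) { std::cout << "copy\n"; }
	Probe( Probe && ) noexcept { std::cout << "move\n"; }
};

// Mirrors the new parameter style: a const rvalue reference.
void takeConstRvalue( const std::vector<Probe> && v ) {
	std::vector<Probe> member = std::move( v );   // const source: the vector is copied element by element
}

void takeRvalue( std::vector<Probe> && v ) {
	std::vector<Probe> member = std::move( v );   // non-const source: the buffer is moved, no element copies
}

int main() {
	std::vector<Probe> a( 1 ), b( 1 );
	takeConstRvalue( std::move( a ) );   // prints "copy"
	takeRvalue( std::move( b ) );        // prints nothing
}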
src/AST/Stmt.hpp
r97c215f rf5a51db 9 9 // Author : Aaron B. Moss 10 10 // Created On : Wed May 8 13:00:00 2019 11 // Last Modified By : Andrew Beach12 // Last Modified On : Fri May 17 12:45:00 201913 // Update Count : 511 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 20:06:41 2022 13 // Update Count : 34 14 14 // 15 15 … … 17 17 18 18 #include <list> 19 #include <utility> 19 #include <utility> // for move 20 20 #include <vector> 21 21 22 22 #include "Label.hpp" 23 #include "Node.hpp" 23 #include "Node.hpp" // for node, ptr 24 24 #include "ParseNode.hpp" 25 25 #include "Visitor.hpp" … … 27 27 28 28 // Must be included in *all* AST classes; should be #undef'd at the end of the file 29 #define MUTATE_FRIEND 29 #define MUTATE_FRIEND \ 30 30 template<typename node_t> friend node_t * mutate(const node_t * node); \ 31 31 template<typename node_t> friend node_t * shallowCopy(const node_t * node); 32 32 33 33 namespace ast { 34 35 34 class Expr; 36 35 37 // /Base statement node36 // Base statement node 38 37 class Stmt : public ParseNode { 39 public:38 public: 40 39 std::vector<Label> labels; 41 40 42 Stmt( const CodeLocation & loc, std::vector<Label> && labels = {} )43 : ParseNode(loc), labels(std::move(labels)) {}44 45 Stmt(const Stmt & o) : ParseNode(o), labels(o.labels) {}41 Stmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 42 : ParseNode(loc), labels(std::move(labels)) {} 43 44 Stmt(const Stmt & o) : ParseNode(o), labels(o.labels) {} 46 45 47 46 const Stmt * accept( Visitor & v ) const override = 0; 48 private:47 private: 49 48 Stmt * clone() const override = 0; 50 49 MUTATE_FRIEND 51 50 }; 52 51 53 // / Compound statement `{ ... }`52 // Compound statement: { ... } 54 53 class CompoundStmt final : public Stmt { 55 public:54 public: 56 55 std::list<ptr<Stmt>> kids; 57 56 58 CompoundStmt(const CodeLocation & loc, std::list<ptr<Stmt>> && ks = {}, 59 std::vector<Label>&& labels = {} ) 60 : Stmt(loc, std::move(labels)), kids(std::move(ks)) {} 61 62 CompoundStmt( const CompoundStmt& o ); 63 CompoundStmt( CompoundStmt&& o ) = default; 57 CompoundStmt(const CodeLocation & loc, const std::list<ptr<Stmt>> && ks = {}, const std::vector<Label> && labels = {} ) 58 : Stmt(loc, std::move(labels)), kids(std::move(ks)) {} 59 60 CompoundStmt( const CompoundStmt & o ); 61 CompoundStmt( CompoundStmt && o ) = default; 64 62 65 63 void push_back( const Stmt * s ) { kids.emplace_back( s ); } … … 67 65 68 66 const CompoundStmt * accept( Visitor & v ) const override { return v.visit( this ); } 69 private:67 private: 70 68 CompoundStmt * clone() const override { return new CompoundStmt{ *this }; } 71 69 MUTATE_FRIEND 72 70 }; 73 71 74 // / Empty statment `;`72 // Empty statment: ; 75 73 class NullStmt final : public Stmt { 76 public:77 NullStmt( const CodeLocation & loc, std::vector<Label> && labels = {} )78 : Stmt(loc, std::move(labels)) {}74 public: 75 NullStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 76 : Stmt(loc, std::move(labels)) {} 79 77 80 78 const NullStmt * accept( Visitor & v ) const override { return v.visit( this ); } 81 private:79 private: 82 80 NullStmt * clone() const override { return new NullStmt{ *this }; } 83 81 MUTATE_FRIEND 84 82 }; 85 83 86 // /Expression wrapped by statement84 // Expression wrapped by statement 87 85 class ExprStmt final : public Stmt { 88 public:86 public: 89 87 ptr<Expr> expr; 90 88 91 ExprStmt( const CodeLocation & loc, const Expr* e, std::vector<Label>&& labels = {} )92 : Stmt(loc, std::move(labels)), expr(e) {}93 
94 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 95 private:89 ExprStmt( const CodeLocation & loc, const Expr* e, const std::vector<Label> && labels = {} ) 90 : Stmt(loc, std::move(labels)), expr(e) {} 91 92 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 93 private: 96 94 ExprStmt * clone() const override { return new ExprStmt{ *this }; } 97 95 MUTATE_FRIEND 98 96 }; 99 97 100 // / Assembly statement `asm ... ( "..." : ... )`98 // Assembly statement: asm ... ( "..." : ... ) 101 99 class AsmStmt final : public Stmt { 102 public:100 public: 103 101 bool isVolatile; 104 102 ptr<Expr> instruction; … … 108 106 109 107 AsmStmt( const CodeLocation & loc, bool isVolatile, const Expr * instruction, 110 std::vector<ptr<Expr>> && output,std::vector<ptr<Expr>> && input,111 std::vector<ptr<ConstantExpr>> && clobber,std::vector<Label> && gotoLabels,112 std::vector<Label> && labels = {})113 : Stmt(loc, std::move(labels)), isVolatile(isVolatile), instruction(instruction),114 output(std::move(output)), input(std::move(input)), clobber(std::move(clobber)),115 gotoLabels(std::move(gotoLabels)) {}116 117 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 118 private:108 const std::vector<ptr<Expr>> && output, const std::vector<ptr<Expr>> && input, 109 const std::vector<ptr<ConstantExpr>> && clobber, const std::vector<Label> && gotoLabels, 110 const std::vector<Label> && labels = {}) 111 : Stmt(loc, std::move(labels)), isVolatile(isVolatile), instruction(instruction), 112 output(std::move(output)), input(std::move(input)), clobber(std::move(clobber)), 113 gotoLabels(std::move(gotoLabels)) {} 114 115 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 116 private: 119 117 AsmStmt * clone() const override { return new AsmStmt{ *this }; } 120 118 MUTATE_FRIEND 121 119 }; 122 120 123 // / C-preprocessor directive `#...`121 // C-preprocessor directive: #... 124 122 class DirectiveStmt final : public Stmt { 125 public:123 public: 126 124 std::string directive; 127 125 128 126 DirectiveStmt( const CodeLocation & loc, const std::string & directive, 129 std::vector<Label> && labels = {} )130 : Stmt(loc, std::move(labels)), directive(directive) {}131 132 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 133 private:127 std::vector<Label> && labels = {} ) 128 : Stmt(loc, std::move(labels)), directive(directive) {} 129 130 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 131 private: 134 132 DirectiveStmt * clone() const override { return new DirectiveStmt{ *this }; } 135 133 MUTATE_FRIEND 136 134 }; 137 135 138 // / If conditional statement `if (...) ... else ...`136 // If statement: if (...) ... else ... 
139 137 class IfStmt final : public Stmt { 140 public:141 ptr<Expr> cond; 142 ptr<Stmt> then Part;143 ptr<Stmt> else Part;138 public: 139 ptr<Expr> cond; 140 ptr<Stmt> then; 141 ptr<Stmt> else_; 144 142 std::vector<ptr<Stmt>> inits; 145 143 146 IfStmt( const CodeLocation & loc, const Expr * cond, const Stmt * then Part,147 const Stmt * elsePart = nullptr,std::vector<ptr<Stmt>> && inits = {},148 std::vector<Label> && labels = {} )149 : Stmt(loc, std::move(labels)), cond(cond), thenPart(thenPart), elsePart(elsePart),150 inits(std::move(inits)) {}151 152 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 153 private:144 IfStmt( const CodeLocation & loc, const Expr * cond, const Stmt * then, 145 const Stmt * else_ = nullptr, const std::vector<ptr<Stmt>> && inits = {}, 146 const std::vector<Label> && labels = {} ) 147 : Stmt(loc, std::move(labels)), cond(cond), then(then), else_(else_), 148 inits(std::move(inits)) {} 149 150 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 151 private: 154 152 IfStmt * clone() const override { return new IfStmt{ *this }; } 155 153 MUTATE_FRIEND 156 154 }; 157 155 158 // / Switch or choose conditional statement `switch (...) { ... }`156 // Switch or choose statement: switch (...) { ... } 159 157 class SwitchStmt final : public Stmt { 160 public:158 public: 161 159 ptr<Expr> cond; 162 160 std::vector<ptr<Stmt>> stmts; 163 161 164 SwitchStmt( const CodeLocation & loc, const Expr * cond, std::vector<ptr<Stmt>> && stmts,165 std::vector<Label> && labels = {} )166 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}167 168 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 169 private:162 SwitchStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts, 163 const std::vector<Label> && labels = {} ) 164 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {} 165 166 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 167 private: 170 168 SwitchStmt * clone() const override { return new SwitchStmt{ *this }; } 171 169 MUTATE_FRIEND 172 170 }; 173 171 174 // / Case label `case ...:` `default:`172 // Case label: case ...: or default: 175 173 class CaseStmt final : public Stmt { 176 public:177 // /Null for the default label.174 public: 175 // Null for the default label. 178 176 ptr<Expr> cond; 179 177 std::vector<ptr<Stmt>> stmts; 180 178 181 CaseStmt( const CodeLocation & loc, const Expr * cond, std::vector<ptr<Stmt>> && stmts,182 std::vector<Label> && labels = {} )183 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}179 CaseStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts, 180 const std::vector<Label> && labels = {} ) 181 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {} 184 182 185 183 bool isDefault() const { return !cond; } 186 184 187 185 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 188 private:186 private: 189 187 CaseStmt * clone() const override { return new CaseStmt{ *this }; } 190 188 MUTATE_FRIEND 191 189 }; 192 190 193 // / While loop `while (...) ...` `do ... while (...);194 class While Stmt final : public Stmt {195 public:191 // While loop: while (...) ... else ... or do ... while (...) 
else ...; 192 class WhileDoStmt final : public Stmt { 193 public: 196 194 ptr<Expr> cond; 197 195 ptr<Stmt> body; 196 ptr<Stmt> else_; 198 197 std::vector<ptr<Stmt>> inits; 199 198 bool isDoWhile; 200 199 201 WhileStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, 202 std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, std::vector<Label> && labels = {} ) 203 : Stmt(loc, std::move(labels)), cond(cond), body(body), inits(std::move(inits)), 204 isDoWhile(isDoWhile) {} 205 206 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 207 private: 208 WhileStmt * clone() const override { return new WhileStmt{ *this }; } 209 MUTATE_FRIEND 210 }; 211 212 /// For loop `for (... ; ... ; ...) ...` 200 WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, 201 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} ) 202 : Stmt(loc, std::move(labels)), cond(cond), body(body), else_(nullptr), inits(std::move(inits)), isDoWhile(isDoWhile) {} 203 204 WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, const Stmt * else_, 205 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} ) 206 : Stmt(loc, std::move(labels)), cond(cond), body(body), else_(else_), inits(std::move(inits)), isDoWhile(isDoWhile) {} 207 208 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 209 private: 210 WhileDoStmt * clone() const override { return new WhileDoStmt{ *this }; } 211 MUTATE_FRIEND 212 }; 213 214 // For loop: for (... ; ... ; ...) ... else ... 213 215 class ForStmt final : public Stmt { 214 public:216 public: 215 217 std::vector<ptr<Stmt>> inits; 216 218 ptr<Expr> cond; 217 219 ptr<Expr> inc; 218 220 ptr<Stmt> body; 219 220 ForStmt( const CodeLocation & loc, std::vector<ptr<Stmt>> && inits, const Expr * cond, 221 const Expr * inc, const Stmt * body, std::vector<Label> && labels = {} ) 222 : Stmt(loc, std::move(labels)), inits(std::move(inits)), cond(cond), inc(inc), 223 body(body) {} 224 225 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 226 private: 221 ptr<Stmt> else_; 222 223 ForStmt( const CodeLocation & loc, const std::vector<ptr<Stmt>> && inits, const Expr * cond, 224 const Expr * inc, const Stmt * body, const std::vector<Label> && label = {} ) 225 : Stmt(loc, std::move(label)), inits(std::move(inits)), cond(cond), inc(inc), body(body), else_(nullptr) {} 226 227 ForStmt( const CodeLocation & loc, const std::vector<ptr<Stmt>> && inits, const Expr * cond, 228 const Expr * inc, const Stmt * body, const Stmt * else_, const std::vector<Label> && labels = {} ) 229 : Stmt(loc, std::move(labels)), inits(std::move(inits)), cond(cond), inc(inc), body(body), else_(else_) {} 230 231 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 232 private: 227 233 ForStmt * clone() const override { return new ForStmt{ *this }; } 228 234 MUTATE_FRIEND 229 235 }; 230 236 231 // / Branch control flow statement `goto ...` `break` `continue` `fallthru`237 // Branch control flow statement: goto ... 
or break or continue or fallthru 232 238 class BranchStmt final : public Stmt { 233 public:239 public: 234 240 enum Kind { Goto, Break, Continue, FallThrough, FallThroughDefault }; 235 241 static constexpr size_t kindEnd = 1 + (size_t)FallThroughDefault; … … 240 246 Kind kind; 241 247 242 BranchStmt( const CodeLocation & loc, Kind kind, Label target, 243 std::vector<Label> && labels = {} ); 244 BranchStmt( const CodeLocation & loc, const Expr * computedTarget, 245 std::vector<Label> && labels = {} ) 246 : Stmt(loc, std::move(labels)), originalTarget(loc), target(loc), 247 computedTarget(computedTarget), kind(Goto) {} 248 BranchStmt( const CodeLocation & loc, Kind kind, Label target, const std::vector<Label> && labels = {} ); 249 BranchStmt( const CodeLocation & loc, const Expr * computedTarget, const std::vector<Label> && labels = {} ) 250 : Stmt(loc, std::move(labels)), originalTarget(loc), target(loc), computedTarget(computedTarget), kind(Goto) {} 248 251 249 252 const char * kindName() const { return kindNames[kind]; } 250 253 251 254 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 252 private:255 private: 253 256 BranchStmt * clone() const override { return new BranchStmt{ *this }; } 254 257 MUTATE_FRIEND … … 257 260 }; 258 261 259 // / Return statement `return ...`262 // Return statement: return ... 260 263 class ReturnStmt final : public Stmt { 261 public:264 public: 262 265 ptr<Expr> expr; 263 266 264 ReturnStmt( const CodeLocation & loc, const Expr * expr, std::vector<Label> && labels = {} )265 : Stmt(loc, std::move(labels)), expr(expr) {}266 267 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 268 private:267 ReturnStmt( const CodeLocation & loc, const Expr * expr, const std::vector<Label> && labels = {} ) 268 : Stmt(loc, std::move(labels)), expr(expr) {} 269 270 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 271 private: 269 272 ReturnStmt * clone() const override { return new ReturnStmt{ *this }; } 270 273 MUTATE_FRIEND 271 274 }; 272 275 273 // /Kind of exception276 // Kind of exception 274 277 enum ExceptionKind { Terminate, Resume }; 275 278 276 // / Throw statement `throw ...`279 // Throw statement: throw ... 277 280 class ThrowStmt final : public Stmt { 278 public:281 public: 279 282 ptr<Expr> expr; 280 283 ptr<Expr> target; 281 284 ExceptionKind kind; 282 285 283 ThrowStmt( 284 const CodeLocation & loc, ExceptionKind kind, const Expr * expr, const Expr * target, 285 std::vector<Label> && labels = {} ) 286 : Stmt(loc, std::move(labels)), expr(expr), target(target), kind(kind) {} 287 288 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 289 private: 286 ThrowStmt( const CodeLocation & loc, ExceptionKind kind, const Expr * expr, 287 const Expr * target, const std::vector<Label> && labels = {} ) 288 : Stmt(loc, std::move(labels)), expr(expr), target(target), kind(kind) {} 289 290 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 291 private: 290 292 ThrowStmt * clone() const override { return new ThrowStmt{ *this }; } 291 293 MUTATE_FRIEND 292 294 }; 293 295 294 // / Try statement `try { ... } ...`296 // Try statement: try { ... } ... 
295 297 class TryStmt final : public Stmt { 296 public:298 public: 297 299 ptr<CompoundStmt> body; 298 300 std::vector<ptr<CatchStmt>> handlers; 299 301 ptr<FinallyStmt> finally; 300 302 301 TryStmt( 302 const CodeLocation & loc, const CompoundStmt * body, 303 std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally, 304 std::vector<Label> && labels = {} ) 305 : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {} 306 307 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 308 private: 303 TryStmt( const CodeLocation & loc, const CompoundStmt * body, 304 const std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally, 305 const std::vector<Label> && labels = {} ) 306 : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {} 307 308 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 309 private: 309 310 TryStmt * clone() const override { return new TryStmt{ *this }; } 310 311 MUTATE_FRIEND 311 312 }; 312 313 313 // /Catch clause of try statement314 // Catch clause of try statement 314 315 class CatchStmt final : public Stmt { 315 public:316 public: 316 317 ptr<Decl> decl; 317 318 ptr<Expr> cond; … … 319 320 ExceptionKind kind; 320 321 321 CatchStmt( 322 const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond, 323 const Stmt * body, std::vector<Label> && labels = {} ) 324 : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {} 325 326 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 327 private: 322 CatchStmt( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond, 323 const Stmt * body, const std::vector<Label> && labels = {} ) 324 : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {} 325 326 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 327 private: 328 328 CatchStmt * clone() const override { return new CatchStmt{ *this }; } 329 329 MUTATE_FRIEND 330 330 }; 331 331 332 // /Finally clause of try statement332 // Finally clause of try statement 333 333 class FinallyStmt final : public Stmt { 334 public:334 public: 335 335 ptr<CompoundStmt> body; 336 336 337 337 FinallyStmt( const CodeLocation & loc, const CompoundStmt * body, 338 std::vector<Label> && labels = {} )339 : Stmt(loc, std::move(labels)), body(body) {}340 341 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 342 private:338 std::vector<Label> && labels = {} ) 339 : Stmt(loc, std::move(labels)), body(body) {} 340 341 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 342 private: 343 343 FinallyStmt * clone() const override { return new FinallyStmt{ *this }; } 344 344 MUTATE_FRIEND 345 345 }; 346 346 347 // /Suspend statement347 // Suspend statement 348 348 class SuspendStmt final : public Stmt { 349 public:349 public: 350 350 ptr<CompoundStmt> then; 351 351 enum Type { None, Coroutine, Generator } type = None; 352 352 353 SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, std::vector<Label> && labels = {} )354 : Stmt(loc, std::move(labels)), then(then), type(type) {}355 356 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 357 private:353 SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, const std::vector<Label> && labels = {} ) 354 : Stmt(loc, std::move(labels)), then(then), 
type(type) {} 355 356 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 357 private: 358 358 SuspendStmt * clone() const override { return new SuspendStmt{ *this }; } 359 359 MUTATE_FRIEND 360 360 }; 361 361 362 // / Wait for concurrency statement `when (...) waitfor (... , ...) ... timeout(...) ... else ...`362 // Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ... 363 363 class WaitForStmt final : public Stmt { 364 public:364 public: 365 365 struct Target { 366 366 ptr<Expr> func; … … 389 389 OrElse orElse; 390 390 391 WaitForStmt( const CodeLocation & loc, std::vector<Label> && labels = {} )392 : Stmt(loc, std::move(labels)) {}393 394 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 395 private:391 WaitForStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 392 : Stmt(loc, std::move(labels)) {} 393 394 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 395 private: 396 396 WaitForStmt * clone() const override { return new WaitForStmt{ *this }; } 397 397 MUTATE_FRIEND 398 398 }; 399 399 400 // /Any declaration in a (compound) statement.400 // Any declaration in a (compound) statement. 401 401 class DeclStmt final : public Stmt { 402 public:402 public: 403 403 ptr<Decl> decl; 404 404 405 DeclStmt( const CodeLocation & loc, const Decl * decl, std::vector<Label> && labels = {} )406 : Stmt(loc, std::move(labels)), decl(decl) {}407 408 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 409 private:405 DeclStmt( const CodeLocation & loc, const Decl * decl, const std::vector<Label> && labels = {} ) 406 : Stmt(loc, std::move(labels)), decl(decl) {} 407 408 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 409 private: 410 410 DeclStmt * clone() const override { return new DeclStmt{ *this }; } 411 411 MUTATE_FRIEND 412 412 }; 413 413 414 // /Represents an implicit application of a constructor or destructor.414 // Represents an implicit application of a constructor or destructor. 
415 415 class ImplicitCtorDtorStmt final : public Stmt { 416 public:416 public: 417 417 ptr<Stmt> callStmt; 418 418 419 419 ImplicitCtorDtorStmt( const CodeLocation & loc, const Stmt * callStmt, 420 std::vector<Label> && labels = {} )421 : Stmt(loc, std::move(labels)), callStmt(callStmt) {}422 423 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 424 private:420 std::vector<Label> && labels = {} ) 421 : Stmt(loc, std::move(labels)), callStmt(callStmt) {} 422 423 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 424 private: 425 425 ImplicitCtorDtorStmt * clone() const override { return new ImplicitCtorDtorStmt{ *this }; } 426 426 MUTATE_FRIEND 427 427 }; 428 428 429 // /Mutex Statement429 // Mutex Statement 430 430 class MutexStmt final : public Stmt { 431 public:431 public: 432 432 ptr<Stmt> stmt; 433 433 std::vector<ptr<Expr>> mutexObjs; 434 434 435 435 MutexStmt( const CodeLocation & loc, const Stmt * stmt, 436 std::vector<ptr<Expr>> && mutexes,std::vector<Label> && labels = {} )437 : Stmt(loc, std::move(labels)), stmt(stmt), mutexObjs(std::move(mutexes)) {}438 439 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 440 private:436 const std::vector<ptr<Expr>> && mutexes, const std::vector<Label> && labels = {} ) 437 : Stmt(loc, std::move(labels)), stmt(stmt), mutexObjs(std::move(mutexes)) {} 438 439 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 440 private: 441 441 MutexStmt * clone() const override { return new MutexStmt{ *this }; } 442 442 MUTATE_FRIEND 443 443 }; 444 445 } 444 } // namespace ast 446 445 447 446 #undef MUTATE_FRIEND 448 447 449 448 // Local Variables: // 450 // tab-width: 4 //451 449 // mode: c++ // 452 // compile-command: "make install" //453 450 // End: // -
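With the renames (`then`/`else_`) and the new `else_` members on the loop nodes, building the extended statements goes through the constructors shown above. The fragment below sketches how a pass might create a `WhileDoStmt` that carries a loop `else` clause. It assumes the CFA source tree's headers and pre-existing `cond`, `body` and `elseBody` nodes, so treat it as a shape illustration against the declarations in this hunk rather than a standalone program.

#include "AST/Expr.hpp"   // assumed include paths within the CFA source tree
#include "AST/Stmt.hpp"

// Sketch: wrap an existing condition, body and else-branch into the new node,
// i.e.  while ( cond ) body else elseBody
static const ast::Stmt * buildWhileElse( const CodeLocation & loc, const ast::Expr * cond,
		const ast::Stmt * body, const ast::Stmt * elseBody ) {
	// The overload taking an else branch stores it in `else_`; the overload
	// without it leaves `else_` null, matching the old behaviour.
	return new ast::WhileDoStmt( loc, cond, body, elseBody,
		{} /* inits */, false /* isDoWhile */, {} /* labels */ );
}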
src/AST/Visitor.hpp
r97c215f rf5a51db
10 10 // Created On : Thr May 9 15:28:00 2019
11 11 // Last Modified By : Peter A. Buhr
12 // Last Modified On : Fri Mar 12 18:25:07 2021
13 // Update Count : 1
12 // Last Modified On : Tue Feb 1 09:09:34 2022
13 // Update Count : 2
14 14 //
15 15 … …
38 38 virtual const ast::Stmt * visit( const ast::DirectiveStmt * ) = 0;
39 39 virtual const ast::Stmt * visit( const ast::IfStmt * ) = 0;
40 virtual const ast::Stmt * visit( const ast::WhileStmt * ) = 0;
40 virtual const ast::Stmt * visit( const ast::WhileDoStmt * ) = 0;
41 41 virtual const ast::Stmt * visit( const ast::ForStmt * ) = 0;
42 42 virtual const ast::Stmt * visit( const ast::SwitchStmt * ) = 0;
-
src/CodeGen/CodeGenerator.cc
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 19:00:42 202113 // Update Count : 5 3612 // Last Modified On : Wed Feb 2 20:30:30 2022 13 // Update Count : 541 14 14 // 15 15 #include "CodeGenerator.h" … … 42 42 bool wantSpacing( Statement * stmt) { 43 43 return dynamic_cast< IfStmt * >( stmt ) || dynamic_cast< CompoundStmt * >( stmt ) || 44 dynamic_cast< While Stmt * >( stmt ) || dynamic_cast< ForStmt * >( stmt ) || dynamic_cast< SwitchStmt *>( stmt );44 dynamic_cast< WhileDoStmt * >( stmt ) || dynamic_cast< ForStmt * >( stmt ) || dynamic_cast< SwitchStmt *>( stmt ); 45 45 } 46 46 … … 955 955 output << " ) "; 956 956 957 ifStmt->get_then Part()->accept( *visitor );958 959 if ( ifStmt->get_else Part() != 0) {957 ifStmt->get_then()->accept( *visitor ); 958 959 if ( ifStmt->get_else() != 0) { 960 960 output << " else "; 961 ifStmt->get_else Part()->accept( *visitor );961 ifStmt->get_else()->accept( *visitor ); 962 962 } // if 963 963 } … … 1020 1020 output << "fallthru"; 1021 1021 break; 1022 default: ; // prevent warning 1022 1023 } // switch 1023 1024 // print branch target for labelled break/continue/fallthru in debug mode … … 1125 1126 } 1126 1127 1127 void CodeGenerator::postvisit( While Stmt * whileStmt ) {1128 if ( while Stmt->get_isDoWhile() ) {1128 void CodeGenerator::postvisit( WhileDoStmt * whileDoStmt ) { 1129 if ( whileDoStmt->get_isDoWhile() ) { 1129 1130 output << "do"; 1130 1131 } else { 1131 1132 output << "while ("; 1132 while Stmt->get_condition()->accept( *visitor );1133 whileDoStmt->get_condition()->accept( *visitor ); 1133 1134 output << ")"; 1134 1135 } // if 1135 1136 output << " "; 1136 1137 1137 output << CodeGenerator::printLabels( while Stmt->get_body()->get_labels() );1138 while Stmt->get_body()->accept( *visitor );1138 output << CodeGenerator::printLabels( whileDoStmt->get_body()->get_labels() ); 1139 whileDoStmt->get_body()->accept( *visitor ); 1139 1140 1140 1141 output << indent; 1141 1142 1142 if ( while Stmt->get_isDoWhile() ) {1143 if ( whileDoStmt->get_isDoWhile() ) { 1143 1144 output << " while ("; 1144 while Stmt->get_condition()->accept( *visitor );1145 whileDoStmt->get_condition()->accept( *visitor ); 1145 1146 output << ");"; 1146 1147 } // if -
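Apart from renaming `WhileStmt` to `WhileDoStmt` and the `thenPart`/`elsePart` accessors, the hunk adds a bare `default: ;` to the branch-kind switch, with the comment "prevent warning". The standalone example below reproduces the situation: under `-Wall` (which enables `-Wswitch`), a `switch` over an enum that omits some enumerators and has no `default` label is reported, and an empty `default: ;` is the usual way to state that the remaining values are intentionally ignored.

// g++ -Wall -c switch_warning.cpp   (no warning with the default label; a -Wswitch
// warning about the unhandled enumerators without it)
enum Kind { Goto, Break, Continue, FallThrough, FallThroughDefault };

const char * name( Kind k ) {
	switch ( k ) {
	  case Goto: return "goto";
	  case Break: return "break";
	  default: ;                         // intentionally ignore the remaining kinds
	}
	return "other";
}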
src/CodeGen/CodeGenerator.h
r97c215f rf5a51db
10 10 // Created On : Mon May 18 07:44:20 2015
11 11 // Last Modified By : Peter A. Buhr
12 // Last Modified On : Fri Mar 12 18:35:38 2021
13 // Update Count : 63
12 // Last Modified On : Tue Feb 1 09:23:21 2022
13 // Update Count : 64
14 14 //
15 15 … …
116 116 void postvisit( WaitForStmt * );
117 117 void postvisit( WithStmt * );
118 void postvisit( WhileStmt * );
118 void postvisit( WhileDoStmt * );
119 119 void postvisit( ForStmt * );
120 120 void postvisit( NullStmt * );
-
src/Common/CodeLocationTools.cpp
r97c215f rf5a51db
10 10 // Created On : Fri Dec 4 15:42:00 2020
11 11 // Last Modified By : Peter A. Buhr
12 // Last Modified On : Fri Mar 12 18:35:37 2021
13 // Update Count : 2
12 // Last Modified On : Tue Feb 1 09:14:39 2022
13 // Update Count : 3
14 14 //
15 15 … …
109 109 macro(DirectiveStmt, Stmt) \
110 110 macro(IfStmt, Stmt) \
111 macro(WhileStmt, Stmt) \
111 macro(WhileDoStmt, Stmt) \
112 112 macro(ForStmt, Stmt) \
113 113 macro(SwitchStmt, Stmt) \
-
src/Common/PassVisitor.h
r97c215f rf5a51db
92 92 virtual void visit( IfStmt * ifStmt ) override final;
93 93 virtual void visit( const IfStmt * ifStmt ) override final;
94 virtual void visit( WhileStmt * whileStmt ) override final;
95 virtual void visit( const WhileStmt * whileStmt ) override final;
94 virtual void visit( WhileDoStmt * whileDoStmt ) override final;
95 virtual void visit( const WhileDoStmt * whileDoStmt ) override final;
96 96 virtual void visit( ForStmt * forStmt ) override final;
97 97 virtual void visit( const ForStmt * forStmt ) override final;
… …
277 277 virtual Statement * mutate( DirectiveStmt * dirStmt ) override final;
278 278 virtual Statement * mutate( IfStmt * ifStmt ) override final;
279 virtual Statement * mutate( WhileStmt * whileStmt ) override final;
279 virtual Statement * mutate( WhileDoStmt * whileDoStmt ) override final;
280 280 virtual Statement * mutate( ForStmt * forStmt ) override final;
281 281 virtual Statement * mutate( SwitchStmt * switchStmt ) override final;
-
src/Common/PassVisitor.impl.h
r97c215f rf5a51db 1189 1189 maybeAccept_impl( node->initialization, *this ); 1190 1190 visitExpression ( node->condition ); 1191 node->then Part = visitStatement( node->thenPart);1192 node->else Part = visitStatement( node->elsePart);1191 node->then = visitStatement( node->then ); 1192 node->else_ = visitStatement( node->else_ ); 1193 1193 } 1194 1194 VISIT_END( node ); … … 1203 1203 maybeAccept_impl( node->initialization, *this ); 1204 1204 visitExpression ( node->condition ); 1205 visitStatement ( node->then Part);1206 visitStatement ( node->else Part);1205 visitStatement ( node->then ); 1206 visitStatement ( node->else_ ); 1207 1207 } 1208 1208 VISIT_END( node ); … … 1217 1217 maybeMutate_impl( node->initialization, *this ); 1218 1218 node->condition = mutateExpression( node->condition ); 1219 node->then Part = mutateStatement ( node->thenPart);1220 node->else Part = mutateStatement ( node->elsePart);1219 node->then = mutateStatement ( node->then ); 1220 node->else_ = mutateStatement ( node->else_ ); 1221 1221 } 1222 1222 MUTATE_END( Statement, node ); … … 1224 1224 1225 1225 //-------------------------------------------------------------------------- 1226 // While Stmt1227 template< typename pass_type > 1228 void PassVisitor< pass_type >::visit( While Stmt * node ) {1226 // WhileDoStmt 1227 template< typename pass_type > 1228 void PassVisitor< pass_type >::visit( WhileDoStmt * node ) { 1229 1229 VISIT_START( node ); 1230 1230 … … 1241 1241 1242 1242 template< typename pass_type > 1243 void PassVisitor< pass_type >::visit( const While Stmt * node ) {1243 void PassVisitor< pass_type >::visit( const WhileDoStmt * node ) { 1244 1244 VISIT_START( node ); 1245 1245 … … 1256 1256 1257 1257 template< typename pass_type > 1258 Statement * PassVisitor< pass_type >::mutate( While Stmt * node ) {1258 Statement * PassVisitor< pass_type >::mutate( WhileDoStmt * node ) { 1259 1259 MUTATE_START( node ); 1260 1260 -
src/Common/utility.h
r97c215f rf5a51db 371 371 } 372 372 373 template< typename T > 374 struct enumerate_t { 375 template<typename val_t> 376 struct value_t { 377 val_t & val; 378 size_t idx; 379 }; 380 381 template< typename iter_t, typename val_t > 382 struct iterator_t { 383 iter_t it; 384 size_t idx; 385 386 iterator_t( iter_t _it, size_t _idx ) : it(_it), idx(_idx) {} 387 388 value_t<val_t> operator*() const { return value_t<val_t>{ *it, idx }; } 389 390 bool operator==(const iterator_t & o) const { return o.it == it; } 391 bool operator!=(const iterator_t & o) const { return o.it != it; } 392 393 iterator_t & operator++() { 394 it++; 395 idx++; 396 return *this; 397 } 398 399 using difference_type = typename std::iterator_traits< iter_t >::difference_type; 400 using value_type = value_t<val_t>; 401 using pointer = value_t<val_t> *; 402 using reference = value_t<val_t> &; 403 using iterator_category = std::forward_iterator_tag; 404 }; 405 406 T & ref; 407 408 using iterator = iterator_t< typename T::iterator, typename T::value_type >; 409 using const_iterator = iterator_t< typename T::const_iterator, const typename T::value_type >; 410 411 iterator begin() { return iterator( ref.begin(), 0 ); } 412 iterator end() { return iterator( ref.end(), ref.size() ); } 413 414 const_iterator begin() const { return const_iterator( ref.cbegin(), 0 ); } 415 const_iterator end() const { return const_iterator( ref.cend(), ref.size() ); } 416 417 const_iterator cbegin() const { return const_iterator( ref.cbegin(), 0 ); } 418 const_iterator cend() const { return const_iterator( ref.cend(), ref.size() ); } 419 }; 420 421 template< typename T > 422 enumerate_t<T> enumerate( T & ref ) { 423 return enumerate_t< T >{ ref }; 424 } 425 426 template< typename T > 427 const enumerate_t< const T > enumerate( const T & ref ) { 428 return enumerate_t< const T >{ ref }; 429 } 430 373 431 template< typename OutType, typename Range, typename Functor > 374 432 OutType map_range( const Range& range, Functor&& functor ) { -
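The new `enumerate_t` wrapper gives a range-`for` loop a Python-style index alongside each element: dereferencing the adapted iterator yields an object with `val` (a reference to the element) and `idx` members. The usage example below follows directly from the implementation above; the `#include` path and `-I` flag are assumptions based on the file name in this changeset.

// g++ -std=c++11 enumerate_demo.cpp -I src
#include <iostream>
#include <string>
#include <vector>

#include "Common/utility.h"   // assumed include path for enumerate()

int main() {
	std::vector<std::string> kinds = { "goto", "break", "continue" };

	// item.idx counts from 0; item.val is a reference into `kinds`.
	for ( auto item : enumerate( kinds ) ) {
		std::cout << item.idx << ": " << item.val << '\n';
	}
}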
src/ControlStruct/ExceptTranslate.h
r97c215f rf5a51db
31 31
32 32 void translateTries( std::list< Declaration *> & translationUnit );
33 void translateTries( ast::TranslationUnit & transUnit );
33 34 /* Replaces all try blocks (and their many clauses) with function definitions and calls.
34 35 * This uses the exception built-ins to produce typed output and should take place after
-
src/ControlStruct/ExceptTranslateNew.cpp
r97c215f rf5a51db 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 8 11:53:00 2021 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 16:50:00 202113 // Update Count : 011 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 18:49:58 2022 13 // Update Count : 1 14 14 // 15 15 … … 20 20 #include "AST/Stmt.hpp" 21 21 #include "AST/TranslationUnit.hpp" 22 #include "AST/DeclReplacer.hpp" 22 23 23 24 namespace ControlStruct { 24 25 25 26 namespace { 27 28 typedef std::list<ast::CatchStmt*> CatchList; 29 30 void split( CatchList& allHandlers, CatchList& terHandlers, 31 CatchList& resHandlers ) { 32 while ( !allHandlers.empty() ) { 33 ast::CatchStmt * stmt = allHandlers.front(); 34 allHandlers.pop_front(); 35 if (stmt->kind == ast::ExceptionKind::Terminate) { 36 terHandlers.push_back(stmt); 37 } else { 38 resHandlers.push_back(stmt); 39 } 40 } 41 } 42 43 void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) { 44 block->push_back(new ast::DeclStmt(block->location, item)); 45 } 26 46 27 47 class TranslateThrowsCore : public ast::WithGuards { … … 128 148 } 129 149 150 151 class TryMutatorCore { 152 // The built in types used in translation. 153 const ast::StructDecl * except_decl; 154 const ast::StructDecl * node_decl; 155 const ast::StructDecl * hook_decl; 156 157 // The many helper functions for code/syntree generation. 158 ast::CompoundStmt * take_try_block( ast::TryStmt * tryStmt ); 159 ast::FunctionDecl * create_try_wrapper( const ast::CompoundStmt * body ); 160 ast::FunctionDecl * create_terminate_catch( CatchList &handlers ); 161 ast::CompoundStmt * create_single_matcher( 162 const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ); 163 ast::FunctionDecl * create_terminate_match( CatchList &handlers ); 164 ast::CompoundStmt * create_terminate_caller( CodeLocation loc, ast::FunctionDecl * try_wrapper, 165 ast::FunctionDecl * terminate_catch, ast::FunctionDecl * terminate_match ); 166 ast::FunctionDecl * create_resume_handler( CatchList &handlers ); 167 ast::CompoundStmt * create_resume_wrapper( 168 const ast::Stmt * wraps, const ast::FunctionDecl * resume_handler ); 169 ast::FunctionDecl * create_finally_wrapper( ast::TryStmt * tryStmt ); 170 ast::ObjectDecl * create_finally_hook( ast::FunctionDecl * finally_wrapper ); 171 ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt ); 172 173 // Types used in translation, make sure to use clone. 
174 // void (*function)(); 175 ast::FunctionDecl * try_func_t; 176 // void (*function)(int, exception); 177 ast::FunctionDecl * catch_func_t; 178 // int (*function)(exception); 179 ast::FunctionDecl * match_func_t; 180 // bool (*function)(exception); 181 ast::FunctionDecl * handle_func_t; 182 // void (*function)(__attribute__((unused)) void *); 183 ast::FunctionDecl * finally_func_t; 184 185 ast::StructInstType * create_except_type() { 186 assert( except_decl ); 187 return new ast::StructInstType( except_decl ); 188 } 189 void init_func_types(); 190 191 public: 192 TryMutatorCore() : 193 except_decl( nullptr ), node_decl( nullptr ), hook_decl( nullptr ) 194 {} 195 196 void previsit( const ast::StructDecl *structDecl ); 197 ast::Stmt * postvisit( const ast::TryStmt *tryStmt ); 198 ast::Stmt * postvisit( const ast::ThrowStmt *throwStmt ); 199 }; 200 201 void TryMutatorCore::init_func_types() { 202 assert( except_decl ); 203 204 ast::ObjectDecl index_obj( 205 {}, 206 "__handler_index", 207 new ast::BasicType(ast::BasicType::SignedInt) 208 ); 209 ast::ObjectDecl exception_obj( 210 {}, 211 "__exception_inst", 212 new ast::PointerType( 213 new ast::StructInstType( except_decl ) 214 ), 215 NULL 216 ); 217 ast::ObjectDecl bool_obj( 218 {}, 219 "__ret_bool", 220 new ast::BasicType( ast::BasicType::Bool ), 221 nullptr, //init 222 ast::Storage::Classes{}, 223 ast::Linkage::Cforall, 224 nullptr, //width 225 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 226 ); 227 ast::ObjectDecl voidptr_obj( 228 {}, 229 "__hook", 230 new ast::PointerType( 231 new ast::VoidType() 232 ), 233 nullptr, //init 234 ast::Storage::Classes{}, 235 ast::Linkage::Cforall, 236 nullptr, //width 237 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 238 ); 239 240 ast::ObjectDecl unused_index_obj( 241 {}, 242 "__handler_index", 243 new ast::BasicType(ast::BasicType::SignedInt), 244 nullptr, 245 ast::Storage::Classes{}, 246 ast::Linkage::Cforall, 247 nullptr, //width 248 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 249 ); 250 //unused_index_obj->attributes.push_back( new Attribute( "unused" ) ); 251 252 try_func_t = new ast::FunctionDecl( 253 {}, 254 "try", 255 {}, //forall 256 {}, //no param 257 {}, //no return 258 nullptr, 259 ast::Storage::Classes{}, 260 ast::Linkage::Cforall 261 ); 262 263 catch_func_t = new ast::FunctionDecl( 264 {}, 265 "catch", 266 {}, //forall 267 {ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param 268 {}, //return void 269 nullptr, 270 ast::Storage::Classes{}, 271 ast::Linkage::Cforall 272 ); 273 274 match_func_t = new ast::FunctionDecl( 275 {}, 276 "match", 277 {}, //forall 278 {ast::deepCopy(&exception_obj)}, 279 {ast::deepCopy(&unused_index_obj)}, 280 nullptr, 281 ast::Storage::Classes{}, 282 ast::Linkage::Cforall 283 ); 284 285 handle_func_t = new ast::FunctionDecl( 286 {}, 287 "handle", 288 {}, //forall 289 {ast::deepCopy(&exception_obj)}, 290 {ast::deepCopy(&bool_obj)}, 291 nullptr, 292 ast::Storage::Classes{}, 293 ast::Linkage::Cforall 294 ); 295 296 finally_func_t = new ast::FunctionDecl( 297 {}, 298 "finally", 299 {}, //forall 300 {ast::deepCopy(&voidptr_obj)}, 301 {}, //return void 302 nullptr, 303 ast::Storage::Classes{}, 304 ast::Linkage::Cforall 305 ); 306 307 //catch_func_t.get_parameters().push_back( index_obj.clone() ); 308 //catch_func_t.get_parameters().push_back( exception_obj.clone() ); 309 //match_func_t.get_returnVals().push_back( unused_index_obj ); 310 //match_func_t.get_parameters().push_back( 
exception_obj.clone() ); 311 //handle_func_t.get_returnVals().push_back( bool_obj.clone() ); 312 //handle_func_t.get_parameters().push_back( exception_obj.clone() ); 313 //finally_func_t.get_parameters().push_back( voidptr_obj.clone() ); 314 } 315 316 // TryStmt Mutation Helpers 317 318 /* 319 ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) { 320 ast::CompoundStmt * block = tryStmt->body; 321 tryStmt->body = nullptr; 322 return block; 323 } 324 */ 325 326 ast::FunctionDecl * TryMutatorCore::create_try_wrapper( 327 const ast::CompoundStmt *body ) { 328 329 ast::FunctionDecl * ret = ast::deepCopy(try_func_t); 330 ret->stmts = body; 331 return ret; 332 } 333 334 ast::FunctionDecl * TryMutatorCore::create_terminate_catch( 335 CatchList &handlers ) { 336 std::vector<ast::ptr<ast::Stmt>> handler_wrappers; 337 338 assert (!handlers.empty()); 339 const CodeLocation loc = handlers.front()->location; 340 341 ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t); 342 const ast::DeclWithType * index_obj = func_t->params.front(); 343 const ast::DeclWithType * except_obj = func_t->params.back(); 344 345 // Index 1..{number of handlers} 346 int index = 0; 347 CatchList::iterator it = handlers.begin(); 348 for ( ; it != handlers.end() ; ++it ) { 349 ++index; 350 ast::CatchStmt * handler = *it; 351 const CodeLocation loc = handler->location; 352 353 // case `index`: 354 // { 355 // `handler.decl` = { (virtual `decl.type`)`except` }; 356 // `handler.body`; 357 // } 358 // return; 359 ast::CompoundStmt * block = new ast::CompoundStmt(loc); 360 361 // Just copy the exception value. (Post Validation) 362 const ast::ObjectDecl * handler_decl = 363 handler->decl.strict_as<ast::ObjectDecl>(); 364 ast::ObjectDecl * local_except = ast::deepCopy(handler_decl); 365 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 366 new ast::VariableExpr( loc, except_obj ), 367 local_except->get_type() 368 ); 369 vcex->location = handler->location; 370 local_except->init = new ast::ListInit(loc, { new ast::SingleInit( loc, vcex ) }); 371 block->push_back( new ast::DeclStmt( loc, local_except ) ); 372 373 // Add the cleanup attribute. 374 local_except->attributes.push_back( new ast::Attribute( 375 "cleanup", 376 { new ast::NameExpr( loc, "__cfaehm_cleanup_terminate" ) } 377 ) ); 378 379 ast::DeclReplacer::DeclMap mapping; 380 mapping[handler_decl] = local_except; 381 const ast::Stmt * mutBody = strict_dynamic_cast<const ast::Stmt *>( 382 ast::DeclReplacer::replace(handler->body, mapping)); 383 384 385 block->push_back( mutBody ); 386 // handler->body = nullptr; 387 388 handler_wrappers.push_back( new ast::CaseStmt(loc, 389 ast::ConstantExpr::from_int(loc, index) , 390 { block, new ast::ReturnStmt( loc, nullptr ) } 391 )); 392 } 393 // TODO: Some sort of meaningful error on default perhaps? 394 395 /* 396 std::list<Statement*> stmt_handlers; 397 while ( !handler_wrappers.empty() ) { 398 stmt_handlers.push_back( handler_wrappers.front() ); 399 handler_wrappers.pop_front(); 400 } 401 */ 402 403 ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc, 404 new ast::VariableExpr( loc, index_obj ), 405 std::move(handler_wrappers) 406 ); 407 ast::CompoundStmt * body = new ast::CompoundStmt(loc, 408 {handler_lookup}); 409 410 func_t->stmts = body; 411 return func_t; 412 } 413 414 // Create a single check from a moddified handler. 415 // except_obj is referenced, modded_handler will be freed. 
416 ast::CompoundStmt * TryMutatorCore::create_single_matcher( 417 const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ) { 418 // { 419 // `modded_handler.decl` 420 // if ( `decl.name = (virtual `decl.type`)`except` 421 // [&& `modded_handler.cond`] ) { 422 // `modded_handler.body` 423 // } 424 // } 425 426 const CodeLocation loc = modded_handler->location; 427 ast::CompoundStmt * block = new ast::CompoundStmt(loc); 428 429 // Local Declaration 430 const ast::ObjectDecl * local_except = 431 modded_handler->decl.strict_as<ast::ObjectDecl>(); 432 block->push_back( new ast::DeclStmt( loc, local_except ) ); 433 434 // Check for type match. 435 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 436 new ast::VariableExpr(loc, except_obj ), 437 local_except->get_type() 438 ); 439 ast::Expr * cond = ast::UntypedExpr::createAssign(loc, 440 new ast::VariableExpr(loc, local_except ), vcex ); 441 442 // Add the check on the conditional if it is provided. 443 if ( modded_handler->cond ) { 444 cond = new ast::LogicalExpr( loc, cond, modded_handler->cond, ast::LogicalFlag::AndExpr ); 445 } 446 // Construct the match condition. 447 block->push_back( new ast::IfStmt(loc, 448 cond, modded_handler->body, nullptr ) ); 449 450 // xxx - how does this work in new ast 451 //modded_handler->set_decl( nullptr ); 452 //modded_handler->set_cond( nullptr ); 453 //modded_handler->set_body( nullptr ); 454 //delete modded_handler; 455 return block; 456 } 457 458 ast::FunctionDecl * TryMutatorCore::create_terminate_match( 459 CatchList &handlers ) { 460 // int match(exception * except) { 461 // HANDLER WRAPPERS { return `index`; } 462 // } 463 464 assert (!handlers.empty()); 465 const CodeLocation loc = handlers.front()->location; 466 467 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 468 469 ast::FunctionDecl * func_t = ast::deepCopy(match_func_t); 470 const ast::DeclWithType * except_obj = func_t->params.back(); 471 472 // Index 1..{number of handlers} 473 int index = 0; 474 CatchList::iterator it; 475 for ( it = handlers.begin() ; it != handlers.end() ; ++it ) { 476 ++index; 477 ast::CatchStmt * handler = *it; 478 479 // Body should have been taken by create_terminate_catch. 480 // xxx - just ignore it? 481 // assert( nullptr == handler->get_body() ); 482 483 // Create new body. 484 handler->body = new ast::ReturnStmt( handler->location, 485 ast::ConstantExpr::from_int( handler->location, index ) ); 486 487 // Create the handler. 
488 body->push_back( create_single_matcher( except_obj, handler ) ); 489 *it = nullptr; 490 } 491 492 body->push_back( new ast::ReturnStmt(loc, 493 ast::ConstantExpr::from_int( loc, 0 ) )); 494 495 func_t->stmts = body; 496 497 return func_t; 498 } 499 500 ast::CompoundStmt * TryMutatorCore::create_terminate_caller( 501 CodeLocation loc, 502 ast::FunctionDecl * try_wrapper, 503 ast::FunctionDecl * terminate_catch, 504 ast::FunctionDecl * terminate_match ) { 505 // { __cfaehm_try_terminate(`try`, `catch`, `match`); } 506 507 ast::UntypedExpr * caller = new ast::UntypedExpr(loc, new ast::NameExpr(loc, 508 "__cfaehm_try_terminate" ) ); 509 caller->args.push_back( new ast::VariableExpr(loc, try_wrapper ) ); 510 caller->args.push_back( new ast::VariableExpr(loc, terminate_catch ) ); 511 caller->args.push_back( new ast::VariableExpr(loc, terminate_match ) ); 512 513 ast::CompoundStmt * callStmt = new ast::CompoundStmt(loc); 514 callStmt->push_back( new ast::ExprStmt( loc, caller ) ); 515 return callStmt; 516 } 517 518 ast::FunctionDecl * TryMutatorCore::create_resume_handler( 519 CatchList &handlers ) { 520 // bool handle(exception * except) { 521 // HANDLER WRAPPERS { `hander->body`; return true; } 522 // } 523 assert (!handlers.empty()); 524 const CodeLocation loc = handlers.front()->location; 525 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 526 527 ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t); 528 const ast::DeclWithType * except_obj = func_t->params.back(); 529 530 CatchList::iterator it; 531 for ( it = handlers.begin() ; it != handlers.end() ; ++it ) { 532 ast::CatchStmt * handler = *it; 533 const CodeLocation loc = handler->location; 534 // Modifiy body. 535 ast::CompoundStmt * handling_code; 536 if (handler->body.as<ast::CompoundStmt>()) { 537 handling_code = 538 strict_dynamic_cast<ast::CompoundStmt*>( handler->body.get_and_mutate() ); 539 } else { 540 handling_code = new ast::CompoundStmt(loc); 541 handling_code->push_back( handler->body ); 542 } 543 handling_code->push_back( new ast::ReturnStmt(loc, 544 ast::ConstantExpr::from_bool(loc, true ) ) ); 545 handler->body = handling_code; 546 547 // Create the handler. 
548 body->push_back( create_single_matcher( except_obj, handler ) ); 549 *it = nullptr; 550 } 551 552 body->push_back( new ast::ReturnStmt(loc, 553 ast::ConstantExpr::from_bool(loc, false ) ) ); 554 func_t->stmts = body; 555 556 return func_t; 557 } 558 559 ast::CompoundStmt * TryMutatorCore::create_resume_wrapper( 560 const ast::Stmt * wraps, 561 const ast::FunctionDecl * resume_handler ) { 562 const CodeLocation loc = wraps->location; 563 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 564 565 // struct __try_resume_node __resume_node 566 // __attribute__((cleanup( __cfaehm_try_resume_cleanup ))); 567 // ** unwinding of the stack here could cause problems ** 568 // ** however I don't think that can happen currently ** 569 // __cfaehm_try_resume_setup( &__resume_node, resume_handler ); 570 571 ast::ObjectDecl * obj = new ast::ObjectDecl( 572 loc, 573 "__resume_node", 574 new ast::StructInstType( 575 node_decl 576 ), 577 nullptr, 578 ast::Storage::Classes{}, 579 ast::Linkage::Cforall, 580 nullptr, 581 {new ast::Attribute("cleanup", {new ast::NameExpr(loc, "__cfaehm_try_resume_cleanup")})} 582 ); 583 appendDeclStmt( body, obj ); 584 585 ast::UntypedExpr *setup = new ast::UntypedExpr(loc, new ast::NameExpr(loc, 586 "__cfaehm_try_resume_setup" ) ); 587 setup->args.push_back( new ast::AddressExpr( loc, new ast::VariableExpr(loc, obj ) ) ); 588 setup->args.push_back( new ast::VariableExpr( loc, resume_handler ) ); 589 590 body->push_back( new ast::ExprStmt(loc, setup ) ); 591 592 body->push_back( wraps ); 593 return body; 594 } 595 596 ast::FunctionDecl * TryMutatorCore::create_finally_wrapper( 597 ast::TryStmt * tryStmt ) { 598 // void finally() { `finally->block` } 599 const ast::FinallyStmt * finally = tryStmt->finally; 600 const ast::CompoundStmt * body = finally->body; 601 602 ast::FunctionDecl * func_t = ast::deepCopy(finally_func_t); 603 func_t->stmts = body; 604 605 // finally->set_block( nullptr ); 606 // delete finally; 607 tryStmt->finally = nullptr; 608 609 610 return func_t; 611 } 612 613 ast::ObjectDecl * TryMutatorCore::create_finally_hook( 614 ast::FunctionDecl * finally_wrapper ) { 615 // struct __cfaehm_cleanup_hook __finally_hook 616 // __attribute__((cleanup( `finally_wrapper` ))); 617 618 const CodeLocation loc = finally_wrapper->location; 619 // Make Cleanup Attribute. 620 /* 621 std::list< ast::Attribute * > attributes; 622 { 623 std::list< > attr_params; 624 attr_params.push_back( nameOf( finally_wrapper ) ); 625 attributes.push_back( new Attribute( "cleanup", attr_params ) ); 626 } 627 */ 628 629 return new ast::ObjectDecl( 630 loc, 631 "__finally_hook", 632 new ast::StructInstType( 633 hook_decl 634 ), 635 nullptr, 636 ast::Storage::Classes{}, 637 ast::Linkage::Cforall, 638 nullptr, 639 {new ast::Attribute("cleanup", {new ast::VariableExpr{loc, finally_wrapper}})} 640 ); 641 } 642 643 ast::Stmt * TryMutatorCore::create_resume_rethrow( const ast::ThrowStmt *throwStmt ) { 644 // return false; 645 const CodeLocation loc = throwStmt->location; 646 ast::Stmt * result = new ast::ReturnStmt(loc, 647 ast::ConstantExpr::from_bool( loc, false ) 648 ); 649 result->labels = throwStmt->labels; 650 // delete throwStmt; done by postvisit 651 return result; 652 } 653 654 // Visiting/Mutating Functions 655 void TryMutatorCore::previsit( const ast::StructDecl *structDecl ) { 656 if ( !structDecl->body ) { 657 // Skip children? 
658 return; 659 } else if ( structDecl->name == "__cfaehm_base_exception_t" ) { 660 assert( nullptr == except_decl ); 661 except_decl = structDecl; 662 init_func_types(); 663 } else if ( structDecl->name == "__cfaehm_try_resume_node" ) { 664 assert( nullptr == node_decl ); 665 node_decl = structDecl; 666 } else if ( structDecl->name == "__cfaehm_cleanup_hook" ) { 667 assert( nullptr == hook_decl ); 668 hook_decl = structDecl; 669 } 670 } 671 672 ast::Stmt * TryMutatorCore::postvisit( const ast::TryStmt *tryStmt ) { 673 assert( except_decl ); 674 assert( node_decl ); 675 assert( hook_decl ); 676 677 const CodeLocation loc = tryStmt->location; 678 ast::TryStmt * mutStmt = mutate(tryStmt); 679 // Generate a prefix for the function names? 680 681 ast::CompoundStmt * block = new ast::CompoundStmt( loc ); 682 // ast::CompoundStmt * inner = take_try_block( mutStmt ); 683 // this is never mutated so let node deletion do its job? 684 const ast::CompoundStmt * inner = mutStmt->body; 685 686 if ( mutStmt->finally ) { 687 // Define the helper function. 688 ast::FunctionDecl * finally_block = 689 create_finally_wrapper( mutStmt ); 690 appendDeclStmt( block, finally_block ); 691 // Create and add the finally cleanup hook. 692 appendDeclStmt( block, create_finally_hook( finally_block ) ); 693 } 694 695 CatchList termination_handlers; 696 CatchList resumption_handlers; 697 698 for (auto & handler: mutStmt->handlers) { 699 // xxx - should always be unique? mutate as safe const-cast 700 assert(handler->unique()); 701 if (handler->kind == ast::ExceptionKind::Resume) { 702 resumption_handlers.push_back(handler.get_and_mutate()); 703 } 704 else { 705 termination_handlers.push_back(handler.get_and_mutate()); 706 } 707 } 708 // split( mutStmt->handlers, 709 // termination_handlers, resumption_handlers ); 710 711 if ( resumption_handlers.size() ) { 712 // Define the helper function. 713 ast::FunctionDecl * resume_handler = 714 create_resume_handler( resumption_handlers ); 715 appendDeclStmt( block, resume_handler ); 716 // Prepare hooks 717 inner = create_resume_wrapper( inner, resume_handler ); 718 } 719 720 if ( termination_handlers.size() ) { 721 // Define the three helper functions. 722 ast::FunctionDecl * try_wrapper = create_try_wrapper( inner ); 723 appendDeclStmt( block, try_wrapper ); 724 ast::FunctionDecl * terminate_catch = 725 create_terminate_catch( termination_handlers ); 726 appendDeclStmt( block, terminate_catch ); 727 ast::FunctionDecl * terminate_match = 728 create_terminate_match( termination_handlers ); 729 appendDeclStmt( block, terminate_match ); 730 // Build the call to the try wrapper. 731 inner = create_terminate_caller(inner->location, 732 try_wrapper, terminate_catch, terminate_match ); 733 } 734 735 // Embed the try block. 736 block->push_back( inner ); 737 738 return block; 739 } 740 741 ast::Stmt * TryMutatorCore::postvisit( const ast::ThrowStmt *throwStmt ) { 742 // Only valid `throwResume;` statements should remain. (2/3 checks) 743 assert( ast::ExceptionKind::Resume == throwStmt->kind && ! throwStmt->expr ); 744 return create_resume_rethrow( throwStmt ); 745 } 746 130 747 } // namespace 131 748 132 749 void translateThrows( ast::TranslationUnit & transUnit ) { 133 750 ast::Pass<TranslateThrowsCore>::run( transUnit ); 751 } 752 753 void translateTries( ast::TranslationUnit & transUnit ) { 754 ast::Pass<TryMutatorCore>::run(transUnit); 134 755 } 135 756 -
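The bulk of this file is `TryMutatorCore`, a new-AST port of the existing try-statement lowering: the `try` body, the handler matcher and the handler bodies each become a helper routine, and the statement itself is replaced by a call to the runtime's `__cfaehm_try_terminate`. Pieced together from the comments in `create_try_wrapper`, `create_terminate_match`, `create_terminate_catch` and `create_terminate_caller`, a termination-only `try`/`catch` takes roughly the shape sketched below. This is an approximation for orientation only: `exception_t`, `virtual_cast`, `risky` and `recover` are hypothetical stand-ins (the real code uses `__cfaehm_base_exception_t`, CFA's `(virtual T *)` conversion, and the user's statements), the helpers are actually emitted as declarations inside the enclosing block, and resumption handlers and `finally` clauses follow the separate paths shown above.

// Source, roughly:   try { risky(); } catch ( some_exception * e ) { recover(); }
// Hypothetical stand-ins so the sketch compiles on its own:
struct exception_t { int tag; };                          // stands in for __cfaehm_base_exception_t
struct some_exception { exception_t base; };
static void risky() {}
static void recover() {}
static some_exception * virtual_cast( exception_t * e ) { // stands in for CFA's (virtual T *) cast
	return ( e && e->tag == 1 ) ? reinterpret_cast<some_exception *>( e ) : nullptr;
}

static void try_block() {                                 // create_try_wrapper: the original try body
	risky();
}
static int match_block( exception_t * except ) {          // create_terminate_match: 1-based index of
	some_exception * e;                                   // the matching handler, 0 when nothing matches
	if ( ( e = virtual_cast( except ) ) ) return 1;
	return 0;
}
static void catch_block( int index, exception_t * except ) {   // create_terminate_catch
	switch ( index ) {
	  case 1: {
		some_exception * e = virtual_cast( except );      // local copy; the real pass also attaches
		(void)e;                                          // the __cfaehm_cleanup_terminate attribute
		recover();
		return;
	  }
	}
}
// create_terminate_caller: the try statement itself is replaced by
//     { __cfaehm_try_terminate( try_block, catch_block, match_block ); }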
src/ControlStruct/FixLabels.cpp
r97c215f rf5a51db 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 1 09:39:00 2021 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 10:53:00 202113 // Update Count : 311 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 22:19:17 2022 13 // Update Count : 9 14 14 // 15 15 … … 20 20 #include "AST/Stmt.hpp" 21 21 #include "ControlStruct/MultiLevelExit.hpp" 22 using namespace ast; 22 23 23 24 namespace ControlStruct { 24 25 namespace { 26 27 class FixLabelsCore final : public ast::WithGuards { 25 class FixLabelsCore final : public WithGuards { 28 26 LabelToStmt labelTable; 29 public:27 public: 30 28 FixLabelsCore() : labelTable() {} 31 29 32 void previsit( const ast::FunctionDecl * );33 const ast::FunctionDecl * postvisit( const ast::FunctionDecl * );34 void previsit( const ast::Stmt * );35 void previsit( const ast::BranchStmt * );36 void previsit( const ast::LabelAddressExpr * );30 void previsit( const FunctionDecl * ); 31 const FunctionDecl * postvisit( const FunctionDecl * ); 32 void previsit( const Stmt * ); 33 void previsit( const BranchStmt * ); 34 void previsit( const LabelAddressExpr * ); 37 35 38 void setLabelsDef( const std::vector< ast::Label> &, const ast::Stmt * );39 void setLabelsUsage( const ast::Label & );36 void setLabelsDef( const std::vector<Label> &, const Stmt * ); 37 void setLabelsUsage( const Label & ); 40 38 }; 41 39 42 void FixLabelsCore::previsit( const ast::FunctionDecl * ) {40 void FixLabelsCore::previsit( const FunctionDecl * ) { 43 41 GuardValue( labelTable ).clear(); 44 42 } 45 43 46 const ast::FunctionDecl * FixLabelsCore::postvisit(47 const ast::FunctionDecl * decl ) {44 const FunctionDecl * FixLabelsCore::postvisit( 45 const FunctionDecl * decl ) { 48 46 if ( nullptr == decl->stmts ) return decl; 49 47 for ( auto kvp : labelTable ) { 50 48 if ( nullptr == kvp.second ) { 51 49 SemanticError( kvp.first.location, 52 "Use of undefined label: " + kvp.first.name );50 "Use of undefined label: " + kvp.first.name ); 53 51 } 54 52 } 55 return ast::mutate_field( decl, &ast::FunctionDecl::stmts,56 multiLevelExitUpdate( decl->stmts.get(), labelTable ) );53 return mutate_field( decl, &FunctionDecl::stmts, 54 multiLevelExitUpdate( decl->stmts.get(), labelTable ) ); 57 55 } 58 56 59 void FixLabelsCore::previsit( const ast::Stmt * stmt ) {57 void FixLabelsCore::previsit( const Stmt * stmt ) { 60 58 if ( !stmt->labels.empty() ) { 61 59 setLabelsDef( stmt->labels, stmt ); … … 63 61 } 64 62 65 void FixLabelsCore::previsit( const ast::BranchStmt * stmt ) {63 void FixLabelsCore::previsit( const BranchStmt * stmt ) { 66 64 if ( !stmt->labels.empty() ) { 67 65 setLabelsDef( stmt->labels, stmt ); … … 72 70 } 73 71 74 void FixLabelsCore::previsit( const ast::LabelAddressExpr * expr ) {72 void FixLabelsCore::previsit( const LabelAddressExpr * expr ) { 75 73 assert( !expr->arg.empty() ); 76 74 setLabelsUsage( expr->arg ); … … 78 76 79 77 void FixLabelsCore::setLabelsDef( 80 const std::vector<ast::Label> & labels, const ast::Stmt * stmt ) {78 const std::vector<Label> & labels, const Stmt * stmt ) { 81 79 assert( !labels.empty() ); 82 80 assert( stmt ); … … 89 87 // Duplicate definition, this is an error. 90 88 SemanticError( label.location, 91 "Duplicate definition of label: " + label.name );89 "Duplicate definition of label: " + label.name ); 92 90 } else { 93 91 // Perviously used, but not defined until now. … … 98 96 99 97 // Label was used, if it is new add it to the table. 
100 void FixLabelsCore::setLabelsUsage( const ast::Label & label ) {98 void FixLabelsCore::setLabelsUsage( const Label & label ) { 101 99 if ( labelTable.find( label ) == labelTable.end() ) { 102 100 labelTable[ label ] = nullptr; … … 104 102 } 105 103 106 } // namespace 107 108 void fixLabels( ast::TranslationUnit & translationUnit ) { 109 ast::Pass<FixLabelsCore>::run( translationUnit ); 104 void fixLabels( TranslationUnit & translationUnit ) { 105 Pass<FixLabelsCore>::run( translationUnit ); 110 106 } 111 112 107 } // namespace ControlStruct 113 108 -
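Editor's note: the two SemanticErrors raised by this pass correspond to simple source mistakes. A minimal, hypothetical Cforall input that would trigger each message:

    void f() {
        goto L1;        /* "Use of undefined label: L1" (L1 never defined in f) */
      L2: ;
      L2: ;             /* "Duplicate definition of label: L2" */
    }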
src/ControlStruct/FixLabels.hpp
r97c215f rf5a51db 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 1 09:36:00 2021 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 1 09:40:00 202113 // Update Count : 011 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 22:18:43 2022 13 // Update Count : 2 14 14 // 15 15 … … 17 17 18 18 namespace ast { 19 19 class TranslationUnit; 20 20 } 21 21 22 22 namespace ControlStruct { 23 24 /// normalizes label definitions and generates multi-level exit labels 23 // normalizes label definitions and generates multi-level exit labels 25 24 void fixLabels( ast::TranslationUnit & translationUnit ); 26 27 25 } 28 26 -
src/ControlStruct/ForExprMutator.cc
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Mar 11 22:26:52 201913 // Update Count : 1 412 // Last Modified On : Tue Feb 1 09:26:12 2022 13 // Update Count : 16 14 14 // 15 15 … … 45 45 return hoist( forStmt, forStmt->initialization ); 46 46 } 47 Statement * ForExprMutator::postmutate( While Stmt * whileStmt ) {48 return hoist( while Stmt, whileStmt->initialization );47 Statement * ForExprMutator::postmutate( WhileDoStmt * whileDoStmt ) { 48 return hoist( whileDoStmt, whileDoStmt->initialization ); 49 49 } 50 50 } // namespace ControlStruct -
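Editor's note: the only change here is the WhileStmt to WhileDoStmt rename; the hoist() helper itself is unchanged and not shown. For context, a hedged sketch of what hoisting a loop initializer means (the surface syntax and the generated block structure are assumptions, not taken from this changeset):

    /* Cforall allows a declaration in the while initializer, e.g. */
    while ( int x = next(); x != 0 ) { use( x ); }

    /* hoisting presumably wraps the loop in a block so the declaration
       becomes an ordinary local variable before C code generation: */
    {
        int x = next();
        while ( x != 0 ) { use( x ); }
    }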
src/ControlStruct/ForExprMutator.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Aug 17 15:32:48 201713 // Update Count : 512 // Last Modified On : Tue Feb 1 09:18:50 2022 13 // Update Count : 7 14 14 // 15 15 … … 18 18 class IfStmt; 19 19 class ForStmt; 20 class While Stmt;20 class WhileDoStmt; 21 21 class Statement; 22 22 … … 24 24 class ForExprMutator { 25 25 public: 26 Statement * postmutate( IfStmt * );27 Statement * postmutate( ForStmt * );28 Statement * postmutate( WhileStmt * );26 Statement * postmutate( IfStmt * ); 27 Statement * postmutate( ForStmt * ); 28 Statement * postmutate( WhileDoStmt * ); 29 29 }; 30 30 } // namespace ControlStruct -
src/ControlStruct/LabelFixer.cc
r97c215f rf5a51db 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Tue Jan 21 10:32:00 202013 // Update Count : 16 011 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Feb 1 09:12:09 2022 13 // Update Count : 162 14 14 // 15 15 … … 27 27 28 28 namespace ControlStruct { 29 bool LabelFixer::Entry::insideLoop() { 30 return ( dynamic_cast< ForStmt * > ( definition ) || 31 dynamic_cast< WhileStmt * > ( definition ) ); 29 bool LabelFixer::Entry::insideLoop() { 30 return ( dynamic_cast< ForStmt * > ( definition ) || 31 dynamic_cast< WhileDoStmt * > ( definition ) ); 32 } 33 34 LabelFixer::LabelFixer( LabelGenerator * gen ) : generator ( gen ) { 35 if ( generator == 0 ) 36 generator = LabelGenerator::getGenerator(); 37 } 38 39 void LabelFixer::previsit( FunctionDecl * ) { 40 // need to go into a nested function in a fresh state 41 GuardValue( labelTable ); 42 labelTable.clear(); 43 } 44 45 void LabelFixer::postvisit( FunctionDecl * functionDecl ) { 46 PassVisitor<MultiLevelExitMutator> mlem( resolveJumps(), generator ); 47 // We start in the body so we can stop when we hit another FunctionDecl. 48 maybeMutate( functionDecl->statements, mlem ); 49 } 50 51 // prune to at most one label definition for each statement 52 void LabelFixer::previsit( Statement * stmt ) { 53 std::list< Label > &labels = stmt->get_labels(); 54 55 if ( ! labels.empty() ) { 56 // only remember one label for each statement 57 Label current = setLabelsDef( labels, stmt ); 58 } // if 59 } 60 61 void LabelFixer::previsit( BranchStmt * branchStmt ) { 62 previsit( ( Statement *)branchStmt ); 63 64 // for labeled branches, add an entry to the label table 65 Label target = branchStmt->get_target(); 66 if ( target != "" ) { 67 setLabelsUsg( target, branchStmt ); 68 } 69 } 70 71 void LabelFixer::previsit( LabelAddressExpr * addrExpr ) { 72 Label & target = addrExpr->arg; 73 assert( target != "" ); 74 setLabelsUsg( target, addrExpr ); 75 } 76 77 78 // Sets the definition of the labelTable entry to be the provided statement for every label in 79 // the list parameter. Happens for every kind of statement. 80 Label LabelFixer::setLabelsDef( std::list< Label > & llabel, Statement * definition ) { 81 assert( definition != 0 ); 82 assert( llabel.size() > 0 ); 83 84 for ( std::list< Label >::iterator i = llabel.begin(); i != llabel.end(); i++ ) { 85 Label & l = *i; 86 l.set_statement( definition ); // attach statement to the label to be used later 87 if ( labelTable.find( l ) == labelTable.end() ) { 88 // All labels on this statement need to use the same entry, 89 // so this should only be created once. 90 // undefined and unused until now, add an entry 91 labelTable[ l ] = new Entry( definition ); 92 } else if ( labelTable[ l ]->defined() ) { 93 // defined twice, error 94 SemanticError( l.get_statement()->location, 95 "Duplicate definition of label: " + l.get_name() ); 96 } else { 97 // used previously, but undefined until now -> link with this entry 98 // Question: Is changing objects important? 99 delete labelTable[ l ]; 100 labelTable[ l ] = new Entry( definition ); 101 } // if 102 } // for 103 104 // Produce one of the labels attached to this statement to be temporarily used as the 105 // canonical label. 
106 return labelTable[ llabel.front() ]->get_label(); 107 } 108 109 // A label was used, add it to the table if it isn't already there 110 template< typename UsageNode > 111 void LabelFixer::setLabelsUsg( Label orgValue, UsageNode *use ) { 112 assert( use != 0 ); 113 114 // add label with an unknown origin 115 if ( labelTable.find( orgValue ) == labelTable.end() ) { 116 labelTable[ orgValue ] = new Entry( 0 ); 117 } 118 } 119 120 // Builds a table that maps a label to its defining statement. 121 std::map<Label, Statement * > * LabelFixer::resolveJumps() throw ( SemanticErrorException ) { 122 std::map< Label, Statement * > *ret = new std::map< Label, Statement * >(); 123 for ( std::map< Label, Entry * >::iterator i = labelTable.begin(); i != labelTable.end(); ++i ) { 124 if ( ! i->second->defined() ) { 125 SemanticError( i->first.get_statement()->location, "Use of undefined label: " + i->first.get_name() ); 126 } 127 (*ret)[ i->first ] = i->second->get_definition(); 32 128 } 33 129 34 LabelFixer::LabelFixer( LabelGenerator * gen ) : generator ( gen ) { 35 if ( generator == 0 ) 36 generator = LabelGenerator::getGenerator(); 37 } 38 39 void LabelFixer::previsit( FunctionDecl * ) { 40 // need to go into a nested function in a fresh state 41 GuardValue( labelTable ); 42 labelTable.clear(); 43 } 44 45 void LabelFixer::postvisit( FunctionDecl * functionDecl ) { 46 PassVisitor<MultiLevelExitMutator> mlem( resolveJumps(), generator ); 47 // We start in the body so we can stop when we hit another FunctionDecl. 48 maybeMutate( functionDecl->statements, mlem ); 49 } 50 51 // prune to at most one label definition for each statement 52 void LabelFixer::previsit( Statement * stmt ) { 53 std::list< Label > &labels = stmt->get_labels(); 54 55 if ( ! labels.empty() ) { 56 // only remember one label for each statement 57 Label current = setLabelsDef( labels, stmt ); 58 } // if 59 } 60 61 void LabelFixer::previsit( BranchStmt * branchStmt ) { 62 previsit( ( Statement *)branchStmt ); 63 64 // for labeled branches, add an entry to the label table 65 Label target = branchStmt->get_target(); 66 if ( target != "" ) { 67 setLabelsUsg( target, branchStmt ); 68 } 69 } 70 71 void LabelFixer::previsit( LabelAddressExpr * addrExpr ) { 72 Label & target = addrExpr->arg; 73 assert( target != "" ); 74 setLabelsUsg( target, addrExpr ); 75 } 76 77 78 // Sets the definition of the labelTable entry to be the provided statement for every label in 79 // the list parameter. Happens for every kind of statement. 80 Label LabelFixer::setLabelsDef( std::list< Label > & llabel, Statement * definition ) { 81 assert( definition != 0 ); 82 assert( llabel.size() > 0 ); 83 84 for ( std::list< Label >::iterator i = llabel.begin(); i != llabel.end(); i++ ) { 85 Label & l = *i; 86 l.set_statement( definition ); // attach statement to the label to be used later 87 if ( labelTable.find( l ) == labelTable.end() ) { 88 // All labels on this statement need to use the same entry, 89 // so this should only be created once. 90 // undefined and unused until now, add an entry 91 labelTable[ l ] = new Entry( definition ); 92 } else if ( labelTable[ l ]->defined() ) { 93 // defined twice, error 94 SemanticError( l.get_statement()->location, 95 "Duplicate definition of label: " + l.get_name() ); 96 } else { 97 // used previously, but undefined until now -> link with this entry 98 // Question: Is changing objects important? 
99 delete labelTable[ l ]; 100 labelTable[ l ] = new Entry( definition ); 101 } // if 102 } // for 103 104 // Produce one of the labels attached to this statement to be temporarily used as the 105 // canonical label. 106 return labelTable[ llabel.front() ]->get_label(); 107 } 108 109 // A label was used, add it to the table if it isn't already there 110 template< typename UsageNode > 111 void LabelFixer::setLabelsUsg( Label orgValue, UsageNode *use ) { 112 assert( use != 0 ); 113 114 // add label with an unknown origin 115 if ( labelTable.find( orgValue ) == labelTable.end() ) { 116 labelTable[ orgValue ] = new Entry( 0 ); 117 } 118 } 119 120 // Builds a table that maps a label to its defining statement. 121 std::map<Label, Statement * > * LabelFixer::resolveJumps() throw ( SemanticErrorException ) { 122 std::map< Label, Statement * > *ret = new std::map< Label, Statement * >(); 123 for ( std::map< Label, Entry * >::iterator i = labelTable.begin(); i != labelTable.end(); ++i ) { 124 if ( ! i->second->defined() ) { 125 SemanticError( i->first.get_statement()->location, "Use of undefined label: " + i->first.get_name() ); 126 } 127 (*ret)[ i->first ] = i->second->get_definition(); 128 } 129 130 return ret; 131 } 130 return ret; 131 } 132 132 } // namespace ControlStruct 133 133 -
src/ControlStruct/LabelFixer.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jul 22 09:17:24 201713 // Update Count : 3 412 // Last Modified On : Mon Jan 31 22:28:04 2022 13 // Update Count : 35 14 14 // 15 15 … … 26 26 27 27 namespace ControlStruct { 28 /// normalizes label definitions and generates multi-level exit labels29 28 // normalizes label definitions and generates multi-level exit labels 29 class LabelGenerator; 30 30 31 32 33 31 class LabelFixer final : public WithGuards { 32 public: 33 LabelFixer( LabelGenerator *gen = 0 ); 34 34 35 35 std::map < Label, Statement * > *resolveJumps() throw ( SemanticErrorException ); 36 36 37 38 39 37 // Declarations 38 void previsit( FunctionDecl *functionDecl ); 39 void postvisit( FunctionDecl *functionDecl ); 40 40 41 42 43 41 // Statements 42 void previsit( Statement *stmt ); 43 void previsit( BranchStmt *branchStmt ); 44 44 45 46 45 // Expressions 46 void previsit( LabelAddressExpr *addrExpr ); 47 47 48 Label setLabelsDef( std::list< Label > &, Statement *definition ); 49 template< typename UsageNode > 50 void setLabelsUsg( Label, UsageNode *usage = 0 ); 48 Label setLabelsDef( std::list< Label > &, Statement *definition ); 49 template< typename UsageNode > 50 void setLabelsUsg( Label, UsageNode *usage = 0 ); 51 52 private: 53 class Entry { 54 public: 55 Entry( Statement *to ) : definition( to ) {} 56 bool defined() { return ( definition != 0 ); } 57 bool insideLoop(); 58 59 Label get_label() const { return label; } 60 void set_label( Label lab ) { label = lab; } 61 62 Statement *get_definition() const { return definition; } 63 void set_definition( Statement *def ) { definition = def; } 51 64 52 65 private: 53 class Entry { 54 public: 55 Entry( Statement *to ) : definition( to ) {} 56 bool defined() { return ( definition != 0 ); } 57 bool insideLoop(); 66 Label label; 67 Statement *definition; 68 }; 58 69 59 Label get_label() const { return label; } 60 void set_label( Label lab ) { label = lab; } 61 62 Statement *get_definition() const { return definition; } 63 void set_definition( Statement *def ) { definition = def; } 64 65 private: 66 Label label; 67 Statement *definition; 68 }; 69 70 std::map < Label, Entry *> labelTable; 71 LabelGenerator *generator; 72 }; 70 std::map < Label, Entry *> labelTable; 71 LabelGenerator *generator; 72 }; 73 73 } // namespace ControlStruct 74 74 -
src/ControlStruct/LabelGenerator.cc
r97c215f rf5a51db 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 10:18:00 202113 // Update Count : 1711 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 22:30:26 2022 13 // Update Count : 28 14 14 // 15 15 … … 17 17 #include <sstream> // for ostringstream 18 18 #include <list> // for list 19 using namespace std; 19 20 20 21 #include "LabelGenerator.h" 21 22 22 #include "AST/Attribute.hpp"23 #include "AST/Label.hpp"24 #include "AST/Stmt.hpp"25 23 #include "SynTree/Attribute.h" // for Attribute 26 24 #include "SynTree/Label.h" // for Label, operator<< … … 28 26 29 27 namespace ControlStruct { 30 31 28 int LabelGenerator::current = 0; 32 29 LabelGenerator * LabelGenerator::labelGenerator = nullptr; 33 30 34 LabelGenerator * LabelGenerator::getGenerator() { 35 if ( LabelGenerator::labelGenerator == 0 ) 36 LabelGenerator::labelGenerator = new LabelGenerator(); 37 return labelGenerator; 38 } 39 40 Label LabelGenerator::newLabel( std::string suffix, Statement * stmt ) { 41 std::ostringstream os; 42 os << "__L" << current++ << "__" << suffix; 43 if ( stmt && ! stmt->get_labels().empty() ) { 44 os << "_" << stmt->get_labels().front() << "__"; 45 } // if 46 std::string ret = os.str(); 47 Label l( ret ); 48 l.get_attributes().push_back( new Attribute("unused") ); 49 return l; 50 } 51 52 ast::Label LabelGenerator::newLabel( 53 const std::string & suffix, const ast::Stmt * stmt ) { 54 assert( stmt ); 55 56 std::ostringstream os; 57 os << "__L" << current++ << "__" << suffix; 58 if ( stmt && !stmt->labels.empty() ) { 59 os << "_" << stmt->labels.front() << "__"; 60 } 61 ast::Label ret_label( stmt->location, os.str() ); 62 ret_label.attributes.push_back( new ast::Attribute( "unused" ) ); 63 return ret_label; 31 LabelGenerator * LabelGenerator::getGenerator() { 32 if ( LabelGenerator::labelGenerator == 0 ) 33 LabelGenerator::labelGenerator = new LabelGenerator(); 34 return labelGenerator; 64 35 } 65 36 37 Label LabelGenerator::newLabel( string suffix, Statement * stmt ) { 38 ostringstream os; 39 os << "__L_OLD" << current++ << "__" << suffix; 40 if ( stmt && ! stmt->get_labels().empty() ) { 41 os << "_" << stmt->get_labels().front() << "__"; 42 } // if 43 string ret = os.str(); 44 Label l( ret ); 45 l.get_attributes().push_back( new Attribute( "unused" ) ); 46 return l; 47 } 66 48 } // namespace ControlStruct 67 49 68 50 // Local Variables: // 69 // tab-width: 4 //70 51 // mode: c++ // 71 // compile-command: "make install" //72 52 // End: // -
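Editor's note: with the "__L_OLD" prefix, names minted by this old-AST generator can no longer collide with those from the new-AST generator now in LabelGeneratorNew.hpp. As an illustration (the counter value, the "loopBreak" suffix, and the user label "outer" are made up), newLabel( "loopBreak", stmt ) on a statement labelled outer would now produce a label of the form:

    /* generated by the old-AST LabelGenerator; the Label also carries an
       "unused" attribute so unreferenced copies do not provoke warnings */
    __L_OLD42__loopBreak_outer__: ;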
src/ControlStruct/LabelGenerator.h
r97c215f rf5a51db 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 10:16:00 202113 // Update Count : 811 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 22:30:10 2022 13 // Update Count : 16 14 14 // 15 15 … … 21 21 22 22 class Statement; 23 23 24 namespace ast { 24 25 25 class Stmt; 26 class Label; 26 27 } 27 28 28 29 namespace ControlStruct { 29 30 30 class LabelGenerator { 31 31 static int current; 32 32 static LabelGenerator *labelGenerator; 33 protected:33 protected: 34 34 LabelGenerator() {} 35 public:35 public: 36 36 static LabelGenerator *getGenerator(); 37 37 static Label newLabel(std::string suffix, Statement * stmt = nullptr); 38 static ast::Label newLabel( const std::string&, const ast::Stmt * );39 static void reset() { current = 0; }40 static void rewind() { current--; }41 38 }; 42 43 39 } // namespace ControlStruct 44 40 -
src/ControlStruct/MLEMutator.cc
r97c215f rf5a51db 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Wed Jan 22 11:50:00 202013 // Update Count : 22 311 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 20:18:57 2022 13 // Update Count : 227 14 14 // 15 15 … … 39 39 namespace { 40 40 bool isLoop( const MultiLevelExitMutator::Entry & e ) { 41 return dynamic_cast< While Stmt * >( e.get_controlStructure() )41 return dynamic_cast< WhileDoStmt * >( e.get_controlStructure() ) 42 42 || dynamic_cast< ForStmt * >( e.get_controlStructure() ); 43 43 } … … 136 136 } 137 137 } 138 assertf( false, "C ould not find label '%s' on statement %s",138 assertf( false, "CFA internal error: could not find label '%s' on statement %s", 139 139 originalTarget.get_name().c_str(), toString( stmt ).c_str() ); 140 140 } … … 295 295 } 296 296 297 void MultiLevelExitMutator::premutate( While Stmt * whileStmt ) {298 return prehandleLoopStmt( while Stmt );297 void MultiLevelExitMutator::premutate( WhileDoStmt * whileDoStmt ) { 298 return prehandleLoopStmt( whileDoStmt ); 299 299 } 300 300 … … 303 303 } 304 304 305 Statement * MultiLevelExitMutator::postmutate( While Stmt * whileStmt ) {306 return posthandleLoopStmt( while Stmt );305 Statement * MultiLevelExitMutator::postmutate( WhileDoStmt * whileDoStmt ) { 306 return posthandleLoopStmt( whileDoStmt ); 307 307 } 308 308 … … 395 395 } 396 396 assert( ! enclosingControlStructures.empty() ); 397 assertf( dynamic_cast<SwitchStmt *>( enclosingControlStructures.back().get_controlStructure() ), "Control structure enclosing a case clause must be a switch, but is: %s", toCString( enclosingControlStructures.back().get_controlStructure() ) ); 397 assertf( dynamic_cast<SwitchStmt *>( enclosingControlStructures.back().get_controlStructure() ), 398 "CFA internal error: control structure enclosing a case clause must be a switch, but is: %s", 399 toCString( enclosingControlStructures.back().get_controlStructure() ) ); 398 400 if ( caseStmt->isDefault() ) { 399 401 if ( enclosingControlStructures.back().isFallDefaultUsed() ) { -
src/ControlStruct/MLEMutator.h
r97c215f rf5a51db 9 9 // Author : Rodolfo G. Esteves 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Wed Jan 22 11:50:00 202013 // Update Count : 4811 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Feb 1 09:27:24 2022 13 // Update Count : 50 14 14 // 15 15 … … 42 42 void premutate( CompoundStmt *cmpndStmt ); 43 43 Statement * postmutate( BranchStmt *branchStmt ) throw ( SemanticErrorException ); 44 void premutate( While Stmt *whileStmt );45 Statement * postmutate( While Stmt *whileStmt );44 void premutate( WhileDoStmt *whileDoStmt ); 45 Statement * postmutate( WhileDoStmt *whileDoStmt ); 46 46 void premutate( ForStmt *forStmt ); 47 47 Statement * postmutate( ForStmt *forStmt ); … … 67 67 stmt( stmt ), breakExit( breakExit ), contExit( contExit ) {} 68 68 69 explicit Entry( While Stmt *stmt, Label breakExit, Label contExit ) :69 explicit Entry( WhileDoStmt *stmt, Label breakExit, Label contExit ) : 70 70 stmt( stmt ), breakExit( breakExit ), contExit( contExit ) {} 71 71 -
src/ControlStruct/MultiLevelExit.cpp
r97c215f rf5a51db 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 1 13:48:00 2021 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 10:56:00 202113 // Update Count : 211 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 23:07:54 2022 13 // Update Count : 33 14 14 // 15 15 … … 18 18 #include "AST/Pass.hpp" 19 19 #include "AST/Stmt.hpp" 20 #include " ControlStruct/LabelGenerator.h"20 #include "LabelGeneratorNew.hpp" 21 21 22 22 #include <set> 23 using namespace std; 24 using namespace ast; 23 25 24 26 namespace ControlStruct { 25 26 namespace {27 28 27 class Entry { 29 public:30 const ast::Stmt * stmt;31 private:28 public: 29 const Stmt * stmt; 30 private: 32 31 // Organized like a manual ADT. Avoids creating a bunch of dead data. 33 32 struct Target { 34 ast::Label label;33 Label label; 35 34 bool used = false; 36 Target( const ast::Label & label ) : label( label ) {}35 Target( const Label & label ) : label( label ) {} 37 36 Target() : label( CodeLocation() ) {} 38 37 }; … … 41 40 42 41 enum Kind { 43 ForStmt , WhileStmt, CompoundStmt, IfStmt, CaseStmt, SwitchStmt, TryStmt42 ForStmtK, WhileDoStmtK, CompoundStmtK, IfStmtK, CaseStmtK, SwitchStmtK, TryStmtK 44 43 } kind; 45 44 46 45 bool fallDefaultValid = true; 47 46 48 static ast::Label & useTarget( Target & target ) {47 static Label & useTarget( Target & target ) { 49 48 target.used = true; 50 49 return target.label; 51 50 } 52 53 public: 54 Entry( const ast::ForStmt * stmt, ast::Label breakExit, ast::Label contExit ) : 55 stmt( stmt ), firstTarget( breakExit ), secondTarget( contExit ), kind( ForStmt ) {} 56 Entry( const ast::WhileStmt * stmt, ast::Label breakExit, ast::Label contExit ) : 57 stmt( stmt ), firstTarget( breakExit ), secondTarget( contExit ), kind( WhileStmt ) {} 58 Entry( const ast::CompoundStmt *stmt, ast::Label breakExit ) : 59 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( CompoundStmt ) {} 60 Entry( const ast::IfStmt *stmt, ast::Label breakExit ) : 61 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( IfStmt ) {} 62 Entry( const ast::CaseStmt *stmt, ast::Label fallExit ) : 63 stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseStmt ) {} 64 Entry( const ast::SwitchStmt *stmt, ast::Label breakExit, ast::Label fallDefaultExit ) : 65 stmt( stmt ), firstTarget( breakExit ), secondTarget( fallDefaultExit ), kind( SwitchStmt ) {} 66 Entry( const ast::TryStmt *stmt, ast::Label breakExit ) : 67 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( TryStmt ) {} 68 69 bool isContTarget() const { return kind <= WhileStmt; } 70 bool isBreakTarget() const { return CaseStmt != kind; } 71 bool isFallTarget() const { return CaseStmt == kind; } 72 bool isFallDefaultTarget() const { return SwitchStmt == kind; } 73 74 ast::Label useContExit() { assert( kind <= WhileStmt ); return useTarget(secondTarget); } 75 ast::Label useBreakExit() { assert( CaseStmt != kind ); return useTarget(firstTarget); } 76 ast::Label useFallExit() { assert( CaseStmt == kind ); return useTarget(firstTarget); } 77 ast::Label useFallDefaultExit() { assert( SwitchStmt == kind ); return useTarget(secondTarget); } 78 79 bool isContUsed() const { assert( kind <= WhileStmt ); return secondTarget.used; } 80 bool isBreakUsed() const { assert( CaseStmt != kind ); return firstTarget.used; } 81 bool isFallUsed() const { assert( CaseStmt == kind ); return firstTarget.used; } 82 bool isFallDefaultUsed() const { assert( SwitchStmt == kind ); return secondTarget.used; } 51 public: 
52 Entry( const ForStmt * stmt, Label breakExit, Label contExit ) : 53 stmt( stmt ), firstTarget( breakExit ), secondTarget( contExit ), kind( ForStmtK ) {} 54 Entry( const WhileDoStmt * stmt, Label breakExit, Label contExit ) : 55 stmt( stmt ), firstTarget( breakExit ), secondTarget( contExit ), kind( WhileDoStmtK ) {} 56 Entry( const CompoundStmt *stmt, Label breakExit ) : 57 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( CompoundStmtK ) {} 58 Entry( const IfStmt *stmt, Label breakExit ) : 59 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( IfStmtK ) {} 60 Entry( const CaseStmt *stmt, Label fallExit ) : 61 stmt( stmt ), firstTarget( fallExit ), secondTarget(), kind( CaseStmtK ) {} 62 Entry( const SwitchStmt *stmt, Label breakExit, Label fallDefaultExit ) : 63 stmt( stmt ), firstTarget( breakExit ), secondTarget( fallDefaultExit ), kind( SwitchStmtK ) {} 64 Entry( const TryStmt *stmt, Label breakExit ) : 65 stmt( stmt ), firstTarget( breakExit ), secondTarget(), kind( TryStmtK ) {} 66 67 bool isContTarget() const { return kind <= WhileDoStmtK; } 68 bool isBreakTarget() const { return kind != CaseStmtK; } 69 bool isFallTarget() const { return kind == CaseStmtK; } 70 bool isFallDefaultTarget() const { return kind == SwitchStmtK; } 71 72 // These routines set a target as being "used" by a BranchStmt 73 Label useContExit() { assert( kind <= WhileDoStmtK ); return useTarget(secondTarget); } 74 Label useBreakExit() { assert( kind != CaseStmtK ); return useTarget(firstTarget); } 75 Label useFallExit() { assert( kind == CaseStmtK ); return useTarget(firstTarget); } 76 Label useFallDefaultExit() { assert( kind == SwitchStmtK ); return useTarget(secondTarget); } 77 78 // These routines check if a specific label for a statement is used by a BranchStmt 79 bool isContUsed() const { assert( kind <= WhileDoStmtK ); return secondTarget.used; } 80 bool isBreakUsed() const { assert( kind != CaseStmtK ); return firstTarget.used; } 81 bool isFallUsed() const { assert( kind == CaseStmtK ); return firstTarget.used; } 82 bool isFallDefaultUsed() const { assert( kind == SwitchStmtK ); return secondTarget.used; } 83 83 void seenDefault() { fallDefaultValid = false; } 84 84 bool isFallDefaultValid() const { return fallDefaultValid; } 85 85 }; 86 86 87 // Helper predicates used in std::find_if calls (it doesn't take methods):87 // Helper predicates used in find_if calls (it doesn't take methods): 88 88 bool isBreakTarget( const Entry & entry ) { 89 89 return entry.isBreakTarget(); … … 103 103 104 104 struct MultiLevelExitCore final : 105 public ast::WithVisitorRef<MultiLevelExitCore>,106 public ast::WithShortCircuiting, public ast::WithGuards {105 public WithVisitorRef<MultiLevelExitCore>, 106 public WithShortCircuiting, public WithGuards { 107 107 MultiLevelExitCore( const LabelToStmt & lt ); 108 108 109 void previsit( const ast::FunctionDecl * );110 111 const ast::CompoundStmt * previsit( const ast::CompoundStmt * );112 const ast::BranchStmt * postvisit( const ast::BranchStmt * );113 void previsit( const ast::WhileStmt * );114 const ast::WhileStmt * postvisit( const ast::WhileStmt * );115 void previsit( const ast::ForStmt * );116 const ast::ForStmt * postvisit( const ast::ForStmt * );117 const ast::CaseStmt * previsit( const ast::CaseStmt * );118 void previsit( const ast::IfStmt * );119 const ast::IfStmt * postvisit( const ast::IfStmt * );120 void previsit( const ast::SwitchStmt * );121 const ast::SwitchStmt * postvisit( const ast::SwitchStmt * );122 void previsit( const 
ast::ReturnStmt * );123 void previsit( const ast::TryStmt * );124 void postvisit( const ast::TryStmt * );125 void previsit( const ast::FinallyStmt * );126 127 const ast::Stmt * mutateLoop( const ast::Stmt * body, Entry& );109 void previsit( const FunctionDecl * ); 110 111 const CompoundStmt * previsit( const CompoundStmt * ); 112 const BranchStmt * postvisit( const BranchStmt * ); 113 void previsit( const WhileDoStmt * ); 114 const WhileDoStmt * postvisit( const WhileDoStmt * ); 115 void previsit( const ForStmt * ); 116 const ForStmt * postvisit( const ForStmt * ); 117 const CaseStmt * previsit( const CaseStmt * ); 118 void previsit( const IfStmt * ); 119 const IfStmt * postvisit( const IfStmt * ); 120 void previsit( const SwitchStmt * ); 121 const SwitchStmt * postvisit( const SwitchStmt * ); 122 void previsit( const ReturnStmt * ); 123 void previsit( const TryStmt * ); 124 void postvisit( const TryStmt * ); 125 void previsit( const FinallyStmt * ); 126 127 const Stmt * mutateLoop( const Stmt * body, Entry& ); 128 128 129 129 const LabelToStmt & target_table; 130 s td::set<ast::Label> fallthrough_labels;131 std::vector<Entry> enclosing_control_structures;132 ast::Label break_label;130 set<Label> fallthrough_labels; 131 vector<Entry> enclosing_control_structures; 132 Label break_label; 133 133 bool inFinally; 134 134 … … 138 138 const LoopNode * posthandleLoopStmt( const LoopNode * loopStmt ); 139 139 140 std::list<ast::ptr<ast::Stmt>> fixBlock(141 const std::list<ast::ptr<ast::Stmt>> & kids, bool caseClause );140 list<ptr<Stmt>> fixBlock( 141 const list<ptr<Stmt>> & kids, bool caseClause ); 142 142 143 143 template<typename UnaryPredicate> 144 144 auto findEnclosingControlStructure( UnaryPredicate pred ) { 145 return std::find_if( enclosing_control_structures.rbegin(),146 enclosing_control_structures.rend(), pred );145 return find_if( enclosing_control_structures.rbegin(), 146 enclosing_control_structures.rend(), pred ); 147 147 } 148 148 }; 149 149 150 ast::NullStmt * labelledNullStmt(151 const CodeLocation & cl, const ast::Label & label ) {152 return new ast::NullStmt( cl, std::vector<ast::Label>{ label } );150 NullStmt * labelledNullStmt( 151 const CodeLocation & cl, const Label & label ) { 152 return new NullStmt( cl, vector<Label>{ label } ); 153 153 } 154 154 … … 158 158 {} 159 159 160 void MultiLevelExitCore::previsit( const ast::FunctionDecl * ) {160 void MultiLevelExitCore::previsit( const FunctionDecl * ) { 161 161 visit_children = false; 162 162 } 163 163 164 const ast::CompoundStmt * MultiLevelExitCore::previsit(165 const ast::CompoundStmt * stmt ) {164 const CompoundStmt * MultiLevelExitCore::previsit( 165 const CompoundStmt * stmt ) { 166 166 visit_children = false; 167 bool isLabeled = !stmt->labels.empty(); 167 168 // if the stmt is labelled then generate a label to check in postvisit if the label is used 169 bool isLabeled = ! stmt->labels.empty(); 168 170 if ( isLabeled ) { 169 ast::Label breakLabel = LabelGenerator::newLabel( "blockBreak", stmt );171 Label breakLabel = newLabel( "blockBreak", stmt ); 170 172 enclosing_control_structures.emplace_back( stmt, breakLabel ); 171 173 GuardAction( [this]() { enclosing_control_structures.pop_back(); } ); 172 174 } 173 175 174 auto mutStmt = ast::mutate( stmt );176 auto mutStmt = mutate( stmt ); 175 177 // A child statement may set the break label. 176 mutStmt->kids = std::move( fixBlock( stmt->kids, false ) );178 mutStmt->kids = move( fixBlock( stmt->kids, false ) ); 177 179 178 180 if ( isLabeled ) { 179 assert( ! 
enclosing_control_structures.empty() );181 assert( ! enclosing_control_structures.empty() ); 180 182 Entry & entry = enclosing_control_structures.back(); 181 if ( ! entry.useBreakExit().empty() ) {183 if ( ! entry.useBreakExit().empty() ) { 182 184 break_label = entry.useBreakExit(); 183 185 } … … 187 189 188 190 size_t getUnusedIndex( 189 const ast::Stmt * stmt, const ast::Label & originalTarget ) {191 const Stmt * stmt, const Label & originalTarget ) { 190 192 const size_t size = stmt->labels.size(); 191 193 192 // If the label is empty, we can skip adding the unused attribute:193 194 // If the label is empty, do not add unused attribute. 195 if ( originalTarget.empty() ) return size; 194 196 195 197 // Search for a label that matches the originalTarget. 196 198 for ( size_t i = 0 ; i < size ; ++i ) { 197 const ast::Label & label = stmt->labels[i];199 const Label & label = stmt->labels[i]; 198 200 if ( label == originalTarget ) { 199 for ( const ast::Attribute * attr : label.attributes ) {201 for ( const Attribute * attr : label.attributes ) { 200 202 if ( attr->name == "unused" ) return size; 201 203 } … … 203 205 } 204 206 } 205 assertf( false, "C ould not find label '%s' on statement %s",206 originalTarget.name.c_str(), toString( stmt ).c_str() );207 } 208 209 const ast::Stmt * addUnused(210 const ast::Stmt * stmt, const ast::Label & originalTarget ) {207 assertf( false, "CFA internal error: could not find label '%s' on statement %s", 208 originalTarget.name.c_str(), toString( stmt ).c_str() ); 209 } 210 211 const Stmt * addUnused( 212 const Stmt * stmt, const Label & originalTarget ) { 211 213 size_t i = getUnusedIndex( stmt, originalTarget ); 212 214 if ( i == stmt->labels.size() ) { 213 215 return stmt; 214 216 } 215 ast::Stmt * mutStmt = ast::mutate( stmt );216 mutStmt->labels[i].attributes.push_back( new ast::Attribute( "unused" ) );217 Stmt * mutStmt = mutate( stmt ); 218 mutStmt->labels[i].attributes.push_back( new Attribute( "unused" ) ); 217 219 return mutStmt; 218 220 } 219 221 220 const ast::BranchStmt * MultiLevelExitCore::postvisit( const ast::BranchStmt * stmt ) { 221 std::vector<Entry>::reverse_iterator targetEntry = 222 // This routine updates targets on enclosing control structures to indicate which 223 // label is used by the BranchStmt that is passed 224 const BranchStmt * MultiLevelExitCore::postvisit( const BranchStmt * stmt ) { 225 vector<Entry>::reverse_iterator targetEntry = 222 226 enclosing_control_structures.rend(); 227 228 // Labels on different stmts require different approaches to access 223 229 switch ( stmt->kind ) { 224 case ast::BranchStmt::Goto:230 case BranchStmt::Goto: 225 231 return stmt; 226 case ast::BranchStmt::Continue: 227 case ast::BranchStmt::Break: { 228 bool isContinue = stmt->kind == ast::BranchStmt::Continue; 229 // Handle unlabeled break and continue. 230 if ( stmt->target.empty() ) { 231 if ( isContinue ) { 232 targetEntry = findEnclosingControlStructure( isContinueTarget ); 233 } else { 234 if ( enclosing_control_structures.empty() ) { 235 SemanticError( stmt->location, 236 "'break' outside a loop, 'switch', or labelled block" ); 237 } 238 targetEntry = findEnclosingControlStructure( isBreakTarget ); 239 } 240 // Handle labeled break and continue. 241 } else { 242 // Lookup label in table to find attached control structure. 243 targetEntry = findEnclosingControlStructure( 244 [ targetStmt = target_table.at(stmt->target) ](auto entry){ 245 return entry.stmt == targetStmt; 246 } ); 247 } 248 // Ensure that selected target is valid. 
249 if ( targetEntry == enclosing_control_structures.rend() || ( isContinue && !isContinueTarget( *targetEntry ) ) ) { 250 SemanticError( 251 stmt->location, 252 toString( (isContinue ? "'continue'" : "'break'"), 253 " target must be an enclosing ", 254 (isContinue ? "loop: " : "control structure: "), 255 stmt->originalTarget ) ); 256 } 257 break; 258 } 259 case ast::BranchStmt::FallThrough: { 260 targetEntry = findEnclosingControlStructure( isFallthroughTarget ); 261 // Check that target is valid. 262 if ( targetEntry == enclosing_control_structures.rend() ) { 263 SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" ); 264 } 265 if ( !stmt->target.empty() ) { 266 // Labelled fallthrough: target must be a valid fallthough label. 267 if ( !fallthrough_labels.count( stmt->target ) ) { 268 SemanticError( stmt->location, toString( "'fallthrough' target must be a later case statement: ", stmt->originalTarget ) ); 269 } 270 return new ast::BranchStmt( 271 stmt->location, ast::BranchStmt::Goto, stmt->originalTarget ); 272 } 273 break; 274 } 275 case ast::BranchStmt::FallThroughDefault: { 276 targetEntry = findEnclosingControlStructure( isFallthroughDefaultTarget ); 277 278 // Check that this is in a switch or choose statement. 279 if ( targetEntry == enclosing_control_structures.rend() ) { 280 SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" ); 281 } 282 283 // Check that the switch or choose has a default clause. 284 auto switchStmt = strict_dynamic_cast< const ast::SwitchStmt * >( 285 targetEntry->stmt ); 286 bool foundDefault = false; 287 for ( auto subStmt : switchStmt->stmts ) { 288 const ast::CaseStmt * caseStmt = subStmt.strict_as<ast::CaseStmt>(); 289 if ( caseStmt->isDefault() ) { 290 foundDefault = true; 291 break; 292 } 293 } 294 if ( !foundDefault ) { 295 SemanticError( stmt->location, "'fallthrough default' must be enclosed in a 'switch' or 'choose' control structure with a 'default' clause" ); 296 } 297 break; 298 } 299 default: 232 case BranchStmt::Continue: 233 case BranchStmt::Break: { 234 bool isContinue = stmt->kind == BranchStmt::Continue; 235 // Handle unlabeled break and continue. 236 if ( stmt->target.empty() ) { 237 if ( isContinue ) { 238 targetEntry = findEnclosingControlStructure( isContinueTarget ); 239 } else { 240 if ( enclosing_control_structures.empty() ) { 241 SemanticError( stmt->location, 242 "'break' outside a loop, 'switch', or labelled block" ); 243 } 244 targetEntry = findEnclosingControlStructure( isBreakTarget ); 245 } 246 // Handle labeled break and continue. 247 } else { 248 // Lookup label in table to find attached control structure. 249 targetEntry = findEnclosingControlStructure( 250 [ targetStmt = target_table.at(stmt->target) ](auto entry){ 251 return entry.stmt == targetStmt; 252 } ); 253 } 254 // Ensure that selected target is valid. 255 if ( targetEntry == enclosing_control_structures.rend() || ( isContinue && ! isContinueTarget( *targetEntry ) ) ) { 256 SemanticError( stmt->location, toString( (isContinue ? "'continue'" : "'break'"), 257 " target must be an enclosing ", (isContinue ? "loop: " : "control structure: "), 258 stmt->originalTarget ) ); 259 } 260 break; 261 } 262 // handle fallthrough in case/switch stmts 263 case BranchStmt::FallThrough: { 264 targetEntry = findEnclosingControlStructure( isFallthroughTarget ); 265 // Check that target is valid. 
266 if ( targetEntry == enclosing_control_structures.rend() ) { 267 SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" ); 268 } 269 if ( ! stmt->target.empty() ) { 270 // Labelled fallthrough: target must be a valid fallthough label. 271 if ( ! fallthrough_labels.count( stmt->target ) ) { 272 SemanticError( stmt->location, toString( "'fallthrough' target must be a later case statement: ", 273 stmt->originalTarget ) ); 274 } 275 return new BranchStmt( stmt->location, BranchStmt::Goto, stmt->originalTarget ); 276 } 277 break; 278 } 279 case BranchStmt::FallThroughDefault: { 280 targetEntry = findEnclosingControlStructure( isFallthroughDefaultTarget ); 281 282 // Check if in switch or choose statement. 283 if ( targetEntry == enclosing_control_structures.rend() ) { 284 SemanticError( stmt->location, "'fallthrough' must be enclosed in a 'switch' or 'choose'" ); 285 } 286 287 // Check if switch or choose has default clause. 288 auto switchStmt = strict_dynamic_cast< const SwitchStmt * >( targetEntry->stmt ); 289 bool foundDefault = false; 290 for ( auto subStmt : switchStmt->stmts ) { 291 const CaseStmt * caseStmt = subStmt.strict_as<CaseStmt>(); 292 if ( caseStmt->isDefault() ) { 293 foundDefault = true; 294 break; 295 } 296 } 297 if ( ! foundDefault ) { 298 SemanticError( stmt->location, "'fallthrough default' must be enclosed in a 'switch' or 'choose'" 299 "control structure with a 'default' clause" ); 300 } 301 break; 302 } 303 default: 300 304 assert( false ); 301 305 } 302 306 303 // Branch error checks: get the appropriate label name: 304 // (This label will always be replaced.) 305 ast::Label exitLabel( CodeLocation(), "" ); 307 // Branch error checks: get the appropriate label name, which is always replaced. 308 Label exitLabel( CodeLocation(), "" ); 306 309 switch ( stmt->kind ) { 307 case ast::BranchStmt::Break:308 assert( ! targetEntry->useBreakExit().empty() );310 case BranchStmt::Break: 311 assert( ! targetEntry->useBreakExit().empty() ); 309 312 exitLabel = targetEntry->useBreakExit(); 310 313 break; 311 case ast::BranchStmt::Continue:312 assert( ! targetEntry->useContExit().empty() );314 case BranchStmt::Continue: 315 assert( ! targetEntry->useContExit().empty() ); 313 316 exitLabel = targetEntry->useContExit(); 314 317 break; 315 case ast::BranchStmt::FallThrough:316 assert( ! targetEntry->useFallExit().empty() );318 case BranchStmt::FallThrough: 319 assert( ! targetEntry->useFallExit().empty() ); 317 320 exitLabel = targetEntry->useFallExit(); 318 321 break; 319 case ast::BranchStmt::FallThroughDefault:320 assert( ! targetEntry->useFallDefaultExit().empty() );322 case BranchStmt::FallThroughDefault: 323 assert( ! targetEntry->useFallDefaultExit().empty() ); 321 324 exitLabel = targetEntry->useFallDefaultExit(); 322 325 // Check that fallthrough default comes before the default clause. 323 if ( !targetEntry->isFallDefaultValid() ) { 324 SemanticError( stmt->location, 325 "'fallthrough default' must precede the 'default' clause" ); 326 if ( ! 
targetEntry->isFallDefaultValid() ) { 327 SemanticError( stmt->location, "'fallthrough default' must precede the 'default' clause" ); 326 328 } 327 329 break; 328 default:330 default: 329 331 assert(0); 330 332 } … … 333 335 targetEntry->stmt = addUnused( targetEntry->stmt, stmt->originalTarget ); 334 336 335 // Replace this with agoto to make later passes more uniform.336 return new ast::BranchStmt( stmt->location, ast::BranchStmt::Goto, exitLabel );337 } 338 339 void MultiLevelExitCore::previsit( const ast::WhileStmt * stmt ) {337 // Replace with goto to make later passes more uniform. 338 return new BranchStmt( stmt->location, BranchStmt::Goto, exitLabel ); 339 } 340 341 void MultiLevelExitCore::previsit( const WhileDoStmt * stmt ) { 340 342 return prehandleLoopStmt( stmt ); 341 343 } 342 344 343 const ast::WhileStmt * MultiLevelExitCore::postvisit( const ast::WhileStmt * stmt ) {345 const WhileDoStmt * MultiLevelExitCore::postvisit( const WhileDoStmt * stmt ) { 344 346 return posthandleLoopStmt( stmt ); 345 347 } 346 348 347 void MultiLevelExitCore::previsit( const ast::ForStmt * stmt ) {349 void MultiLevelExitCore::previsit( const ForStmt * stmt ) { 348 350 return prehandleLoopStmt( stmt ); 349 351 } 350 352 351 const ast::ForStmt * MultiLevelExitCore::postvisit( const ast::ForStmt * stmt ) {353 const ForStmt * MultiLevelExitCore::postvisit( const ForStmt * stmt ) { 352 354 return posthandleLoopStmt( stmt ); 353 355 } … … 355 357 // Mimic what the built-in push_front would do anyways. It is O(n). 356 358 void push_front( 357 std::vector<ast::ptr<ast::Stmt>> & vec, const ast::Stmt * element ) {359 vector<ptr<Stmt>> & vec, const Stmt * element ) { 358 360 vec.emplace_back( nullptr ); 359 361 for ( size_t i = vec.size() - 1 ; 0 < i ; --i ) { 360 vec[ i ] = std::move( vec[ i - 1 ] );362 vec[ i ] = move( vec[ i - 1 ] ); 361 363 } 362 364 vec[ 0 ] = element; 363 365 } 364 366 365 const ast::CaseStmt * MultiLevelExitCore::previsit( const ast::CaseStmt * stmt ) {367 const CaseStmt * MultiLevelExitCore::previsit( const CaseStmt * stmt ) { 366 368 visit_children = false; 367 369 368 // If it is the default, mark the default asseen.370 // If default, mark seen. 369 371 if ( stmt->isDefault() ) { 370 assert( ! enclosing_control_structures.empty() );372 assert( ! enclosing_control_structures.empty() ); 371 373 enclosing_control_structures.back().seenDefault(); 372 374 } 373 375 374 376 // The cond may not exist, but if it does update it now. 375 visitor->maybe_accept( stmt, & ast::CaseStmt::cond );377 visitor->maybe_accept( stmt, &CaseStmt::cond ); 376 378 377 379 // Just save the mutated node for simplicity. 378 ast::CaseStmt * mutStmt = ast::mutate( stmt );379 380 ast::Label fallLabel = LabelGenerator::newLabel( "fallThrough", stmt );381 if ( ! mutStmt->stmts.empty() ) {380 CaseStmt * mutStmt = mutate( stmt ); 381 382 Label fallLabel = newLabel( "fallThrough", stmt ); 383 if ( ! mutStmt->stmts.empty() ) { 382 384 // Ensure that the stack isn't corrupted by exceptions in fixBlock. 383 385 auto guard = makeFuncGuard( 384 386 [&](){ enclosing_control_structures.emplace_back( mutStmt, fallLabel ); }, 385 387 [this](){ enclosing_control_structures.pop_back(); } 386 );388 ); 387 389 388 390 // These should already be in a block. 389 auto block = ast::mutate( mutStmt->stmts.front().strict_as<ast::CompoundStmt>() );391 auto block = mutate( mutStmt->stmts.front().strict_as<CompoundStmt>() ); 390 392 block->kids = fixBlock( block->kids, true ); 391 393 392 394 // Add fallthrough label if necessary. 
393 assert( ! enclosing_control_structures.empty() );395 assert( ! enclosing_control_structures.empty() ); 394 396 Entry & entry = enclosing_control_structures.back(); 395 397 if ( entry.isFallUsed() ) { 396 mutStmt->stmts.push_back( 397 labelledNullStmt( mutStmt->location, entry.useFallExit() ) ); 398 } 399 } 400 assert( !enclosing_control_structures.empty() ); 398 mutStmt->stmts.push_back( labelledNullStmt( mutStmt->location, entry.useFallExit() ) ); 399 } 400 } 401 assert( ! enclosing_control_structures.empty() ); 401 402 Entry & entry = enclosing_control_structures.back(); 402 assertf( dynamic_cast< const ast::SwitchStmt * >( entry.stmt ),403 "Control structure enclosing a case clause must be a switch, but is: %s",404 toString( entry.stmt ).c_str() );403 assertf( dynamic_cast< const SwitchStmt * >( entry.stmt ), 404 "CFA internal error: control structure enclosing a case clause must be a switch, but is: %s", 405 toString( entry.stmt ).c_str() ); 405 406 if ( mutStmt->isDefault() ) { 406 407 if ( entry.isFallDefaultUsed() ) { 407 408 // Add fallthrough default label if necessary. 408 push_front( mutStmt->stmts, labelledNullStmt( 409 stmt->location, entry.useFallDefaultExit() 410 ) ); 409 push_front( mutStmt->stmts, labelledNullStmt( stmt->location, entry.useFallDefaultExit() ) ); 411 410 } 412 411 } … … 414 413 } 415 414 416 void MultiLevelExitCore::previsit( const ast::IfStmt * stmt ) {417 bool labeledBlock = ! stmt->labels.empty();415 void MultiLevelExitCore::previsit( const IfStmt * stmt ) { 416 bool labeledBlock = ! stmt->labels.empty(); 418 417 if ( labeledBlock ) { 419 ast::Label breakLabel = LabelGenerator::newLabel( "blockBreak", stmt );418 Label breakLabel = newLabel( "blockBreak", stmt ); 420 419 enclosing_control_structures.emplace_back( stmt, breakLabel ); 421 420 GuardAction( [this](){ enclosing_control_structures.pop_back(); } ); … … 423 422 } 424 423 425 const ast::IfStmt * MultiLevelExitCore::postvisit( const ast::IfStmt * stmt ) {426 bool labeledBlock = ! stmt->labels.empty();424 const IfStmt * MultiLevelExitCore::postvisit( const IfStmt * stmt ) { 425 bool labeledBlock = ! stmt->labels.empty(); 427 426 if ( labeledBlock ) { 428 427 auto this_label = enclosing_control_structures.back().useBreakExit(); 429 if ( ! this_label.empty() ) {428 if ( ! this_label.empty() ) { 430 429 break_label = this_label; 431 430 } … … 434 433 } 435 434 436 bool isDefaultCase( const ast::ptr<ast::Stmt> & stmt ) {437 const ast::CaseStmt * caseStmt = stmt.strict_as<ast::CaseStmt>();435 bool isDefaultCase( const ptr<Stmt> & stmt ) { 436 const CaseStmt * caseStmt = stmt.strict_as<CaseStmt>(); 438 437 return caseStmt->isDefault(); 439 438 } 440 439 441 void MultiLevelExitCore::previsit( const ast::SwitchStmt * stmt ) { 442 ast::Label label = LabelGenerator::newLabel( "switchBreak", stmt ); 443 auto it = std::find_if( stmt->stmts.rbegin(), stmt->stmts.rend(), isDefaultCase ); 444 445 const ast::CaseStmt * defaultCase = it != stmt->stmts.rend() 446 ? (it)->strict_as<ast::CaseStmt>() : nullptr; 447 ast::Label defaultLabel = defaultCase 448 ? LabelGenerator::newLabel( "fallThroughDefault", defaultCase ) 449 : ast::Label( stmt->location, "" ); 440 void MultiLevelExitCore::previsit( const SwitchStmt * stmt ) { 441 Label label = newLabel( "switchBreak", stmt ); 442 auto it = find_if( stmt->stmts.rbegin(), stmt->stmts.rend(), isDefaultCase ); 443 444 const CaseStmt * defaultCase = it != stmt->stmts.rend() ? (it)->strict_as<CaseStmt>() : nullptr; 445 Label defaultLabel = defaultCase ? 
newLabel( "fallThroughDefault", defaultCase ) : Label( stmt->location, "" ); 450 446 enclosing_control_structures.emplace_back( stmt, label, defaultLabel ); 451 447 GuardAction( [this]() { enclosing_control_structures.pop_back(); } ); 452 448 453 // Collect valid labels for fallthrough. It starts with all labels at 454 // t his level, then removed as we see them in traversal.455 for ( const ast::Stmt * stmt : stmt->stmts ) {456 auto * caseStmt = strict_dynamic_cast< const ast::CaseStmt * >( stmt );449 // Collect valid labels for fallthrough. It starts with all labels at this level, then remove as each is seen during 450 // traversal. 451 for ( const Stmt * stmt : stmt->stmts ) { 452 auto * caseStmt = strict_dynamic_cast< const CaseStmt * >( stmt ); 457 453 if ( caseStmt->stmts.empty() ) continue; 458 auto block = caseStmt->stmts.front().strict_as< ast::CompoundStmt>();459 for ( const ast::Stmt * stmt : block->kids ) {460 for ( const ast::Label & l : stmt->labels ) {454 auto block = caseStmt->stmts.front().strict_as<CompoundStmt>(); 455 for ( const Stmt * stmt : block->kids ) { 456 for ( const Label & l : stmt->labels ) { 461 457 fallthrough_labels.insert( l ); 462 458 } … … 465 461 } 466 462 467 const ast::SwitchStmt * MultiLevelExitCore::postvisit( const ast::SwitchStmt * stmt ) {468 assert( ! enclosing_control_structures.empty() );463 const SwitchStmt * MultiLevelExitCore::postvisit( const SwitchStmt * stmt ) { 464 assert( ! enclosing_control_structures.empty() ); 469 465 Entry & entry = enclosing_control_structures.back(); 470 466 assert( entry.stmt == stmt ); 471 467 472 // Only run if we needto generate the break label.468 // Only run to generate the break label. 473 469 if ( entry.isBreakUsed() ) { 474 // To keep the switch statements uniform (all direct children of a 475 // SwitchStmt should be CastStmts), append the exit label and break 476 // to the last case, create a default case is there are no cases. 477 ast::SwitchStmt * mutStmt = ast::mutate( stmt ); 470 // To keep the switch statements uniform (all direct children of a SwitchStmt should be CastStmts), append the 471 // exit label and break to the last case, create a default case if no cases. 472 SwitchStmt * mutStmt = mutate( stmt ); 478 473 if ( mutStmt->stmts.empty() ) { 479 mutStmt->stmts.push_back( new ast::CaseStmt( 480 mutStmt->location, nullptr, {} )); 481 } 482 483 auto caseStmt = mutStmt->stmts.back().strict_as<ast::CaseStmt>(); 484 auto mutCase = ast::mutate( caseStmt ); 474 mutStmt->stmts.push_back( new CaseStmt( mutStmt->location, nullptr, {} ) ); 475 } 476 477 auto caseStmt = mutStmt->stmts.back().strict_as<CaseStmt>(); 478 auto mutCase = mutate( caseStmt ); 485 479 mutStmt->stmts.back() = mutCase; 486 480 487 ast::Label label( mutCase->location, "breakLabel" );488 auto branch = new ast::BranchStmt( mutCase->location, ast::BranchStmt::Break, label );481 Label label( mutCase->location, "breakLabel" ); 482 auto branch = new BranchStmt( mutCase->location, BranchStmt::Break, label ); 489 483 branch->labels.push_back( entry.useBreakExit() ); 490 484 mutCase->stmts.push_back( branch ); … … 495 489 } 496 490 497 void MultiLevelExitCore::previsit( const ast::ReturnStmt * stmt ) {491 void MultiLevelExitCore::previsit( const ReturnStmt * stmt ) { 498 492 if ( inFinally ) { 499 493 SemanticError( stmt->location, "'return' may not appear in a finally clause" ); … … 501 495 } 502 496 503 void MultiLevelExitCore::previsit( const ast::TryStmt * stmt ) {504 bool isLabeled = ! 
stmt->labels.empty();497 void MultiLevelExitCore::previsit( const TryStmt * stmt ) { 498 bool isLabeled = ! stmt->labels.empty(); 505 499 if ( isLabeled ) { 506 ast::Label breakLabel = LabelGenerator::newLabel( "blockBreak", stmt );500 Label breakLabel = newLabel( "blockBreak", stmt ); 507 501 enclosing_control_structures.emplace_back( stmt, breakLabel ); 508 502 GuardAction([this](){ enclosing_control_structures.pop_back(); } ); … … 510 504 } 511 505 512 void MultiLevelExitCore::postvisit( const ast::TryStmt * stmt ) {513 bool isLabeled = ! stmt->labels.empty();506 void MultiLevelExitCore::postvisit( const TryStmt * stmt ) { 507 bool isLabeled = ! stmt->labels.empty(); 514 508 if ( isLabeled ) { 515 509 auto this_label = enclosing_control_structures.back().useBreakExit(); 516 if ( ! this_label.empty() ) {510 if ( ! this_label.empty() ) { 517 511 break_label = this_label; 518 512 } … … 520 514 } 521 515 522 void MultiLevelExitCore::previsit( const ast::FinallyStmt * ) { 523 GuardAction([this, old = std::move(enclosing_control_structures)](){ 524 enclosing_control_structures = std::move(old); 525 }); 526 enclosing_control_structures = std::vector<Entry>(); 516 void MultiLevelExitCore::previsit( const FinallyStmt * ) { 517 GuardAction([this, old = move( enclosing_control_structures)](){ enclosing_control_structures = move(old); }); 518 enclosing_control_structures = vector<Entry>(); 527 519 GuardValue( inFinally ) = true; 528 520 } 529 521 530 const ast::Stmt * MultiLevelExitCore::mutateLoop(531 const ast::Stmt * body, Entry & entry ) {522 const Stmt * MultiLevelExitCore::mutateLoop( 523 const Stmt * body, Entry & entry ) { 532 524 if ( entry.isBreakUsed() ) { 533 525 break_label = entry.useBreakExit(); 534 526 } 535 527 528 // if continue is used insert a continue label into the back of the body of the loop 536 529 if ( entry.isContUsed() ) { 537 ast::CompoundStmt * new_body = new ast::CompoundStmt( body->location ); 530 CompoundStmt * new_body = new CompoundStmt( body->location ); 531 // {} 538 532 new_body->kids.push_back( body ); 533 // { 534 // body 535 // } 539 536 new_body->kids.push_back( 540 537 labelledNullStmt( body->location, entry.useContExit() ) ); 538 // { 539 // body 540 // ContinueLabel: {} 541 // } 541 542 return new_body; 542 543 } … … 549 550 // Remember is loop before going onto mutate the body. 550 551 // The labels will be folded in if they are used. 551 ast::Label breakLabel = LabelGenerator::newLabel( "loopBreak", loopStmt );552 ast::Label contLabel = LabelGenerator::newLabel( "loopContinue", loopStmt );552 Label breakLabel = newLabel( "loopBreak", loopStmt ); 553 Label contLabel = newLabel( "loopContinue", loopStmt ); 553 554 enclosing_control_structures.emplace_back( loopStmt, breakLabel, contLabel ); 555 // labels are added temporarily to see if they are used and then added permanently in postvisit if ther are used 556 // children will tag labels as being used during their traversal which occurs before postvisit 557 558 // GuardAction calls the lambda after the node is done being visited 554 559 GuardAction( [this](){ enclosing_control_structures.pop_back(); } ); 555 560 } … … 557 562 template<typename LoopNode> 558 563 const LoopNode * MultiLevelExitCore::posthandleLoopStmt( const LoopNode * loopStmt ) { 559 assert( ! enclosing_control_structures.empty() );564 assert( ! 
enclosing_control_structures.empty() ); 560 565 Entry & entry = enclosing_control_structures.back(); 561 566 assert( entry.stmt == loopStmt ); 562 567 563 // Now we check if the labels are used and add them if so. 564 return ast::mutate_field( 565 loopStmt, &LoopNode::body, mutateLoop( loopStmt->body, entry ) ); 566 } 567 568 std::list<ast::ptr<ast::Stmt>> MultiLevelExitCore::fixBlock( 569 const std::list<ast::ptr<ast::Stmt>> & kids, bool is_case_clause ) { 570 // Unfortunately we can't use the automatic error collection. 568 // Now check if the labels are used and add them if so. 569 return mutate_field( loopStmt, &LoopNode::body, mutateLoop( loopStmt->body, entry ) ); 570 // this call to mutate_field compares loopStmt->body and the result of mutateLoop 571 // if they are the same the node isn't mutated, if they differ then the new mutated node is returned 572 // the stmts will only differ if a label is used 573 } 574 575 list<ptr<Stmt>> MultiLevelExitCore::fixBlock( 576 const list<ptr<Stmt>> & kids, bool is_case_clause ) { 577 // Unfortunately cannot use automatic error collection. 571 578 SemanticErrorException errors; 572 579 573 std::list<ast::ptr<ast::Stmt>> ret;580 list<ptr<Stmt>> ret; 574 581 575 582 // Manually visit each child. 576 for ( const ast::ptr<ast::Stmt> & kid : kids ) {583 for ( const ptr<Stmt> & kid : kids ) { 577 584 if ( is_case_clause ) { 578 585 // Once a label is seen, it's no longer a valid for fallthrough. 579 for ( const ast::Label & l : kid->labels ) {586 for ( const Label & l : kid->labels ) { 580 587 fallthrough_labels.erase( l ); 581 588 } … … 588 595 } 589 596 590 if ( !break_label.empty() ) { 591 ret.push_back( 592 labelledNullStmt( ret.back()->location, break_label ) ); 593 break_label = ast::Label( CodeLocation(), "" ); 594 } 595 } 596 597 if ( !errors.isEmpty() ) { 597 if ( ! break_label.empty() ) { 598 ret.push_back( labelledNullStmt( ret.back()->location, break_label ) ); 599 break_label = Label( CodeLocation(), "" ); 600 } 601 } 602 603 if ( ! errors.isEmpty() ) { 598 604 throw errors; 599 605 } … … 601 607 } 602 608 603 } // namespace 604 605 const ast::CompoundStmt * multiLevelExitUpdate( 606 const ast::CompoundStmt * stmt, 607 const LabelToStmt & labelTable ) { 609 const CompoundStmt * multiLevelExitUpdate( 610 const CompoundStmt * stmt, 611 const LabelToStmt & labelTable ) { 608 612 // Must start in the body, so FunctionDecls can be a stopping point. 609 ast::Pass<MultiLevelExitCore> visitor( labelTable );610 const ast::CompoundStmt * ret = stmt->accept( visitor );613 Pass<MultiLevelExitCore> visitor( labelTable ); 614 const CompoundStmt * ret = stmt->accept( visitor ); 611 615 return ret; 612 616 } 613 614 617 } // namespace ControlStruct 615 618 -
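Note on the MultiLevelExit changes above: the transformation hinges on appending a generated label as a trailing null statement, so that a used break/continue can later be redirected to it (the { body; ContinueLabel: {} } shape sketched in the comments). A minimal standalone sketch of that pattern, using toy types rather than the compiler's own AST classes (all names here are illustrative):

#include <iostream>
#include <string>
#include <vector>

// Toy statement: some source text plus any labels attached to it.
struct Stmt {
	std::vector<std::string> labels;
	std::string text;                      // empty text models a null statement ";"
};

// Roughly mirrors mutateLoop: if the generated continue label was used, the loop
// body becomes { body; __loopContinue_N: ; } so a continue targeting the loop can
// later be lowered to a jump to that trailing label.
std::vector<Stmt> appendContinueLabel( std::vector<Stmt> body, const std::string & contLabel ) {
	body.push_back( Stmt{ { contLabel }, "" } );   // labelled null statement at the end
	return body;
}

int main() {
	std::vector<Stmt> body{ { {}, "work();" } };
	for ( const Stmt & s : appendContinueLabel( body, "__loopContinue_0" ) ) {
		for ( const std::string & l : s.labels ) std::cout << l << ": ";
		std::cout << ( s.text.empty() ? ";" : s.text ) << '\n';
	}
}

The break side is analogous: fixBlock appends a labelled null statement after the statement a used break label must jump past, then clears break_label.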
src/ControlStruct/MultiLevelExit.hpp
r97c215f rf5a51db 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 1 13:49:00 2021 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 10:53:00 202113 // Update Count : 311 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 22:34:06 2022 13 // Update Count : 6 14 14 // 15 15 … … 19 19 20 20 namespace ast { 21 22 23 21 class CompoundStmt; 22 class Label; 23 class Stmt; 24 24 } 25 25 26 26 namespace ControlStruct { 27 28 27 using LabelToStmt = std::map<ast::Label, const ast::Stmt *>; 29 28 30 /// Mutate a function body to handle multi-level exits. 31 const ast::CompoundStmt * multiLevelExitUpdate( 32 const ast::CompoundStmt *, const LabelToStmt & ); 33 29 // Mutate a function body to handle multi-level exits. 30 const ast::CompoundStmt * multiLevelExitUpdate( const ast::CompoundStmt *, const LabelToStmt & ); 34 31 } 35 32 -
src/ControlStruct/module.mk
r97c215f rf5a51db 10 10 ## Author : Richard C. Bilson 11 11 ## Created On : Mon Jun 1 17:49:17 2015 12 ## Last Modified By : Henry Xue13 ## Last Modified On : Tue Jul 20 04:10:50 202114 ## Update Count : 512 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Sat Jan 29 12:04:19 2022 14 ## Update Count : 7 15 15 ############################################################################### 16 16 … … 22 22 ControlStruct/ForExprMutator.cc \ 23 23 ControlStruct/ForExprMutator.h \ 24 ControlStruct/HoistControlDecls.cpp \ 25 ControlStruct/HoistControlDecls.hpp \ 24 26 ControlStruct/LabelFixer.cc \ 25 27 ControlStruct/LabelFixer.h \ 26 28 ControlStruct/LabelGenerator.cc \ 27 29 ControlStruct/LabelGenerator.h \ 30 ControlStruct/LabelGeneratorNew.cpp \ 31 ControlStruct/LabelGeneratorNew.hpp \ 28 32 ControlStruct/MLEMutator.cc \ 29 33 ControlStruct/MLEMutator.h \ -
src/InitTweak/InitTweak.cc
r97c215f rf5a51db 10 10 // Created On : Fri May 13 11:26:36 2016 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Nov 19 19:22:00 202113 // Update Count : 1912 // Last Modified On : Mon Dec 6 13:21:00 2021 13 // Update Count : 20 14 14 // 15 15 … … 1191 1191 } 1192 1192 1193 bool isCopyFunction( const ast::FunctionDecl * decl ) { 1194 const ast::FunctionType * ftype = decl->type; 1195 if ( ftype->params.size() != 2 ) return false; 1196 1197 const ast::Type * t1 = getPointerBase( ftype->params.front() ); 1198 if ( ! t1 ) return false; 1199 const ast::Type * t2 = ftype->params.back(); 1200 1201 return ResolvExpr::typesCompatibleIgnoreQualifiers( t1, t2, ast::SymbolTable{} ); 1202 } 1193 bool isAssignment( const ast::FunctionDecl * decl ) { 1194 return isAssignment( decl->name ) && isCopyFunction( decl ); 1195 } 1196 1197 bool isDestructor( const ast::FunctionDecl * decl ) { 1198 return isDestructor( decl->name ); 1199 } 1200 1201 bool isDefaultConstructor( const ast::FunctionDecl * decl ) { 1202 return isConstructor( decl->name ) && 1 == decl->params.size(); 1203 } 1204 1205 bool isCopyConstructor( const ast::FunctionDecl * decl ) { 1206 return isConstructor( decl->name ) && 2 == decl->params.size(); 1207 } 1208 1209 bool isCopyFunction( const ast::FunctionDecl * decl ) { 1210 const ast::FunctionType * ftype = decl->type; 1211 if ( ftype->params.size() != 2 ) return false; 1212 1213 const ast::Type * t1 = getPointerBase( ftype->params.front() ); 1214 if ( ! t1 ) return false; 1215 const ast::Type * t2 = ftype->params.back(); 1216 1217 return ResolvExpr::typesCompatibleIgnoreQualifiers( t1, t2, ast::SymbolTable{} ); 1218 } 1203 1219 1204 1220 const FunctionDecl * isAssignment( const Declaration * decl ) { -
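The new ast::FunctionDecl overloads above classify special member functions by operator name and arity (CFA spells constructors ?{}, destructors ^?{}, and assignment ?=?). A standalone sketch of the same classification using plain strings instead of declaration nodes; note the real predicates also check parameter types through isCopyFunction, which this toy omits:

#include <cstddef>
#include <iostream>
#include <string>

// Classify a function by its CFA operator name and parameter count, mirroring
// isDefaultConstructor / isCopyConstructor / isDestructor / isAssignment above.
static const char * specialKind( const std::string & name, std::size_t nparams ) {
	if ( name == "?{}" && nparams == 1 ) return "default constructor";
	if ( name == "?{}" && nparams == 2 ) return "copy constructor";
	if ( name == "^?{}" )                return "destructor";
	if ( name == "?=?" && nparams == 2 ) return "assignment";
	return "ordinary function";
}

int main() {
	std::cout << specialKind( "?{}", 2 ) << '\n';    // copy constructor
	std::cout << specialKind( "^?{}", 1 ) << '\n';   // destructor
}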
src/InitTweak/InitTweak.h
r97c215f rf5a51db 10 10 // Created On : Fri May 13 11:26:36 2016 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Fri Nov 19 14:18:00 202113 // Update Count : 712 // Last Modified On : Mon Dec 6 13:20:00 2021 13 // Update Count : 8 14 14 // 15 15 … … 31 31 const FunctionDecl * isCopyConstructor( const Declaration * decl ); 32 32 const FunctionDecl * isCopyFunction( const Declaration * decl, const std::string & fname ); 33 bool isAssignment( const ast::FunctionDecl * decl ); 34 bool isDestructor( const ast::FunctionDecl * decl ); 35 bool isDefaultConstructor( const ast::FunctionDecl * decl ); 36 bool isCopyConstructor( const ast::FunctionDecl * decl ); 33 37 bool isCopyFunction( const ast::FunctionDecl * decl ); 34 38 -
src/Parser/ParseNode.h
r97c215f rf5a51db 10 10 // Created On : Sat May 16 13:28:16 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jul 14 17:28:53 202113 // Update Count : 90 012 // Last Modified On : Wed Feb 2 09:15:49 2022 13 // Update Count : 905 14 14 // 15 15 … … 390 390 Statement * build_expr( ExpressionNode * ctl ); 391 391 392 struct IfCtrl {393 IfCtrl( DeclarationNode * decl, ExpressionNode * condition ) :392 struct CondCtl { 393 CondCtl( DeclarationNode * decl, ExpressionNode * condition ) : 394 394 init( decl ? new StatementNode( decl ) : nullptr ), condition( condition ) {} 395 395 … … 409 409 }; 410 410 411 Expression * build_if_control( IfCtrl * ctl, std::list< Statement * > & init );412 Statement * build_if( IfCtrl * ctl, StatementNode * then_stmt, StatementNode * else_stmt);411 Expression * build_if_control( CondCtl * ctl, std::list< Statement * > & init ); 412 Statement * build_if( CondCtl * ctl, StatementNode * then, StatementNode * else_ ); 413 413 Statement * build_switch( bool isSwitch, ExpressionNode * ctl, StatementNode * stmt ); 414 414 Statement * build_case( ExpressionNode * ctl ); 415 415 Statement * build_default(); 416 Statement * build_while( IfCtrl * ctl, StatementNode * stmt);417 Statement * build_do_while( ExpressionNode * ctl, StatementNode * stmt );418 Statement * build_for( ForCtrl * forctl, StatementNode * stmt );416 Statement * build_while( CondCtl * ctl, StatementNode * stmt, StatementNode * else_ = nullptr ); 417 Statement * build_do_while( ExpressionNode * ctl, StatementNode * stmt, StatementNode * else_ = nullptr ); 418 Statement * build_for( ForCtrl * forctl, StatementNode * stmt, StatementNode * else_ = nullptr ); 419 419 Statement * build_branch( BranchStmt::Type kind ); 420 420 Statement * build_branch( std::string * identifier, BranchStmt::Type kind ); … … 424 424 Statement * build_resume( ExpressionNode * ctl ); 425 425 Statement * build_resume_at( ExpressionNode * ctl , ExpressionNode * target ); 426 Statement * build_try( StatementNode * try_ stmt, StatementNode * catch_stmt, StatementNode * finally_stmt);427 Statement * build_catch( CatchStmt::Kind kind, DeclarationNode * decl, ExpressionNode *cond, StatementNode *body );426 Statement * build_try( StatementNode * try_, StatementNode * catch_, StatementNode * finally_ ); 427 Statement * build_catch( CatchStmt::Kind kind, DeclarationNode * decl, ExpressionNode * cond, StatementNode * body ); 428 428 Statement * build_finally( StatementNode * stmt ); 429 429 Statement * build_compound( StatementNode * first ); -
src/Parser/StatementNode.cc
r97c215f rf5a51db 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // StatementNode.cc -- 7 // StatementNode.cc -- Transform from parse data-structures to AST data-structures, usually deleting the parse 8 // data-structure after the transformation. 8 9 // 9 10 // Author : Rodolfo G. Esteves 10 11 // Created On : Sat May 16 14:59:41 2015 11 12 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Oct 24 04:20:55 202013 // Update Count : 38313 // Last Modified On : Wed Feb 2 20:29:30 2022 14 // Update Count : 425 14 15 // 15 16 … … 63 64 // convert from StatementNode list to Statement list 64 65 StatementNode * node = dynamic_cast< StatementNode * >(prev); 65 std::list< Statement * > stmts;66 list< Statement * > stmts; 66 67 buildMoveList( stmt, stmts ); 67 68 // splice any new Statements to end of current Statements … … 78 79 } // build_expr 79 80 80 Expression * build_if_control( IfCtrl * ctl, std::list< Statement * > & init ) {81 Expression * build_if_control( CondCtl * ctl, list< Statement * > & init ) { 81 82 if ( ctl->init != 0 ) { 82 83 buildMoveList( ctl->init, init ); … … 100 101 } // build_if_control 101 102 102 Statement * build_if( IfCtrl * ctl, StatementNode * then_stmt, StatementNode * else_stmt ) { 103 Statement * thenb, * elseb = nullptr; 104 std::list< Statement * > branches; 105 buildMoveList< Statement, StatementNode >( then_stmt, branches ); 106 assert( branches.size() == 1 ); 107 thenb = branches.front(); 108 109 if ( else_stmt ) { 110 std::list< Statement * > branches; 111 buildMoveList< Statement, StatementNode >( else_stmt, branches ); 112 assert( branches.size() == 1 ); 113 elseb = branches.front(); 114 } // if 115 116 std::list< Statement * > init; 117 Expression * cond = build_if_control( ctl, init ); 118 return new IfStmt( cond, thenb, elseb, init ); 103 Statement * build_if( CondCtl * ctl, StatementNode * then, StatementNode * else_ ) { 104 list< Statement * > astinit; // maybe empty 105 Expression * astcond = build_if_control( ctl, astinit ); // ctl deleted, cond/init set 106 107 Statement * astthen, * astelse = nullptr; 108 list< Statement * > aststmt; 109 buildMoveList< Statement, StatementNode >( then, aststmt ); 110 assert( aststmt.size() == 1 ); 111 astthen = aststmt.front(); 112 113 if ( else_ ) { 114 list< Statement * > aststmt; 115 buildMoveList< Statement, StatementNode >( else_, aststmt ); 116 assert( aststmt.size() == 1 ); 117 astelse = aststmt.front(); 118 } // if 119 120 return new IfStmt( astcond, astthen, astelse, astinit ); 119 121 } // build_if 120 122 121 123 Statement * build_switch( bool isSwitch, ExpressionNode * ctl, StatementNode * stmt ) { 122 std::list< Statement * > branches;123 buildMoveList< Statement, StatementNode >( stmt, branches);124 if ( ! isSwitch ) { 125 for ( Statement * stmt : branches) {124 list< Statement * > aststmt; 125 buildMoveList< Statement, StatementNode >( stmt, aststmt ); 126 if ( ! isSwitch ) { // choose statement 127 for ( Statement * stmt : aststmt ) { 126 128 CaseStmt * caseStmt = strict_dynamic_cast< CaseStmt * >( stmt ); 127 129 if ( ! caseStmt->stmts.empty() ) { // code after "case" => end of case list … … 131 133 } // for 132 134 } // if 133 // branches.size() == 0 for switch (...) {}, i.e., no declaration or statements134 return new SwitchStmt( maybeMoveBuild< Expression >(ctl), branches);135 // aststmt.size() == 0 for switch (...) 
{}, i.e., no declaration or statements 136 return new SwitchStmt( maybeMoveBuild< Expression >(ctl), aststmt ); 135 137 } // build_switch 136 138 137 139 Statement * build_case( ExpressionNode * ctl ) { 138 std::list< Statement * > branches; 139 return new CaseStmt( maybeMoveBuild< Expression >(ctl), branches ); 140 return new CaseStmt( maybeMoveBuild< Expression >(ctl), {} ); // stmt starts empty and then added to 140 141 } // build_case 141 142 142 143 Statement * build_default() { 143 std::list< Statement * > branches; 144 return new CaseStmt( nullptr, branches, true ); 144 return new CaseStmt( nullptr, {}, true ); // stmt starts empty and then added to 145 145 } // build_default 146 146 147 Statement * build_while( IfCtrl * ctl, StatementNode * stmt ) { 148 std::list< Statement * > branches; 149 buildMoveList< Statement, StatementNode >( stmt, branches ); 150 assert( branches.size() == 1 ); 151 152 std::list< Statement * > init; 153 Expression * cond = build_if_control( ctl, init ); 154 return new WhileStmt( cond, branches.front(), init, false ); 147 Statement * build_while( CondCtl * ctl, StatementNode * stmt, StatementNode * else_ ) { 148 list< Statement * > astinit; // maybe empty 149 Expression * astcond = build_if_control( ctl, astinit ); // ctl deleted, cond/init set 150 151 list< Statement * > aststmt; // loop body, compound created if empty 152 buildMoveList< Statement, StatementNode >( stmt, aststmt ); 153 assert( aststmt.size() == 1 ); 154 155 list< Statement * > astelse; // else clause, maybe empty 156 buildMoveList< Statement, StatementNode >( else_, astelse ); 157 158 return new WhileDoStmt( astcond, aststmt.front(), astelse.front(), astinit, false ); 155 159 } // build_while 156 160 157 Statement * build_do_while( ExpressionNode * ctl, StatementNode * stmt ) { 158 std::list< Statement * > branches; 159 buildMoveList< Statement, StatementNode >( stmt, branches ); 160 assert( branches.size() == 1 ); 161 162 std::list< Statement * > init; 163 return new WhileStmt( notZeroExpr( maybeMoveBuild< Expression >(ctl) ), branches.front(), init, true ); 161 Statement * build_do_while( ExpressionNode * ctl, StatementNode * stmt, StatementNode * else_ ) { 162 list< Statement * > aststmt; // loop body, compound created if empty 163 buildMoveList< Statement, StatementNode >( stmt, aststmt ); 164 assert( aststmt.size() == 1 ); // compound created if empty 165 166 list< Statement * > astelse; // else clause, maybe empty 167 buildMoveList< Statement, StatementNode >( else_, astelse ); 168 169 // do-while cannot have declarations in the contitional, so init is always empty 170 return new WhileDoStmt( notZeroExpr( maybeMoveBuild< Expression >(ctl) ), aststmt.front(), astelse.front(), {}, true ); 164 171 } // build_do_while 165 172 166 Statement * build_for( ForCtrl * forctl, StatementNode * stmt ) { 167 std::list< Statement * > branches; 168 buildMoveList< Statement, StatementNode >( stmt, branches ); 169 assert( branches.size() == 1 ); 170 171 std::list< Statement * > init; 172 if ( forctl->init != 0 ) { 173 buildMoveList( forctl->init, init ); 174 } // if 175 176 Expression * cond = 0; 177 if ( forctl->condition != 0 ) 178 cond = notZeroExpr( maybeMoveBuild< Expression >(forctl->condition) ); 179 180 Expression * incr = 0; 181 if ( forctl->change != 0 ) 182 incr = maybeMoveBuild< Expression >(forctl->change); 183 173 Statement * build_for( ForCtrl * forctl, StatementNode * stmt, StatementNode * else_ ) { 174 list< Statement * > astinit; // maybe empty 175 buildMoveList( forctl->init, 
astinit ); 176 177 Expression * astcond = nullptr; // maybe empty 178 astcond = notZeroExpr( maybeMoveBuild< Expression >(forctl->condition) ); 179 180 Expression * astincr = nullptr; // maybe empty 181 astincr = maybeMoveBuild< Expression >(forctl->change); 184 182 delete forctl; 185 return new ForStmt( init, cond, incr, branches.front() ); 183 184 list< Statement * > aststmt; // loop body, compound created if empty 185 buildMoveList< Statement, StatementNode >( stmt, aststmt ); 186 assert( aststmt.size() == 1 ); 187 188 list< Statement * > astelse; // else clause, maybe empty 189 buildMoveList< Statement, StatementNode >( else_, astelse ); 190 191 return new ForStmt( astinit, astcond, astincr, aststmt.front(), astelse.front() ); 186 192 } // build_for 187 193 … … 191 197 } // build_branch 192 198 193 Statement * build_branch( st d::string * identifier, BranchStmt::Type kind ) {199 Statement * build_branch( string * identifier, BranchStmt::Type kind ) { 194 200 Statement * ret = new BranchStmt( * identifier, kind ); 195 201 delete identifier; // allocated by lexer … … 202 208 203 209 Statement * build_return( ExpressionNode * ctl ) { 204 std::list< Expression * > exps;210 list< Expression * > exps; 205 211 buildMoveList( ctl, exps ); 206 212 return new ReturnStmt( exps.size() > 0 ? exps.back() : nullptr ); … … 208 214 209 215 Statement * build_throw( ExpressionNode * ctl ) { 210 std::list< Expression * > exps;216 list< Expression * > exps; 211 217 buildMoveList( ctl, exps ); 212 assertf( exps.size() < 2, " This means we are leaking memory");218 assertf( exps.size() < 2, "CFA internal error: leaking memory" ); 213 219 return new ThrowStmt( ThrowStmt::Terminate, !exps.empty() ? exps.back() : nullptr ); 214 220 } // build_throw 215 221 216 222 Statement * build_resume( ExpressionNode * ctl ) { 217 std::list< Expression * > exps;223 list< Expression * > exps; 218 224 buildMoveList( ctl, exps ); 219 assertf( exps.size() < 2, " This means we are leaking memory");225 assertf( exps.size() < 2, "CFA internal error: leaking memory" ); 220 226 return new ThrowStmt( ThrowStmt::Resume, !exps.empty() ? 
exps.back() : nullptr ); 221 227 } // build_resume … … 227 233 } // build_resume_at 228 234 229 Statement * build_try( StatementNode * try_ stmt, StatementNode * catch_stmt, StatementNode * finally_stmt) {230 std::list< CatchStmt * > branches;231 buildMoveList< CatchStmt, StatementNode >( catch_ stmt, branches);232 CompoundStmt * tryBlock = strict_dynamic_cast< CompoundStmt * >(maybeMoveBuild< Statement >(try_ stmt));233 FinallyStmt * finallyBlock = dynamic_cast< FinallyStmt * >(maybeMoveBuild< Statement >(finally_ stmt) );234 return new TryStmt( tryBlock, branches, finallyBlock );235 Statement * build_try( StatementNode * try_, StatementNode * catch_, StatementNode * finally_ ) { 236 list< CatchStmt * > aststmt; 237 buildMoveList< CatchStmt, StatementNode >( catch_, aststmt ); 238 CompoundStmt * tryBlock = strict_dynamic_cast< CompoundStmt * >(maybeMoveBuild< Statement >(try_)); 239 FinallyStmt * finallyBlock = dynamic_cast< FinallyStmt * >(maybeMoveBuild< Statement >(finally_) ); 240 return new TryStmt( tryBlock, aststmt, finallyBlock ); 235 241 } // build_try 236 242 237 243 Statement * build_catch( CatchStmt::Kind kind, DeclarationNode * decl, ExpressionNode * cond, StatementNode * body ) { 238 std::list< Statement * > branches;239 buildMoveList< Statement, StatementNode >( body, branches);240 assert( branches.size() == 1 );241 return new CatchStmt( kind, maybeMoveBuild< Declaration >(decl), maybeMoveBuild< Expression >(cond), branches.front() );244 list< Statement * > aststmt; 245 buildMoveList< Statement, StatementNode >( body, aststmt ); 246 assert( aststmt.size() == 1 ); 247 return new CatchStmt( kind, maybeMoveBuild< Declaration >(decl), maybeMoveBuild< Expression >(cond), aststmt.front() ); 242 248 } // build_catch 243 249 244 250 Statement * build_finally( StatementNode * stmt ) { 245 std::list< Statement * > branches;246 buildMoveList< Statement, StatementNode >( stmt, branches);247 assert( branches.size() == 1 );248 return new FinallyStmt( dynamic_cast< CompoundStmt * >( branches.front() ) );251 list< Statement * > aststmt; 252 buildMoveList< Statement, StatementNode >( stmt, aststmt ); 253 assert( aststmt.size() == 1 ); 254 return new FinallyStmt( dynamic_cast< CompoundStmt * >( aststmt.front() ) ); 249 255 } // build_finally 250 256 … … 254 260 node->type = type; 255 261 256 std::list< Statement * > stmts;262 list< Statement * > stmts; 257 263 buildMoveList< Statement, StatementNode >( then, stmts ); 258 264 if(!stmts.empty()) { … … 319 325 } // build_waitfor_timeout 320 326 321 WaitForStmt * build_waitfor_timeout( ExpressionNode * timeout, StatementNode * stmt, ExpressionNode * when, StatementNode * else_ stmt, ExpressionNode * else_when ) {327 WaitForStmt * build_waitfor_timeout( ExpressionNode * timeout, StatementNode * stmt, ExpressionNode * when, StatementNode * else_, ExpressionNode * else_when ) { 322 328 auto node = new WaitForStmt(); 323 329 … … 326 332 node->timeout.condition = notZeroExpr( maybeMoveBuild<Expression>( when ) ); 327 333 328 node->orelse.statement = maybeMoveBuild<Statement >( else_ stmt);334 node->orelse.statement = maybeMoveBuild<Statement >( else_ ); 329 335 node->orelse.condition = notZeroExpr( maybeMoveBuild<Expression>( else_when ) ); 330 336 … … 333 339 334 340 Statement * build_with( ExpressionNode * exprs, StatementNode * stmt ) { 335 std::list< Expression * > e;341 list< Expression * > e; 336 342 buildMoveList( exprs, e ); 337 343 Statement * s = maybeMoveBuild<Statement>( stmt ); … … 361 367 362 368 Statement * build_asm( bool voltile, 
Expression * instruction, ExpressionNode * output, ExpressionNode * input, ExpressionNode * clobber, LabelNode * gotolabels ) { 363 std::list< Expression * > out, in;364 std::list< ConstantExpr * > clob;369 list< Expression * > out, in; 370 list< ConstantExpr * > clob; 365 371 366 372 buildMoveList( output, out ); … … 375 381 376 382 Statement * build_mutex( ExpressionNode * exprs, StatementNode * stmt ) { 377 std::list< Expression * > expList;383 list< Expression * > expList; 378 384 buildMoveList( exprs, expList ); 379 385 Statement * body = maybeMoveBuild<Statement>( stmt ); -
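In the StatementNode.cc changes above, build_while, build_do_while and build_for now accept an optional else_ clause and thread it into the loop node they construct. A small standalone sketch of that "attach a possibly-missing else branch" step, with toy node types standing in for the real Statement/WhileDoStmt classes (names are illustrative only):

#include <iostream>
#include <memory>
#include <string>

struct Node {                       // toy stand-in for Statement
	std::string text;
};

struct WhileNode {                  // toy stand-in for WhileDoStmt
	std::string cond;
	std::unique_ptr<Node> body;
	std::unique_ptr<Node> else_;    // null when the loop has no else clause
};

// Mirrors the builder shape: the else branch is simply absent unless the parser
// supplied one, so later passes must tolerate a missing else_.
WhileNode buildWhile( std::string cond, std::unique_ptr<Node> body,
                      std::unique_ptr<Node> else_ = nullptr ) {
	return WhileNode{ std::move( cond ), std::move( body ), std::move( else_ ) };
}

int main() {
	WhileNode plain = buildWhile( "count > 0", std::make_unique<Node>( Node{ "count -= 1;" } ) );
	WhileNode withElse = buildWhile( "count > 0", std::make_unique<Node>( Node{ "count -= 1;" } ),
	                                 std::make_unique<Node>( Node{ "handleEmpty();" } ) );
	std::cout << ( plain.else_ ? "has else\n" : "no else\n" );
	std::cout << ( withElse.else_ ? "has else\n" : "no else\n" );
}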
src/Parser/parser.yy
r97c215f rf5a51db 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Oct 15 09:20:17 202113 // Update Count : 516 312 // Last Modified On : Tue Feb 1 11:06:13 2022 13 // Update Count : 5167 14 14 // 15 15 … … 238 238 WaitForStmt * wfs; 239 239 Expression * constant; 240 IfCtrl * ifctl;240 CondCtl * ifctl; 241 241 ForCtrl * fctl; 242 242 enum OperKinds compop; … … 327 327 %type<en> comma_expression comma_expression_opt 328 328 %type<en> argument_expression_list_opt argument_expression_list argument_expression default_initializer_opt 329 %type<ifctl> if_control_expression329 %type<ifctl> conditional_declaration 330 330 %type<fctl> for_control_expression for_control_expression_list 331 331 %type<compop> inclexcl … … 1123 1123 1124 1124 if_statement: 1125 IF '(' if_control_expression ')' statement%prec THEN1125 IF '(' conditional_declaration ')' statement %prec THEN 1126 1126 // explicitly deal with the shift/reduce conflict on if/else 1127 1127 { $$ = new StatementNode( build_if( $3, maybe_build_compound( $5 ), nullptr ) ); } 1128 | IF '(' if_control_expression ')' statement ELSE statement1128 | IF '(' conditional_declaration ')' statement ELSE statement 1129 1129 { $$ = new StatementNode( build_if( $3, maybe_build_compound( $5 ), maybe_build_compound( $7 ) ) ); } 1130 1130 ; 1131 1131 1132 if_control_expression:1132 conditional_declaration: 1133 1133 comma_expression 1134 { $$ = new IfCtrl( nullptr, $1 ); }1134 { $$ = new CondCtl( nullptr, $1 ); } 1135 1135 | c_declaration // no semi-colon 1136 { $$ = new IfCtrl( $1, nullptr ); }1136 { $$ = new CondCtl( $1, nullptr ); } 1137 1137 | cfa_declaration // no semi-colon 1138 { $$ = new IfCtrl( $1, nullptr ); }1138 { $$ = new CondCtl( $1, nullptr ); } 1139 1139 | declaration comma_expression // semi-colon separated 1140 { $$ = new IfCtrl( $1, $2 ); }1140 { $$ = new CondCtl( $1, $2 ); } 1141 1141 ; 1142 1142 … … 1193 1193 iteration_statement: 1194 1194 WHILE '(' ')' statement // CFA => while ( 1 ) 1195 { $$ = new StatementNode( build_while( new IfCtrl( nullptr, new ExpressionNode( build_constantInteger( *new string( "1" ) ) ) ), maybe_build_compound( $4 ) ) ); }1196 | WHILE '(' if_control_expression ')' statement%prec THEN1195 { $$ = new StatementNode( build_while( new CondCtl( nullptr, new ExpressionNode( build_constantInteger( *new string( "1" ) ) ) ), maybe_build_compound( $4 ) ) ); } 1196 | WHILE '(' conditional_declaration ')' statement %prec THEN 1197 1197 { $$ = new StatementNode( build_while( $3, maybe_build_compound( $5 ) ) ); } 1198 | WHILE '(' if_control_expression ')' statement ELSE statement // CFA 1199 { SemanticError( yylloc, "Loop default block is currently unimplemented." ); $$ = nullptr; } 1198 | WHILE '(' conditional_declaration ')' statement ELSE statement // CFA 1199 // { SemanticError( yylloc, "Loop default block is currently unimplemented." ); $$ = nullptr; } 1200 { $$ = new StatementNode( build_while( $3, maybe_build_compound( $5 ), $7 ) ); } 1200 1201 | DO statement WHILE '(' ')' ';' // CFA => do while( 1 ) 1201 1202 { $$ = new StatementNode( build_do_while( new ExpressionNode( build_constantInteger( *new string( "1" ) ) ), maybe_build_compound( $2 ) ) ); } … … 1203 1204 { $$ = new StatementNode( build_do_while( $5, maybe_build_compound( $2 ) ) ); } 1204 1205 | DO statement WHILE '(' comma_expression ')' ELSE statement // CFA 1205 { SemanticError( yylloc, "Loop default block is currently unimplemented." 
); $$ = nullptr; } 1206 // { SemanticError( yylloc, "Loop default block is currently unimplemented." ); $$ = nullptr; } 1207 { $$ = new StatementNode( build_do_while( $5, maybe_build_compound( $2 ), $8 ) ); } 1206 1208 | FOR '(' ')' statement // CFA => for ( ;; ) 1207 1209 { $$ = new StatementNode( build_for( new ForCtrl( (ExpressionNode * )nullptr, (ExpressionNode * )nullptr, (ExpressionNode * )nullptr ), maybe_build_compound( $4 ) ) ); } … … 1209 1211 { $$ = new StatementNode( build_for( $3, maybe_build_compound( $5 ) ) ); } 1210 1212 | FOR '(' for_control_expression_list ')' statement ELSE statement // CFA 1211 { SemanticError( yylloc, "Loop default block is currently unimplemented." ); $$ = nullptr; } 1213 // { SemanticError( yylloc, "Loop default block is currently unimplemented." ); $$ = nullptr; } 1214 { $$ = new StatementNode( build_for( $3, maybe_build_compound( $5 ), $7 ) ); } 1212 1215 ; 1213 1216 -
src/ResolvExpr/Resolver.cc
r97c215f rf5a51db 9 9 // Author : Aaron B. Moss 10 10 // Created On : Sun May 17 12:17:01 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Fri Mar 27 11:58:00 202013 // Update Count : 24 211 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Feb 1 16:27:14 2022 13 // Update Count : 245 14 14 // 15 15 … … 80 80 void previsit( AsmStmt * asmStmt ); 81 81 void previsit( IfStmt * ifStmt ); 82 void previsit( While Stmt * whileStmt );82 void previsit( WhileDoStmt * whileDoStmt ); 83 83 void previsit( ForStmt * forStmt ); 84 84 void previsit( SwitchStmt * switchStmt ); … … 502 502 } 503 503 504 void Resolver_old::previsit( While Stmt * whileStmt ) {505 findIntegralExpression( while Stmt->condition, indexer );504 void Resolver_old::previsit( WhileDoStmt * whileDoStmt ) { 505 findIntegralExpression( whileDoStmt->condition, indexer ); 506 506 } 507 507 … … 572 572 573 573 void Resolver_old::previsit( CatchStmt * catchStmt ) { 574 // Until we are very sure this invarent (ifs that move between passes have then Part)574 // Until we are very sure this invarent (ifs that move between passes have then) 575 575 // holds, check it. This allows a check for when to decode the mangling. 576 576 if ( IfStmt * ifStmt = dynamic_cast<IfStmt *>( catchStmt->body ) ) { 577 assert( ifStmt->then Part);577 assert( ifStmt->then ); 578 578 } 579 579 // Encode the catchStmt so the condition can see the declaration. … … 588 588 // Decode the catchStmt so everything is stored properly. 589 589 IfStmt * ifStmt = dynamic_cast<IfStmt *>( catchStmt->body ); 590 if ( nullptr != ifStmt && nullptr == ifStmt->then Part) {590 if ( nullptr != ifStmt && nullptr == ifStmt->then ) { 591 591 assert( ifStmt->condition ); 592 assert( ifStmt->else Part);592 assert( ifStmt->else_ ); 593 593 catchStmt->cond = ifStmt->condition; 594 catchStmt->body = ifStmt->else Part;594 catchStmt->body = ifStmt->else_; 595 595 ifStmt->condition = nullptr; 596 ifStmt->else Part= nullptr;596 ifStmt->else_ = nullptr; 597 597 delete ifStmt; 598 598 } … … 1272 1272 const ast::AsmStmt * previsit( const ast::AsmStmt * ); 1273 1273 const ast::IfStmt * previsit( const ast::IfStmt * ); 1274 const ast::While Stmt * previsit( const ast::WhileStmt * );1274 const ast::WhileDoStmt * previsit( const ast::WhileDoStmt * ); 1275 1275 const ast::ForStmt * previsit( const ast::ForStmt * ); 1276 1276 const ast::SwitchStmt * previsit( const ast::SwitchStmt * ); … … 1581 1581 } 1582 1582 1583 const ast::While Stmt * Resolver_new::previsit( const ast::WhileStmt * whileStmt ) {1583 const ast::WhileDoStmt * Resolver_new::previsit( const ast::WhileDoStmt * whileDoStmt ) { 1584 1584 return ast::mutate_field( 1585 while Stmt, &ast::WhileStmt::cond, findIntegralExpression( whileStmt->cond, symtab ) );1585 whileDoStmt, &ast::WhileDoStmt::cond, findIntegralExpression( whileDoStmt->cond, symtab ) ); 1586 1586 } 1587 1587 … … 1669 1669 1670 1670 const ast::CatchStmt * Resolver_new::previsit( const ast::CatchStmt * catchStmt ) { 1671 // Until we are very sure this invarent (ifs that move between passes have then Part)1671 // Until we are very sure this invarent (ifs that move between passes have then) 1672 1672 // holds, check it. This allows a check for when to decode the mangling. 1673 1673 if ( auto ifStmt = catchStmt->body.as<ast::IfStmt>() ) { 1674 assert( ifStmt->then Part);1674 assert( ifStmt->then ); 1675 1675 } 1676 1676 // Encode the catchStmt so the condition can see the declaration. 
… … 1687 1687 // Decode the catchStmt so everything is stored properly. 1688 1688 const ast::IfStmt * ifStmt = catchStmt->body.as<ast::IfStmt>(); 1689 if ( nullptr != ifStmt && nullptr == ifStmt->then Part) {1689 if ( nullptr != ifStmt && nullptr == ifStmt->then ) { 1690 1690 assert( ifStmt->cond ); 1691 assert( ifStmt->else Part);1691 assert( ifStmt->else_ ); 1692 1692 ast::CatchStmt * stmt = ast::mutate( catchStmt ); 1693 1693 stmt->cond = ifStmt->cond; 1694 stmt->body = ifStmt->else Part;1694 stmt->body = ifStmt->else_; 1695 1695 // ifStmt should be implicately deleted here. 1696 1696 return stmt; -
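The catchStmt handling in Resolver.cc above temporarily packs a conditional handler into an IfStmt, with the condition in cond, the handler body in else_, and then left null as the marker, so the resolver sees the exception declaration while resolving the condition; afterwards the pieces are unpacked again. A standalone sketch of that encode/decode round trip with toy nodes (illustrative only; the real code works on ast::IfStmt and ast::CatchStmt):

#include <cassert>
#include <iostream>
#include <memory>
#include <string>

struct If {                          // toy IfStmt: a null then branch marks "encoded catch"
	std::string cond;
	std::unique_ptr<std::string> then;
	std::unique_ptr<std::string> else_;
};

struct Catch {                       // toy CatchStmt
	std::string cond;                // optional handler condition
	std::string body;
};

// Encode: stash condition and body inside an If so both are visited together.
If encode( const Catch & c ) {
	return If{ c.cond, nullptr, std::make_unique<std::string>( c.body ) };
}

// Decode: recognise the marker (no then branch) and pull the pieces back out.
Catch decode( If enc ) {
	assert( ! enc.then && enc.else_ );
	return Catch{ std::move( enc.cond ), std::move( *enc.else_ ) };
}

int main() {
	Catch original{ "e.code == 42", "handle();" };
	Catch roundTripped = decode( encode( original ) );
	std::cout << roundTripped.cond << " -> " << roundTripped.body << '\n';
}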
src/SymTab/Validate.cc
r97c215f rf5a51db 453 453 } 454 454 455 void decayForallPointers( std::list< Declaration * > & translationUnit ) { 456 PassVisitor<ForallPointerDecay_old> fpd; 457 acceptAll( translationUnit, fpd ); 458 } 459 455 460 void validate( std::list< Declaration * > &translationUnit, __attribute__((unused)) bool doDebug ) { 456 461 validate_A( translationUnit ); … … 470 475 type->accept( fpd ); 471 476 } 472 473 477 474 478 void HoistTypeDecls::handleType( Type * type ) { -
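decayForallPointers above is a thin driver: construct a PassVisitor around the core pass and run it over every top-level declaration with acceptAll. A standalone sketch of that "wrap a core in a driver and apply it to the whole translation unit" pattern, with toy types in place of PassVisitor<ForallPointerDecay_old> and Declaration:

#include <iostream>
#include <list>
#include <string>

struct Decl { std::string name; };

// Toy "core" pass: only knows what to do with a single declaration.
struct RenameCore {
	void apply( Decl & d ) { d.name += "_decayed"; }
};

// Toy driver, in the spirit of PassVisitor + acceptAll: walks the list of
// top-level declarations and hands each one to the core.
template<typename Core>
void acceptAll( std::list<Decl> & translationUnit, Core & core ) {
	for ( Decl & d : translationUnit ) core.apply( d );
}

int main() {
	std::list<Decl> translationUnit{ { "f" }, { "g" } };
	RenameCore core;
	acceptAll( translationUnit, core );
	for ( const Decl & d : translationUnit ) std::cout << d.name << '\n';
}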
src/SymTab/Validate.h
r97c215f rf5a51db 42 42 void validate_E( std::list< Declaration * > &translationUnit ); 43 43 void validate_F( std::list< Declaration * > &translationUnit ); 44 void decayForallPointers( std::list< Declaration * > & translationUnit ); 44 45 45 46 const ast::Type * validateType( -
src/SynTree/Mutator.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:35:36 202113 // Update Count : 1812 // Last Modified On : Tue Feb 1 09:26:49 2022 13 // Update Count : 20 14 14 // 15 15 #pragma once … … 42 42 virtual Statement * mutate( DirectiveStmt * dirStmt ) = 0; 43 43 virtual Statement * mutate( IfStmt * ifStmt ) = 0; 44 virtual Statement * mutate( While Stmt * whileStmt ) = 0;44 virtual Statement * mutate( WhileDoStmt * whileDoStmt ) = 0; 45 45 virtual Statement * mutate( ForStmt * forStmt ) = 0; 46 46 virtual Statement * mutate( SwitchStmt * switchStmt ) = 0; -
src/SynTree/Statement.cc
r97c215f rf5a51db 9 9 // Author : Richard C. Bilson 10 10 // Created On : Mon May 18 07:44:20 2015 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Jan 20 16:03:00 202013 // Update Count : 7111 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 20:19:33 2022 13 // Update Count : 90 14 14 // 15 15 … … 29 29 #include "SynTree/Label.h" // for Label, operator<< 30 30 31 using std::string;32 using std::endl; 33 34 Statement::Statement( const std::list<Label> & labels ) : labels( labels ) {}35 36 void Statement::print( std::ostream & os, Indenter indent ) const {31 using namespace std; 32 33 34 Statement::Statement( const list<Label> & labels ) : labels( labels ) {} 35 36 void Statement::print( ostream & os, Indenter indent ) const { 37 37 if ( ! labels.empty() ) { 38 38 os << indent << "... Labels: {"; … … 54 54 } 55 55 56 void ExprStmt::print( std::ostream & os, Indenter indent ) const {57 os << "Expression Statement:" << endl << indent +1;58 expr->print( os, indent +1 );59 } 60 61 62 AsmStmt::AsmStmt( bool voltile, Expression * instruction, std::list<Expression *> output, std::list<Expression *> input, std::list<ConstantExpr *> clobber, std::list<Label> gotolabels ) : Statement(), voltile( voltile ), instruction( instruction ), output( output ), input( input ), clobber( clobber ), gotolabels( gotolabels ) {}56 void ExprStmt::print( ostream & os, Indenter indent ) const { 57 os << "Expression Statement:" << endl << indent + 1; 58 expr->print( os, indent + 1 ); 59 } 60 61 62 AsmStmt::AsmStmt( bool voltile, Expression * instruction, const list<Expression *> output, const list<Expression *> input, const list<ConstantExpr *> clobber, const list<Label> gotolabels ) : Statement(), voltile( voltile ), instruction( instruction ), output( output ), input( input ), clobber( clobber ), gotolabels( gotolabels ) {} 63 63 64 64 AsmStmt::AsmStmt( const AsmStmt & other ) : Statement( other ), voltile( other.voltile ), instruction( maybeClone( other.instruction ) ), gotolabels( other.gotolabels ) { … … 75 75 } 76 76 77 void AsmStmt::print( std::ostream & os, Indenter indent ) const {77 void AsmStmt::print( ostream & os, Indenter indent ) const { 78 78 os << "Assembler Statement:" << endl; 79 os << indent +1 << "instruction: " << endl << indent;80 instruction->print( os, indent +1 );79 os << indent + 1 << "instruction: " << endl << indent; 80 instruction->print( os, indent + 1 ); 81 81 if ( ! output.empty() ) { 82 os << endl << indent +1 << "output: " << endl;83 printAll( output, os, indent +1 );82 os << endl << indent + 1 << "output: " << endl; 83 printAll( output, os, indent + 1 ); 84 84 } // if 85 85 if ( ! input.empty() ) { 86 os << indent +1 << "input: " << endl;87 printAll( input, os, indent +1 );86 os << indent + 1 << "input: " << endl; 87 printAll( input, os, indent + 1 ); 88 88 } // if 89 89 if ( ! 
clobber.empty() ) { 90 os << indent +1 << "clobber: " << endl;91 printAll( clobber, os, indent +1 );90 os << indent + 1 << "clobber: " << endl; 91 printAll( clobber, os, indent + 1 ); 92 92 } // if 93 93 } 94 94 95 95 96 DirectiveStmt::DirectiveStmt( const st d::string & directive ) : Statement(), directive( directive ) {}97 98 void DirectiveStmt::print( std::ostream & os, Indenter ) const {96 DirectiveStmt::DirectiveStmt( const string & directive ) : Statement(), directive( directive ) {} 97 98 void DirectiveStmt::print( ostream & os, Indenter ) const { 99 99 os << "GCC Directive:" << directive << endl; 100 100 } … … 120 120 } 121 121 122 void BranchStmt::print( std::ostream & os, Indenter indent ) const {123 assert (type < 5);122 void BranchStmt::print( ostream & os, Indenter indent ) const { 123 assertf(type < BranchStmts, "CFA internal error: invalid branch statement" ); 124 124 os << "Branch (" << brType[type] << ")" << endl ; 125 if ( target != "" ) os << indent +1 << "with target: " << target << endl;126 if ( originalTarget != "" ) os << indent +1 << "with original target: " << originalTarget << endl;127 if ( computedTarget != nullptr ) os << indent +1 << "with computed target: " << computedTarget << endl;125 if ( target != "" ) os << indent + 1 << "with target: " << target << endl; 126 if ( originalTarget != "" ) os << indent + 1 << "with original target: " << originalTarget << endl; 127 if ( computedTarget != nullptr ) os << indent + 1 << "with computed target: " << computedTarget << endl; 128 128 } 129 129 … … 136 136 } 137 137 138 void ReturnStmt::print( std::ostream & os, Indenter indent ) const {138 void ReturnStmt::print( ostream & os, Indenter indent ) const { 139 139 os << "Return Statement, returning: "; 140 140 if ( expr != nullptr ) { 141 os << endl << indent +1;142 expr->print( os, indent +1 );141 os << endl << indent + 1; 142 expr->print( os, indent + 1 ); 143 143 } 144 144 os << endl; 145 145 } 146 146 147 IfStmt::IfStmt( Expression * condition, Statement * then Part, Statement * elsePart, std::list<Statement *> initialization ):148 Statement(), condition( condition ), then Part( thenPart ), elsePart( elsePart), initialization( initialization ) {}147 IfStmt::IfStmt( Expression * condition, Statement * then, Statement * else_, const list<Statement *> initialization ): 148 Statement(), condition( condition ), then( then ), else_( else_ ), initialization( initialization ) {} 149 149 150 150 IfStmt::IfStmt( const IfStmt & other ) : 151 Statement( other ), condition( maybeClone( other.condition ) ), then Part( maybeClone( other.thenPart ) ), elsePart( maybeClone( other.elsePart) ) {151 Statement( other ), condition( maybeClone( other.condition ) ), then( maybeClone( other.then ) ), else_( maybeClone( other.else_ ) ) { 152 152 cloneAll( other.initialization, initialization ); 153 153 } … … 156 156 deleteAll( initialization ); 157 157 delete condition; 158 delete then Part;159 delete else Part;160 } 161 162 void IfStmt::print( std::ostream & os, Indenter indent ) const {158 delete then; 159 delete else_; 160 } 161 162 void IfStmt::print( ostream & os, Indenter indent ) const { 163 163 os << "If on condition: " << endl; 164 os << indent +1;165 condition->print( os, indent +1 );164 os << indent + 1; 165 condition->print( os, indent + 1 ); 166 166 167 167 if ( !initialization.empty() ) { 168 168 os << indent << "... 
with initialization: \n"; 169 169 for ( const Statement * stmt : initialization ) { 170 os << indent +1;171 stmt->print( os, indent +1 );170 os << indent + 1; 171 stmt->print( os, indent + 1 ); 172 172 } 173 173 os << endl; … … 176 176 os << indent << "... then: " << endl; 177 177 178 os << indent +1;179 then Part->print( os, indent+1 );180 181 if ( else Part!= nullptr ) {178 os << indent + 1; 179 then->print( os, indent + 1 ); 180 181 if ( else_ != nullptr ) { 182 182 os << indent << "... else: " << endl; 183 os << indent +1;184 else Part->print( os, indent+1 );183 os << indent + 1; 184 else_->print( os, indent + 1 ); 185 185 } // if 186 186 } 187 187 188 SwitchStmt::SwitchStmt( Expression * condition, const std::list<Statement *> & statements ):188 SwitchStmt::SwitchStmt( Expression * condition, const list<Statement *> & statements ): 189 189 Statement(), condition( condition ), statements( statements ) { 190 190 } … … 201 201 } 202 202 203 void SwitchStmt::print( std::ostream & os, Indenter indent ) const {203 void SwitchStmt::print( ostream & os, Indenter indent ) const { 204 204 os << "Switch on condition: "; 205 205 condition->print( os ); … … 207 207 208 208 for ( const Statement * stmt : statements ) { 209 stmt->print( os, indent +1 );210 } 211 } 212 213 CaseStmt::CaseStmt( Expression * condition, const std::list<Statement *> & statements, bool deflt ) throw ( SemanticErrorException ) :214 Statement(), condition( condition ), stmts( statements ), _isDefault( deflt ) {209 stmt->print( os, indent + 1 ); 210 } 211 } 212 213 CaseStmt::CaseStmt( Expression * condition, const list<Statement *> & statements, bool deflt ) throw ( SemanticErrorException ) : 214 Statement(), condition( condition ), stmts( statements ), _isDefault( deflt ) { 215 215 if ( isDefault() && condition != nullptr ) SemanticError( condition, "default case with condition: " ); 216 216 } 217 217 218 218 CaseStmt::CaseStmt( const CaseStmt & other ) : 219 Statement( other ), condition( maybeClone(other.condition ) ), _isDefault( other._isDefault ) {219 Statement( other ), condition( maybeClone(other.condition ) ), _isDefault( other._isDefault ) { 220 220 cloneAll( other.stmts, stmts ); 221 221 } … … 226 226 } 227 227 228 CaseStmt * CaseStmt::makeDefault( const std::list<Label> & labels, std::list<Statement *> stmts ) {228 CaseStmt * CaseStmt::makeDefault( const list<Label> & labels, list<Statement *> stmts ) { 229 229 CaseStmt * stmt = new CaseStmt( nullptr, stmts, true ); 230 230 stmt->labels = labels; … … 232 232 } 233 233 234 void CaseStmt::print( std::ostream & os, Indenter indent ) const {234 void CaseStmt::print( ostream & os, Indenter indent ) const { 235 235 if ( isDefault() ) os << indent << "Default "; 236 236 else { … … 241 241 242 242 for ( Statement * stmt : stmts ) { 243 os << indent+1; 244 stmt->print( os, indent+1 ); 245 } 246 } 247 248 WhileStmt::WhileStmt( Expression * condition, Statement * body, std::list< Statement * > & initialization, bool isDoWhile ): 249 Statement(), condition( condition), body( body), initialization( initialization ), isDoWhile( isDoWhile) { 250 } 251 252 WhileStmt::WhileStmt( const WhileStmt & other ): 243 os << indent + 1; 244 stmt->print( os, indent + 1 ); 245 } 246 } 247 248 WhileDoStmt::WhileDoStmt( Expression * condition, Statement * body, const list< Statement * > & initialization, bool isDoWhile ): 249 Statement(), condition( condition ), body( body ), else_( nullptr ), initialization( initialization ), isDoWhile( isDoWhile) { 250 } 251 252 WhileDoStmt::WhileDoStmt( 
Expression * condition, Statement * body, Statement * else_, const list< Statement * > & initialization, bool isDoWhile ): 253 Statement(), condition( condition), body( body ), else_( else_ ), initialization( initialization ), isDoWhile( isDoWhile) { 254 } 255 256 WhileDoStmt::WhileDoStmt( const WhileDoStmt & other ): 253 257 Statement( other ), condition( maybeClone( other.condition ) ), body( maybeClone( other.body ) ), isDoWhile( other.isDoWhile ) { 254 258 } 255 259 256 While Stmt::~WhileStmt() {260 WhileDoStmt::~WhileDoStmt() { 257 261 delete body; 258 262 delete condition; 259 263 } 260 264 261 void While Stmt::print( std::ostream & os, Indenter indent ) const {265 void WhileDoStmt::print( ostream & os, Indenter indent ) const { 262 266 os << "While on condition: " << endl ; 263 condition->print( os, indent +1 );267 condition->print( os, indent + 1 ); 264 268 265 269 os << indent << "... with body: " << endl; 266 270 267 if ( body != nullptr ) body->print( os, indent +1 );268 } 269 270 ForStmt::ForStmt( std::list<Statement *> initialization, Expression * condition, Expression * increment, Statement * body):271 Statement(), initialization( initialization ), condition( condition ), increment( increment ), body( body ) {271 if ( body != nullptr ) body->print( os, indent + 1 ); 272 } 273 274 ForStmt::ForStmt( const list<Statement *> initialization, Expression * condition, Expression * increment, Statement * body, Statement * else_ ): 275 Statement(), initialization( initialization ), condition( condition ), increment( increment ), body( body ), else_( else_ ) { 272 276 } 273 277 274 278 ForStmt::ForStmt( const ForStmt & other ): 275 Statement( other ), condition( maybeClone( other.condition ) ), increment( maybeClone( other.increment ) ), body( maybeClone( other.body ) ) {279 Statement( other ), condition( maybeClone( other.condition ) ), increment( maybeClone( other.increment ) ), body( maybeClone( other.body ) ), else_( maybeClone( other.else_ ) ) { 276 280 cloneAll( other.initialization, initialization ); 277 281 … … 283 287 delete increment; 284 288 delete body; 285 } 286 287 void ForStmt::print( std::ostream & os, Indenter indent ) const { 289 delete else_; 290 } 291 292 void ForStmt::print( ostream & os, Indenter indent ) const { 288 293 Statement::print( os, indent ); // print labels 289 294 … … 293 298 os << indent << "... initialization: \n"; 294 299 for ( Statement * stmt : initialization ) { 295 os << indent +1;296 stmt->print( os, indent +1 );300 os << indent + 1; 301 stmt->print( os, indent + 1 ); 297 302 } 298 303 } 299 304 300 305 if ( condition != nullptr ) { 301 os << indent << "... condition: \n" << indent +1;302 condition->print( os, indent +1 );306 os << indent << "... condition: \n" << indent + 1; 307 condition->print( os, indent + 1 ); 303 308 } 304 309 305 310 if ( increment != nullptr ) { 306 os << "\n" << indent << "... increment: \n" << indent +1;307 increment->print( os, indent +1 );311 os << "\n" << indent << "... increment: \n" << indent + 1; 312 increment->print( os, indent + 1 ); 308 313 } 309 314 310 315 if ( body != nullptr ) { 311 os << "\n" << indent << "... with body: \n" << indent+1; 312 body->print( os, indent+1 ); 316 os << "\n" << indent << "... with body: \n" << indent + 1; 317 body->print( os, indent + 1 ); 318 } 319 320 if ( else_ != nullptr ) { 321 os << "\n" << indent << "... 
with body: \n" << indent + 1; 322 else_->print( os, indent + 1 ); 313 323 } 314 324 os << endl; … … 329 339 } 330 340 331 void ThrowStmt::print( std::ostream & os, Indenter indent) const {341 void ThrowStmt::print( ostream & os, Indenter indent) const { 332 342 if ( target ) os << "Non-Local "; 333 343 os << "Throw Statement, raising: "; 334 expr->print(os, indent +1);344 expr->print(os, indent + 1); 335 345 if ( target ) { 336 346 os << "... at: "; 337 target->print(os, indent +1);338 } 339 } 340 341 TryStmt::TryStmt( CompoundStmt * tryBlock, std::list<CatchStmt *> & handlers, FinallyStmt * finallyBlock ) :347 target->print(os, indent + 1); 348 } 349 } 350 351 TryStmt::TryStmt( CompoundStmt * tryBlock, const list<CatchStmt *> & handlers, FinallyStmt * finallyBlock ) : 342 352 Statement(), block( tryBlock ), handlers( handlers ), finallyBlock( finallyBlock ) { 343 353 } … … 353 363 } 354 364 355 void TryStmt::print( std::ostream & os, Indenter indent ) const {365 void TryStmt::print( ostream & os, Indenter indent ) const { 356 366 os << "Try Statement" << endl; 357 os << indent << "... with block:" << endl << indent +1;358 block->print( os, indent +1 );367 os << indent << "... with block:" << endl << indent + 1; 368 block->print( os, indent + 1 ); 359 369 360 370 // handlers 361 371 os << indent << "... and handlers:" << endl; 362 372 for ( const CatchStmt * stmt : handlers ) { 363 os << indent +1;364 stmt->print( os, indent +1 );373 os << indent + 1; 374 stmt->print( os, indent + 1 ); 365 375 } 366 376 367 377 // finally block 368 378 if ( finallyBlock != nullptr ) { 369 os << indent << "... and finally:" << endl << indent +1;370 finallyBlock->print( os, indent +1 );379 os << indent << "... and finally:" << endl << indent + 1; 380 finallyBlock->print( os, indent + 1 ); 371 381 } // if 372 382 } … … 386 396 } 387 397 388 void CatchStmt::print( std::ostream & os, Indenter indent ) const {398 void CatchStmt::print( ostream & os, Indenter indent ) const { 389 399 os << "Catch " << ((Terminate == kind) ? "Terminate" : "Resume") << " Statement" << endl; 390 400 391 401 os << indent << "... catching: "; 392 decl->printShort( os, indent +1 );402 decl->printShort( os, indent + 1 ); 393 403 os << endl; 394 404 395 405 if ( cond ) { 396 os << indent << "... with conditional:" << endl << indent +1;397 cond->print( os, indent +1 );406 os << indent << "... with conditional:" << endl << indent + 1; 407 cond->print( os, indent + 1 ); 398 408 } 399 409 400 410 os << indent << "... with block:" << endl; 401 os << indent +1;402 body->print( os, indent +1 );411 os << indent + 1; 412 body->print( os, indent + 1 ); 403 413 } 404 414 … … 414 424 } 415 425 416 void FinallyStmt::print( std::ostream & os, Indenter indent ) const {426 void FinallyStmt::print( ostream & os, Indenter indent ) const { 417 427 os << "Finally Statement" << endl; 418 os << indent << "... with block:" << endl << indent +1;419 block->print( os, indent +1 );428 os << indent << "... 
with block:" << endl << indent + 1; 429 block->print( os, indent + 1 ); 420 430 } 421 431 … … 429 439 } 430 440 431 void SuspendStmt::print( std::ostream & os, Indenter indent ) const {441 void SuspendStmt::print( ostream & os, Indenter indent ) const { 432 442 os << "Suspend Statement"; 433 443 switch (type) { … … 486 496 } 487 497 488 void WaitForStmt::print( std::ostream & os, Indenter indent ) const {498 void WaitForStmt::print( ostream & os, Indenter indent ) const { 489 499 os << "Waitfor Statement" << endl; 490 500 indent += 1; … … 521 531 522 532 523 WithStmt::WithStmt( const std::list< Expression * > & exprs, Statement * stmt ) : Declaration("", noStorageClasses, LinkageSpec::Cforall), exprs( exprs ), stmt( stmt ) {}533 WithStmt::WithStmt( const list< Expression * > & exprs, Statement * stmt ) : Declaration("", noStorageClasses, LinkageSpec::Cforall), exprs( exprs ), stmt( stmt ) {} 524 534 WithStmt::WithStmt( const WithStmt & other ) : Declaration( other ), stmt( maybeClone( other.stmt ) ) { 525 535 cloneAll( other.exprs, exprs ); … … 530 540 } 531 541 532 void WithStmt::print( std::ostream & os, Indenter indent ) const {542 void WithStmt::print( ostream & os, Indenter indent ) const { 533 543 os << "With statement" << endl; 534 544 os << indent << "... with expressions: " << endl; 535 printAll( exprs, os, indent +1 );536 os << indent << "... with statement:" << endl << indent +1;537 stmt->print( os, indent +1 );538 } 539 540 541 NullStmt::NullStmt( const std::list<Label> & labels ) : Statement( labels ) {542 } 543 544 void NullStmt::print( std::ostream & os, Indenter indent ) const {545 printAll( exprs, os, indent + 1 ); 546 os << indent << "... with statement:" << endl << indent + 1; 547 stmt->print( os, indent + 1 ); 548 } 549 550 551 NullStmt::NullStmt( const list<Label> & labels ) : Statement( labels ) { 552 } 553 554 void NullStmt::print( ostream & os, Indenter indent ) const { 545 555 os << "Null Statement" << endl; 546 556 Statement::print( os, indent ); … … 558 568 } 559 569 560 void ImplicitCtorDtorStmt::print( std::ostream & os, Indenter indent ) const {570 void ImplicitCtorDtorStmt::print( ostream & os, Indenter indent ) const { 561 571 os << "Implicit Ctor Dtor Statement" << endl; 562 572 os << indent << "... with Ctor/Dtor: "; 563 callStmt->print( os, indent +1);573 callStmt->print( os, indent + 1); 564 574 os << endl; 565 575 } 566 576 567 MutexStmt::MutexStmt( Statement * stmt, std::list<Expression *> mutexObjs )577 MutexStmt::MutexStmt( Statement * stmt, const list<Expression *> mutexObjs ) 568 578 : Statement(), stmt( stmt ), mutexObjs( mutexObjs ) { } 569 579 … … 577 587 } 578 588 579 void MutexStmt::print( std::ostream & os, Indenter indent ) const {589 void MutexStmt::print( ostream & os, Indenter indent ) const { 580 590 os << "Mutex Statement" << endl; 581 591 os << indent << "... with Expressions: " << endl; 582 592 for (auto * obj : mutexObjs) { 583 os << indent +1;584 obj->print( os, indent +1);593 os << indent + 1; 594 obj->print( os, indent + 1); 585 595 os << endl; 586 596 } 587 os << indent << "... with Statement: " << endl << indent +1;588 stmt->print( os, indent +1 );597 os << indent << "... with Statement: " << endl << indent + 1; 598 stmt->print( os, indent + 1 ); 589 599 } 590 600 -
src/SynTree/Statement.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jan 10 14:13:24 202013 // Update Count : 8512 // Last Modified On : Wed Feb 2 20:15:30 2022 13 // Update Count : 98 14 14 // 15 15 … … 107 107 std::list<Label> gotolabels; 108 108 109 AsmStmt( bool voltile, Expression * instruction, std::list<Expression *> output, std::list<Expression *> input, std::list<ConstantExpr *> clobber,std::list<Label> gotolabels );109 AsmStmt( bool voltile, Expression * instruction, const std::list<Expression *> output, const std::list<Expression *> input, const std::list<ConstantExpr *> clobber, const std::list<Label> gotolabels ); 110 110 AsmStmt( const AsmStmt & other ); 111 111 virtual ~AsmStmt(); … … 148 148 public: 149 149 Expression * condition; 150 Statement * then Part;151 Statement * else Part;150 Statement * then; 151 Statement * else_; 152 152 std::list<Statement *> initialization; 153 153 154 IfStmt( Expression * condition, Statement * then Part, Statement * elsePart,155 std::list<Statement *> initialization = std::list<Statement *>() );154 IfStmt( Expression * condition, Statement * then, Statement * else_, 155 const std::list<Statement *> initialization = std::list<Statement *>() ); 156 156 IfStmt( const IfStmt & other ); 157 157 virtual ~IfStmt(); … … 160 160 Expression * get_condition() { return condition; } 161 161 void set_condition( Expression * newValue ) { condition = newValue; } 162 Statement * get_then Part() { return thenPart; }163 void set_then Part( Statement * newValue ) { thenPart= newValue; }164 Statement * get_else Part() { return elsePart; }165 void set_else Part( Statement * newValue ) { elsePart= newValue; }162 Statement * get_then() { return then; } 163 void set_then( Statement * newValue ) { then = newValue; } 164 Statement * get_else() { return else_; } 165 void set_else( Statement * newValue ) { else_ = newValue; } 166 166 167 167 virtual IfStmt * clone() const override { return new IfStmt( *this ); } … … 225 225 }; 226 226 227 class While Stmt : public Statement {227 class WhileDoStmt : public Statement { 228 228 public: 229 229 Expression * condition; 230 230 Statement * body; 231 Statement * else_; 231 232 std::list<Statement *> initialization; 232 233 bool isDoWhile; 233 234 234 WhileStmt( Expression * condition, Statement * body, std::list<Statement *> & initialization, bool isDoWhile = false ); 235 WhileStmt( const WhileStmt & other ); 236 virtual ~WhileStmt(); 235 WhileDoStmt( Expression * condition, Statement * body, const std::list<Statement *> & initialization, bool isDoWhile = false ); 236 WhileDoStmt( Expression * condition, Statement * body, Statement * else_, const std::list<Statement *> & initialization, bool isDoWhile = false ); 237 WhileDoStmt( const WhileDoStmt & other ); 238 virtual ~WhileDoStmt(); 237 239 238 240 Expression * get_condition() { return condition; } … … 243 245 void set_isDoWhile( bool newValue ) { isDoWhile = newValue; } 244 246 245 virtual While Stmt * clone() const override { return new WhileStmt( *this ); }247 virtual WhileDoStmt * clone() const override { return new WhileDoStmt( *this ); } 246 248 virtual void accept( Visitor & v ) override { v.visit( this ); } 247 249 virtual void accept( Visitor & v ) const override { v.visit( this ); } … … 256 258 Expression * increment; 257 259 Statement * body; 258 259 ForStmt( std::list<Statement *> initialization, Expression * condition = nullptr, Expression * increment = nullptr, Statement * body = nullptr ); 260 
Statement * else_; 261 262 ForStmt( const std::list<Statement *> initialization, Expression * condition = nullptr, Expression * increment = nullptr, Statement * body = nullptr, Statement * else_ = nullptr ); 260 263 ForStmt( const ForStmt & other ); 261 264 virtual ~ForStmt(); … … 278 281 class BranchStmt : public Statement { 279 282 public: 280 enum Type { Goto = 0, Break, Continue, FallThrough, FallThroughDefault};283 enum Type { Goto, Break, Continue, FallThrough, FallThroughDefault, BranchStmts }; 281 284 282 285 // originalTarget kept for error messages. … … 357 360 FinallyStmt * finallyBlock; 358 361 359 TryStmt( CompoundStmt * tryBlock, std::list<CatchStmt *> & handlers, FinallyStmt * finallyBlock = nullptr );362 TryStmt( CompoundStmt * tryBlock, const std::list<CatchStmt *> & handlers, FinallyStmt * finallyBlock = nullptr ); 360 363 TryStmt( const TryStmt & other ); 361 364 virtual ~TryStmt(); … … 540 543 std::list<Expression *> mutexObjs; // list of mutex objects to acquire 541 544 542 MutexStmt( Statement * stmt, std::list<Expression *> mutexObjs );545 MutexStmt( Statement * stmt, const std::list<Expression *> mutexObjs ); 543 546 MutexStmt( const MutexStmt & other ); 544 547 virtual ~MutexStmt(); -
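The BranchStmt enum above gains a trailing BranchStmts enumerator, which lets the bound check in print be written as assertf( type < BranchStmts, ... ) and keeps the name table length in step with the enumerators. A minimal standalone sketch of that sentinel-count idiom (the enumerator names match the diff; the name strings here are placeholders):

#include <cassert>
#include <iostream>

struct BranchLike {
	// Trailing sentinel counts the enumerators that precede it.
	enum Type { Goto, Break, Continue, FallThrough, FallThroughDefault, BranchStmts };
	Type type;
};

// Name table sized by the sentinel; the assert below guards the indexing.
static const char * const brType[BranchLike::BranchStmts] = {
	"goto", "break", "continue", "fall through", "fall through default",
};

void print( const BranchLike & b ) {
	assert( b.type < BranchLike::BranchStmts );   // mirrors assertf( type < BranchStmts, ... )
	std::cout << "Branch (" << brType[b.type] << ")\n";
}

int main() {
	print( BranchLike{ BranchLike::Break } );
}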
src/SynTree/SynTree.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:56:44 202113 // Update Count : 1 312 // Last Modified On : Tue Feb 1 09:22:33 2022 13 // Update Count : 14 14 14 // 15 15 … … 45 45 class DirectiveStmt; 46 46 class IfStmt; 47 class While Stmt;47 class WhileDoStmt; 48 48 class ForStmt; 49 49 class SwitchStmt; -
src/SynTree/Visitor.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:35:35 202113 // Update Count : 1 512 // Last Modified On : Tue Feb 1 09:26:57 2022 13 // Update Count : 17 14 14 // 15 15 … … 60 60 virtual void visit( IfStmt * node ) { visit( const_cast<const IfStmt *>(node) ); } 61 61 virtual void visit( const IfStmt * ifStmt ) = 0; 62 virtual void visit( While Stmt * node ) { visit( const_cast<const WhileStmt *>(node) ); }63 virtual void visit( const While Stmt * whileStmt ) = 0;62 virtual void visit( WhileDoStmt * node ) { visit( const_cast<const WhileDoStmt *>(node) ); } 63 virtual void visit( const WhileDoStmt * whileDoStmt ) = 0; 64 64 virtual void visit( ForStmt * node ) { visit( const_cast<const ForStmt *>(node) ); } 65 65 virtual void visit( const ForStmt * forStmt ) = 0; -
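Visitor.h keeps the idiom where the mutable `visit` overload forwards to the const one, so concrete visitors only implement the const signature; after this changeset they must also spell the node type WhileDoStmt. A standalone sketch of that forwarding pattern in plain C++, using stand-in types rather than the compiler's real headers:

```cpp
#include <iostream>

struct WhileDoStmt;                            // stand-in node type

struct Visitor {
	// mutable overload forwards to the const overload, as in Visitor.h
	virtual void visit( WhileDoStmt * node ) { visit( const_cast<const WhileDoStmt *>( node ) ); }
	virtual void visit( const WhileDoStmt * node ) = 0;
	virtual ~Visitor() = default;
};

struct WhileDoStmt {
	void accept( Visitor & v ) { v.visit( this ); }
};

struct LoopCounter : Visitor {
	int loops = 0;
	void visit( const WhileDoStmt * ) override { ++loops; }
};

int main() {
	WhileDoStmt loop;
	LoopCounter counter;
	loop.accept( counter );                    // dispatches through the const overload
	std::cout << counter.loops << "\n";        // prints 1
}
```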
src/Validate/module.mk
r97c215f rf5a51db 16 16 17 17 SRC_VALIDATE = \ 18 Validate/Autogen.cpp \ 19 Validate/Autogen.hpp \ 18 20 Validate/CompoundLiteral.cpp \ 19 21 Validate/CompoundLiteral.hpp \ -
src/main.cc
r97c215f rf5a51db 10 10 // Created On : Fri May 15 23:12:02 2015 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tue Nov 30 10:25:00 202113 // Update Count : 6 5912 // Last Modified On : Wed Jan 26 14:09:00 2022 13 // Update Count : 670 14 14 // 15 15 … … 55 55 #include "ControlStruct/ExceptTranslate.h" // for translateEHM 56 56 #include "ControlStruct/FixLabels.hpp" // for fixLabels 57 #include "ControlStruct/HoistControlDecls.hpp" // hoistControlDecls 57 58 #include "ControlStruct/Mutate.h" // for mutate 58 59 #include "GenPoly/Box.h" // for box … … 73 74 #include "SynTree/Visitor.h" // for acceptAll 74 75 #include "Tuples/Tuples.h" // for expandMemberTuples, expan... 76 #include "Validate/Autogen.hpp" // for autogenerateRoutines 75 77 #include "Validate/FindSpecialDecls.h" // for findGlobalDecls 76 78 #include "Validate/CompoundLiteral.hpp" // for handleCompoundLiterals … … 78 80 #include "Validate/LabelAddressFixer.hpp" // for fixLabelAddresses 79 81 #include "Virtual/ExpandCasts.h" // for expandCasts 80 81 82 82 83 static void NewPass( const char * const name ) { … … 326 327 PASS( "Validate-B", SymTab::validate_B( translationUnit ) ); 327 328 PASS( "Validate-C", SymTab::validate_C( translationUnit ) ); 328 PASS( "Validate-D", SymTab::validate_D( translationUnit ) );329 329 330 330 CodeTools::fillLocations( translationUnit ); 331 331 332 332 if( useNewAST ) { 333 PASS( "Apply Concurrent Keywords", Concurrency::applyKeywords( translationUnit ) ); 334 PASS( "Forall Pointer Decay", SymTab::decayForallPointers( translationUnit ) ); 335 CodeTools::fillLocations( translationUnit ); 336 333 337 if (Stats::Counters::enabled) { 334 338 ast::pass_visitor_stats.avg = Stats::Counters::build<Stats::Counters::AverageCounter<double>>("Average Depth - New"); … … 338 342 339 343 forceFillCodeLocations( transUnit ); 344 345 // Must happen before autogen routines are added. 346 PASS( "Hoist Control Declarations", ControlStruct::hoistControlDecls( transUnit ) ); 347 348 // Must be after enum and pointer decay. 349 // Must be before compound literals. 350 PASS( "Generate Autogen Routines", Validate::autogenerateRoutines( transUnit ) ); 340 351 341 352 PASS( "Implement Mutex", Concurrency::implementMutex( transUnit ) ); … … 404 415 // Currently not working due to unresolved issues with UniqueExpr 405 416 PASS( "Expand Unique Expr", Tuples::expandUniqueExpr( transUnit ) ); // xxx - is this the right place for this? want to expand ASAP so tha, sequent passes don't need to worry about double-visiting a unique expr - needs to go after InitTweak::fix so that copy constructed return declarations are reused 417 418 PASS( "Translate Tries" , ControlStruct::translateTries( transUnit ) ); 419 406 420 translationUnit = convert( move( transUnit ) ); 407 421 } else { 422 PASS( "Validate-D", SymTab::validate_D( translationUnit ) ); 408 423 PASS( "Validate-E", SymTab::validate_E( translationUnit ) ); 409 424 PASS( "Validate-F", SymTab::validate_F( translationUnit ) ); … … 469 484 470 485 PASS( "Expand Unique Expr", Tuples::expandUniqueExpr( translationUnit ) ); // xxx - is this the right place for this? 
want to expand ASAP so tha, sequent passes don't need to worry about double-visiting a unique expr - needs to go after InitTweak::fix so that copy constructed return declarations are reused 486 487 PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) ); 471 488 } 472 489 473 PASS( "Translate Tries" , ControlStruct::translateTries( translationUnit ) );490 474 491 475 492 PASS( "Gen Waitfor" , Concurrency::generateWaitFor( translationUnit ) ); -
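The main.cc hunks mostly reorder passes and record their ordering constraints as comments ("Must happen before autogen routines are added", "Must be after enum and pointer decay"). The real PASS macro does more than call a function, but the constraint mechanism is simply list order. A standalone sketch of that idea with stand-in types; none of these lambdas are real cfa-cc passes:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct TranslationUnit { /* ... AST ... */ };

struct Pass {
	std::string name;
	std::function<void(TranslationUnit &)> run;
};

int main() {
	TranslationUnit unit;
	// Ordering constraints are enforced purely by the position in this list.
	std::vector<Pass> passes = {
		{ "Hoist Control Declarations", []( TranslationUnit & ) { /* must run before autogen */ } },
		{ "Generate Autogen Routines",  []( TranslationUnit & ) { /* needs hoisted decls */ } },
		{ "Implement Mutex",            []( TranslationUnit & ) { /* ... */ } },
	};
	for ( Pass & p : passes ) {
		std::cout << "PASS " << p.name << "\n";
		p.run( unit );
	}
}
```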
tests/concurrent/.expect/ctor-check.txt
r97c215f rf5a51db 2 2 ?{}: function 3 3 ... with parameters 4 lvalue reference to instance of struct Empty with body4 this: lvalue reference to instance of struct Empty with body 5 5 ... returning nothing 6 6 with body -
tests/concurrent/mutexstmt/.expect/locks.txt
r97c215f rf5a51db 3 3 Start Test: multi lock deadlock/mutual exclusion 4 4 End Test: multi lock deadlock/mutual exclusion 5 Start Test: single scoped lock mutual exclusion 6 End Test: single scoped lock mutual exclusion 7 Start Test: multi scoped lock deadlock/mutual exclusion 8 End Test: multi scoped lock deadlock/mutual exclusion -
tests/concurrent/mutexstmt/locks.cfa
r97c215f rf5a51db 59 59 } 60 60 61 thread T_Mutex_Scoped {}; 61 62 63 void main( T_Mutex_Scoped & this ) { 64 for (unsigned int i = 0; i < num_times; i++) { 65 { 66 scoped_lock(single_acquisition_lock) s{m1}; 67 count++; 68 } 69 { 70 scoped_lock(single_acquisition_lock) s{m1}; 71 assert(!insideFlag); 72 insideFlag = true; 73 assert(insideFlag); 74 insideFlag = false; 75 } 76 } 77 } 78 79 thread T_Multi_Scoped {}; 80 81 void main( T_Multi_Scoped & this ) { 82 for (unsigned int i = 0; i < num_times; i++) { 83 { 84 scoped_lock(single_acquisition_lock) s{m1}; 85 assert(!insideFlag); 86 insideFlag = true; 87 assert(insideFlag); 88 insideFlag = false; 89 } 90 { 91 scoped_lock(single_acquisition_lock) s1{m1}; 92 scoped_lock(single_acquisition_lock) s2{m2}; 93 scoped_lock(single_acquisition_lock) s3{m3}; 94 scoped_lock(single_acquisition_lock) s4{m4}; 95 scoped_lock(single_acquisition_lock) s5{m5}; 96 assert(!insideFlag); 97 insideFlag = true; 98 assert(insideFlag); 99 insideFlag = false; 100 } 101 { 102 scoped_lock(single_acquisition_lock) s1{m1}; 103 scoped_lock(single_acquisition_lock) s3{m3}; 104 assert(!insideFlag); 105 insideFlag = true; 106 assert(insideFlag); 107 insideFlag = false; 108 } 109 { 110 scoped_lock(single_acquisition_lock) s1{m1}; 111 scoped_lock(single_acquisition_lock) s2{m2}; 112 scoped_lock(single_acquisition_lock) s4{m4}; 113 assert(!insideFlag); 114 insideFlag = true; 115 assert(insideFlag); 116 insideFlag = false; 117 } 118 { 119 scoped_lock(single_acquisition_lock) s1{m1}; 120 scoped_lock(single_acquisition_lock) s3{m3}; 121 scoped_lock(single_acquisition_lock) s4{m4}; 122 scoped_lock(single_acquisition_lock) s5{m5}; 123 assert(!insideFlag); 124 insideFlag = true; 125 assert(insideFlag); 126 insideFlag = false; 127 } 128 } 129 } 130 131 int num_tasks = 10; 62 132 int main() { 63 133 processor p[10]; … … 67 137 T_Mutex t[10]; 68 138 } 139 assert(count == num_tasks * num_times); 69 140 printf("End Test: single lock mutual exclusion\n"); 70 141 printf("Start Test: multi lock deadlock/mutual exclusion\n"); … … 73 144 } 74 145 printf("End Test: multi lock deadlock/mutual exclusion\n"); 146 147 count = 0; 148 printf("Start Test: single scoped lock mutual exclusion\n"); 149 { 150 T_Mutex_Scoped t[10]; 151 } 152 assert(count == num_tasks * num_times); 153 printf("End Test: single scoped lock mutual exclusion\n"); 154 printf("Start Test: multi scoped lock deadlock/mutual exclusion\n"); 155 { 156 T_Multi_Scoped t[10]; 157 } 158 printf("End Test: multi scoped lock deadlock/mutual exclusion\n"); 75 159 } -
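The new test threads rely on scoped_lock releasing its mutex at the end of each block, which is what makes `count == num_tasks * num_times` and the insideFlag checks valid. For readers more familiar with C++, here is a standalone C++17 analogue of the same RAII pattern using std::scoped_lock; thread and iteration counts are illustrative (compile with -std=c++17 -pthread):

```cpp
#include <cassert>
#include <mutex>
#include <thread>
#include <vector>

int main() {
	std::mutex m1, m2;
	long count = 0;
	bool insideFlag = false;

	auto body = [&] {
		for ( int i = 0; i < 1000; ++i ) {
			{	// single lock: protects the counter
				std::scoped_lock s{ m1 };
				++count;
			}
			{	// several locks acquired together; scoped_lock orders them to avoid deadlock
				std::scoped_lock s{ m1, m2 };
				assert( !insideFlag );
				insideFlag = true;
				insideFlag = false;
			}
		}	// each block releases its locks here
	};

	std::vector<std::thread> workers;
	for ( int t = 0; t < 10; ++t ) workers.emplace_back( body );
	for ( auto & w : workers ) w.join();
	assert( count == 10 * 1000 );
}
```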
tests/concurrent/preempt.cfa
r97c215f rf5a51db 1 #include <clock.hfa> 2 #include <fstream.hfa> 1 3 #include <kernel.hfa> 2 4 #include <thread.hfa> … … 21 23 extern void __cfaabi_check_preemption(); 22 24 23 static volatile int counter = 0; 25 static struct { 26 volatile int counter; 27 volatile Time prev; 28 Duration durations[6]; 29 } globals; 24 30 25 31 thread worker_t { 26 32 int value; 33 unsigned long long spin; 27 34 }; 28 35 29 36 void ?{}( worker_t & this, int value ) { 30 37 this.value = value; 38 this.spin = 0; 31 39 } 32 40 33 41 void main(worker_t & this) { 34 while(TEST(counter < N)) { 42 while(TEST(globals.counter < N)) { 43 if(this.spin > 50_000_000_000) abort | "Worker" | this.value | "has been spinning too long! (" | this.spin | ")"; 35 44 __cfaabi_check_preemption(); 36 if( ( counter % 7) == this.value ) {45 if( (globals.counter % 7) == this.value ) { 37 46 __cfaabi_check_preemption(); 38 int next = __atomic_add_fetch( &counter, 1, __ATOMIC_SEQ_CST ); 47 #if !defined(TEST_LONG) 48 Time now = timeHiRes(); 49 Duration diff = now - globals.prev; 50 globals.prev = now; 51 #endif 52 int next = __atomic_add_fetch( &globals.counter, 1, __ATOMIC_SEQ_CST ); 39 53 __cfaabi_check_preemption(); 40 if( (next % 100) == 0 ) printf("%d\n", (int)next); 54 if( (next % 100) == 0 ) { 55 #if !defined(TEST_LONG) 56 unsigned idx = next / 100; 57 if (idx >= 6) abort | "Idx from next is invalid: " | idx | "vs" | next; 58 globals.durations[idx] = diff; 59 if(diff > 12`s) serr | "Duration suspiciously large:" | diff; 60 #endif 61 printf("%d\n", (int)next); 62 63 } 41 64 __cfaabi_check_preemption(); 65 this.spin = 0; 42 66 } 43 67 __cfaabi_check_preemption(); 44 68 KICK_WATCHDOG; 69 this.spin++; 45 70 } 46 71 } … … 48 73 int main(int argc, char* argv[]) { 49 74 processor p; 75 globals.counter = 0; 76 globals.durations[0] = 0; 77 globals.durations[1] = 0; 78 globals.durations[2] = 0; 79 globals.durations[3] = 0; 80 globals.durations[4] = 0; 81 globals.durations[5] = 0; 50 82 { 83 globals.prev = timeHiRes(); 51 84 worker_t w0 = 0; 52 85 worker_t w1 = 1; -
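The revised preempt test adds two safeguards: a per-worker spin counter that aborts the run if a worker stops making progress, and a timestamp taken around each increment so suspiciously long preemption gaps are reported. A standalone C++ sketch of the same structure; the thresholds, worker count, and names are illustrative rather than the test's exact values:

```cpp
#include <atomic>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <thread>
#include <vector>

constexpr int N = 1000;
std::atomic<int> counter{ 0 };

void worker( int value ) {
	using clock = std::chrono::steady_clock;
	auto prev = clock::now();
	unsigned long long spin = 0;
	while ( counter.load() < N ) {
		if ( spin > 500'000'000ULL ) {                    // progress watchdog
			std::fprintf( stderr, "worker %d spinning too long\n", value );
			std::abort();
		}
		if ( counter.load() % 7 == value ) {              // this worker's turn
			auto now = clock::now();
			auto diff = now - prev;                        // time since this worker's last turn
			prev = now;
			int next = counter.fetch_add( 1 ) + 1;
			if ( next % 100 == 0 ) {
				if ( diff > std::chrono::seconds( 12 ) )
					std::fprintf( stderr, "duration suspiciously large\n" );
				std::printf( "%d\n", next );
			}
			spin = 0;                                      // made progress: reset the watchdog
		}
		++spin;
	}
}

int main() {
	std::vector<std::thread> workers;
	for ( int v = 0; v < 7; ++v ) workers.emplace_back( worker, v );
	for ( auto & w : workers ) w.join();
}
```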
tests/include/.expect/includes.nast.txt
r97c215f rf5a51db 1 include/includes.cfa:15 4:25: warning: Compiled1 include/includes.cfa:153:25: warning: Compiled -
tests/include/includes.cfa
r97c215f rf5a51db 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 5 10:06:46 202113 // Update Count : 7 5112 // Last Modified On : Thu Feb 3 22:06:07 2022 13 // Update Count : 774 14 14 // 15 15 … … 18 18 #endif // __CFA__ 19 19 20 #if 1 20 21 //#define _GNU_SOURCE 21 22 #include <aio.h> … … 40 41 #include <errno.h> 41 42 #include <error.h> 42 //#include <eti.h> 43 //#include <eti.h> // may not be installed, comes with ncurses 43 44 #include <execinfo.h> 44 45 #include <expat.h> … … 49 50 #include <fmtmsg.h> 50 51 #include <fnmatch.h> 51 //#include <form.h> 52 //#include <form.h> // may not be installed, comes with ncurses 52 53 #include <fstab.h> 53 54 #include <fts.h> … … 77 78 #include <mcheck.h> 78 79 #include <memory.h> 79 //#include <menu.h> 80 //#include <menu.h> // may not be installed, comes with ncurses 80 81 #include <mntent.h> 81 82 #include <monetary.h> 82 83 #include <mqueue.h> 83 //#include <ncurses_dll.h> 84 //#include <ncurses_dll.h> // may not be installed, comes with ncurses 84 85 #include <netdb.h> 85 86 #include <nl_types.h> 86 87 #include <nss.h> 87 88 #include <obstack.h> 88 //#include <panel.h> 89 //#include <panel.h> // may not be installed, comes with ncurses 89 90 #include <paths.h> 90 91 #include <poll.h> … … 117 118 #include <syslog.h> 118 119 #include <tar.h> 119 //#include <term.h> 120 //#include <termcap.h> 120 //#include <term.h> // may not be installed, comes with ncurses 121 //#include <termcap.h> // may not be installed, comes with ncurses 121 122 #include <termio.h> 122 123 #include <termios.h> … … 130 131 #include <ucontext.h> 131 132 #include <ulimit.h> 132 //#include <unctrl.h> 133 //#include <unctrl.h> // may not be installed, comes with ncurses 133 134 #include <unistd.h> 134 135 #include <utime.h> … … 143 144 #include <wctype.h> 144 145 #include <wordexp.h> 145 146 #if 0147 146 #endif // 0 148 147 … … 151 150 #endif // __CFA__ 152 151 153 int main( int argc, char const * argv[] ) {154 #pragma GCC warning "Compiled" 152 int main( int argc, char const * argv[] ) { 153 #pragma GCC warning "Compiled" // force non-empty .expect file, NO TABS!!! 155 154 } 156 155 -
tools/auto-complete.md
r97c215f rf5a51db
### Zsh

1 - Add the following somewhere:

    #compdef test.py

    _test_py() {
        local -a options
        options=$($words[1] --list-comp)
        _alternative "files:filenames:($options)"
    }

    _test_py "$@"

2 - Add the path to that file to the "fpath" environment variable.

3 - In ~/.zshrc add

    autoload -U compinit
    compinit

*How it works:* the `#compdef test.py` tag registers `_test_py` as the completion function for `test.py`, and `compinit` finds the file because its directory is on `fpath`. When completion is requested, the function runs the command being completed (`$words[1]`) with `--list-comp` to ask it for its candidate completions, then hands the resulting list to `_alternative`, which offers those words as completion matches.