Changeset f5a51db
- Timestamp: Feb 8, 2022, 11:53:13 AM (20 months ago)
- Branches: ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children: cc7bbe6
- Parents: 97c215f (diff), 1cf8a9f (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files: 8 added, 75 edited
benchmark/io/http/Makefile.am
(r97c215f → rf5a51db)
      include $(top_srcdir)/tools/build/cfa.make

    - AM_CFLAGS = -O2 -Wall -Wextra -I$(srcdir) -lrt -pthread # -Werror
    + AM_CFLAGS = -O3 -Wall -Wextra -I$(srcdir) -lrt -pthread # -Werror
      AM_CFAFLAGS = -quiet -nodebug
      AM_LDFLAGS = -quiet -nodebug
benchmark/io/http/main.cfa
(r97c215f → rf5a51db)
    - #define __USE_GNU
    + #define _GNU_SOURCE

      #include <errno.h>
      ...
      #include <unistd.h>
      extern "C" {
    +     #include <sched.h>
          #include <signal.h>
          #include <sys/socket.h>
      ...
      (this.self){ "Server Cluster", options.clopts.params };

    + cpu_set_t fullset;
    + CPU_ZERO(&fullset);
    + int ret = sched_getaffinity(getpid(), sizeof(fullset), &fullset);
    + if( ret != 0 ) abort | "sched_getaffinity failed with" | errno | strerror( errno );
    + int cnt = CPU_COUNT(&fullset);
    +
      this.procs = alloc(options.clopts.nprocs);
      for(i; options.clopts.nprocs) {
          (this.procs[i]){ "Benchmark Processor", this.self };
    +
    +     int c = 0;
    +     int n = 1 + (i % cnt);
    +     for(int j = 0; j < CPU_SETSIZE; j++) {
    +         if(CPU_ISSET(j, &fullset)) n--;
    +         if(n == 0) {
    +             c = j;
    +             break;
    +         }
    +     }
    +     cpu_set_t localset;
    +     CPU_ZERO(&localset);
    +     CPU_SET(c, &localset);
    +     ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
    +     if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );

      #if !defined(__CFA_NO_STATISTICS__)
      ...
      int waited = 0;
      for() {
    -     ret = bind( server_fd, (struct sockaddr *)&address, sizeof(address) );
    +     int sockfd = server_fd;
    +     __CONST_SOCKADDR_ARG addr;
    +     addr.__sockaddr__ = (struct sockaddr *)&address;
    +     socklen_t addrlen = sizeof(address);
    +     ret = bind( sockfd, addr, addrlen );
          if(ret < 0) {
              if(errno == EADDRINUSE) {
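The main.cfa hunk above pins each benchmark processor to one CPU of the process's affinity mask, round-robin. For readers less familiar with the glibc affinity API, the following stand-alone C sketch shows the same pattern; the helper name and the error handling are illustrative, not part of the changeset.

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Pin workers[i] to the (1 + i % cnt)-th CPU present in the process's affinity mask. */
    static void pin_round_robin(pthread_t * workers, int nworkers) {
        cpu_set_t fullset;
        CPU_ZERO(&fullset);
        if (sched_getaffinity(0, sizeof(fullset), &fullset) != 0) { perror("sched_getaffinity"); exit(1); }
        int cnt = CPU_COUNT(&fullset);

        for (int i = 0; i < nworkers; i++) {
            int n = 1 + (i % cnt), cpu = 0;
            for (int j = 0; j < CPU_SETSIZE; j++) {        /* find the n-th CPU set in the mask */
                if (CPU_ISSET(j, &fullset) && --n == 0) { cpu = j; break; }
            }
            cpu_set_t localset;
            CPU_ZERO(&localset);
            CPU_SET(cpu, &localset);
            int ret = pthread_setaffinity_np(workers[i], sizeof(localset), &localset);
            if (ret != 0) { fprintf(stderr, "pthread_setaffinity_np failed: %d\n", ret); exit(1); }
        }
    }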
doc/theses/thierry_delisle_PhD/thesis/Makefile
(r97c215f → rf5a51db)
      ## Define the documents that need to be made.
      all: thesis.pdf
    - thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib
    + thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty

      DOCUMENT = thesis.pdf
doc/theses/thierry_delisle_PhD/thesis/fig/base.fig
r97c215f rf5a51db 12 12 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6900 4200 20 20 6900 4200 6920 4200 13 13 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6975 4200 20 20 6975 4200 6995 4200 14 -6 15 6 6375 5100 6675 5250 16 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6450 5175 20 20 6450 5175 6470 5175 17 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6525 5175 20 20 6525 5175 6545 5175 18 1 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6600 5175 20 20 6600 5175 6620 5175 14 19 -6 15 20 1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3900 2400 300 300 3900 2400 4200 2400 … … 75 80 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 76 81 2400 2475 3000 2475 82 2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7 83 3300 5210 3150 4950 2850 4950 2700 5210 2850 5470 3150 5470 84 3300 5210 85 2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7 86 4500 5210 4350 4950 4050 4950 3900 5210 4050 5470 4350 5470 87 4500 5210 88 2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7 89 5700 5210 5550 4950 5250 4950 5100 5210 5250 5470 5550 5470 90 5700 5210 77 91 4 2 -1 50 -1 0 12 0.0000 2 135 630 2100 3075 Threads\001 78 92 4 2 -1 50 -1 0 12 0.0000 2 165 450 2100 2850 Ready\001 … … 82 96 4 1 -1 50 -1 0 11 0.0000 2 135 180 2700 3550 TS\001 83 97 4 1 -1 50 -1 0 11 0.0000 2 135 180 2700 2650 TS\001 98 4 2 -1 50 -1 0 12 0.0000 2 135 900 2100 5175 Processors\001 -
doc/theses/thierry_delisle_PhD/thesis/glossary.tex
(r97c215f → rf5a51db)
      \textit{Synonyms : User threads, Lightweight threads, Green threads, Virtual threads, Tasks.}
    + }
    +
    + \longnewglossaryentry{rmr}
    + {name={remote memory reference}}
    + {
    +
      }
doc/theses/thierry_delisle_PhD/thesis/text/core.tex
r97c215f rf5a51db 49 49 50 50 \section{Design} 51 In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.51 In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue : create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering. 52 52 53 \subsection{Sharding} \label{sec:sharding} 54 An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again. 53 Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration scheduler in this thesis. 55 54 55 \subsection{Work-Stealing} 56 57 As I mentioned in \ref{existing:workstealing}, a popular pattern shard the ready-queue is work-stealing. As mentionned, in this pattern each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work. 58 The interesting aspect of workstealing happen in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention. 59 On the other hand, work-stealing schedulers only attempt to do load-balancing when a \gls{proc} runs out of work. 60 This means that the scheduler may never balance unfairness that does not result in a \gls{proc} running out of work. 61 Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation. 62 63 64 Based on these observation, I conclude that \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises. 65 66 \subsection{Relaxed-Fifo} 67 An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}. 
This approach forgos any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick from. 68 \Glspl{proc} choose ready-queus at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeing the oldest element. 69 The result is a queue that has both decent scalability and sufficient fairness. 70 The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element. 71 This contrasts with work-stealing, where \emph{any} \gls{proc} busy for an extended period of time results in all the elements on its local queue to have to wait. Unless another \gls{proc} runs out of work. 72 73 An important aspects of this scheme's fairness approach is that the timestamps make it possible to evaluate how long elements have been on the queue. 74 However, another major aspect is that \glspl{proc} will eagerly search for these older elements instead of focusing on specific queues. 75 76 While the fairness, of this scheme is good, it does suffer in terms of performance. 77 It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult. 78 79 \section{\CFA} 80 The \CFA is effectively attempting to merge these two approaches, keeping the best of both. 81 It is based on the 56 82 \begin{figure} 57 83 \centering 58 84 \input{base.pstex_t} 59 \caption[ Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}85 \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.} 60 86 \label{fig:base} 61 87 \end{figure} 62 88 63 \subsection{Finding threads}64 Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.65 89 66 \begin{figure}67 \centering68 \input{empty.pstex_t}69 \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}70 \label{fig:empty}71 \end{figure}72 90 73 There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information: 91 % The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance. 
74 92 75 \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done. 93 % \subsection{Sharding} \label{sec:sharding} 94 % An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again. 76 95 77 \begin{figure} 78 \centering 79 \vspace*{-5pt} 80 {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}} 81 \vspace*{-5pt} 82 \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.} 83 \label{fig:emptybit} 96 % \begin{figure} 97 % \centering 98 % \input{base.pstex_t} 99 % \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.} 100 % \label{fig:base} 101 % \end{figure} 84 102 85 \vspace*{10pt} 86 {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}} 87 \vspace*{-5pt} 88 \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.} 89 \label{fig:emptytree} 103 % \subsection{Finding threads} 104 % Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. 
Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses. 90 105 91 \vspace*{10pt}92 {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}} 93 \vspace*{-5pt}94 \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}95 \label{fig:emptytls}96 \end{figure}106 % \begin{figure} 107 % \centering 108 % \input{empty.pstex_t} 109 % \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.} 110 % \label{fig:empty} 111 % \end{figure} 97 112 98 \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow. 113 % There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information: 99 114 100 \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.115 % \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done. 
101 116 102 I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach. 117 % \begin{figure} 118 % \centering 119 % \vspace*{-5pt} 120 % {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}} 121 % \vspace*{-5pt} 122 % \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.} 123 % \label{fig:emptybit} 103 124 104 \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/} 105 In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later. 125 % \vspace*{10pt} 126 % {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}} 127 % \vspace*{-5pt} 128 % \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.} 129 % \label{fig:emptytree} 106 130 107 To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards. Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached. 
131 % \vspace*{10pt} 132 % {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}} 133 % \vspace*{-5pt} 134 % \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.} 135 % \label{fig:emptytls} 136 % \end{figure} 108 137 109 The algorithm works as follows: 110 \begin{itemize} 111 \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$. 112 \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions: 113 \begin{itemize} 114 \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$. 115 \item Pop operations use $B$ going backwards on each try. 116 \end{itemize} 117 \end{itemize} 138 % \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow. 118 139 119 The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.140 % \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries. 120 141 121 \section{Details} 142 % I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach. 143 144 % \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/} 145 % In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. 
Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later. 146 147 % To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards. Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached. 148 149 % The algorithm works as follows: 150 % \begin{itemize} 151 % \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$. 152 % \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions: 153 % \begin{itemize} 154 % \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$. 155 % \item Pop operations use $B$ going backwards on each try. 156 % \end{itemize} 157 % \end{itemize} 158 159 % The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm. 160 161 % \section{Details} -
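The Relaxed-Fifo subsection added above describes dequeuing by picking two random sub-queues and popping the element with the older timestamp. Below is a minimal C sketch of that pop operation under stated assumptions (a per-cell head timestamp, a trylock per sub-queue, UINT64_MAX encoding "empty"); it is an illustration of the technique, not the thesis' implementation.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct node     { uint64_t ts; struct node * next; };
    /* head_ts must be initialised to UINT64_MAX for empty cells. */
    struct subqueue { pthread_mutex_t lock; _Atomic uint64_t head_ts; struct node * head, * tail; };
    struct ready_q  { struct subqueue * q; unsigned n; };

    /* Pop from whichever of two random sub-queues has the older head; NULL means "retry". */
    static struct node * relaxed_pop(struct ready_q * rq) {
        struct subqueue * a = &rq->q[rand() % rq->n];
        struct subqueue * b = &rq->q[rand() % rq->n];
        uint64_t ta = atomic_load(&a->head_ts);
        uint64_t tb = atomic_load(&b->head_ts);
        if (ta == UINT64_MAX && tb == UINT64_MAX) return NULL;        /* both picks empty */
        struct subqueue * pick = (ta <= tb) ? a : b;

        if (pthread_mutex_trylock(&pick->lock) != 0) return NULL;     /* locked: failed pick */
        struct node * n = pick->head;
        if (n) {
            pick->head = n->next;
            if (!pick->head) pick->tail = NULL;
            atomic_store(&pick->head_ts, pick->head ? pick->head->ts : UINT64_MAX);
        }
        pthread_mutex_unlock(&pick->lock);
        return n;
    }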
doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex
(r97c215f → rf5a51db)
      In Memory Plain Text
    -
    - Networked Plain Text

      Networked ZIPF
doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
r97c215f rf5a51db 2 2 3 3 The first step of evaluation is always to test-out small controlled cases, to ensure that the basics are working properly. 4 This sections presents f ourdifferent experimental setup, evaluating some of the basic features of \CFA's scheduler.4 This sections presents five different experimental setup, evaluating some of the basic features of \CFA's scheduler. 5 5 6 6 \section{Cycling latency} 7 7 The most basic evaluation of any ready queue is to evaluate the latency needed to push and pop one element from the ready-queue. 8 While these two operation also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.9 However, yielding can be treated as a special case, since it also carries the information that the length of the ready queuewill not change.8 Since these two operation also describe a \texttt{yield} operation, many systems use this as the most basic benchmark. 9 However, yielding can be treated as a special case, since it also carries the information that the number of the ready \glspl{at} will not change. 10 10 Not all systems use this information, but those which do may appear to have better performance than they would for disconnected push/pop pairs. 11 11 For this reason, I chose a different first benchmark, which I call the Cycle Benchmark. 12 This benchmark arranges many threads into multiple rings of threads.12 This benchmark arranges many \glspl{at} into multiple rings of \glspl{at}. 13 13 Each ring is effectively a circular singly-linked list. 14 At runtime, each thread unparks the next threadbefore parking itself.14 At runtime, each \gls{at} unparks the next \gls{at} before parking itself. 15 15 This corresponds to the desired pair of ready queue operations. 16 Unparking the next thread requires pushing that thread onto the ready queue and the ensuing park will cause the runtime to pop a threadfrom the ready-queue.16 Unparking the next \gls{at} requires pushing that \gls{at} onto the ready queue and the ensuing park will cause the runtime to pop a \gls{at} from the ready-queue. 17 17 Figure~\ref{fig:cycle} shows a visual representation of this arrangement. 18 18 19 The goal of this ring is that the underlying runtime cannot rely on the guarantee that the number of ready threads will stay constant over the duration of the experiment. 20 In fact, the total number of threads waiting on the ready is expected to vary a little because of the race between the next thread unparking and the current thread parking. 21 The size of the cycle is also decided based on this race: cycles that are too small may see the 22 chain of unparks go full circle before the first thread can park. 19 The goal of this ring is that the underlying runtime cannot rely on the guarantee that the number of ready \glspl{at} will stay constant over the duration of the experiment. 20 In fact, the total number of \glspl{at} waiting on the ready queue is expected to vary because of the race between the next \gls{at} unparking and the current \gls{at} parking. 21 The size of the cycle is also decided based on this race: cycles that are too small may see the chain of unparks go full circle before the first \gls{at} can park. 23 22 While this would not be a correctness problem, every runtime system must handle that race, it could lead to pushes and pops being optimized away. 24 Since silently omitting ready-queue operations would throw off the measuring of these operations. 
25 Therefore the ring of threads must be big enough so the threads have the time to fully park before they are unparked. 23 Since silently omitting ready-queue operations would throw off the measuring of these operations, the ring of \glspl{at} must be big enough so the \glspl{at} have the time to fully park before they are unparked. 26 24 Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system. 27 25 … … 29 27 \centering 30 28 \input{cycle.pstex_t} 31 \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each thread unparks the next threadin the cycle before parking itself.}29 \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.} 32 30 \label{fig:cycle} 33 31 \end{figure} 34 32 35 33 \todo{check term ``idle sleep handling''} 36 To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of processorsavailable.34 To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available. 37 35 Beyond this point, adding more rings serves to mitigate even more the idle sleep handling. 38 This is to avoid the case where one of the worker threads runs out of work because of the variation on the number of ready threadsmentionned above.36 This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation on the number of ready \glspl{at} mentionned above. 39 37 40 38 The actual benchmark is more complicated to handle termination, but that simply requires using a binary semphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition. 41 39 42 \todo{mention where to get the code.} 40 \todo{code, setup, results} 41 \begin{lstlisting} 42 Thread.main() { 43 count := 0 44 for { 45 wait() 46 this.next.wake() 47 count ++ 48 if must_stop() { break } 49 } 50 global.count += count 51 } 52 \end{lstlisting} 53 43 54 44 55 \section{Yield} 45 56 For completion, I also include the yield benchmark. 46 This benchmark is much simpler than the cycle tests, it simply creates many threads that call \texttt{yield}. 57 This benchmark is much simpler than the cycle tests, it simply creates many \glspl{at} that call \texttt{yield}. 58 As mentionned in the previous section, this benchmark may be less representative of usages that only make limited use of \texttt{yield}, due to potential shortcuts in the routine. 59 Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 means the ready queue(s) could be empty. 60 This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done. 61 62 \todo{code, setup, results} 63 64 \begin{lstlisting} 65 Thread.main() { 66 count := 0 67 while !stop { 68 yield() 69 count ++ 70 } 71 global.count += count 72 } 73 \end{lstlisting} 74 75 76 \section{Churn} 77 The Cycle and Yield benchmark represents an ``easy'' scenario for a scheduler, \eg, an embarrassingly parallel application. 78 In these benchmarks, \glspl{at} can be easily partitioned over the different \glspl{proc} up-front and none of the \glspl{at} communicate with each other. 
79 80 The Churn benchmark represents more chaotic usages, where there is no relation between the last \gls{proc} on which a \gls{at} ran and the \gls{proc} that unblocked it. 81 When a \gls{at} is unblocked from a different \gls{proc} than the one on which it last ran, the unblocking \gls{proc} must either ``steal'' the \gls{at} or place it on a remote queue. 82 This results can result in either contention on the remote queue or \glspl{rmr} on \gls{at} data structure. 83 In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly. 84 85 To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}. 86 When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any. 87 This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves. 88 For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}. 89 90 \todo{code, setup, results} 91 \begin{lstlisting} 92 Thread.main() { 93 count := 0 94 for { 95 r := random() % len(spots) 96 next := xchg(spots[r], this) 97 if next { next.wake() } 98 wait() 99 count ++ 100 if must_stop() { break } 101 } 102 global.count += count 103 } 104 \end{lstlisting} 47 105 48 106 \section{Locality} 49 107 108 \todo{code, setup, results} 109 50 110 \section{Transfer} 111 The last benchmark is more exactly characterize as an experiment than a benchmark. 112 It tests the behavior of the schedulers for a particularly misbehaved workload. 113 In this workload, one of the \gls{at} is selected at random to be the leader. 114 The leader then spins in a tight loop until it has observed that all other \glspl{at} have acknowledged its leadership. 115 The leader \gls{at} then picks a new \gls{at} to be the ``spinner'' and the cycle repeats. 116 117 The benchmark comes in two flavours for the behavior of the non-leader \glspl{at}: 118 once they acknowledged the leader, they either block on a semaphore or yield repeatadly. 119 120 This experiment is designed to evaluate the short term load balancing of the scheduler. 121 Indeed, schedulers where the runnable \glspl{at} are partitioned on the \glspl{proc} may need to balance the \glspl{at} for this experient to terminate. 122 This is because the spinning \gls{at} is effectively preventing the \gls{proc} from runnning any other \glspl{thrd}. 123 In the semaphore flavour, the number of runnable \glspl{at} will eventually dwindle down to only the leader. 124 This is a simpler case to handle for schedulers since \glspl{proc} eventually run out of work. 125 In the yielding flavour, the number of runnable \glspl{at} stays constant. 126 This is a harder case to handle because corrective measures must be taken even if work is still available. 127 Note that languages that have mandatory preemption do circumvent this problem by forcing the spinner to yield. 
128 129 \todo{code, setup, results} 130 \begin{lstlisting} 131 Thread.lead() { 132 this.idx_seen = ++lead_idx 133 if lead_idx > stop_idx { 134 done := true 135 return 136 } 137 138 // Wait for everyone to acknowledge my leadership 139 start: = timeNow() 140 for t in threads { 141 while t.idx_seen != lead_idx { 142 asm pause 143 if (timeNow() - start) > 5 seconds { error() } 144 } 145 } 146 147 // pick next leader 148 leader := threads[ prng() % len(threads) ] 149 150 // wake every one 151 if !exhaust { 152 for t in threads { 153 if t != me { t.wake() } 154 } 155 } 156 } 157 158 Thread.wait() { 159 this.idx_seen := lead_idx 160 if exhaust { wait() } 161 else { yield() } 162 } 163 164 Thread.main() { 165 while !done { 166 if leader == me { this.lead() } 167 else { this.wait() } 168 } 169 } 170 \end{lstlisting} -
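Of the benchmarks added above, the Churn benchmark's "chair" exchange is the least conventional: a thread atomically swaps itself into a random chair, wakes whoever it displaced, then blocks until it is displaced in turn. A hedged C sketch of that inner loop follows, using C11 atomics and POSIX semaphores; the names, chair count, and termination handling are simplified, and the benchmark still requires at least as many threads as chairs plus processors.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    #define NSPOTS 32

    struct worker { sem_t sem; unsigned long count; };   /* sem initialised elsewhere with sem_init(&sem, 0, 0) */

    static _Atomic(struct worker *) spots[NSPOTS];
    static volatile int stop_flag;

    static void * churn_main(void * arg) {
        struct worker * self = arg;
        while (!stop_flag) {
            unsigned r = (unsigned)rand() % NSPOTS;
            /* Take the chair; whoever sat there before gets woken up. */
            struct worker * prev = atomic_exchange(&spots[r], self);
            if (prev) sem_post(&prev->sem);
            sem_wait(&self->sem);        /* block until another thread displaces us from the chair */
            self->count++;
        }
        return NULL;
    }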
doc/theses/thierry_delisle_PhD/thesis/text/existing.tex
(r97c215f → rf5a51db)

    - \section{Work Stealing}
    + \section{Work Stealing}\label{existing:workstealing}
      One of the most popular scheduling algorithm in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduce by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each workers has queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasks and stole only among neighbours). Blumofe and Leiserson also prove worst case space and time requirements for well-structured computations.
libcfa/src/concurrency/io.cfa
(r97c215f → rf5a51db)
      ctx->proc->io.pending = true;
      ctx->proc->io.dirty = true;
    - if(sq.to_submit > 30 || !lazy) {
    + if(sq.to_submit > 30) {
    +     __tls_stats()->io.flush.full++;
    +     __cfa_io_flush( ctx->proc, 0 );
    + }
    + if(!lazy) {
    +     __tls_stats()->io.flush.eager++;
          __cfa_io_flush( ctx->proc, 0 );
      }
libcfa/src/concurrency/kernel.cfa
r97c215f rf5a51db 42 42 43 43 #if !defined(__CFA_NO_STATISTICS__) 44 #define __STATS ( ...) __VA_ARGS__44 #define __STATS_DEF( ...) __VA_ARGS__ 45 45 #else 46 #define __STATS ( ...)46 #define __STATS_DEF( ...) 47 47 #endif 48 48 … … 122 122 static thread$ * __next_thread(cluster * this); 123 123 static thread$ * __next_thread_slow(cluster * this); 124 static thread$ * __next_thread_search(cluster * this); 124 125 static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1))); 125 126 static void __run_thread(processor * this, thread$ * dst); … … 187 188 MAIN_LOOP: 188 189 for() { 189 #define OLD_MAIN 1190 #if OLD_MAIN191 190 // Check if there is pending io 192 191 __maybe_io_drain( this ); … … 196 195 197 196 if( !readyThread ) { 197 __IO_STATS__(true, io.flush.idle++; ) 198 198 __cfa_io_flush( this, 0 ); 199 199 200 readyThread = __next_thread( this->cltr ); 201 } 202 203 if( !readyThread ) for(5) { 204 __IO_STATS__(true, io.flush.idle++; ) 205 200 206 readyThread = __next_thread_slow( this->cltr ); 207 208 if( readyThread ) break; 209 210 __cfa_io_flush( this, 0 ); 201 211 } 202 212 … … 206 216 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 207 217 208 #if !defined(__CFA_NO_STATISTICS__)209 __tls_stats()->ready.sleep.halts++;210 #endif211 212 218 // Push self to idle stack 213 219 if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP; 214 220 215 221 // Confirm the ready-queue is empty 216 readyThread = __next_thread_s low( this->cltr );222 readyThread = __next_thread_search( this->cltr ); 217 223 if( readyThread ) { 218 224 // A thread was found, cancel the halt 219 225 mark_awake(this->cltr->procs, * this); 220 226 221 #if !defined(__CFA_NO_STATISTICS__) 222 __tls_stats()->ready.sleep.cancels++; 223 #endif 227 __STATS__(true, ready.sleep.cancels++; ) 224 228 225 229 // continue the mai loop … … 248 252 249 253 if(this->io.pending && !this->io.dirty) { 254 __IO_STATS__(true, io.flush.dirty++; ) 250 255 __cfa_io_flush( this, 0 ); 251 256 } 252 253 #else254 #warning new kernel loop255 SEARCH: {256 /* paranoid */ verify( ! 
__preemption_enabled() );257 258 // First, lock the scheduler since we are searching for a thread259 ready_schedule_lock();260 261 // Try to get the next thread262 readyThread = pop_fast( this->cltr );263 if(readyThread) { ready_schedule_unlock(); break SEARCH; }264 265 // If we can't find a thread, might as well flush any outstanding I/O266 if(this->io.pending) { __cfa_io_flush( this, 0 ); }267 268 // Spin a little on I/O, just in case269 for(5) {270 __maybe_io_drain( this );271 readyThread = pop_fast( this->cltr );272 if(readyThread) { ready_schedule_unlock(); break SEARCH; }273 }274 275 // no luck, try stealing a few times276 for(5) {277 if( __maybe_io_drain( this ) ) {278 readyThread = pop_fast( this->cltr );279 } else {280 readyThread = pop_slow( this->cltr );281 }282 if(readyThread) { ready_schedule_unlock(); break SEARCH; }283 }284 285 // still no luck, search for a thread286 readyThread = pop_search( this->cltr );287 if(readyThread) { ready_schedule_unlock(); break SEARCH; }288 289 // Don't block if we are done290 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {291 ready_schedule_unlock();292 break MAIN_LOOP;293 }294 295 __STATS( __tls_stats()->ready.sleep.halts++; )296 297 // Push self to idle stack298 ready_schedule_unlock();299 if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;300 ready_schedule_lock();301 302 // Confirm the ready-queue is empty303 __maybe_io_drain( this );304 readyThread = pop_search( this->cltr );305 ready_schedule_unlock();306 307 if( readyThread ) {308 // A thread was found, cancel the halt309 mark_awake(this->cltr->procs, * this);310 311 __STATS( __tls_stats()->ready.sleep.cancels++; )312 313 // continue the main loop314 break SEARCH;315 }316 317 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )318 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);319 320 {321 eventfd_t val;322 ssize_t ret = read( this->idle_fd, &val, sizeof(val) );323 if(ret < 0) {324 switch((int)errno) {325 case EAGAIN:326 #if EAGAIN != EWOULDBLOCK327 case EWOULDBLOCK:328 #endif329 case EINTR:330 // No need to do anything special here, just assume it's a legitimate wake-up331 break;332 default:333 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );334 }335 }336 }337 338 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )339 340 // We were woken up, remove self from idle341 mark_awake(this->cltr->procs, * this);342 343 // DON'T just proceed, start looking again344 continue MAIN_LOOP;345 }346 347 RUN_THREAD:348 /* paranoid */ verify( ! 
__preemption_enabled() );349 /* paranoid */ verify( readyThread );350 351 // Reset io dirty bit352 this->io.dirty = false;353 354 // We found a thread run it355 __run_thread(this, readyThread);356 357 // Are we done?358 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;359 360 if(this->io.pending && !this->io.dirty) {361 __cfa_io_flush( this, 0 );362 }363 364 ready_schedule_lock();365 __maybe_io_drain( this );366 ready_schedule_unlock();367 #endif368 257 } 369 258 … … 476 365 break RUNNING; 477 366 case TICKET_UNBLOCK: 478 #if !defined(__CFA_NO_STATISTICS__) 479 __tls_stats()->ready.threads.threads++; 480 #endif 367 __STATS__(true, ready.threads.threads++; ) 481 368 // This is case 2, the racy case, someone tried to run this thread before it finished blocking 482 369 // In this case, just run it again. … … 493 380 __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst); 494 381 495 #if !defined(__CFA_NO_STATISTICS__) 496 __tls_stats()->ready.threads.threads--; 497 #endif 382 __STATS__(true, ready.threads.threads--; ) 498 383 499 384 /* paranoid */ verify( ! __preemption_enabled() ); … … 506 391 thread$ * thrd_src = kernelTLS().this_thread; 507 392 508 __STATS ( thrd_src->last_proc = kernelTLS().this_processor; )393 __STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; ) 509 394 510 395 // Run the thread on this processor … … 558 443 // Dereference the thread now because once we push it, there is not guaranteed it's still valid. 559 444 struct cluster * cl = thrd->curr_cluster; 560 __STATS (bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )445 __STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; ) 561 446 562 447 // push the thread to the cluster ready-queue … … 609 494 610 495 ready_schedule_lock(); 611 thread$ * thrd; 612 for(25) { 613 thrd = pop_slow( this ); 614 if(thrd) goto RET; 615 } 616 thrd = pop_search( this ); 617 618 RET: 496 thread$ * thrd = pop_slow( this ); 497 ready_schedule_unlock(); 498 499 /* paranoid */ verify( ! __preemption_enabled() ); 500 return thrd; 501 } 502 503 // KERNEL ONLY 504 static inline thread$ * __next_thread_search(cluster * this) with( *this ) { 505 /* paranoid */ verify( ! __preemption_enabled() ); 506 507 ready_schedule_lock(); 508 thread$ * thrd = pop_search( this ); 619 509 ready_schedule_unlock(); 620 510 … … 732 622 // Wake a thread from the front if there are any 733 623 static void __wake_one(cluster * this) { 624 eventfd_t val; 625 734 626 /* paranoid */ verify( ! 
__preemption_enabled() ); 735 627 /* paranoid */ verify( ready_schedule_islocked() ); 736 628 737 629 // Check if there is a sleeping processor 738 // int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST); 739 int fd = 0; 740 if( __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST) != 0 ) { 741 fd = __atomic_exchange_n(&this->procs.fd, 0, __ATOMIC_RELAXED); 742 } 743 744 // If no one is sleeping, we are done 745 if( fd == 0 ) return; 746 747 // We found a processor, wake it up 748 eventfd_t val; 749 val = 1; 750 eventfd_write( fd, val ); 751 752 #if !defined(__CFA_NO_STATISTICS__) 753 if( kernelTLS().this_stats ) { 754 __tls_stats()->ready.sleep.wakes++; 755 } 756 else { 757 __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); 758 } 759 #endif 630 struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST); 631 632 // If no one is sleeping: we are done 633 if( fdp == 0p ) return; 634 635 int fd = 1; 636 if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) { 637 fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED); 638 } 639 640 switch(fd) { 641 case 0: 642 // If the processor isn't ready to sleep then the exchange will already wake it up 643 #if !defined(__CFA_NO_STATISTICS__) 644 if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++; 645 } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); } 646 #endif 647 break; 648 case 1: 649 // If someone else already said they will wake them: we are done 650 #if !defined(__CFA_NO_STATISTICS__) 651 if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++; 652 } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); } 653 #endif 654 break; 655 default: 656 // If the processor was ready to sleep, we need to wake it up with an actual write 657 val = 1; 658 eventfd_write( fd, val ); 659 660 #if !defined(__CFA_NO_STATISTICS__) 661 if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++; 662 } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); } 663 #endif 664 break; 665 } 760 666 761 667 /* paranoid */ verify( ready_schedule_islocked() ); … … 770 676 771 677 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this); 678 679 this->idle_wctx.fd = 1; 772 680 773 681 eventfd_t val; … … 779 687 780 688 static void idle_sleep(processor * this, io_future_t & future, iovec & iov) { 689 // Tell everyone we are ready to go do sleep 690 for() { 691 int expected = this->idle_wctx.fd; 692 693 // Someone already told us to wake-up! No time for a nap. 694 if(expected == 1) { return; } 695 696 // Try to mark that we are going to sleep 697 if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) { 698 // Every one agreed, taking a nap 699 break; 700 } 701 } 702 703 781 704 #if !defined(CFA_WITH_IO_URING_IDLE) 782 705 #if !defined(__CFA_NO_STATISTICS__) … … 825 748 826 749 static bool mark_idle(__cluster_proc_list & this, processor & proc) { 750 __STATS__(true, ready.sleep.halts++; ) 751 752 proc.idle_wctx.fd = 0; 753 827 754 /* paranoid */ verify( ! __preemption_enabled() ); 828 755 if(!try_lock( this )) return false; … … 832 759 insert_first(this.idles, proc); 833 760 834 __atomic_store_n(&this.fd , proc.idle_fd, __ATOMIC_SEQ_CST);761 __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST); 835 762 unlock( this ); 836 763 /* paranoid */ verify( ! 
__preemption_enabled() ); … … 848 775 849 776 { 850 int fd= 0;851 if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;852 __atomic_store_n(&this.fd , fd, __ATOMIC_SEQ_CST);777 struct __fd_waitctx * wctx = 0; 778 if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx; 779 __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST); 853 780 } 854 781 … … 914 841 unsigned tail = *ctx->cq.tail; 915 842 if(head == tail) return false; 916 #if OLD_MAIN 917 ready_schedule_lock(); 918 ret = __cfa_io_drain( proc ); 919 ready_schedule_unlock(); 920 #else 921 ret = __cfa_io_drain( proc ); 922 #endif 843 ready_schedule_lock(); 844 ret = __cfa_io_drain( proc ); 845 ready_schedule_unlock(); 923 846 #endif 924 847 return ret; -
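The kernel.cfa changes above replace the cluster's single wake-up file descriptor with a per-processor __fd_waitctx whose fd field is a tri-state: 0 means the processor has not committed to sleeping, 1 means a wake-up has already been promised, and any other value is the eventfd to write to. The following C sketch shows that handshake in isolation; the names and structure are illustrative, not the runtime's actual code.

    #include <stdatomic.h>
    #include <sys/eventfd.h>

    struct waitctx { _Atomic int fd; };   /* 0: not sleeping, 1: wake promised, otherwise: eventfd */

    static void wake_one(struct waitctx * wctx) {
        int prev = atomic_exchange(&wctx->fd, 1);   /* claim the wake-up */
        if (prev == 0) return;     /* sleeper not committed yet; its CAS below fails and it skips the nap */
        if (prev == 1) return;     /* someone else already promised this wake-up */
        eventfd_write(prev, 1);    /* sleeper is blocked (or about to block) on its eventfd: wake it */
    }

    static void idle_sleep(struct waitctx * wctx, int my_evfd) {
        int expected = 0;
        /* Publish the eventfd only if no wake-up arrived in the meantime. */
        if (!atomic_compare_exchange_strong(&wctx->fd, &expected, my_evfd))
            return;                    /* a wake-up was already promised: skip sleeping */
        eventfd_t val;
        eventfd_read(my_evfd, &val);   /* block until wake_one() writes */
        atomic_store(&wctx->fd, 0);    /* back to "not ready to sleep" */
    }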
libcfa/src/concurrency/kernel.hfa
(r97c215f → rf5a51db)
      coroutine processorCtx_t {
          struct processor * proc;
      };

    +
    + struct __fd_waitctx {
    +     volatile int fd;
    + };

      ...
          int idle_fd;

    +     // Idle waitctx
    +     struct __fd_waitctx idle_wctx;
    +
          // Termination synchronisation (user semaphore)
          oneshot terminated;
      ...

          // FD to use to wake a processor
    -     volatile int fd;
    +     struct __fd_waitctx * volatile fdw;

          // Total number of processors
libcfa/src/concurrency/kernel/fwd.hfa
(r97c215f → rf5a51db)
          if( !(in_kernel) ) enable_interrupts(); \
      }
    + #if defined(CFA_HAVE_LINUX_IO_URING_H)
    +     #define __IO_STATS__(in_kernel, ...) { \
    +         if( !(in_kernel) ) disable_interrupts(); \
    +         with( *__tls_stats() ) { \
    +             __VA_ARGS__ \
    +         } \
    +         if( !(in_kernel) ) enable_interrupts(); \
    +     }
    + #else
    +     #define __IO_STATS__(in_kernel, ...)
    + #endif
      #else
          #define __STATS__(in_kernel, ...)
    +     #define __IO_STATS__(in_kernel, ...)
      #endif
      }
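The new __IO_STATS__ macro above follows the usual "statistics that compile away" pattern: when the feature is disabled the macro expands to nothing, so the counters cost nothing. A small self-contained C analogue, with macro and counter names made up for the example:

    #include <stdio.h>

    #if !defined(APP_NO_STATISTICS)
        #define APP_STATS(...) do { __VA_ARGS__ } while (0)   /* statistics enabled */
    #else
        #define APP_STATS(...)                                 /* expands to nothing: zero cost */
    #endif

    struct io_stats { unsigned long flush_full, flush_eager; };
    static _Thread_local struct io_stats tls_stats;

    static void submit(int nearly_full, int lazy) {
        if (nearly_full) {
            APP_STATS( tls_stats.flush_full++; );
            /* ... flush the ring ... */
        }
        if (!lazy) {
            APP_STATS( tls_stats.flush_eager++; );
            /* ... flush the ring ... */
        }
    }

    int main(void) {
        submit(1, 0);
        APP_STATS( printf("full=%lu eager=%lu\n", tls_stats.flush_full, tls_stats.flush_eager); );
        return 0;
    }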
libcfa/src/concurrency/kernel/startup.cfa
(r97c215f → rf5a51db)
      }

    + this.idle_wctx.fd = 0;
    +
    + // I'm assuming these two are reserved for standard input and output
    + // so I'm using them as sentinels with idle_wctx.
    + /* paranoid */ verify( this.idle_fd != 0 );
    + /* paranoid */ verify( this.idle_fd != 1 );
    +
      #if !defined(__CFA_NO_STATISTICS__)
          print_stats = 0;
      ...
      // Cluster
      static void ?{}(__cluster_proc_list & this) {
    -     this.fd = 0;
    +     this.fdw = 0p;
          this.idle = 0;
          this.total = 0;
libcfa/src/concurrency/mutex_stmt.hfa
(r97c215f → rf5a51db)
      }

    + struct scoped_lock {
    +     L * internal_lock;
    + };
    +
    + static inline void ?{}( scoped_lock(L) & this, L & internal_lock ) {
    +     this.internal_lock = &internal_lock;
    +     lock(internal_lock);
    + }
    +
    + static inline void ^?{}( scoped_lock(L) & this ) with(this) {
    +     unlock(*internal_lock);
    + }
    +
      static inline L * __get_ptr( L & this ) {
          return &this;
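The scoped_lock added above acquires its lock in the constructor and releases it in the destructor, so the lock is dropped on every exit path of the enclosing block (RAII). Plain C has no destructors, but the GCC/Clang cleanup attribute gives a comparable scope-bound unlock; the sketch below is an analogous idiom with made-up names, not CFA code.

    #include <pthread.h>
    #include <stdio.h>

    static void unlock_guard(pthread_mutex_t ** m) {
        if (*m) pthread_mutex_unlock(*m);   /* runs automatically when the guard variable leaves scope */
    }

    /* One guard per scope in this simple form. */
    #define SCOPED_LOCK(m) \
        __attribute__((cleanup(unlock_guard))) pthread_mutex_t * scope_guard_ = (pthread_mutex_lock(m), (m))

    static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    static void bump(void) {
        SCOPED_LOCK(&counter_lock);         /* locked here ... */
        counter++;
        printf("counter = %d\n", counter);
    }                                       /* ... and unlocked automatically on every exit path */

    int main(void) { bump(); bump(); return 0; }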
libcfa/src/concurrency/preemption.cfa
r97c215f rf5a51db 251 251 bool enabled = __cfaabi_tls.preemption_state.enabled; 252 252 253 // Check if there is a pending preemption 254 processor * proc = __cfaabi_tls.this_processor; 255 bool pending = proc ? proc->pending_preemption : false; 256 if( enabled && pending ) proc->pending_preemption = false; 257 253 258 // create a assembler label after 254 259 // marked as clobber all to avoid movement 255 260 __cfaasm_label(check, after); 261 262 // If we can preempt and there is a pending one 263 // this is a good time to yield 264 if( enabled && pending ) { 265 force_yield( __POLL_PREEMPTION ); 266 } 256 267 return enabled; 257 268 } … … 282 293 // marked as clobber all to avoid movement 283 294 __cfaasm_label(get, after); 295 296 // This is used everywhere, to avoid cost, we DO NOT poll pending preemption 284 297 return val; 285 298 } … … 358 371 if(!ready) { abort("Preemption should be ready"); } 359 372 360 __cfaasm_label(debug, before); 361 362 sigset_t oldset; 363 int ret; 364 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 365 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 366 367 ret = sigismember(&oldset, SIGUSR1); 368 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 369 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 370 371 ret = sigismember(&oldset, SIGALRM); 372 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 373 if(ret == 0) { abort("ERROR SIGALRM is enabled"); } 374 375 ret = sigismember(&oldset, SIGTERM); 376 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 377 if(ret == 1) { abort("ERROR SIGTERM is disabled"); } 378 379 __cfaasm_label(debug, after); 373 sigset_t oldset; 374 int ret; 375 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 376 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 377 378 ret = sigismember(&oldset, SIGUSR1); 379 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 380 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 381 382 ret = sigismember(&oldset, SIGALRM); 383 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 384 if(ret == 0) { abort("ERROR SIGALRM is enabled"); } 385 386 ret = sigismember(&oldset, SIGTERM); 387 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 388 if(ret == 1) { abort("ERROR SIGTERM is disabled"); } 380 389 } 381 390 … … 548 557 __cfaasm_label( check ); 549 558 __cfaasm_label( dsable ); 550 __cfaasm_label( debug );559 // __cfaasm_label( debug ); 551 560 552 561 // Check if preemption is safe … … 555 564 if( __cfaasm_in( ip, check ) ) { ready = false; goto EXIT; }; 556 565 if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; }; 557 if( __cfaasm_in( ip, debug ) ) { ready = false; goto EXIT; };566 // if( __cfaasm_in( ip, debug ) ) { ready = false; goto EXIT; }; 558 567 if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; }; 559 568 if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; }; … … 661 670 662 671 // Check if it is safe to preempt here 663 if( !preemption_ready( ip ) ) { return; } 672 if( !preemption_ready( ip ) ) { 673 #if !defined(__CFA_NO_STATISTICS__) 674 __cfaabi_tls.this_stats->ready.threads.preempt.rllfwd++; 675 #endif 676 return; 677 } 664 678 665 679 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) ); … … 680 694 681 695 
// Preemption can occur here 696 697 #if !defined(__CFA_NO_STATISTICS__) 698 __cfaabi_tls.this_stats->ready.threads.preempt.yield++; 699 #endif 682 700 683 701 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch -
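The preemption hunks above are the mechanism behind the two new counters tallied later in stats.cfa: preempt.yield counts ticks that actually force a context switch, while preempt.rllfwd ("roll forward") counts SIGUSR1 ticks that land at an unsafe point and are deferred until the thread next polls preemption, where the pending flag is consumed and paid back with force_yield. A minimal, self-contained C++ sketch of that roll-forward idea follows; it is illustrative only, not the CFA runtime, and apart from pending_preemption and the notion of force_yield every name in it is invented.

// Roll-forward preemption, reduced to its essentials.
#include <atomic>
#include <thread>

struct processor_state {
	bool preemption_enabled = false;
	std::atomic<bool> pending_preemption{false};  // set when a tick is deferred
};

thread_local processor_state this_proc;

static void force_yield_stub() { std::this_thread::yield(); }  // stand-in for the scheduler

// What the signal handler does when the interrupted PC is not a safe point:
// record the preemption instead of context switching (the rllfwd case).
static void defer_preemption() {
	this_proc.pending_preemption.store(true, std::memory_order_relaxed);
}

// What the enabled-check does: if preemption is allowed and one was deferred,
// consume the flag and yield voluntarily at this known-safe point.
static bool poll_preemption() {
	bool enabled = this_proc.preemption_enabled;
	bool pending = this_proc.pending_preemption.load(std::memory_order_relaxed);
	if (enabled && pending) {
		this_proc.pending_preemption.store(false, std::memory_order_relaxed);
		force_yield_stub();
	}
	return enabled;
}

int main() {
	this_proc.preemption_enabled = true;
	defer_preemption();               // pretend a tick arrived at a bad time
	return poll_preemption() ? 0 : 1;
}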
libcfa/src/concurrency/ready_queue.cfa
r97c215f rf5a51db 201 201 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) { 202 202 /* paranoid */ verify( ! __preemption_enabled() ); 203 /* paranoid */ verify( ! kernelTLS().sched_lock );204 203 205 204 // Step 1 : lock global lock … … 207 206 // to simply lock their own lock and enter. 208 207 __atomic_acquire( &write_lock ); 208 209 // Make sure we won't deadlock ourself 210 // Checking before acquiring the writer lock isn't safe 211 // because someone else could have locked us. 212 /* paranoid */ verify( ! kernelTLS().sched_lock ); 209 213 210 214 // Step 2 : lock per-proc lock -
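The relocated verify in ready_mutate_lock is an ordering point, as the new comment says: before the global write_lock is held, another writer may legitimately be holding this processor's reader slot, so checking kernelTLS().sched_lock earlier could trip on someone else's lock rather than a genuine self-deadlock. A rough C++ sketch of that reasoning, with write_lock taken from the diff and every other name invented:

#include <atomic>
#include <cassert>

// One reader slot per processor in the real runtime; a writer locks all of
// them while holding the single global writer lock.  Reduced to one slot here.
static std::atomic<bool> write_lock{false};
static std::atomic<bool> my_reader_slot{false};

static void ready_mutate_lock_sketch() {
	// Step 1: serialise writers.  Until this succeeds, another writer may be
	// holding my_reader_slot, so asserting it free would fire spuriously.
	while (write_lock.exchange(true, std::memory_order_acquire)) { /* spin */ }

	// With the writer lock held, nobody else can own the slot; if it is still
	// marked held, this thread really is about to deadlock on itself.
	assert(!my_reader_slot.load(std::memory_order_relaxed));

	// Step 2: lock every per-processor reader slot ... (elided)
}

int main() {
	ready_mutate_lock_sketch();
	write_lock.store(false, std::memory_order_release);
	return 0;
}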
libcfa/src/concurrency/stats.cfa
r97c215f rf5a51db 29 29 stats->ready.threads.threads = 0; 30 30 stats->ready.threads.cthreads = 0; 31 stats->ready.threads.preempt.yield = 0; 32 stats->ready.threads.preempt.rllfwd = 0; 31 33 stats->ready.sleep.halts = 0; 32 34 stats->ready.sleep.cancels = 0; 35 stats->ready.sleep.early = 0; 33 36 stats->ready.sleep.wakes = 0; 37 stats->ready.sleep.seen = 0; 34 38 stats->ready.sleep.exits = 0; 35 39 … … 43 47 stats->io.submit.slow = 0; 44 48 stats->io.flush.external = 0; 49 stats->io.flush.dirty = 0; 50 stats->io.flush.full = 0; 51 stats->io.flush.idle = 0; 52 stats->io.flush.eager = 0; 45 53 stats->io.calls.flush = 0; 46 54 stats->io.calls.submitted = 0; … … 71 79 72 80 void __tally_stats( struct __stats_t * cltr, struct __stats_t * proc ) { 73 tally_one( &cltr->ready.push.local.attempt, &proc->ready.push.local.attempt ); 74 tally_one( &cltr->ready.push.local.success, &proc->ready.push.local.success ); 75 tally_one( &cltr->ready.push.share.attempt, &proc->ready.push.share.attempt ); 76 tally_one( &cltr->ready.push.share.success, &proc->ready.push.share.success ); 77 tally_one( &cltr->ready.push.extrn.attempt, &proc->ready.push.extrn.attempt ); 78 tally_one( &cltr->ready.push.extrn.success, &proc->ready.push.extrn.success ); 79 tally_one( &cltr->ready.pop.local .attempt, &proc->ready.pop.local .attempt ); 80 tally_one( &cltr->ready.pop.local .success, &proc->ready.pop.local .success ); 81 tally_one( &cltr->ready.pop.help .attempt, &proc->ready.pop.help .attempt ); 82 tally_one( &cltr->ready.pop.help .success, &proc->ready.pop.help .success ); 83 tally_one( &cltr->ready.pop.steal .attempt, &proc->ready.pop.steal .attempt ); 84 tally_one( &cltr->ready.pop.steal .success, &proc->ready.pop.steal .success ); 85 tally_one( &cltr->ready.pop.search.attempt, &proc->ready.pop.search.attempt ); 86 tally_one( &cltr->ready.pop.search.success, &proc->ready.pop.search.success ); 87 tally_one( &cltr->ready.threads.migration , &proc->ready.threads.migration ); 88 tally_one( &cltr->ready.threads.extunpark , &proc->ready.threads.extunpark ); 89 tally_one( &cltr->ready.threads.threads , &proc->ready.threads.threads ); 90 tally_one( &cltr->ready.threads.cthreads , &proc->ready.threads.cthreads ); 91 tally_one( &cltr->ready.sleep.halts , &proc->ready.sleep.halts ); 92 tally_one( &cltr->ready.sleep.cancels , &proc->ready.sleep.cancels ); 93 tally_one( &cltr->ready.sleep.wakes , &proc->ready.sleep.wakes ); 94 tally_one( &cltr->ready.sleep.exits , &proc->ready.sleep.exits ); 81 tally_one( &cltr->ready.push.local.attempt , &proc->ready.push.local.attempt ); 82 tally_one( &cltr->ready.push.local.success , &proc->ready.push.local.success ); 83 tally_one( &cltr->ready.push.share.attempt , &proc->ready.push.share.attempt ); 84 tally_one( &cltr->ready.push.share.success , &proc->ready.push.share.success ); 85 tally_one( &cltr->ready.push.extrn.attempt , &proc->ready.push.extrn.attempt ); 86 tally_one( &cltr->ready.push.extrn.success , &proc->ready.push.extrn.success ); 87 tally_one( &cltr->ready.pop.local .attempt , &proc->ready.pop.local .attempt ); 88 tally_one( &cltr->ready.pop.local .success , &proc->ready.pop.local .success ); 89 tally_one( &cltr->ready.pop.help .attempt , &proc->ready.pop.help .attempt ); 90 tally_one( &cltr->ready.pop.help .success , &proc->ready.pop.help .success ); 91 tally_one( &cltr->ready.pop.steal .attempt , &proc->ready.pop.steal .attempt ); 92 tally_one( &cltr->ready.pop.steal .success , &proc->ready.pop.steal .success ); 93 tally_one( &cltr->ready.pop.search.attempt , 
&proc->ready.pop.search.attempt ); 94 tally_one( &cltr->ready.pop.search.success , &proc->ready.pop.search.success ); 95 tally_one( &cltr->ready.threads.migration , &proc->ready.threads.migration ); 96 tally_one( &cltr->ready.threads.extunpark , &proc->ready.threads.extunpark ); 97 tally_one( &cltr->ready.threads.threads , &proc->ready.threads.threads ); 98 tally_one( &cltr->ready.threads.cthreads , &proc->ready.threads.cthreads ); 99 tally_one( &cltr->ready.threads.preempt.yield , &proc->ready.threads.preempt.yield ); 100 tally_one( &cltr->ready.threads.preempt.rllfwd, &proc->ready.threads.preempt.rllfwd ); 101 tally_one( &cltr->ready.sleep.halts , &proc->ready.sleep.halts ); 102 tally_one( &cltr->ready.sleep.cancels , &proc->ready.sleep.cancels ); 103 tally_one( &cltr->ready.sleep.early , &proc->ready.sleep.early ); 104 tally_one( &cltr->ready.sleep.wakes , &proc->ready.sleep.wakes ); 105 tally_one( &cltr->ready.sleep.seen , &proc->ready.sleep.wakes ); 106 tally_one( &cltr->ready.sleep.exits , &proc->ready.sleep.exits ); 95 107 96 108 #if defined(CFA_HAVE_LINUX_IO_URING_H) … … 103 115 tally_one( &cltr->io.submit.slow , &proc->io.submit.slow ); 104 116 tally_one( &cltr->io.flush.external , &proc->io.flush.external ); 117 tally_one( &cltr->io.flush.dirty , &proc->io.flush.dirty ); 118 tally_one( &cltr->io.flush.full , &proc->io.flush.full ); 119 tally_one( &cltr->io.flush.idle , &proc->io.flush.idle ); 120 tally_one( &cltr->io.flush.eager , &proc->io.flush.eager ); 105 121 tally_one( &cltr->io.calls.flush , &proc->io.calls.flush ); 106 122 tally_one( &cltr->io.calls.submitted , &proc->io.calls.submitted ); … … 153 169 | " (" | eng3(ready.pop.search.attempt) | " try)"; 154 170 155 sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel," | eng3(ready.sleep.wakes) | "wake," | eng3(ready.sleep.exits) | "exit"; 171 sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel," 172 | eng3(ready.sleep.wakes + ready.sleep.early) | '(' | eng3(ready.sleep.early) | ',' | eng3(ready.sleep.seen) | ')' | " wake(early, seen)," 173 | eng3(ready.sleep.exits) | "exit"; 174 sstr | "- Preemption : " | eng3(ready.threads.preempt.yield) | "yields," | eng3(ready.threads.preempt.rllfwd) | "delayed"; 156 175 sstr | nl; 157 176 } … … 178 197 if(io.alloc.fail || io.alloc.revoke || io.alloc.block) 179 198 sstr | "- failures : " | eng3(io.alloc.fail) | "oom, " | eng3(io.alloc.revoke) | "rvk, " | eng3(io.alloc.block) | "blk"; 180 if(io.flush.external)181 sstr | "- flush external : " | eng3(io.flush.external);199 // if(io.flush.external) 200 // sstr | "- flush external : " | eng3(io.flush.external); 182 201 183 202 double avgsubs = ((double)io.calls.submitted) / io.calls.flush; 184 203 double avgcomp = ((double)io.calls.completed) / io.calls.drain; 185 204 sstr | "- syscll : " 186 | " sub " | eng3(io.calls. flush) | "/" | eng3(io.calls.submitted) | "(" | ws(3, 3, avgsubs) | "/flush)"187 | " - cmp " | eng3(io.calls. 
drain) | "/" | eng3(io.calls.completed) | "(" | ws(3, 3, avgcomp) | "/drain)"205 | " sub " | eng3(io.calls.submitted) | "/" | eng3(io.calls.flush) | "(" | ws(3, 3, avgsubs) | "/flush)" 206 | " - cmp " | eng3(io.calls.completed) | "/" | eng3(io.calls.drain) | "(" | ws(3, 3, avgcomp) | "/drain)" 188 207 | " - " | eng3(io.calls.errors.busy) | " EBUSY"; 208 sstr | " - sub: " | eng3(io.flush.full) | "full, " | eng3(io.flush.dirty) | "drty, " | eng3(io.flush.idle) | "idle, " | eng3(io.flush.eager) | "eagr, " | eng3(io.flush.external) | "ext"; 189 209 sstr | "- ops blk: " 190 210 | " sk rd: " | eng3(io.ops.sockread) | "epll: " | eng3(io.ops.epllread) -
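The new counters slot into the existing tallying scheme: each processor increments its own private fields on the hot path, and __tally_stats later folds every field into the cluster-wide block, one atomic add per counter. tally_one is the real helper name; its body below and the two-field counter struct are assumptions made for the sake of a runnable sketch.

#include <cstdint>

struct counters { volatile std::uint64_t yield = 0, rllfwd = 0; };

// Fold one per-processor field into the cluster-wide field.
static void tally_one(volatile std::uint64_t * cltr, volatile std::uint64_t * proc) {
	__atomic_fetch_add(cltr, *proc, __ATOMIC_RELAXED);  // the ordering here is an assumption
}

static void tally_stats(counters & cltr, counters & proc) {
	tally_one(&cltr.yield , &proc.yield );
	tally_one(&cltr.rllfwd, &proc.rllfwd);
}

int main() {
	counters cluster, proc;
	proc.yield = 3; proc.rllfwd = 1;      // hot-path increments, unsynchronised
	tally_stats(cluster, proc);
	return (cluster.yield == 3 && cluster.rllfwd == 1) ? 0 : 1;
}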
libcfa/src/concurrency/stats.hfa
r97c215f rf5a51db 65 65 volatile int64_t threads; // number of threads in the system, includes only local change 66 66 volatile int64_t cthreads; // number of threads in the system, includes only local change 67 struct { 68 volatile uint64_t yield; 69 volatile uint64_t rllfwd; 70 } preempt; 67 71 } threads; 68 72 struct { 69 73 volatile uint64_t halts; 70 74 volatile uint64_t cancels; 75 volatile uint64_t early; 71 76 volatile uint64_t wakes; 77 volatile uint64_t seen; 72 78 volatile uint64_t exits; 73 79 } sleep; … … 89 95 struct { 90 96 volatile uint64_t external; 97 volatile uint64_t dirty; 98 volatile uint64_t full; 99 volatile uint64_t idle; 100 volatile uint64_t eager; 91 101 } flush; 92 102 struct { -
libcfa/src/stdhdr/pthread.h
r97c215f rf5a51db 10 10 // Created On : Wed Jun 16 13:39:06 2021 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Jun 16 13:39:42 202113 // Update Count : 1 12 // Last Modified On : Thu Feb 3 21:53:26 2022 13 // Update Count : 13 14 14 // 15 15 16 // pthread.h and setjmp.h cannot agree on the type of __sigsetjmp: 17 // 18 // extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __attribute__ ((__nothrow__)); 19 // extern int __sigsetjmp (struct __jmp_buf_tag __env[1], int __savemask) __attribute__ ((__nothrow__)); 20 // 21 // With -Wall, gcc-11 warns about the disagreement unless the CPP directive 22 // 23 // # 1 "/usr/include/pthread.h" 1 3 4 24 // 25 // appears, which appears to be witchcraft. Unfortunately, this directive is removed by the CFA preprocessor, so the 26 // batchtest fails because of the spurious warning message. Hence, the warning is elided. 27 16 28 extern "C" { 29 #if defined(__GNUC__) && __GNUC__ == 11 30 #pragma GCC diagnostic push 31 #pragma GCC diagnostic ignored "-Warray-parameter" 32 #endif // defined(__GNUC__) && __GNUC__ == 11 33 17 34 #include_next <pthread.h> // has internal check for multiple expansion 35 36 #if defined(__GNUC__) && __GNUC__ == 11 37 #pragma GCC diagnostic pop 38 #endif // defined(__GNUC__) && __GNUC__ == 11 18 39 } // extern "C" 19 40 20 41 // Local Variables: // 21 // tab-width: 4 //22 42 // mode: c++ // 23 // compile-command: "make install" //24 43 // End: // -
libcfa/src/stdhdr/setjmp.h
r97c215f rf5a51db 10 10 // Created On : Mon Jul 4 23:25:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jul 5 20:38:33 201613 // Update Count : 1 212 // Last Modified On : Thu Feb 3 21:53:28 2022 13 // Update Count : 18 14 14 // 15 15 16 // pthread.h and setjmp.h cannot agree on the type of __sigsetjmp: 17 // 18 // extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __attribute__ ((__nothrow__)); 19 // extern int __sigsetjmp (struct __jmp_buf_tag __env[1], int __savemask) __attribute__ ((__nothrow__)); 20 // 21 // With -Wall, gcc-11 warns about the disagreement unless the CPP directive 22 // 23 // # 1 "/usr/include/pthread.h" 1 3 4 24 // 25 // appears, which appears to be witchcraft. Unfortunately, this directive is removed by the CFA preprocessor, so the 26 // batchtest fails because of the spurious warning message. Hence, the warning is elided. 27 16 28 extern "C" { 29 #if defined(__GNUC__) && __GNUC__ == 11 30 #pragma GCC diagnostic push 31 #pragma GCC diagnostic ignored "-Warray-parameter" 32 #endif // defined(__GNUC__) && __GNUC__ == 11 33 17 34 #include_next <setjmp.h> // has internal check for multiple expansion 35 36 #if defined(__GNUC__) && __GNUC__ == 11 37 #pragma GCC diagnostic pop 38 #endif // defined(__GNUC__) && __GNUC__ == 11 18 39 } // extern "C" 19 40 20 41 // Local Variables: // 21 // tab-width: 4 //22 42 // mode: c++ // 23 // compile-command: "make install" //24 43 // End: // -
src/AST/Convert.cpp
r97c215f rf5a51db 9 9 // Author : Thierry Delisle 10 10 // Created On : Thu May 09 15::37::05 2019 11 // Last Modified By : Andrew Beach12 // Last Modified On : Wed Jul 14 16:15:00 202113 // Update Count : 3711 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 13:19:22 2022 13 // Update Count : 41 14 14 // 15 15 … … 393 393 auto stmt = new IfStmt( 394 394 get<Expression>().accept1( node->cond ), 395 get<Statement>().accept1( node->then Part),396 get<Statement>().accept1( node->else Part),395 get<Statement>().accept1( node->then ), 396 get<Statement>().accept1( node->else_ ), 397 397 get<Statement>().acceptL( node->inits ) 398 398 ); … … 419 419 } 420 420 421 const ast::Stmt * visit( const ast::While Stmt * node ) override final {421 const ast::Stmt * visit( const ast::WhileDoStmt * node ) override final { 422 422 if ( inCache( node ) ) return nullptr; 423 423 auto inits = get<Statement>().acceptL( node->inits ); 424 auto stmt = new While Stmt(424 auto stmt = new WhileDoStmt( 425 425 get<Expression>().accept1( node->cond ), 426 426 get<Statement>().accept1( node->body ), 427 get<Statement>().accept1( node->else_ ), 427 428 inits, 428 429 node->isDoWhile … … 437 438 get<Expression>().accept1( node->cond ), 438 439 get<Expression>().accept1( node->inc ), 439 get<Statement>().accept1( node->body ) 440 get<Statement>().accept1( node->body ), 441 get<Statement>().accept1( node->else_ ) 440 442 ); 441 443 return stmtPostamble( stmt, node ); … … 1872 1874 old->location, 1873 1875 GET_ACCEPT_1(condition, Expr), 1874 GET_ACCEPT_1(then Part, Stmt),1875 GET_ACCEPT_1(else Part, Stmt),1876 GET_ACCEPT_1(then, Stmt), 1877 GET_ACCEPT_1(else_, Stmt), 1876 1878 GET_ACCEPT_V(initialization, Stmt), 1877 1879 GET_LABELS_V(old->labels) … … 1902 1904 } 1903 1905 1904 virtual void visit( const While Stmt * old ) override final {1906 virtual void visit( const WhileDoStmt * old ) override final { 1905 1907 if ( inCache( old ) ) return; 1906 this->node = new ast::While Stmt(1908 this->node = new ast::WhileDoStmt( 1907 1909 old->location, 1908 1910 GET_ACCEPT_1(condition, Expr), 1909 1911 GET_ACCEPT_1(body, Stmt), 1912 GET_ACCEPT_1(else_, Stmt), 1910 1913 GET_ACCEPT_V(initialization, Stmt), 1911 1914 old->isDoWhile, … … 1923 1926 GET_ACCEPT_1(increment, Expr), 1924 1927 GET_ACCEPT_1(body, Stmt), 1928 GET_ACCEPT_1(else_, Stmt), 1925 1929 GET_LABELS_V(old->labels) 1926 1930 ); -
src/AST/Copy.hpp
r97c215f rf5a51db 10 10 // Created On : Wed Jul 10 16:13:00 2019 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr Nov 11 9:22:00 2021 13 // Update Count : 2 12 // Last Modified On : Wed Dec 15 11:07:00 2021 13 // Update Count : 3 14 14 // 15 15 … 52 52 Node * deepCopy<Node>( const Node * localRoot ); 53 53 54 template<typename node_t, enum Node::ref_type ref_t> 55 node_t * shallowCopy( const ptr_base<node_t, ref_t> & localRoot ) { 56 return shallowCopy( localRoot.get() ); 57 } 58 59 template<typename node_t, enum Node::ref_type ref_t> 60 node_t * deepCopy( const ptr_base<node_t, ref_t> & localRoot ) { 61 return deepCopy( localRoot.get() ); 62 } 63 54 64 } 65 66 -
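The two new overloads are plain forwarding: they let a smart pointer be handed to shallowCopy/deepCopy without spelling out .get() at every call site. A generic, runnable illustration of the same pattern, using std::unique_ptr as a stand-in for ast::ptr and an invented Node type:

#include <iostream>
#include <memory>

struct Node { int value = 42; };

// existing raw-pointer API
static Node * deepCopy(const Node * n) { return new Node(*n); }
// convenience overload: unwrap and delegate
static Node * deepCopy(const std::unique_ptr<Node> & n) { return deepCopy(n.get()); }

int main() {
	std::unique_ptr<Node> original(new Node);
	Node * copy = deepCopy(original);          // no .get() at the call site
	std::cout << copy->value << '\n';
	delete copy;
	return 0;
}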
src/AST/Fwd.hpp
r97c215f rf5a51db 10 10 // Created On : Wed May 8 16:05:00 2019 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:37:39 2021 13 // Update Count : 4 12 // Last Modified On : Tue Feb 1 09:08:33 2022 13 // Update Count : 5 14 14 // 15 15 … 44 44 class DirectiveStmt; 45 45 class IfStmt; 46 class WhileStmt; 46 class WhileDoStmt; 47 47 class ForStmt; 48 48 class SwitchStmt; -
src/AST/Node.cpp
r97c215f rf5a51db 10 10 // Created On : Thu May 16 14:16:00 2019 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:25:06 2021 13 // Update Count : 2 12 // Last Modified On : Tue Feb 1 09:09:39 2022 13 // Update Count : 3 14 14 // 15 15 … 146 146 template class ast::ptr_base< ast::IfStmt, ast::Node::ref_type::weak >; 147 147 template class ast::ptr_base< ast::IfStmt, ast::Node::ref_type::strong >; 148 template class ast::ptr_base< ast::WhileStmt, ast::Node::ref_type::weak >; 149 template class ast::ptr_base< ast::WhileStmt, ast::Node::ref_type::strong >; 148 template class ast::ptr_base< ast::WhileDoStmt, ast::Node::ref_type::weak >; 149 template class ast::ptr_base< ast::WhileDoStmt, ast::Node::ref_type::strong >; 150 150 template class ast::ptr_base< ast::ForStmt, ast::Node::ref_type::weak >; 151 151 template class ast::ptr_base< ast::ForStmt, ast::Node::ref_type::strong >; -
src/AST/Node.hpp
r97c215f rf5a51db 188 188 } 189 189 190 ptr_base & operator=( const node_t * node ) { 191 assign( node ); 192 return *this; 193 } 194 190 195 template<typename o_node_t> 191 196 ptr_base & operator=( const o_node_t * node ) { -
src/AST/Pass.hpp
r97c215f rf5a51db 146 146 const ast::Stmt * visit( const ast::DirectiveStmt * ) override final; 147 147 const ast::Stmt * visit( const ast::IfStmt * ) override final; 148 const ast::Stmt * visit( const ast::While Stmt* ) override final;148 const ast::Stmt * visit( const ast::WhileDoStmt * ) override final; 149 149 const ast::Stmt * visit( const ast::ForStmt * ) override final; 150 150 const ast::Stmt * visit( const ast::SwitchStmt * ) override final; … … 238 238 239 239 private: 240 const ast::Stmt * call_accept( const ast::Stmt * ); 241 const ast::Expr * call_accept( const ast::Expr * ); 242 243 // requests WithStmtsToAdd directly add to this statement, as if it is a compound. 244 245 const ast::Stmt * call_accept_as_compound(const ast::Stmt *); 246 240 241 // Regular nodes 247 242 template< typename node_t > 248 auto call_accept( const node_t * node ) -> typename std::enable_if< 243 struct result1 { 244 bool differs; 245 const node_t * value; 246 247 template< typename object_t, typename super_t, typename field_t > 248 void apply(object_t *, field_t super_t::* field); 249 }; 250 251 result1<ast::Stmt> call_accept( const ast::Stmt * ); 252 result1<ast::Expr> call_accept( const ast::Expr * ); 253 254 template< typename node_t > 255 auto call_accept( const node_t * node ) 256 -> typename std::enable_if< 249 257 !std::is_base_of<ast::Expr, node_t>::value && 250 258 !std::is_base_of<ast::Stmt, node_t>::value 251 , decltype( node->accept(*this) ) 259 , result1< 260 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 261 > 252 262 >::type; 253 263 264 // requests WithStmtsToAdd directly add to this statement, as if it is a compound. 265 result1<ast::Stmt> call_accept_as_compound(const ast::Stmt *); 266 267 template<typename it_t, template <class...> class container_t> 268 static inline void take_all_delta( it_t it, container_t<ast::ptr<ast::Decl>> * decls, bool * mutated = nullptr ) { 269 if(empty(decls)) return; 270 271 std::transform(decls->begin(), decls->end(), it, [](ast::ptr<ast::Decl>&& decl) -> auto { 272 auto loc = decl->location; 273 auto stmt = new DeclStmt( loc, decl.release() ); 274 return { {stmt}, -1, false }; 275 }); 276 decls->clear(); 277 if(mutated) *mutated = true; 278 } 279 280 // Container of statements 254 281 template< template <class...> class container_t > 255 container_t< ptr<Stmt> > call_accept( const container_t< ptr<Stmt> > & ); 256 282 struct resultNstmt { 283 struct delta { 284 ptr<Stmt> nval; 285 ssize_t old_idx; 286 bool is_old; 287 288 delta(const Stmt * s, ssize_t i, bool old) : nval{s}, old_idx{i}, is_old{old} {} 289 }; 290 291 bool differs; 292 container_t< delta > values; 293 294 resultNstmt() : differs(false), values{} {} 295 resultNstmt(bool diff, container_t< delta > && vals) : differs(diff), values(vals) {} 296 297 template< typename object_t, typename super_t, typename field_t > 298 void apply(object_t *, field_t super_t::* field); 299 300 template< template <class...> class incontainer_t > 301 void take_all( incontainer_t<ast::ptr<ast::Stmt>> * stmts ) { 302 if(!stmts || stmts->empty()) return; 303 304 std::transform(stmts->begin(), stmts->end(), std::back_inserter( values ), [](ast::ptr<ast::Stmt>& decl) -> delta { 305 return delta( decl.release(), -1, false ); 306 }); 307 stmts->clear(); 308 differs = true; 309 } 310 311 template< template <class...> class incontainer_t > 312 void take_all( incontainer_t<ast::ptr<ast::Decl>> * decls ) { 313 if(!decls || decls->empty()) return; 314 315 std::transform(decls->begin(), decls->end(), 
std::back_inserter( values ), [](ast::ptr<ast::Decl>& decl) -> auto { 316 auto loc = decl->location; 317 auto stmt = new DeclStmt( loc, decl.release() ); 318 return delta( stmt, -1, false ); 319 }); 320 decls->clear(); 321 differs = true; 322 } 323 }; 324 325 template< template <class...> class container_t > 326 resultNstmt<container_t> call_accept( const container_t< ptr<Stmt> > & ); 327 328 // Container of something 257 329 template< template <class...> class container_t, typename node_t > 258 container_t< ptr<node_t> > call_accept( const container_t< ptr<node_t> > & container ); 330 struct resultN { 331 bool differs; 332 container_t<ptr<node_t>> values; 333 334 template< typename object_t, typename super_t, typename field_t > 335 void apply(object_t *, field_t super_t::* field); 336 }; 337 338 template< template <class...> class container_t, typename node_t > 339 resultN< container_t, node_t > call_accept( const container_t< ptr<node_t> > & container ); 259 340 260 341 public: 261 342 /// Logic to call the accept and mutate the parent if needed, delegates call to accept 262 template<typename node_t, typename parent_t, typename child_t>263 void maybe_accept(const node_t * &, child_t parent_t::* child);264 265 template<typename node_t, typename parent_t, typename child_t>266 void maybe_accept_as_compound(const node_t * &, child_t parent_t::* child);343 template<typename node_t, typename parent_t, typename field_t> 344 void maybe_accept(const node_t * &, field_t parent_t::* field); 345 346 template<typename node_t, typename parent_t, typename field_t> 347 void maybe_accept_as_compound(const node_t * &, field_t parent_t::* field); 267 348 268 349 private: -
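The core of this Pass.hpp rework is that call_accept no longer returns just the possibly-new child: it returns a small result object carrying a differs flag plus the value, and the parent is cloned and written back through apply() only when the flag is set. A stripped-down, runnable C++ sketch of that protocol follows; result1, differs and apply are names from the diff, while Expr, Stmt and the rewrite rule are invented, and the sketch leaks memory for brevity.

#include <iostream>

struct Expr { int value; };
struct Stmt { const Expr * expr; };

// Reduced result1: the possibly-new child plus a flag, and an apply() helper
// that writes the child into a field of an already-mutated parent.
template<typename node_t>
struct result1 {
	bool differs;
	const node_t * value;

	template<typename object_t, typename field_t>
	void apply(object_t * obj, field_t object_t::* field) const { obj->*field = value; }
};

// A toy pass: rewrite zero-valued expressions, leave everything else alone.
static result1<Expr> call_accept(const Expr * e) {
	if (e->value == 0) return { true, new Expr{ 1 } };
	return { false, e };
}

static const Stmt * maybe_accept(const Stmt * parent) {
	result1<Expr> r = call_accept(parent->expr);
	if (!r.differs) return parent;       // unchanged parents stay shared
	Stmt * clone = new Stmt(*parent);    // copy-on-write clone of the parent
	r.apply(clone, &Stmt::expr);
	return clone;
}

int main() {
	Stmt s{ new Expr{ 0 } };
	const Stmt * out = maybe_accept(&s);
	std::cout << (out != &s) << ' ' << out->expr->value << '\n';  // prints "1 1"
	return 0;
}

The container variants work the same way: resultN keeps a nullptr placeholder for every element that came back unchanged, so apply() swaps in only the slots that really differ, and resultNstmt records per-index deltas so statements or declarations the pass adds before or after a statement can be spliced in without rebuilding the untouched part of the list.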
src/AST/Pass.impl.hpp
r97c215f rf5a51db 34 34 __pass::previsit( core, node, 0 ); 35 35 36 #define VISIT( code... ) \37 /* if this node should visit its children */ \38 if ( __visit_children() ) { \39 /* visit the children */ \40 code \41 }42 43 36 #define VISIT_END( type, node ) \ 44 37 /* call the implementation of the postvisit of this pass */ \ … … 86 79 87 80 template<typename it_t, template <class...> class container_t> 88 static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * decls, bool * mutated = nullptr ) {89 if(empty( decls)) return;90 91 std::move( decls->begin(), decls->end(), it);92 decls->clear();81 static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * stmts, bool * mutated = nullptr ) { 82 if(empty(stmts)) return; 83 84 std::move(stmts->begin(), stmts->end(), it); 85 stmts->clear(); 93 86 if(mutated) *mutated = true; 94 87 } … … 130 123 return !new_val.empty(); 131 124 } 125 } 126 127 128 template< typename core_t > 129 template< typename node_t > 130 template< typename object_t, typename super_t, typename field_t > 131 void ast::Pass< core_t >::result1< node_t >::apply(object_t * object, field_t super_t::* field) { 132 object->*field = value; 132 133 } 133 134 … … 138 139 !std::is_base_of<ast::Expr, node_t>::value && 139 140 !std::is_base_of<ast::Stmt, node_t>::value 140 , decltype( node->accept(*this) ) 141 , ast::Pass< core_t >::result1< 142 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 143 > 141 144 >::type 142 145 { … … 147 150 static_assert( !std::is_base_of<ast::Stmt, node_t>::value, "ERROR"); 148 151 149 return node->accept( *this ); 152 auto nval = node->accept( *this ); 153 ast::Pass< core_t >::result1< 154 typename std::remove_pointer< decltype( node->accept(*this) ) >::type 155 > res; 156 res.differs = nval != node; 157 res.value = nval; 158 return res; 150 159 } 151 160 152 161 template< typename core_t > 153 const ast::Expr *ast::Pass< core_t >::call_accept( const ast::Expr * expr ) {162 ast::Pass< core_t >::result1<ast::Expr> ast::Pass< core_t >::call_accept( const ast::Expr * expr ) { 154 163 __pedantic_pass_assert( __visit_children() ); 155 164 __pedantic_pass_assert( expr ); … … 160 169 } 161 170 162 return expr->accept( *this ); 171 auto nval = expr->accept( *this ); 172 return { nval != expr, nval }; 163 173 } 164 174 165 175 template< typename core_t > 166 const ast::Stmt *ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) {176 ast::Pass< core_t >::result1<ast::Stmt> ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) { 167 177 __pedantic_pass_assert( __visit_children() ); 168 178 __pedantic_pass_assert( stmt ); 169 179 170 return stmt->accept( *this ); 180 const ast::Stmt * nval = stmt->accept( *this ); 181 return { nval != stmt, nval }; 171 182 } 172 183 173 184 template< typename core_t > 174 const ast::Stmt *ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) {185 ast::Pass< core_t >::result1<ast::Stmt> ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) { 175 186 __pedantic_pass_assert( __visit_children() ); 176 187 __pedantic_pass_assert( stmt ); … … 197 208 // If the pass doesn't want to add anything then we are done 198 209 if( empty(stmts_before) && empty(stmts_after) && empty(decls_before) && empty(decls_after) ) { 199 return nstmt;210 return { nstmt != stmt, nstmt }; 200 211 } 201 212 … … 219 230 __pass::take_all( std::back_inserter( compound->kids ), stmts_after ); 220 231 221 return compound;232 return {true, compound}; 222 233 } 223 234 224 235 
template< typename core_t > 225 236 template< template <class...> class container_t > 226 container_t< ptr<Stmt> > ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) { 237 template< typename object_t, typename super_t, typename field_t > 238 void ast::Pass< core_t >::resultNstmt<container_t>::apply(object_t * object, field_t super_t::* field) { 239 auto & container = object->*field; 240 __pedantic_pass_assert( container.size() <= values.size() ); 241 242 auto cit = enumerate(container).begin(); 243 244 container_t<ptr<Stmt>> nvals; 245 for(delta & d : values) { 246 if( d.is_old ) { 247 __pedantic_pass_assert( cit.idx <= d.old_idx ); 248 std::advance( cit, d.old_idx - cit.idx ); 249 nvals.push_back( std::move( (*cit).val) ); 250 } else { 251 nvals.push_back( std::move(d.nval) ); 252 } 253 } 254 255 object->*field = std::move(nvals); 256 } 257 258 template< typename core_t > 259 template< template <class...> class container_t > 260 ast::Pass< core_t >::resultNstmt<container_t> ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) { 227 261 __pedantic_pass_assert( __visit_children() ); 228 262 if( statements.empty() ) return {}; … … 251 285 pass_visitor_stats.avg->push(pass_visitor_stats.depth); 252 286 253 bool mutated = false; 254 container_t< ptr<Stmt> > new_kids; 255 for( const Stmt * stmt : statements ) { 287 resultNstmt<container_t> new_kids; 288 for( auto value : enumerate( statements ) ) { 256 289 try { 290 size_t i = value.idx; 291 const Stmt * stmt = value.val; 257 292 __pedantic_pass_assert( stmt ); 258 293 const ast::Stmt * new_stmt = stmt->accept( *this ); 259 294 assert( new_stmt ); 260 if(new_stmt != stmt ) mutated = true;295 if(new_stmt != stmt ) { new_kids.differs = true; } 261 296 262 297 // Make sure that it is either adding statements or declartions but not both … … 268 303 269 304 // Take all the statements which should have gone after, N/A for first iteration 270 __pass::take_all( std::back_inserter( new_kids ), decls_before, &mutated);271 __pass::take_all( std::back_inserter( new_kids ), stmts_before, &mutated);305 new_kids.take_all( decls_before ); 306 new_kids.take_all( stmts_before ); 272 307 273 308 // Now add the statement if there is one 274 new_kids.emplace_back( new_stmt ); 309 if(new_stmt != stmt) { 310 new_kids.values.emplace_back( new_stmt, i, false ); 311 } else { 312 new_kids.values.emplace_back( nullptr, i, true ); 313 } 275 314 276 315 // Take all the declarations that go before 277 __pass::take_all( std::back_inserter( new_kids ), decls_after, &mutated);278 __pass::take_all( std::back_inserter( new_kids ), stmts_after, &mutated);316 new_kids.take_all( decls_after ); 317 new_kids.take_all( stmts_after ); 279 318 } 280 319 catch ( SemanticErrorException &e ) { … … 285 324 if ( !errors.isEmpty() ) { throw errors; } 286 325 287 return mutated ? 
new_kids : container_t< ptr<Stmt> >();326 return new_kids; 288 327 } 289 328 290 329 template< typename core_t > 291 330 template< template <class...> class container_t, typename node_t > 292 container_t< ast::ptr<node_t> > ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) { 331 template< typename object_t, typename super_t, typename field_t > 332 void ast::Pass< core_t >::resultN<container_t, node_t>::apply(object_t * object, field_t super_t::* field) { 333 auto & container = object->*field; 334 __pedantic_pass_assert( container.size() == values.size() ); 335 336 for(size_t i = 0; i < container.size(); i++) { 337 // Take all the elements that are different in 'values' 338 // and swap them into 'container' 339 if( values[i] != nullptr ) std::swap(container[i], values[i]); 340 } 341 342 // Now the original containers should still have the unchanged values 343 // but also contain the new values 344 } 345 346 template< typename core_t > 347 template< template <class...> class container_t, typename node_t > 348 ast::Pass< core_t >::resultN<container_t, node_t> ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) { 293 349 __pedantic_pass_assert( __visit_children() ); 294 350 if( container.empty() ) return {}; … … 300 356 301 357 bool mutated = false; 302 container_t< ast::ptr<node_t>> new_kids;358 container_t<ptr<node_t>> new_kids; 303 359 for ( const node_t * node : container ) { 304 360 try { 305 361 __pedantic_pass_assert( node ); 306 362 const node_t * new_stmt = strict_dynamic_cast< const node_t * >( node->accept( *this ) ); 307 if(new_stmt != node ) mutated = true; 308 309 new_kids.emplace_back( new_stmt ); 363 if(new_stmt != node ) { 364 mutated = true; 365 new_kids.emplace_back( new_stmt ); 366 } else { 367 new_kids.emplace_back( nullptr ); 368 } 369 310 370 } 311 371 catch( SemanticErrorException &e ) { … … 313 373 } 314 374 } 375 376 __pedantic_pass_assert( new_kids.size() == container.size() ); 315 377 pass_visitor_stats.depth--; 316 378 if ( ! errors.isEmpty() ) { throw errors; } 317 379 318 return mutated ? 
new_kids : container_t< ast::ptr<node_t> >();380 return ast::Pass< core_t >::resultN<container_t, node_t>{ mutated, new_kids }; 319 381 } 320 382 … … 334 396 auto new_val = call_accept( old_val ); 335 397 336 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR");337 338 if( __pass::differs(old_val, new_val)) {398 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR"); 399 400 if( new_val.differs ) { 339 401 auto new_parent = __pass::mutate<core_t>(parent); 340 new_ parent->*child = new_val;402 new_val.apply(new_parent, child); 341 403 parent = new_parent; 342 404 } … … 360 422 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR"); 361 423 362 if( __pass::differs(old_val, new_val)) {424 if( new_val.differs ) { 363 425 auto new_parent = __pass::mutate<core_t>(parent); 364 new_ parent->*child = new_val;426 new_val.apply( new_parent, child ); 365 427 parent = new_parent; 366 428 } … … 452 514 VISIT_START( node ); 453 515 454 VISIT(516 if ( __visit_children() ) { 455 517 { 456 518 guard_symtab guard { *this }; … … 460 522 maybe_accept( node, &ObjectDecl::bitfieldWidth ); 461 523 maybe_accept( node, &ObjectDecl::attributes ); 462 )524 } 463 525 464 526 __pass::symtab::addId( core, 0, node ); … … 475 537 __pass::symtab::addId( core, 0, node ); 476 538 477 VISIT(maybe_accept( node, &FunctionDecl::withExprs );) 539 if ( __visit_children() ) { 540 maybe_accept( node, &FunctionDecl::withExprs ); 541 } 478 542 { 479 543 // with clause introduces a level of scope (for the with expression members). … … 493 557 } }; 494 558 __pass::symtab::addId( core, 0, func ); 495 VISIT(559 if ( __visit_children() ) { 496 560 // parameter declarations 497 561 maybe_accept( node, &FunctionDecl::params ); … … 509 573 maybe_accept( node, &FunctionDecl::stmts ); 510 574 maybe_accept( node, &FunctionDecl::attributes ); 511 )575 } 512 576 } 513 577 } … … 526 590 __pass::symtab::addStructFwd( core, 0, node ); 527 591 528 VISIT({592 if ( __visit_children() ) { 529 593 guard_symtab guard { * this }; 530 594 maybe_accept( node, &StructDecl::params ); 531 595 maybe_accept( node, &StructDecl::members ); 532 596 maybe_accept( node, &StructDecl::attributes ); 533 } )597 } 534 598 535 599 // this addition replaces the forward declaration … … 548 612 __pass::symtab::addUnionFwd( core, 0, node ); 549 613 550 VISIT({614 if ( __visit_children() ) { 551 615 guard_symtab guard { * this }; 552 616 maybe_accept( node, &UnionDecl::params ); 553 617 maybe_accept( node, &UnionDecl::members ); 554 618 maybe_accept( node, &UnionDecl::attributes ); 555 } )619 } 556 620 557 621 __pass::symtab::addUnion( core, 0, node ); … … 568 632 __pass::symtab::addEnum( core, 0, node ); 569 633 570 VISIT(634 if ( __visit_children() ) { 571 635 // unlike structs, traits, and unions, enums inject their members into the global scope 572 636 maybe_accept( node, &EnumDecl::params ); 573 637 maybe_accept( node, &EnumDecl::members ); 574 638 maybe_accept( node, &EnumDecl::attributes ); 575 )639 } 576 640 577 641 VISIT_END( Decl, node ); … … 584 648 VISIT_START( node ); 585 649 586 VISIT({650 if ( __visit_children() ) { 587 651 guard_symtab guard { *this }; 588 652 maybe_accept( node, &TraitDecl::params ); 589 653 maybe_accept( node, &TraitDecl::members ); 590 654 maybe_accept( node, &TraitDecl::attributes ); 591 } )655 } 592 656 593 657 
__pass::symtab::addTrait( core, 0, node ); … … 602 666 VISIT_START( node ); 603 667 604 VISIT({668 if ( __visit_children() ) { 605 669 guard_symtab guard { *this }; 606 670 maybe_accept( node, &TypeDecl::base ); 607 } )671 } 608 672 609 673 // see A NOTE ON THE ORDER OF TRAVERSAL, above … … 612 676 __pass::symtab::addType( core, 0, node ); 613 677 614 VISIT(678 if ( __visit_children() ) { 615 679 maybe_accept( node, &TypeDecl::assertions ); 616 680 … … 619 683 maybe_accept( node, &TypeDecl::init ); 620 684 } 621 )685 } 622 686 623 687 VISIT_END( Decl, node ); … … 630 694 VISIT_START( node ); 631 695 632 VISIT({696 if ( __visit_children() ) { 633 697 guard_symtab guard { *this }; 634 698 maybe_accept( node, &TypedefDecl::base ); 635 } )699 } 636 700 637 701 __pass::symtab::addType( core, 0, node ); 638 702 639 VISIT( maybe_accept( node, &TypedefDecl::assertions ); ) 703 if ( __visit_children() ) { 704 maybe_accept( node, &TypedefDecl::assertions ); 705 } 640 706 641 707 VISIT_END( Decl, node ); … … 648 714 VISIT_START( node ); 649 715 650 VISIT(716 if ( __visit_children() ) { 651 717 maybe_accept( node, &AsmDecl::stmt ); 652 )718 } 653 719 654 720 VISIT_END( AsmDecl, node ); … … 661 727 VISIT_START( node ); 662 728 663 VISIT(729 if ( __visit_children() ) { 664 730 maybe_accept( node, &DirectiveDecl::stmt ); 665 )731 } 666 732 667 733 VISIT_END( DirectiveDecl, node ); … … 674 740 VISIT_START( node ); 675 741 676 VISIT(742 if ( __visit_children() ) { 677 743 maybe_accept( node, &StaticAssertDecl::cond ); 678 744 maybe_accept( node, &StaticAssertDecl::msg ); 679 )745 } 680 746 681 747 VISIT_END( StaticAssertDecl, node ); … … 687 753 const ast::CompoundStmt * ast::Pass< core_t >::visit( const ast::CompoundStmt * node ) { 688 754 VISIT_START( node ); 689 VISIT( 755 756 if ( __visit_children() ) { 690 757 // Do not enter (or leave) a new scope if atFunctionTop. Remember to save the result. 
691 758 auto guard1 = makeFuncGuard( [this, enterScope = !this->atFunctionTop]() { … … 704 771 guard_scope guard3 { *this }; 705 772 maybe_accept( node, &CompoundStmt::kids ); 706 ) 773 } 774 707 775 VISIT_END( CompoundStmt, node ); 708 776 } … … 714 782 VISIT_START( node ); 715 783 716 VISIT(784 if ( __visit_children() ) { 717 785 maybe_accept( node, &ExprStmt::expr ); 718 )786 } 719 787 720 788 VISIT_END( Stmt, node ); … … 727 795 VISIT_START( node ) 728 796 729 VISIT(797 if ( __visit_children() ) { 730 798 maybe_accept( node, &AsmStmt::instruction ); 731 799 maybe_accept( node, &AsmStmt::output ); 732 800 maybe_accept( node, &AsmStmt::input ); 733 801 maybe_accept( node, &AsmStmt::clobber ); 734 )802 } 735 803 736 804 VISIT_END( Stmt, node ); … … 752 820 VISIT_START( node ); 753 821 754 VISIT({822 if ( __visit_children() ) { 755 823 // if statements introduce a level of scope (for the initialization) 756 824 guard_symtab guard { *this }; 757 825 maybe_accept( node, &IfStmt::inits ); 758 826 maybe_accept( node, &IfStmt::cond ); 759 maybe_accept_as_compound( node, &IfStmt::then Part);760 maybe_accept_as_compound( node, &IfStmt::else Part);761 } )827 maybe_accept_as_compound( node, &IfStmt::then ); 828 maybe_accept_as_compound( node, &IfStmt::else_ ); 829 } 762 830 763 831 VISIT_END( Stmt, node ); … … 765 833 766 834 //-------------------------------------------------------------------------- 767 // While Stmt768 template< typename core_t > 769 const ast::Stmt * ast::Pass< core_t >::visit( const ast::While Stmt * node ) {770 VISIT_START( node ); 771 772 VISIT({835 // WhileDoStmt 836 template< typename core_t > 837 const ast::Stmt * ast::Pass< core_t >::visit( const ast::WhileDoStmt * node ) { 838 VISIT_START( node ); 839 840 if ( __visit_children() ) { 773 841 // while statements introduce a level of scope (for the initialization) 774 842 guard_symtab guard { *this }; 775 maybe_accept( node, &While Stmt::inits );776 maybe_accept( node, &While Stmt::cond );777 maybe_accept_as_compound( node, &While Stmt::body );778 } )843 maybe_accept( node, &WhileDoStmt::inits ); 844 maybe_accept( node, &WhileDoStmt::cond ); 845 maybe_accept_as_compound( node, &WhileDoStmt::body ); 846 } 779 847 780 848 VISIT_END( Stmt, node ); … … 787 855 VISIT_START( node ); 788 856 789 VISIT({857 if ( __visit_children() ) { 790 858 // for statements introduce a level of scope (for the initialization) 791 859 guard_symtab guard { *this }; … … 795 863 maybe_accept( node, &ForStmt::inc ); 796 864 maybe_accept_as_compound( node, &ForStmt::body ); 797 } )865 } 798 866 799 867 VISIT_END( Stmt, node ); … … 806 874 VISIT_START( node ); 807 875 808 VISIT(876 if ( __visit_children() ) { 809 877 maybe_accept( node, &SwitchStmt::cond ); 810 878 maybe_accept( node, &SwitchStmt::stmts ); 811 )879 } 812 880 813 881 VISIT_END( Stmt, node ); … … 820 888 VISIT_START( node ); 821 889 822 VISIT(890 if ( __visit_children() ) { 823 891 maybe_accept( node, &CaseStmt::cond ); 824 892 maybe_accept( node, &CaseStmt::stmts ); 825 )893 } 826 894 827 895 VISIT_END( Stmt, node ); … … 842 910 VISIT_START( node ); 843 911 844 VISIT(912 if ( __visit_children() ) { 845 913 maybe_accept( node, &ReturnStmt::expr ); 846 )914 } 847 915 848 916 VISIT_END( Stmt, node ); … … 855 923 VISIT_START( node ); 856 924 857 VISIT(925 if ( __visit_children() ) { 858 926 maybe_accept( node, &ThrowStmt::expr ); 859 927 maybe_accept( node, &ThrowStmt::target ); 860 )928 } 861 929 862 930 VISIT_END( Stmt, node ); … … 869 937 VISIT_START( node ); 870 938 871 VISIT(939 if ( 
__visit_children() ) { 872 940 maybe_accept( node, &TryStmt::body ); 873 941 maybe_accept( node, &TryStmt::handlers ); 874 942 maybe_accept( node, &TryStmt::finally ); 875 )943 } 876 944 877 945 VISIT_END( Stmt, node ); … … 884 952 VISIT_START( node ); 885 953 886 VISIT({954 if ( __visit_children() ) { 887 955 // catch statements introduce a level of scope (for the caught exception) 888 956 guard_symtab guard { *this }; … … 890 958 maybe_accept( node, &CatchStmt::cond ); 891 959 maybe_accept_as_compound( node, &CatchStmt::body ); 892 } )960 } 893 961 894 962 VISIT_END( Stmt, node ); … … 901 969 VISIT_START( node ); 902 970 903 VISIT(971 if ( __visit_children() ) { 904 972 maybe_accept( node, &FinallyStmt::body ); 905 )973 } 906 974 907 975 VISIT_END( Stmt, node ); … … 914 982 VISIT_START( node ); 915 983 916 VISIT(984 if ( __visit_children() ) { 917 985 maybe_accept( node, &SuspendStmt::then ); 918 )986 } 919 987 920 988 VISIT_END( Stmt, node ); … … 934 1002 // } 935 1003 936 VISIT({1004 if ( __visit_children() ) { 937 1005 std::vector<WaitForStmt::Clause> new_clauses; 938 1006 new_clauses.reserve( node->clauses.size() ); … … 942 1010 const Expr * func = clause.target.func ? clause.target.func->accept(*this) : nullptr; 943 1011 if(func != clause.target.func) mutated = true; 1012 else func = nullptr; 944 1013 945 1014 std::vector<ptr<Expr>> new_args; … … 947 1016 for( const auto & arg : clause.target.args ) { 948 1017 auto a = arg->accept(*this); 949 new_args.push_back( a ); 950 if( a != arg ) mutated = true; 1018 if( a != arg ) { 1019 mutated = true; 1020 new_args.push_back( a ); 1021 } else 1022 new_args.push_back( nullptr ); 951 1023 } 952 1024 953 1025 const Stmt * stmt = clause.stmt ? clause.stmt->accept(*this) : nullptr; 954 1026 if(stmt != clause.stmt) mutated = true; 1027 else stmt = nullptr; 955 1028 956 1029 const Expr * cond = clause.cond ? 
clause.cond->accept(*this) : nullptr; 957 1030 if(cond != clause.cond) mutated = true; 1031 else cond = nullptr; 958 1032 959 1033 new_clauses.push_back( WaitForStmt::Clause{ {func, std::move(new_args) }, stmt, cond } ); … … 962 1036 if(mutated) { 963 1037 auto n = __pass::mutate<core_t>(node); 964 n->clauses = std::move( new_clauses ); 1038 for(size_t i = 0; i < new_clauses.size(); i++) { 1039 if(new_clauses.at(i).target.func != nullptr) std::swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func); 1040 1041 for(size_t j = 0; j < new_clauses.at(i).target.args.size(); j++) { 1042 if(new_clauses.at(i).target.args.at(j) != nullptr) std::swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j)); 1043 } 1044 1045 if(new_clauses.at(i).stmt != nullptr) std::swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt); 1046 if(new_clauses.at(i).cond != nullptr) std::swap(n->clauses.at(i).cond, new_clauses.at(i).cond); 1047 } 965 1048 node = n; 966 1049 } 967 } )1050 } 968 1051 969 1052 #define maybe_accept(field) \ 970 1053 if(node->field) { \ 971 1054 auto nval = call_accept( node->field ); \ 972 if(nval != node->field) { \1055 if(nval.differs ) { \ 973 1056 auto nparent = __pass::mutate<core_t>(node); \ 974 nparent->field = nval ; \1057 nparent->field = nval.value; \ 975 1058 node = nparent; \ 976 1059 } \ 977 1060 } 978 1061 979 VISIT(1062 if ( __visit_children() ) { 980 1063 maybe_accept( timeout.time ); 981 1064 maybe_accept( timeout.stmt ); … … 983 1066 maybe_accept( orElse.stmt ); 984 1067 maybe_accept( orElse.cond ); 985 )1068 } 986 1069 987 1070 #undef maybe_accept … … 996 1079 VISIT_START( node ); 997 1080 998 VISIT(1081 if ( __visit_children() ) { 999 1082 maybe_accept( node, &WithStmt::exprs ); 1000 1083 { … … 1004 1087 maybe_accept( node, &WithStmt::stmt ); 1005 1088 } 1006 ) 1089 } 1090 1007 1091 VISIT_END( Stmt, node ); 1008 1092 } … … 1022 1106 VISIT_START( node ); 1023 1107 1024 VISIT(1108 if ( __visit_children() ) { 1025 1109 maybe_accept( node, &DeclStmt::decl ); 1026 )1110 } 1027 1111 1028 1112 VISIT_END( Stmt, node ); … … 1037 1121 // For now this isn't visited, it is unclear if this causes problem 1038 1122 // if all tests are known to pass, remove this code 1039 VISIT(1123 if ( __visit_children() ) { 1040 1124 maybe_accept( node, &ImplicitCtorDtorStmt::callStmt ); 1041 )1125 } 1042 1126 1043 1127 VISIT_END( Stmt, node ); … … 1050 1134 VISIT_START( node ); 1051 1135 1052 VISIT({1136 if ( __visit_children() ) { 1053 1137 // mutex statements introduce a level of scope (for the initialization) 1054 1138 guard_symtab guard { *this }; 1055 1139 maybe_accept( node, &MutexStmt::stmt ); 1056 1140 maybe_accept( node, &MutexStmt::mutexObjs ); 1057 } )1141 } 1058 1142 1059 1143 VISIT_END( Stmt, node ); … … 1066 1150 VISIT_START( node ); 1067 1151 1068 VISIT(1152 if ( __visit_children() ) { 1069 1153 { 1070 1154 guard_symtab guard { *this }; … … 1073 1157 maybe_accept( node, &ApplicationExpr::func ); 1074 1158 maybe_accept( node, &ApplicationExpr::args ); 1075 )1159 } 1076 1160 1077 1161 VISIT_END( Expr, node ); … … 1084 1168 VISIT_START( node ); 1085 1169 1086 VISIT(1170 if ( __visit_children() ) { 1087 1171 { 1088 1172 guard_symtab guard { *this }; … … 1091 1175 1092 1176 maybe_accept( node, &UntypedExpr::args ); 1093 )1177 } 1094 1178 1095 1179 VISIT_END( Expr, node ); … … 1102 1186 VISIT_START( node ); 1103 1187 1104 VISIT({1188 if ( __visit_children() ) { 1105 1189 guard_symtab guard { *this }; 1106 1190 maybe_accept( node, &NameExpr::result ); 1107 } )1191 } 
1108 1192 1109 1193 VISIT_END( Expr, node ); … … 1116 1200 VISIT_START( node ); 1117 1201 1118 VISIT({ 1202 if ( __visit_children() ) { 1203 { 1119 1204 guard_symtab guard { *this }; 1120 1205 maybe_accept( node, &CastExpr::result ); 1121 1206 } 1122 1207 maybe_accept( node, &CastExpr::arg ); 1123 )1208 } 1124 1209 1125 1210 VISIT_END( Expr, node ); … … 1132 1217 VISIT_START( node ); 1133 1218 1134 VISIT({ 1219 if ( __visit_children() ) { 1220 { 1135 1221 guard_symtab guard { *this }; 1136 1222 maybe_accept( node, &KeywordCastExpr::result ); 1137 1223 } 1138 1224 maybe_accept( node, &KeywordCastExpr::arg ); 1139 )1225 } 1140 1226 1141 1227 VISIT_END( Expr, node ); … … 1148 1234 VISIT_START( node ); 1149 1235 1150 VISIT({ 1236 if ( __visit_children() ) { 1237 { 1151 1238 guard_symtab guard { *this }; 1152 1239 maybe_accept( node, &VirtualCastExpr::result ); 1153 1240 } 1154 1241 maybe_accept( node, &VirtualCastExpr::arg ); 1155 )1242 } 1156 1243 1157 1244 VISIT_END( Expr, node ); … … 1164 1251 VISIT_START( node ); 1165 1252 1166 VISIT({ 1253 if ( __visit_children() ) { 1254 { 1167 1255 guard_symtab guard { *this }; 1168 1256 maybe_accept( node, &AddressExpr::result ); 1169 1257 } 1170 1258 maybe_accept( node, &AddressExpr::arg ); 1171 )1259 } 1172 1260 1173 1261 VISIT_END( Expr, node ); … … 1180 1268 VISIT_START( node ); 1181 1269 1182 VISIT({1270 if ( __visit_children() ) { 1183 1271 guard_symtab guard { *this }; 1184 1272 maybe_accept( node, &LabelAddressExpr::result ); 1185 } )1273 } 1186 1274 1187 1275 VISIT_END( Expr, node ); … … 1194 1282 VISIT_START( node ); 1195 1283 1196 VISIT({ 1284 if ( __visit_children() ) { 1285 { 1197 1286 guard_symtab guard { *this }; 1198 1287 maybe_accept( node, &UntypedMemberExpr::result ); … … 1200 1289 maybe_accept( node, &UntypedMemberExpr::aggregate ); 1201 1290 maybe_accept( node, &UntypedMemberExpr::member ); 1202 )1291 } 1203 1292 1204 1293 VISIT_END( Expr, node ); … … 1211 1300 VISIT_START( node ); 1212 1301 1213 VISIT({ 1302 if ( __visit_children() ) { 1303 { 1214 1304 guard_symtab guard { *this }; 1215 1305 maybe_accept( node, &MemberExpr::result ); 1216 1306 } 1217 1307 maybe_accept( node, &MemberExpr::aggregate ); 1218 )1308 } 1219 1309 1220 1310 VISIT_END( Expr, node ); … … 1227 1317 VISIT_START( node ); 1228 1318 1229 VISIT({1319 if ( __visit_children() ) { 1230 1320 guard_symtab guard { *this }; 1231 1321 maybe_accept( node, &VariableExpr::result ); 1232 } )1322 } 1233 1323 1234 1324 VISIT_END( Expr, node ); … … 1241 1331 VISIT_START( node ); 1242 1332 1243 VISIT({1333 if ( __visit_children() ) { 1244 1334 guard_symtab guard { *this }; 1245 1335 maybe_accept( node, &ConstantExpr::result ); 1246 } )1336 } 1247 1337 1248 1338 VISIT_END( Expr, node ); … … 1255 1345 VISIT_START( node ); 1256 1346 1257 VISIT({ 1347 if ( __visit_children() ) { 1348 { 1258 1349 guard_symtab guard { *this }; 1259 1350 maybe_accept( node, &SizeofExpr::result ); … … 1264 1355 maybe_accept( node, &SizeofExpr::expr ); 1265 1356 } 1266 )1357 } 1267 1358 1268 1359 VISIT_END( Expr, node ); … … 1275 1366 VISIT_START( node ); 1276 1367 1277 VISIT({ 1368 if ( __visit_children() ) { 1369 { 1278 1370 guard_symtab guard { *this }; 1279 1371 maybe_accept( node, &AlignofExpr::result ); … … 1284 1376 maybe_accept( node, &AlignofExpr::expr ); 1285 1377 } 1286 )1378 } 1287 1379 1288 1380 VISIT_END( Expr, node ); … … 1295 1387 VISIT_START( node ); 1296 1388 1297 VISIT({ 1389 if ( __visit_children() ) { 1390 { 1298 1391 guard_symtab guard { *this }; 1299 1392 maybe_accept( node, 
&UntypedOffsetofExpr::result ); 1300 1393 } 1301 1394 maybe_accept( node, &UntypedOffsetofExpr::type ); 1302 )1395 } 1303 1396 1304 1397 VISIT_END( Expr, node ); … … 1311 1404 VISIT_START( node ); 1312 1405 1313 VISIT({ 1406 if ( __visit_children() ) { 1407 { 1314 1408 guard_symtab guard { *this }; 1315 1409 maybe_accept( node, &OffsetofExpr::result ); 1316 1410 } 1317 1411 maybe_accept( node, &OffsetofExpr::type ); 1318 )1412 } 1319 1413 1320 1414 VISIT_END( Expr, node ); … … 1327 1421 VISIT_START( node ); 1328 1422 1329 VISIT({ 1423 if ( __visit_children() ) { 1424 { 1330 1425 guard_symtab guard { *this }; 1331 1426 maybe_accept( node, &OffsetPackExpr::result ); 1332 1427 } 1333 1428 maybe_accept( node, &OffsetPackExpr::type ); 1334 )1429 } 1335 1430 1336 1431 VISIT_END( Expr, node ); … … 1343 1438 VISIT_START( node ); 1344 1439 1345 VISIT({ 1440 if ( __visit_children() ) { 1441 { 1346 1442 guard_symtab guard { *this }; 1347 1443 maybe_accept( node, &LogicalExpr::result ); … … 1349 1445 maybe_accept( node, &LogicalExpr::arg1 ); 1350 1446 maybe_accept( node, &LogicalExpr::arg2 ); 1351 )1447 } 1352 1448 1353 1449 VISIT_END( Expr, node ); … … 1360 1456 VISIT_START( node ); 1361 1457 1362 VISIT({ 1458 if ( __visit_children() ) { 1459 { 1363 1460 guard_symtab guard { *this }; 1364 1461 maybe_accept( node, &ConditionalExpr::result ); … … 1367 1464 maybe_accept( node, &ConditionalExpr::arg2 ); 1368 1465 maybe_accept( node, &ConditionalExpr::arg3 ); 1369 )1466 } 1370 1467 1371 1468 VISIT_END( Expr, node ); … … 1378 1475 VISIT_START( node ); 1379 1476 1380 VISIT({ 1477 if ( __visit_children() ) { 1478 { 1381 1479 guard_symtab guard { *this }; 1382 1480 maybe_accept( node, &CommaExpr::result ); … … 1384 1482 maybe_accept( node, &CommaExpr::arg1 ); 1385 1483 maybe_accept( node, &CommaExpr::arg2 ); 1386 )1484 } 1387 1485 1388 1486 VISIT_END( Expr, node ); … … 1395 1493 VISIT_START( node ); 1396 1494 1397 VISIT({ 1495 if ( __visit_children() ) { 1496 { 1398 1497 guard_symtab guard { *this }; 1399 1498 maybe_accept( node, &TypeExpr::result ); 1400 1499 } 1401 1500 maybe_accept( node, &TypeExpr::type ); 1402 )1501 } 1403 1502 1404 1503 VISIT_END( Expr, node ); … … 1411 1510 VISIT_START( node ); 1412 1511 1413 VISIT({ 1512 if ( __visit_children() ) { 1513 { 1414 1514 guard_symtab guard { *this }; 1415 1515 maybe_accept( node, &AsmExpr::result ); … … 1417 1517 maybe_accept( node, &AsmExpr::constraint ); 1418 1518 maybe_accept( node, &AsmExpr::operand ); 1419 )1519 } 1420 1520 1421 1521 VISIT_END( Expr, node ); … … 1428 1528 VISIT_START( node ); 1429 1529 1430 VISIT({ 1530 if ( __visit_children() ) { 1531 { 1431 1532 guard_symtab guard { *this }; 1432 1533 maybe_accept( node, &ImplicitCopyCtorExpr::result ); 1433 1534 } 1434 1535 maybe_accept( node, &ImplicitCopyCtorExpr::callExpr ); 1435 )1536 } 1436 1537 1437 1538 VISIT_END( Expr, node ); … … 1444 1545 VISIT_START( node ); 1445 1546 1446 VISIT({ 1547 if ( __visit_children() ) { 1548 { 1447 1549 guard_symtab guard { *this }; 1448 1550 maybe_accept( node, &ConstructorExpr::result ); 1449 1551 } 1450 1552 maybe_accept( node, &ConstructorExpr::callExpr ); 1451 )1553 } 1452 1554 1453 1555 VISIT_END( Expr, node ); … … 1460 1562 VISIT_START( node ); 1461 1563 1462 VISIT({ 1564 if ( __visit_children() ) { 1565 { 1463 1566 guard_symtab guard { *this }; 1464 1567 maybe_accept( node, &CompoundLiteralExpr::result ); 1465 1568 } 1466 1569 maybe_accept( node, &CompoundLiteralExpr::init ); 1467 )1570 } 1468 1571 1469 1572 VISIT_END( Expr, node ); … … 1476 1579 
VISIT_START( node ); 1477 1580 1478 VISIT({ 1581 if ( __visit_children() ) { 1582 { 1479 1583 guard_symtab guard { *this }; 1480 1584 maybe_accept( node, &RangeExpr::result ); … … 1482 1586 maybe_accept( node, &RangeExpr::low ); 1483 1587 maybe_accept( node, &RangeExpr::high ); 1484 )1588 } 1485 1589 1486 1590 VISIT_END( Expr, node ); … … 1493 1597 VISIT_START( node ); 1494 1598 1495 VISIT({ 1599 if ( __visit_children() ) { 1600 { 1496 1601 guard_symtab guard { *this }; 1497 1602 maybe_accept( node, &UntypedTupleExpr::result ); 1498 1603 } 1499 1604 maybe_accept( node, &UntypedTupleExpr::exprs ); 1500 )1605 } 1501 1606 1502 1607 VISIT_END( Expr, node ); … … 1509 1614 VISIT_START( node ); 1510 1615 1511 VISIT({ 1616 if ( __visit_children() ) { 1617 { 1512 1618 guard_symtab guard { *this }; 1513 1619 maybe_accept( node, &TupleExpr::result ); 1514 1620 } 1515 1621 maybe_accept( node, &TupleExpr::exprs ); 1516 )1622 } 1517 1623 1518 1624 VISIT_END( Expr, node ); … … 1525 1631 VISIT_START( node ); 1526 1632 1527 VISIT({ 1633 if ( __visit_children() ) { 1634 { 1528 1635 guard_symtab guard { *this }; 1529 1636 maybe_accept( node, &TupleIndexExpr::result ); 1530 1637 } 1531 1638 maybe_accept( node, &TupleIndexExpr::tuple ); 1532 )1639 } 1533 1640 1534 1641 VISIT_END( Expr, node ); … … 1541 1648 VISIT_START( node ); 1542 1649 1543 VISIT({ 1650 if ( __visit_children() ) { 1651 { 1544 1652 guard_symtab guard { *this }; 1545 1653 maybe_accept( node, &TupleAssignExpr::result ); 1546 1654 } 1547 1655 maybe_accept( node, &TupleAssignExpr::stmtExpr ); 1548 )1656 } 1549 1657 1550 1658 VISIT_END( Expr, node ); … … 1557 1665 VISIT_START( node ); 1558 1666 1559 VISIT(// don't want statements from outer CompoundStmts to be added to this StmtExpr 1667 if ( __visit_children() ) { 1668 // don't want statements from outer CompoundStmts to be added to this StmtExpr 1560 1669 // get the stmts that will need to be spliced in 1561 1670 auto stmts_before = __pass::stmtsToAddBefore( core, 0); … … 1574 1683 maybe_accept( node, &StmtExpr::returnDecls ); 1575 1684 maybe_accept( node, &StmtExpr::dtors ); 1576 )1685 } 1577 1686 1578 1687 VISIT_END( Expr, node ); … … 1585 1694 VISIT_START( node ); 1586 1695 1587 VISIT({ 1696 if ( __visit_children() ) { 1697 { 1588 1698 guard_symtab guard { *this }; 1589 1699 maybe_accept( node, &UniqueExpr::result ); 1590 1700 } 1591 1701 maybe_accept( node, &UniqueExpr::expr ); 1592 )1702 } 1593 1703 1594 1704 VISIT_END( Expr, node ); … … 1601 1711 VISIT_START( node ); 1602 1712 1603 VISIT({ 1713 if ( __visit_children() ) { 1714 { 1604 1715 guard_symtab guard { *this }; 1605 1716 maybe_accept( node, &UntypedInitExpr::result ); … … 1607 1718 maybe_accept( node, &UntypedInitExpr::expr ); 1608 1719 // not currently visiting initAlts, but this doesn't matter since this node is only used in the resolver. 
1609 )1720 } 1610 1721 1611 1722 VISIT_END( Expr, node ); … … 1618 1729 VISIT_START( node ); 1619 1730 1620 VISIT({ 1731 if ( __visit_children() ) { 1732 { 1621 1733 guard_symtab guard { *this }; 1622 1734 maybe_accept( node, &InitExpr::result ); … … 1624 1736 maybe_accept( node, &InitExpr::expr ); 1625 1737 maybe_accept( node, &InitExpr::designation ); 1626 )1738 } 1627 1739 1628 1740 VISIT_END( Expr, node ); … … 1635 1747 VISIT_START( node ); 1636 1748 1637 VISIT({ 1749 if ( __visit_children() ) { 1750 { 1638 1751 guard_symtab guard { *this }; 1639 1752 maybe_accept( node, &DeletedExpr::result ); … … 1641 1754 maybe_accept( node, &DeletedExpr::expr ); 1642 1755 // don't visit deleteStmt, because it is a pointer to somewhere else in the tree. 1643 )1756 } 1644 1757 1645 1758 VISIT_END( Expr, node ); … … 1652 1765 VISIT_START( node ); 1653 1766 1654 VISIT({ 1767 if ( __visit_children() ) { 1768 { 1655 1769 guard_symtab guard { *this }; 1656 1770 maybe_accept( node, &DefaultArgExpr::result ); 1657 1771 } 1658 1772 maybe_accept( node, &DefaultArgExpr::expr ); 1659 )1773 } 1660 1774 1661 1775 VISIT_END( Expr, node ); … … 1668 1782 VISIT_START( node ); 1669 1783 1670 VISIT({ 1784 if ( __visit_children() ) { 1785 { 1671 1786 guard_symtab guard { *this }; 1672 1787 maybe_accept( node, &GenericExpr::result ); … … 1697 1812 node = n; 1698 1813 } 1699 )1814 } 1700 1815 1701 1816 VISIT_END( Expr, node ); … … 1726 1841 VISIT_START( node ); 1727 1842 1728 VISIT(1843 if ( __visit_children() ) { 1729 1844 // xxx - should PointerType visit/mutate dimension? 1730 1845 maybe_accept( node, &PointerType::base ); 1731 )1846 } 1732 1847 1733 1848 VISIT_END( Type, node ); … … 1740 1855 VISIT_START( node ); 1741 1856 1742 VISIT(1857 if ( __visit_children() ) { 1743 1858 maybe_accept( node, &ArrayType::dimension ); 1744 1859 maybe_accept( node, &ArrayType::base ); 1745 )1860 } 1746 1861 1747 1862 VISIT_END( Type, node ); … … 1754 1869 VISIT_START( node ); 1755 1870 1756 VISIT(1871 if ( __visit_children() ) { 1757 1872 maybe_accept( node, &ReferenceType::base ); 1758 )1873 } 1759 1874 1760 1875 VISIT_END( Type, node ); … … 1767 1882 VISIT_START( node ); 1768 1883 1769 VISIT(1884 if ( __visit_children() ) { 1770 1885 maybe_accept( node, &QualifiedType::parent ); 1771 1886 maybe_accept( node, &QualifiedType::child ); 1772 )1887 } 1773 1888 1774 1889 VISIT_END( Type, node ); … … 1781 1896 VISIT_START( node ); 1782 1897 1783 VISIT({1898 if ( __visit_children() ) { 1784 1899 // guard_forall_subs forall_guard { *this, node }; 1785 1900 // mutate_forall( node ); … … 1787 1902 maybe_accept( node, &FunctionType::returns ); 1788 1903 maybe_accept( node, &FunctionType::params ); 1789 } )1904 } 1790 1905 1791 1906 VISIT_END( Type, node ); … … 1800 1915 __pass::symtab::addStruct( core, 0, node->name ); 1801 1916 1802 VISIT({1917 if ( __visit_children() ) { 1803 1918 guard_symtab guard { *this }; 1804 1919 maybe_accept( node, &StructInstType::params ); 1805 } )1920 } 1806 1921 1807 1922 VISIT_END( Type, node ); … … 1816 1931 __pass::symtab::addUnion( core, 0, node->name ); 1817 1932 1818 VISIT({1933 if ( __visit_children() ) { 1819 1934 guard_symtab guard { *this }; 1820 1935 maybe_accept( node, &UnionInstType::params ); 1821 } )1936 } 1822 1937 1823 1938 VISIT_END( Type, node ); … … 1830 1945 VISIT_START( node ); 1831 1946 1832 VISIT({1947 if ( __visit_children() ) { 1833 1948 maybe_accept( node, &EnumInstType::params ); 1834 } )1949 } 1835 1950 1836 1951 VISIT_END( Type, node ); … … 1843 1958 VISIT_START( node ); 1844 1959 
1845 VISIT({1960 if ( __visit_children() ) { 1846 1961 maybe_accept( node, &TraitInstType::params ); 1847 } )1962 } 1848 1963 1849 1964 VISIT_END( Type, node ); … … 1856 1971 VISIT_START( node ); 1857 1972 1858 VISIT(1973 if ( __visit_children() ) { 1859 1974 { 1860 1975 maybe_accept( node, &TypeInstType::params ); … … 1862 1977 // ensure that base re-bound if doing substitution 1863 1978 __pass::forall::replace( core, 0, node ); 1864 )1979 } 1865 1980 1866 1981 VISIT_END( Type, node ); … … 1873 1988 VISIT_START( node ); 1874 1989 1875 VISIT(1990 if ( __visit_children() ) { 1876 1991 maybe_accept( node, &TupleType::types ); 1877 1992 maybe_accept( node, &TupleType::members ); 1878 )1993 } 1879 1994 1880 1995 VISIT_END( Type, node ); … … 1887 2002 VISIT_START( node ); 1888 2003 1889 VISIT(2004 if ( __visit_children() ) { 1890 2005 maybe_accept( node, &TypeofType::expr ); 1891 )2006 } 1892 2007 1893 2008 VISIT_END( Type, node ); … … 1900 2015 VISIT_START( node ); 1901 2016 1902 VISIT(2017 if ( __visit_children() ) { 1903 2018 maybe_accept( node, &VTableType::base ); 1904 )2019 } 1905 2020 1906 2021 VISIT_END( Type, node ); … … 1950 2065 VISIT_START( node ); 1951 2066 1952 VISIT( maybe_accept( node, &Designation::designators ); ) 2067 if ( __visit_children() ) { 2068 maybe_accept( node, &Designation::designators ); 2069 } 1953 2070 1954 2071 VISIT_END( Designation, node ); … … 1961 2078 VISIT_START( node ); 1962 2079 1963 VISIT(2080 if ( __visit_children() ) { 1964 2081 maybe_accept( node, &SingleInit::value ); 1965 )2082 } 1966 2083 1967 2084 VISIT_END( Init, node ); … … 1974 2091 VISIT_START( node ); 1975 2092 1976 VISIT(2093 if ( __visit_children() ) { 1977 2094 maybe_accept( node, &ListInit::designations ); 1978 2095 maybe_accept( node, &ListInit::initializers ); 1979 )2096 } 1980 2097 1981 2098 VISIT_END( Init, node ); … … 1988 2105 VISIT_START( node ); 1989 2106 1990 VISIT(2107 if ( __visit_children() ) { 1991 2108 maybe_accept( node, &ConstructorInit::ctor ); 1992 2109 maybe_accept( node, &ConstructorInit::dtor ); 1993 2110 maybe_accept( node, &ConstructorInit::init ); 1994 )2111 } 1995 2112 1996 2113 VISIT_END( Init, node ); … … 2003 2120 VISIT_START( node ); 2004 2121 2005 VISIT(2122 if ( __visit_children() ) { 2006 2123 maybe_accept( node, &Attribute::params ); 2007 )2124 } 2008 2125 2009 2126 VISIT_END( Attribute, node ); … … 2016 2133 VISIT_START( node ); 2017 2134 2018 VISIT(2135 if ( __visit_children() ) { 2019 2136 { 2020 2137 bool mutated = false; … … 2032 2149 } 2033 2150 } 2034 )2151 } 2035 2152 2036 2153 VISIT_END( TypeSubstitution, node ); … … 2038 2155 2039 2156 #undef VISIT_START 2040 #undef VISIT2041 2157 #undef VISIT_END -
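Every hunk in this file follows the same pattern: the old VISIT( ... ) macro, which wrapped the traversal of a node's children, is replaced by an explicit if ( __visit_children() ) guard around the same body, and the now-unused #undef VISIT is dropped. A minimal sketch of the resulting shape of one visit method, reconstructed from the hunks above (the TypeofType case; the enclosing template signature is assumed from the surrounding file, not shown in these hunks):

    template< typename core_t >
    const ast::Type * ast::Pass< core_t >::visit( const ast::TypeofType * node ) {
        VISIT_START( node );                  // unchanged prologue
        if ( __visit_children() ) {           // replaces the old VISIT( ... ) macro
            maybe_accept( node, &TypeofType::expr );
        }
        VISIT_END( Type, node );              // unchanged epilogue
    }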
src/AST/Print.cpp
r97c215f rf5a51db 333 333 print( node->funcSpec ); 334 334 335 if ( node->type ) { 335 336 337 if ( node->type && node->isTypeFixed ) { 336 338 node->type->accept( *this ); 337 339 } else { 338 os << "untyped entity"; 340 if (!node->type_params.empty()) { 341 os << "forall" << endl; 342 ++indent; 343 printAll(node->type_params); 344 os << indent; 345 --indent; 346 347 if (!node->assertions.empty()) { 348 os << "with assertions" << endl; 349 ++indent; 350 printAll(node->assertions); 351 os << indent; 352 --indent; 353 } 354 } 355 356 os << "function" << endl; 357 if ( ! node->params.empty() ) { 358 os << indent << "... with parameters" << endl; 359 ++indent; 360 printAll( node->params ); 361 if ( node->type->isVarArgs ) { 362 os << indent << "and a variable number of other arguments" << endl; 363 } 364 --indent; 365 } else if ( node->type->isVarArgs ) { 366 os << indent+1 << "accepting unspecified arguments" << endl; 367 } 368 369 os << indent << "... returning"; 370 if ( node->returns.empty() ) { 371 os << " nothing" << endl; 372 } else { 373 os << endl; 374 ++indent; 375 printAll( node->returns ); 376 --indent; 377 } 339 378 } 340 379 … … 472 511 ++indent; 473 512 os << indent; 474 safe_print( node->then Part);475 --indent; 476 477 if ( node->else Part!= 0 ) {513 safe_print( node->then ); 514 --indent; 515 516 if ( node->else_ != 0 ) { 478 517 os << indent << "... else:" << endl; 479 518 ++indent; 480 519 os << indent; 481 node->else Part->accept( *this );520 node->else_->accept( *this ); 482 521 --indent; 483 522 } // if … … 485 524 } 486 525 487 virtual const ast::Stmt * visit( const ast::While Stmt * node ) override final {526 virtual const ast::Stmt * visit( const ast::WhileDoStmt * node ) override final { 488 527 if ( node->isDoWhile ) { os << "Do-"; } 489 528 os << "While on condition:" << endl; -
src/AST/Stmt.cpp
r97c215f rf5a51db 9 9 // Author : Aaron B. Moss 10 10 // Created On : Wed May 8 13:00:00 2019 11 // Last Modified By : Andrew Beach12 // Last Modified On : Wed May 15 15:53:00 201913 // Update Count : 211 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 19:01:20 2022 13 // Update Count : 3 14 14 // 15 15 … … 56 56 57 57 // --- BranchStmt 58 BranchStmt::BranchStmt( const CodeLocation& loc, Kind kind, Label target, std::vector<Label>&& labels )59 : Stmt(loc, std::move(labels)), originalTarget(target), target(target), kind(kind) {58 BranchStmt::BranchStmt( const CodeLocation& loc, Kind kind, Label target, const std::vector<Label>&& labels ) 59 : Stmt(loc, std::move(labels)), originalTarget(target), target(target), kind(kind) { 60 60 // Make sure a syntax error hasn't slipped through. 61 61 assert( Goto != kind || !target.empty() ); -
src/AST/Stmt.hpp
r97c215f rf5a51db 9 9 // Author : Aaron B. Moss 10 10 // Created On : Wed May 8 13:00:00 2019 11 // Last Modified By : Andrew Beach12 // Last Modified On : Fri May 17 12:45:00 201913 // Update Count : 511 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 2 20:06:41 2022 13 // Update Count : 34 14 14 // 15 15 … … 17 17 18 18 #include <list> 19 #include <utility> 19 #include <utility> // for move 20 20 #include <vector> 21 21 22 22 #include "Label.hpp" 23 #include "Node.hpp" 23 #include "Node.hpp" // for node, ptr 24 24 #include "ParseNode.hpp" 25 25 #include "Visitor.hpp" … … 27 27 28 28 // Must be included in *all* AST classes; should be #undef'd at the end of the file 29 #define MUTATE_FRIEND 29 #define MUTATE_FRIEND \ 30 30 template<typename node_t> friend node_t * mutate(const node_t * node); \ 31 31 template<typename node_t> friend node_t * shallowCopy(const node_t * node); 32 32 33 33 namespace ast { 34 35 34 class Expr; 36 35 37 // /Base statement node36 // Base statement node 38 37 class Stmt : public ParseNode { 39 public:38 public: 40 39 std::vector<Label> labels; 41 40 42 Stmt( const CodeLocation & loc, std::vector<Label> && labels = {} )43 : ParseNode(loc), labels(std::move(labels)) {}44 45 Stmt(const Stmt & o) : ParseNode(o), labels(o.labels) {}41 Stmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 42 : ParseNode(loc), labels(std::move(labels)) {} 43 44 Stmt(const Stmt & o) : ParseNode(o), labels(o.labels) {} 46 45 47 46 const Stmt * accept( Visitor & v ) const override = 0; 48 private:47 private: 49 48 Stmt * clone() const override = 0; 50 49 MUTATE_FRIEND 51 50 }; 52 51 53 // / Compound statement `{ ... }`52 // Compound statement: { ... } 54 53 class CompoundStmt final : public Stmt { 55 public:54 public: 56 55 std::list<ptr<Stmt>> kids; 57 56 58 CompoundStmt(const CodeLocation & loc, std::list<ptr<Stmt>> && ks = {}, 59 std::vector<Label>&& labels = {} ) 60 : Stmt(loc, std::move(labels)), kids(std::move(ks)) {} 61 62 CompoundStmt( const CompoundStmt& o ); 63 CompoundStmt( CompoundStmt&& o ) = default; 57 CompoundStmt(const CodeLocation & loc, const std::list<ptr<Stmt>> && ks = {}, const std::vector<Label> && labels = {} ) 58 : Stmt(loc, std::move(labels)), kids(std::move(ks)) {} 59 60 CompoundStmt( const CompoundStmt & o ); 61 CompoundStmt( CompoundStmt && o ) = default; 64 62 65 63 void push_back( const Stmt * s ) { kids.emplace_back( s ); } … … 67 65 68 66 const CompoundStmt * accept( Visitor & v ) const override { return v.visit( this ); } 69 private:67 private: 70 68 CompoundStmt * clone() const override { return new CompoundStmt{ *this }; } 71 69 MUTATE_FRIEND 72 70 }; 73 71 74 // / Empty statment `;`72 // Empty statment: ; 75 73 class NullStmt final : public Stmt { 76 public:77 NullStmt( const CodeLocation & loc, std::vector<Label> && labels = {} )78 : Stmt(loc, std::move(labels)) {}74 public: 75 NullStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 76 : Stmt(loc, std::move(labels)) {} 79 77 80 78 const NullStmt * accept( Visitor & v ) const override { return v.visit( this ); } 81 private:79 private: 82 80 NullStmt * clone() const override { return new NullStmt{ *this }; } 83 81 MUTATE_FRIEND 84 82 }; 85 83 86 // /Expression wrapped by statement84 // Expression wrapped by statement 87 85 class ExprStmt final : public Stmt { 88 public:86 public: 89 87 ptr<Expr> expr; 90 88 91 ExprStmt( const CodeLocation & loc, const Expr* e, std::vector<Label>&& labels = {} )92 : Stmt(loc, std::move(labels)), expr(e) {}93 
94 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 95 private:89 ExprStmt( const CodeLocation & loc, const Expr* e, const std::vector<Label> && labels = {} ) 90 : Stmt(loc, std::move(labels)), expr(e) {} 91 92 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 93 private: 96 94 ExprStmt * clone() const override { return new ExprStmt{ *this }; } 97 95 MUTATE_FRIEND 98 96 }; 99 97 100 // / Assembly statement `asm ... ( "..." : ... )`98 // Assembly statement: asm ... ( "..." : ... ) 101 99 class AsmStmt final : public Stmt { 102 public:100 public: 103 101 bool isVolatile; 104 102 ptr<Expr> instruction; … … 108 106 109 107 AsmStmt( const CodeLocation & loc, bool isVolatile, const Expr * instruction, 110 std::vector<ptr<Expr>> && output,std::vector<ptr<Expr>> && input,111 std::vector<ptr<ConstantExpr>> && clobber,std::vector<Label> && gotoLabels,112 std::vector<Label> && labels = {})113 : Stmt(loc, std::move(labels)), isVolatile(isVolatile), instruction(instruction),114 output(std::move(output)), input(std::move(input)), clobber(std::move(clobber)),115 gotoLabels(std::move(gotoLabels)) {}116 117 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 118 private:108 const std::vector<ptr<Expr>> && output, const std::vector<ptr<Expr>> && input, 109 const std::vector<ptr<ConstantExpr>> && clobber, const std::vector<Label> && gotoLabels, 110 const std::vector<Label> && labels = {}) 111 : Stmt(loc, std::move(labels)), isVolatile(isVolatile), instruction(instruction), 112 output(std::move(output)), input(std::move(input)), clobber(std::move(clobber)), 113 gotoLabels(std::move(gotoLabels)) {} 114 115 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 116 private: 119 117 AsmStmt * clone() const override { return new AsmStmt{ *this }; } 120 118 MUTATE_FRIEND 121 119 }; 122 120 123 // / C-preprocessor directive `#...`121 // C-preprocessor directive: #... 124 122 class DirectiveStmt final : public Stmt { 125 public:123 public: 126 124 std::string directive; 127 125 128 126 DirectiveStmt( const CodeLocation & loc, const std::string & directive, 129 std::vector<Label> && labels = {} )130 : Stmt(loc, std::move(labels)), directive(directive) {}131 132 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 133 private:127 std::vector<Label> && labels = {} ) 128 : Stmt(loc, std::move(labels)), directive(directive) {} 129 130 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 131 private: 134 132 DirectiveStmt * clone() const override { return new DirectiveStmt{ *this }; } 135 133 MUTATE_FRIEND 136 134 }; 137 135 138 // / If conditional statement `if (...) ... else ...`136 // If statement: if (...) ... else ... 
139 137 class IfStmt final : public Stmt { 140 public:141 ptr<Expr> cond; 142 ptr<Stmt> then Part;143 ptr<Stmt> else Part;138 public: 139 ptr<Expr> cond; 140 ptr<Stmt> then; 141 ptr<Stmt> else_; 144 142 std::vector<ptr<Stmt>> inits; 145 143 146 IfStmt( const CodeLocation & loc, const Expr * cond, const Stmt * then Part,147 const Stmt * elsePart = nullptr,std::vector<ptr<Stmt>> && inits = {},148 std::vector<Label> && labels = {} )149 : Stmt(loc, std::move(labels)), cond(cond), thenPart(thenPart), elsePart(elsePart),150 inits(std::move(inits)) {}151 152 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 153 private:144 IfStmt( const CodeLocation & loc, const Expr * cond, const Stmt * then, 145 const Stmt * else_ = nullptr, const std::vector<ptr<Stmt>> && inits = {}, 146 const std::vector<Label> && labels = {} ) 147 : Stmt(loc, std::move(labels)), cond(cond), then(then), else_(else_), 148 inits(std::move(inits)) {} 149 150 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 151 private: 154 152 IfStmt * clone() const override { return new IfStmt{ *this }; } 155 153 MUTATE_FRIEND 156 154 }; 157 155 158 // / Switch or choose conditional statement `switch (...) { ... }`156 // Switch or choose statement: switch (...) { ... } 159 157 class SwitchStmt final : public Stmt { 160 public:158 public: 161 159 ptr<Expr> cond; 162 160 std::vector<ptr<Stmt>> stmts; 163 161 164 SwitchStmt( const CodeLocation & loc, const Expr * cond, std::vector<ptr<Stmt>> && stmts,165 std::vector<Label> && labels = {} )166 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}167 168 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 169 private:162 SwitchStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts, 163 const std::vector<Label> && labels = {} ) 164 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {} 165 166 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 167 private: 170 168 SwitchStmt * clone() const override { return new SwitchStmt{ *this }; } 171 169 MUTATE_FRIEND 172 170 }; 173 171 174 // / Case label `case ...:` `default:`172 // Case label: case ...: or default: 175 173 class CaseStmt final : public Stmt { 176 public:177 // /Null for the default label.174 public: 175 // Null for the default label. 178 176 ptr<Expr> cond; 179 177 std::vector<ptr<Stmt>> stmts; 180 178 181 CaseStmt( const CodeLocation & loc, const Expr * cond, std::vector<ptr<Stmt>> && stmts,182 std::vector<Label> && labels = {} )183 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}179 CaseStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts, 180 const std::vector<Label> && labels = {} ) 181 : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {} 184 182 185 183 bool isDefault() const { return !cond; } 186 184 187 185 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 188 private:186 private: 189 187 CaseStmt * clone() const override { return new CaseStmt{ *this }; } 190 188 MUTATE_FRIEND 191 189 }; 192 190 193 // / While loop `while (...) ...` `do ... while (...);194 class While Stmt final : public Stmt {195 public:191 // While loop: while (...) ... else ... or do ... while (...) 
else ...; 192 class WhileDoStmt final : public Stmt { 193 public: 196 194 ptr<Expr> cond; 197 195 ptr<Stmt> body; 196 ptr<Stmt> else_; 198 197 std::vector<ptr<Stmt>> inits; 199 198 bool isDoWhile; 200 199 201 WhileStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, 202 std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, std::vector<Label> && labels = {} ) 203 : Stmt(loc, std::move(labels)), cond(cond), body(body), inits(std::move(inits)), 204 isDoWhile(isDoWhile) {} 205 206 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 207 private: 208 WhileStmt * clone() const override { return new WhileStmt{ *this }; } 209 MUTATE_FRIEND 210 }; 211 212 /// For loop `for (... ; ... ; ...) ...` 200 WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, 201 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} ) 202 : Stmt(loc, std::move(labels)), cond(cond), body(body), else_(nullptr), inits(std::move(inits)), isDoWhile(isDoWhile) {} 203 204 WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, const Stmt * else_, 205 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} ) 206 : Stmt(loc, std::move(labels)), cond(cond), body(body), else_(else_), inits(std::move(inits)), isDoWhile(isDoWhile) {} 207 208 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 209 private: 210 WhileDoStmt * clone() const override { return new WhileDoStmt{ *this }; } 211 MUTATE_FRIEND 212 }; 213 214 // For loop: for (... ; ... ; ...) ... else ... 213 215 class ForStmt final : public Stmt { 214 public:216 public: 215 217 std::vector<ptr<Stmt>> inits; 216 218 ptr<Expr> cond; 217 219 ptr<Expr> inc; 218 220 ptr<Stmt> body; 219 220 ForStmt( const CodeLocation & loc, std::vector<ptr<Stmt>> && inits, const Expr * cond, 221 const Expr * inc, const Stmt * body, std::vector<Label> && labels = {} ) 222 : Stmt(loc, std::move(labels)), inits(std::move(inits)), cond(cond), inc(inc), 223 body(body) {} 224 225 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 226 private: 221 ptr<Stmt> else_; 222 223 ForStmt( const CodeLocation & loc, const std::vector<ptr<Stmt>> && inits, const Expr * cond, 224 const Expr * inc, const Stmt * body, const std::vector<Label> && label = {} ) 225 : Stmt(loc, std::move(label)), inits(std::move(inits)), cond(cond), inc(inc), body(body), else_(nullptr) {} 226 227 ForStmt( const CodeLocation & loc, const std::vector<ptr<Stmt>> && inits, const Expr * cond, 228 const Expr * inc, const Stmt * body, const Stmt * else_, const std::vector<Label> && labels = {} ) 229 : Stmt(loc, std::move(labels)), inits(std::move(inits)), cond(cond), inc(inc), body(body), else_(else_) {} 230 231 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 232 private: 227 233 ForStmt * clone() const override { return new ForStmt{ *this }; } 228 234 MUTATE_FRIEND 229 235 }; 230 236 231 // / Branch control flow statement `goto ...` `break` `continue` `fallthru`237 // Branch control flow statement: goto ... 
or break or continue or fallthru 232 238 class BranchStmt final : public Stmt { 233 public:239 public: 234 240 enum Kind { Goto, Break, Continue, FallThrough, FallThroughDefault }; 235 241 static constexpr size_t kindEnd = 1 + (size_t)FallThroughDefault; … … 240 246 Kind kind; 241 247 242 BranchStmt( const CodeLocation & loc, Kind kind, Label target, 243 std::vector<Label> && labels = {} ); 244 BranchStmt( const CodeLocation & loc, const Expr * computedTarget, 245 std::vector<Label> && labels = {} ) 246 : Stmt(loc, std::move(labels)), originalTarget(loc), target(loc), 247 computedTarget(computedTarget), kind(Goto) {} 248 BranchStmt( const CodeLocation & loc, Kind kind, Label target, const std::vector<Label> && labels = {} ); 249 BranchStmt( const CodeLocation & loc, const Expr * computedTarget, const std::vector<Label> && labels = {} ) 250 : Stmt(loc, std::move(labels)), originalTarget(loc), target(loc), computedTarget(computedTarget), kind(Goto) {} 248 251 249 252 const char * kindName() const { return kindNames[kind]; } 250 253 251 254 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 252 private:255 private: 253 256 BranchStmt * clone() const override { return new BranchStmt{ *this }; } 254 257 MUTATE_FRIEND … … 257 260 }; 258 261 259 // / Return statement `return ...`262 // Return statement: return ... 260 263 class ReturnStmt final : public Stmt { 261 public:264 public: 262 265 ptr<Expr> expr; 263 266 264 ReturnStmt( const CodeLocation & loc, const Expr * expr, std::vector<Label> && labels = {} )265 : Stmt(loc, std::move(labels)), expr(expr) {}266 267 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 268 private:267 ReturnStmt( const CodeLocation & loc, const Expr * expr, const std::vector<Label> && labels = {} ) 268 : Stmt(loc, std::move(labels)), expr(expr) {} 269 270 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 271 private: 269 272 ReturnStmt * clone() const override { return new ReturnStmt{ *this }; } 270 273 MUTATE_FRIEND 271 274 }; 272 275 273 // /Kind of exception276 // Kind of exception 274 277 enum ExceptionKind { Terminate, Resume }; 275 278 276 // / Throw statement `throw ...`279 // Throw statement: throw ... 277 280 class ThrowStmt final : public Stmt { 278 public:281 public: 279 282 ptr<Expr> expr; 280 283 ptr<Expr> target; 281 284 ExceptionKind kind; 282 285 283 ThrowStmt( 284 const CodeLocation & loc, ExceptionKind kind, const Expr * expr, const Expr * target, 285 std::vector<Label> && labels = {} ) 286 : Stmt(loc, std::move(labels)), expr(expr), target(target), kind(kind) {} 287 288 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 289 private: 286 ThrowStmt( const CodeLocation & loc, ExceptionKind kind, const Expr * expr, 287 const Expr * target, const std::vector<Label> && labels = {} ) 288 : Stmt(loc, std::move(labels)), expr(expr), target(target), kind(kind) {} 289 290 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 291 private: 290 292 ThrowStmt * clone() const override { return new ThrowStmt{ *this }; } 291 293 MUTATE_FRIEND 292 294 }; 293 295 294 // / Try statement `try { ... } ...`296 // Try statement: try { ... } ... 
295 297 class TryStmt final : public Stmt { 296 public:298 public: 297 299 ptr<CompoundStmt> body; 298 300 std::vector<ptr<CatchStmt>> handlers; 299 301 ptr<FinallyStmt> finally; 300 302 301 TryStmt( 302 const CodeLocation & loc, const CompoundStmt * body, 303 std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally, 304 std::vector<Label> && labels = {} ) 305 : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {} 306 307 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 308 private: 303 TryStmt( const CodeLocation & loc, const CompoundStmt * body, 304 const std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally, 305 const std::vector<Label> && labels = {} ) 306 : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {} 307 308 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 309 private: 309 310 TryStmt * clone() const override { return new TryStmt{ *this }; } 310 311 MUTATE_FRIEND 311 312 }; 312 313 313 // /Catch clause of try statement314 // Catch clause of try statement 314 315 class CatchStmt final : public Stmt { 315 public:316 public: 316 317 ptr<Decl> decl; 317 318 ptr<Expr> cond; … … 319 320 ExceptionKind kind; 320 321 321 CatchStmt( 322 const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond, 323 const Stmt * body, std::vector<Label> && labels = {} ) 324 : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {} 325 326 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 327 private: 322 CatchStmt( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond, 323 const Stmt * body, const std::vector<Label> && labels = {} ) 324 : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {} 325 326 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 327 private: 328 328 CatchStmt * clone() const override { return new CatchStmt{ *this }; } 329 329 MUTATE_FRIEND 330 330 }; 331 331 332 // /Finally clause of try statement332 // Finally clause of try statement 333 333 class FinallyStmt final : public Stmt { 334 public:334 public: 335 335 ptr<CompoundStmt> body; 336 336 337 337 FinallyStmt( const CodeLocation & loc, const CompoundStmt * body, 338 std::vector<Label> && labels = {} )339 : Stmt(loc, std::move(labels)), body(body) {}340 341 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 342 private:338 std::vector<Label> && labels = {} ) 339 : Stmt(loc, std::move(labels)), body(body) {} 340 341 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 342 private: 343 343 FinallyStmt * clone() const override { return new FinallyStmt{ *this }; } 344 344 MUTATE_FRIEND 345 345 }; 346 346 347 // /Suspend statement347 // Suspend statement 348 348 class SuspendStmt final : public Stmt { 349 public:349 public: 350 350 ptr<CompoundStmt> then; 351 351 enum Type { None, Coroutine, Generator } type = None; 352 352 353 SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, std::vector<Label> && labels = {} )354 : Stmt(loc, std::move(labels)), then(then), type(type) {}355 356 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 357 private:353 SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, const std::vector<Label> && labels = {} ) 354 : Stmt(loc, std::move(labels)), then(then), 
type(type) {} 355 356 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 357 private: 358 358 SuspendStmt * clone() const override { return new SuspendStmt{ *this }; } 359 359 MUTATE_FRIEND 360 360 }; 361 361 362 // / Wait for concurrency statement `when (...) waitfor (... , ...) ... timeout(...) ... else ...`362 // Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ... 363 363 class WaitForStmt final : public Stmt { 364 public:364 public: 365 365 struct Target { 366 366 ptr<Expr> func; … … 389 389 OrElse orElse; 390 390 391 WaitForStmt( const CodeLocation & loc, std::vector<Label> && labels = {} )392 : Stmt(loc, std::move(labels)) {}393 394 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 395 private:391 WaitForStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 392 : Stmt(loc, std::move(labels)) {} 393 394 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 395 private: 396 396 WaitForStmt * clone() const override { return new WaitForStmt{ *this }; } 397 397 MUTATE_FRIEND 398 398 }; 399 399 400 // /Any declaration in a (compound) statement.400 // Any declaration in a (compound) statement. 401 401 class DeclStmt final : public Stmt { 402 public:402 public: 403 403 ptr<Decl> decl; 404 404 405 DeclStmt( const CodeLocation & loc, const Decl * decl, std::vector<Label> && labels = {} )406 : Stmt(loc, std::move(labels)), decl(decl) {}407 408 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 409 private:405 DeclStmt( const CodeLocation & loc, const Decl * decl, const std::vector<Label> && labels = {} ) 406 : Stmt(loc, std::move(labels)), decl(decl) {} 407 408 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 409 private: 410 410 DeclStmt * clone() const override { return new DeclStmt{ *this }; } 411 411 MUTATE_FRIEND 412 412 }; 413 413 414 // /Represents an implicit application of a constructor or destructor.414 // Represents an implicit application of a constructor or destructor. 
415 415 class ImplicitCtorDtorStmt final : public Stmt { 416 public:416 public: 417 417 ptr<Stmt> callStmt; 418 418 419 419 ImplicitCtorDtorStmt( const CodeLocation & loc, const Stmt * callStmt, 420 std::vector<Label> && labels = {} )421 : Stmt(loc, std::move(labels)), callStmt(callStmt) {}422 423 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 424 private:420 std::vector<Label> && labels = {} ) 421 : Stmt(loc, std::move(labels)), callStmt(callStmt) {} 422 423 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 424 private: 425 425 ImplicitCtorDtorStmt * clone() const override { return new ImplicitCtorDtorStmt{ *this }; } 426 426 MUTATE_FRIEND 427 427 }; 428 428 429 // /Mutex Statement429 // Mutex Statement 430 430 class MutexStmt final : public Stmt { 431 public:431 public: 432 432 ptr<Stmt> stmt; 433 433 std::vector<ptr<Expr>> mutexObjs; 434 434 435 435 MutexStmt( const CodeLocation & loc, const Stmt * stmt, 436 std::vector<ptr<Expr>> && mutexes,std::vector<Label> && labels = {} )437 : Stmt(loc, std::move(labels)), stmt(stmt), mutexObjs(std::move(mutexes)) {}438 439 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 440 private:436 const std::vector<ptr<Expr>> && mutexes, const std::vector<Label> && labels = {} ) 437 : Stmt(loc, std::move(labels)), stmt(stmt), mutexObjs(std::move(mutexes)) {} 438 439 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 440 private: 441 441 MutexStmt * clone() const override { return new MutexStmt{ *this }; } 442 442 MUTATE_FRIEND 443 443 }; 444 445 } 444 } // namespace ast 446 445 447 446 #undef MUTATE_FRIEND 448 447 449 448 // Local Variables: // 450 // tab-width: 4 //451 449 // mode: c++ // 452 // compile-command: "make install" //453 450 // End: // -
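The reworked header renames IfStmt's branches to then and else_, replaces WhileStmt with WhileDoStmt, and gives both WhileDoStmt and ForStmt an optional else_ clause through new constructor overloads. A minimal sketch of building these nodes with the constructors shown above; loc, cond, inc, thenBlock, body, elseBlock and initStmt are placeholders for values produced elsewhere, not names from this changeset:

    // if (cond) thenBlock else elseBlock
    ast::IfStmt * ifStmt = new ast::IfStmt( loc, cond, thenBlock, elseBlock );

    // while (cond) body ... else elseBlock   (loop else clause, no init statements)
    ast::WhileDoStmt * loop = new ast::WhileDoStmt( loc, cond, body, elseBlock, {} );

    // for (initStmt; cond; inc) body ... else elseBlock
    ast::ForStmt * forLoop = new ast::ForStmt( loc, { initStmt }, cond, inc, body, elseBlock );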
src/AST/Visitor.hpp
r97c215f rf5a51db 10 10 // Created On : Thr May 9 15:28:00 2019 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:25:07 202113 // Update Count : 112 // Last Modified On : Tue Feb 1 09:09:34 2022 13 // Update Count : 2 14 14 // 15 15 … … 38 38 virtual const ast::Stmt * visit( const ast::DirectiveStmt * ) = 0; 39 39 virtual const ast::Stmt * visit( const ast::IfStmt * ) = 0; 40 virtual const ast::Stmt * visit( const ast::While Stmt* ) = 0;40 virtual const ast::Stmt * visit( const ast::WhileDoStmt * ) = 0; 41 41 virtual const ast::Stmt * visit( const ast::ForStmt * ) = 0; 42 42 virtual const ast::Stmt * visit( const ast::SwitchStmt * ) = 0; -
src/CodeGen/CodeGenerator.cc
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 19:00:42 202113 // Update Count : 5 3612 // Last Modified On : Wed Feb 2 20:30:30 2022 13 // Update Count : 541 14 14 // 15 15 #include "CodeGenerator.h" … … 42 42 bool wantSpacing( Statement * stmt) { 43 43 return dynamic_cast< IfStmt * >( stmt ) || dynamic_cast< CompoundStmt * >( stmt ) || 44 dynamic_cast< While Stmt * >( stmt ) || dynamic_cast< ForStmt * >( stmt ) || dynamic_cast< SwitchStmt *>( stmt );44 dynamic_cast< WhileDoStmt * >( stmt ) || dynamic_cast< ForStmt * >( stmt ) || dynamic_cast< SwitchStmt *>( stmt ); 45 45 } 46 46 … … 955 955 output << " ) "; 956 956 957 ifStmt->get_then Part()->accept( *visitor );958 959 if ( ifStmt->get_else Part() != 0) {957 ifStmt->get_then()->accept( *visitor ); 958 959 if ( ifStmt->get_else() != 0) { 960 960 output << " else "; 961 ifStmt->get_else Part()->accept( *visitor );961 ifStmt->get_else()->accept( *visitor ); 962 962 } // if 963 963 } … … 1020 1020 output << "fallthru"; 1021 1021 break; 1022 default: ; // prevent warning 1022 1023 } // switch 1023 1024 // print branch target for labelled break/continue/fallthru in debug mode … … 1125 1126 } 1126 1127 1127 void CodeGenerator::postvisit( While Stmt * whileStmt ) {1128 if ( while Stmt->get_isDoWhile() ) {1128 void CodeGenerator::postvisit( WhileDoStmt * whileDoStmt ) { 1129 if ( whileDoStmt->get_isDoWhile() ) { 1129 1130 output << "do"; 1130 1131 } else { 1131 1132 output << "while ("; 1132 while Stmt->get_condition()->accept( *visitor );1133 whileDoStmt->get_condition()->accept( *visitor ); 1133 1134 output << ")"; 1134 1135 } // if 1135 1136 output << " "; 1136 1137 1137 output << CodeGenerator::printLabels( while Stmt->get_body()->get_labels() );1138 while Stmt->get_body()->accept( *visitor );1138 output << CodeGenerator::printLabels( whileDoStmt->get_body()->get_labels() ); 1139 whileDoStmt->get_body()->accept( *visitor ); 1139 1140 1140 1141 output << indent; 1141 1142 1142 if ( while Stmt->get_isDoWhile() ) {1143 if ( whileDoStmt->get_isDoWhile() ) { 1143 1144 output << " while ("; 1144 while Stmt->get_condition()->accept( *visitor );1145 whileDoStmt->get_condition()->accept( *visitor ); 1145 1146 output << ");"; 1146 1147 } // if -
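Besides following the WhileStmt to WhileDoStmt rename (and the default: case added to silence a switch warning), postvisit( WhileDoStmt * ) keeps emitting plain C loops. Illustrative output shapes only, where cond and body stand for the generated condition and loop body:

    while (cond) body          /* isDoWhile == false */
    do body while (cond);      /* isDoWhile == true  */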
src/CodeGen/CodeGenerator.h
r97c215f rf5a51db 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:35:38 202113 // Update Count : 6 312 // Last Modified On : Tue Feb 1 09:23:21 2022 13 // Update Count : 64 14 14 // 15 15 … … 116 116 void postvisit( WaitForStmt * ); 117 117 void postvisit( WithStmt * ); 118 void postvisit( While Stmt * );118 void postvisit( WhileDoStmt * ); 119 119 void postvisit( ForStmt * ); 120 120 void postvisit( NullStmt * ); -
src/Common/CodeLocationTools.cpp
r97c215f rf5a51db 10 10 // Created On : Fri Dec 4 15:42:00 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 12 18:35:37 202113 // Update Count : 212 // Last Modified On : Tue Feb 1 09:14:39 2022 13 // Update Count : 3 14 14 // 15 15 … … 109 109 macro(DirectiveStmt, Stmt) \ 110 110 macro(IfStmt, Stmt) \ 111 macro(While Stmt, Stmt) \111 macro(WhileDoStmt, Stmt) \ 112 112 macro(ForStmt, Stmt) \ 113 113 macro(SwitchStmt, Stmt) \ -
src/Common/PassVisitor.h
r97c215f rf5a51db 92 92 virtual void visit( IfStmt * ifStmt ) override final; 93 93 virtual void visit( const IfStmt * ifStmt ) override final; 94 virtual void visit( While Stmt * whileStmt ) override final;95 virtual void visit( const While Stmt * whileStmt ) override final;94 virtual void visit( WhileDoStmt * whileDoStmt ) override final; 95 virtual void visit( const WhileDoStmt * whileDoStmt ) override final; 96 96 virtual void visit( ForStmt * forStmt ) override final; 97 97 virtual void visit( const ForStmt * forStmt ) override final; … … 277 277 virtual Statement * mutate( DirectiveStmt * dirStmt ) override final; 278 278 virtual Statement * mutate( IfStmt * ifStmt ) override final; 279 virtual Statement * mutate( While Stmt * whileStmt ) override final;279 virtual Statement * mutate( WhileDoStmt * whileDoStmt ) override final; 280 280 virtual Statement * mutate( ForStmt * forStmt ) override final; 281 281 virtual Statement * mutate( SwitchStmt * switchStmt ) override final; -
src/Common/PassVisitor.impl.h
r97c215f rf5a51db 1189 1189 maybeAccept_impl( node->initialization, *this ); 1190 1190 visitExpression ( node->condition ); 1191 node->then Part = visitStatement( node->thenPart);1192 node->else Part = visitStatement( node->elsePart);1191 node->then = visitStatement( node->then ); 1192 node->else_ = visitStatement( node->else_ ); 1193 1193 } 1194 1194 VISIT_END( node ); … … 1203 1203 maybeAccept_impl( node->initialization, *this ); 1204 1204 visitExpression ( node->condition ); 1205 visitStatement ( node->then Part);1206 visitStatement ( node->else Part);1205 visitStatement ( node->then ); 1206 visitStatement ( node->else_ ); 1207 1207 } 1208 1208 VISIT_END( node ); … … 1217 1217 maybeMutate_impl( node->initialization, *this ); 1218 1218 node->condition = mutateExpression( node->condition ); 1219 node->then Part = mutateStatement ( node->thenPart);1220 node->else Part = mutateStatement ( node->elsePart);1219 node->then = mutateStatement ( node->then ); 1220 node->else_ = mutateStatement ( node->else_ ); 1221 1221 } 1222 1222 MUTATE_END( Statement, node ); … … 1224 1224 1225 1225 //-------------------------------------------------------------------------- 1226 // While Stmt1227 template< typename pass_type > 1228 void PassVisitor< pass_type >::visit( While Stmt * node ) {1226 // WhileDoStmt 1227 template< typename pass_type > 1228 void PassVisitor< pass_type >::visit( WhileDoStmt * node ) { 1229 1229 VISIT_START( node ); 1230 1230 … … 1241 1241 1242 1242 template< typename pass_type > 1243 void PassVisitor< pass_type >::visit( const While Stmt * node ) {1243 void PassVisitor< pass_type >::visit( const WhileDoStmt * node ) { 1244 1244 VISIT_START( node ); 1245 1245 … … 1256 1256 1257 1257 template< typename pass_type > 1258 Statement * PassVisitor< pass_type >::mutate( While Stmt * node ) {1258 Statement * PassVisitor< pass_type >::mutate( WhileDoStmt * node ) { 1259 1259 MUTATE_START( node ); 1260 1260 -
src/Common/utility.h
r97c215f rf5a51db 371 371 } 372 372 373 template< typename T > 374 struct enumerate_t { 375 template<typename val_t> 376 struct value_t { 377 val_t & val; 378 size_t idx; 379 }; 380 381 template< typename iter_t, typename val_t > 382 struct iterator_t { 383 iter_t it; 384 size_t idx; 385 386 iterator_t( iter_t _it, size_t _idx ) : it(_it), idx(_idx) {} 387 388 value_t<val_t> operator*() const { return value_t<val_t>{ *it, idx }; } 389 390 bool operator==(const iterator_t & o) const { return o.it == it; } 391 bool operator!=(const iterator_t & o) const { return o.it != it; } 392 393 iterator_t & operator++() { 394 it++; 395 idx++; 396 return *this; 397 } 398 399 using difference_type = typename std::iterator_traits< iter_t >::difference_type; 400 using value_type = value_t<val_t>; 401 using pointer = value_t<val_t> *; 402 using reference = value_t<val_t> &; 403 using iterator_category = std::forward_iterator_tag; 404 }; 405 406 T & ref; 407 408 using iterator = iterator_t< typename T::iterator, typename T::value_type >; 409 using const_iterator = iterator_t< typename T::const_iterator, const typename T::value_type >; 410 411 iterator begin() { return iterator( ref.begin(), 0 ); } 412 iterator end() { return iterator( ref.end(), ref.size() ); } 413 414 const_iterator begin() const { return const_iterator( ref.cbegin(), 0 ); } 415 const_iterator end() const { return const_iterator( ref.cend(), ref.size() ); } 416 417 const_iterator cbegin() const { return const_iterator( ref.cbegin(), 0 ); } 418 const_iterator cend() const { return const_iterator( ref.cend(), ref.size() ); } 419 }; 420 421 template< typename T > 422 enumerate_t<T> enumerate( T & ref ) { 423 return enumerate_t< T >{ ref }; 424 } 425 426 template< typename T > 427 const enumerate_t< const T > enumerate( const T & ref ) { 428 return enumerate_t< const T >{ ref }; 429 } 430 373 431 template< typename OutType, typename Range, typename Functor > 374 432 OutType map_range( const Range& range, Functor&& functor ) { -
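The new enumerate_t / enumerate helper above lets a range-for loop see each element's index alongside the element itself. A self-contained usage sketch (the container contents are made up for illustration; only enumerate and the val/idx members come from the code above):

    #include <iostream>
    #include <string>
    #include <vector>
    // #include "Common/utility.h"   // declares enumerate(), per this changeset

    int main() {
        std::vector<std::string> names = { "alpha", "beta", "gamma" };
        for ( auto entry : enumerate( names ) ) {
            // entry.val is a reference to the element, entry.idx its position
            std::cout << entry.idx << ": " << entry.val << std::endl;
        }
    }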
src/ControlStruct/ExceptTranslate.h
r97c215f rf5a51db 31 31 32 32 void translateTries( std::list< Declaration *> & translationUnit ); 33 void translateTries( ast::TranslationUnit & transUnit ); 33 34 /* Replaces all try blocks (and their many clauses) with function definitions and calls. 34 35 * This uses the exception built-ins to produce typed output and should take place after -
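The header now exposes a second overload of translateTries that operates on the new ast::TranslationUnit, alongside the old Declaration-list form, so passes working on the new AST can invoke the try-block lowering directly. A hedged call-site sketch; transUnit is a placeholder for a translation unit produced earlier in the pipeline:

    // old AST:  ControlStruct::translateTries( translationUnit );   // std::list< Declaration * > &
    // new AST:
    ControlStruct::translateTries( transUnit );                      // ast::TranslationUnit &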
src/ControlStruct/ExceptTranslateNew.cpp
r97c215f rf5a51db 9 9 // Author : Andrew Beach 10 10 // Created On : Mon Nov 8 11:53:00 2021 11 // Last Modified By : Andrew Beach12 // Last Modified On : Mon Nov 8 16:50:00 202113 // Update Count : 011 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 31 18:49:58 2022 13 // Update Count : 1 14 14 // 15 15 … … 20 20 #include "AST/Stmt.hpp" 21 21 #include "AST/TranslationUnit.hpp" 22 #include "AST/DeclReplacer.hpp" 22 23 23 24 namespace ControlStruct { 24 25 25 26 namespace { 27 28 typedef std::list<ast::CatchStmt*> CatchList; 29 30 void split( CatchList& allHandlers, CatchList& terHandlers, 31 CatchList& resHandlers ) { 32 while ( !allHandlers.empty() ) { 33 ast::CatchStmt * stmt = allHandlers.front(); 34 allHandlers.pop_front(); 35 if (stmt->kind == ast::ExceptionKind::Terminate) { 36 terHandlers.push_back(stmt); 37 } else { 38 resHandlers.push_back(stmt); 39 } 40 } 41 } 42 43 void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) { 44 block->push_back(new ast::DeclStmt(block->location, item)); 45 } 26 46 27 47 class TranslateThrowsCore : public ast::WithGuards { … … 128 148 } 129 149 150 151 class TryMutatorCore { 152 // The built in types used in translation. 153 const ast::StructDecl * except_decl; 154 const ast::StructDecl * node_decl; 155 const ast::StructDecl * hook_decl; 156 157 // The many helper functions for code/syntree generation. 158 ast::CompoundStmt * take_try_block( ast::TryStmt * tryStmt ); 159 ast::FunctionDecl * create_try_wrapper( const ast::CompoundStmt * body ); 160 ast::FunctionDecl * create_terminate_catch( CatchList &handlers ); 161 ast::CompoundStmt * create_single_matcher( 162 const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ); 163 ast::FunctionDecl * create_terminate_match( CatchList &handlers ); 164 ast::CompoundStmt * create_terminate_caller( CodeLocation loc, ast::FunctionDecl * try_wrapper, 165 ast::FunctionDecl * terminate_catch, ast::FunctionDecl * terminate_match ); 166 ast::FunctionDecl * create_resume_handler( CatchList &handlers ); 167 ast::CompoundStmt * create_resume_wrapper( 168 const ast::Stmt * wraps, const ast::FunctionDecl * resume_handler ); 169 ast::FunctionDecl * create_finally_wrapper( ast::TryStmt * tryStmt ); 170 ast::ObjectDecl * create_finally_hook( ast::FunctionDecl * finally_wrapper ); 171 ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt ); 172 173 // Types used in translation, make sure to use clone. 
174 // void (*function)(); 175 ast::FunctionDecl * try_func_t; 176 // void (*function)(int, exception); 177 ast::FunctionDecl * catch_func_t; 178 // int (*function)(exception); 179 ast::FunctionDecl * match_func_t; 180 // bool (*function)(exception); 181 ast::FunctionDecl * handle_func_t; 182 // void (*function)(__attribute__((unused)) void *); 183 ast::FunctionDecl * finally_func_t; 184 185 ast::StructInstType * create_except_type() { 186 assert( except_decl ); 187 return new ast::StructInstType( except_decl ); 188 } 189 void init_func_types(); 190 191 public: 192 TryMutatorCore() : 193 except_decl( nullptr ), node_decl( nullptr ), hook_decl( nullptr ) 194 {} 195 196 void previsit( const ast::StructDecl *structDecl ); 197 ast::Stmt * postvisit( const ast::TryStmt *tryStmt ); 198 ast::Stmt * postvisit( const ast::ThrowStmt *throwStmt ); 199 }; 200 201 void TryMutatorCore::init_func_types() { 202 assert( except_decl ); 203 204 ast::ObjectDecl index_obj( 205 {}, 206 "__handler_index", 207 new ast::BasicType(ast::BasicType::SignedInt) 208 ); 209 ast::ObjectDecl exception_obj( 210 {}, 211 "__exception_inst", 212 new ast::PointerType( 213 new ast::StructInstType( except_decl ) 214 ), 215 NULL 216 ); 217 ast::ObjectDecl bool_obj( 218 {}, 219 "__ret_bool", 220 new ast::BasicType( ast::BasicType::Bool ), 221 nullptr, //init 222 ast::Storage::Classes{}, 223 ast::Linkage::Cforall, 224 nullptr, //width 225 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 226 ); 227 ast::ObjectDecl voidptr_obj( 228 {}, 229 "__hook", 230 new ast::PointerType( 231 new ast::VoidType() 232 ), 233 nullptr, //init 234 ast::Storage::Classes{}, 235 ast::Linkage::Cforall, 236 nullptr, //width 237 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 238 ); 239 240 ast::ObjectDecl unused_index_obj( 241 {}, 242 "__handler_index", 243 new ast::BasicType(ast::BasicType::SignedInt), 244 nullptr, 245 ast::Storage::Classes{}, 246 ast::Linkage::Cforall, 247 nullptr, //width 248 std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) } 249 ); 250 //unused_index_obj->attributes.push_back( new Attribute( "unused" ) ); 251 252 try_func_t = new ast::FunctionDecl( 253 {}, 254 "try", 255 {}, //forall 256 {}, //no param 257 {}, //no return 258 nullptr, 259 ast::Storage::Classes{}, 260 ast::Linkage::Cforall 261 ); 262 263 catch_func_t = new ast::FunctionDecl( 264 {}, 265 "catch", 266 {}, //forall 267 {ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param 268 {}, //return void 269 nullptr, 270 ast::Storage::Classes{}, 271 ast::Linkage::Cforall 272 ); 273 274 match_func_t = new ast::FunctionDecl( 275 {}, 276 "match", 277 {}, //forall 278 {ast::deepCopy(&exception_obj)}, 279 {ast::deepCopy(&unused_index_obj)}, 280 nullptr, 281 ast::Storage::Classes{}, 282 ast::Linkage::Cforall 283 ); 284 285 handle_func_t = new ast::FunctionDecl( 286 {}, 287 "handle", 288 {}, //forall 289 {ast::deepCopy(&exception_obj)}, 290 {ast::deepCopy(&bool_obj)}, 291 nullptr, 292 ast::Storage::Classes{}, 293 ast::Linkage::Cforall 294 ); 295 296 finally_func_t = new ast::FunctionDecl( 297 {}, 298 "finally", 299 {}, //forall 300 {ast::deepCopy(&voidptr_obj)}, 301 {}, //return void 302 nullptr, 303 ast::Storage::Classes{}, 304 ast::Linkage::Cforall 305 ); 306 307 //catch_func_t.get_parameters().push_back( index_obj.clone() ); 308 //catch_func_t.get_parameters().push_back( exception_obj.clone() ); 309 //match_func_t.get_returnVals().push_back( unused_index_obj ); 310 //match_func_t.get_parameters().push_back( 
exception_obj.clone() ); 311 //handle_func_t.get_returnVals().push_back( bool_obj.clone() ); 312 //handle_func_t.get_parameters().push_back( exception_obj.clone() ); 313 //finally_func_t.get_parameters().push_back( voidptr_obj.clone() ); 314 } 315 316 // TryStmt Mutation Helpers 317 318 /* 319 ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) { 320 ast::CompoundStmt * block = tryStmt->body; 321 tryStmt->body = nullptr; 322 return block; 323 } 324 */ 325 326 ast::FunctionDecl * TryMutatorCore::create_try_wrapper( 327 const ast::CompoundStmt *body ) { 328 329 ast::FunctionDecl * ret = ast::deepCopy(try_func_t); 330 ret->stmts = body; 331 return ret; 332 } 333 334 ast::FunctionDecl * TryMutatorCore::create_terminate_catch( 335 CatchList &handlers ) { 336 std::vector<ast::ptr<ast::Stmt>> handler_wrappers; 337 338 assert (!handlers.empty()); 339 const CodeLocation loc = handlers.front()->location; 340 341 ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t); 342 const ast::DeclWithType * index_obj = func_t->params.front(); 343 const ast::DeclWithType * except_obj = func_t->params.back(); 344 345 // Index 1..{number of handlers} 346 int index = 0; 347 CatchList::iterator it = handlers.begin(); 348 for ( ; it != handlers.end() ; ++it ) { 349 ++index; 350 ast::CatchStmt * handler = *it; 351 const CodeLocation loc = handler->location; 352 353 // case `index`: 354 // { 355 // `handler.decl` = { (virtual `decl.type`)`except` }; 356 // `handler.body`; 357 // } 358 // return; 359 ast::CompoundStmt * block = new ast::CompoundStmt(loc); 360 361 // Just copy the exception value. (Post Validation) 362 const ast::ObjectDecl * handler_decl = 363 handler->decl.strict_as<ast::ObjectDecl>(); 364 ast::ObjectDecl * local_except = ast::deepCopy(handler_decl); 365 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 366 new ast::VariableExpr( loc, except_obj ), 367 local_except->get_type() 368 ); 369 vcex->location = handler->location; 370 local_except->init = new ast::ListInit(loc, { new ast::SingleInit( loc, vcex ) }); 371 block->push_back( new ast::DeclStmt( loc, local_except ) ); 372 373 // Add the cleanup attribute. 374 local_except->attributes.push_back( new ast::Attribute( 375 "cleanup", 376 { new ast::NameExpr( loc, "__cfaehm_cleanup_terminate" ) } 377 ) ); 378 379 ast::DeclReplacer::DeclMap mapping; 380 mapping[handler_decl] = local_except; 381 const ast::Stmt * mutBody = strict_dynamic_cast<const ast::Stmt *>( 382 ast::DeclReplacer::replace(handler->body, mapping)); 383 384 385 block->push_back( mutBody ); 386 // handler->body = nullptr; 387 388 handler_wrappers.push_back( new ast::CaseStmt(loc, 389 ast::ConstantExpr::from_int(loc, index) , 390 { block, new ast::ReturnStmt( loc, nullptr ) } 391 )); 392 } 393 // TODO: Some sort of meaningful error on default perhaps? 394 395 /* 396 std::list<Statement*> stmt_handlers; 397 while ( !handler_wrappers.empty() ) { 398 stmt_handlers.push_back( handler_wrappers.front() ); 399 handler_wrappers.pop_front(); 400 } 401 */ 402 403 ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc, 404 new ast::VariableExpr( loc, index_obj ), 405 std::move(handler_wrappers) 406 ); 407 ast::CompoundStmt * body = new ast::CompoundStmt(loc, 408 {handler_lookup}); 409 410 func_t->stmts = body; 411 return func_t; 412 } 413 414 // Create a single check from a moddified handler. 415 // except_obj is referenced, modded_handler will be freed. 
416 ast::CompoundStmt * TryMutatorCore::create_single_matcher( 417 const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ) { 418 // { 419 // `modded_handler.decl` 420 // if ( `decl.name = (virtual `decl.type`)`except` 421 // [&& `modded_handler.cond`] ) { 422 // `modded_handler.body` 423 // } 424 // } 425 426 const CodeLocation loc = modded_handler->location; 427 ast::CompoundStmt * block = new ast::CompoundStmt(loc); 428 429 // Local Declaration 430 const ast::ObjectDecl * local_except = 431 modded_handler->decl.strict_as<ast::ObjectDecl>(); 432 block->push_back( new ast::DeclStmt( loc, local_except ) ); 433 434 // Check for type match. 435 ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc, 436 new ast::VariableExpr(loc, except_obj ), 437 local_except->get_type() 438 ); 439 ast::Expr * cond = ast::UntypedExpr::createAssign(loc, 440 new ast::VariableExpr(loc, local_except ), vcex ); 441 442 // Add the check on the conditional if it is provided. 443 if ( modded_handler->cond ) { 444 cond = new ast::LogicalExpr( loc, cond, modded_handler->cond, ast::LogicalFlag::AndExpr ); 445 } 446 // Construct the match condition. 447 block->push_back( new ast::IfStmt(loc, 448 cond, modded_handler->body, nullptr ) ); 449 450 // xxx - how does this work in new ast 451 //modded_handler->set_decl( nullptr ); 452 //modded_handler->set_cond( nullptr ); 453 //modded_handler->set_body( nullptr ); 454 //delete modded_handler; 455 return block; 456 } 457 458 ast::FunctionDecl * TryMutatorCore::create_terminate_match( 459 CatchList &handlers ) { 460 // int match(exception * except) { 461 // HANDLER WRAPPERS { return `index`; } 462 // } 463 464 assert (!handlers.empty()); 465 const CodeLocation loc = handlers.front()->location; 466 467 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 468 469 ast::FunctionDecl * func_t = ast::deepCopy(match_func_t); 470 const ast::DeclWithType * except_obj = func_t->params.back(); 471 472 // Index 1..{number of handlers} 473 int index = 0; 474 CatchList::iterator it; 475 for ( it = handlers.begin() ; it != handlers.end() ; ++it ) { 476 ++index; 477 ast::CatchStmt * handler = *it; 478 479 // Body should have been taken by create_terminate_catch. 480 // xxx - just ignore it? 481 // assert( nullptr == handler->get_body() ); 482 483 // Create new body. 484 handler->body = new ast::ReturnStmt( handler->location, 485 ast::ConstantExpr::from_int( handler->location, index ) ); 486 487 // Create the handler. 
488 body->push_back( create_single_matcher( except_obj, handler ) ); 489 *it = nullptr; 490 } 491 492 body->push_back( new ast::ReturnStmt(loc, 493 ast::ConstantExpr::from_int( loc, 0 ) )); 494 495 func_t->stmts = body; 496 497 return func_t; 498 } 499 500 ast::CompoundStmt * TryMutatorCore::create_terminate_caller( 501 CodeLocation loc, 502 ast::FunctionDecl * try_wrapper, 503 ast::FunctionDecl * terminate_catch, 504 ast::FunctionDecl * terminate_match ) { 505 // { __cfaehm_try_terminate(`try`, `catch`, `match`); } 506 507 ast::UntypedExpr * caller = new ast::UntypedExpr(loc, new ast::NameExpr(loc, 508 "__cfaehm_try_terminate" ) ); 509 caller->args.push_back( new ast::VariableExpr(loc, try_wrapper ) ); 510 caller->args.push_back( new ast::VariableExpr(loc, terminate_catch ) ); 511 caller->args.push_back( new ast::VariableExpr(loc, terminate_match ) ); 512 513 ast::CompoundStmt * callStmt = new ast::CompoundStmt(loc); 514 callStmt->push_back( new ast::ExprStmt( loc, caller ) ); 515 return callStmt; 516 } 517 518 ast::FunctionDecl * TryMutatorCore::create_resume_handler( 519 CatchList &handlers ) { 520 // bool handle(exception * except) { 521 // HANDLER WRAPPERS { `hander->body`; return true; } 522 // } 523 assert (!handlers.empty()); 524 const CodeLocation loc = handlers.front()->location; 525 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 526 527 ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t); 528 const ast::DeclWithType * except_obj = func_t->params.back(); 529 530 CatchList::iterator it; 531 for ( it = handlers.begin() ; it != handlers.end() ; ++it ) { 532 ast::CatchStmt * handler = *it; 533 const CodeLocation loc = handler->location; 534 // Modifiy body. 535 ast::CompoundStmt * handling_code; 536 if (handler->body.as<ast::CompoundStmt>()) { 537 handling_code = 538 strict_dynamic_cast<ast::CompoundStmt*>( handler->body.get_and_mutate() ); 539 } else { 540 handling_code = new ast::CompoundStmt(loc); 541 handling_code->push_back( handler->body ); 542 } 543 handling_code->push_back( new ast::ReturnStmt(loc, 544 ast::ConstantExpr::from_bool(loc, true ) ) ); 545 handler->body = handling_code; 546 547 // Create the handler. 548 body->push_back( create_single_matcher( except_obj, handler ) ); 549 *it = nullptr; 550 } 551 552 body->push_back( new ast::ReturnStmt(loc, 553 ast::ConstantExpr::from_bool(loc, false ) ) ); 554 func_t->stmts = body; 555 556 return func_t; 557 } 558 559 ast::CompoundStmt * TryMutatorCore::create_resume_wrapper( 560 const ast::Stmt * wraps, 561 const ast::FunctionDecl * resume_handler ) { 562 const CodeLocation loc = wraps->location; 563 ast::CompoundStmt * body = new ast::CompoundStmt(loc); 564 565