Changeset f5a51db


Ignore:
Timestamp:
Feb 8, 2022, 11:53:13 AM (20 months ago)
Author:
Michael Brooks <mlbrooks@…>
Branches:
ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
Children:
cc7bbe6
Parents:
97c215f (diff), 1cf8a9f (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
8 added
75 edited

Legend:

Unmodified
Added
Removed
  • benchmark/io/http/Makefile.am

    r97c215f rf5a51db  
    2121include $(top_srcdir)/tools/build/cfa.make
    2222
    23 AM_CFLAGS = -O2 -Wall -Wextra -I$(srcdir) -lrt -pthread # -Werror
     23AM_CFLAGS = -O3 -Wall -Wextra -I$(srcdir) -lrt -pthread # -Werror
    2424AM_CFAFLAGS = -quiet -nodebug
    2525AM_LDFLAGS = -quiet -nodebug
  • benchmark/io/http/main.cfa

    r97c215f rf5a51db  
    1 #define __USE_GNU
     1#define _GNU_SOURCE
    22
    33#include <errno.h>
     
    66#include <unistd.h>
    77extern "C" {
     8        #include <sched.h>
    89        #include <signal.h>
    910        #include <sys/socket.h>
     
    6768        (this.self){ "Server Cluster", options.clopts.params };
    6869
     70        cpu_set_t fullset;
     71        CPU_ZERO(&fullset);
     72        int ret = sched_getaffinity(getpid(), sizeof(fullset), &fullset);
     73        if( ret != 0 ) abort | "sched_getaffinity failed with" | errno | strerror( errno );
     74        int cnt = CPU_COUNT(&fullset);
     75
    6976        this.procs = alloc(options.clopts.nprocs);
    7077        for(i; options.clopts.nprocs) {
    7178                (this.procs[i]){ "Benchmark Processor", this.self };
     79
     80                int c = 0;
     81                int n = 1 + (i % cnt);
     82                for(int j = 0; j < CPU_SETSIZE; j++) {
     83                        if(CPU_ISSET(j, &fullset)) n--;
     84                        if(n == 0) {
     85                                c = j;
     86                                break;
     87                        }
     88                }
     89                cpu_set_t localset;
     90                CPU_ZERO(&localset);
     91                CPU_SET(c, &localset);
     92                ret = pthread_setaffinity_np(this.procs[i].kernel_thread, sizeof(localset), &localset);
     93                if( ret != 0 ) abort | "sched_getaffinity failed with" | ret | strerror( ret );
    7294
    7395                #if !defined(__CFA_NO_STATISTICS__)
     
    146168        int waited = 0;
    147169        for() {
    148                 ret = bind( server_fd, (struct sockaddr *)&address, sizeof(address) );
     170                int sockfd = server_fd;
     171                __CONST_SOCKADDR_ARG addr;
     172                addr.__sockaddr__ = (struct sockaddr *)&address;
     173                socklen_t addrlen = sizeof(address);
     174                ret = bind( sockfd, addr, addrlen );
    149175                if(ret < 0) {
    150176                        if(errno == EADDRINUSE) {
  • doc/theses/thierry_delisle_PhD/thesis/Makefile

    r97c215f rf5a51db  
    4848## Define the documents that need to be made.
    4949all: thesis.pdf
    50 thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib
     50thesis.pdf: ${TEXTS} ${FIGURES} ${PICTURES} thesis.tex glossary.tex local.bib ../../../LaTeXmacros/common.tex ../../../LaTeXmacros/common.sty
    5151
    5252DOCUMENT = thesis.pdf
  • doc/theses/thierry_delisle_PhD/thesis/fig/base.fig

    r97c215f rf5a51db  
    12121 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6900 4200 20 20 6900 4200 6920 4200
    13131 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6975 4200 20 20 6975 4200 6995 4200
     14-6
     156 6375 5100 6675 5250
     161 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6450 5175 20 20 6450 5175 6470 5175
     171 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6525 5175 20 20 6525 5175 6545 5175
     181 3 0 1 0 0 50 -1 20 0.000 1 0.0000 6600 5175 20 20 6600 5175 6620 5175
    1419-6
    15201 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3900 2400 300 300 3900 2400 4200 2400
     
    75802 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    7681         2400 2475 3000 2475
     822 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
     83         3300 5210 3150 4950 2850 4950 2700 5210 2850 5470 3150 5470
     84         3300 5210
     852 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
     86         4500 5210 4350 4950 4050 4950 3900 5210 4050 5470 4350 5470
     87         4500 5210
     882 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
     89         5700 5210 5550 4950 5250 4950 5100 5210 5250 5470 5550 5470
     90         5700 5210
    77914 2 -1 50 -1 0 12 0.0000 2 135 630 2100 3075 Threads\001
    78924 2 -1 50 -1 0 12 0.0000 2 165 450 2100 2850 Ready\001
     
    82964 1 -1 50 -1 0 11 0.0000 2 135 180 2700 3550 TS\001
    83974 1 -1 50 -1 0 11 0.0000 2 135 180 2700 2650 TS\001
     984 2 -1 50 -1 0 12 0.0000 2 135 900 2100 5175 Processors\001
  • doc/theses/thierry_delisle_PhD/thesis/glossary.tex

    r97c215f rf5a51db  
    4040
    4141\textit{Synonyms : User threads, Lightweight threads, Green threads, Virtual threads, Tasks.}
     42}
     43
     44\longnewglossaryentry{rmr}
     45{name={remote memory reference}}
     46{
     47
    4248}
    4349
  • doc/theses/thierry_delisle_PhD/thesis/text/core.tex

    r97c215f rf5a51db  
    4949
    5050\section{Design}
    51 In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.
     51In general, a na\"{i}ve \glsxtrshort{fifo} ready-queue does not scale with increased parallelism from \glspl{hthrd}, resulting in decreased performance. The problem is adding/removing \glspl{thrd} is a single point of contention. As shown in the evaluation sections, most production schedulers do scale when adding \glspl{hthrd}. The solution to this problem is to shard the ready-queue: create multiple sub-ready-queues that multiple \glspl{hthrd} can access and modify without interfering.
    5252
    53 \subsection{Sharding} \label{sec:sharding}
    54 An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again.
     53Before going into the design of \CFA's scheduler proper, I want to discuss two sharding solutions which served as the inspiration scheduler in this thesis.
    5554
     55\subsection{Work-Stealing}
     56
     57As I mentioned in \ref{existing:workstealing}, a popular pattern for sharding the ready-queue is work-stealing. As mentioned, in this pattern each \gls{proc} has its own ready-queue and \glspl{proc} only access each other's ready-queue if they run out of work.
     58The interesting aspect of work-stealing happens in easier scheduling cases, \ie enough work for everyone but no more and no load balancing needed. In these cases, work-stealing is close to optimal scheduling: it can achieve perfect locality and have no contention.
     59On the other hand, work-stealing schedulers only attempt to do load-balancing when a \gls{proc} runs out of work.
     60This means that the scheduler may never balance unfairness that does not result in a \gls{proc} running out of work.
     61Chapter~\ref{microbench} shows that in pathological cases this problem can lead to indefinite starvation.
     62
     63
     64Based on these observations, I conclude that a \emph{perfect} scheduler should behave very similarly to work-stealing in the easy cases, but should have more proactive load-balancing if the need arises.
     65
     66\subsection{Relaxed-Fifo}
     67An entirely different scheme is to create a ``relaxed-FIFO'' queue as in \todo{cite Trevor's paper}. This approach forgoes any ownership between \gls{proc} and ready-queue, and simply creates a pool of ready-queues from which the \glspl{proc} can pick.
     68\Glspl{proc} choose ready-queues at random, but timestamps are added to all elements of the queue and dequeues are done by picking two queues and dequeuing the oldest element.
     69The result is a queue that has both decent scalability and sufficient fairness.
     70The lack of ownership means that as long as one \gls{proc} is still able to repeatedly dequeue elements, it is unlikely that any element will stay on the queue for much longer than any other element.
     71This contrasts with work-stealing, where \emph{any} \gls{proc} busy for an extended period of time results in all the elements on its local queue having to wait, unless another \gls{proc} runs out of work.
     72
     73An important aspect of this scheme's fairness approach is that the timestamps make it possible to evaluate how long elements have been on the queue.
     74However, another major aspect is that \glspl{proc} will eagerly search for these older elements instead of focusing on specific queues.
     75
     76While the fairness of this scheme is good, it does suffer in terms of performance.
     77It requires very wide sharding, \eg at least 4 queues per \gls{hthrd}, and the randomness means locality can suffer significantly and finding non-empty queues can be difficult.
     78
     79\section{\CFA}
     80The \CFA is effectively attempting to merge these two approaches, keeping the best of both.
     81It is based on the
    5682\begin{figure}
    5783        \centering
    5884        \input{base.pstex_t}
    59         \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}
     85        \caption[Base \CFA design]{Base \CFA design \smallskip\newline A list of sub-ready queues offers the sharding, two per \glspl{proc}. However, \glspl{proc} can access any of the sub-queues.}
    6086        \label{fig:base}
    6187\end{figure}
    6288
    63 \subsection{Finding threads}
    64 Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.
    6589
    66 \begin{figure}
    67         \centering
    68         \input{empty.pstex_t}
    69         \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}
    70         \label{fig:empty}
    71 \end{figure}
    7290
    73 There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information:
     91% The common solution to the single point of contention is to shard the ready-queue so each \gls{hthrd} can access the ready-queue without contention, increasing performance.
    7492
    75 \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done.
     93% \subsection{Sharding} \label{sec:sharding}
     94% An interesting approach to sharding a queue is presented in \cit{Trevors paper}. This algorithm presents a queue with a relaxed \glsxtrshort{fifo} guarantee using an array of strictly \glsxtrshort{fifo} sublists as shown in Figure~\ref{fig:base}. Each \emph{cell} of the array has a timestamp for the last operation and a pointer to a linked-list with a lock. Each node in the list is marked with a timestamp indicating when it is added to the list. A push operation is done by picking a random cell, acquiring the list lock, and pushing to the list. If the cell is locked, the operation is simply retried on another random cell until a lock is acquired. A pop operation is done in a similar fashion except two random cells are picked. If both cells are unlocked with non-empty lists, the operation pops the node with the oldest timestamp. If one of the cells is unlocked and non-empty, the operation pops from that cell. If both cells are either locked or empty, the operation picks two new random cells and tries again.
    7695
    77 \begin{figure}
    78         \centering
    79         \vspace*{-5pt}
    80         {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}}
    81         \vspace*{-5pt}
    82         \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.}
    83         \label{fig:emptybit}
     96% \begin{figure}
     97%       \centering
     98%       \input{base.pstex_t}
     99%       \caption[Relaxed FIFO list]{Relaxed FIFO list \smallskip\newline List at the base of the scheduler: an array of strictly FIFO lists. The timestamp is in all nodes and cell arrays.}
     100%       \label{fig:base}
     101% \end{figure}
    84102
    85         \vspace*{10pt}
    86         {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}}
    87         \vspace*{-5pt}
    88         \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.}
    89         \label{fig:emptytree}
     103% \subsection{Finding threads}
     104% Once threads have been distributed onto multiple queues, identifying empty queues becomes a problem. Indeed, if the number of \glspl{thrd} does not far exceed the number of queues, it is probable that several of the cell queues are empty. Figure~\ref{fig:empty} shows an example with 2 \glspl{thrd} running on 8 queues, where the chances of getting an empty queue is 75\% per pick, meaning two random picks yield a \gls{thrd} only half the time. This scenario leads to performance problems since picks that do not yield a \gls{thrd} are not useful and do not necessarily help make more informed guesses.
    90105
    91         \vspace*{10pt}
    92         {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}}
    93         \vspace*{-5pt}
    94         \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}
    95         \label{fig:emptytls}
    96 \end{figure}
     106% \begin{figure}
     107%       \centering
     108%       \input{empty.pstex_t}
     109%       \caption[``More empty'' Relaxed FIFO list]{``More empty'' Relaxed FIFO list \smallskip\newline Emptier state of the queue: the array contains many empty cells, that is strictly FIFO lists containing no elements.}
     110%       \label{fig:empty}
     111% \end{figure}
    97112
    98 \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow.
     113% There are several solutions to this problem, but they ultimately all have to encode if a cell has an empty list. My results show the density and locality of this encoding is generally the dominating factor in these scheme. Classic solutions to this problem use one of three techniques to encode the information:
    99114
    100 \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.
     115% \paragraph{Dense Information} Figure~\ref{fig:emptybit} shows a dense bitmask to identify the cell queues currently in use. This approach means processors can often find \glspl{thrd} in constant time, regardless of how many underlying queues are empty. Furthermore, modern x86 CPUs have extended bit manipulation instructions (BMI2) that allow searching the bitmask with very little overhead compared to the randomized selection approach for a filled ready queue, offering good performance even in cases with many empty inner queues. However, this technique has its limits: with a single word\footnote{Word refers here to however many bits can be written atomically.} bitmask, the total amount of ready-queue sharding is limited to the number of bits in the word. With a multi-word bitmask, this maximum limit can be increased arbitrarily, but the look-up time increases. Finally, a dense bitmap, either single or multi-word, causes additional contention problems that reduces performance because of cache misses after updates. This central update bottleneck also means the information in the bitmask is more often stale before a processor can use it to find an item, \ie mask read says there are available \glspl{thrd} but none on queue when the subsequent atomic check is done.
    101116
    102 I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach.
     117% \begin{figure}
     118%       \centering
     119%       \vspace*{-5pt}
     120%       {\resizebox{0.75\textwidth}{!}{\input{emptybit.pstex_t}}}
     121%       \vspace*{-5pt}
     122%       \caption[Underloaded queue with bitmask]{Underloaded queue with bitmask indicating array cells with items.}
     123%       \label{fig:emptybit}
    103124
    104 \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
    105 In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later.
     125%       \vspace*{10pt}
     126%       {\resizebox{0.75\textwidth}{!}{\input{emptytree.pstex_t}}}
     127%       \vspace*{-5pt}
     128%       \caption[Underloaded queue with binary search-tree]{Underloaded queue with binary search-tree indicating array cells with items.}
     129%       \label{fig:emptytree}
    106130
    107 To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards.  Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached.
     131%       \vspace*{10pt}
     132%       {\resizebox{0.95\textwidth}{!}{\input{emptytls.pstex_t}}}
     133%       \vspace*{-5pt}
     134%       \caption[Underloaded queue with per processor bitmask]{Underloaded queue with per processor bitmask indicating array cells with items.}
     135%       \label{fig:emptytls}
     136% \end{figure}
    108137
    109 The algorithm works as follows:
    110 \begin{itemize}
    111         \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$.
    112         \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions:
    113         \begin{itemize}
    114                 \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$.
    115                 \item Pop operations use $B$ going backwards on each try.
    116         \end{itemize}
    117 \end{itemize}
     138% \paragraph{Sparse Information} Figure~\ref{fig:emptytree} shows an approach using a hierarchical tree data-structure to reduce contention and has been shown to work in similar cases~\cite{ellen2007snzi}. However, this approach may lead to poorer performance due to the inherent pointer chasing cost while still allowing significant contention on the nodes of the tree if the tree is shallow.
    118139
    119 The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.
     140% \paragraph{Local Information} Figure~\ref{fig:emptytls} shows an approach using dense information, similar to the bitmap, but each \gls{hthrd} keeps its own independent copy. While this approach can offer good scalability \emph{and} low latency, the liveliness and discovery of the information can become a problem. This case is made worst in systems with few processors where even blind random picks can find \glspl{thrd} in a few tries.
    120141
    121 \section{Details}
     142% I built a prototype of these approaches and none of these techniques offer satisfying performance when few threads are present. All of these approach hit the same 2 problems. First, randomly picking sub-queues is very fast. That speed means any improvement to the hit rate can easily be countered by a slow-down in look-up speed, whether or not there are empty lists. Second, the array is already sharded to avoid contention bottlenecks, so any denser data structure tends to become a bottleneck. In all cases, these factors meant the best cases scenario, \ie many threads, would get worst throughput, and the worst-case scenario, few threads, would get a better hit rate, but an equivalent poor throughput. As a result I tried an entirely different approach.
     143
     144% \subsection{Dynamic Entropy}\cit{https://xkcd.com/2318/}
     145% In the worst-case scenario there are only few \glspl{thrd} ready to run, or more precisely given $P$ \glspl{proc}\footnote{For simplicity, this assumes there is a one-to-one match between \glspl{proc} and \glspl{hthrd}.}, $T$ \glspl{thrd} and $\epsilon$ a very small number, than the worst case scenario can be represented by $T = P + \epsilon$, with $\epsilon \ll P$. It is important to note in this case that fairness is effectively irrelevant. Indeed, this case is close to \emph{actually matching} the model of the ``Ideal multi-tasking CPU'' on page \pageref{q:LinuxCFS}. In this context, it is possible to use a purely internal-locality based approach and still meet the fairness requirements. This approach simply has each \gls{proc} running a single \gls{thrd} repeatedly. Or from the shared ready-queue viewpoint, each \gls{proc} pushes to a given sub-queue and then pops from the \emph{same} subqueue. The challenge is for the the scheduler to achieve good performance in both the $T = P + \epsilon$ case and the $T \gg P$ case, without affecting the fairness guarantees in the later.
     146
     147% To handle this case, I use a \glsxtrshort{prng}\todo{Fix missing long form} in a novel way. There exist \glsxtrshort{prng}s that are fast, compact and can be run forward \emph{and} backwards.  Linear congruential generators~\cite{wiki:lcg} are an example of \glsxtrshort{prng}s of such \glsxtrshort{prng}s. The novel approach is to use the ability to run backwards to ``replay'' the \glsxtrshort{prng}. The scheduler uses an exclusive \glsxtrshort{prng} instance per \gls{proc}, the random-number seed effectively starts an encoding that produces a list of all accessed subqueues, from latest to oldest. Replaying the \glsxtrshort{prng} to identify cells accessed recently and which probably have data still cached.
     148
     149% The algorithm works as follows:
     150% \begin{itemize}
     151%       \item Each \gls{proc} has two \glsxtrshort{prng} instances, $F$ and $B$.
     152%       \item Push and Pop operations occur as discussed in Section~\ref{sec:sharding} with the following exceptions:
     153%       \begin{itemize}
     154%               \item Push operations use $F$ going forward on each try and on success $F$ is copied into $B$.
     155%               \item Pop operations use $B$ going backwards on each try.
     156%       \end{itemize}
     157% \end{itemize}
     158
     159% The main benefit of this technique is that it basically respects the desired properties of Figure~\ref{fig:fair}. When looking for work, a \gls{proc} first looks at the last cell they pushed to, if any, and then move backwards through its accessed cells. As the \gls{proc} continues looking for work, $F$ moves backwards and $B$ stays in place. As a result, the relation between the two becomes weaker, which means that the probablisitic fairness of the algorithm reverts to normal. Chapter~\ref{proofs} discusses more formally the fairness guarantees of this algorithm.
     160
     161% \section{Details}
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex

    r97c215f rf5a51db  
    44
    55In Memory Plain Text
    6 
    7 Networked Plain Text
    86
    97Networked ZIPF
  • doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex

    r97c215f rf5a51db  
    22
    33The first step of evaluation is always to test-out small controlled cases, to ensure that the basics are working properly.
    4 This sections presents four different experimental setup, evaluating some of the basic features of \CFA's scheduler.
     4This section presents five different experimental setups, evaluating some of the basic features of \CFA's scheduler.
    55
    66\section{Cycling latency}
    77The most basic evaluation of any ready queue is to evaluate the latency needed to push and pop one element from the ready-queue.
    8 While these two operation also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
    9 However, yielding can be treated as a special case, since it also carries the information that the length of the ready queue will not change.
     8Since these two operations also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
     9However, yielding can be treated as a special case, since it also carries the information that the number of ready \glspl{at} will not change.
    1010Not all systems use this information, but those which do may appear to have better performance than they would for disconnected push/pop pairs.
    1111For this reason, I chose a different first benchmark, which I call the Cycle Benchmark.
    12 This benchmark arranges many threads into multiple rings of threads.
     12This benchmark arranges many \glspl{at} into multiple rings of \glspl{at}.
    1313Each ring is effectively a circular singly-linked list.
    14 At runtime, each thread unparks the next thread before parking itself.
     14At runtime, each \gls{at} unparks the next \gls{at} before parking itself.
    1515This corresponds to the desired pair of ready queue operations.
    16 Unparking the next thread requires pushing that thread onto the ready queue and the ensuing park will cause the runtime to pop a thread from the ready-queue.
     16Unparking the next \gls{at} requires pushing that \gls{at} onto the ready queue and the ensuing park will cause the runtime to pop a \gls{at} from the ready-queue.
    1717Figure~\ref{fig:cycle} shows a visual representation of this arrangement.
    1818
    19 The goal of this ring is that the underlying runtime cannot rely on the guarantee that the number of ready threads will stay constant over the duration of the experiment.
    20 In fact, the total number of threads waiting on the ready is expected to vary a little because of the race between the next thread unparking and the current thread parking.
    21 The size of the cycle is also decided based on this race: cycles that are too small may see the
    22 chain of unparks go full circle before the first thread can park.
     19The goal of this ring is that the underlying runtime cannot rely on the guarantee that the number of ready \glspl{at} will stay constant over the duration of the experiment.
     20In fact, the total number of \glspl{at} waiting on the ready queue is expected to vary because of the race between the next \gls{at} unparking and the current \gls{at} parking.
     21The size of the cycle is also decided based on this race: cycles that are too small may see the chain of unparks go full circle before the first \gls{at} can park.
    2322While this would not be a correctness problem, every runtime system must handle that race, it could lead to pushes and pops being optimized away.
    24 Since silently omitting ready-queue operations would throw off the measuring of these operations.
    25 Therefore the ring of threads must be big enough so the threads have the time to fully park before they are unparked.
     23Since silently omitting ready-queue operations would throw off the measuring of these operations, the ring of \glspl{at} must be big enough so the \glspl{at} have the time to fully park before they are unparked.
    2624Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system.
    2725
     
    2927        \centering
    3028        \input{cycle.pstex_t}
    31         \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each thread unparks the next thread in the cycle before parking itself.}
     29        \caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
    3230        \label{fig:cycle}
    3331\end{figure}
    3432
    3533\todo{check term ``idle sleep handling''}
    36 To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of processors available.
     34To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
    3735Beyond this point, adding more rings serves to mitigate even more the idle sleep handling.
    38 This is to avoid the case where one of the worker threads runs out of work because of the variation on the number of ready threads mentionned above.
     36This is to avoid the case where one of the worker \glspl{at} runs out of work because of the variation on the number of ready \glspl{at} mentionned above.
    3937
    4038The actual benchmark is more complicated to handle termination, but that simply requires using a binary semphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
    4139
    42 \todo{mention where to get the code.}
     40\todo{code, setup, results}
     41\begin{lstlisting}
     42        Thread.main() {
     43                count := 0
     44                for {
     45                        wait()
     46                        this.next.wake()
     47                        count ++
     48                        if must_stop() { break }
     49                }
     50                global.count += count
     51        }
     52\end{lstlisting}
     53
    4354
    4455\section{Yield}
    4556For completion, I also include the yield benchmark.
    46 This benchmark is much simpler than the cycle tests, it simply creates many threads that call \texttt{yield}.
     57This benchmark is much simpler than the cycle tests, it simply creates many \glspl{at} that call \texttt{yield}.
     58As mentionned in the previous section, this benchmark may be less representative of usages that only make limited use of \texttt{yield}, due to potential shortcuts in the routine.
     59Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 means the ready queue(s) could be empty.
     60This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done.
     61
     62\todo{code, setup, results}
     63
     64\begin{lstlisting}
     65        Thread.main() {
     66                count := 0
     67                while !stop {
     68                        yield()
     69                        count ++
     70                }
     71                global.count += count
     72        }
     73\end{lstlisting}
     74
     75
     76\section{Churn}
     77The Cycle and Yield benchmark represents an ``easy'' scenario for a scheduler, \eg, an embarrassingly parallel application.
     78In these benchmarks, \glspl{at} can be easily partitioned over the different \glspl{proc} up-front and none of the \glspl{at} communicate with each other.
     79
     80The Churn benchmark represents more chaotic usages, where there is no relation between the last \gls{proc} on which a \gls{at} ran and the \gls{proc} that unblocked it.
     81When a \gls{at} is unblocked from a different \gls{proc} than the one on which it last ran, the unblocking \gls{proc} must either ``steal'' the \gls{at} or place it on a remote queue.
     82This results can result in either contention on the remote queue or \glspl{rmr} on \gls{at} data structure.
     83In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly.
     84
     85To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}.
     86When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any.
     87This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves.
     88For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}.
     89
     90\todo{code, setup, results}
     91\begin{lstlisting}
     92        Thread.main() {
     93                count := 0
     94                for {
     95                        r := random() % len(spots)
     96                        next := xchg(spots[r], this)
     97                        if next { next.wake() }
     98                        wait()
     99                        count ++
     100                        if must_stop() { break }
     101                }
     102                global.count += count
     103        }
     104\end{lstlisting}
    47105
    48106\section{Locality}
    49107
     108\todo{code, setup, results}
     109
    50110\section{Transfer}
     111The last benchmark is more exactly characterize as an experiment than a benchmark.
     112It tests the behavior of the schedulers for a particularly misbehaved workload.
     113In this workload, one of the \gls{at} is selected at random to be the leader.
     114The leader then spins in a tight loop until it has observed that all other \glspl{at} have acknowledged its leadership.
     115The leader \gls{at} then picks a new \gls{at} to be the ``spinner'' and the cycle repeats.
     116
     117The benchmark comes in two flavours for the behavior of the non-leader \glspl{at}:
     118once they acknowledged the leader, they either block on a semaphore or yield repeatadly.
     119
     120This experiment is designed to evaluate the short term load balancing of the scheduler.
     121Indeed, schedulers where the runnable \glspl{at} are partitioned on the \glspl{proc} may need to balance the \glspl{at} for this experient to terminate.
     122This is because the spinning \gls{at} is effectively preventing the \gls{proc} from runnning any other \glspl{thrd}.
     123In the semaphore flavour, the number of runnable \glspl{at} will eventually dwindle down to only the leader.
     124This is a simpler case to handle for schedulers since \glspl{proc} eventually run out of work.
     125In the yielding flavour, the number of runnable \glspl{at} stays constant.
     126This is a harder case to handle because corrective measures must be taken even if work is still available.
     127Note that languages that have mandatory preemption do circumvent this problem by forcing the spinner to yield.
     128
     129\todo{code, setup, results}
     130\begin{lstlisting}
     131        Thread.lead() {
     132                this.idx_seen = ++lead_idx
     133                if lead_idx > stop_idx {
     134                        done := true
     135                        return
     136                }
     137
     138                // Wait for everyone to acknowledge my leadership
     139                start: = timeNow()
     140                for t in threads {
     141                        while t.idx_seen != lead_idx {
     142                                asm pause
     143                                if (timeNow() - start) > 5 seconds { error() }
     144                        }
     145                }
     146
     147                // pick next leader
     148                leader := threads[ prng() % len(threads) ]
     149
     150                // wake every one
     151                if !exhaust {
     152                        for t in threads {
     153                                if t != me { t.wake() }
     154                        }
     155                }
     156        }
     157
     158        Thread.wait() {
     159                this.idx_seen := lead_idx
     160                if exhaust { wait() }
     161                else { yield() }
     162        }
     163
     164        Thread.main() {
     165                while !done  {
     166                        if leader == me { this.lead() }
     167                        else { this.wait() }
     168                }
     169        }
     170\end{lstlisting}
  • doc/theses/thierry_delisle_PhD/thesis/text/existing.tex

    r97c215f rf5a51db  
    3333
    3434
    35 \section{Work Stealing}
     35\section{Work Stealing}\label{existing:workstealing}
    3636One of the most popular scheduling algorithm in practice (see~\ref{existing:prod}) is work-stealing. This idea, introduce by \cite{DBLP:conf/fpca/BurtonS81}, effectively has each worker work on its local tasks first, but allows the possibility for other workers to steal local tasks if they run out of tasks. \cite{DBLP:conf/focs/Blumofe94} introduced the more familiar incarnation of this, where each workers has queue of tasks to accomplish and workers without tasks steal tasks from random workers. (The Burton and Sleep algorithm had trees of tasks and stole only among neighbours). Blumofe and Leiserson also prove worst case space and time requirements for well-structured computations.
    3737
  • libcfa/src/concurrency/io.cfa

    r97c215f rf5a51db  
    306306                ctx->proc->io.pending = true;
    307307                ctx->proc->io.dirty   = true;
    308                 if(sq.to_submit > 30 || !lazy) {
     308                if(sq.to_submit > 30) {
     309                        __tls_stats()->io.flush.full++;
     310                        __cfa_io_flush( ctx->proc, 0 );
     311                }
     312                if(!lazy) {
     313                        __tls_stats()->io.flush.eager++;
    309314                        __cfa_io_flush( ctx->proc, 0 );
    310315                }
  • libcfa/src/concurrency/kernel.cfa

    r97c215f rf5a51db  
    4242
    4343#if !defined(__CFA_NO_STATISTICS__)
    44         #define __STATS( ...) __VA_ARGS__
     44        #define __STATS_DEF( ...) __VA_ARGS__
    4545#else
    46         #define __STATS( ...)
     46        #define __STATS_DEF( ...)
    4747#endif
    4848
     
    122122static thread$ * __next_thread(cluster * this);
    123123static thread$ * __next_thread_slow(cluster * this);
     124static thread$ * __next_thread_search(cluster * this);
    124125static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
    125126static void __run_thread(processor * this, thread$ * dst);
     
    187188                MAIN_LOOP:
    188189                for() {
    189                         #define OLD_MAIN 1
    190                         #if OLD_MAIN
    191190                        // Check if there is pending io
    192191                        __maybe_io_drain( this );
     
    196195
    197196                        if( !readyThread ) {
     197                                __IO_STATS__(true, io.flush.idle++; )
    198198                                __cfa_io_flush( this, 0 );
    199199
     200                                readyThread = __next_thread( this->cltr );
     201                        }
     202
     203                        if( !readyThread ) for(5) {
     204                                __IO_STATS__(true, io.flush.idle++; )
     205
    200206                                readyThread = __next_thread_slow( this->cltr );
     207
     208                                if( readyThread ) break;
     209
     210                                __cfa_io_flush( this, 0 );
    201211                        }
    202212
     
    206216                                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    207217
    208                                 #if !defined(__CFA_NO_STATISTICS__)
    209                                         __tls_stats()->ready.sleep.halts++;
    210                                 #endif
    211 
    212218                                // Push self to idle stack
    213219                                if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
    214220
    215221                                // Confirm the ready-queue is empty
    216                                 readyThread = __next_thread_slow( this->cltr );
     222                                readyThread = __next_thread_search( this->cltr );
    217223                                if( readyThread ) {
    218224                                        // A thread was found, cancel the halt
    219225                                        mark_awake(this->cltr->procs, * this);
    220226
    221                                         #if !defined(__CFA_NO_STATISTICS__)
    222                                                 __tls_stats()->ready.sleep.cancels++;
    223                                         #endif
     227                                        __STATS__(true, ready.sleep.cancels++; )
    224228
    225229                                        // continue the mai loop
     
    248252
    249253                        if(this->io.pending && !this->io.dirty) {
     254                                __IO_STATS__(true, io.flush.dirty++; )
    250255                                __cfa_io_flush( this, 0 );
    251256                        }
    252 
    253                         #else
    254                                 #warning new kernel loop
    255                         SEARCH: {
    256                                 /* paranoid */ verify( ! __preemption_enabled() );
    257 
    258                                 // First, lock the scheduler since we are searching for a thread
    259                                 ready_schedule_lock();
    260 
    261                                 // Try to get the next thread
    262                                 readyThread = pop_fast( this->cltr );
    263                                 if(readyThread) { ready_schedule_unlock(); break SEARCH; }
    264 
    265                                 // If we can't find a thread, might as well flush any outstanding I/O
    266                                 if(this->io.pending) { __cfa_io_flush( this, 0 ); }
    267 
    268                                 // Spin a little on I/O, just in case
    269                                 for(5) {
    270                                         __maybe_io_drain( this );
    271                                         readyThread = pop_fast( this->cltr );
    272                                         if(readyThread) { ready_schedule_unlock(); break SEARCH; }
    273                                 }
    274 
    275                                 // no luck, try stealing a few times
    276                                 for(5) {
    277                                         if( __maybe_io_drain( this ) ) {
    278                                                 readyThread = pop_fast( this->cltr );
    279                                         } else {
    280                                                 readyThread = pop_slow( this->cltr );
    281                                         }
    282                                         if(readyThread) { ready_schedule_unlock(); break SEARCH; }
    283                                 }
    284 
    285                                 // still no luck, search for a thread
    286                                 readyThread = pop_search( this->cltr );
    287                                 if(readyThread) { ready_schedule_unlock(); break SEARCH; }
    288 
    289                                 // Don't block if we are done
    290                                 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
    291                                         ready_schedule_unlock();
    292                                         break MAIN_LOOP;
    293                                 }
    294 
    295                                 __STATS( __tls_stats()->ready.sleep.halts++; )
    296 
    297                                 // Push self to idle stack
    298                                 ready_schedule_unlock();
    299                                 if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
    300                                 ready_schedule_lock();
    301 
    302                                 // Confirm the ready-queue is empty
    303                                 __maybe_io_drain( this );
    304                                 readyThread = pop_search( this->cltr );
    305                                 ready_schedule_unlock();
    306 
    307                                 if( readyThread ) {
    308                                         // A thread was found, cancel the halt
    309                                         mark_awake(this->cltr->procs, * this);
    310 
    311                                         __STATS( __tls_stats()->ready.sleep.cancels++; )
    312 
    313                                         // continue the main loop
    314                                         break SEARCH;
    315                                 }
    316 
    317                                 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
    318                                 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
    319 
    320                                 {
    321                                         eventfd_t val;
    322                                         ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
    323                                         if(ret < 0) {
    324                                                 switch((int)errno) {
    325                                                 case EAGAIN:
    326                                                 #if EAGAIN != EWOULDBLOCK
    327                                                         case EWOULDBLOCK:
    328                                                 #endif
    329                                                 case EINTR:
    330                                                         // No need to do anything special here, just assume it's a legitimate wake-up
    331                                                         break;
    332                                                 default:
    333                                                         abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
    334                                                 }
    335                                         }
    336                                 }
    337 
    338                                         __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
    339 
    340                                 // We were woken up, remove self from idle
    341                                 mark_awake(this->cltr->procs, * this);
    342 
    343                                 // DON'T just proceed, start looking again
    344                                 continue MAIN_LOOP;
    345                         }
    346 
    347                 RUN_THREAD:
    348                         /* paranoid */ verify( ! __preemption_enabled() );
    349                         /* paranoid */ verify( readyThread );
    350 
    351                         // Reset io dirty bit
    352                         this->io.dirty = false;
    353 
    354                         // We found a thread run it
    355                         __run_thread(this, readyThread);
    356 
    357                         // Are we done?
    358                         if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    359 
    360                         if(this->io.pending && !this->io.dirty) {
    361                                 __cfa_io_flush( this, 0 );
    362                         }
    363 
    364                         ready_schedule_lock();
    365                         __maybe_io_drain( this );
    366                         ready_schedule_unlock();
    367                         #endif
    368257                }
    369258
     
    476365                                break RUNNING;
    477366                        case TICKET_UNBLOCK:
    478                                 #if !defined(__CFA_NO_STATISTICS__)
    479                                         __tls_stats()->ready.threads.threads++;
    480                                 #endif
     367                                __STATS__(true, ready.threads.threads++; )
    481368                                // This is case 2, the racy case, someone tried to run this thread before it finished blocking
    482369                                // In this case, just run it again.
     
    493380        __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);
    494381
    495         #if !defined(__CFA_NO_STATISTICS__)
    496                 __tls_stats()->ready.threads.threads--;
    497         #endif
     382        __STATS__(true, ready.threads.threads--; )
    498383
    499384        /* paranoid */ verify( ! __preemption_enabled() );
     
    506391        thread$ * thrd_src = kernelTLS().this_thread;
    507392
    508         __STATS( thrd_src->last_proc = kernelTLS().this_processor; )
     393        __STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )
    509394
    510395        // Run the thread on this processor
     
    558443        // Dereference the thread now because once we push it, there is not guaranteed it's still valid.
    559444        struct cluster * cl = thrd->curr_cluster;
    560         __STATS(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
     445        __STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
    561446
    562447        // push the thread to the cluster ready-queue
     
    609494
    610495        ready_schedule_lock();
    611                 thread$ * thrd;
    612                 for(25) {
    613                         thrd = pop_slow( this );
    614                         if(thrd) goto RET;
    615                 }
    616                 thrd = pop_search( this );
    617 
    618                 RET:
     496                thread$ * thrd = pop_slow( this );
     497        ready_schedule_unlock();
     498
     499        /* paranoid */ verify( ! __preemption_enabled() );
     500        return thrd;
     501}
     502
     503// KERNEL ONLY
     504static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
     505        /* paranoid */ verify( ! __preemption_enabled() );
     506
     507        ready_schedule_lock();
     508                thread$ * thrd = pop_search( this );
    619509        ready_schedule_unlock();
    620510
     
    732622// Wake a thread from the front if there are any
    733623static void __wake_one(cluster * this) {
     624        eventfd_t val;
     625
    734626        /* paranoid */ verify( ! __preemption_enabled() );
    735627        /* paranoid */ verify( ready_schedule_islocked() );
    736628
    737629        // Check if there is a sleeping processor
    738         // int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST);
    739         int fd = 0;
    740         if( __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST) != 0 ) {
    741                 fd = __atomic_exchange_n(&this->procs.fd, 0, __ATOMIC_RELAXED);
    742         }
    743 
    744         // If no one is sleeping, we are done
    745         if( fd == 0 ) return;
    746 
    747         // We found a processor, wake it up
    748         eventfd_t val;
    749         val = 1;
    750         eventfd_write( fd, val );
    751 
    752         #if !defined(__CFA_NO_STATISTICS__)
    753                 if( kernelTLS().this_stats ) {
    754                         __tls_stats()->ready.sleep.wakes++;
    755                 }
    756                 else {
    757                         __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
    758                 }
    759         #endif
     630        struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);
     631
     632        // If no one is sleeping: we are done
     633        if( fdp == 0p ) return;
     634
     635        int fd = 1;
     636        if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
     637                fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
     638        }
     639
     640        switch(fd) {
     641        case 0:
     642                // If the processor isn't ready to sleep then the exchange will already wake it up
     643                #if !defined(__CFA_NO_STATISTICS__)
     644                        if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
     645                        } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
     646                #endif
     647                break;
     648        case 1:
     649                // If someone else already said they will wake them: we are done
     650                #if !defined(__CFA_NO_STATISTICS__)
     651                        if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
     652                        } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
     653                #endif
     654                break;
     655        default:
     656                // If the processor was ready to sleep, we need to wake it up with an actual write
     657                val = 1;
     658                eventfd_write( fd, val );
     659
     660                #if !defined(__CFA_NO_STATISTICS__)
     661                        if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
     662                        } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
     663                #endif
     664                break;
     665        }
    760666
    761667        /* paranoid */ verify( ready_schedule_islocked() );
     
    770676
    771677        __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
     678
     679        this->idle_wctx.fd = 1;
    772680
    773681        eventfd_t val;
     
    779687
    780688static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
     689        // Tell everyone we are ready to go do sleep
     690        for() {
     691                int expected = this->idle_wctx.fd;
     692
     693                // Someone already told us to wake-up! No time for a nap.
     694                if(expected == 1) { return; }
     695
     696                // Try to mark that we are going to sleep
     697                if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     698                        // Every one agreed, taking a nap
     699                        break;
     700                }
     701        }
     702
     703
    781704        #if !defined(CFA_WITH_IO_URING_IDLE)
    782705                #if !defined(__CFA_NO_STATISTICS__)
     
    825748
    826749static bool mark_idle(__cluster_proc_list & this, processor & proc) {
     750        __STATS__(true, ready.sleep.halts++; )
     751
     752        proc.idle_wctx.fd = 0;
     753
    827754        /* paranoid */ verify( ! __preemption_enabled() );
    828755        if(!try_lock( this )) return false;
     
    832759                insert_first(this.idles, proc);
    833760
    834                 __atomic_store_n(&this.fd, proc.idle_fd, __ATOMIC_SEQ_CST);
     761                __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
    835762        unlock( this );
    836763        /* paranoid */ verify( ! __preemption_enabled() );
     
    848775
    849776                {
    850                         int fd = 0;
    851                         if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;
    852                         __atomic_store_n(&this.fd, fd, __ATOMIC_SEQ_CST);
     777                        struct __fd_waitctx * wctx = 0;
     778                        if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
     779                        __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
    853780                }
    854781
     
    914841                unsigned tail = *ctx->cq.tail;
    915842                if(head == tail) return false;
    916                 #if OLD_MAIN
    917                         ready_schedule_lock();
    918                         ret = __cfa_io_drain( proc );
    919                         ready_schedule_unlock();
    920                 #else
    921                         ret = __cfa_io_drain( proc );
    922                 #endif
     843                ready_schedule_lock();
     844                ret = __cfa_io_drain( proc );
     845                ready_schedule_unlock();
    923846        #endif
    924847        return ret;
  • libcfa/src/concurrency/kernel.hfa

    r97c215f rf5a51db  
    5353coroutine processorCtx_t {
    5454        struct processor * proc;
     55};
     56
     57
     58struct __fd_waitctx {
     59        volatile int fd;
    5560};
    5661
     
    101106        int idle_fd;
    102107
     108        // Idle waitctx
     109        struct __fd_waitctx idle_wctx;
     110
    103111        // Termination synchronisation (user semaphore)
    104112        oneshot terminated;
     
    207215
    208216        // FD to use to wake a processor
    209         volatile int fd;
     217        struct __fd_waitctx * volatile fdw;
    210218
    211219        // Total number of processors
  • libcfa/src/concurrency/kernel/fwd.hfa

    r97c215f rf5a51db  
    396396                                if( !(in_kernel) ) enable_interrupts(); \
    397397                        }
     398                        #if defined(CFA_HAVE_LINUX_IO_URING_H)
     399                                #define __IO_STATS__(in_kernel, ...) { \
     400                                        if( !(in_kernel) ) disable_interrupts(); \
     401                                        with( *__tls_stats() ) { \
     402                                                __VA_ARGS__ \
     403                                        } \
     404                                        if( !(in_kernel) ) enable_interrupts(); \
     405                                }
     406                        #else
     407                                #define __IO_STATS__(in_kernel, ...)
     408                        #endif
    398409                #else
    399410                        #define __STATS__(in_kernel, ...)
     411                        #define __IO_STATS__(in_kernel, ...)
    400412                #endif
    401413        }
  • libcfa/src/concurrency/kernel/startup.cfa

    r97c215f rf5a51db  
    537537        }
    538538
     539        this.idle_wctx.fd = 0;
     540
     541        // I'm assuming these two are reserved for standard input and output
     542        // so I'm using them as sentinels with idle_wctx.
     543        /* paranoid */ verify( this.idle_fd != 0 );
     544        /* paranoid */ verify( this.idle_fd != 1 );
     545
    539546        #if !defined(__CFA_NO_STATISTICS__)
    540547                print_stats = 0;
     
    590597// Cluster
    591598static void ?{}(__cluster_proc_list & this) {
    592         this.fd    = 0;
     599        this.fdw   = 0p;
    593600        this.idle  = 0;
    594601        this.total = 0;
  • libcfa/src/concurrency/mutex_stmt.hfa

    r97c215f rf5a51db  
    3838    }
    3939
     40    struct scoped_lock {
     41        L * internal_lock;
     42    };
     43
     44    static inline void ?{}( scoped_lock(L) & this, L & internal_lock ) {
     45        this.internal_lock = &internal_lock;
     46        lock(internal_lock);
     47    }
     48   
     49    static inline void ^?{}( scoped_lock(L) & this ) with(this) {
     50        unlock(*internal_lock);
     51    }
     52
    4053    static inline L * __get_ptr( L & this ) {
    4154        return &this;
  • libcfa/src/concurrency/preemption.cfa

    r97c215f rf5a51db  
    251251        bool enabled = __cfaabi_tls.preemption_state.enabled;
    252252
     253        // Check if there is a pending preemption
     254        processor   * proc = __cfaabi_tls.this_processor;
     255        bool pending = proc ? proc->pending_preemption : false;
     256        if( enabled && pending ) proc->pending_preemption = false;
     257
    253258        // create a assembler label after
    254259        // marked as clobber all to avoid movement
    255260        __cfaasm_label(check, after);
     261
     262        // If we can preempt and there is a pending one
     263        // this is a good time to yield
     264        if( enabled && pending ) {
     265                force_yield( __POLL_PREEMPTION );
     266        }
    256267        return enabled;
    257268}
     
    282293        // marked as clobber all to avoid movement
    283294        __cfaasm_label(get, after);
     295
     296        // This is used everywhere, to avoid cost, we DO NOT poll pending preemption
    284297        return val;
    285298}
     
    358371        if(!ready) { abort("Preemption should be ready"); }
    359372
    360         __cfaasm_label(debug, before);
    361 
    362                 sigset_t oldset;
    363                 int ret;
    364                 ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    365                 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    366 
    367                 ret = sigismember(&oldset, SIGUSR1);
    368                 if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    369                 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
    370 
    371                 ret = sigismember(&oldset, SIGALRM);
    372                 if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    373                 if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
    374 
    375                 ret = sigismember(&oldset, SIGTERM);
    376                 if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    377                 if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
    378 
    379         __cfaasm_label(debug, after);
     373        sigset_t oldset;
     374        int ret;
     375        ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
     376        if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
     377
     378        ret = sigismember(&oldset, SIGUSR1);
     379        if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
     380        if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
     381
     382        ret = sigismember(&oldset, SIGALRM);
     383        if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
     384        if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
     385
     386        ret = sigismember(&oldset, SIGTERM);
     387        if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
     388        if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
    380389}
    381390
     
    548557        __cfaasm_label( check  );
    549558        __cfaasm_label( dsable );
    550         __cfaasm_label( debug  );
     559        // __cfaasm_label( debug  );
    551560
    552561        // Check if preemption is safe
     
    555564        if( __cfaasm_in( ip, check  ) ) { ready = false; goto EXIT; };
    556565        if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; };
    557         if( __cfaasm_in( ip, debug  ) ) { ready = false; goto EXIT; };
     566        // if( __cfaasm_in( ip, debug  ) ) { ready = false; goto EXIT; };
    558567        if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
    559568        if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
     
    661670
    662671        // Check if it is safe to preempt here
    663         if( !preemption_ready( ip ) ) { return; }
     672        if( !preemption_ready( ip ) ) {
     673                #if !defined(__CFA_NO_STATISTICS__)
     674                        __cfaabi_tls.this_stats->ready.threads.preempt.rllfwd++;
     675                #endif
     676                return;
     677        }
    664678
    665679        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
     
    680694
    681695        // Preemption can occur here
     696
     697        #if !defined(__CFA_NO_STATISTICS__)
     698                __cfaabi_tls.this_stats->ready.threads.preempt.yield++;
     699        #endif
    682700
    683701        force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch
  • libcfa/src/concurrency/ready_queue.cfa

    r97c215f rf5a51db  
    201201uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
    202202        /* paranoid */ verify( ! __preemption_enabled() );
    203         /* paranoid */ verify( ! kernelTLS().sched_lock );
    204203
    205204        // Step 1 : lock global lock
     
    207206        //   to simply lock their own lock and enter.
    208207        __atomic_acquire( &write_lock );
     208
     209        // Make sure we won't deadlock ourself
     210        // Checking before acquiring the writer lock isn't safe
     211        // because someone else could have locked us.
     212        /* paranoid */ verify( ! kernelTLS().sched_lock );
    209213
    210214        // Step 2 : lock per-proc lock
  • libcfa/src/concurrency/stats.cfa

    r97c215f rf5a51db  
    2929                stats->ready.threads.threads   = 0;
    3030                stats->ready.threads.cthreads  = 0;
     31                stats->ready.threads.preempt.yield  = 0;
     32                stats->ready.threads.preempt.rllfwd = 0;
    3133                stats->ready.sleep.halts   = 0;
    3234                stats->ready.sleep.cancels = 0;
     35                stats->ready.sleep.early   = 0;
    3336                stats->ready.sleep.wakes   = 0;
     37                stats->ready.sleep.seen    = 0;
    3438                stats->ready.sleep.exits   = 0;
    3539
     
    4347                        stats->io.submit.slow       = 0;
    4448                        stats->io.flush.external    = 0;
     49                        stats->io.flush.dirty       = 0;
     50                        stats->io.flush.full        = 0;
     51                        stats->io.flush.idle        = 0;
     52                        stats->io.flush.eager       = 0;
    4553                        stats->io.calls.flush       = 0;
    4654                        stats->io.calls.submitted   = 0;
     
    7179
    7280        void __tally_stats( struct __stats_t * cltr, struct __stats_t * proc ) {
    73                 tally_one( &cltr->ready.push.local.attempt, &proc->ready.push.local.attempt );
    74                 tally_one( &cltr->ready.push.local.success, &proc->ready.push.local.success );
    75                 tally_one( &cltr->ready.push.share.attempt, &proc->ready.push.share.attempt );
    76                 tally_one( &cltr->ready.push.share.success, &proc->ready.push.share.success );
    77                 tally_one( &cltr->ready.push.extrn.attempt, &proc->ready.push.extrn.attempt );
    78                 tally_one( &cltr->ready.push.extrn.success, &proc->ready.push.extrn.success );
    79                 tally_one( &cltr->ready.pop.local .attempt, &proc->ready.pop.local .attempt );
    80                 tally_one( &cltr->ready.pop.local .success, &proc->ready.pop.local .success );
    81                 tally_one( &cltr->ready.pop.help  .attempt, &proc->ready.pop.help  .attempt );
    82                 tally_one( &cltr->ready.pop.help  .success, &proc->ready.pop.help  .success );
    83                 tally_one( &cltr->ready.pop.steal .attempt, &proc->ready.pop.steal .attempt );
    84                 tally_one( &cltr->ready.pop.steal .success, &proc->ready.pop.steal .success );
    85                 tally_one( &cltr->ready.pop.search.attempt, &proc->ready.pop.search.attempt );
    86                 tally_one( &cltr->ready.pop.search.success, &proc->ready.pop.search.success );
    87                 tally_one( &cltr->ready.threads.migration , &proc->ready.threads.migration  );
    88                 tally_one( &cltr->ready.threads.extunpark , &proc->ready.threads.extunpark  );
    89                 tally_one( &cltr->ready.threads.threads   , &proc->ready.threads.threads    );
    90                 tally_one( &cltr->ready.threads.cthreads  , &proc->ready.threads.cthreads   );
    91                 tally_one( &cltr->ready.sleep.halts       , &proc->ready.sleep.halts        );
    92                 tally_one( &cltr->ready.sleep.cancels     , &proc->ready.sleep.cancels      );
    93                 tally_one( &cltr->ready.sleep.wakes       , &proc->ready.sleep.wakes        );
    94                 tally_one( &cltr->ready.sleep.exits       , &proc->ready.sleep.exits        );
     81                tally_one( &cltr->ready.push.local.attempt    , &proc->ready.push.local.attempt     );
     82                tally_one( &cltr->ready.push.local.success    , &proc->ready.push.local.success     );
     83                tally_one( &cltr->ready.push.share.attempt    , &proc->ready.push.share.attempt     );
     84                tally_one( &cltr->ready.push.share.success    , &proc->ready.push.share.success     );
     85                tally_one( &cltr->ready.push.extrn.attempt    , &proc->ready.push.extrn.attempt     );
     86                tally_one( &cltr->ready.push.extrn.success    , &proc->ready.push.extrn.success     );
     87                tally_one( &cltr->ready.pop.local .attempt    , &proc->ready.pop.local .attempt     );
     88                tally_one( &cltr->ready.pop.local .success    , &proc->ready.pop.local .success     );
     89                tally_one( &cltr->ready.pop.help  .attempt    , &proc->ready.pop.help  .attempt     );
     90                tally_one( &cltr->ready.pop.help  .success    , &proc->ready.pop.help  .success     );
     91                tally_one( &cltr->ready.pop.steal .attempt    , &proc->ready.pop.steal .attempt     );
     92                tally_one( &cltr->ready.pop.steal .success    , &proc->ready.pop.steal .success     );
     93                tally_one( &cltr->ready.pop.search.attempt    , &proc->ready.pop.search.attempt     );
     94                tally_one( &cltr->ready.pop.search.success    , &proc->ready.pop.search.success     );
     95                tally_one( &cltr->ready.threads.migration     , &proc->ready.threads.migration      );
     96                tally_one( &cltr->ready.threads.extunpark     , &proc->ready.threads.extunpark      );
     97                tally_one( &cltr->ready.threads.threads       , &proc->ready.threads.threads        );
     98                tally_one( &cltr->ready.threads.cthreads      , &proc->ready.threads.cthreads       );
     99                tally_one( &cltr->ready.threads.preempt.yield , &proc->ready.threads.preempt.yield  );
     100                tally_one( &cltr->ready.threads.preempt.rllfwd, &proc->ready.threads.preempt.rllfwd );
     101                tally_one( &cltr->ready.sleep.halts           , &proc->ready.sleep.halts            );
     102                tally_one( &cltr->ready.sleep.cancels         , &proc->ready.sleep.cancels          );
     103                tally_one( &cltr->ready.sleep.early           , &proc->ready.sleep.early            );
     104                tally_one( &cltr->ready.sleep.wakes           , &proc->ready.sleep.wakes            );
      105                tally_one( &cltr->ready.sleep.seen            , &proc->ready.sleep.seen             );
     106                tally_one( &cltr->ready.sleep.exits           , &proc->ready.sleep.exits            );
    95107
    96108                #if defined(CFA_HAVE_LINUX_IO_URING_H)
     
    103115                        tally_one( &cltr->io.submit.slow      , &proc->io.submit.slow       );
    104116                        tally_one( &cltr->io.flush.external   , &proc->io.flush.external    );
     117                        tally_one( &cltr->io.flush.dirty      , &proc->io.flush.dirty       );
     118                        tally_one( &cltr->io.flush.full       , &proc->io.flush.full        );
     119                        tally_one( &cltr->io.flush.idle       , &proc->io.flush.idle        );
     120                        tally_one( &cltr->io.flush.eager      , &proc->io.flush.eager       );
    105121                        tally_one( &cltr->io.calls.flush      , &proc->io.calls.flush       );
    106122                        tally_one( &cltr->io.calls.submitted  , &proc->io.calls.submitted   );
     
    153169                             | " (" | eng3(ready.pop.search.attempt) | " try)";
    154170
    155                         sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel," | eng3(ready.sleep.wakes) | "wake," | eng3(ready.sleep.exits) | "exit";
     171                        sstr | "- Idle Slp : " | eng3(ready.sleep.halts) | "halt," | eng3(ready.sleep.cancels) | "cancel,"
     172                             | eng3(ready.sleep.wakes + ready.sleep.early) | '(' | eng3(ready.sleep.early) | ',' | eng3(ready.sleep.seen) | ')' | " wake(early, seen),"
     173                             | eng3(ready.sleep.exits) | "exit";
     174                        sstr | "- Preemption : " | eng3(ready.threads.preempt.yield) | "yields," | eng3(ready.threads.preempt.rllfwd) | "delayed";
    156175                        sstr | nl;
    157176                }
     
    178197                                if(io.alloc.fail || io.alloc.revoke || io.alloc.block)
    179198                                        sstr | "-     failures      : " | eng3(io.alloc.fail) | "oom, " | eng3(io.alloc.revoke) | "rvk, " | eng3(io.alloc.block) | "blk";
    180                                 if(io.flush.external)
    181                                         sstr | "- flush external    : " | eng3(io.flush.external);
     199                                // if(io.flush.external)
     200                                //      sstr | "- flush external    : " | eng3(io.flush.external);
    182201
    183202                                double avgsubs = ((double)io.calls.submitted) / io.calls.flush;
    184203                                double avgcomp = ((double)io.calls.completed) / io.calls.drain;
    185204                                sstr | "- syscll : "
    186                                      |   " sub " | eng3(io.calls.flush) | "/" | eng3(io.calls.submitted) | "(" | ws(3, 3, avgsubs) | "/flush)"
    187                                      | " - cmp " | eng3(io.calls.drain) | "/" | eng3(io.calls.completed) | "(" | ws(3, 3, avgcomp) | "/drain)"
     205                                     |   " sub " | eng3(io.calls.submitted) | "/" | eng3(io.calls.flush) | "(" | ws(3, 3, avgsubs) | "/flush)"
     206                                     | " - cmp " | eng3(io.calls.completed) | "/" | eng3(io.calls.drain) | "(" | ws(3, 3, avgcomp) | "/drain)"
    188207                                     | " - " | eng3(io.calls.errors.busy) | " EBUSY";
     208                                sstr | " - sub: " | eng3(io.flush.full) | "full, " | eng3(io.flush.dirty) | "drty, " | eng3(io.flush.idle) | "idle, " | eng3(io.flush.eager) | "eagr, " | eng3(io.flush.external) | "ext";
    189209                                sstr | "- ops blk: "
    190210                                     |   " sk rd: " | eng3(io.ops.sockread)  | "epll: " | eng3(io.ops.epllread)
  • libcfa/src/concurrency/stats.hfa

    r97c215f rf5a51db  
    6565                        volatile  int64_t threads;  // number of threads in the system, includes only local change
    6666                        volatile  int64_t cthreads; // number of threads in the system, includes only local change
     67                        struct {
     68                                volatile uint64_t yield;
     69                                volatile uint64_t rllfwd;
     70                        } preempt;
    6771                } threads;
    6872                struct {
    6973                        volatile uint64_t halts;
    7074                        volatile uint64_t cancels;
     75                        volatile uint64_t early;
    7176                        volatile uint64_t wakes;
     77                        volatile uint64_t seen;
    7278                        volatile uint64_t exits;
    7379                } sleep;
     
    8995                        struct {
    9096                                volatile uint64_t external;
     97                                volatile uint64_t dirty;
     98                                volatile uint64_t full;
     99                                volatile uint64_t idle;
     100                                volatile uint64_t eager;
    91101                        } flush;
    92102                        struct {
  • libcfa/src/stdhdr/pthread.h

    r97c215f rf5a51db  
    1010// Created On       : Wed Jun 16 13:39:06 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Jun 16 13:39:42 2021
    13 // Update Count     : 1
     12// Last Modified On : Thu Feb  3 21:53:26 2022
     13// Update Count     : 13
    1414//
    1515
     16// pthread.h and setjmp.h cannot agree on the type of __sigsetjmp:
     17//
     18//   extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __attribute__ ((__nothrow__));
     19//   extern int __sigsetjmp (struct __jmp_buf_tag __env[1], int __savemask) __attribute__ ((__nothrow__));
     20//
     21// With -Wall, gcc-11 warns about the disagreement unless the CPP directive
     22//
     23//    # 1 "/usr/include/pthread.h" 1 3 4
     24//
     25// appears, which appears to be witchcraft. Unfortunately, this directive is removed by the CFA preprocessor, so the
     26// batchtest fails because of the spurious warning message. Hence, the warning is elided.
     27
    1628extern "C" {
     29#if defined(__GNUC__) && __GNUC__ == 11
     30        #pragma GCC diagnostic push
     31        #pragma GCC diagnostic ignored "-Warray-parameter"
     32#endif // defined(__GNUC__) && __GNUC__ == 11
     33
    1734#include_next <pthread.h>                                                               // has internal check for multiple expansion
     35
     36#if defined(__GNUC__) && __GNUC__ == 11
     37        #pragma GCC diagnostic pop
     38#endif // defined(__GNUC__) && __GNUC__ == 11
    1839} // extern "C"
    1940
    2041// Local Variables: //
    21 // tab-width: 4 //
    2242// mode: c++ //
    23 // compile-command: "make install" //
    2443// End: //
  • libcfa/src/stdhdr/setjmp.h

    r97c215f rf5a51db  
    1010// Created On       : Mon Jul  4 23:25:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Jul  5 20:38:33 2016
    13 // Update Count     : 12
     12// Last Modified On : Thu Feb  3 21:53:28 2022
     13// Update Count     : 18
    1414//
    1515
     16// pthread.h and setjmp.h cannot agree on the type of __sigsetjmp:
     17//
     18//   extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __attribute__ ((__nothrow__));
     19//   extern int __sigsetjmp (struct __jmp_buf_tag __env[1], int __savemask) __attribute__ ((__nothrow__));
     20//
     21// With -Wall, gcc-11 warns about the disagreement unless the CPP directive
     22//
     23//    # 1 "/usr/include/pthread.h" 1 3 4
     24//
     25// appears, which appears to be witchcraft. Unfortunately, this directive is removed by the CFA preprocessor, so the
     26// batchtest fails because of the spurious warning message. Hence, the warning is elided.
     27
    1628extern "C" {
     29#if defined(__GNUC__) && __GNUC__ == 11
     30        #pragma GCC diagnostic push
     31        #pragma GCC diagnostic ignored "-Warray-parameter"
     32#endif // defined(__GNUC__) && __GNUC__ == 11
     33
    1734#include_next <setjmp.h>                                                                // has internal check for multiple expansion
     35
     36#if defined(__GNUC__) && __GNUC__ == 11
     37        #pragma GCC diagnostic pop
     38#endif // defined(__GNUC__) && __GNUC__ == 11
    1839} // extern "C"
    1940
    2041// Local Variables: //
    21 // tab-width: 4 //
    2242// mode: c++ //
    23 // compile-command: "make install" //
    2443// End: //
  • src/AST/Convert.cpp

    r97c215f rf5a51db  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu May 09 15::37::05 2019
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed Jul 14 16:15:00 2021
    13 // Update Count     : 37
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 13:19:22 2022
     13// Update Count     : 41
    1414//
    1515
     
    393393                auto stmt = new IfStmt(
    394394                        get<Expression>().accept1( node->cond ),
    395                         get<Statement>().accept1( node->thenPart ),
    396                         get<Statement>().accept1( node->elsePart ),
     395                        get<Statement>().accept1( node->then ),
     396                        get<Statement>().accept1( node->else_ ),
    397397                        get<Statement>().acceptL( node->inits )
    398398                );
     
    419419        }
    420420
    421         const ast::Stmt * visit( const ast::WhileStmt * node ) override final {
     421        const ast::Stmt * visit( const ast::WhileDoStmt * node ) override final {
    422422                if ( inCache( node ) ) return nullptr;
    423423                auto inits = get<Statement>().acceptL( node->inits );
    424                 auto stmt = new WhileStmt(
     424                auto stmt = new WhileDoStmt(
    425425                        get<Expression>().accept1( node->cond ),
    426426                        get<Statement>().accept1( node->body ),
     427                        get<Statement>().accept1( node->else_ ),
    427428                        inits,
    428429                        node->isDoWhile
     
    437438                        get<Expression>().accept1( node->cond ),
    438439                        get<Expression>().accept1( node->inc ),
    439                         get<Statement>().accept1( node->body )
     440                        get<Statement>().accept1( node->body ),
     441                        get<Statement>().accept1( node->else_ )
    440442                );
    441443                return stmtPostamble( stmt, node );
     
    18721874                        old->location,
    18731875                        GET_ACCEPT_1(condition, Expr),
    1874                         GET_ACCEPT_1(thenPart, Stmt),
    1875                         GET_ACCEPT_1(elsePart, Stmt),
     1876                        GET_ACCEPT_1(then, Stmt),
     1877                        GET_ACCEPT_1(else_, Stmt),
    18761878                        GET_ACCEPT_V(initialization, Stmt),
    18771879                        GET_LABELS_V(old->labels)
     
    19021904        }
    19031905
    1904         virtual void visit( const WhileStmt * old ) override final {
     1906        virtual void visit( const WhileDoStmt * old ) override final {
    19051907                if ( inCache( old ) ) return;
    1906                 this->node = new ast::WhileStmt(
     1908                this->node = new ast::WhileDoStmt(
    19071909                        old->location,
    19081910                        GET_ACCEPT_1(condition, Expr),
    19091911                        GET_ACCEPT_1(body, Stmt),
     1912                        GET_ACCEPT_1(else_, Stmt),
    19101913                        GET_ACCEPT_V(initialization, Stmt),
    19111914                        old->isDoWhile,
     
    19231926                        GET_ACCEPT_1(increment, Expr),
    19241927                        GET_ACCEPT_1(body, Stmt),
     1928                        GET_ACCEPT_1(else_, Stmt),
    19251929                        GET_LABELS_V(old->labels)
    19261930                );
  • src/AST/Copy.hpp

    r97c215f rf5a51db  
    1010// Created On       : Wed Jul 10 16:13:00 2019
    1111// Last Modified By : Andrew Beach
    12 // Last Modified On : Thr Nov 11  9:22:00 2021
    13 // Update Count     : 2
     12// Last Modified On : Wed Dec 15 11:07:00 2021
     13// Update Count     : 3
    1414//
    1515
     
    5252Node * deepCopy<Node>( const Node * localRoot );
    5353
     54template<typename node_t, enum Node::ref_type ref_t>
     55node_t * shallowCopy( const ptr_base<node_t, ref_t> & localRoot ) {
     56        return shallowCopy( localRoot.get() );
     57}
     58
     59template<typename node_t, enum Node::ref_type ref_t>
     60node_t * deepCopy( const ptr_base<node_t, ref_t> & localRoot ) {
     61        return deepCopy( localRoot.get() );
     62}
     63
    5464}
    5565
  • src/AST/Fwd.hpp

    r97c215f rf5a51db  
    1010// Created On       : Wed May  8 16:05:00 2019
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 12 18:37:39 2021
    13 // Update Count     : 4
     12// Last Modified On : Tue Feb  1 09:08:33 2022
     13// Update Count     : 5
    1414//
    1515
     
    4444class DirectiveStmt;
    4545class IfStmt;
    46 class WhileStmt;
     46class WhileDoStmt;
    4747class ForStmt;
    4848class SwitchStmt;
  • src/AST/Node.cpp

    r97c215f rf5a51db  
    1010// Created On       : Thu May 16 14:16:00 2019
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 12 18:25:06 2021
    13 // Update Count     : 2
     12// Last Modified On : Tue Feb  1 09:09:39 2022
     13// Update Count     : 3
    1414//
    1515
     
    146146template class ast::ptr_base< ast::IfStmt, ast::Node::ref_type::weak >;
    147147template class ast::ptr_base< ast::IfStmt, ast::Node::ref_type::strong >;
    148 template class ast::ptr_base< ast::WhileStmt, ast::Node::ref_type::weak >;
    149 template class ast::ptr_base< ast::WhileStmt, ast::Node::ref_type::strong >;
     148template class ast::ptr_base< ast::WhileDoStmt, ast::Node::ref_type::weak >;
     149template class ast::ptr_base< ast::WhileDoStmt, ast::Node::ref_type::strong >;
    150150template class ast::ptr_base< ast::ForStmt, ast::Node::ref_type::weak >;
    151151template class ast::ptr_base< ast::ForStmt, ast::Node::ref_type::strong >;
  • src/AST/Node.hpp

    r97c215f rf5a51db  
    188188        }
    189189
     190        ptr_base & operator=( const node_t * node ) {
     191                assign( node );
     192                return *this;
     193        }
     194
    190195        template<typename o_node_t>
    191196        ptr_base & operator=( const o_node_t * node ) {
  • src/AST/Pass.hpp

    r97c215f rf5a51db  
    146146        const ast::Stmt *             visit( const ast::DirectiveStmt        * ) override final;
    147147        const ast::Stmt *             visit( const ast::IfStmt               * ) override final;
    148         const ast::Stmt *             visit( const ast::WhileStmt            * ) override final;
     148        const ast::Stmt *             visit( const ast::WhileDoStmt          * ) override final;
    149149        const ast::Stmt *             visit( const ast::ForStmt              * ) override final;
    150150        const ast::Stmt *             visit( const ast::SwitchStmt           * ) override final;
     
    238238
    239239private:
    240         const ast::Stmt * call_accept( const ast::Stmt * );
    241         const ast::Expr * call_accept( const ast::Expr * );
    242 
    243         // requests WithStmtsToAdd directly add to this statement, as if it is a compound.
    244 
    245         const ast::Stmt * call_accept_as_compound(const ast::Stmt *);
    246 
     240
     241        // Regular nodes
    247242        template< typename node_t >
    248         auto call_accept( const node_t * node ) -> typename std::enable_if<
     243        struct result1 {
     244                bool differs;
     245                const node_t * value;
     246
     247                template< typename object_t, typename super_t, typename field_t >
     248                void apply(object_t *, field_t super_t::* field);
     249        };
     250
     251        result1<ast::Stmt> call_accept( const ast::Stmt * );
     252        result1<ast::Expr> call_accept( const ast::Expr * );
     253
     254        template< typename node_t >
     255        auto call_accept( const node_t * node )
     256                -> typename std::enable_if<
    249257                                !std::is_base_of<ast::Expr, node_t>::value &&
    250258                                !std::is_base_of<ast::Stmt, node_t>::value
    251                         , decltype( node->accept(*this) )
     259                        , result1<
     260                                typename std::remove_pointer< decltype( node->accept(*this) ) >::type
     261                        >
    252262                >::type;
    253263
     264        // requests WithStmtsToAdd directly add to this statement, as if it is a compound.
     265        result1<ast::Stmt> call_accept_as_compound(const ast::Stmt *);
     266
     267        template<typename it_t, template <class...> class container_t>
     268                static inline void take_all_delta( it_t it, container_t<ast::ptr<ast::Decl>> * decls, bool * mutated = nullptr ) {
     269                        if(empty(decls)) return;
     270
     271                        std::transform(decls->begin(), decls->end(), it, [](ast::ptr<ast::Decl>&& decl) -> auto {
     272                                        auto loc = decl->location;
     273                                        auto stmt = new DeclStmt( loc, decl.release() );
     274                                        return { {stmt}, -1, false };
     275                                });
     276                        decls->clear();
     277                        if(mutated) *mutated = true;
     278                }
     279
     280        // Container of statements
    254281        template< template <class...> class container_t >
    255         container_t< ptr<Stmt> > call_accept( const container_t< ptr<Stmt> > & );
    256 
     282        struct resultNstmt {
     283                struct delta {
     284                        ptr<Stmt> nval;
     285                        ssize_t old_idx;
     286                        bool is_old;
     287
     288                        delta(const Stmt * s, ssize_t i, bool old) : nval{s}, old_idx{i}, is_old{old} {}
     289                };
     290
     291                bool differs;
     292                container_t< delta > values;
     293
     294                resultNstmt() : differs(false), values{} {}
     295                resultNstmt(bool diff, container_t< delta > && vals) : differs(diff), values(vals) {}
     296
     297                template< typename object_t, typename super_t, typename field_t >
     298                void apply(object_t *, field_t super_t::* field);
     299
     300                template< template <class...> class incontainer_t >
     301                void take_all( incontainer_t<ast::ptr<ast::Stmt>> * stmts ) {
     302                        if(!stmts || stmts->empty()) return;
     303
     304                        std::transform(stmts->begin(), stmts->end(), std::back_inserter( values ), [](ast::ptr<ast::Stmt>& decl) -> delta {
     305                                        return delta( decl.release(), -1, false );
     306                                });
     307                        stmts->clear();
     308                        differs = true;
     309                }
     310
     311                template< template <class...> class incontainer_t >
     312                void take_all( incontainer_t<ast::ptr<ast::Decl>> * decls ) {
     313                        if(!decls || decls->empty()) return;
     314
     315                        std::transform(decls->begin(), decls->end(), std::back_inserter( values ), [](ast::ptr<ast::Decl>& decl) -> auto {
     316                                        auto loc = decl->location;
     317                                        auto stmt = new DeclStmt( loc, decl.release() );
     318                                        return delta( stmt, -1, false );
     319                                });
     320                        decls->clear();
     321                        differs = true;
     322                }
     323        };
     324
     325        template< template <class...> class container_t >
     326        resultNstmt<container_t> call_accept( const container_t< ptr<Stmt> > & );
     327
     328        // Container of something
    257329        template< template <class...> class container_t, typename node_t >
    258         container_t< ptr<node_t> > call_accept( const container_t< ptr<node_t> > & container );
     330        struct resultN {
     331                bool differs;
     332                container_t<ptr<node_t>> values;
     333
     334                template< typename object_t, typename super_t, typename field_t >
     335                void apply(object_t *, field_t super_t::* field);
     336        };
     337
     338        template< template <class...> class container_t, typename node_t >
     339        resultN< container_t, node_t > call_accept( const container_t< ptr<node_t> > & container );
    259340
    260341public:
    261342        /// Logic to call the accept and mutate the parent if needed, delegates call to accept
    262         template<typename node_t, typename parent_t, typename child_t>
    263         void maybe_accept(const node_t * &, child_t parent_t::* child);
    264 
    265         template<typename node_t, typename parent_t, typename child_t>
    266         void maybe_accept_as_compound(const node_t * &, child_t parent_t::* child);
     343        template<typename node_t, typename parent_t, typename field_t>
     344        void maybe_accept(const node_t * &, field_t parent_t::* field);
     345
     346        template<typename node_t, typename parent_t, typename field_t>
     347        void maybe_accept_as_compound(const node_t * &, field_t parent_t::* field);
    267348
    268349private:
  • src/AST/Pass.impl.hpp

    r97c215f rf5a51db  
    3434        __pass::previsit( core, node, 0 );
    3535
    36 #define VISIT( code... ) \
    37         /* if this node should visit its children */ \
    38         if ( __visit_children() ) { \
    39                 /* visit the children */ \
    40                 code \
    41         }
    42 
    4336#define VISIT_END( type, node ) \
    4437        /* call the implementation of the postvisit of this pass */ \
     
    8679
    8780                template<typename it_t, template <class...> class container_t>
    88                 static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * decls, bool * mutated = nullptr ) {
    89                         if(empty(decls)) return;
    90 
    91                         std::move(decls->begin(), decls->end(), it);
    92                         decls->clear();
     81                static inline void take_all( it_t it, container_t<ast::ptr<ast::Stmt>> * stmts, bool * mutated = nullptr ) {
     82                        if(empty(stmts)) return;
     83
     84                        std::move(stmts->begin(), stmts->end(), it);
     85                        stmts->clear();
    9386                        if(mutated) *mutated = true;
    9487                }
     
    130123                        return !new_val.empty();
    131124                }
     125        }
     126
     127
     128        template< typename core_t >
     129        template< typename node_t >
     130        template< typename object_t, typename super_t, typename field_t >
     131        void ast::Pass< core_t >::result1< node_t >::apply(object_t * object, field_t super_t::* field) {
     132                object->*field = value;
    132133        }
    133134
     
    138139                                !std::is_base_of<ast::Expr, node_t>::value &&
    139140                                !std::is_base_of<ast::Stmt, node_t>::value
    140                         , decltype( node->accept(*this) )
     141                        , ast::Pass< core_t >::result1<
     142                                typename std::remove_pointer< decltype( node->accept(*this) ) >::type
     143                        >
    141144                >::type
    142145        {
     
    147150                static_assert( !std::is_base_of<ast::Stmt, node_t>::value, "ERROR");
    148151
    149                 return node->accept( *this );
     152                auto nval = node->accept( *this );
     153                ast::Pass< core_t >::result1<
     154                        typename std::remove_pointer< decltype( node->accept(*this) ) >::type
     155                > res;
     156                res.differs = nval != node;
     157                res.value = nval;
     158                return res;
    150159        }
    151160
    152161        template< typename core_t >
    153         const ast::Expr * ast::Pass< core_t >::call_accept( const ast::Expr * expr ) {
     162        ast::Pass< core_t >::result1<ast::Expr> ast::Pass< core_t >::call_accept( const ast::Expr * expr ) {
    154163                __pedantic_pass_assert( __visit_children() );
    155164                __pedantic_pass_assert( expr );
     
    160169                }
    161170
    162                 return expr->accept( *this );
     171                auto nval = expr->accept( *this );
     172                return { nval != expr, nval };
    163173        }
    164174
    165175        template< typename core_t >
    166         const ast::Stmt * ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) {
     176        ast::Pass< core_t >::result1<ast::Stmt> ast::Pass< core_t >::call_accept( const ast::Stmt * stmt ) {
    167177                __pedantic_pass_assert( __visit_children() );
    168178                __pedantic_pass_assert( stmt );
    169179
    170                 return stmt->accept( *this );
     180                const ast::Stmt * nval = stmt->accept( *this );
     181                return { nval != stmt, nval };
    171182        }
    172183
    173184        template< typename core_t >
    174         const ast::Stmt * ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) {
     185        ast::Pass< core_t >::result1<ast::Stmt> ast::Pass< core_t >::call_accept_as_compound( const ast::Stmt * stmt ) {
    175186                __pedantic_pass_assert( __visit_children() );
    176187                __pedantic_pass_assert( stmt );
     
    197208                // If the pass doesn't want to add anything then we are done
    198209                if( empty(stmts_before) && empty(stmts_after) && empty(decls_before) && empty(decls_after) ) {
    199                         return nstmt;
     210                        return { nstmt != stmt, nstmt };
    200211                }
    201212
     
    219230                __pass::take_all( std::back_inserter( compound->kids ), stmts_after );
    220231
    221                 return compound;
     232                return {true, compound};
    222233        }
    223234
    224235        template< typename core_t >
    225236        template< template <class...> class container_t >
    226         container_t< ptr<Stmt> > ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) {
     237        template< typename object_t, typename super_t, typename field_t >
     238        void ast::Pass< core_t >::resultNstmt<container_t>::apply(object_t * object, field_t super_t::* field) {
     239                auto & container = object->*field;
     240                __pedantic_pass_assert( container.size() <= values.size() );
     241
     242                auto cit = enumerate(container).begin();
     243
     244                container_t<ptr<Stmt>> nvals;
     245                for(delta & d : values) {
     246                        if( d.is_old ) {
     247                                __pedantic_pass_assert( cit.idx <= d.old_idx );
     248                                std::advance( cit, d.old_idx - cit.idx );
     249                                nvals.push_back( std::move( (*cit).val) );
     250                        } else {
     251                                nvals.push_back( std::move(d.nval) );
     252                        }
     253                }
     254
     255                object->*field = std::move(nvals);
     256        }
     257
     258        template< typename core_t >
     259        template< template <class...> class container_t >
     260        ast::Pass< core_t >::resultNstmt<container_t> ast::Pass< core_t >::call_accept( const container_t< ptr<Stmt> > & statements ) {
    227261                __pedantic_pass_assert( __visit_children() );
    228262                if( statements.empty() ) return {};
     
    251285                pass_visitor_stats.avg->push(pass_visitor_stats.depth);
    252286
    253                 bool mutated = false;
    254                 container_t< ptr<Stmt> > new_kids;
    255                 for( const Stmt * stmt : statements ) {
     287                resultNstmt<container_t> new_kids;
     288                for( auto value : enumerate( statements ) ) {
    256289                        try {
     290                                size_t i = value.idx;
     291                                const Stmt * stmt = value.val;
    257292                                __pedantic_pass_assert( stmt );
    258293                                const ast::Stmt * new_stmt = stmt->accept( *this );
    259294                                assert( new_stmt );
    260                                 if(new_stmt != stmt ) mutated = true;
     295                                if(new_stmt != stmt ) { new_kids.differs = true; }
    261296
    262297                                // Make sure that it is either adding statements or declartions but not both
     
    268303
    269304                                // Take all the statements which should have gone after, N/A for first iteration
    270                                 __pass::take_all( std::back_inserter( new_kids ), decls_before, &mutated );
    271                                 __pass::take_all( std::back_inserter( new_kids ), stmts_before, &mutated );
     305                                new_kids.take_all( decls_before );
     306                                new_kids.take_all( stmts_before );
    272307
    273308                                // Now add the statement if there is one
    274                                 new_kids.emplace_back( new_stmt );
     309                                if(new_stmt != stmt) {
     310                                        new_kids.values.emplace_back( new_stmt, i, false );
     311                                } else {
     312                                        new_kids.values.emplace_back( nullptr, i, true );
     313                                }
    275314
    276315                                // Take all the declarations that go before
    277                                 __pass::take_all( std::back_inserter( new_kids ), decls_after, &mutated );
    278                                 __pass::take_all( std::back_inserter( new_kids ), stmts_after, &mutated );
     316                                new_kids.take_all( decls_after );
     317                                new_kids.take_all( stmts_after );
    279318                        }
    280319                        catch ( SemanticErrorException &e ) {
     
    285324                if ( !errors.isEmpty() ) { throw errors; }
    286325
    287                 return mutated ? new_kids : container_t< ptr<Stmt> >();
     326                return new_kids;
    288327        }
    289328
    290329        template< typename core_t >
    291330        template< template <class...> class container_t, typename node_t >
    292         container_t< ast::ptr<node_t> > ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) {
     331        template< typename object_t, typename super_t, typename field_t >
     332        void ast::Pass< core_t >::resultN<container_t, node_t>::apply(object_t * object, field_t super_t::* field) {
     333                auto & container = object->*field;
     334                __pedantic_pass_assert( container.size() == values.size() );
     335
     336                for(size_t i = 0; i < container.size(); i++) {
     337                        // Take all the elements that are different in 'values'
     338                        // and swap them into 'container'
     339                        if( values[i] != nullptr ) std::swap(container[i], values[i]);
     340                }
     341
     342                // Now the original containers should still have the unchanged values
     343                // but also contain the new values
     344        }
     345
     346        template< typename core_t >
     347        template< template <class...> class container_t, typename node_t >
     348        ast::Pass< core_t >::resultN<container_t, node_t> ast::Pass< core_t >::call_accept( const container_t< ast::ptr<node_t> > & container ) {
    293349                __pedantic_pass_assert( __visit_children() );
    294350                if( container.empty() ) return {};
     
    300356
    301357                bool mutated = false;
    302                 container_t< ast::ptr<node_t> > new_kids;
     358                container_t<ptr<node_t>> new_kids;
    303359                for ( const node_t * node : container ) {
    304360                        try {
    305361                                __pedantic_pass_assert( node );
    306362                                const node_t * new_stmt = strict_dynamic_cast< const node_t * >( node->accept( *this ) );
    307                                 if(new_stmt != node ) mutated = true;
    308 
    309                                 new_kids.emplace_back( new_stmt );
     363                                if(new_stmt != node ) {
     364                                        mutated = true;
     365                                        new_kids.emplace_back( new_stmt );
     366                                } else {
     367                                        new_kids.emplace_back( nullptr );
     368                                }
     369
    310370                        }
    311371                        catch( SemanticErrorException &e ) {
     
    313373                        }
    314374                }
     375
     376                __pedantic_pass_assert( new_kids.size() == container.size() );
    315377                pass_visitor_stats.depth--;
    316378                if ( ! errors.isEmpty() ) { throw errors; }
    317379
    318                 return mutated ? new_kids : container_t< ast::ptr<node_t> >();
     380                return ast::Pass< core_t >::resultN<container_t, node_t>{ mutated,  new_kids };
    319381        }
    320382
     
    334396                auto new_val = call_accept( old_val );
    335397
    336                 static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR");
    337 
    338                 if( __pass::differs(old_val, new_val) ) {
     398                static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value /* || std::is_same<int, decltype(old_val)>::value */, "ERROR");
     399
     400                if( new_val.differs ) {
    339401                        auto new_parent = __pass::mutate<core_t>(parent);
    340                         new_parent->*child = new_val;
     402                        new_val.apply(new_parent, child);
    341403                        parent = new_parent;
    342404                }
     
    360422                static_assert( !std::is_same<const ast::Node *, decltype(new_val)>::value || std::is_same<int, decltype(old_val)>::value, "ERROR");
    361423
    362                 if( __pass::differs(old_val, new_val) ) {
     424                if( new_val.differs ) {
    363425                        auto new_parent = __pass::mutate<core_t>(parent);
    364                         new_parent->*child = new_val;
     426                        new_val.apply( new_parent, child );
    365427                        parent = new_parent;
    366428                }
     
    452514        VISIT_START( node );
    453515
    454         VISIT(
     516        if ( __visit_children() ) {
    455517                {
    456518                        guard_symtab guard { *this };
     
    460522                maybe_accept( node, &ObjectDecl::bitfieldWidth );
    461523                maybe_accept( node, &ObjectDecl::attributes    );
    462         )
     524        }
    463525
    464526        __pass::symtab::addId( core, 0, node );
     
    475537        __pass::symtab::addId( core, 0, node );
    476538
    477         VISIT(maybe_accept( node, &FunctionDecl::withExprs );)
     539        if ( __visit_children() ) {
     540                maybe_accept( node, &FunctionDecl::withExprs );
     541        }
    478542        {
    479543                // with clause introduces a level of scope (for the with expression members).
     
    493557                        } };
    494558                        __pass::symtab::addId( core, 0, func );
    495                         VISIT(
     559                        if ( __visit_children() ) {
    496560                                // parameter declarations
    497561                                maybe_accept( node, &FunctionDecl::params );
     
    509573                                maybe_accept( node, &FunctionDecl::stmts );
    510574                                maybe_accept( node, &FunctionDecl::attributes );
    511                         )
     575                        }
    512576                }
    513577        }
     
    526590        __pass::symtab::addStructFwd( core, 0, node );
    527591
    528         VISIT({
     592        if ( __visit_children() ) {
    529593                guard_symtab guard { * this };
    530594                maybe_accept( node, &StructDecl::params     );
    531595                maybe_accept( node, &StructDecl::members    );
    532596                maybe_accept( node, &StructDecl::attributes );
    533         })
     597        }
    534598
    535599        // this addition replaces the forward declaration
     
    548612        __pass::symtab::addUnionFwd( core, 0, node );
    549613
    550         VISIT({
     614        if ( __visit_children() ) {
    551615                guard_symtab guard { * this };
    552616                maybe_accept( node, &UnionDecl::params     );
    553617                maybe_accept( node, &UnionDecl::members    );
    554618                maybe_accept( node, &UnionDecl::attributes );
    555         })
     619        }
    556620
    557621        __pass::symtab::addUnion( core, 0, node );
     
    568632        __pass::symtab::addEnum( core, 0, node );
    569633
    570         VISIT(
     634        if ( __visit_children() ) {
    571635                // unlike structs, traits, and unions, enums inject their members into the global scope
    572636                maybe_accept( node, &EnumDecl::params     );
    573637                maybe_accept( node, &EnumDecl::members    );
    574638                maybe_accept( node, &EnumDecl::attributes );
    575         )
     639        }
    576640
    577641        VISIT_END( Decl, node );
     
    584648        VISIT_START( node );
    585649
    586         VISIT({
     650        if ( __visit_children() ) {
    587651                guard_symtab guard { *this };
    588652                maybe_accept( node, &TraitDecl::params     );
    589653                maybe_accept( node, &TraitDecl::members    );
    590654                maybe_accept( node, &TraitDecl::attributes );
    591         })
     655        }
    592656
    593657        __pass::symtab::addTrait( core, 0, node );
     
    602666        VISIT_START( node );
    603667
    604         VISIT({
     668        if ( __visit_children() ) {
    605669                guard_symtab guard { *this };
    606670                maybe_accept( node, &TypeDecl::base   );
    607         })
     671        }
    608672
    609673        // see A NOTE ON THE ORDER OF TRAVERSAL, above
     
    612676        __pass::symtab::addType( core, 0, node );
    613677
    614         VISIT(
     678        if ( __visit_children() ) {
    615679                maybe_accept( node, &TypeDecl::assertions );
    616680
     
    619683                        maybe_accept( node, &TypeDecl::init );
    620684                }
    621         )
     685        }
    622686
    623687        VISIT_END( Decl, node );
     
    630694        VISIT_START( node );
    631695
    632         VISIT({
     696        if ( __visit_children() ) {
    633697                guard_symtab guard { *this };
    634698                maybe_accept( node, &TypedefDecl::base   );
    635         })
     699        }
    636700
    637701        __pass::symtab::addType( core, 0, node );
    638702
    639         VISIT( maybe_accept( node, &TypedefDecl::assertions ); )
     703        if ( __visit_children() ) {
     704                maybe_accept( node, &TypedefDecl::assertions );
     705        }
    640706
    641707        VISIT_END( Decl, node );
     
    648714        VISIT_START( node );
    649715
    650         VISIT(
     716        if ( __visit_children() ) {
    651717                maybe_accept( node, &AsmDecl::stmt );
    652         )
     718        }
    653719
    654720        VISIT_END( AsmDecl, node );
     
    661727        VISIT_START( node );
    662728
    663         VISIT(
     729        if ( __visit_children() ) {
    664730                maybe_accept( node, &DirectiveDecl::stmt );
    665         )
     731        }
    666732
    667733        VISIT_END( DirectiveDecl, node );
     
    674740        VISIT_START( node );
    675741
    676         VISIT(
     742        if ( __visit_children() ) {
    677743                maybe_accept( node, &StaticAssertDecl::cond );
    678744                maybe_accept( node, &StaticAssertDecl::msg  );
    679         )
     745        }
    680746
    681747        VISIT_END( StaticAssertDecl, node );
     
    687753const ast::CompoundStmt * ast::Pass< core_t >::visit( const ast::CompoundStmt * node ) {
    688754        VISIT_START( node );
    689         VISIT(
     755
     756        if ( __visit_children() ) {
    690757                // Do not enter (or leave) a new scope if atFunctionTop. Remember to save the result.
    691758                auto guard1 = makeFuncGuard( [this, enterScope = !this->atFunctionTop]() {
     
    704771                guard_scope guard3 { *this };
    705772                maybe_accept( node, &CompoundStmt::kids );
    706         )
     773        }
     774
    707775        VISIT_END( CompoundStmt, node );
    708776}
     
    714782        VISIT_START( node );
    715783
    716         VISIT(
     784        if ( __visit_children() ) {
    717785                maybe_accept( node, &ExprStmt::expr );
    718         )
     786        }
    719787
    720788        VISIT_END( Stmt, node );
     
    727795        VISIT_START( node )
    728796
    729         VISIT(
     797        if ( __visit_children() ) {
    730798                maybe_accept( node, &AsmStmt::instruction );
    731799                maybe_accept( node, &AsmStmt::output      );
    732800                maybe_accept( node, &AsmStmt::input       );
    733801                maybe_accept( node, &AsmStmt::clobber     );
    734         )
     802        }
    735803
    736804        VISIT_END( Stmt, node );
     
    752820        VISIT_START( node );
    753821
    754         VISIT({
     822        if ( __visit_children() ) {
    755823                // if statements introduce a level of scope (for the initialization)
    756824                guard_symtab guard { *this };
    757825                maybe_accept( node, &IfStmt::inits    );
    758826                maybe_accept( node, &IfStmt::cond     );
    759                 maybe_accept_as_compound( node, &IfStmt::thenPart );
    760                 maybe_accept_as_compound( node, &IfStmt::elsePart );
    761         })
     827                maybe_accept_as_compound( node, &IfStmt::then );
     828                maybe_accept_as_compound( node, &IfStmt::else_ );
     829        }
    762830
    763831        VISIT_END( Stmt, node );
     
    765833
    766834//--------------------------------------------------------------------------
    767 // WhileStmt
    768 template< typename core_t >
    769 const ast::Stmt * ast::Pass< core_t >::visit( const ast::WhileStmt * node ) {
    770         VISIT_START( node );
    771 
    772         VISIT({
     835// WhileDoStmt
     836template< typename core_t >
     837const ast::Stmt * ast::Pass< core_t >::visit( const ast::WhileDoStmt * node ) {
     838        VISIT_START( node );
     839
     840        if ( __visit_children() ) {
    773841                // while statements introduce a level of scope (for the initialization)
    774842                guard_symtab guard { *this };
    775                 maybe_accept( node, &WhileStmt::inits );
    776                 maybe_accept( node, &WhileStmt::cond  );
    777                 maybe_accept_as_compound( node, &WhileStmt::body  );
    778         })
     843                maybe_accept( node, &WhileDoStmt::inits );
     844                maybe_accept( node, &WhileDoStmt::cond  );
     845                maybe_accept_as_compound( node, &WhileDoStmt::body  );
     846        }
    779847
    780848        VISIT_END( Stmt, node );
     
    787855        VISIT_START( node );
    788856
    789         VISIT({
     857        if ( __visit_children() ) {
    790858                // for statements introduce a level of scope (for the initialization)
    791859                guard_symtab guard { *this };
     
    795863                maybe_accept( node, &ForStmt::inc   );
    796864                maybe_accept_as_compound( node, &ForStmt::body  );
    797         })
     865        }
    798866
    799867        VISIT_END( Stmt, node );
     
    806874        VISIT_START( node );
    807875
    808         VISIT(
     876        if ( __visit_children() ) {
    809877                maybe_accept( node, &SwitchStmt::cond  );
    810878                maybe_accept( node, &SwitchStmt::stmts );
    811         )
     879        }
    812880
    813881        VISIT_END( Stmt, node );
     
    820888        VISIT_START( node );
    821889
    822         VISIT(
     890        if ( __visit_children() ) {
    823891                maybe_accept( node, &CaseStmt::cond  );
    824892                maybe_accept( node, &CaseStmt::stmts );
    825         )
     893        }
    826894
    827895        VISIT_END( Stmt, node );
     
    842910        VISIT_START( node );
    843911
    844         VISIT(
     912        if ( __visit_children() ) {
    845913                maybe_accept( node, &ReturnStmt::expr );
    846         )
     914        }
    847915
    848916        VISIT_END( Stmt, node );
     
    855923        VISIT_START( node );
    856924
    857         VISIT(
     925        if ( __visit_children() ) {
    858926                maybe_accept( node, &ThrowStmt::expr   );
    859927                maybe_accept( node, &ThrowStmt::target );
    860         )
     928        }
    861929
    862930        VISIT_END( Stmt, node );
     
    869937        VISIT_START( node );
    870938
    871         VISIT(
     939        if ( __visit_children() ) {
    872940                maybe_accept( node, &TryStmt::body     );
    873941                maybe_accept( node, &TryStmt::handlers );
    874942                maybe_accept( node, &TryStmt::finally  );
    875         )
     943        }
    876944
    877945        VISIT_END( Stmt, node );
     
    884952        VISIT_START( node );
    885953
    886         VISIT({
     954        if ( __visit_children() ) {
    887955                // catch statements introduce a level of scope (for the caught exception)
    888956                guard_symtab guard { *this };
     
    890958                maybe_accept( node, &CatchStmt::cond );
    891959                maybe_accept_as_compound( node, &CatchStmt::body );
    892         })
     960        }
    893961
    894962        VISIT_END( Stmt, node );
     
    901969        VISIT_START( node );
    902970
    903         VISIT(
     971        if ( __visit_children() ) {
    904972                maybe_accept( node, &FinallyStmt::body );
    905         )
     973        }
    906974
    907975        VISIT_END( Stmt, node );
     
    914982        VISIT_START( node );
    915983
    916         VISIT(
     984        if ( __visit_children() ) {
    917985                maybe_accept( node, &SuspendStmt::then   );
    918         )
     986        }
    919987
    920988        VISIT_END( Stmt, node );
     
    9341002                // }
    9351003
    936         VISIT({
     1004        if ( __visit_children() ) {
    9371005                std::vector<WaitForStmt::Clause> new_clauses;
    9381006                new_clauses.reserve( node->clauses.size() );
     
    9421010                        const Expr * func = clause.target.func ? clause.target.func->accept(*this) : nullptr;
    9431011                        if(func != clause.target.func) mutated = true;
     1012                        else func = nullptr;
    9441013
    9451014                        std::vector<ptr<Expr>> new_args;
     
    9471016                        for( const auto & arg : clause.target.args ) {
    9481017                                auto a = arg->accept(*this);
    949                                 new_args.push_back( a );
    950                                 if( a != arg ) mutated = true;
     1018                                if( a != arg ) {
     1019                                        mutated = true;
     1020                                        new_args.push_back( a );
     1021                                } else
     1022                                        new_args.push_back( nullptr );
    9511023                        }
    9521024
    9531025                        const Stmt * stmt = clause.stmt ? clause.stmt->accept(*this) : nullptr;
    9541026                        if(stmt != clause.stmt) mutated = true;
     1027                        else stmt = nullptr;
    9551028
    9561029                        const Expr * cond = clause.cond ? clause.cond->accept(*this) : nullptr;
    9571030                        if(cond != clause.cond) mutated = true;
     1031                        else cond = nullptr;
    9581032
    9591033                        new_clauses.push_back( WaitForStmt::Clause{ {func, std::move(new_args) }, stmt, cond } );
     
    9621036                if(mutated) {
    9631037                        auto n = __pass::mutate<core_t>(node);
    964                         n->clauses = std::move( new_clauses );
     1038                        for(size_t i = 0; i < new_clauses.size(); i++) {
     1039                                if(new_clauses.at(i).target.func != nullptr) std::swap(n->clauses.at(i).target.func, new_clauses.at(i).target.func);
     1040
     1041                                for(size_t j = 0; j < new_clauses.at(i).target.args.size(); j++) {
     1042                                        if(new_clauses.at(i).target.args.at(j) != nullptr) std::swap(n->clauses.at(i).target.args.at(j), new_clauses.at(i).target.args.at(j));
     1043                                }
     1044
     1045                                if(new_clauses.at(i).stmt != nullptr) std::swap(n->clauses.at(i).stmt, new_clauses.at(i).stmt);
     1046                                if(new_clauses.at(i).cond != nullptr) std::swap(n->clauses.at(i).cond, new_clauses.at(i).cond);
     1047                        }
    9651048                        node = n;
    9661049                }
    967         })
     1050        }
    9681051
    9691052        #define maybe_accept(field) \
    9701053                if(node->field) { \
    9711054                        auto nval = call_accept( node->field ); \
    972                         if(nval != node->field ) { \
     1055                        if(nval.differs ) { \
    9731056                                auto nparent = __pass::mutate<core_t>(node); \
    974                                 nparent->field = nval; \
     1057                                nparent->field = nval.value; \
    9751058                                node = nparent; \
    9761059                        } \
    9771060                }
    9781061
    979         VISIT(
     1062        if ( __visit_children() ) {
    9801063                maybe_accept( timeout.time );
    9811064                maybe_accept( timeout.stmt );
     
    9831066                maybe_accept( orElse.stmt  );
    9841067                maybe_accept( orElse.cond  );
    985         )
     1068        }
    9861069
    9871070        #undef maybe_accept
     
    9961079        VISIT_START( node );
    9971080
    998         VISIT(
     1081        if ( __visit_children() ) {
    9991082                maybe_accept( node, &WithStmt::exprs );
    10001083                {
     
    10041087                        maybe_accept( node, &WithStmt::stmt );
    10051088                }
    1006         )
     1089        }
     1090
    10071091        VISIT_END( Stmt, node );
    10081092}
     
    10221106        VISIT_START( node );
    10231107
    1024         VISIT(
     1108        if ( __visit_children() ) {
    10251109                maybe_accept( node, &DeclStmt::decl );
    1026         )
     1110        }
    10271111
    10281112        VISIT_END( Stmt, node );
     
    10371121        // For now this isn't visited, it is unclear if this causes problem
    10381122        // if all tests are known to pass, remove this code
    1039         VISIT(
     1123        if ( __visit_children() ) {
    10401124                maybe_accept( node, &ImplicitCtorDtorStmt::callStmt );
    1041         )
     1125        }
    10421126
    10431127        VISIT_END( Stmt, node );
     
    10501134        VISIT_START( node );
    10511135
    1052         VISIT({
     1136        if ( __visit_children() ) {
    10531137                // mutex statements introduce a level of scope (for the initialization)
    10541138                guard_symtab guard { *this };
    10551139                maybe_accept( node, &MutexStmt::stmt );
    10561140                maybe_accept( node, &MutexStmt::mutexObjs );
    1057         })
     1141        }
    10581142
    10591143        VISIT_END( Stmt, node );
     
    10661150        VISIT_START( node );
    10671151
    1068         VISIT(
     1152        if ( __visit_children() ) {
    10691153                {
    10701154                        guard_symtab guard { *this };
     
    10731157                maybe_accept( node, &ApplicationExpr::func );
    10741158                maybe_accept( node, &ApplicationExpr::args );
    1075         )
     1159        }
    10761160
    10771161        VISIT_END( Expr, node );
     
    10841168        VISIT_START( node );
    10851169
    1086         VISIT(
     1170        if ( __visit_children() ) {
    10871171                {
    10881172                        guard_symtab guard { *this };
     
    10911175
    10921176                maybe_accept( node, &UntypedExpr::args );
    1093         )
     1177        }
    10941178
    10951179        VISIT_END( Expr, node );
     
    11021186        VISIT_START( node );
    11031187
    1104         VISIT({
     1188        if ( __visit_children() ) {
    11051189                guard_symtab guard { *this };
    11061190                maybe_accept( node, &NameExpr::result );
    1107         })
     1191        }
    11081192
    11091193        VISIT_END( Expr, node );
     
    11161200        VISIT_START( node );
    11171201
    1118         VISIT({
     1202        if ( __visit_children() ) {
     1203                {
    11191204                        guard_symtab guard { *this };
    11201205                        maybe_accept( node, &CastExpr::result );
    11211206                }
    11221207                maybe_accept( node, &CastExpr::arg );
    1123         )
     1208        }
    11241209
    11251210        VISIT_END( Expr, node );
     
    11321217        VISIT_START( node );
    11331218
    1134         VISIT({
     1219        if ( __visit_children() ) {
     1220                {
    11351221                        guard_symtab guard { *this };
    11361222                        maybe_accept( node, &KeywordCastExpr::result );
    11371223                }
    11381224                maybe_accept( node, &KeywordCastExpr::arg );
    1139         )
     1225        }
    11401226
    11411227        VISIT_END( Expr, node );
     
    11481234        VISIT_START( node );
    11491235
    1150         VISIT({
     1236        if ( __visit_children() ) {
     1237                {
    11511238                        guard_symtab guard { *this };
    11521239                        maybe_accept( node, &VirtualCastExpr::result );
    11531240                }
    11541241                maybe_accept( node, &VirtualCastExpr::arg );
    1155         )
     1242        }
    11561243
    11571244        VISIT_END( Expr, node );
     
    11641251        VISIT_START( node );
    11651252
    1166         VISIT({
     1253        if ( __visit_children() ) {
     1254                {
    11671255                        guard_symtab guard { *this };
    11681256                        maybe_accept( node, &AddressExpr::result );
    11691257                }
    11701258                maybe_accept( node, &AddressExpr::arg );
    1171         )
     1259        }
    11721260
    11731261        VISIT_END( Expr, node );
     
    11801268        VISIT_START( node );
    11811269
    1182         VISIT({
     1270        if ( __visit_children() ) {
    11831271                guard_symtab guard { *this };
    11841272                maybe_accept( node, &LabelAddressExpr::result );
    1185         })
     1273        }
    11861274
    11871275        VISIT_END( Expr, node );
     
    11941282        VISIT_START( node );
    11951283
    1196         VISIT({
     1284        if ( __visit_children() ) {
     1285                {
    11971286                        guard_symtab guard { *this };
    11981287                        maybe_accept( node, &UntypedMemberExpr::result );
     
    12001289                maybe_accept( node, &UntypedMemberExpr::aggregate );
    12011290                maybe_accept( node, &UntypedMemberExpr::member    );
    1202         )
     1291        }
    12031292
    12041293        VISIT_END( Expr, node );
     
    12111300        VISIT_START( node );
    12121301
    1213         VISIT({
     1302        if ( __visit_children() ) {
     1303                {
    12141304                        guard_symtab guard { *this };
    12151305                        maybe_accept( node, &MemberExpr::result );
    12161306                }
    12171307                maybe_accept( node, &MemberExpr::aggregate );
    1218         )
     1308        }
    12191309
    12201310        VISIT_END( Expr, node );
     
    12271317        VISIT_START( node );
    12281318
    1229         VISIT({
     1319        if ( __visit_children() ) {
    12301320                guard_symtab guard { *this };
    12311321                maybe_accept( node, &VariableExpr::result );
    1232         })
     1322        }
    12331323
    12341324        VISIT_END( Expr, node );
     
    12411331        VISIT_START( node );
    12421332
    1243         VISIT({
     1333        if ( __visit_children() ) {
    12441334                guard_symtab guard { *this };
    12451335                maybe_accept( node, &ConstantExpr::result );
    1246         })
     1336        }
    12471337
    12481338        VISIT_END( Expr, node );
     
    12551345        VISIT_START( node );
    12561346
    1257         VISIT({
     1347        if ( __visit_children() ) {
     1348                {
    12581349                        guard_symtab guard { *this };
    12591350                        maybe_accept( node, &SizeofExpr::result );
     
    12641355                        maybe_accept( node, &SizeofExpr::expr );
    12651356                }
    1266         )
     1357        }
    12671358
    12681359        VISIT_END( Expr, node );
     
    12751366        VISIT_START( node );
    12761367
    1277         VISIT({
     1368        if ( __visit_children() ) {
     1369                {
    12781370                        guard_symtab guard { *this };
    12791371                        maybe_accept( node, &AlignofExpr::result );
     
    12841376                        maybe_accept( node, &AlignofExpr::expr );
    12851377                }
    1286         )
     1378        }
    12871379
    12881380        VISIT_END( Expr, node );
     
    12951387        VISIT_START( node );
    12961388
    1297         VISIT({
     1389        if ( __visit_children() ) {
     1390                {
    12981391                        guard_symtab guard { *this };
    12991392                        maybe_accept( node, &UntypedOffsetofExpr::result );
    13001393                }
    13011394                maybe_accept( node, &UntypedOffsetofExpr::type   );
    1302         )
     1395        }
    13031396
    13041397        VISIT_END( Expr, node );
     
    13111404        VISIT_START( node );
    13121405
    1313         VISIT({
     1406        if ( __visit_children() ) {
     1407                {
    13141408                        guard_symtab guard { *this };
    13151409                        maybe_accept( node, &OffsetofExpr::result );
    13161410                }
    13171411                maybe_accept( node, &OffsetofExpr::type   );
    1318         )
     1412        }
    13191413
    13201414        VISIT_END( Expr, node );
     
    13271421        VISIT_START( node );
    13281422
    1329         VISIT({
     1423        if ( __visit_children() ) {
     1424                {
    13301425                        guard_symtab guard { *this };
    13311426                        maybe_accept( node, &OffsetPackExpr::result );
    13321427                }
    13331428                maybe_accept( node, &OffsetPackExpr::type   );
    1334         )
     1429        }
    13351430
    13361431        VISIT_END( Expr, node );
     
    13431438        VISIT_START( node );
    13441439
    1345         VISIT({
     1440        if ( __visit_children() ) {
     1441                {
    13461442                        guard_symtab guard { *this };
    13471443                        maybe_accept( node, &LogicalExpr::result );
     
    13491445                maybe_accept( node, &LogicalExpr::arg1 );
    13501446                maybe_accept( node, &LogicalExpr::arg2 );
    1351         )
     1447        }
    13521448
    13531449        VISIT_END( Expr, node );
     
    13601456        VISIT_START( node );
    13611457
    1362         VISIT({
     1458        if ( __visit_children() ) {
     1459                {
    13631460                        guard_symtab guard { *this };
    13641461                        maybe_accept( node, &ConditionalExpr::result );
     
    13671464                maybe_accept( node, &ConditionalExpr::arg2 );
    13681465                maybe_accept( node, &ConditionalExpr::arg3 );
    1369         )
     1466        }
    13701467
    13711468        VISIT_END( Expr, node );
     
    13781475        VISIT_START( node );
    13791476
    1380         VISIT({
     1477        if ( __visit_children() ) {
     1478                {
    13811479                        guard_symtab guard { *this };
    13821480                        maybe_accept( node, &CommaExpr::result );
     
    13841482                maybe_accept( node, &CommaExpr::arg1 );
    13851483                maybe_accept( node, &CommaExpr::arg2 );
    1386         )
     1484        }
    13871485
    13881486        VISIT_END( Expr, node );
     
    13951493        VISIT_START( node );
    13961494
    1397         VISIT({
     1495        if ( __visit_children() ) {
     1496                {
    13981497                        guard_symtab guard { *this };
    13991498                        maybe_accept( node, &TypeExpr::result );
    14001499                }
    14011500                maybe_accept( node, &TypeExpr::type );
    1402         )
     1501        }
    14031502
    14041503        VISIT_END( Expr, node );
     
    14111510        VISIT_START( node );
    14121511
    1413         VISIT({
     1512        if ( __visit_children() ) {
     1513                {
    14141514                        guard_symtab guard { *this };
    14151515                        maybe_accept( node, &AsmExpr::result );
     
    14171517                maybe_accept( node, &AsmExpr::constraint );
    14181518                maybe_accept( node, &AsmExpr::operand    );
    1419         )
     1519        }
    14201520
    14211521        VISIT_END( Expr, node );
     
    14281528        VISIT_START( node );
    14291529
    1430         VISIT({
     1530        if ( __visit_children() ) {
     1531                {
    14311532                        guard_symtab guard { *this };
    14321533                        maybe_accept( node, &ImplicitCopyCtorExpr::result );
    14331534                }
    14341535                maybe_accept( node, &ImplicitCopyCtorExpr::callExpr    );
    1435         )
     1536        }
    14361537
    14371538        VISIT_END( Expr, node );
     
    14441545        VISIT_START( node );
    14451546
    1446         VISIT({
     1547        if ( __visit_children() ) {
     1548                {
    14471549                        guard_symtab guard { *this };
    14481550                        maybe_accept( node, &ConstructorExpr::result );
    14491551                }
    14501552                maybe_accept( node, &ConstructorExpr::callExpr );
    1451         )
     1553        }
    14521554
    14531555        VISIT_END( Expr, node );
     
    14601562        VISIT_START( node );
    14611563
    1462         VISIT({
     1564        if ( __visit_children() ) {
     1565                {
    14631566                        guard_symtab guard { *this };
    14641567                        maybe_accept( node, &CompoundLiteralExpr::result );
    14651568                }
    14661569                maybe_accept( node, &CompoundLiteralExpr::init );
    1467         )
     1570        }
    14681571
    14691572        VISIT_END( Expr, node );
     
    14761579        VISIT_START( node );
    14771580
    1478         VISIT({
     1581        if ( __visit_children() ) {
     1582                {
    14791583                        guard_symtab guard { *this };
    14801584                        maybe_accept( node, &RangeExpr::result );
     
    14821586                maybe_accept( node, &RangeExpr::low    );
    14831587                maybe_accept( node, &RangeExpr::high   );
    1484         )
     1588        }
    14851589
    14861590        VISIT_END( Expr, node );
     
    14931597        VISIT_START( node );
    14941598
    1495         VISIT({
     1599        if ( __visit_children() ) {
     1600                {
    14961601                        guard_symtab guard { *this };
    14971602                        maybe_accept( node, &UntypedTupleExpr::result );
    14981603                }
    14991604                maybe_accept( node, &UntypedTupleExpr::exprs  );
    1500         )
     1605        }
    15011606
    15021607        VISIT_END( Expr, node );
     
    15091614        VISIT_START( node );
    15101615
    1511         VISIT({
     1616        if ( __visit_children() ) {
     1617                {
    15121618                        guard_symtab guard { *this };
    15131619                        maybe_accept( node, &TupleExpr::result );
    15141620                }
    15151621                maybe_accept( node, &TupleExpr::exprs  );
    1516         )
     1622        }
    15171623
    15181624        VISIT_END( Expr, node );
     
    15251631        VISIT_START( node );
    15261632
    1527         VISIT({
     1633        if ( __visit_children() ) {
     1634                {
    15281635                        guard_symtab guard { *this };
    15291636                        maybe_accept( node, &TupleIndexExpr::result );
    15301637                }
    15311638                maybe_accept( node, &TupleIndexExpr::tuple  );
    1532         )
     1639        }
    15331640
    15341641        VISIT_END( Expr, node );
     
    15411648        VISIT_START( node );
    15421649
    1543         VISIT({
     1650        if ( __visit_children() ) {
     1651                {
    15441652                        guard_symtab guard { *this };
    15451653                        maybe_accept( node, &TupleAssignExpr::result );
    15461654                }
    15471655                maybe_accept( node, &TupleAssignExpr::stmtExpr );
    1548         )
     1656        }
    15491657
    15501658        VISIT_END( Expr, node );
     
    15571665        VISIT_START( node );
    15581666
    1559         VISIT(// don't want statements from outer CompoundStmts to be added to this StmtExpr
     1667        if ( __visit_children() ) {
     1668                // don't want statements from outer CompoundStmts to be added to this StmtExpr
    15601669                // get the stmts that will need to be spliced in
    15611670                auto stmts_before = __pass::stmtsToAddBefore( core, 0);
     
    15741683                maybe_accept( node, &StmtExpr::returnDecls );
    15751684                maybe_accept( node, &StmtExpr::dtors       );
    1576         )
     1685        }
    15771686
    15781687        VISIT_END( Expr, node );
     
    15851694        VISIT_START( node );
    15861695
    1587         VISIT({
     1696        if ( __visit_children() ) {
     1697                {
    15881698                        guard_symtab guard { *this };
    15891699                        maybe_accept( node, &UniqueExpr::result );
    15901700                }
    15911701                maybe_accept( node, &UniqueExpr::expr   );
    1592         )
     1702        }
    15931703
    15941704        VISIT_END( Expr, node );
     
    16011711        VISIT_START( node );
    16021712
    1603         VISIT({
     1713        if ( __visit_children() ) {
     1714                {
    16041715                        guard_symtab guard { *this };
    16051716                        maybe_accept( node, &UntypedInitExpr::result );
     
    16071718                maybe_accept( node, &UntypedInitExpr::expr   );
    16081719                // not currently visiting initAlts, but this doesn't matter since this node is only used in the resolver.
    1609         )
     1720        }
    16101721
    16111722        VISIT_END( Expr, node );
     
    16181729        VISIT_START( node );
    16191730
    1620         VISIT({
     1731        if ( __visit_children() ) {
     1732                {
    16211733                        guard_symtab guard { *this };
    16221734                        maybe_accept( node, &InitExpr::result );
     
    16241736                maybe_accept( node, &InitExpr::expr   );
    16251737                maybe_accept( node, &InitExpr::designation );
    1626         )
     1738        }
    16271739
    16281740        VISIT_END( Expr, node );
     
    16351747        VISIT_START( node );
    16361748
    1637         VISIT({
     1749        if ( __visit_children() ) {
     1750                {
    16381751                        guard_symtab guard { *this };
    16391752                        maybe_accept( node, &DeletedExpr::result );
     
    16411754                maybe_accept( node, &DeletedExpr::expr );
    16421755                // don't visit deleteStmt, because it is a pointer to somewhere else in the tree.
    1643         )
     1756        }
    16441757
    16451758        VISIT_END( Expr, node );
     
    16521765        VISIT_START( node );
    16531766
    1654         VISIT({
     1767        if ( __visit_children() ) {
     1768                {
    16551769                        guard_symtab guard { *this };
    16561770                        maybe_accept( node, &DefaultArgExpr::result );
    16571771                }
    16581772                maybe_accept( node, &DefaultArgExpr::expr );
    1659         )
     1773        }
    16601774
    16611775        VISIT_END( Expr, node );
     
    16681782        VISIT_START( node );
    16691783
    1670         VISIT({
     1784        if ( __visit_children() ) {
     1785                {
    16711786                        guard_symtab guard { *this };
    16721787                        maybe_accept( node, &GenericExpr::result );
     
    16971812                        node = n;
    16981813                }
    1699         )
     1814        }
    17001815
    17011816        VISIT_END( Expr, node );
     
    17261841        VISIT_START( node );
    17271842
    1728         VISIT(
     1843        if ( __visit_children() ) {
    17291844                // xxx - should PointerType visit/mutate dimension?
    17301845                maybe_accept( node, &PointerType::base );
    1731         )
     1846        }
    17321847
    17331848        VISIT_END( Type, node );
     
    17401855        VISIT_START( node );
    17411856
    1742         VISIT(
     1857        if ( __visit_children() ) {
    17431858                maybe_accept( node, &ArrayType::dimension );
    17441859                maybe_accept( node, &ArrayType::base );
    1745         )
     1860        }
    17461861
    17471862        VISIT_END( Type, node );
     
    17541869        VISIT_START( node );
    17551870
    1756         VISIT(
     1871        if ( __visit_children() ) {
    17571872                maybe_accept( node, &ReferenceType::base );
    1758         )
     1873        }
    17591874
    17601875        VISIT_END( Type, node );
     
    17671882        VISIT_START( node );
    17681883
    1769         VISIT(
     1884        if ( __visit_children() ) {
    17701885                maybe_accept( node, &QualifiedType::parent );
    17711886                maybe_accept( node, &QualifiedType::child );
    1772         )
     1887        }
    17731888
    17741889        VISIT_END( Type, node );
     
    17811896        VISIT_START( node );
    17821897
    1783         VISIT({
     1898        if ( __visit_children() ) {
    17841899                // guard_forall_subs forall_guard { *this, node };
    17851900                // mutate_forall( node );
     
    17871902                maybe_accept( node, &FunctionType::returns );
    17881903                maybe_accept( node, &FunctionType::params  );
    1789         })
     1904        }
    17901905
    17911906        VISIT_END( Type, node );
     
    18001915        __pass::symtab::addStruct( core, 0, node->name );
    18011916
    1802         VISIT({
     1917        if ( __visit_children() ) {
    18031918                guard_symtab guard { *this };
    18041919                maybe_accept( node, &StructInstType::params );
    1805         })
     1920        }
    18061921
    18071922        VISIT_END( Type, node );
     
    18161931        __pass::symtab::addUnion( core, 0, node->name );
    18171932
    1818         VISIT({
     1933        if ( __visit_children() ) {
    18191934                guard_symtab guard { *this };
    18201935                maybe_accept( node, &UnionInstType::params );
    1821         })
     1936        }
    18221937
    18231938        VISIT_END( Type, node );
     
    18301945        VISIT_START( node );
    18311946
    1832         VISIT({
     1947        if ( __visit_children() ) {
    18331948                maybe_accept( node, &EnumInstType::params );
    1834         })
     1949        }
    18351950
    18361951        VISIT_END( Type, node );
     
    18431958        VISIT_START( node );
    18441959
    1845         VISIT({
     1960        if ( __visit_children() ) {
    18461961                maybe_accept( node, &TraitInstType::params );
    1847         })
     1962        }
    18481963
    18491964        VISIT_END( Type, node );
     
    18561971        VISIT_START( node );
    18571972
    1858         VISIT(
     1973        if ( __visit_children() ) {
    18591974                {
    18601975                        maybe_accept( node, &TypeInstType::params );
     
    18621977                // ensure that base re-bound if doing substitution
    18631978                __pass::forall::replace( core, 0, node );
    1864         )
     1979        }
    18651980
    18661981        VISIT_END( Type, node );
     
    18731988        VISIT_START( node );
    18741989
    1875         VISIT(
     1990        if ( __visit_children() ) {
    18761991                maybe_accept( node, &TupleType::types );
    18771992                maybe_accept( node, &TupleType::members );
    1878         )
     1993        }
    18791994
    18801995        VISIT_END( Type, node );
     
    18872002        VISIT_START( node );
    18882003
    1889         VISIT(
     2004        if ( __visit_children() ) {
    18902005                maybe_accept( node, &TypeofType::expr );
    1891         )
     2006        }
    18922007
    18932008        VISIT_END( Type, node );
     
    19002015        VISIT_START( node );
    19012016
    1902         VISIT(
     2017        if ( __visit_children() ) {
    19032018                maybe_accept( node, &VTableType::base );
    1904         )
     2019        }
    19052020
    19062021        VISIT_END( Type, node );
     
    19502065        VISIT_START( node );
    19512066
    1952         VISIT( maybe_accept( node, &Designation::designators ); )
     2067        if ( __visit_children() ) {
     2068                maybe_accept( node, &Designation::designators );
     2069        }
    19532070
    19542071        VISIT_END( Designation, node );
     
    19612078        VISIT_START( node );
    19622079
    1963         VISIT(
     2080        if ( __visit_children() ) {
    19642081                maybe_accept( node, &SingleInit::value );
    1965         )
     2082        }
    19662083
    19672084        VISIT_END( Init, node );
     
    19742091        VISIT_START( node );
    19752092
    1976         VISIT(
     2093        if ( __visit_children() ) {
    19772094                maybe_accept( node, &ListInit::designations );
    19782095                maybe_accept( node, &ListInit::initializers );
    1979         )
     2096        }
    19802097
    19812098        VISIT_END( Init, node );
     
    19882105        VISIT_START( node );
    19892106
    1990         VISIT(
     2107        if ( __visit_children() ) {
    19912108                maybe_accept( node, &ConstructorInit::ctor );
    19922109                maybe_accept( node, &ConstructorInit::dtor );
    19932110                maybe_accept( node, &ConstructorInit::init );
    1994         )
     2111        }
    19952112
    19962113        VISIT_END( Init, node );
     
    20032120        VISIT_START( node );
    20042121
    2005         VISIT(
     2122        if ( __visit_children() ) {
    20062123                maybe_accept( node, &Attribute::params );
    2007         )
     2124        }
    20082125
    20092126        VISIT_END( Attribute, node );
     
    20162133        VISIT_START( node );
    20172134
    2018         VISIT(
     2135        if ( __visit_children() ) {
    20192136                {
    20202137                        bool mutated = false;
     
    20322149                        }
    20332150                }
    2034         )
     2151        }
    20352152
    20362153        VISIT_END( TypeSubstitution, node );
     
    20382155
    20392156#undef VISIT_START
    2040 #undef VISIT
    20412157#undef VISIT_END
  • src/AST/Print.cpp

    r97c215f rf5a51db  
    333333                print( node->funcSpec );
    334334
    335                 if ( node->type ) {
     335
     336
     337                if ( node->type && node->isTypeFixed ) {
    336338                        node->type->accept( *this );
    337339                } else {
    338                         os << "untyped entity";
     340                        if (!node->type_params.empty()) {
     341                                os << "forall" << endl;
     342                                ++indent;
     343                                printAll(node->type_params);
     344                                os << indent;
     345                                --indent;
     346
     347                                if (!node->assertions.empty()) {
     348                                        os << "with assertions" << endl;
     349                                        ++indent;
     350                                        printAll(node->assertions);
     351                                        os << indent;
     352                                        --indent;
     353                                }
     354                        }
     355
     356                        os << "function" << endl;
     357                        if ( ! node->params.empty() ) {
     358                                os << indent << "... with parameters" << endl;
     359                                ++indent;
     360                                printAll( node->params );
     361                                if ( node->type->isVarArgs ) {
     362                                        os << indent << "and a variable number of other arguments" << endl;
     363                                }
     364                                --indent;
     365                        } else if ( node->type->isVarArgs ) {
     366                                os << indent+1 << "accepting unspecified arguments" << endl;
     367                        }
     368
     369                        os << indent << "... returning";
     370                        if ( node->returns.empty() ) {
     371                                os << " nothing" << endl;
     372                        } else {
     373                                os << endl;
     374                                ++indent;
     375                                printAll( node->returns );
     376                                --indent;
     377                        }
    339378                }
    340379
     
    472511                ++indent;
    473512                os << indent;
    474                 safe_print( node->thenPart );
    475                 --indent;
    476 
    477                 if ( node->elsePart != 0 ) {
     513                safe_print( node->then );
     514                --indent;
     515
     516                if ( node->else_ != 0 ) {
    478517                        os << indent << "... else:" << endl;
    479518                        ++indent;
    480519                        os << indent;
    481                         node->elsePart->accept( *this );
     520                        node->else_->accept( *this );
    482521                        --indent;
    483522                } // if
     
    485524        }
    486525
    487         virtual const ast::Stmt * visit( const ast::WhileStmt * node ) override final {
     526        virtual const ast::Stmt * visit( const ast::WhileDoStmt * node ) override final {
    488527                if ( node->isDoWhile ) { os << "Do-"; }
    489528                os << "While on condition:" << endl;
  • src/AST/Stmt.cpp

    r97c215f rf5a51db  
    99// Author           : Aaron B. Moss
    1010// Created On       : Wed May  8 13:00:00 2019
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Wed May 15 15:53:00 2019
    13 // Update Count     : 2
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 19:01:20 2022
     13// Update Count     : 3
    1414//
    1515
     
    5656
    5757// --- BranchStmt
    58 BranchStmt::BranchStmt( const CodeLocation& loc, Kind kind, Label target, std::vector<Label>&& labels )
    59 : Stmt(loc, std::move(labels)), originalTarget(target), target(target), kind(kind) {
     58BranchStmt::BranchStmt( const CodeLocation& loc, Kind kind, Label target, const std::vector<Label>&& labels )
     59                : Stmt(loc, std::move(labels)), originalTarget(target), target(target), kind(kind) {
    6060        // Make sure a syntax error hasn't slipped through.
    6161        assert( Goto != kind || !target.empty() );
  • src/AST/Stmt.hpp

    r97c215f rf5a51db  
    99// Author           : Aaron B. Moss
    1010// Created On       : Wed May  8 13:00:00 2019
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Fri May 17 12:45:00 2019
    13 // Update Count     : 5
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Wed Feb  2 20:06:41 2022
     13// Update Count     : 34
    1414//
    1515
     
    1717
    1818#include <list>
    19 #include <utility>                // for move
     19#include <utility>                                                                              // for move
    2020#include <vector>
    2121
    2222#include "Label.hpp"
    23 #include "Node.hpp"               // for node, ptr
     23#include "Node.hpp"                                                                             // for node, ptr
    2424#include "ParseNode.hpp"
    2525#include "Visitor.hpp"
     
    2727
    2828// Must be included in *all* AST classes; should be #undef'd at the end of the file
    29 #define MUTATE_FRIEND \
     29#define MUTATE_FRIEND                                                                                                   \
    3030    template<typename node_t> friend node_t * mutate(const node_t * node); \
    3131        template<typename node_t> friend node_t * shallowCopy(const node_t * node);
    3232
    3333namespace ast {
    34 
    3534class Expr;
    3635
    37 /// Base statement node
     36// Base statement node
    3837class Stmt : public ParseNode {
    39 public:
     38  public:
    4039        std::vector<Label> labels;
    4140
    42         Stmt( const CodeLocation & loc, std::vector<Label> && labels = {} )
    43         : ParseNode(loc), labels(std::move(labels)) {}
    44 
    45         Stmt(const Stmt& o) : ParseNode(o), labels(o.labels) {}
     41        Stmt( const CodeLocation & loc, const std::vector<Label> && labels = {} )
     42                : ParseNode(loc), labels(std::move(labels)) {}
     43
     44        Stmt(const Stmt & o) : ParseNode(o), labels(o.labels) {}
    4645
    4746        const Stmt * accept( Visitor & v ) const override = 0;
    48 private:
     47  private:
    4948        Stmt * clone() const override = 0;
    5049        MUTATE_FRIEND
    5150};
    5251
    53 /// Compound statement `{ ... }`
     52// Compound statement: { ... }
    5453class CompoundStmt final : public Stmt {
    55 public:
     54  public:
    5655        std::list<ptr<Stmt>> kids;
    5756
    58         CompoundStmt(const CodeLocation & loc, std::list<ptr<Stmt>> && ks = {},
    59                 std::vector<Label>&& labels = {} )
    60         : Stmt(loc, std::move(labels)), kids(std::move(ks)) {}
    61 
    62         CompoundStmt( const CompoundStmt& o );
    63         CompoundStmt( CompoundStmt&& o ) = default;
     57        CompoundStmt(const CodeLocation & loc, const std::list<ptr<Stmt>> && ks = {}, const std::vector<Label> && labels = {} )
     58                : Stmt(loc, std::move(labels)), kids(std::move(ks)) {}
     59
     60        CompoundStmt( const CompoundStmt & o );
     61        CompoundStmt( CompoundStmt && o ) = default;
    6462
    6563        void push_back( const Stmt * s ) { kids.emplace_back( s ); }
     
    6765
    6866        const CompoundStmt * accept( Visitor & v ) const override { return v.visit( this ); }
    69 private:
     67  private:
    7068        CompoundStmt * clone() const override { return new CompoundStmt{ *this }; }
    7169        MUTATE_FRIEND
    7270};
    7371
    74 /// Empty statment `;`
     72// Empty statment: ;
    7573class NullStmt final : public Stmt {
    76 public:
    77         NullStmt( const CodeLocation & loc, std::vector<Label> && labels = {} )
    78         : Stmt(loc, std::move(labels)) {}
     74  public:
     75        NullStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} )
     76                : Stmt(loc, std::move(labels)) {}
    7977
    8078        const NullStmt * accept( Visitor & v ) const override { return v.visit( this ); }
    81 private:
     79  private:
    8280        NullStmt * clone() const override { return new NullStmt{ *this }; }
    8381        MUTATE_FRIEND
    8482};
    8583
    86 /// Expression wrapped by statement
     84// Expression wrapped by statement
    8785class ExprStmt final : public Stmt {
    88 public:
     86  public:
    8987        ptr<Expr> expr;
    9088
    91         ExprStmt( const CodeLocation& loc, const Expr* e, std::vector<Label>&& labels = {} )
    92         : Stmt(loc, std::move(labels)), expr(e) {}
    93 
    94         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    95 private:
     89        ExprStmt( const CodeLocation & loc, const Expr* e, const std::vector<Label> && labels = {} )
     90                : Stmt(loc, std::move(labels)), expr(e) {}
     91
     92        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     93  private:
    9694        ExprStmt * clone() const override { return new ExprStmt{ *this }; }
    9795        MUTATE_FRIEND
    9896};
    9997
    100 /// Assembly statement `asm ... ( "..." : ... )`
     98// Assembly statement: asm ... ( "..." : ... )
    10199class AsmStmt final : public Stmt {
    102 public:
     100  public:
    103101        bool isVolatile;
    104102        ptr<Expr> instruction;
     
    108106
    109107        AsmStmt( const CodeLocation & loc, bool isVolatile, const Expr * instruction,
    110                 std::vector<ptr<Expr>> && output, std::vector<ptr<Expr>> && input,
    111                 std::vector<ptr<ConstantExpr>> && clobber, std::vector<Label> && gotoLabels,
    112                 std::vector<Label> && labels = {})
    113         : Stmt(loc, std::move(labels)), isVolatile(isVolatile), instruction(instruction),
    114           output(std::move(output)), input(std::move(input)), clobber(std::move(clobber)),
    115           gotoLabels(std::move(gotoLabels)) {}
    116 
    117         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    118 private:
     108                         const std::vector<ptr<Expr>> && output, const std::vector<ptr<Expr>> && input,
     109                         const std::vector<ptr<ConstantExpr>> && clobber, const std::vector<Label> && gotoLabels,
     110                         const std::vector<Label> && labels = {})
     111                : Stmt(loc, std::move(labels)), isVolatile(isVolatile), instruction(instruction),
     112                  output(std::move(output)), input(std::move(input)), clobber(std::move(clobber)),
     113                  gotoLabels(std::move(gotoLabels)) {}
     114
     115        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     116  private:
    119117        AsmStmt * clone() const override { return new AsmStmt{ *this }; }
    120118        MUTATE_FRIEND
    121119};
    122120
    123 /// C-preprocessor directive `#...`
     121// C-preprocessor directive: #...
    124122class DirectiveStmt final : public Stmt {
    125 public:
     123  public:
    126124        std::string directive;
    127125
    128126        DirectiveStmt( const CodeLocation & loc, const std::string & directive,
    129                 std::vector<Label> && labels = {} )
    130         : Stmt(loc, std::move(labels)), directive(directive) {}
    131 
    132         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    133 private:
     127                                   std::vector<Label> && labels = {} )
     128                : Stmt(loc, std::move(labels)), directive(directive) {}
     129
     130        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     131  private:
    134132        DirectiveStmt * clone() const override { return new DirectiveStmt{ *this }; }
    135133        MUTATE_FRIEND
    136134};
    137135
    138 /// If conditional statement `if (...) ... else ...`
     136// If statement: if (...) ... else ...
    139137class IfStmt final : public Stmt {
    140 public:
    141         ptr<Expr> cond;
    142         ptr<Stmt> thenPart;
    143         ptr<Stmt> elsePart;
     138  public:
     139        ptr<Expr> cond;
     140        ptr<Stmt> then;
     141        ptr<Stmt> else_;
    144142        std::vector<ptr<Stmt>> inits;
    145143
    146         IfStmt( const CodeLocation & loc, const Expr * cond, const Stmt * thenPart,
    147                 const Stmt * elsePart = nullptr, std::vector<ptr<Stmt>> && inits = {},
    148                 std::vector<Label> && labels = {} )
    149         : Stmt(loc, std::move(labels)), cond(cond), thenPart(thenPart), elsePart(elsePart),
    150           inits(std::move(inits)) {}
    151 
    152         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    153 private:
     144        IfStmt( const CodeLocation & loc, const Expr * cond, const Stmt * then,
     145                        const Stmt * else_ = nullptr, const std::vector<ptr<Stmt>> && inits = {},
     146                        const std::vector<Label> && labels = {} )
     147                : Stmt(loc, std::move(labels)), cond(cond), then(then), else_(else_),
     148                  inits(std::move(inits)) {}
     149
     150        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     151  private:
    154152        IfStmt * clone() const override { return new IfStmt{ *this }; }
    155153        MUTATE_FRIEND
    156154};
    157155
    158 /// Switch or choose conditional statement `switch (...) { ... }`
     156// Switch or choose statement: switch (...) { ... }
    159157class SwitchStmt final : public Stmt {
    160 public:
     158  public:
    161159        ptr<Expr> cond;
    162160        std::vector<ptr<Stmt>> stmts;
    163161
    164         SwitchStmt( const CodeLocation & loc, const Expr * cond, std::vector<ptr<Stmt>> && stmts,
    165                 std::vector<Label> && labels = {} )
    166         : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
    167 
    168         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    169 private:
     162        SwitchStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts,
     163                                const std::vector<Label> && labels = {} )
     164                : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
     165
     166        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     167  private:
    170168        SwitchStmt * clone() const override { return new SwitchStmt{ *this }; }
    171169        MUTATE_FRIEND
    172170};
    173171
    174 /// Case label `case ...:` `default:`
     172// Case label: case ...: or default:
    175173class CaseStmt final : public Stmt {
    176 public:
    177         /// Null for the default label.
     174  public:
     175        // Null for the default label.
    178176        ptr<Expr> cond;
    179177        std::vector<ptr<Stmt>> stmts;
    180178
    181         CaseStmt( const CodeLocation & loc, const Expr * cond, std::vector<ptr<Stmt>> && stmts,
    182                 std::vector<Label> && labels = {} )
    183         : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
     179        CaseStmt( const CodeLocation & loc, const Expr * cond, const std::vector<ptr<Stmt>> && stmts,
     180                          const std::vector<Label> && labels = {} )
     181                : Stmt(loc, std::move(labels)), cond(cond), stmts(std::move(stmts)) {}
    184182
    185183        bool isDefault() const { return !cond; }
    186184
    187185        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    188 private:
     186  private:
    189187        CaseStmt * clone() const override { return new CaseStmt{ *this }; }
    190188        MUTATE_FRIEND
    191189};
    192190
    193 /// While loop `while (...) ...` `do ... while (...);
    194 class WhileStmt final : public Stmt {
    195 public:
     191// While loop: while (...) ... else ... or do ... while (...) else ...;
     192class WhileDoStmt final : public Stmt {
     193  public:
    196194        ptr<Expr> cond;
    197195        ptr<Stmt> body;
     196        ptr<Stmt> else_;
    198197        std::vector<ptr<Stmt>> inits;
    199198        bool isDoWhile;
    200199
    201         WhileStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body,
    202                 std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, std::vector<Label> && labels = {} )
    203         : Stmt(loc, std::move(labels)), cond(cond), body(body), inits(std::move(inits)),
    204           isDoWhile(isDoWhile) {}
    205 
    206         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    207 private:
    208         WhileStmt * clone() const override { return new WhileStmt{ *this }; }
    209         MUTATE_FRIEND
    210 };
    211 
    212 /// For loop `for (... ; ... ; ...) ...`
     200        WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body,
     201                                 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} )
     202                : Stmt(loc, std::move(labels)), cond(cond), body(body), else_(nullptr), inits(std::move(inits)), isDoWhile(isDoWhile) {}
     203
     204        WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, const Stmt * else_,
     205                                 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} )
     206                : Stmt(loc, std::move(labels)), cond(cond), body(body), else_(else_), inits(std::move(inits)), isDoWhile(isDoWhile) {}
     207
     208        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     209  private:
     210        WhileDoStmt * clone() const override { return new WhileDoStmt{ *this }; }
     211        MUTATE_FRIEND
     212};
     213
     214// For loop: for (... ; ... ; ...) ... else ...
    213215class ForStmt final : public Stmt {
    214 public:
     216  public:
    215217        std::vector<ptr<Stmt>> inits;
    216218        ptr<Expr> cond;
    217219        ptr<Expr> inc;
    218220        ptr<Stmt> body;
    219 
    220         ForStmt( const CodeLocation & loc, std::vector<ptr<Stmt>> && inits, const Expr * cond,
    221                 const Expr * inc, const Stmt * body, std::vector<Label> && labels = {} )
    222         : Stmt(loc, std::move(labels)), inits(std::move(inits)), cond(cond), inc(inc),
    223           body(body) {}
    224 
    225         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    226 private:
     221        ptr<Stmt> else_;
     222
     223        ForStmt( const CodeLocation & loc, const std::vector<ptr<Stmt>> && inits, const Expr * cond,
     224                         const Expr * inc, const Stmt * body, const std::vector<Label> && label = {} )
     225                : Stmt(loc, std::move(label)), inits(std::move(inits)), cond(cond), inc(inc), body(body), else_(nullptr) {}
     226
     227        ForStmt( const CodeLocation & loc, const std::vector<ptr<Stmt>> && inits, const Expr * cond,
     228                         const Expr * inc, const Stmt * body, const Stmt * else_, const std::vector<Label> && labels = {} )
     229                : Stmt(loc, std::move(labels)), inits(std::move(inits)), cond(cond), inc(inc), body(body), else_(else_) {}
     230
     231        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     232  private:
    227233        ForStmt * clone() const override { return new ForStmt{ *this }; }
    228234        MUTATE_FRIEND
    229235};
    230236
    231 /// Branch control flow statement `goto ...` `break` `continue` `fallthru`
     237// Branch control flow statement: goto ... or break or continue or fallthru
    232238class BranchStmt final : public Stmt {
    233 public:
     239  public:
    234240        enum Kind { Goto, Break, Continue, FallThrough, FallThroughDefault };
    235241        static constexpr size_t kindEnd = 1 + (size_t)FallThroughDefault;
     
    240246        Kind kind;
    241247
    242         BranchStmt( const CodeLocation & loc, Kind kind, Label target,
    243                 std::vector<Label> && labels = {} );
    244         BranchStmt( const CodeLocation & loc, const Expr * computedTarget,
    245                 std::vector<Label> && labels = {} )
    246         : Stmt(loc, std::move(labels)), originalTarget(loc), target(loc),
    247           computedTarget(computedTarget), kind(Goto) {}
     248        BranchStmt( const CodeLocation & loc, Kind kind, Label target, const std::vector<Label> && labels = {} );
     249        BranchStmt( const CodeLocation & loc, const Expr * computedTarget, const std::vector<Label> && labels = {} )
     250                : Stmt(loc, std::move(labels)), originalTarget(loc), target(loc), computedTarget(computedTarget), kind(Goto) {}
    248251
    249252        const char * kindName() const { return kindNames[kind]; }
    250253
    251254        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    252 private:
     255  private:
    253256        BranchStmt * clone() const override { return new BranchStmt{ *this }; }
    254257        MUTATE_FRIEND
     
    257260};
    258261
    259 /// Return statement `return ...`
     262// Return statement: return ...
    260263class ReturnStmt final : public Stmt {
    261 public:
     264  public:
    262265        ptr<Expr> expr;
    263266
    264         ReturnStmt( const CodeLocation & loc, const Expr * expr, std::vector<Label> && labels = {} )
    265         : Stmt(loc, std::move(labels)), expr(expr) {}
    266 
    267         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    268 private:
     267        ReturnStmt( const CodeLocation & loc, const Expr * expr, const std::vector<Label> && labels = {} )
     268                : Stmt(loc, std::move(labels)), expr(expr) {}
     269
     270        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     271  private:
    269272        ReturnStmt * clone() const override { return new ReturnStmt{ *this }; }
    270273        MUTATE_FRIEND
    271274};
    272275
    273 /// Kind of exception
     276// Kind of exception
    274277enum ExceptionKind { Terminate, Resume };
    275278
    276 /// Throw statement `throw ...`
     279// Throw statement: throw ...
    277280class ThrowStmt final : public Stmt {
    278 public:
     281  public:
    279282        ptr<Expr> expr;
    280283        ptr<Expr> target;
    281284        ExceptionKind kind;
    282285
    283         ThrowStmt(
    284                 const CodeLocation & loc, ExceptionKind kind, const Expr * expr, const Expr * target,
    285                 std::vector<Label> && labels = {} )
    286         : Stmt(loc, std::move(labels)), expr(expr), target(target), kind(kind) {}
    287 
    288         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    289 private:
     286        ThrowStmt( const CodeLocation & loc, ExceptionKind kind, const Expr * expr,
     287                           const Expr * target, const std::vector<Label> && labels = {} )
     288                : Stmt(loc, std::move(labels)), expr(expr), target(target), kind(kind) {}
     289
     290        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     291  private:
    290292        ThrowStmt * clone() const override { return new ThrowStmt{ *this }; }
    291293        MUTATE_FRIEND
    292294};
    293295
    294 /// Try statement `try { ... } ...`
     296// Try statement: try { ... } ...
    295297class TryStmt final : public Stmt {
    296 public:
     298  public:
    297299        ptr<CompoundStmt> body;
    298300        std::vector<ptr<CatchStmt>> handlers;
    299301        ptr<FinallyStmt> finally;
    300302
    301         TryStmt(
    302                 const CodeLocation & loc, const CompoundStmt * body,
    303                 std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally,
    304                 std::vector<Label> && labels = {} )
    305         : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {}
    306 
    307         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    308 private:
     303        TryStmt( const CodeLocation & loc, const CompoundStmt * body,
     304                         const std::vector<ptr<CatchStmt>> && handlers, const FinallyStmt * finally,
     305                         const std::vector<Label> && labels = {} )
     306                : Stmt(loc, std::move(labels)), body(body), handlers(std::move(handlers)), finally(finally) {}
     307
     308        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     309  private:
    309310        TryStmt * clone() const override { return new TryStmt{ *this }; }
    310311        MUTATE_FRIEND
    311312};
    312313
    313 /// Catch clause of try statement
     314// Catch clause of try statement
    314315class CatchStmt final : public Stmt {
    315 public:
     316  public:
    316317        ptr<Decl> decl;
    317318        ptr<Expr> cond;
     
    319320        ExceptionKind kind;
    320321
    321         CatchStmt(
    322                 const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,
    323                 const Stmt * body, std::vector<Label> && labels = {} )
    324         : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {}
    325 
    326         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    327 private:
     322        CatchStmt( const CodeLocation & loc, ExceptionKind kind, const Decl * decl, const Expr * cond,
     323                           const Stmt * body, const std::vector<Label> && labels = {} )
     324                : Stmt(loc, std::move(labels)), decl(decl), cond(cond), body(body), kind(kind) {}
     325
     326        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     327  private:
    328328        CatchStmt * clone() const override { return new CatchStmt{ *this }; }
    329329        MUTATE_FRIEND
    330330};
    331331
    332 /// Finally clause of try statement
     332// Finally clause of try statement
    333333class FinallyStmt final : public Stmt {
    334 public:
     334  public:
    335335        ptr<CompoundStmt> body;
    336336
    337337        FinallyStmt( const CodeLocation & loc, const CompoundStmt * body,
    338                 std::vector<Label> && labels = {} )
    339         : Stmt(loc, std::move(labels)), body(body) {}
    340 
    341         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    342 private:
     338                                 std::vector<Label> && labels = {} )
     339                : Stmt(loc, std::move(labels)), body(body) {}
     340
     341        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     342  private:
    343343        FinallyStmt * clone() const override { return new FinallyStmt{ *this }; }
    344344        MUTATE_FRIEND
    345345};
    346346
    347 /// Suspend statement
     347// Suspend statement
    348348class SuspendStmt final : public Stmt {
    349 public:
     349  public:
    350350        ptr<CompoundStmt> then;
    351351        enum Type { None, Coroutine, Generator } type = None;
    352352
    353         SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, std::vector<Label> && labels = {} )
    354         : Stmt(loc, std::move(labels)), then(then), type(type) {}
    355 
    356         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    357 private:
     353        SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, const std::vector<Label> && labels = {} )
     354                : Stmt(loc, std::move(labels)), then(then), type(type) {}
     355
     356        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     357  private:
    358358        SuspendStmt * clone() const override { return new SuspendStmt{ *this }; }
    359359        MUTATE_FRIEND
    360360};
    361361
    362 /// Wait for concurrency statement `when (...) waitfor (... , ...) ... timeout(...) ... else ...`
     362// Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ...
    363363class WaitForStmt final : public Stmt {
    364 public:
     364  public:
    365365        struct Target {
    366366                ptr<Expr> func;
     
    389389        OrElse orElse;
    390390
    391         WaitForStmt( const CodeLocation & loc, std::vector<Label> && labels = {} )
    392         : Stmt(loc, std::move(labels)) {}
    393 
    394         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    395 private:
     391        WaitForStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} )
     392                : Stmt(loc, std::move(labels)) {}
     393
     394        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     395  private:
    396396        WaitForStmt * clone() const override { return new WaitForStmt{ *this }; }
    397397        MUTATE_FRIEND
    398398};
    399399
    400 /// Any declaration in a (compound) statement.
     400// Any declaration in a (compound) statement.
    401401class DeclStmt final : public Stmt {
    402 public:
     402  public:
    403403        ptr<Decl> decl;
    404404
    405         DeclStmt( const CodeLocation & loc, const Decl * decl, std::vector<Label> && labels = {} )
    406         : Stmt(loc, std::move(labels)), decl(decl) {}
    407 
    408         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    409 private:
     405        DeclStmt( const CodeLocation & loc, const Decl * decl, const std::vector<Label> && labels = {} )
     406                : Stmt(loc, std::move(labels)), decl(decl) {}
     407
     408        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     409  private:
    410410        DeclStmt * clone() const override { return new DeclStmt{ *this }; }
    411411        MUTATE_FRIEND
    412412};
    413413
    414 /// Represents an implicit application of a constructor or destructor.
     414// Represents an implicit application of a constructor or destructor.
    415415class ImplicitCtorDtorStmt final : public Stmt {
    416 public:
     416  public:
    417417        ptr<Stmt> callStmt;
    418418
    419419        ImplicitCtorDtorStmt( const CodeLocation & loc, const Stmt * callStmt,
    420                 std::vector<Label> && labels = {} )
    421         : Stmt(loc, std::move(labels)), callStmt(callStmt) {}
    422 
    423         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    424 private:
     420                                                  std::vector<Label> && labels = {} )
     421                : Stmt(loc, std::move(labels)), callStmt(callStmt) {}
     422
     423        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     424  private:
    425425        ImplicitCtorDtorStmt * clone() const override { return new ImplicitCtorDtorStmt{ *this }; }
    426426        MUTATE_FRIEND
    427427};
    428428
    429 /// Mutex Statement
     429// Mutex Statement
    430430class MutexStmt final : public Stmt {
    431 public:
     431  public:
    432432        ptr<Stmt> stmt;
    433433        std::vector<ptr<Expr>> mutexObjs;
    434434
    435435        MutexStmt( const CodeLocation & loc, const Stmt * stmt,
    436                 std::vector<ptr<Expr>> && mutexes, std::vector<Label> && labels = {} )
    437         : Stmt(loc, std::move(labels)), stmt(stmt), mutexObjs(std::move(mutexes)) {}
    438 
    439         const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
    440 private:
     436                           const std::vector<ptr<Expr>> && mutexes, const std::vector<Label> && labels = {} )
     437                : Stmt(loc, std::move(labels)), stmt(stmt), mutexObjs(std::move(mutexes)) {}
     438
     439        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     440  private:
    441441        MutexStmt * clone() const override { return new MutexStmt{ *this }; }
    442442        MUTATE_FRIEND
    443443};
    444 
    445 }
     444} // namespace ast
    446445
    447446#undef MUTATE_FRIEND
    448447
    449448// Local Variables: //
    450 // tab-width: 4 //
    451449// mode: c++ //
    452 // compile-command: "make install" //
    453450// End: //
  • src/AST/Visitor.hpp

    r97c215f rf5a51db  
    1010// Created On       : Thr May 9 15:28:00 2019
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 12 18:25:07 2021
    13 // Update Count     : 1
     12// Last Modified On : Tue Feb  1 09:09:34 2022
     13// Update Count     : 2
    1414//
    1515
     
    3838    virtual const ast::Stmt *             visit( const ast::DirectiveStmt        * ) = 0;
    3939    virtual const ast::Stmt *             visit( const ast::IfStmt               * ) = 0;
    40     virtual const ast::Stmt *             visit( const ast::WhileStmt            * ) = 0;
     40    virtual const ast::Stmt *             visit( const ast::WhileDoStmt          * ) = 0;
    4141    virtual const ast::Stmt *             visit( const ast::ForStmt              * ) = 0;
    4242    virtual const ast::Stmt *             visit( const ast::SwitchStmt           * ) = 0;
  • src/CodeGen/CodeGenerator.cc

    r97c215f rf5a51db  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 12 19:00:42 2021
    13 // Update Count     : 536
     12// Last Modified On : Wed Feb  2 20:30:30 2022
     13// Update Count     : 541
    1414//
    1515#include "CodeGenerator.h"
     
    4242        bool wantSpacing( Statement * stmt) {
    4343                return dynamic_cast< IfStmt * >( stmt ) || dynamic_cast< CompoundStmt * >( stmt ) ||
    44                         dynamic_cast< WhileStmt * >( stmt ) || dynamic_cast< ForStmt * >( stmt ) || dynamic_cast< SwitchStmt *>( stmt );
     44                        dynamic_cast< WhileDoStmt * >( stmt ) || dynamic_cast< ForStmt * >( stmt ) || dynamic_cast< SwitchStmt *>( stmt );
    4545        }
    4646
     
    955955                output << " ) ";
    956956
    957                 ifStmt->get_thenPart()->accept( *visitor );
    958 
    959                 if ( ifStmt->get_elsePart() != 0) {
     957                ifStmt->get_then()->accept( *visitor );
     958
     959                if ( ifStmt->get_else() != 0) {
    960960                        output << " else ";
    961                         ifStmt->get_elsePart()->accept( *visitor );
     961                        ifStmt->get_else()->accept( *visitor );
    962962                } // if
    963963        }
     
    10201020                        output << "fallthru";
    10211021                        break;
     1022                  default: ;                                                                    // prevent warning
    10221023                } // switch
    10231024                // print branch target for labelled break/continue/fallthru in debug mode
     
    11251126        }
    11261127
    1127         void CodeGenerator::postvisit( WhileStmt * whileStmt ) {
    1128                 if ( whileStmt->get_isDoWhile() ) {
     1128        void CodeGenerator::postvisit( WhileDoStmt * whileDoStmt ) {
     1129                if ( whileDoStmt->get_isDoWhile() ) {
    11291130                        output << "do";
    11301131                } else {
    11311132                        output << "while (";
    1132                         whileStmt->get_condition()->accept( *visitor );
     1133                        whileDoStmt->get_condition()->accept( *visitor );
    11331134                        output << ")";
    11341135                } // if
    11351136                output << " ";
    11361137
    1137                 output << CodeGenerator::printLabels( whileStmt->get_body()->get_labels() );
    1138                 whileStmt->get_body()->accept( *visitor );
     1138                output << CodeGenerator::printLabels( whileDoStmt->get_body()->get_labels() );
     1139                whileDoStmt->get_body()->accept( *visitor );
    11391140
    11401141                output << indent;
    11411142
    1142                 if ( whileStmt->get_isDoWhile() ) {
     1143                if ( whileDoStmt->get_isDoWhile() ) {
    11431144                        output << " while (";
    1144                         whileStmt->get_condition()->accept( *visitor );
     1145                        whileDoStmt->get_condition()->accept( *visitor );
    11451146                        output << ");";
    11461147                } // if
  • src/CodeGen/CodeGenerator.h

    r97c215f rf5a51db  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 12 18:35:38 2021
    13 // Update Count     : 63
     12// Last Modified On : Tue Feb  1 09:23:21 2022
     13// Update Count     : 64
    1414//
    1515
     
    116116                void postvisit( WaitForStmt * );
    117117                void postvisit( WithStmt * );
    118                 void postvisit( WhileStmt * );
     118                void postvisit( WhileDoStmt * );
    119119                void postvisit( ForStmt * );
    120120                void postvisit( NullStmt * );
  • src/Common/CodeLocationTools.cpp

    r97c215f rf5a51db  
    1010// Created On       : Fri Dec  4 15:42:00 2020
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 12 18:35:37 2021
    13 // Update Count     : 2
     12// Last Modified On : Tue Feb  1 09:14:39 2022
     13// Update Count     : 3
    1414//
    1515
     
    109109    macro(DirectiveStmt, Stmt) \
    110110    macro(IfStmt, Stmt) \
    111     macro(WhileStmt, Stmt) \
     111    macro(WhileDoStmt, Stmt) \
    112112    macro(ForStmt, Stmt) \
    113113    macro(SwitchStmt, Stmt) \
  • src/Common/PassVisitor.h

    r97c215f rf5a51db  
    9292        virtual void visit( IfStmt * ifStmt ) override final;
    9393        virtual void visit( const IfStmt * ifStmt ) override final;
    94         virtual void visit( WhileStmt * whileStmt ) override final;
    95         virtual void visit( const WhileStmt * whileStmt ) override final;
     94        virtual void visit( WhileDoStmt * whileDoStmt ) override final;
     95        virtual void visit( const WhileDoStmt * whileDoStmt ) override final;
    9696        virtual void visit( ForStmt * forStmt ) override final;
    9797        virtual void visit( const ForStmt * forStmt ) override final;
     
    277277        virtual Statement * mutate( DirectiveStmt * dirStmt ) override final;
    278278        virtual Statement * mutate( IfStmt * ifStmt ) override final;
    279         virtual Statement * mutate( WhileStmt * whileStmt ) override final;
     279        virtual Statement * mutate( WhileDoStmt * whileDoStmt ) override final;
    280280        virtual Statement * mutate( ForStmt * forStmt ) override final;
    281281        virtual Statement * mutate( SwitchStmt * switchStmt ) override final;
  • src/Common/PassVisitor.impl.h

    r97c215f rf5a51db  
    11891189                maybeAccept_impl( node->initialization, *this );
    11901190                visitExpression ( node->condition );
    1191                 node->thenPart = visitStatement( node->thenPart );
    1192                 node->elsePart = visitStatement( node->elsePart );
     1191                node->then = visitStatement( node->then );
     1192                node->else_ = visitStatement( node->else_ );
    11931193        }
    11941194        VISIT_END( node );
     
    12031203                maybeAccept_impl( node->initialization, *this );
    12041204                visitExpression ( node->condition );
    1205                 visitStatement  ( node->thenPart );
    1206                 visitStatement  ( node->elsePart );
     1205                visitStatement  ( node->then );
     1206                visitStatement  ( node->else_ );
    12071207        }
    12081208        VISIT_END( node );
     
    12171217                maybeMutate_impl( node->initialization, *this );
    12181218                node->condition = mutateExpression( node->condition );
    1219                 node->thenPart  = mutateStatement ( node->thenPart  );
    1220                 node->elsePart  = mutateStatement ( node->elsePart  );
     1219                node->then  = mutateStatement ( node->then  );
     1220                node->else_  = mutateStatement ( node->else_  );
    12211221        }
    12221222        MUTATE_END( Statement, node );
     
    12241224
    12251225//--------------------------------------------------------------------------
    1226 // WhileStmt
    1227 template< typename pass_type >
    1228 void PassVisitor< pass_type >::visit( WhileStmt * node ) {
     1226// WhileDoStmt
     1227template< typename pass_type >
     1228void PassVisitor< pass_type >::visit( WhileDoStmt * node ) {
    12291229        VISIT_START( node );
    12301230
     
    12411241
    12421242template< typename pass_type >
    1243 void PassVisitor< pass_type >::visit( const WhileStmt * node ) {
     1243void PassVisitor< pass_type >::visit( const WhileDoStmt * node ) {
    12441244        VISIT_START( node );
    12451245
     
    12561256
    12571257template< typename pass_type >
    1258 Statement * PassVisitor< pass_type >::mutate( WhileStmt * node ) {
     1258Statement * PassVisitor< pass_type >::mutate( WhileDoStmt * node ) {
    12591259        MUTATE_START( node );
    12601260
  • src/Common/utility.h

    r97c215f rf5a51db  
    371371}
    372372
     373template< typename T >
     374struct enumerate_t {
     375        template<typename val_t>
     376        struct value_t {
     377                val_t & val;
     378                size_t idx;
     379        };
     380
     381        template< typename iter_t, typename val_t >
     382        struct iterator_t {
     383                iter_t it;
     384                size_t idx;
     385
     386                iterator_t( iter_t _it, size_t _idx ) : it(_it), idx(_idx) {}
     387
     388                value_t<val_t> operator*() const { return value_t<val_t>{ *it, idx }; }
     389
     390                bool operator==(const iterator_t & o) const { return o.it == it; }
     391                bool operator!=(const iterator_t & o) const { return o.it != it; }
     392
     393                iterator_t & operator++() {
     394                        it++;
     395                        idx++;
     396                        return *this;
     397                }
     398
     399                using difference_type   = typename std::iterator_traits< iter_t >::difference_type;
     400                using value_type        = value_t<val_t>;
     401                using pointer           = value_t<val_t> *;
     402                using reference         = value_t<val_t> &;
     403                using iterator_category = std::forward_iterator_tag;
     404        };
     405
     406        T & ref;
     407
     408        using iterator = iterator_t< typename T::iterator, typename T::value_type >;
     409        using const_iterator = iterator_t< typename T::const_iterator, const typename T::value_type >;
     410
     411        iterator begin() { return iterator( ref.begin(), 0 ); }
     412        iterator end()   { return iterator( ref.end(), ref.size() ); }
     413
     414        const_iterator begin() const { return const_iterator( ref.cbegin(), 0 ); }
     415        const_iterator end()   const { return const_iterator( ref.cend(), ref.size() ); }
     416
     417        const_iterator cbegin() const { return const_iterator( ref.cbegin(), 0 ); }
     418        const_iterator cend()   const { return const_iterator( ref.cend(), ref.size() ); }
     419};
     420
     421template< typename T >
     422enumerate_t<T> enumerate( T & ref ) {
     423        return enumerate_t< T >{ ref };
     424}
     425
     426template< typename T >
     427const enumerate_t< const T > enumerate( const T & ref ) {
     428        return enumerate_t< const T >{ ref };
     429}
     430
    373431template< typename OutType, typename Range, typename Functor >
    374432OutType map_range( const Range& range, Functor&& functor ) {
  • src/ControlStruct/ExceptTranslate.h

    r97c215f rf5a51db  
    3131
    3232        void translateTries( std::list< Declaration *> & translationUnit );
     33        void translateTries( ast::TranslationUnit & transUnit );
    3334        /* Replaces all try blocks (and their many clauses) with function definitions and calls.
    3435         * This uses the exception built-ins to produce typed output and should take place after
  • src/ControlStruct/ExceptTranslateNew.cpp

    r97c215f rf5a51db  
    99// Author           : Andrew Beach
    1010// Created On       : Mon Nov  8 11:53:00 2021
    11 // Last Modified By : Andrew Beach
    12 // Last Modified On : Mon Nov  8 16:50:00 2021
    13 // Update Count     : 0
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Mon Jan 31 18:49:58 2022
     13// Update Count     : 1
    1414//
    1515
     
    2020#include "AST/Stmt.hpp"
    2121#include "AST/TranslationUnit.hpp"
     22#include "AST/DeclReplacer.hpp"
    2223
    2324namespace ControlStruct {
    2425
    2526namespace {
     27
     28        typedef std::list<ast::CatchStmt*> CatchList;
     29
     30        void split( CatchList& allHandlers, CatchList& terHandlers,
     31                                CatchList& resHandlers ) {
     32                while ( !allHandlers.empty() ) {
     33                        ast::CatchStmt * stmt = allHandlers.front();
     34                        allHandlers.pop_front();
     35                        if (stmt->kind == ast::ExceptionKind::Terminate) {
     36                                terHandlers.push_back(stmt);
     37                        } else {
     38                                resHandlers.push_back(stmt);
     39                        }
     40                }
     41        }
     42
     43        void appendDeclStmt( ast::CompoundStmt * block, ast::DeclWithType * item ) {
     44                block->push_back(new ast::DeclStmt(block->location, item));
     45        }
    2646
    2747class TranslateThrowsCore : public ast::WithGuards {
     
    128148}
    129149
     150
     151class TryMutatorCore {
     152        // The built in types used in translation.
     153        const ast::StructDecl * except_decl;
     154        const ast::StructDecl * node_decl;
     155        const ast::StructDecl * hook_decl;
     156
     157        // The many helper functions for code/syntree generation.
     158        ast::CompoundStmt * take_try_block( ast::TryStmt * tryStmt );
     159        ast::FunctionDecl * create_try_wrapper( const ast::CompoundStmt * body );
     160        ast::FunctionDecl * create_terminate_catch( CatchList &handlers );
     161        ast::CompoundStmt * create_single_matcher(
     162                const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler );
     163        ast::FunctionDecl * create_terminate_match( CatchList &handlers );
     164        ast::CompoundStmt * create_terminate_caller( CodeLocation loc, ast::FunctionDecl * try_wrapper,
     165                ast::FunctionDecl * terminate_catch, ast::FunctionDecl * terminate_match );
     166        ast::FunctionDecl * create_resume_handler( CatchList &handlers );
     167        ast::CompoundStmt * create_resume_wrapper(
     168                const ast::Stmt * wraps, const ast::FunctionDecl * resume_handler );
     169        ast::FunctionDecl * create_finally_wrapper( ast::TryStmt * tryStmt );
     170        ast::ObjectDecl * create_finally_hook( ast::FunctionDecl * finally_wrapper );
     171        ast::Stmt * create_resume_rethrow( const ast::ThrowStmt * throwStmt );
     172
     173        // Types used in translation, make sure to use clone.
     174        // void (*function)();
     175        ast::FunctionDecl * try_func_t;
     176        // void (*function)(int, exception);
     177        ast::FunctionDecl * catch_func_t;
     178        // int (*function)(exception);
     179        ast::FunctionDecl * match_func_t;
     180        // bool (*function)(exception);
     181        ast::FunctionDecl * handle_func_t;
     182        // void (*function)(__attribute__((unused)) void *);
     183        ast::FunctionDecl * finally_func_t;
     184
     185        ast::StructInstType * create_except_type() {
     186                assert( except_decl );
     187                return new ast::StructInstType( except_decl );
     188        }
     189        void init_func_types();
     190
     191public:
     192        TryMutatorCore() :
     193                except_decl( nullptr ), node_decl( nullptr ), hook_decl( nullptr )
     194        {}
     195
     196        void previsit( const ast::StructDecl *structDecl );
     197        ast::Stmt * postvisit( const ast::TryStmt *tryStmt );
     198        ast::Stmt * postvisit( const ast::ThrowStmt *throwStmt );
     199};
     200
     201void TryMutatorCore::init_func_types() {
     202        assert( except_decl );
     203
     204        ast::ObjectDecl index_obj(
     205                {},
     206                "__handler_index",
     207                new ast::BasicType(ast::BasicType::SignedInt)
     208                );
     209        ast::ObjectDecl exception_obj(
     210                {},
     211                "__exception_inst",
     212                new ast::PointerType(
     213                        new ast::StructInstType( except_decl )
     214                        ),
     215                NULL
     216                );
     217        ast::ObjectDecl bool_obj(
     218                {},
     219                "__ret_bool",
     220                new ast::BasicType( ast::BasicType::Bool ),
     221                nullptr, //init
     222                ast::Storage::Classes{},
     223                ast::Linkage::Cforall,
     224                nullptr, //width
     225                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
     226                );
     227        ast::ObjectDecl voidptr_obj(
     228                {},
     229                "__hook",
     230                new ast::PointerType(
     231                        new ast::VoidType()
     232                ),
     233                nullptr, //init
     234                ast::Storage::Classes{},
     235                ast::Linkage::Cforall,
     236                nullptr, //width
     237                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
     238                );
     239
     240        ast::ObjectDecl unused_index_obj(
     241                {},
     242                "__handler_index",
     243                new ast::BasicType(ast::BasicType::SignedInt),
     244                nullptr,
     245                ast::Storage::Classes{},
     246                ast::Linkage::Cforall,
     247                nullptr, //width
     248                std::vector<ast::ptr<ast::Attribute>>{ new ast::Attribute( "unused" ) }
     249        );
     250        //unused_index_obj->attributes.push_back( new Attribute( "unused" ) );
     251
     252        try_func_t = new ast::FunctionDecl(
     253                {},
     254                "try",
     255                {}, //forall
     256                {}, //no param
     257                {}, //no return
     258                nullptr,
     259                ast::Storage::Classes{},
     260                ast::Linkage::Cforall
     261        );
     262
     263        catch_func_t = new ast::FunctionDecl(
     264                {},
     265                "catch",
     266                {}, //forall
     267                {ast::deepCopy(&index_obj), ast::deepCopy(&exception_obj)},//param
     268                {}, //return void
     269                nullptr,
     270                ast::Storage::Classes{},
     271                ast::Linkage::Cforall
     272        );
     273
     274        match_func_t = new ast::FunctionDecl(
     275                {},
     276                "match",
     277                {}, //forall
     278                {ast::deepCopy(&exception_obj)},
     279                {ast::deepCopy(&unused_index_obj)},
     280                nullptr,
     281                ast::Storage::Classes{},
     282                ast::Linkage::Cforall
     283        );
     284
     285        handle_func_t = new ast::FunctionDecl(
     286                {},
     287                "handle",
     288                {}, //forall
     289                {ast::deepCopy(&exception_obj)},
     290                {ast::deepCopy(&bool_obj)},
     291                nullptr,
     292                ast::Storage::Classes{},
     293                ast::Linkage::Cforall
     294        );
     295
     296        finally_func_t = new ast::FunctionDecl(
     297                {},
     298                "finally",
     299                {}, //forall
     300                {ast::deepCopy(&voidptr_obj)},
     301                {}, //return void
     302                nullptr,
     303                ast::Storage::Classes{},
     304                ast::Linkage::Cforall
     305        );
     306
     307        //catch_func_t.get_parameters().push_back( index_obj.clone() );
     308        //catch_func_t.get_parameters().push_back( exception_obj.clone() );
     309        //match_func_t.get_returnVals().push_back( unused_index_obj );
     310        //match_func_t.get_parameters().push_back( exception_obj.clone() );
     311        //handle_func_t.get_returnVals().push_back( bool_obj.clone() );
     312        //handle_func_t.get_parameters().push_back( exception_obj.clone() );
     313        //finally_func_t.get_parameters().push_back( voidptr_obj.clone() );
     314}
     315
     316// TryStmt Mutation Helpers
     317
     318/*
     319ast::CompoundStmt * TryMutatorCore::take_try_block( ast::TryStmt *tryStmt ) {
     320        ast::CompoundStmt * block = tryStmt->body;
     321        tryStmt->body = nullptr;
     322        return block;
     323}
     324*/
     325
     326ast::FunctionDecl * TryMutatorCore::create_try_wrapper(
     327                const ast::CompoundStmt *body ) {
     328
     329        ast::FunctionDecl * ret = ast::deepCopy(try_func_t);
     330        ret->stmts = body;
     331        return ret;
     332}
     333
     334ast::FunctionDecl * TryMutatorCore::create_terminate_catch(
     335                CatchList &handlers ) {
     336        std::vector<ast::ptr<ast::Stmt>> handler_wrappers;
     337
     338        assert (!handlers.empty());
     339        const CodeLocation loc = handlers.front()->location;
     340
     341        ast::FunctionDecl * func_t = ast::deepCopy(catch_func_t);
     342        const ast::DeclWithType * index_obj = func_t->params.front();
     343        const ast::DeclWithType * except_obj = func_t->params.back();
     344
     345        // Index 1..{number of handlers}
     346        int index = 0;
     347        CatchList::iterator it = handlers.begin();
     348        for ( ; it != handlers.end() ; ++it ) {
     349                ++index;
     350                ast::CatchStmt * handler = *it;
     351                const CodeLocation loc = handler->location;
     352
     353                // case `index`:
     354                // {
     355                //     `handler.decl` = { (virtual `decl.type`)`except` };
     356                //     `handler.body`;
     357                // }
     358                // return;
     359                ast::CompoundStmt * block = new ast::CompoundStmt(loc);
     360
     361                // Just copy the exception value. (Post Validation)
     362                const ast::ObjectDecl * handler_decl =
     363                        handler->decl.strict_as<ast::ObjectDecl>();
     364                ast::ObjectDecl * local_except = ast::deepCopy(handler_decl);
     365                ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc,
     366                        new ast::VariableExpr( loc, except_obj ),
     367                        local_except->get_type()
     368                        );
     369                vcex->location = handler->location;
     370                local_except->init = new ast::ListInit(loc, { new ast::SingleInit( loc, vcex ) });
     371                block->push_back( new ast::DeclStmt( loc, local_except ) );
     372
     373                // Add the cleanup attribute.
     374                local_except->attributes.push_back( new ast::Attribute(
     375                        "cleanup",
     376                        { new ast::NameExpr( loc, "__cfaehm_cleanup_terminate" ) }
     377                        ) );
     378
     379                ast::DeclReplacer::DeclMap mapping;
     380                mapping[handler_decl] = local_except;
     381                const ast::Stmt * mutBody = strict_dynamic_cast<const ast::Stmt *>(
     382                        ast::DeclReplacer::replace(handler->body, mapping));
     383
     384
     385                block->push_back( mutBody );
     386                // handler->body = nullptr;
     387
     388                handler_wrappers.push_back( new ast::CaseStmt(loc,
     389                        ast::ConstantExpr::from_int(loc, index) ,
     390                        { block, new ast::ReturnStmt( loc, nullptr ) }
     391                        ));
     392        }
     393        // TODO: Some sort of meaningful error on default perhaps?
     394
     395        /*
     396        std::list<Statement*> stmt_handlers;
     397        while ( !handler_wrappers.empty() ) {
     398                stmt_handlers.push_back( handler_wrappers.front() );
     399                handler_wrappers.pop_front();
     400        }
     401        */
     402
     403        ast::SwitchStmt * handler_lookup = new ast::SwitchStmt(loc,
     404                new ast::VariableExpr( loc, index_obj ),
     405                std::move(handler_wrappers)
     406                );
     407        ast::CompoundStmt * body = new ast::CompoundStmt(loc,
     408                {handler_lookup});
     409
     410        func_t->stmts = body;
     411        return func_t;
     412}
     413
     414// Create a single check from a moddified handler.
     415// except_obj is referenced, modded_handler will be freed.
     416ast::CompoundStmt * TryMutatorCore::create_single_matcher(
     417                const ast::DeclWithType * except_obj, ast::CatchStmt * modded_handler ) {
     418        // {
     419        //     `modded_handler.decl`
     420        //     if ( `decl.name = (virtual `decl.type`)`except`
     421        //             [&& `modded_handler.cond`] ) {
     422        //         `modded_handler.body`
     423        //     }
     424        // }
     425
     426        const CodeLocation loc = modded_handler->location;
     427        ast::CompoundStmt * block = new ast::CompoundStmt(loc);
     428
     429        // Local Declaration
     430        const ast::ObjectDecl * local_except =
     431                modded_handler->decl.strict_as<ast::ObjectDecl>();
     432        block->push_back( new ast::DeclStmt( loc,  local_except ) );
     433
     434        // Check for type match.
     435        ast::VirtualCastExpr * vcex = new ast::VirtualCastExpr(loc,
     436                new ast::VariableExpr(loc, except_obj ),
     437                local_except->get_type()
     438                );
     439        ast::Expr * cond = ast::UntypedExpr::createAssign(loc,
     440                new ast::VariableExpr(loc, local_except ), vcex );
     441
     442        // Add the check on the conditional if it is provided.
     443        if ( modded_handler->cond ) {
     444                cond = new ast::LogicalExpr( loc, cond, modded_handler->cond, ast::LogicalFlag::AndExpr );
     445        }
     446        // Construct the match condition.
     447        block->push_back( new ast::IfStmt(loc,
     448                cond, modded_handler->body, nullptr ) );
     449
     450        // xxx - how does this work in new ast
     451        //modded_handler->set_decl( nullptr );
     452        //modded_handler->set_cond( nullptr );
     453        //modded_handler->set_body( nullptr );
     454        //delete modded_handler;
     455        return block;
     456}
     457
     458ast::FunctionDecl * TryMutatorCore::create_terminate_match(
     459                CatchList &handlers ) {
     460        // int match(exception * except) {
     461        //     HANDLER WRAPPERS { return `index`; }
     462        // }
     463
     464        assert (!handlers.empty());
     465        const CodeLocation loc = handlers.front()->location;
     466
     467        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
     468
     469        ast::FunctionDecl * func_t = ast::deepCopy(match_func_t);
     470        const ast::DeclWithType * except_obj = func_t->params.back();
     471
     472        // Index 1..{number of handlers}
     473        int index = 0;
     474        CatchList::iterator it;
     475        for ( it = handlers.begin() ; it != handlers.end() ; ++it ) {
     476                ++index;
     477                ast::CatchStmt * handler = *it;
     478
     479                // Body should have been taken by create_terminate_catch.
     480                // xxx - just ignore it?
     481                // assert( nullptr == handler->get_body() );
     482
     483                // Create new body.
     484                handler->body = new ast::ReturnStmt( handler->location,
     485                        ast::ConstantExpr::from_int( handler->location, index ) );
     486
     487                // Create the handler.
     488                body->push_back( create_single_matcher( except_obj, handler ) );
     489                *it = nullptr;
     490        }
     491
     492        body->push_back( new ast::ReturnStmt(loc,
     493                ast::ConstantExpr::from_int( loc, 0 ) ));
     494
     495        func_t->stmts = body;
     496
     497        return func_t;
     498}
     499
     500ast::CompoundStmt * TryMutatorCore::create_terminate_caller(
     501                CodeLocation loc,
     502                ast::FunctionDecl * try_wrapper,
     503                ast::FunctionDecl * terminate_catch,
     504                ast::FunctionDecl * terminate_match ) {
     505        // { __cfaehm_try_terminate(`try`, `catch`, `match`); }
     506
     507        ast::UntypedExpr * caller = new ast::UntypedExpr(loc, new ast::NameExpr(loc,
     508                "__cfaehm_try_terminate" ) );
     509        caller->args.push_back( new ast::VariableExpr(loc, try_wrapper ) );
     510        caller->args.push_back( new ast::VariableExpr(loc, terminate_catch ) );
     511        caller->args.push_back( new ast::VariableExpr(loc, terminate_match ) );
     512
     513        ast::CompoundStmt * callStmt = new ast::CompoundStmt(loc);
     514        callStmt->push_back( new ast::ExprStmt( loc, caller ) );
     515        return callStmt;
     516}
     517
     518ast::FunctionDecl * TryMutatorCore::create_resume_handler(
     519                CatchList &handlers ) {
     520        // bool handle(exception * except) {
     521        //     HANDLER WRAPPERS { `hander->body`; return true; }
     522        // }
     523        assert (!handlers.empty());
     524        const CodeLocation loc = handlers.front()->location;
     525        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
     526
     527        ast::FunctionDecl * func_t = ast::deepCopy(handle_func_t);
     528        const ast::DeclWithType * except_obj = func_t->params.back();
     529
     530        CatchList::iterator it;
     531        for ( it = handlers.begin() ; it != handlers.end() ; ++it ) {
     532                ast::CatchStmt * handler = *it;
     533                const CodeLocation loc = handler->location;
     534                // Modifiy body.
     535                ast::CompoundStmt * handling_code;
     536                if (handler->body.as<ast::CompoundStmt>()) {
     537                        handling_code =
     538                        strict_dynamic_cast<ast::CompoundStmt*>( handler->body.get_and_mutate() );
     539                } else {
     540                        handling_code = new ast::CompoundStmt(loc);
     541                        handling_code->push_back( handler->body );
     542                }
     543                handling_code->push_back( new ast::ReturnStmt(loc,
     544                        ast::ConstantExpr::from_bool(loc, true ) ) );
     545                handler->body = handling_code;
     546
     547                // Create the handler.
     548                body->push_back( create_single_matcher( except_obj, handler ) );
     549                *it = nullptr;
     550        }
     551
     552        body->push_back( new ast::ReturnStmt(loc,
     553                ast::ConstantExpr::from_bool(loc, false ) ) );
     554        func_t->stmts = body;
     555
     556        return func_t;
     557}
     558
     559ast::CompoundStmt * TryMutatorCore::create_resume_wrapper(
     560                const ast::Stmt * wraps,
     561                const ast::FunctionDecl * resume_handler ) {
     562        const CodeLocation loc = wraps->location;
     563        ast::CompoundStmt * body = new ast::CompoundStmt(loc);
     564
     565