Index: doc/theses/colby_parsons_MMAth/style/style.tex
===================================================================
--- doc/theses/colby_parsons_MMAth/style/style.tex	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ doc/theses/colby_parsons_MMAth/style/style.tex	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -3,5 +3,15 @@
 \lstset{language=CFA}					% default language
 
+\newcommand{\newtermFont}{\emph}
+\newcommand{\Newterm}[1]{\newtermFont{#1}}
+
 \newcommand{\code}[1]{\lstinline[language=CFA]{#1}}
 \newcommand{\uC}{$\mu$\CC}
+\newcommand{\PAB}[1]{{\color{red}PAB: #1}}
 
+\newsavebox{\myboxA}					% used with subfigure
+\newsavebox{\myboxB}
+
+\lstnewenvironment{java}[1][]
+{\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
+{}
Index: doc/theses/colby_parsons_MMAth/text/actors.tex
===================================================================
--- doc/theses/colby_parsons_MMAth/text/actors.tex	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ doc/theses/colby_parsons_MMAth/text/actors.tex	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -90,31 +90,31 @@
 \begin{cfa}
 struct derived_actor {
-    inline actor;       // Plan-9 C inheritance
+	inline actor;	   // Plan-9 C inheritance
 };
 void ?{}( derived_actor & this ) { // Default ctor
-    ((actor &)this){};  // Call to actor ctor
+	((actor &)this){};  // Call to actor ctor
 }
 
 struct derived_msg {
-    inline message;     // Plan-9 C nominal inheritance
-    char word[12];
+	inline message;	 // Plan-9 C nominal inheritance
+	char word[12];
 };
 void ?{}( derived_msg & this, char * new_word ) { // Overloaded ctor
-    ((message &) this){ Nodelete }; // Passing allocation to ctor
-    strcpy(this.word, new_word);
+	((message &) this){ Nodelete }; // Passing allocation to ctor
+	strcpy(this.word, new_word);
 }
 
 Allocation receive( derived_actor & receiver, derived_msg & msg ) {
-    printf("The message contained the string: %s\n", msg.word);
-    return Finished; // Return finished since actor is done
+	printf("The message contained the string: %s\n", msg.word);
+	return Finished; // Return finished since actor is done
 }
 
 int main() {
-    start_actor_system(); // Sets up executor
-    derived_actor my_actor;         
-    derived_msg my_msg{ "Hello World" }; // Constructor call
-    my_actor << my_msg;   // Send message via left shift operator
-    stop_actor_system(); // Waits until actors are finished
-    return 0;
+	start_actor_system(); // Sets up executor
+	derived_actor my_actor;		 
+	derived_msg my_msg{ "Hello World" }; // Constructor call
+	my_actor << my_msg;   // Send message via left shift operator
+	stop_actor_system(); // Waits until actors are finished
+	return 0;
 }
 \end{cfa}
@@ -229,5 +229,5 @@
 \section{Envelopes}\label{s:envelope}
 In actor systems messages are sent and received by actors. 
-When a actor receives a message it  executes its behaviour that is associated with that message type. 
+When an actor receives a message it executes its behaviour that is associated with that message type. 
 However the unit of work that stores the message, the receiving actor's address, and other pertinent information needs to persist between send and the receive. 
 Furthermore the unit of work needs to be able to be stored in some fashion, usually in a queue, until it is executed by an actor. 
@@ -301,5 +301,5 @@
 While other systems are concerned with stealing actors, the \CFA actor system steals queues. 
 This is a result of \CFA's use of the inverted actor system. 
- The goal of the \CFA actor work stealing mechanism is to have a zero-victim-cost stealing mechanism. 
+The goal of the \CFA actor work stealing mechanism is to have a zero-victim-cost stealing mechanism. 
 This does not means that stealing has no cost. 
 This goal is to ensure that stealing work does not impact the performance of victim workers. 
@@ -369,14 +369,14 @@
 
 \begin{cfa}
-void swap( uint victim_idx, uint my_idx  ) {
-    // Step 0:
-    work_queue * my_queue = request_queues[my_idx];
-    work_queue * vic_queue = request_queues[victim_idx];
-    // Step 2:
-    request_queues[my_idx] = 0p;
-    // Step 3:
-    request_queues[victim_idx] = my_queue;
-    // Step 4:
-    request_queues[my_idx] = vic_queue;
+void swap( uint victim_idx, uint my_idx ) {
+	// Step 0:
+	work_queue * my_queue = request_queues[my_idx];
+	work_queue * vic_queue = request_queues[victim_idx];
+	// Step 2:
+	request_queues[my_idx] = 0p;
+	// Step 3:
+	request_queues[victim_idx] = my_queue;
+	// Step 4:
+	request_queues[my_idx] = vic_queue;
 }
 \end{cfa}
@@ -389,41 +389,41 @@
 // This routine is atomic
 bool CAS( work_queue ** ptr, work_queue ** old, work_queue * new ) {
-    if ( *ptr != *old )
-        return false;
-    *ptr = new;
-    return true;
+	if ( *ptr != *old )
+		return false;
+	*ptr = new;
+	return true;
 }
 
 bool try_swap_queues( worker & this, uint victim_idx, uint my_idx ) with(this) {
-    // Step 0:
-    // request_queues is the shared array of all sharded queues
-    work_queue * my_queue = request_queues[my_idx];
-    work_queue * vic_queue = request_queues[victim_idx];
-
-    // Step 1:
-    // If either queue is 0p then they are in the process of being stolen
-    // 0p is CForAll's equivalent of C++'s nullptr
-    if ( vic_queue == 0p ) return false;
-
-    // Step 2:
-    // Try to set thief's queue ptr to be 0p.
-    // If this CAS fails someone stole thief's queue so return false
-    if ( !CAS( &request_queues[my_idx], &my_queue, 0p ) )
-        return false;
-    
-    // Step 3:
-    // Try to set victim queue ptr to be thief's queue ptr.
-    // If it fails someone stole the other queue, so fix up then return false
-    if ( !CAS( &request_queues[victim_idx], &vic_queue, my_queue ) ) {
-        request_queues[my_idx] = my_queue; // reset queue ptr back to prev val
-        return false;
-    }
-
-    // Step 4:
-    // Successfully swapped.
-    // Thief's ptr is 0p so no one will touch it
-    // Write back without CAS is safe
-    request_queues[my_idx] = vic_queue;
-    return true;
+	// Step 0:
+	// request_queues is the shared array of all sharded queues
+	work_queue * my_queue = request_queues[my_idx];
+	work_queue * vic_queue = request_queues[victim_idx];
+
+	// Step 1:
+	// If either queue is 0p then they are in the process of being stolen
+	// 0p is CForAll's equivalent of C++'s nullptr
+	if ( vic_queue == 0p ) return false;
+
+	// Step 2:
+	// Try to set thief's queue ptr to be 0p.
+	// If this CAS fails someone stole thief's queue so return false
+	if ( !CAS( &request_queues[my_idx], &my_queue, 0p ) )
+		return false;
+	
+	// Step 3:
+	// Try to set victim queue ptr to be thief's queue ptr.
+	// If it fails someone stole the other queue, so fix up then return false
+	if ( !CAS( &request_queues[victim_idx], &vic_queue, my_queue ) ) {
+		request_queues[my_idx] = my_queue; // reset queue ptr back to prev val
+		return false;
+	}
+
+	// Step 4:
+	// Successfully swapped.
+	// Thief's ptr is 0p so no one will touch it
+	// Write back without CAS is safe
+	request_queues[my_idx] = vic_queue;
+	return true;
 }
 \end{cfa}\label{c:swap}
@@ -706,9 +706,9 @@
 \label{t:StaticActorMessagePerformance}
 \begin{tabular}{*{5}{r|}r}
-    & \multicolumn{1}{c|}{\CFA (100M)} & \multicolumn{1}{c|}{CAF (10M)} & \multicolumn{1}{c|}{Akka (100M)} & \multicolumn{1}{c|}{\uC (100M)} & \multicolumn{1}{c@{}}{ProtoActor (100M)} \\
-    \hline																            
-    AMD		& \input{data/pykeSendStatic} \\
-    \hline																            
-    Intel	& \input{data/nasusSendStatic}
+	& \multicolumn{1}{c|}{\CFA (100M)} & \multicolumn{1}{c|}{CAF (10M)} & \multicolumn{1}{c|}{Akka (100M)} & \multicolumn{1}{c|}{\uC (100M)} & \multicolumn{1}{c@{}}{ProtoActor (100M)} \\
+	\hline																			
+	AMD		& \input{data/pykeSendStatic} \\
+	\hline																			
+	Intel	& \input{data/nasusSendStatic}
 \end{tabular}
 
@@ -719,9 +719,9 @@
 
 \begin{tabular}{*{5}{r|}r}
-    & \multicolumn{1}{c|}{\CFA (20M)} & \multicolumn{1}{c|}{CAF (2M)} & \multicolumn{1}{c|}{Akka (2M)} & \multicolumn{1}{c|}{\uC (20M)} & \multicolumn{1}{c@{}}{ProtoActor (2M)} \\
-    \hline																            
-    AMD		& \input{data/pykeSendDynamic} \\
-    \hline																            
-    Intel	& \input{data/nasusSendDynamic}
+	& \multicolumn{1}{c|}{\CFA (20M)} & \multicolumn{1}{c|}{CAF (2M)} & \multicolumn{1}{c|}{Akka (2M)} & \multicolumn{1}{c|}{\uC (20M)} & \multicolumn{1}{c@{}}{ProtoActor (2M)} \\
+	\hline																			
+	AMD		& \input{data/pykeSendDynamic} \\
+	\hline																			
+	Intel	& \input{data/nasusSendDynamic}
 \end{tabular}
 \end{table}
@@ -745,5 +745,5 @@
 In the static send benchmark all systems except CAF have static send costs that are in the same ballpark, only varying by ~70ns. 
 In the dynamic send benchmark all systems experience slower message sends, as expected due to the extra allocations. 
-However,  Akka and ProtoActor, slow down by a more significant margin than the \uC and \CFA. 
+However, Akka and ProtoActor slow down by a more significant margin than the \uC and \CFA. 
 This is likely a result of Akka and ProtoActor's garbage collection, which can suffer from hits in performance for allocation heavy workloads, whereas \uC and \CFA have explicit allocation/deallocation.
 
@@ -753,35 +753,27 @@
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusCFABalance-One.pgf}}
-        \subcaption{AMD \CFA Balance-One Benchmark}
-        \label{f:BalanceOneAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeCFABalance-One.pgf}}
-        \subcaption{Intel \CFA Balance-One Benchmark}
-        \label{f:BalanceOneIntel}
-    \end{subfigure}
-    \caption{The balance-one benchmark comparing stealing heuristics (lower is better).}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusCFABalance-Multi.pgf}}
-        \subcaption{AMD \CFA Balance-Multi Benchmark}
-        \label{f:BalanceMultiAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeCFABalance-Multi.pgf}}
-        \subcaption{Intel \CFA Balance-Multi Benchmark}
-        \label{f:BalanceMultiIntel}
-    \end{subfigure}
-    \caption{The balance-multi benchmark comparing stealing heuristics (lower is better).}
+	\centering
+	\subfloat[AMD \CFA Balance-One Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusCFABalance-One.pgf}}
+		\label{f:BalanceOneAMD}
+	}
+	\subfloat[Intel \CFA Balance-One Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeCFABalance-One.pgf}}
+		\label{f:BalanceOneIntel}
+	}
+	\caption{The balance-one benchmark comparing stealing heuristics (lower is better).}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\subfloat[AMD \CFA Balance-Multi Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusCFABalance-Multi.pgf}}
+		\label{f:BalanceMultiAMD}
+	}
+	\subfloat[Intel \CFA Balance-Multi Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeCFABalance-Multi.pgf}}
+		\label{f:BalanceMultiIntel}
+	}
+	\caption{The balance-multi benchmark comparing stealing heuristics (lower is better).}
 \end{figure}
 
@@ -817,18 +809,14 @@
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusExecutor.pgf}}
-        \subcaption{AMD Executor Benchmark}
-        \label{f:ExecutorAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeExecutor.pgf}}
-        \subcaption{Intel Executor Benchmark}
-        \label{f:ExecutorIntel}
-    \end{subfigure}
-    \caption{The executor benchmark comparing actor systems (lower is better).}
+	\centering
+	\subfloat[AMD Executor Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusExecutor.pgf}}
+		\label{f:ExecutorAMD}
+	}
+	\subfloat[Intel Executor Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeExecutor.pgf}}
+		\label{f:ExecutorIntel}
+	}
+	\caption{The executor benchmark comparing actor systems (lower is better).}
 \end{figure}
 
@@ -840,16 +828,14 @@
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusCFAExecutor.pgf}}
-        \subcaption{AMD \CFA Executor Benchmark}\label{f:cfaExecutorAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeCFAExecutor.pgf}}
-        \subcaption{Intel \CFA Executor Benchmark}\label{f:cfaExecutorIntel}
-    \end{subfigure}
-    \caption{Executor benchmark comparing \CFA stealing heuristics (lower is better).}
+	\centering
+	\subfloat[AMD \CFA Executor Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusCFAExecutor.pgf}}
+		\label{f:cfaExecutorAMD}
+	}
+	\subfloat[Intel \CFA Executor Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeCFAExecutor.pgf}}
+		\label{f:cfaExecutorIntel}
+	}
+	\caption{Executor benchmark comparing \CFA stealing heuristics (lower is better).}
 \end{figure}
 
@@ -857,16 +843,14 @@
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusRepeat.pgf}}
-        \subcaption{AMD Repeat Benchmark}\label{f:RepeatAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeRepeat.pgf}}
-        \subcaption{Intel Repeat Benchmark}\label{f:RepeatIntel}
-    \end{subfigure}
-    \caption{The repeat benchmark comparing actor systems (lower is better).}
+	\centering
+	\subfloat[AMD Repeat Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusRepeat.pgf}}
+		\label{f:RepeatAMD}
+	}
+	\subfloat[Intel Repeat Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeRepeat.pgf}}
+		\label{f:RepeatIntel}
+	}
+	\caption{The repeat benchmark comparing actor systems (lower is better).}
 \end{figure}
 
@@ -881,16 +865,14 @@
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusCFARepeat.pgf}}
-        \subcaption{AMD \CFA Repeat Benchmark}\label{f:cfaRepeatAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeCFARepeat.pgf}}
-        \subcaption{Intel \CFA Repeat Benchmark}\label{f:cfaRepeatIntel}
-    \end{subfigure}
-    \caption{The repeat benchmark comparing \CFA stealing heuristics (lower is better).}
+	\centering
+	\subfloat[AMD \CFA Repeat Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusCFARepeat.pgf}}
+		\label{f:cfaRepeatAMD}
+	}
+	\subfloat[Intel \CFA Repeat Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeCFARepeat.pgf}}
+		\label{f:cfaRepeatIntel}
+	}
+	\caption{The repeat benchmark comparing \CFA stealing heuristics (lower is better).}
 \end{figure}
 
@@ -913,17 +895,17 @@
 
 \begin{table}[t]
-    \centering
-    \setlength{\extrarowheight}{2pt}
-    \setlength{\tabcolsep}{5pt}
-    
-    \caption{Executor Program Memory High Watermark}
-    \label{t:ExecutorMemory}
-    \begin{tabular}{*{5}{r|}r}
-        & \multicolumn{1}{c|}{\CFA} & \multicolumn{1}{c|}{CAF} & \multicolumn{1}{c|}{Akka} & \multicolumn{1}{c|}{\uC} & \multicolumn{1}{c@{}}{ProtoActor} \\
-        \hline																            
-        AMD		& \input{data/pykeExecutorMem} \\
-        \hline																            
-        Intel	& \input{data/nasusExecutorMem}
-    \end{tabular}
+	\centering
+	\setlength{\extrarowheight}{2pt}
+	\setlength{\tabcolsep}{5pt}
+	
+	\caption{Executor Program Memory High Watermark}
+	\label{t:ExecutorMemory}
+	\begin{tabular}{*{5}{r|}r}
+		& \multicolumn{1}{c|}{\CFA} & \multicolumn{1}{c|}{CAF} & \multicolumn{1}{c|}{Akka} & \multicolumn{1}{c|}{\uC} & \multicolumn{1}{c@{}}{ProtoActor} \\
+		\hline																			
+		AMD		& \input{data/pykeExecutorMem} \\
+		\hline																			
+		Intel	& \input{data/nasusExecutorMem}
+	\end{tabular}
 \end{table}
 
@@ -951,30 +933,30 @@
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusMatrix.pgf}}
-        \subcaption{AMD Matrix Benchmark}\label{f:MatrixAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeMatrix.pgf}}
-        \subcaption{Intel Matrix Benchmark}\label{f:MatrixIntel}
-    \end{subfigure}
-    \caption{The matrix benchmark comparing actor systems (lower is better).}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasusCFAMatrix.pgf}}
-        \subcaption{AMD \CFA Matrix Benchmark}\label{f:cfaMatrixAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pykeCFAMatrix.pgf}}
-        \subcaption{Intel \CFA Matrix Benchmark}\label{f:cfaMatrixIntel}
-    \end{subfigure}
-    \caption{The matrix benchmark comparing \CFA stealing heuristics (lower is better).}
-\end{figure}
+	\centering
+	\subfloat[AMD Matrix Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusMatrix.pgf}}
+		\label{f:MatrixAMD}
+	}
+	\subfloat[Intel Matrix Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeMatrix.pgf}}
+		\label{f:MatrixIntel}
+	}
+	\caption{The matrix benchmark comparing actor systems (lower is better).}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\subfloat[AMD \CFA Matrix Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasusCFAMatrix.pgf}}
+		\label{f:cfaMatrixAMD}
+	}
+	\subfloat[Intel \CFA Matrix Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pykeCFAMatrix.pgf}}
+		\label{f:cfaMatrixIntel}
+	}
+	\caption{The matrix benchmark comparing \CFA stealing heuristics (lower is better).}
+\end{figure}
+
+% Local Variables: %
+% tab-width: 4 %
+% End: %
Index: doc/theses/colby_parsons_MMAth/text/channels.tex
===================================================================
--- doc/theses/colby_parsons_MMAth/text/channels.tex	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ doc/theses/colby_parsons_MMAth/text/channels.tex	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -5,76 +5,76 @@
 % ======================================================================
 
-Channels were first introduced by Hoare in his paper Communicating Sequentual Processes~\cite{Hoare78}, where he proposes a concurrent language that communicates across processes using input/output channels to send data. 
-Channels are a concurrent language feature used to perform message passing concurrency, a model of concurrency where threads communicate by sending data as messages, and synchronizing via the message passing mechanism. 
-This is an alternative to shared memory concurrency, where threads can communicate directly by changing shared memory state. 
-Most modern concurrent programming languages do not subscribe to just one style of communication between threads, and provide features that support both. 
+Channels were first introduced by Hoare in his paper Communicating Sequential Processes~\cite{Hoare78}, where he proposes a concurrent language that communicates across processes using input/output channels to send data.
+Channels are a concurrent language feature used to perform message passing concurrency, a model of concurrency where threads communicate by sending data as messages, and synchronizing via the message passing mechanism.
+This is an alternative to shared memory concurrency, where threads can communicate directly by changing shared memory state.
+Most modern concurrent programming languages do not subscribe to just one style of communication between threads, and provide features that support both.
 Channels as a programming language feature has been popularized in recent years due to the language Go, which encourages the use of channels as its fundamental concurrent feature.
 
 \section{Producer-Consumer Problem}
-Most channels in modern programming languages are built on top of a shared memory buffer. 
-While it is possible to create a channel that contains an unbounded buffer, most implementations opt to only support a fixed size channel, where the size is given at the time of channel creation. 
-This turns the implementation of a channel into the producer-consumer problem. 
-The producer-consumer problem, also known as the bounded buffer problem, was introduced by Dijkstra in his book Cooperating Sequential Processes\cite{Dijkstra65}. 
-In the problem threads interact with the buffer in two ways, either consuming values by removing them from the buffer, or producing values and inserting them in the buffer. 
-The buffer needs to be protected from concurrent access since each item in the buffer should only be produced and consumed once. 
+Most channels in modern programming languages are built on top of a shared memory buffer.
+While it is possible to create a channel that contains an unbounded buffer, most implementations opt to only support a fixed size channel, where the size is given at the time of channel creation.
+This turns the implementation of a channel into the producer-consumer problem.
+The producer-consumer problem, also known as the bounded buffer problem, was introduced by Dijkstra in his book Cooperating Sequential Processes\cite{Dijkstra65}.
+In the problem threads interact with the buffer in two ways, either consuming values by removing them from the buffer, or producing values and inserting them in the buffer.
+The buffer needs to be protected from concurrent access since each item in the buffer should only be produced and consumed once.
 Additionally, a consumer can only remove from a non-empty buffer and a producer can only insert into a non-full buffer.
 
 \section{First-Come First-Served}
-The channel implementations that will be discussed are \gls{fcfs}. 
-This term was defined by Lamport~\cite{Lamport74}. 
-\gls{fcfs} is defined in relation to a doorway~\cite[p.~330]{Lamport86II}, which is the point at which an ordering among threads can be established. 
-Given this doorway, a critical section is said to be \gls{fcfs}, if threads access the shared resource in the order they proceed through the doorway. 
-\gls{fcfs} is a fairness property which prevents unequal access to the shared resource and prevents starvation, however it can come at a cost. 
-Implementing an algorithm with \gls{fcfs} can lead to double blocking, where entering threads may need to block to allow other threads to proceed first, resulting in blocking both inside and outside the doorway. 
+The channel implementations that will be discussed are \gls{fcfs}.
+This term was defined by Lamport~\cite{Lamport74}.
+\gls{fcfs} is defined in relation to a doorway~\cite[p.~330]{Lamport86II}, which is the point at which an ordering among threads can be established.
+Given this doorway, a critical section is said to be \gls{fcfs}, if threads access the shared resource in the order they proceed through the doorway.
+\gls{fcfs} is a fairness property which prevents unequal access to the shared resource and prevents starvation, however it can come at a cost.
+Implementing an algorithm with \gls{fcfs} can lead to double blocking, where entering threads may need to block to allow other threads to proceed first, resulting in blocking both inside and outside the doorway.
+As such, algorithms that are not \gls{fcfs} may be more performant, but that performance comes with the downside of likely introducing starvation and unfairness.
 
 \section{Channel Implementation}
-The channel implementation in \CFA is a near carbon copy of the Go implementation. 
-Experimentation was conducted that varied the producer-consumer problem algorithm and lock type used inside the channel. 
-With the exception of non-\gls{fcfs} algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant that Go's choice of algorithm and lock implementation. 
+The channel implementation in \CFA is a near carbon copy of the Go implementation.
+Experimentation was conducted that varied the producer-consumer problem algorithm and lock type used inside the channel.
+With the exception of non-\gls{fcfs} algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant than Go's choice of algorithm and lock implementation.
 As such the research contributions added by \CFA's channel implementation lie in the realm of safety and productivity features.
 
 \section{Safety and Productivity}
-Channels in \CFA come with safety and productivity features to aid users. 
+Channels in \CFA come with safety and productivity features to aid users.
 The features include the following.
 
 \begin{itemize}
-\item Toggle-able statistic collection on channel behvaiour that counts channel operations, and the number of the operations that block. 
+\item Toggle-able statistic collection on channel behaviour that counts channel operations, and the number of operations that block.
 Tracking blocking operations helps users tune their channel size or channel usage when the channel is used for buffering, where the aim is to have as few blocking operations as possible.
-\item Deadlock detection on deallocation of the channel. 
+\item Deadlock detection on deallocation of the channel.
 If any threads are blocked inside the channel when it terminates it is detected and informs the user, as this would cause a deadlock.
-\item A \code{flush} routine that delivers copies of an element to all waiting consumers, flushing the buffer. 
-Programmers can use this to easily to broadcast data to multiple consumers. 
+\item A \code{flush} routine that delivers copies of an element to all waiting consumers, flushing the buffer.
+Programmers can use this to easily broadcast data to multiple consumers.
 Additionally, the \code{flush} routine is more performant then looping around the \code{insert} operation since it can deliver the elements without having to reaquire mutual exclusion for each element sent.
 \end{itemize}
 
-The other safety and productivity feature of \CFA channels deals with concurrent termination. 
-Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed. 
-The difficulty of graceful termination often arises from the usage of synchronization primitives which need to be handled carefully during shutdown. 
-It is easy to deadlock during termination if threads are left behind on synchronization primitives. 
-Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is race between one thread checking the state of a concurrent object and another thread changing the state. 
-\gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it. 
-Channels are a particularly hard synchronization primitive to terminate since both sending and receiving off a channel can block. 
+The other safety and productivity feature of \CFA channels deals with concurrent termination.
+Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed.
+The difficulty of graceful termination often arises from the usage of synchronization primitives which need to be handled carefully during shutdown.
+It is easy to deadlock during termination if threads are left behind on synchronization primitives.
+Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is a race between one thread checking the state of a concurrent object and another thread changing the state.
+\gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it.
+Channels are a particularly hard synchronization primitive to terminate since both sending and receiving off a channel can block.
 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads trying to perform the termination may end up unexpectedly blocking in their attempt to help other threads exit the system.
 
 % C_TODO: add reference to select chapter, add citation to go channels info
-Go channels provide a set of tools to help with concurrent shutdown. 
-Channels in Go have a \code{close} operation and a \code{select} statement that both can be used to help threads terminate. 
-The \code{select} statement will be discussed in \ref{}, where \CFA's \code{waituntil} statement will be compared with the Go \code{select} statement. 
-The \code{close} operation on a channel in Go changes the state of the channel. 
-When a channel is closed, sends to the channel will panic and additional calls to \code{close} will panic. 
-Receives are handled differently where receivers will never block on a closed channel and will continue to remove elements from the channel. 
-Once a channel is empty, receivers can continue to remove elements, but will receive the zero-value version of the element type. 
-To aid in avoiding unwanted zero-value elements, Go provides the ability to iterate over a closed channel to remove the remaining elements. 
-These design choices for Go channels enforce a specific interaction style with channels during termination, where careful thought is needed to ensure that additional \code{close} calls don't occur and that no sends occur after channels are closed. 
-These design choices fit Go's paradigm of error management, where users are expected to explicitly check for errors, rather than letting errors occur and catching them. 
-If errors need to occur in Go, return codes are used to pass error information where they are needed. 
+Go channels provide a set of tools to help with concurrent shutdown.
+Channels in Go have a \code{close} operation and a \code{select} statement that both can be used to help threads terminate.
+The \code{select} statement will be discussed in \ref{}, where \CFA's \code{waituntil} statement will be compared with the Go \code{select} statement.
+The \code{close} operation on a channel in Go changes the state of the channel.
+When a channel is closed, sends to the channel will panic and additional calls to \code{close} will panic.
+Receives are handled differently where receivers will never block on a closed channel and will continue to remove elements from the channel.
+Once a channel is empty, receivers can continue to remove elements, but will receive the zero-value version of the element type.
+To aid in avoiding unwanted zero-value elements, Go provides the ability to iterate over a closed channel to remove the remaining elements.
+These design choices for Go channels enforce a specific interaction style with channels during termination, where careful thought is needed to ensure that additional \code{close} calls don't occur and that no sends occur after channels are closed.
+These design choices fit Go's paradigm of error management, where users are expected to explicitly check for errors, rather than letting errors occur and catching them.
+If errors need to occur in Go, return codes are used to pass error information where they are needed.
 Note that panics in Go can be caught, but it is not considered an idiomatic way to write Go programs.
 
-While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired. 
-Since both closing and sending panic, once a channel is closed, a user often has to synchronize the senders to a channel before the channel can be closed to avoid panics. 
-However, in doing so it renders the \code{close} operation nearly useless, as the only utilities it provides are the ability to ensure that receivers no longer block on the channel, and will receive zero-valued elements. 
-This can be useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is preferred, then \code{close} only provides its non-blocking feature. 
-To avoid \gls{toctou} issues during shutdown, a busy wait with a \code{select} statement is often used to add or remove elements from a channel. 
+While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired.
+Since both closing and sending panic, once a channel is closed, a user often has to synchronize the senders to a channel before the channel can be closed to avoid panics.
+However, in doing so it renders the \code{close} operation nearly useless, as the only utilities it provides are the ability to ensure that receivers no longer block on the channel, and will receive zero-valued elements.
+This can be useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is preferred, then \code{close} only provides its non-blocking feature.
+To avoid \gls{toctou} issues during shutdown, a busy wait with a \code{select} statement is often used to add or remove elements from a channel.
 Due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown.
 
@@ -82,72 +82,72 @@
 As such \CFA uses an exception based approach to channel shutdown that is symmetric for both producers and consumers, and supports graceful shutdown. Exceptions in \CFA support both termination and resumption. Termination exceptions operate in the same way as exceptions seen in many popular programming languages such as \CC, Python and Java.
 Resumption exceptions are a style of exception that when caught run the corresponding catch block in the same way that termination exceptions do.
-The difference between the exception handling mechanisms arises after the exception is handled. 
-In termination handling, the control flow continues into the code following the catch after the exception is handled. 
-In resumption handling, the control flow returns to the site of the \code{throw}, allowing the control to continue where it left off. 
-Note that in resumption, since control can return to the point of error propagation, the stack is not unwound during resumption propagation. 
-In \CFA if a resumption is not handled, it is reraised as a termination. 
+The difference between the exception handling mechanisms arises after the exception is handled.
+In termination handling, the control flow continues into the code following the catch after the exception is handled.
+In resumption handling, the control flow returns to the site of the \code{throw}, allowing the control to continue where it left off.
+Note that in resumption, since control can return to the point of error propagation, the stack is not unwound during resumption propagation.
+In \CFA if a resumption is not handled, it is reraised as a termination.
 This mechanism can be used to create a flexible and robust termination system for channels.
 
-When a channel in \CFA is closed, all subsequent calls to the channel will throw a resumption exception at the caller. 
-If the resumption is handled, then the caller will proceed to attempt to complete their operation. 
-If the resumption is not handled it is then rethrown as a termination exception. 
-Or, if the resumption is handled, but the subsequent attempt at an operation would block, a termination exception is thrown. 
-These termination exceptions allow for non-local transfer that can be used to great effect to eagerly and gracefully shut down a thread. 
-When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them. 
-The resumption exception, \code{channel_closed}, has a couple fields to aid in handling the exception. 
-The exception contains a pointer to the channel it was thrown from, and a pointer to an element. 
-In exceptions thrown from remove the element pointer will be null. 
-In the case of insert the element pointer points to the element that the thread attempted to insert. 
-This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler. 
-Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based which channel and operation failed. 
-Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception. 
-It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination that the approach used in Go. 
+When a channel in \CFA is closed, all subsequent calls to the channel will throw a resumption exception at the caller.
+If the resumption is handled, then the caller will proceed to attempt to complete their operation.
+If the resumption is not handled it is then rethrown as a termination exception.
+Or, if the resumption is handled, but the subsequent attempt at an operation would block, a termination exception is thrown.
+These termination exceptions allow for non-local transfer that can be used to great effect to eagerly and gracefully shut down a thread.
+When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them.
+The resumption exception, \code{channel_closed}, has a couple fields to aid in handling the exception.
+The exception contains a pointer to the channel it was thrown from, and a pointer to an element.
+In exceptions thrown from remove, the element pointer will be null.
+In the case of insert, the element pointer points to the element that the thread attempted to insert.
+This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler.
+Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based on which channel and operation failed.
+Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception.
+It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination than the approach used in Go.
 This should not be an issue, since termination is rarely on the fast path of an application and ensuring that termination can be implemented correctly with ease is the aim of the exception approach.
 
-To highlight the differences between \CFA's and Go's close semantics, an example program is presented. 
-The program is a barrier implemented using two channels shown in Listings~\ref{l:cfa_chan_bar} and \ref{l:go_chan_bar}. 
-Both of these exaples are implmented using \CFA syntax so that they can be easily compared. 
-Listing~\ref{l:go_chan_bar} uses go-style channel close semantics and Listing~\ref{l:cfa_chan_bar} uses \CFA close semantics. 
-In this problem it is infeasible to use the Go \code{close} call since all tasks are both potentially producers and consumers, causing panics on close to be unavoidable. 
-As such in Listing~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of $-1$ has to be used to indicate to threads that they need to leave the barrier. 
-This sentinel value has to be checked at two points. 
-Furthermore, an additional flag \code{done} is needed to communicate to threads once they have left the barrier that they are done. 
-This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs. 
-In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate. 
-This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation. 
+To highlight the differences between \CFA's and Go's close semantics, an example program is presented.
+The program is a barrier implemented using two channels shown in Listings~\ref{l:cfa_chan_bar} and \ref{l:go_chan_bar}.
+Both of these examples are implemented using \CFA syntax so that they can be easily compared.
+Listing~\ref{l:go_chan_bar} uses Go-style channel close semantics and Listing~\ref{l:cfa_chan_bar} uses \CFA close semantics.
+In this problem it is infeasible to use the Go \code{close} call since all tasks are both potentially producers and consumers, causing panics on close to be unavoidable.
+As such in Listing~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of $-1$ has to be used to indicate to threads that they need to leave the barrier.
+This sentinel value has to be checked at two points.
+Furthermore, an additional flag \code{done} is needed to communicate to threads once they have left the barrier that they are done.
+This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs.
+In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate.
+This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation.
 Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier.
 
 \begin{cfa}[tabsize=3,caption={\CFA channel barrier termination},label={l:cfa_chan_bar}]
 struct barrier {
-    channel( int ) barWait;
-    channel( int ) entryWait;
-    int size;
+	channel( int ) barWait;
+	channel( int ) entryWait;
+	int size;
 }
 void ?{}(barrier & this, int size) with(this) {
-    barWait{size};
-    entryWait{size};
-    this.size = size;
-    for ( j; size )
-        insert( *entryWait, j );
+	barWait{size};
+	entryWait{size};
+	this.size = size;
+	for ( j; size )
+		insert( *entryWait, j );
 }
 
 void flush(barrier & this) with(this) {
-    close(barWait);
-    close(entryWait);
+	close(barWait);
+	close(entryWait);
 }
 void wait(barrier & this) with(this) {
-    int ticket = remove( *entryWait );
-    if ( ticket == size - 1 ) {
-        for ( j; size - 1 )
-            insert( *barWait, j );
-        return;
-    }
-    ticket = remove( *barWait );
-
-    // last one out
-    if ( size == 1 || ticket == size - 2 ) {
-        for ( j; size )
-            insert( *entryWait, j );
-    }
+	int ticket = remove( *entryWait );
+	if ( ticket == size - 1 ) {
+		for ( j; size - 1 )
+			insert( *barWait, j );
+		return;
+	}
+	ticket = remove( *barWait );
+
+	// last one out
+	if ( size == 1 || ticket == size - 2 ) {
+		for ( j; size )
+			insert( *entryWait, j );
+	}
 }
 barrier b{Tasks};
@@ -155,19 +155,19 @@
 // thread main
 void main(Task & this) {
-    try {
-        for ( ;; ) {
-            wait( b );
-        }
-    } catch ( channel_closed * e ) {}
+	try {
+		for ( ;; ) {
+			wait( b );
+		}
+	} catch ( channel_closed * e ) {}
 }
 
 int main() {
-    {
-        Task t[Tasks];
-
-        sleep(10`s);
-        flush( b );
-    } // wait for tasks to terminate
-    return 0;
+	{
+		Task t[Tasks];
+
+		sleep(10`s);
+		flush( b );
+	} // wait for tasks to terminate
+	return 0;
 }
 \end{cfa}
@@ -176,42 +176,42 @@
 
 struct barrier {
-    channel( int ) barWait;
-    channel( int ) entryWait;
-    int size;
+	channel( int ) barWait;
+	channel( int ) entryWait;
+	int size;
 }
 void ?{}(barrier & this, int size) with(this) {
-    barWait{size + 1};
-    entryWait{size + 1};
-    this.size = size;
-    for ( j; size )
-        insert( *entryWait, j );
+	barWait{size + 1};
+	entryWait{size + 1};
+	this.size = size;
+	for ( j; size )
+		insert( *entryWait, j );
 }
 
 void flush(barrier & this) with(this) {
-    insert( *entryWait, -1 );
-    insert( *barWait, -1 );
+	insert( *entryWait, -1 );
+	insert( *barWait, -1 );
 }
 void wait(barrier & this) with(this) {
-    int ticket = remove( *entryWait );
-    if ( ticket == -1 ) {
-        insert( *entryWait, -1 );
-        return;
-    }
-    if ( ticket == size - 1 ) {
-        for ( j; size - 1 )
-            insert( *barWait, j );
-        return;
-    }
-    ticket = remove( *barWait );
-    if ( ticket == -1 ) {
-        insert( *barWait, -1 );
-        return;
-    }
-
-    // last one out
-    if ( size == 1 || ticket == size - 2 ) {
-        for ( j; size )
-            insert( *entryWait, j );
-    }
+	int ticket = remove( *entryWait );
+	if ( ticket == -1 ) {
+		insert( *entryWait, -1 );
+		return;
+	}
+	if ( ticket == size - 1 ) {
+		for ( j; size - 1 )
+			insert( *barWait, j );
+		return;
+	}
+	ticket = remove( *barWait );
+	if ( ticket == -1 ) {
+		insert( *barWait, -1 );
+		return;
+	}
+
+	// last one out
+	if ( size == 1 || ticket == size - 2 ) {
+		for ( j; size )
+			insert( *entryWait, j );
+	}
 }
 barrier b;
@@ -220,26 +220,26 @@
 // thread main
 void main(Task & this) {
-    for ( ;; ) {
-        if ( done ) break;
-        wait( b );
-    }
+	for ( ;; ) {
+		if ( done ) break;
+		wait( b );
+	}
 }
 
 int main() {
-    {
-        Task t[Tasks];
-
-        sleep(10`s);
-        done = true;
-
-        flush( b );
-    } // wait for tasks to terminate
-    return 0;
+	{
+		Task t[Tasks];
+
+		sleep(10`s);
+		done = true;
+
+		flush( b );
+	} // wait for tasks to terminate
+	return 0;
 }
 \end{cfa}
 
-In Listing~\ref{l:cfa_resume} an example of channel closing with resumption is used. 
-This program uses resumption in the \code{Consumer} thread main to ensure that all elements in the channel are removed before the consumer thread terminates. 
-The producer only has a \code{catch} so the moment it receives an exception it terminates, whereas the consumer will continue to remove from the closed channel via handling resumptions until the buffer is empty, which then throws a termination exception. 
+In Listing~\ref{l:cfa_resume} an example of channel closing with resumption is used.
+This program uses resumption in the \code{Consumer} thread main to ensure that all elements in the channel are removed before the consumer thread terminates.
+The producer only has a \code{catch} so the moment it receives an exception it terminates, whereas the consumer will continue to remove from the closed channel via handling resumptions until the buffer is empty, which then throws a termination exception.
 If the same program was implemented in Go it would require explicit synchronization with both producers and consumers by some mechanism outside the channel to ensure that all elements were removed before task termination.
 
@@ -249,34 +249,34 @@
 // Consumer thread main
 void main(Consumer & this) {
-    size_t runs = 0;
-    try {
-        for ( ;; ) {
-            remove( chan );
-        }
-    } catchResume ( channel_closed * e ) {}
-    catch ( channel_closed * e ) {} 
+	size_t runs = 0;
+	try {
+		for ( ;; ) {
+			remove( chan );
+		}
+	} catchResume ( channel_closed * e ) {}
+	catch ( channel_closed * e ) {}
 }
 
 // Producer thread main
 void main(Producer & this) {
-    int j = 0;
-    try {
-        for ( ;;j++ ) {
-            insert( chan, j );
-        }
-    } catch ( channel_closed * e ) {} 
+	int j = 0;
+	try {
+		for ( ;;j++ ) {
+			insert( chan, j );
+		}
+	} catch ( channel_closed * e ) {}
 }
 
 int main( int argc, char * argv[] ) {
-    {
-        Consumers c[4];
-        Producer p[4];
-
-        sleep(10`s);
-
-        for ( i; Channels )
-            close( channels[i] );
-    }
-    return 0;
+	{
+		Consumers c[4];
+		Producer p[4];
+
+		sleep(10`s);
+
+		for ( i; Channels )
+			close( channels[i] );
+	}
+	return 0;
 }
 \end{cfa}
@@ -284,27 +284,29 @@
 \section{Performance}
 
-Given that the base implementation of the \CFA channels is very similar to the Go implementation, this section aims to show that the performance of the two implementations are comparable. 
-One microbenchmark is conducted to compare Go and \CFA. 
-The benchmark is a ten second experiment where producers and consumers operate on a channel in parallel and throughput is measured. 
-The number of cores is varied to measure how throughtput scales. 
-The cores are divided equally between producers and consumers, with one producer or consumer owning each core. 
-The results of the benchmark are shown in Figure~\ref{f:chanPerf}. 
-The performance of Go and \CFA channels on this microbenchmark is comparable. 
+Given that the base implementation of the \CFA channels is very similar to the Go implementation, this section aims to show that the performance of the two implementations are comparable.
+One microbenchmark is conducted to compare Go and \CFA.
+The benchmark is a ten second experiment where producers and consumers operate on a channel in parallel and throughput is measured.
+The number of cores is varied to measure how throughput scales.
+The cores are divided equally between producers and consumers, with one producer or consumer owning each core.
+The results of the benchmark are shown in Figure~\ref{f:chanPerf}.
+The performance of Go and \CFA channels on this microbenchmark is comparable.
 Note, it is expected for the performance to decline as the number of cores increases as the channel operations all occur in a critical section so an increase in cores results in higher contention with no increase in parallelism.
 
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasus_Channel_Contention.pgf}}
-        \subcaption{AMD \CFA Channel Benchmark}\label{f:chanAMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pyke_Channel_Contention.pgf}}
-        \subcaption{Intel \CFA Channel Benchmark}\label{f:chanIntel}
-    \end{subfigure}
-    \caption{The channel contention benchmark comparing \CFA and Go channel throughput (higher is better).}
-    \label{f:chanPerf}
+	\centering
+	\subfloat[AMD \CFA Channel Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasus_Channel_Contention.pgf}}
+		\label{f:chanAMD}
+	}
+	\subfloat[Intel \CFA Channel Benchmark]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pyke_Channel_Contention.pgf}}
+		\label{f:chanIntel}
+	}
+	\caption{The channel contention benchmark comparing \CFA and Go channel throughput (higher is better).}
+	\label{f:chanPerf}
 \end{figure}
+
+% Local Variables: %
+% tab-width: 4 %
+% End: %
Index: doc/theses/colby_parsons_MMAth/text/mutex_stmt.tex
===================================================================
--- doc/theses/colby_parsons_MMAth/text/mutex_stmt.tex	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ doc/theses/colby_parsons_MMAth/text/mutex_stmt.tex	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -5,163 +5,415 @@
 % ======================================================================
 
-The mutex statement is a concurrent language feature that aims to support easy lock usage. 
-The mutex statement is in the form of a clause and following statement, similar to a loop or conditional statement. 
-In the clause the mutex statement accepts a number of lockable objects, and then locks them for the duration of the following statement. 
-The locks are acquired in a deadlock free manner and released using \gls{raii}. 
-The mutex statement provides an avenue for easy lock usage in the common case where locks are used to wrap a critical section. 
-Additionally, it provides the safety guarantee of deadlock-freedom, both by acquiring the locks in a deadlock-free manner, and by ensuring that the locks release on error, or normal program execution via \gls{raii}.
-
-\begin{cfa}[tabsize=3,caption={\CFA mutex statement usage},label={l:cfa_mutex_ex}]
+The mutual exclusion problem was introduced by Dijkstra in 1965~\cite{Dijkstra65,Dijkstra65a}.
+There are several concurrent processes or threads that communicate by shared variables and from time to time need exclusive access to shared resources.
+A shared resource and code manipulating it form a pairing called a \Newterm{critical section (CS)}, which is a many-to-one relationship;
+\eg if multiple files are being written to by multiple threads, only the pairings of simultaneous writes to the same files are CSs.
+Regions of code where the thread is not interested in the resource are combined into the \Newterm{non-critical section (NCS)}.
+
+Exclusive access to a resource is provided by \Newterm{mutual exclusion (MX)}.
+MX is implemented by some form of \emph{lock}, where the CS is bracketed by lock procedures @acquire@ and @release@.
+Threads execute a loop of the form:
+\begin{cfa}
+loop of $thread$ p:
+	NCS;
+	acquire( lock );  CS;  release( lock ); // protected critical section with MX
+end loop.
+\end{cfa}
+MX guarantees there is never more than one thread in the CS.
+MX must also guarantee eventual progress: when there are competing threads attempting access, eventually some competing thread succeeds, \ie acquires the CS, releases it, and returns to the NCS.
+% Lamport \cite[p.~329]{Lam86mx} extends this requirement to the exit protocol.
+A stronger constraint is that every thread that calls @acquire@ eventually succeeds after some reasonable bounded time.
+
+\section{Monitor}
+\CFA provides a high-level locking object, called a \Newterm{monitor}, an elegant, efficient, high-level mechanism for mutual exclusion and synchronization for shared-memory systems.
+First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, several concurrent programming languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, \uC~\cite{Buhr92a} and Java~\cite{Java}.
+In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as mutex locks or semaphores to manually implement a monitor.
+
+Figure~\ref{f:AtomicCounter} shows a \CFA and Java monitor implementing an atomic counter.
+A \Newterm{monitor} is a programming technique that implicitly binds mutual exclusion to static function scope by call and return.
+Lock mutual exclusion, defined by acquire/release calls, is independent of lexical context (analogous to block versus heap storage allocation).
+Restricting acquire and release points in a monitor eases programming, comprehension, and maintenance, at a slight cost in flexibility and efficiency.
+Ultimately, a monitor is implemented using a combination of basic locks and atomic instructions.
+
+\begin{figure}
+\centering
+
+\begin{lrbox}{\myboxA}
+\begin{cfa}[aboveskip=0pt,belowskip=0pt]
+@monitor@ Aint {
+	int cnt;
+};
+int ++?( Aint & @mutex@ m ) { return ++m.cnt; }
+int ?=?( Aint & @mutex@ l, int r ) { l.cnt = r; }
+int ?=?(int & l, Aint & r) { l = r.cnt; }
+
+int i = 0, j = 0;
+Aint x = { 0 }, y = { 0 };	$\C[1.5in]{// no mutex}$
+++x;  ++y;			$\C{// mutex}$
+x = 2;  y = i;		$\C{// mutex}$
+i = x;  j = y;		$\C{// no mutex}\CRT$
+\end{cfa}
+\end{lrbox}
+
+\begin{lrbox}{\myboxB}
+\begin{java}[aboveskip=0pt,belowskip=0pt]
+class Aint {
+	private int cnt;
+	public Aint( int init ) { cnt = init; }
+	@synchronized@ public int inc() { return ++cnt; }
+	@synchronized@ public void set( int r ) {cnt = r;}
+	public int get() { return cnt; }
+}
+int i = 0, j = 0;
+Aint x = new Aint( 0 ), y = new Aint( 0 );
+x.inc();  y.inc();
+x.set( 2 );  y.set( i );
+i = x.get();  j = y.get();
+\end{java}
+\end{lrbox}
+
+\subfloat[\CFA]{\label{f:AtomicCounterCFA}\usebox\myboxA}
+\hspace*{3pt}
+\vrule
+\hspace*{3pt}
+\subfloat[Java]{\label{f:AtomicCounterJava}\usebox\myboxB}
+\caption{Atomic integer counter}
+\label{f:AtomicCounter}
+\end{figure}
+
+Like Java, \CFA monitors have \Newterm{multi-acquire} semantics so the thread in the monitor may acquire it multiple times without deadlock, allowing recursion and calling other MX functions.
+For robustness, \CFA monitors ensure the monitor lock is released regardless of how an acquiring function ends, normal or exceptional, and returning a shared variable is safe via copying before the lock is released.
+Monitor objects can be passed through multiple helper functions without acquiring mutual exclusion, until a designated function associated with the object is called.
+\CFA functions are designated MX by one or more pointer/reference parameters having qualifier @mutex@.
+Java members are designated MX with \lstinline[language=java]{synchronized}, which applies only to the implicit receiver parameter.
+In the example, the increment and setter operations need mutual exclusion, while the read-only getter operation is not MX because reading an integer is atomic.
+
+As stated, the non-object-oriented nature of \CFA monitors allows a function to acquire multiple mutex objects.
+For example, the bank-transfer problem requires locking two bank accounts to safely debit and credit money between accounts.
+\begin{cfa}
+monitor BankAccount {
+	int balance;
+};
+void deposit( BankAccount & mutex b, int deposit ) with( b ) {
+	balance += deposit;
+}
+void transfer( BankAccount & mutex my, BankAccount & mutex your, int me2you ) {
+	deposit( my, -me2you );		$\C{// debit}$
+	deposit( your, me2you );	$\C{// credit}$
+}
+\end{cfa}
+The \CFA monitor implementation ensures multi-lock acquisition is done in a deadlock-free manner regardless of the number of MX parameters and monitor arguments.
+
+
+\section{\lstinline{mutex} statement}
+Restricting implicit lock acquisition to function entry and exit can be awkward for certain problems.
+To increase locking flexibility, some languages introduce a mutex statement.
+\VRef[Figure]{f:ReadersWriter} shows the outline of a reader/writer lock written as a \CFA monitor and mutex statements.
+(The exact lock implementation is irrelevant.)
+The @read@ and @write@ functions are called with a reader/write lock and any arguments to perform reading or writing.
+The @read@ function is not MX because multiple readers can read simultaneously.
+MX is acquired within @read@ by calling the (nested) helper functions @StartRead@ and @EndRead@ or executing the mutex statements.
+Between the calls or statements, reads can execute simultaneously within the body of @read@.
+The @write@ function does not require refactoring because writing is a CS.
+The mutex-statement version is better because it has fewer names, less argument/parameter passing, and can possibly hold MX for a shorter duration.
+
+\begin{figure}
+\centering
+
+\begin{lrbox}{\myboxA}
+\begin{cfa}[aboveskip=0pt,belowskip=0pt]
+monitor RWlock { ... };
+void read( RWlock & rw, ... ) {
+	void StartRead( RWlock & @mutex@ rw ) { ... }
+	void EndRead( RWlock & @mutex@ rw ) { ... }
+	StartRead( rw );
+	... // read without MX
+	EndRead( rw );
+}
+void write( RWlock & @mutex@ rw, ... ) {
+	... // write with MX
+}
+\end{cfa}
+\end{lrbox}
+
+\begin{lrbox}{\myboxB}
+\begin{cfa}[aboveskip=0pt,belowskip=0pt]
+
+void read( RWlock & rw, ... ) {
+
+
+	@mutex@( rw ) { ... }
+	... // read without MX
+	@mutex@( rw ) { ... }
+}
+void write( RWlock & @mutex@ rw, ... ) {
+	... // write with MX
+}
+\end{cfa}
+\end{lrbox}
+
+\subfloat[monitor]{\label{f:RWmonitor}\usebox\myboxA}
+\hspace*{3pt}
+\vrule
+\hspace*{3pt}
+\subfloat[mutex statement]{\label{f:RWmutexstmt}\usebox\myboxB}
+\caption{Readers writer problem}
+\label{f:ReadersWriter}
+\end{figure}
+
+This work adds a mutex statement to \CFA, but generalizes it beyond implicit monitor locks.
+In detail, the mutex statement has a clause and statement block, similar to a conditional or loop statement.
+The clause accepts any number of lockable objects (like a \CFA MX function prototype), and locks them for the duration of the statement.
+The locks are acquired in a deadlock-free manner and released regardless of how control-flow exits the statement.
+The mutex statement provides easy lock usage in the common case of lexically wrapping a CS.
+Examples of \CFA mutex statement are shown in \VRef[Listing]{l:cfa_mutex_ex}.
+
+\begin{cfa}[caption={\CFA mutex statement usage},label={l:cfa_mutex_ex}]
 owner_lock lock1, lock2, lock3;
-int count = 0; 
-mutex( lock1, lock2, lock3 ) {
-    // can use block statement
-    // ...
-}
-mutex( lock2, lock3 ) count++; // or inline statement
+@mutex@( lock2, lock3 ) ...;    $\C{// inline statement}$
+@mutex@( lock1, lock2, lock3 ) { ... }  $\C{// statement block}$
+void transfer( BankAccount & my, BankAccount & your, int me2you ) {
+	... // check values, no MX
+	@mutex@( my, your ) { // MX is shorter duration than the function body
+		deposit( my, -me2you );  $\C{// debit}$
+		deposit( your, me2you ); $\C{// credit}$
+	}
+}
 \end{cfa}
 
 \section{Other Languages}
-There are similar concepts to the mutex statement that exist in other languages. 
-Java has a feature called a synchronized statement, which looks identical to \CFA's mutex statement, but it has some differences. 
-The synchronized statement only accepts a single object in its clause. 
-Any object can be passed to the synchronized statement in Java since all objects in Java are monitors, and the synchronized statement acquires that object's monitor. 
-In \CC there is a feature in the standard library \code{<mutex>} header called scoped\_lock, which is also similar to the mutex statement. 
-The scoped\_lock is a class that takes in any number of locks in its constructor, and acquires them in a deadlock-free manner. 
-It then releases them when the scoped\_lock object is deallocated, thus using \gls{raii}. 
-An example of \CC scoped\_lock usage is shown in Listing~\ref{l:cc_scoped_lock}.
-
-\begin{cfa}[tabsize=3,caption={\CC scoped\_lock usage},label={l:cc_scoped_lock}]
-std::mutex lock1, lock2, lock3;
-{
-    scoped_lock s( lock1, lock2, lock3 )
-    // locks are released via raii at end of scope
-}
+There are similar constructs to the mutex statement in other programming languages.
+Java has a feature called a synchronized statement, which looks like \CFA's mutex statement, but only accepts a single object in the clause and only handles monitor locks.
+The \CC standard library has a @scoped_lock@, which is also similar to the mutex statement.
+The @scoped_lock@ takes any number of locks in its constructor, and acquires them in a deadlock-free manner.
+It then releases them when the @scoped_lock@ object is deallocated using \gls{raii}.
+An example of \CC @scoped_lock@ is shown in \VRef[Listing]{l:cc_scoped_lock}.
+
+\begin{cfa}[caption={\CC \lstinline{scoped_lock} usage},label={l:cc_scoped_lock}]
+struct BankAccount {
+	@recursive_mutex m;@		$\C{// must be recursive}$
+	int balance = 0;
+};
+void deposit( BankAccount & b, int deposit ) {
+	@scoped_lock lock( b.m );@	$\C{// RAII acquire}$
+	b.balance += deposit;
+}								$\C{// RAII release}$
+void transfer( BankAccount & my, BankAccount & your, int me2you ) {
+	@scoped_lock lock( my.m, your.m );@	$\C{// RAII acquire}$
+	deposit( my, -me2you );		$\C{// debit}$
+	deposit( your, me2you );	$\C{// credit}$
+}								$\C{// RAII release}$
 \end{cfa}
 
 \section{\CFA implementation}
-The \CFA mutex statement takes some ideas from both the Java and \CC features. 
-The mutex statement can acquire more that one lock in a deadlock-free manner, and releases them via \gls{raii} like \CC, however the syntax is identical to the Java synchronized statement. 
-This syntactic choice was made so that the body of the mutex statement is its own scope. 
-Compared to the scoped\_lock, which relies on its enclosing scope, the mutex statement's introduced scope can provide visual clarity as to what code is being protected by the mutex statement, and where the mutual exclusion ends. 
-\CFA's mutex statement and \CC's scoped\_lock both use parametric polymorphism to allow user defined types to work with the feature. 
-\CFA's implementation requires types to support the routines \code{lock()} and \code{unlock()}, whereas \CC requires those routines, plus \code{try_lock()}. 
-The scoped\_lock requires an additional routine since it differs from the mutex statement in how it implements deadlock avoidance.
-
-The parametric polymorphism allows for locking to be defined for types that may want convenient mutual exclusion. 
-An example of one such use case in \CFA is \code{sout}. 
-The output stream in \CFA is called \code{sout}, and functions similarly to \CC's \code{cout}. 
-\code{sout} has routines that satisfy the mutex statement trait, so the mutex statement can be used to lock the output stream while producing output. 
-In this case, the mutex statement allows the programmer to acquire mutual exclusion over an object without having to know the internals of the object or what locks need to be acquired. 
-The ability to do so provides both improves safety and programmer productivity since it abstracts away the concurrent details and provides an interface for optional thread-safety. 
-This is a commonly used feature when producing output from a concurrent context, since producing output is not thread safe by default. 
-This use case is shown in Listing~\ref{l:sout}.
-
-\begin{cfa}[tabsize=3,caption={\CFA sout with mutex statement},label={l:sout}]
-mutex( sout )
-    sout | "This output is protected by mutual exclusion!"; 
-\end{cfa}
-
-\section{Deadlock Avoidance}
-The mutex statement uses the deadlock prevention technique of lock ordering, where the circular-wait condition of a deadlock cannot occur if all locks are acquired in the same order. 
-The scoped\_lock uses a deadlock avoidance algorithm where all locks after the first are acquired using \code{try_lock} and if any of the attempts to lock fails, all locks so far are released. 
-This repeats until all locks are acquired successfully. 
-The deadlock avoidance algorithm used by scoped\_lock is shown in Listing~\ref{l:cc_deadlock_avoid}. 
-The algorithm presented is taken directly from the source code of the \code{<mutex>} header, with some renaming and comments for clarity.
-
-\begin{cfa}[caption={\CC scoped\_lock deadlock avoidance algorithm},label={l:cc_deadlock_avoid}]
+The \CFA mutex statement takes some ideas from both the Java and \CC features.
+Like Java, \CFA introduces a new statement rather than building from existing language features.
+(\CFA has sufficient language features to mimic \CC RAII locking.)
+This syntactic choice makes MX explicit rather than implicit via object declarations.
+Hence, it is easier for programmers and language tools to identify MX points in a program, \eg scan for all @mutex@ parameters and statements in a body of code.
+Furthermore, concurrent safety is provided across an entire program for the complex operation of acquiring multiple locks in a deadlock-free manner.
+Unlike Java, \CFA's mutex statement and \CC's @scoped_lock@ both use parametric polymorphism to allow user defined types to work with this feature.
+In this case, the polymorphism allows a locking mechanism to acquire MX over an object without having to know the object internals or what kind of lock it is using.
+\CFA provides and uses this locking trait:
+\begin{cfa}
+forall( L & | sized(L) )
+trait is_lock {
+	void lock( L & );
+	void unlock( L & );
+};
+\end{cfa}
+\CC @scoped_lock@ has this trait implicitly based on functions accessed in a template.
+@scoped_lock@ also requires @try_lock@ because of its technique for deadlock avoidance \see{\VRef{s:DeadlockAvoidance}}.
+
+The following shows how the @mutex@ statement is used with \CFA streams to eliminate unpredictable results when printing in a concurrent program.
+For example, if two threads execute:
+\begin{cfa}
+thread$\(_1\)$ : sout | "abc" | "def";
+thread$\(_2\)$ : sout | "uvw" | "xyz";
+\end{cfa}
+any of the outputs can appear, including a segmentation fault due to I/O buffer corruption:
+\begin{cquote}
+\small\tt
+\begin{tabular}{@{}l|l|l|l|l@{}}
+abc def & abc uvw xyz & uvw abc xyz def & abuvwc dexf &  uvw abc def \\
+uvw xyz & def & & yz & xyz
+\end{tabular}
+\end{cquote}
+The stream type for @sout@ is defined to satisfy the @is_lock@ trait, so the @mutex@ statement can be used to lock an output stream while producing output.
+From the programmer's perspective, it is sufficient to know an object can be locked and then any necessary MX is easily available via the @mutex@ statement.
+This ability improves safety and programmer productivity since it abstracts away the concurrent details.
+Hence, a programmer can easily protect cascaded I/O expressions:
+\begin{cfa}
+thread$\(_1\)$ : mutex( sout )  sout | "abc" | "def";
+thread$\(_2\)$ : mutex( sout )  sout | "uvw" | "xyz";
+\end{cfa}
+constraining the output to two different lines in either order:
+\begin{cquote}
+\small\tt
+\begin{tabular}{@{}l|l@{}}
+abc def & uvw xyz \\
+uvw xyz & abc def
+\end{tabular}
+\end{cquote}
+where this level of safe nondeterministic output is acceptable.
+Alternatively, multiple I/O statements can be protected using the mutex statement block:
+\begin{cfa}
+mutex( sout ) {	// acquire stream lock for sout for block duration
+	sout | "abc";
+	mutex( sout ) sout | "uvw" | "xyz"; // OK because sout lock is recursive
+	sout | "def";
+} // implicitly release sout lock
+\end{cfa}
+The inner lock acquire is likely to occur through a function call that does a thread-safe print.
+
+\section{Deadlock Avoidance}\label{s:DeadlockAvoidance}
+The mutex statement uses the deadlock prevention technique of lock ordering, where the circular-wait condition of a deadlock cannot occur if all locks are acquired in the same order.
+The @scoped_lock@ uses a deadlock avoidance algorithm where all locks after the first are acquired using @try_lock@ and if any of the lock attempts fail, all acquired locks are released.
+This repeats after selecting a new starting point in a cyclic manner until all locks are acquired successfully.
+This deadlock avoidance algorithm is shown in Listing~\ref{l:cc_deadlock_avoid}.
+The algorithm is taken directly from the source code of the @<mutex>@ header, with some renaming and comments for clarity.
+
+\begin{cfa}[caption={\CC \lstinline{scoped_lock} deadlock avoidance algorithm},label={l:cc_deadlock_avoid}]
 int first = 0;  // first lock to attempt to lock
 do {
-    // locks is the array of locks to acquire
-    locks[first].lock();    // lock first lock
-    for (int i = 1; i < Num_Locks; ++i) {   // iterate over rest of locks
-        const int idx = (first + i) % Num_Locks;
-        if (!locks[idx].try_lock()) {       // try lock each one
-            for (int j = i; j != 0; --j)    // release all locks
-                locks[(first + j - 1) % Num_Locks].unlock();
-            first = idx;    // rotate which lock to acquire first
-            break;
-        }
-    }
+	// locks is the array of locks to acquire
+	locks[first].lock();				$\C{// lock first lock}$
+	for ( int i = 1; i < Num_Locks; i += 1 ) { $\C{// iterate over rest of locks}$
+		const int idx = (first + i) % Num_Locks;
+		if ( ! locks[idx].try_lock() ) {   $\C{// try lock each one}$
+			for ( int j = i; j != 0; j -= 1 )	$\C{// release all locks}$
+				locks[(first + j - 1) % Num_Locks].unlock();
+			first = idx;				$\C{// rotate which lock to acquire first}$
+			break;
+		}
+	}
 // if first lock is still held then all have been acquired
-} while (!locks[first].owns_lock());  // is first lock held?
-\end{cfa}
-
-The algorithm in \ref{l:cc_deadlock_avoid} successfully avoids deadlock, however there is a potential livelock scenario. 
-Given two threads $A$ and $B$, who create a scoped\_lock with two locks $L1$ and $L2$, a livelock can form as follows. 
-Thread $A$ creates a scoped\_lock with $L1$, $L2$, and $B$ creates a scoped lock with the order $L2$, $L1$. 
-Both threads acquire the first lock in their order and then fail the try\_lock since the other lock is held. 
-They then reset their start lock to be their 2nd lock and try again. 
-This time $A$ has order $L2$, $L1$, and $B$ has order $L1$, $L2$. 
-This is identical to the starting setup, but with the ordering swapped among threads. 
-As such, if they each acquire their first lock before the other acquires their second, they can livelock indefinitely.
-
-The lock ordering algorithm used in the mutex statement in \CFA is both deadlock and livelock free. 
-It sorts the locks based on memory address and then acquires them. 
-For locks fewer than 7, it sorts using hard coded sorting methods that perform the minimum number of swaps for a given number of locks. 
-For 7 or more locks insertion sort is used. 
-These sorting algorithms were chosen since it is rare to have to hold more than  a handful of locks at a time. 
-It is worth mentioning that the downside to the sorting approach is that it is not fully compatible with usages of the same locks outside the mutex statement. 
-If more than one lock is held by a mutex statement, if more than one lock is to be held elsewhere, it must be acquired via the mutex statement, or else the required ordering will not occur. 
-Comparitively, if the scoped\_lock is used and the same locks are acquired elsewhere, there is no concern of the scoped\_lock deadlocking, due to its avoidance scheme, but it may livelock.
+} while ( ! locks[first].owns_lock() );  $\C{// is first lock held?}$
+\end{cfa}
+
+While the algorithm in \ref{l:cc_deadlock_avoid} successfully avoids deadlock, there is a livelock scenario.
+Assume two threads, $A$ and $B$, create a @scoped_lock@ accessing two locks, $L1$ and $L2$.
+A livelock can form as follows.
+Thread $A$ creates a @scoped_lock@ with arguments $L1$, $L2$, and $B$ creates a scoped lock with the lock arguments in the opposite order $L2$, $L1$.
+Both threads acquire the first lock in their order and then fail the @try_lock@ since the other lock is held.
+Both threads then reset their starting lock to be their second lock and try again.
+This time $A$ has order $L2$, $L1$, and $B$ has order $L1$, $L2$, which is identical to the starting setup but with the ordering swapped between threads.
+If the threads perform this action in lock-step, they cycle indefinitely without entering the CS, \ie livelock.
+Hence, to use @scoped_lock@ safely, a programmer must manually construct and maintain a global ordering of lock arguments passed to @scoped_lock@.
+
+The lock ordering algorithm used in \CFA mutex functions and statements is deadlock and livelock free.
+The algorithm uses the lock memory addresses as keys, sorts the keys, and then acquires the locks in sorted order.
+For fewer than 7 locks ($2^3-1$), the sort is unrolled performing the minimum number of compare and swaps for the given number of locks;
+for 7 or more locks, insertion sort is used.
+Since it is extremely rare to hold more than 6 locks at a time, the algorithm is fast and executes in $O(1)$ time.
+Furthermore, lock addresses are unique across program execution, even for dynamically allocated locks, so the algorithm is safe across the entire program execution.
+
+The downside to the sorting approach is that it is not fully compatible with manual usages of the same locks outside the @mutex@ statement, \ie the locks are acquired without using the @mutex@ statement.
+The following scenario is a classic deadlock.
+\begin{cquote}
+\begin{tabular}{@{}l@{\hspace{30pt}}l@{}}
+\begin{cfa}
+lock L1, L2; // assume &L1 < &L2
+        $\textbf{thread\(_1\)}$
+acquire( L2 );
+	acquire( L1 );
+		CS
+	release( L1 );
+release( L2 );
+\end{cfa}
+&
+\begin{cfa}
+
+        $\textbf{thread\(_2\)}$
+mutex( L1, L2 ) {
+
+	CS
+
+}
+\end{cfa}
+\end{tabular}
+\end{cquote}
+Comparatively, if the @scoped_lock@ is used and the same locks are acquired elsewhere, there is no concern of the @scoped_lock@ deadlocking, due to its avoidance scheme, but it may livelock.
+The convenience and safety of the @mutex@ statement, \eg guaranteed lock release with exceptions, should encourage programmers to always use it for locking, mitigating any deadlock scenario.
+
+\section{Performance}
+Given the two multi-acquisition algorithms in \CC and \CFA, each with differing advantages and disadvantages, it is interesting to compare their performance.
+Comparison with Java is not possible, since it only takes a single lock.
+
+The comparison starts with a baseline that acquires the locks directly without a mutex statement or @scoped_lock@ in a fixed ordering and then releases them.
+The baseline helps highlight the cost of the deadlock avoidance/prevention algorithms for each implementation.
+
+The benchmark used to evaluate the avoidance algorithms repeatedly acquires a fixed number of locks in a random order and then releases them.
+The pseudo code for the deadlock avoidance benchmark is shown in \VRef[Listing]{l:deadlock_avoid_pseudo}.
+To ensure the comparison exercises the implementation of each lock avoidance algorithm, an identical spinlock is implemented in each language using a set of builtin atomics available in both \CC and \CFA.
+The benchmarks are run for a fixed duration of 10 seconds and then terminate.
+The total number of times the group of locks is acquired is returned for each thread.
+Each variation is run 11 times on 2, 4, 8, 16, 24, 32 cores and with 2, 4, and 8 locks being acquired.
+The median is calculated and is plotted alongside the 95\% confidence intervals for each point.
+
+\begin{cfa}[caption={Deadlock avoidance benchmark pseudo code},label={l:deadlock_avoid_pseudo}]
+
+
+
+$\PAB{// add pseudo code}$
+
+
+
+\end{cfa}
+
+The performance experiments were run on the following multi-core hardware systems to determine differences across platforms:
+\begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=5pt\parsep=5pt\itemsep=0pt}
+% sudo dmidecode -t system
+\item
+Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units) 2.0 GHz, TSO memory model, running Linux v5.8.0--55--generic, gcc--10 compiler
+\item
+Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets (48 processing units) 2.2GHz, TSO memory model, running Linux v5.8.0--59--generic, gcc--10 compiler
+\end{list}
+%The hardware architectures are different in threading (multithreading vs hyper), cache structure (MESI or MESIF), NUMA layout (QPI vs HyperTransport), memory model (TSO vs WO), and energy/thermal mechanisms (turbo-boost).
+%Software that runs well on one architecture may run poorly or not at all on another.
+
+Figure~\ref{f:mutex_bench} shows the results of the benchmark experiments.
+\PAB{Make the points in the graphs for each line different.
+Also, make the text in the graphs larger.}
+The baseline results for both languages are mostly comparable, except for the 8 locks results in \ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel}, where the \CFA baseline is slightly slower.
+The avoidance result for both languages is significantly different, where \CFA's mutex statement achieves throughput that is magnitudes higher than \CC's @scoped_lock@.
+The slowdown for @scoped_lock@ is likely due to its deadlock-avoidance implementation.
+Since it uses a retry based mechanism, it can take a long time for threads to progress.
+Additionally the potential for livelock in the algorithm can result in very little throughput under high contention.
+For example, on the AMD machine with 32 threads and 8 locks, the benchmarks would occasionally livelock indefinitely, with no threads making any progress for 3 hours before the experiment was terminated manually.
+It is likely that shorter bouts of livelock occurred in many of the experiments, which would explain large confidence intervals for some of the data points in the \CC data.
+In Figures~\ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel} the mutex statement performs better than the baseline.
+At 7 locks and above the mutex statement switches from a hard coded sort to insertion sort.
+It is likely that the improvement in throughput compared to baseline is due to the time spent in the insertion sort, which decreases contention on the locks.
 
 \begin{figure}
-    \centering
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasus_Aggregate_Lock_2.pgf}}
-        \subcaption{AMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pyke_Aggregate_Lock_2.pgf}}
-        \subcaption{Intel}
-    \end{subfigure}
-
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasus_Aggregate_Lock_4.pgf}}
-        \subcaption{AMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pyke_Aggregate_Lock_4.pgf}}
-        \subcaption{Intel}
-    \end{subfigure}
-
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/nasus_Aggregate_Lock_8.pgf}}
-        \subcaption{AMD}\label{f:mutex_bench8_AMD}
-    \end{subfigure}\hfill
-    \begin{subfigure}{0.5\textwidth}
-        \centering
-        \scalebox{0.5}{\input{figures/pyke_Aggregate_Lock_8.pgf}}
-        \subcaption{Intel}\label{f:mutex_bench8_Intel}
-    \end{subfigure}
-    \caption{The aggregate lock benchmark comparing \CC scoped\_lock and \CFA mutex statement throughput (higher is better).}
-    \label{f:mutex_bench}
+	\centering
+	\subfloat[AMD]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasus_Aggregate_Lock_2.pgf}}
+	}
+	\subfloat[Intel]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pyke_Aggregate_Lock_2.pgf}}
+	}
+
+	\subfloat[AMD]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasus_Aggregate_Lock_4.pgf}}
+	}
+	\subfloat[Intel]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pyke_Aggregate_Lock_4.pgf}}
+	}
+
+	\subfloat[AMD]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/nasus_Aggregate_Lock_8.pgf}}
+		\label{f:mutex_bench8_AMD}
+	}
+	\subfloat[Intel]{
+		\resizebox{0.5\textwidth}{!}{\input{figures/pyke_Aggregate_Lock_8.pgf}}
+		\label{f:mutex_bench8_Intel}
+	}
+	\caption{The aggregate lock benchmark comparing \CC \lstinline{scoped_lock} and \CFA mutex statement throughput (higher is better).}
+	\label{f:mutex_bench}
 \end{figure}
 
-\section{Performance}
-Performance is compared between \CC's scoped\_lock and \CFA's mutex statement. 
-Comparison with Java is omitted, since it only takes a single lock. 
-To ensure that the comparison between \CC and \CFA exercises the implementation of each feature, an identical spinlock is implemented in each language using a set of builtin atomics available in both \CFA and \CC. 
-Each feature is evaluated on a benchmark which acquires a fixed number of locks in a random order and then releases them. 
-A baseline is included that acquires the locks directly without a mutex statement or scoped\_lock in a fixed ordering and then releases them. 
-The baseline helps highlight the cost of the deadlock avoidance/prevention algorithms for each implementation. 
-The benchmarks are run for a fixed duration of 10 seconds and then terminate and return the total number of times the group of locks were acquired. 
-Each variation is run 11 times on a variety up to 32 cores and with 2, 4, and 8 locks being acquired. 
-The median is calculated and is plotted alongside the 95\% confidence intervals for each point.
-
-Figure~\ref{f:mutex_bench} shows the results of the benchmark. 
-The baseline runs for both languages are mostly comparable, except for the 8 locks results in \ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel}, where the \CFA baseline is slower. 
-\CFA's mutex statement achieves throughput that is magnitudes higher than \CC's scoped\_lock. 
-This is likely due to the scoped\_lock deadlock avoidance implementation. 
-Since it uses a retry based mechanism, it can take a long time for threads to progress. 
-Additionally the potential for livelock in the algorithm can result in very little throughput under high contention. 
-It was observed on the AMD machine that with 32 threads and 8 locks the benchmarks would occasionally livelock indefinitely, with no threads making any progress for 3 hours before the experiment was terminated manually. 
-It is likely that shorter bouts of livelock occured in many of the experiments, which would explain large confidence intervals for some of the data points in the \CC data. 
-In Figures~\ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel} the mutex statement performs better than the baseline. 
-At 7 locks and above the mutex statement switches from a hard coded sort to insertion sort. 
-It is likely that the improvement in throughput compared to baseline is due to the time spent in the insertion sort, which decreases contention on the locks.
+% Local Variables: %
+% tab-width: 4 %
+% End: %
Index: doc/theses/colby_parsons_MMAth/thesis.tex
===================================================================
--- doc/theses/colby_parsons_MMAth/thesis.tex	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ doc/theses/colby_parsons_MMAth/thesis.tex	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -84,9 +84,10 @@
 \usepackage{tikz} % for diagrams and figures
 \def\checkmark{\tikz\fill[scale=0.4](0,.35) -- (.25,0) -- (1,.7) -- (.25,.15) -- cycle;}
-\usepackage{subcaption}
 \usepackage{fullpage,times,comment}
 \usepackage{textcomp}
 \usepackage{graphicx}
 \usepackage{tabularx}
+\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt,font=normalsize]{subfig}
+\renewcommand\thesubfigure{(\alph{subfigure})}
 \input{style}
 
Index: src/AST/Convert.cpp
===================================================================
--- src/AST/Convert.cpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Convert.cpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -559,5 +559,5 @@
 		auto stmt = new SuspendStmt();
 		stmt->then   = get<CompoundStmt>().accept1( node->then   );
-		switch(node->type) {
+		switch (node->kind) {
 			case ast::SuspendStmt::None     : stmt->type = SuspendStmt::None     ; break;
 			case ast::SuspendStmt::Coroutine: stmt->type = SuspendStmt::Coroutine; break;
@@ -1683,5 +1683,5 @@
 			GET_ACCEPT_V(attributes, Attribute),
 			{ old->get_funcSpec().val },
-			old->type->isVarArgs
+			(old->type->isVarArgs) ? ast::VariableArgs : ast::FixedArgs
 		};
 
@@ -1989,5 +1989,5 @@
 			GET_ACCEPT_1(else_, Stmt),
 			GET_ACCEPT_V(initialization, Stmt),
-			old->isDoWhile,
+			(old->isDoWhile) ? ast::DoWhile : ast::While,
 			GET_LABELS_V(old->labels)
 		);
@@ -2131,5 +2131,5 @@
 	virtual void visit( const SuspendStmt * old ) override final {
 		if ( inCache( old ) ) return;
-		ast::SuspendStmt::Type type;
+		ast::SuspendStmt::Kind type;
 		switch (old->type) {
 			case SuspendStmt::Coroutine: type = ast::SuspendStmt::Coroutine; break;
Index: src/AST/Decl.cpp
===================================================================
--- src/AST/Decl.cpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Decl.cpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -57,9 +57,9 @@
 	std::vector<ptr<DeclWithType>>&& params, std::vector<ptr<DeclWithType>>&& returns,
 	CompoundStmt * stmts, Storage::Classes storage, Linkage::Spec linkage,
-	std::vector<ptr<Attribute>>&& attrs, Function::Specs fs, bool isVarArgs)
+	std::vector<ptr<Attribute>>&& attrs, Function::Specs fs, ArgumentFlag isVarArgs )
 : DeclWithType( loc, name, storage, linkage, std::move(attrs), fs ),
 	type_params(std::move(forall)), assertions(),
 	params(std::move(params)), returns(std::move(returns)), stmts( stmts ) {
-	FunctionType * ftype = new FunctionType(static_cast<ArgumentFlag>(isVarArgs));
+	FunctionType * ftype = new FunctionType( isVarArgs );
 	for (auto & param : this->params) {
 		ftype->params.emplace_back(param->get_type());
@@ -81,10 +81,10 @@
 	std::vector<ptr<DeclWithType>>&& params, std::vector<ptr<DeclWithType>>&& returns,
 	CompoundStmt * stmts, Storage::Classes storage, Linkage::Spec linkage,
-	std::vector<ptr<Attribute>>&& attrs, Function::Specs fs, bool isVarArgs)
+	std::vector<ptr<Attribute>>&& attrs, Function::Specs fs, ArgumentFlag isVarArgs )
 : DeclWithType( location, name, storage, linkage, std::move(attrs), fs ),
 		type_params( std::move( forall) ), assertions( std::move( assertions ) ),
 		params( std::move(params) ), returns( std::move(returns) ),
 		type( nullptr ), stmts( stmts ) {
-	FunctionType * type = new FunctionType( (isVarArgs) ? VariableArgs : FixedArgs );
+	FunctionType * type = new FunctionType( isVarArgs );
 	for ( auto & param : this->params ) {
 		type->params.emplace_back( param->get_type() );
Index: src/AST/Decl.hpp
===================================================================
--- src/AST/Decl.hpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Decl.hpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -10,6 +10,6 @@
 // Created On       : Thu May 9 10:00:00 2019
 // Last Modified By : Andrew Beach
-// Last Modified On : Thu Nov 24  9:44:00 2022
-// Update Count     : 34
+// Last Modified On : Wed Apr  5 10:42:00 2023
+// Update Count     : 35
 //
 
@@ -122,4 +122,7 @@
 };
 
+/// Function variable arguments flag
+enum ArgumentFlag { FixedArgs, VariableArgs };
+
 /// Object declaration `int foo()`
 class FunctionDecl : public DeclWithType {
@@ -144,5 +147,5 @@
 		std::vector<ptr<DeclWithType>>&& params, std::vector<ptr<DeclWithType>>&& returns,
 		CompoundStmt * stmts, Storage::Classes storage = {}, Linkage::Spec linkage = Linkage::Cforall,
-		std::vector<ptr<Attribute>>&& attrs = {}, Function::Specs fs = {}, bool isVarArgs = false);
+		std::vector<ptr<Attribute>>&& attrs = {}, Function::Specs fs = {}, ArgumentFlag isVarArgs = FixedArgs );
 
 	FunctionDecl( const CodeLocation & location, const std::string & name,
@@ -150,5 +153,5 @@
 		std::vector<ptr<DeclWithType>>&& params, std::vector<ptr<DeclWithType>>&& returns,
 		CompoundStmt * stmts, Storage::Classes storage = {}, Linkage::Spec linkage = Linkage::Cforall,
-		std::vector<ptr<Attribute>>&& attrs = {}, Function::Specs fs = {}, bool isVarArgs = false);
+		std::vector<ptr<Attribute>>&& attrs = {}, Function::Specs fs = {}, ArgumentFlag isVarArgs = FixedArgs );
 
 	const Type * get_type() const override;
Index: src/AST/Pass.impl.hpp
===================================================================
--- src/AST/Pass.impl.hpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Pass.impl.hpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -2042,5 +2042,4 @@
 	if ( __visit_children() ) {
 		maybe_accept( node, &TupleType::types );
-		maybe_accept( node, &TupleType::members );
 	}
 
Index: src/AST/Print.cpp
===================================================================
--- src/AST/Print.cpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Print.cpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -739,8 +739,8 @@
 	virtual const ast::Stmt * visit( const ast::SuspendStmt * node ) override final {
 		os << "Suspend Statement";
-		switch (node->type) {
-			case ast::SuspendStmt::None     : os << " with implicit target"; break;
-			case ast::SuspendStmt::Generator: os << " for generator"; break;
-			case ast::SuspendStmt::Coroutine: os << " for coroutine"; break;
+		switch (node->kind) {
+		case ast::SuspendStmt::None     : os << " with implicit target"; break;
+		case ast::SuspendStmt::Generator: os << " for generator"; break;
+		case ast::SuspendStmt::Coroutine: os << " for coroutine"; break;
 		}
 		os << endl;
Index: src/AST/Stmt.hpp
===================================================================
--- src/AST/Stmt.hpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Stmt.hpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -10,6 +10,6 @@
 // Created On       : Wed May  8 13:00:00 2019
 // Last Modified By : Andrew Beach
-// Last Modified On : Wed Apr 20 14:34:00 2022
-// Update Count     : 36
+// Last Modified On : Wed Apr  5 10:34:00 2023
+// Update Count     : 37
 //
 
@@ -205,4 +205,7 @@
 };
 
+// A while loop or a do-while loop:
+enum WhileDoKind { While, DoWhile };
+
 // While loop: while (...) ... else ... or do ... while (...) else ...;
 class WhileDoStmt final : public Stmt {
@@ -212,12 +215,12 @@
 	ptr<Stmt> else_;
 	std::vector<ptr<Stmt>> inits;
-	bool isDoWhile;
+	WhileDoKind isDoWhile;
 
 	WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body,
-				 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} )
+				 const std::vector<ptr<Stmt>> && inits, WhileDoKind isDoWhile = While, const std::vector<Label> && labels = {} )
 		: Stmt(loc, std::move(labels)), cond(cond), body(body), else_(nullptr), inits(std::move(inits)), isDoWhile(isDoWhile) {}
 
 	WhileDoStmt( const CodeLocation & loc, const Expr * cond, const Stmt * body, const Stmt * else_,
-				 const std::vector<ptr<Stmt>> && inits, bool isDoWhile = false, const std::vector<Label> && labels = {} )
+				 const std::vector<ptr<Stmt>> && inits, WhileDoKind isDoWhile = While, const std::vector<Label> && labels = {} )
 		: Stmt(loc, std::move(labels)), cond(cond), body(body), else_(else_), inits(std::move(inits)), isDoWhile(isDoWhile) {}
 
@@ -364,8 +367,8 @@
   public:
 	ptr<CompoundStmt> then;
-	enum Type { None, Coroutine, Generator } type = None;
-
-	SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Type type, const std::vector<Label> && labels = {} )
-		: Stmt(loc, std::move(labels)), then(then), type(type) {}
+	enum Kind { None, Coroutine, Generator } kind = None;
+
+	SuspendStmt( const CodeLocation & loc, const CompoundStmt * then, Kind kind, const std::vector<Label> && labels = {} )
+		: Stmt(loc, std::move(labels)), then(then), kind(kind) {}
 
 	const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
Index: src/AST/Type.cpp
===================================================================
--- src/AST/Type.cpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Type.cpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -10,6 +10,6 @@
 // Created On       : Mon May 13 15:00:00 2019
 // Last Modified By : Andrew Beach
-// Last Modified On : Thu Nov 24  9:49:00 2022
-// Update Count     : 6
+// Last Modified On : Thu Apr  6 15:59:00 2023
+// Update Count     : 7
 //
 
@@ -199,23 +199,5 @@
 
 TupleType::TupleType( std::vector<ptr<Type>> && ts, CV::Qualifiers q )
-: Type( q ), types( std::move(ts) ), members() {
-	// This constructor is awkward. `TupleType` needs to contain objects so that members can be
-	// named, but members without initializer nodes end up getting constructors, which breaks
-	// things. This happens because the object decls have to be visited so that their types are
-	// kept in sync with the types listed here. Ultimately, the types listed here should perhaps
-	// be eliminated and replaced with a list-view over members. The temporary solution is to
-	// make a `ListInit` with `maybeConstructed = false`, so when the object is visited it is not
-	// constructed. Potential better solutions include:
-	//   a) Separate `TupleType` from its declarations, into `TupleDecl` and `Tuple{Inst?}Type`,
-	//      similar to the aggregate types.
-	//   b) Separate initializer nodes better, e.g. add a `MaybeConstructed` node that is replaced
-	//      by `genInit`, rather than the current boolean flag.
-	members.reserve( types.size() );
-	for ( const Type * ty : types ) {
-		members.emplace_back( new ObjectDecl{
-			CodeLocation(), "", ty, new ListInit( CodeLocation(), {}, {}, NoConstruct ),
-			Storage::Classes{}, Linkage::Cforall } );
-	}
-}
+: Type( q ), types( std::move(ts) ) {}
 
 bool isUnboundType(const Type * type) {
Index: src/AST/Type.hpp
===================================================================
--- src/AST/Type.hpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/AST/Type.hpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -10,6 +10,6 @@
 // Created On       : Thu May 9 10:00:00 2019
 // Last Modified By : Andrew Beach
-// Last Modified On : Thu Nov 24  9:47:00 2022
-// Update Count     : 8
+// Last Modified On : Thu Apr  6 15:58:00 2023
+// Update Count     : 9
 //
 
@@ -265,7 +265,4 @@
 };
 
-/// Function variable arguments flag
-enum ArgumentFlag { FixedArgs, VariableArgs };
-
 /// Type of a function `[R1, R2](*)(P1, P2, P3)`
 class FunctionType final : public Type {
@@ -460,5 +457,4 @@
 public:
 	std::vector<ptr<Type>> types;
-	std::vector<ptr<Decl>> members;
 
 	TupleType( std::vector<ptr<Type>> && ts, CV::Qualifiers q = {} );
Index: src/Concurrency/KeywordsNew.cpp
===================================================================
--- src/Concurrency/KeywordsNew.cpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Concurrency/KeywordsNew.cpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -779,5 +779,5 @@
 
 const ast::Stmt * SuspendKeyword::postvisit( const ast::SuspendStmt * stmt ) {
-	switch ( stmt->type ) {
+	switch ( stmt->kind ) {
 	case ast::SuspendStmt::None:
 		// Use the context to determain the implicit target.
Index: src/Parser/DeclarationNode.cc
===================================================================
--- src/Parser/DeclarationNode.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/DeclarationNode.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -14,4 +14,6 @@
 //
 
+#include "DeclarationNode.h"
+
 #include <cassert>                 // for assert, assertf, strict_dynamic_cast
 #include <iterator>                // for back_insert_iterator
@@ -34,5 +36,7 @@
 #include "Common/UniqueName.h"     // for UniqueName
 #include "Common/utility.h"        // for maybeClone
-#include "Parser/ParseNode.h"      // for DeclarationNode, ExpressionNode
+#include "Parser/ExpressionNode.h" // for ExpressionNode
+#include "Parser/InitializerNode.h"// for InitializerNode
+#include "Parser/StatementNode.h"  // for StatementNode
 #include "TypeData.h"              // for TypeData, TypeData::Aggregate_t
 #include "TypedefTable.h"          // for TypedefTable
Index: src/Parser/DeclarationNode.h
===================================================================
--- src/Parser/DeclarationNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
+++ src/Parser/DeclarationNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -0,0 +1,221 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// DeclarationNode.h --
+//
+// Author           : Andrew Beach
+// Created On       : Wed Apr  5 11:38:00 2023
+// Last Modified By : Andrew Beach
+// Last Modified On : Wed Apr  5 11:55:00 2023
+// Update Count     : 0
+//
+
+#pragma once
+
+#include "ParseNode.h"
+
+struct TypeData;
+class InitializerNode;
+
+struct DeclarationNode : public ParseNode {
+	// These enumerations must harmonize with their names in DeclarationNode.cc.
+	enum BasicType {
+		Void, Bool, Char, Int, Int128,
+		Float, Double, LongDouble, uuFloat80, uuFloat128,
+		uFloat16, uFloat32, uFloat32x, uFloat64, uFloat64x, uFloat128, uFloat128x,
+		NoBasicType
+	};
+	static const char * basicTypeNames[];
+	enum ComplexType { Complex, NoComplexType, Imaginary };
+	// Imaginary unsupported => parse, but make invisible and print error message
+	static const char * complexTypeNames[];
+	enum Signedness { Signed, Unsigned, NoSignedness };
+	static const char * signednessNames[];
+	enum Length { Short, Long, LongLong, NoLength };
+	static const char * lengthNames[];
+	enum BuiltinType { Valist, AutoType, Zero, One, NoBuiltinType };
+	static const char * builtinTypeNames[];
+
+	static DeclarationNode * newStorageClass( ast::Storage::Classes );
+	static DeclarationNode * newFuncSpecifier( ast::Function::Specs );
+	static DeclarationNode * newTypeQualifier( ast::CV::Qualifiers );
+	static DeclarationNode * newBasicType( BasicType );
+	static DeclarationNode * newComplexType( ComplexType );
+	static DeclarationNode * newSignedNess( Signedness );
+	static DeclarationNode * newLength( Length );
+	static DeclarationNode * newBuiltinType( BuiltinType );
+	static DeclarationNode * newForall( DeclarationNode * );
+	static DeclarationNode * newFromTypedef( const std::string * );
+	static DeclarationNode * newFromGlobalScope();
+	static DeclarationNode * newQualifiedType( DeclarationNode *, DeclarationNode * );
+	static DeclarationNode * newFunction( const std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body );
+	static DeclarationNode * newAggregate( ast::AggregateDecl::Aggregate kind, const std::string * name, ExpressionNode * actuals, DeclarationNode * fields, bool body );
+	static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed, DeclarationNode * base = nullptr, EnumHiding hiding = EnumHiding::Visible );
+	static DeclarationNode * newEnumConstant( const std::string * name, ExpressionNode * constant );
+	static DeclarationNode * newEnumValueGeneric( const std::string * name, InitializerNode * init );
+	static DeclarationNode * newEnumInLine( const std::string name );
+	static DeclarationNode * newName( const std::string * );
+	static DeclarationNode * newFromTypeGen( const std::string *, ExpressionNode * params );
+	static DeclarationNode * newTypeParam( ast::TypeDecl::Kind, const std::string * );
+	static DeclarationNode * newTrait( const std::string * name, DeclarationNode * params, DeclarationNode * asserts );
+	static DeclarationNode * newTraitUse( const std::string * name, ExpressionNode * params );
+	static DeclarationNode * newTypeDecl( const std::string * name, DeclarationNode * typeParams );
+	static DeclarationNode * newPointer( DeclarationNode * qualifiers, OperKinds kind );
+	static DeclarationNode * newArray( ExpressionNode * size, DeclarationNode * qualifiers, bool isStatic );
+	static DeclarationNode * newVarArray( DeclarationNode * qualifiers );
+	static DeclarationNode * newBitfield( ExpressionNode * size );
+	static DeclarationNode * newTuple( DeclarationNode * members );
+	static DeclarationNode * newTypeof( ExpressionNode * expr, bool basetypeof = false );
+	static DeclarationNode * newVtableType( DeclarationNode * expr );
+	static DeclarationNode * newAttribute( const std::string *, ExpressionNode * expr = nullptr ); // gcc attributes
+	static DeclarationNode * newDirectiveStmt( StatementNode * stmt ); // gcc external directive statement
+	static DeclarationNode * newAsmStmt( StatementNode * stmt ); // gcc external asm statement
+	static DeclarationNode * newStaticAssert( ExpressionNode * condition, ast::Expr * message );
+
+	DeclarationNode();
+	~DeclarationNode();
+	DeclarationNode * clone() const override;
+
+	DeclarationNode * addQualifiers( DeclarationNode * );
+	void checkQualifiers( const TypeData *, const TypeData * );
+	void checkSpecifiers( DeclarationNode * );
+	DeclarationNode * copySpecifiers( DeclarationNode * );
+	DeclarationNode * addType( DeclarationNode * );
+	DeclarationNode * addTypedef();
+	DeclarationNode * addEnumBase( DeclarationNode * );
+	DeclarationNode * addAssertions( DeclarationNode * );
+	DeclarationNode * addName( std::string * );
+	DeclarationNode * addAsmName( DeclarationNode * );
+	DeclarationNode * addBitfield( ExpressionNode * size );
+	DeclarationNode * addVarArgs();
+	DeclarationNode * addFunctionBody( StatementNode * body, ExpressionNode * with = nullptr );
+	DeclarationNode * addOldDeclList( DeclarationNode * list );
+	DeclarationNode * setBase( TypeData * newType );
+	DeclarationNode * copyAttribute( DeclarationNode * attr );
+	DeclarationNode * addPointer( DeclarationNode * qualifiers );
+	DeclarationNode * addArray( DeclarationNode * array );
+	DeclarationNode * addNewPointer( DeclarationNode * pointer );
+	DeclarationNode * addNewArray( DeclarationNode * array );
+	DeclarationNode * addParamList( DeclarationNode * list );
+	DeclarationNode * addIdList( DeclarationNode * list ); // old-style functions
+	DeclarationNode * addInitializer( InitializerNode * init );
+	DeclarationNode * addTypeInitializer( DeclarationNode * init );
+
+	DeclarationNode * cloneType( std::string * newName );
+	DeclarationNode * cloneBaseType( DeclarationNode * newdecl );
+
+	DeclarationNode * appendList( DeclarationNode * node ) {
+		return (DeclarationNode *)set_last( node );
+	}
+
+	virtual void print( __attribute__((unused)) std::ostream & os, __attribute__((unused)) int indent = 0 ) const override;
+	virtual void printList( __attribute__((unused)) std::ostream & os, __attribute__((unused)) int indent = 0 ) const override;
+
+	ast::Decl * build() const;
+	ast::Type * buildType() const;
+
+	ast::Linkage::Spec get_linkage() const { return linkage; }
+	DeclarationNode * extractAggregate() const;
+	bool has_enumeratorValue() const { return (bool)enumeratorValue; }
+	ExpressionNode * consume_enumeratorValue() const { return const_cast<DeclarationNode *>(this)->enumeratorValue.release(); }
+
+	bool get_extension() const { return extension; }
+	DeclarationNode * set_extension( bool exten ) { extension = exten; return this; }
+
+	bool get_inLine() const { return inLine; }
+	DeclarationNode * set_inLine( bool inL ) { inLine = inL; return this; }
+
+	DeclarationNode * get_last() { return (DeclarationNode *)ParseNode::get_last(); }
+
+	struct Variable_t {
+//		const std::string * name;
+		ast::TypeDecl::Kind tyClass;
+		DeclarationNode * assertions;
+		DeclarationNode * initializer;
+	};
+	Variable_t variable;
+
+	struct StaticAssert_t {
+		ExpressionNode * condition;
+		ast::Expr * message;
+	};
+	StaticAssert_t assert;
+
+	BuiltinType builtin = NoBuiltinType;
+
+	TypeData * type = nullptr;
+
+	bool inLine = false;
+	bool enumInLine = false;
+	ast::Function::Specs funcSpecs;
+	ast::Storage::Classes storageClasses;
+
+	ExpressionNode * bitfieldWidth = nullptr;
+	std::unique_ptr<ExpressionNode> enumeratorValue;
+	bool hasEllipsis = false;
+	ast::Linkage::Spec linkage;
+	ast::Expr * asmName = nullptr;
+	std::vector<ast::ptr<ast::Attribute>> attributes;
+	InitializerNode * initializer = nullptr;
+	bool extension = false;
+	std::string error;
+	StatementNode * asmStmt = nullptr;
+	StatementNode * directiveStmt = nullptr;
+
+	static UniqueName anonymous;
+}; // DeclarationNode
+
+ast::Type * buildType( TypeData * type );
+
+static inline ast::Type * maybeMoveBuildType( const DeclarationNode * orig ) {
+	ast::Type * ret = orig ? orig->buildType() : nullptr;
+	delete orig;
+	return ret;
+}
+
+// This generic buildList is here along side its overloads.
+template<typename AstType, typename NodeType,
+    template<typename, typename...> class Container, typename... Args>
+void buildList( const NodeType * firstNode,
+        Container<ast::ptr<AstType>, Args...> & output ) {
+    SemanticErrorException errors;
+    std::back_insert_iterator<Container<ast::ptr<AstType>, Args...>> out( output );
+    const NodeType * cur = firstNode;
+
+    while ( cur ) {
+        try {
+            if ( auto result = dynamic_cast<AstType *>( maybeBuild( cur ) ) ) {
+                *out++ = result;
+            } else {
+                assertf(false, __PRETTY_FUNCTION__ );
+                SemanticError( cur->location, "type specifier declaration in forall clause is currently unimplemented." );
+            } // if
+        } catch( SemanticErrorException & e ) {
+            errors.append( e );
+        } // try
+        const ParseNode * temp = cur->get_next();
+        // Should not return nullptr, then it is non-homogeneous:
+        cur = dynamic_cast<const NodeType *>( temp );
+        if ( !cur && temp ) {
+            SemanticError( temp->location, "internal error, non-homogeneous nodes founds in buildList processing." );
+        } // if
+    } // while
+    if ( ! errors.isEmpty() ) {
+        throw errors;
+    } // if
+}
+
+void buildList( const DeclarationNode * firstNode, std::vector<ast::ptr<ast::Decl>> & outputList );
+void buildList( const DeclarationNode * firstNode, std::vector<ast::ptr<ast::DeclWithType>> & outputList );
+void buildTypeList( const DeclarationNode * firstNode, std::vector<ast::ptr<ast::Type>> & outputList );
+
+template<typename AstType, typename NodeType,
+template<typename, typename...> class Container, typename... Args>
+void buildMoveList( const NodeType * firstNode,
+Container<ast::ptr<AstType>, Args...> & output ) {
+buildList<AstType, NodeType, Container, Args...>( firstNode, output );
+delete firstNode;
+}
Index: src/Parser/ExpressionNode.cc
===================================================================
--- src/Parser/ExpressionNode.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/ExpressionNode.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -13,4 +13,6 @@
 // Update Count     : 1083
 //
+
+#include "ExpressionNode.h"
 
 #include <cassert>                 // for assert
@@ -25,5 +27,6 @@
 #include "Common/SemanticError.h"  // for SemanticError
 #include "Common/utility.h"        // for maybeMoveBuild, maybeBuild, CodeLo...
-#include "ParseNode.h"             // for ExpressionNode, maybeMoveBuildType
+#include "DeclarationNode.h"       // for DeclarationNode
+#include "InitializerNode.h"       // for InitializerNode
 #include "parserutility.h"         // for notZeroExpr
 
Index: src/Parser/ExpressionNode.h
===================================================================
--- src/Parser/ExpressionNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
+++ src/Parser/ExpressionNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -0,0 +1,105 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// ExpressionNode.h --
+//
+// Author           : Andrew Beach
+// Created On       : Wed Apr  5 11:34:00 2023
+// Last Modified By : Andrew Beach
+// Last Modified On : Wed Apr  5 11:50:00 2023
+// Update Count     : 0
+//
+
+#pragma once
+
+#include "ParseNode.h"
+
+class InitializerNode;
+
+class ExpressionNode final : public ParseNode {
+public:
+	ExpressionNode( ast::Expr * expr = nullptr ) : expr( expr ) {}
+	virtual ~ExpressionNode() {}
+	virtual ExpressionNode * clone() const override {
+		if ( nullptr == expr ) return nullptr;
+		return static_cast<ExpressionNode*>(
+			(new ExpressionNode( ast::shallowCopy( expr.get() ) ))->set_next( maybeCopy( get_next() ) ));
+	}
+
+	bool get_extension() const { return extension; }
+	ExpressionNode * set_extension( bool exten ) { extension = exten; return this; }
+
+	virtual void print( std::ostream & os, __attribute__((unused)) int indent = 0 ) const override {
+		os << expr.get();
+	}
+	void printOneLine( __attribute__((unused)) std::ostream & os, __attribute__((unused)) int indent = 0 ) const {}
+
+	template<typename T>
+	bool isExpressionType() const {  return nullptr != dynamic_cast<T>(expr.get()); }
+
+	ast::Expr * build() const {
+		ast::Expr * node = const_cast<ExpressionNode *>(this)->expr.release();
+		node->set_extension( this->get_extension() );
+		node->location = this->location;
+		return node;
+	}
+
+	// Public because of lifetime implications (what lifetime implications?)
+	std::unique_ptr<ast::Expr> expr;
+private:
+	bool extension = false;
+}; // ExpressionNode
+
+/*
+// Must harmonize with OperName.
+enum class OperKinds {
+    // diadic
+    SizeOf, AlignOf, OffsetOf, Plus, Minus, Exp, Mul, Div, Mod, Or, And,
+    BitOr, BitAnd, Xor, Cast, LShift, RShift, LThan, GThan, LEThan, GEThan, Eq, Neq,
+    Assign, AtAssn, ExpAssn, MulAssn, DivAssn, ModAssn, PlusAssn, MinusAssn, LSAssn, RSAssn, AndAssn, ERAssn, OrAssn,
+    Index, Range,
+    // monadic
+    UnPlus, UnMinus, AddressOf, PointTo, Neg, BitNeg, Incr, IncrPost, Decr, DecrPost,
+    Ctor, Dtor,
+}; // OperKinds
+
+enum class EnumHiding { Visible, Hide };
+
+struct LabelNode {
+    std::vector<ast::Label> labels;
+};
+*/
+
+// These 4 routines modify the string:
+ast::Expr * build_constantInteger( const CodeLocation &, std::string & );
+ast::Expr * build_constantFloat( const CodeLocation &, std::string & );
+ast::Expr * build_constantChar( const CodeLocation &, std::string & );
+ast::Expr * build_constantStr( const CodeLocation &, std::string & );
+ast::Expr * build_field_name_FLOATING_FRACTIONconstant( const CodeLocation &, const std::string & str );
+ast::Expr * build_field_name_FLOATING_DECIMALconstant( const CodeLocation &, const std::string & str );
+ast::Expr * build_field_name_FLOATINGconstant( const CodeLocation &, const std::string & str );
+ast::Expr * build_field_name_fraction_constants( const CodeLocation &, ast::Expr * fieldName, ExpressionNode * fracts );
+
+ast::NameExpr * build_varref( const CodeLocation &, const std::string * name );
+ast::QualifiedNameExpr * build_qualified_expr( const CodeLocation &, const DeclarationNode * decl_node, const ast::NameExpr * name );
+ast::QualifiedNameExpr * build_qualified_expr( const CodeLocation &, const ast::EnumDecl * decl, const ast::NameExpr * name );
+ast::DimensionExpr * build_dimensionref( const CodeLocation &, const std::string * name );
+
+ast::Expr * build_cast( const CodeLocation &, DeclarationNode * decl_node, ExpressionNode * expr_node );
+ast::Expr * build_keyword_cast( const CodeLocation &, ast::AggregateDecl::Aggregate target, ExpressionNode * expr_node );
+ast::Expr * build_virtual_cast( const CodeLocation &, DeclarationNode * decl_node, ExpressionNode * expr_node );
+ast::Expr * build_fieldSel( const CodeLocation &, ExpressionNode * expr_node, ast::Expr * member );
+ast::Expr * build_pfieldSel( const CodeLocation &, ExpressionNode * expr_node, ast::Expr * member );
+ast::Expr * build_offsetOf( const CodeLocation &, DeclarationNode * decl_node, ast::NameExpr * member );
+ast::Expr * build_and( const CodeLocation &, ExpressionNode * expr_node1, ExpressionNode * expr_node2 );
+ast::Expr * build_and_or( const CodeLocation &, ExpressionNode * expr_node1, ExpressionNode * expr_node2, ast::LogicalFlag flag );
+ast::Expr * build_unary_val( const CodeLocation &, OperKinds op, ExpressionNode * expr_node );
+ast::Expr * build_binary_val( const CodeLocation &, OperKinds op, ExpressionNode * expr_node1, ExpressionNode * expr_node2 );
+ast::Expr * build_binary_ptr( const CodeLocation &, OperKinds op, ExpressionNode * expr_node1, ExpressionNode * expr_node2 );
+ast::Expr * build_cond( const CodeLocation &, ExpressionNode * expr_node1, ExpressionNode * expr_node2, ExpressionNode * expr_node3 );
+ast::Expr * build_tuple( const CodeLocation &, ExpressionNode * expr_node = nullptr );
+ast::Expr * build_func( const CodeLocation &, ExpressionNode * function, ExpressionNode * expr_node );
+ast::Expr * build_compoundLiteral( const CodeLocation &, DeclarationNode * decl_node, InitializerNode * kids );
Index: src/Parser/InitializerNode.cc
===================================================================
--- src/Parser/InitializerNode.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/InitializerNode.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -14,9 +14,9 @@
 //
 
+#include "InitializerNode.h"
+
 #include <iostream>                // for operator<<, ostream, basic_ostream
 #include <list>                    // for list
 #include <string>                  // for operator<<, string
-
-using namespace std;
 
 #include "AST/Expr.hpp"            // for Expr
@@ -24,5 +24,8 @@
 #include "Common/SemanticError.h"  // for SemanticError
 #include "Common/utility.h"        // for maybeBuild
-#include "ParseNode.h"             // for InitializerNode, ExpressionNode
+#include "ExpressionNode.h"        // for ExpressionNode
+#include "DeclarationNode.h"       // for buildList
+
+using namespace std;
 
 static ast::ConstructFlag toConstructFlag( bool maybeConstructed ) {
Index: src/Parser/InitializerNode.h
===================================================================
--- src/Parser/InitializerNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
+++ src/Parser/InitializerNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -0,0 +1,51 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// InitializerNode.h --
+//
+// Author           : Andrew Beach
+// Created On       : Wed Apr  5 11:31:00 2023
+// Last Modified By : Andrew Beach
+// Last Modified On : Wed Apr  5 11:48:00 2023
+// Update Count     : 0
+//
+
+#pragma once
+
+#include "ParseNode.h"
+
+class InitializerNode : public ParseNode {
+public:
+	InitializerNode( ExpressionNode *, bool aggrp = false, ExpressionNode * des = nullptr );
+	InitializerNode( InitializerNode *, bool aggrp = false, ExpressionNode * des = nullptr );
+	InitializerNode( bool isDelete );
+	~InitializerNode();
+	virtual InitializerNode * clone() const { assert( false ); return nullptr; }
+
+	ExpressionNode * get_expression() const { return expr; }
+
+	InitializerNode * set_designators( ExpressionNode * des ) { designator = des; return this; }
+	ExpressionNode * get_designators() const { return designator; }
+
+	InitializerNode * set_maybeConstructed( bool value ) { maybeConstructed = value; return this; }
+	bool get_maybeConstructed() const { return maybeConstructed; }
+
+	bool get_isDelete() const { return isDelete; }
+
+	InitializerNode * next_init() const { return kids; }
+
+	void print( std::ostream & os, int indent = 0 ) const;
+	void printOneLine( std::ostream & ) const;
+
+	virtual ast::Init * build() const;
+private:
+	ExpressionNode * expr;
+	bool aggregate;
+	ExpressionNode * designator;                        // may be list
+	InitializerNode * kids;
+	bool maybeConstructed;
+	bool isDelete;
+}; // InitializerNode
Index: src/Parser/ParseNode.h
===================================================================
--- src/Parser/ParseNode.h	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/ParseNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -38,4 +38,5 @@
 class DeclarationWithType;
 class Initializer;
+class InitializerNode;
 class ExpressionNode;
 struct StatementNode;
@@ -80,75 +81,4 @@
 }; // ParseNode
 
-//##############################################################################
-
-class InitializerNode : public ParseNode {
-  public:
-	InitializerNode( ExpressionNode *, bool aggrp = false, ExpressionNode * des = nullptr );
-	InitializerNode( InitializerNode *, bool aggrp = false, ExpressionNode * des = nullptr );
-	InitializerNode( bool isDelete );
-	~InitializerNode();
-	virtual InitializerNode * clone() const { assert( false ); return nullptr; }
-
-	ExpressionNode * get_expression() const { return expr; }
-
-	InitializerNode * set_designators( ExpressionNode * des ) { designator = des; return this; }
-	ExpressionNode * get_designators() const { return designator; }
-
-	InitializerNode * set_maybeConstructed( bool value ) { maybeConstructed = value; return this; }
-	bool get_maybeConstructed() const { return maybeConstructed; }
-
-	bool get_isDelete() const { return isDelete; }
-
-	InitializerNode * next_init() const { return kids; }
-
-	void print( std::ostream & os, int indent = 0 ) const;
-	void printOneLine( std::ostream & ) const;
-
-	virtual ast::Init * build() const;
-  private:
-	ExpressionNode * expr;
-	bool aggregate;
-	ExpressionNode * designator;						// may be list
-	InitializerNode * kids;
-	bool maybeConstructed;
-	bool isDelete;
-}; // InitializerNode
-
-//##############################################################################
-
-class ExpressionNode final : public ParseNode {
-  public:
-	ExpressionNode( ast::Expr * expr = nullptr ) : expr( expr ) {}
-	virtual ~ExpressionNode() {}
-	virtual ExpressionNode * clone() const override {
-		if ( nullptr == expr ) return nullptr;
-		return static_cast<ExpressionNode*>(
-			(new ExpressionNode( ast::shallowCopy( expr.get() ) ))->set_next( maybeCopy( get_next() ) ));
-	}
-
-	bool get_extension() const { return extension; }
-	ExpressionNode * set_extension( bool exten ) { extension = exten; return this; }
-
-	virtual void print( std::ostream & os, __attribute__((unused)) int indent = 0 ) const override {
-		os << expr.get();
-	}
-	void printOneLine( __attribute__((unused)) std::ostream & os, __attribute__((unused)) int indent = 0 ) const {}
-
-	template<typename T>
-	bool isExpressionType() const {	return nullptr != dynamic_cast<T>(expr.get()); }
-
-	ast::Expr * build() const {
-		ast::Expr * node = const_cast<ExpressionNode *>(this)->expr.release();
-		node->set_extension( this->get_extension() );
-		node->location = this->location;
-		return node;
-	}
-
-	// Public because of lifetime implications (what lifetime implications?)
-	std::unique_ptr<ast::Expr> expr;
-  private:
-	bool extension = false;
-}; // ExpressionNode
-
 // Must harmonize with OperName.
 enum class OperKinds {
@@ -169,326 +99,4 @@
 };
 
-// These 4 routines modify the string:
-ast::Expr * build_constantInteger( const CodeLocation &, std::string & );
-ast::Expr * build_constantFloat( const CodeLocation &, std::string & );
-ast::Expr * build_constantChar( const CodeLocation &, std::string & );
-ast::Expr * build_constantStr( const CodeLocation &, std::string & );
-ast::Expr * build_field_name_FLOATING_FRACTIONconstant( const CodeLocation &, const std::string & str );
-ast::Expr * build_field_name_FLOATING_DECIMALconstant( const CodeLocation &, const std::string & str );
-ast::Expr * build_field_name_FLOATINGconstant( const CodeLocation &, const std::string & str );
-ast::Expr * build_field_name_fraction_constants( const CodeLocation &, ast::Expr * fieldName, ExpressionNode * fracts );
-
-ast::NameExpr * build_varref( const CodeLocation &, const std::string * name );
-ast::QualifiedNameExpr * build_qualified_expr( const CodeLocation &, const DeclarationNode * decl_node, const ast::NameExpr * name );
-ast::QualifiedNameExpr * build_qualified_expr( const CodeLocation &, const ast::EnumDecl * decl, const ast::NameExpr * name );
-ast::DimensionExpr * build_dimensionref( const CodeLocation &, const std::string * name );
-
-ast::Expr * build_cast( const CodeLocation &, DeclarationNode * decl_node, ExpressionNode * expr_node );
-ast::Expr * build_keyword_cast( const CodeLocation &, ast::AggregateDecl::Aggregate target, ExpressionNode * expr_node );
-ast::Expr * build_virtual_cast( const CodeLocation &, DeclarationNode * decl_node, ExpressionNode * expr_node );
-ast::Expr * build_fieldSel( const CodeLocation &, ExpressionNode * expr_node, ast::Expr * member );
-ast::Expr * build_pfieldSel( const CodeLocation &, ExpressionNode * expr_node, ast::Expr * member );
-ast::Expr * build_offsetOf( const CodeLocation &, DeclarationNode * decl_node, ast::NameExpr * member );
-ast::Expr * build_and( const CodeLocation &, ExpressionNode * expr_node1, ExpressionNode * expr_node2 );
-ast::Expr * build_and_or( const CodeLocation &, ExpressionNode * expr_node1, ExpressionNode * expr_node2, ast::LogicalFlag flag );
-ast::Expr * build_unary_val( const CodeLocation &, OperKinds op, ExpressionNode * expr_node );
-ast::Expr * build_binary_val( const CodeLocation &, OperKinds op, ExpressionNode * expr_node1, ExpressionNode * expr_node2 );
-ast::Expr * build_binary_ptr( const CodeLocation &, OperKinds op, ExpressionNode * expr_node1, ExpressionNode * expr_node2 );
-ast::Expr * build_cond( const CodeLocation &, ExpressionNode * expr_node1, ExpressionNode * expr_node2, ExpressionNode * expr_node3 );
-ast::Expr * build_tuple( const CodeLocation &, ExpressionNode * expr_node = nullptr );
-ast::Expr * build_func( const CodeLocation &, ExpressionNode * function, ExpressionNode * expr_node );
-ast::Expr * build_compoundLiteral( const CodeLocation &, DeclarationNode * decl_node, InitializerNode * kids );
-
-//##############################################################################
-
-struct TypeData;
-
-struct DeclarationNode : public ParseNode {
-	// These enumerations must harmonize with their names in DeclarationNode.cc.
-	enum BasicType {
-		Void, Bool, Char, Int, Int128,
-		Float, Double, LongDouble, uuFloat80, uuFloat128,
-		uFloat16, uFloat32, uFloat32x, uFloat64, uFloat64x, uFloat128, uFloat128x,
-		NoBasicType
-	};
-	static const char * basicTypeNames[];
-	enum ComplexType { Complex, NoComplexType, Imaginary };	// Imaginary unsupported => parse, but make invisible and print error message
-	static const char * complexTypeNames[];
-	enum Signedness { Signed, Unsigned, NoSignedness };
-	static const char * signednessNames[];
-	enum Length { Short, Long, LongLong, NoLength };
-	static const char * lengthNames[];
-	enum BuiltinType { Valist, AutoType, Zero, One, NoBuiltinType };
-	static const char * builtinTypeNames[];
-
-	static DeclarationNode * newStorageClass( ast::Storage::Classes );
-	static DeclarationNode * newFuncSpecifier( ast::Function::Specs );
-	static DeclarationNode * newTypeQualifier( ast::CV::Qualifiers );
-	static DeclarationNode * newBasicType( BasicType );
-	static DeclarationNode * newComplexType( ComplexType );
-	static DeclarationNode * newSignedNess( Signedness );
-	static DeclarationNode * newLength( Length );
-	static DeclarationNode * newBuiltinType( BuiltinType );
-	static DeclarationNode * newForall( DeclarationNode * );
-	static DeclarationNode * newFromTypedef( const std::string * );
-	static DeclarationNode * newFromGlobalScope();
-	static DeclarationNode * newQualifiedType( DeclarationNode *, DeclarationNode * );
-	static DeclarationNode * newFunction( const std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body );
-	static DeclarationNode * newAggregate( ast::AggregateDecl::Aggregate kind, const std::string * name, ExpressionNode * actuals, DeclarationNode * fields, bool body );
-	static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body, bool typed, DeclarationNode * base = nullptr, EnumHiding hiding = EnumHiding::Visible );
-	static DeclarationNode * newEnumConstant( const std::string * name, ExpressionNode * constant );
-	static DeclarationNode * newEnumValueGeneric( const std::string * name, InitializerNode * init );
-	static DeclarationNode * newEnumInLine( const std::string name );
-	static DeclarationNode * newName( const std::string * );
-	static DeclarationNode * newFromTypeGen( const std::string *, ExpressionNode * params );
-	static DeclarationNode * newTypeParam( ast::TypeDecl::Kind, const std::string * );
-	static DeclarationNode * newTrait( const std::string * name, DeclarationNode * params, DeclarationNode * asserts );
-	static DeclarationNode * newTraitUse( const std::string * name, ExpressionNode * params );
-	static DeclarationNode * newTypeDecl( const std::string * name, DeclarationNode * typeParams );
-	static DeclarationNode * newPointer( DeclarationNode * qualifiers, OperKinds kind );
-	static DeclarationNode * newArray( ExpressionNode * size, DeclarationNode * qualifiers, bool isStatic );
-	static DeclarationNode * newVarArray( DeclarationNode * qualifiers );
-	static DeclarationNode * newBitfield( ExpressionNode * size );
-	static DeclarationNode * newTuple( DeclarationNode * members );
-	static DeclarationNode * newTypeof( ExpressionNode * expr, bool basetypeof = false );
-	static DeclarationNode * newVtableType( DeclarationNode * expr );
-	static DeclarationNode * newAttribute( const std::string *, ExpressionNode * expr = nullptr ); // gcc attributes
-	static DeclarationNode * newDirectiveStmt( StatementNode * stmt ); // gcc external directive statement
-	static DeclarationNode * newAsmStmt( StatementNode * stmt ); // gcc external asm statement
-	static DeclarationNode * newStaticAssert( ExpressionNode * condition, ast::Expr * message );
-
-	DeclarationNode();
-	~DeclarationNode();
-	DeclarationNode * clone() const override;
-
-	DeclarationNode * addQualifiers( DeclarationNode * );
-	void checkQualifiers( const TypeData *, const TypeData * );
-	void checkSpecifiers( DeclarationNode * );
-	DeclarationNode * copySpecifiers( DeclarationNode * );
-	DeclarationNode * addType( DeclarationNode * );
-	DeclarationNode * addTypedef();
-	DeclarationNode * addEnumBase( DeclarationNode * );
-	DeclarationNode * addAssertions( DeclarationNode * );
-	DeclarationNode * addName( std::string * );
-	DeclarationNode * addAsmName( DeclarationNode * );
-	DeclarationNode * addBitfield( ExpressionNode * size );
-	DeclarationNode * addVarArgs();
-	DeclarationNode * addFunctionBody( StatementNode * body, ExpressionNode * with = nullptr );
-	DeclarationNode * addOldDeclList( DeclarationNode * list );
-	DeclarationNode * setBase( TypeData * newType );
-	DeclarationNode * copyAttribute( DeclarationNode * attr );
-	DeclarationNode * addPointer( DeclarationNode * qualifiers );
-	DeclarationNode * addArray( DeclarationNode * array );
-	DeclarationNode * addNewPointer( DeclarationNode * pointer );
-	DeclarationNode * addNewArray( DeclarationNode * array );
-	DeclarationNode * addParamList( DeclarationNode * list );
-	DeclarationNode * addIdList( DeclarationNode * list ); // old-style functions
-	DeclarationNode * addInitializer( InitializerNode * init );
-	DeclarationNode * addTypeInitializer( DeclarationNode * init );
-
-	DeclarationNode * cloneType( std::string * newName );
-	DeclarationNode * cloneBaseType( DeclarationNode * newdecl );
-
-	DeclarationNode * appendList( DeclarationNode * node ) {
-		return (DeclarationNode *)set_last( node );
-	}
-
-	virtual void print( __attribute__((unused)) std::ostream & os, __attribute__((unused)) int indent = 0 ) const override;
-	virtual void printList( __attribute__((unused)) std::ostream & os, __attribute__((unused)) int indent = 0 ) const override;
-
-	ast::Decl * build() const;
-	ast::Type * buildType() const;
-
-	ast::Linkage::Spec get_linkage() const { return linkage; }
-	DeclarationNode * extractAggregate() const;
-	bool has_enumeratorValue() const { return (bool)enumeratorValue; }
-	ExpressionNode * consume_enumeratorValue() const { return const_cast<DeclarationNode *>(this)->enumeratorValue.release(); }
-
-	bool get_extension() const { return extension; }
-	DeclarationNode * set_extension( bool exten ) { extension = exten; return this; }
-
-	bool get_inLine() const { return inLine; }
-	DeclarationNode * set_inLine( bool inL ) { inLine = inL; return this; }
-
-	DeclarationNode * get_last() { return (DeclarationNode *)ParseNode::get_last(); }
-
-	struct Variable_t {
-//		const std::string * name;
-		ast::TypeDecl::Kind tyClass;
-		DeclarationNode * assertions;
-		DeclarationNode * initializer;
-	};
-	Variable_t variable;
-
-	struct StaticAssert_t {
-		ExpressionNode * condition;
-		ast::Expr * message;
-	};
-	StaticAssert_t assert;
-
-	BuiltinType builtin = NoBuiltinType;
-
-	TypeData * type = nullptr;
-
-	bool inLine = false;
-	bool enumInLine = false;
-	ast::Function::Specs funcSpecs;
-	ast::Storage::Classes storageClasses;
-
-	ExpressionNode * bitfieldWidth = nullptr;
-	std::unique_ptr<ExpressionNode> enumeratorValue;
-	bool hasEllipsis = false;
-	ast::Linkage::Spec linkage;
-	ast::Expr * asmName = nullptr;
-	std::vector<ast::ptr<ast::Attribute>> attributes;
-	InitializerNode * initializer = nullptr;
-	bool extension = false;
-	std::string error;
-	StatementNode * asmStmt = nullptr;
-	StatementNode * directiveStmt = nullptr;
-
-	static UniqueName anonymous;
-}; // DeclarationNode
-
-ast::Type * buildType( TypeData * type );
-
-static inline ast::Type * maybeMoveBuildType( const DeclarationNode * orig ) {
-	ast::Type * ret = orig ? orig->buildType() : nullptr;
-	delete orig;
-	return ret;
-}
-
-//##############################################################################
-
-struct StatementNode final : public ParseNode {
-	StatementNode() :
-		stmt( nullptr ), clause( nullptr ) {}
-	StatementNode( ast::Stmt * stmt ) :
-		stmt( stmt ), clause( nullptr ) {}
-	StatementNode( ast::StmtClause * clause ) :
-		stmt( nullptr ), clause( clause ) {}
-	StatementNode( DeclarationNode * decl );
-	virtual ~StatementNode() {}
-
-	virtual StatementNode * clone() const final { assert( false ); return nullptr; }
-	ast::Stmt * build() const { return const_cast<StatementNode *>(this)->stmt.release(); }
-
-	virtual StatementNode * add_label(
-			const CodeLocation & location,
-			const std::string * name,
-			DeclarationNode * attr = nullptr ) {
-		stmt->labels.emplace_back( location,
-			*name,
-			attr ? std::move( attr->attributes )
-				: std::vector<ast::ptr<ast::Attribute>>{} );
-		delete attr;
-		delete name;
-		return this;
-	}
-
-	virtual StatementNode * append_last_case( StatementNode * );
-
-	virtual void print( std::ostream & os, __attribute__((unused)) int indent = 0 ) const override {
-		os << stmt.get() << std::endl;
-	}
-
-	std::unique_ptr<ast::Stmt> stmt;
-	std::unique_ptr<ast::StmtClause> clause;
-}; // StatementNode
-
-ast::Stmt * build_expr( CodeLocation const &, ExpressionNode * ctl );
-
-struct CondCtl {
-	CondCtl( DeclarationNode * decl, ExpressionNode * condition ) :
-		init( decl ? new StatementNode( decl ) : nullptr ), condition( condition ) {}
-
-	StatementNode * init;
-	ExpressionNode * condition;
-};
-
-struct ForCtrl {
-	ForCtrl( StatementNode * stmt, ExpressionNode * condition, ExpressionNode * change ) :
-		init( stmt ), condition( condition ), change( change ) {}
-
-	StatementNode * init;
-	ExpressionNode * condition;
-	ExpressionNode * change;
-};
-
-ast::Stmt * build_if( const CodeLocation &, CondCtl * ctl, StatementNode * then, StatementNode * else_ );
-ast::Stmt * build_switch( const CodeLocation &, bool isSwitch, ExpressionNode * ctl, StatementNode * stmt );
-ast::CaseClause * build_case( ExpressionNode * ctl );
-ast::CaseClause * build_default( const CodeLocation & );
-ast::Stmt * build_while( const CodeLocation &, CondCtl * ctl, StatementNode * stmt, StatementNode * else_ = nullptr );
-ast::Stmt * build_do_while( const CodeLocation &, ExpressionNode * ctl, StatementNode * stmt, StatementNode * else_ = nullptr );
-ast::Stmt * build_for( const CodeLocation &, ForCtrl * forctl, StatementNode * stmt, StatementNode * else_ = nullptr );
-ast::Stmt * build_branch( const CodeLocation &, ast::BranchStmt::Kind kind );
-ast::Stmt * build_branch( const CodeLocation &, std::string * identifier, ast::BranchStmt::Kind kind );
-ast::Stmt * build_computedgoto( ExpressionNode * ctl );
-ast::Stmt * build_return( const CodeLocation &, ExpressionNode * ctl );
-ast::Stmt * build_throw( const CodeLocation &, ExpressionNode * ctl );
-ast::Stmt * build_resume( const CodeLocation &, ExpressionNode * ctl );
-ast::Stmt * build_resume_at( ExpressionNode * ctl , ExpressionNode * target );
-ast::Stmt * build_try( const CodeLocation &, StatementNode * try_, StatementNode * catch_, StatementNode * finally_ );
-ast::CatchClause * build_catch( const CodeLocation &, ast::ExceptionKind kind, DeclarationNode * decl, ExpressionNode * cond, StatementNode * body );
-ast::FinallyClause * build_finally( const CodeLocation &, StatementNode * stmt );
-ast::Stmt * build_compound( const CodeLocation &, StatementNode * first );
-StatementNode * maybe_build_compound( const CodeLocation &, StatementNode * first );
-ast::Stmt * build_asm( const CodeLocation &, bool voltile, ast::Expr * instruction, ExpressionNode * output = nullptr, ExpressionNode * input = nullptr, ExpressionNode * clobber = nullptr, LabelNode * gotolabels = nullptr );
-ast::Stmt * build_directive( const CodeLocation &, std::string * directive );
-ast::SuspendStmt * build_suspend( const CodeLocation &, StatementNode *, ast::SuspendStmt::Type );
-ast::WaitForStmt * build_waitfor( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt );
-ast::WaitForStmt * build_waitfor_else( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, StatementNode * stmt );
-ast::WaitForStmt * build_waitfor_timeout( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt );
-ast::Stmt * build_with( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt );
-ast::Stmt * build_mutex( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt );
-
-//##############################################################################
-
-template<typename AstType, typename NodeType,
-	template<typename, typename...> class Container, typename... Args>
-void buildList( const NodeType * firstNode,
-		Container<ast::ptr<AstType>, Args...> & output ) {
-	SemanticErrorException errors;
-	std::back_insert_iterator<Container<ast::ptr<AstType>, Args...>> out( output );
-	const NodeType * cur = firstNode;
-
-	while ( cur ) {
-		try {
-			if ( auto result = dynamic_cast<AstType *>( maybeBuild( cur ) ) ) {
-				*out++ = result;
-			} else {
-				assertf(false, __PRETTY_FUNCTION__ );
-				SemanticError( cur->location, "type specifier declaration in forall clause is currently unimplemented." );
-			} // if
-		} catch( SemanticErrorException & e ) {
-			errors.append( e );
-		} // try
-		const ParseNode * temp = cur->get_next();
-		// Should not return nullptr, then it is non-homogeneous:
-		cur = dynamic_cast<const NodeType *>( temp );
-		if ( !cur && temp ) {
-			SemanticError( temp->location, "internal error, non-homogeneous nodes founds in buildList processing." );
-		} // if
-	} // while
-	if ( ! errors.isEmpty() ) {
-		throw errors;
-	} // if
-}
-
-// in DeclarationNode.cc
-void buildList( const DeclarationNode * firstNode, std::vector<ast::ptr<ast::Decl>> & outputList );
-void buildList( const DeclarationNode * firstNode, std::vector<ast::ptr<ast::DeclWithType>> & outputList );
-void buildTypeList( const DeclarationNode * firstNode, std::vector<ast::ptr<ast::Type>> & outputList );
-
-template<typename AstType, typename NodeType,
-	template<typename, typename...> class Container, typename... Args>
-void buildMoveList( const NodeType * firstNode,
-		Container<ast::ptr<AstType>, Args...> & output ) {
-	buildList<AstType, NodeType, Container, Args...>( firstNode, output );
-	delete firstNode;
-}
-
-// in ParseNode.cc
 std::ostream & operator<<( std::ostream & out, const ParseNode * node );
 
Index: src/Parser/RunParser.cpp
===================================================================
--- src/Parser/RunParser.cpp	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/RunParser.cpp	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -20,5 +20,5 @@
 #include "CodeTools/TrackLoc.h"             // for fillLocations
 #include "Common/CodeLocationTools.hpp"     // for forceFillCodeLocations
-#include "Parser/ParseNode.h"               // for DeclarationNode, buildList
+#include "Parser/DeclarationNode.h"         // for DeclarationNode, buildList
 #include "Parser/TypedefTable.h"            // for TypedefTable
 
Index: src/Parser/StatementNode.cc
===================================================================
--- src/Parser/StatementNode.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/StatementNode.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -15,4 +15,6 @@
 //
 
+#include "StatementNode.h"
+
 #include <cassert>                 // for assert, strict_dynamic_cast, assertf
 #include <memory>                  // for unique_ptr
@@ -23,5 +25,6 @@
 #include "Common/SemanticError.h"  // for SemanticError
 #include "Common/utility.h"        // for maybeMoveBuild, maybeBuild
-#include "ParseNode.h"             // for StatementNode, ExpressionNode, bui...
+#include "DeclarationNode.h"       // for DeclarationNode
+#include "ExpressionNode.h"        // for ExpressionNode
 #include "parserutility.h"         // for notZeroExpr
 
@@ -52,4 +55,17 @@
 	stmt.reset( new ast::DeclStmt( declLocation, maybeMoveBuild( agg ) ) );
 } // StatementNode::StatementNode
+
+StatementNode * StatementNode::add_label(
+		const CodeLocation & location,
+		const std::string * name,
+		DeclarationNode * attr ) {
+	stmt->labels.emplace_back( location,
+		*name,
+		attr ? std::move( attr->attributes )
+			: std::vector<ast::ptr<ast::Attribute>>{} );
+	delete attr;
+	delete name;
+	return this;
+}
 
 StatementNode * StatementNode::append_last_case( StatementNode * stmt ) {
@@ -218,5 +234,5 @@
 		astelse.empty() ? nullptr : astelse.front().release(),
 		std::move( astinit ),
-		false
+		ast::While
 	);
 } // build_while
@@ -237,5 +253,5 @@
 		astelse.empty() ? nullptr : astelse.front().release(),
 		{},
-		true
+		ast::DoWhile
 	);
 } // build_do_while
@@ -362,5 +378,5 @@
 } // build_finally
 
-ast::SuspendStmt * build_suspend( const CodeLocation & location, StatementNode * then, ast::SuspendStmt::Type type ) {
+ast::SuspendStmt * build_suspend( const CodeLocation & location, StatementNode * then, ast::SuspendStmt::Kind kind ) {
 	std::vector<ast::ptr<ast::Stmt>> stmts;
 	buildMoveList( then, stmts );
@@ -370,7 +386,5 @@
 		then2 = stmts.front().strict_as<ast::CompoundStmt>();
 	}
-	auto node = new ast::SuspendStmt( location, then2, ast::SuspendStmt::None );
-	node->type = type;
-	return node;
+	return new ast::SuspendStmt( location, then2, kind );
 } // build_suspend
 
Index: src/Parser/StatementNode.h
===================================================================
--- src/Parser/StatementNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
+++ src/Parser/StatementNode.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -0,0 +1,101 @@
+//
+// Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo
+//
+// The contents of this file are covered under the licence agreement in the
+// file "LICENCE" distributed with Cforall.
+//
+// StatementNode.h --
+//
+// Author           : Andrew Beach
+// Created On       : Wed Apr  5 11:42:00 2023
+// Last Modified By : Andrew Beach
+// Last Modified On : Wed Apr  5 11:57:00 2023
+// Update Count     : 0
+//
+
+#pragma once
+
+#include "ParseNode.h"
+
+struct StatementNode final : public ParseNode {
+	StatementNode() :
+		stmt( nullptr ), clause( nullptr ) {}
+	StatementNode( ast::Stmt * stmt ) :
+		stmt( stmt ), clause( nullptr ) {}
+	StatementNode( ast::StmtClause * clause ) :
+		stmt( nullptr ), clause( clause ) {}
+	StatementNode( DeclarationNode * decl );
+	virtual ~StatementNode() {}
+
+	virtual StatementNode * clone() const final { assert( false ); return nullptr; }
+	ast::Stmt * build() const { return const_cast<StatementNode *>(this)->stmt.release(); }
+
+	StatementNode * add_label(
+			const CodeLocation & location,
+			const std::string * name,
+			DeclarationNode * attr = nullptr );/* {
+		stmt->labels.emplace_back( location,
+			*name,
+			attr ? std::move( attr->attributes )
+				: std::vector<ast::ptr<ast::Attribute>>{} );
+		delete attr;
+		delete name;
+		return this;
+	}*/
+
+	virtual StatementNode * append_last_case( StatementNode * );
+
+	virtual void print( std::ostream & os, __attribute__((unused)) int indent = 0 ) const override {
+		os << stmt.get() << std::endl;
+	}
+
+	std::unique_ptr<ast::Stmt> stmt;
+	std::unique_ptr<ast::StmtClause> clause;
+}; // StatementNode
+
+ast::Stmt * build_expr( CodeLocation const &, ExpressionNode * ctl );
+
+struct CondCtl {
+	CondCtl( DeclarationNode * decl, ExpressionNode * condition ) :
+		init( decl ? new StatementNode( decl ) : nullptr ), condition( condition ) {}
+
+	StatementNode * init;
+	ExpressionNode * condition;
+};
+
+struct ForCtrl {
+	ForCtrl( StatementNode * stmt, ExpressionNode * condition, ExpressionNode * change ) :
+		init( stmt ), condition( condition ), change( change ) {}
+
+	StatementNode * init;
+	ExpressionNode * condition;
+	ExpressionNode * change;
+};
+
+ast::Stmt * build_if( const CodeLocation &, CondCtl * ctl, StatementNode * then, StatementNode * else_ );
+ast::Stmt * build_switch( const CodeLocation &, bool isSwitch, ExpressionNode * ctl, StatementNode * stmt );
+ast::CaseClause * build_case( ExpressionNode * ctl );
+ast::CaseClause * build_default( const CodeLocation & );
+ast::Stmt * build_while( const CodeLocation &, CondCtl * ctl, StatementNode * stmt, StatementNode * else_ = nullptr );
+ast::Stmt * build_do_while( const CodeLocation &, ExpressionNode * ctl, StatementNode * stmt, StatementNode * else_ = nullptr );
+ast::Stmt * build_for( const CodeLocation &, ForCtrl * forctl, StatementNode * stmt, StatementNode * else_ = nullptr );
+ast::Stmt * build_branch( const CodeLocation &, ast::BranchStmt::Kind kind );
+ast::Stmt * build_branch( const CodeLocation &, std::string * identifier, ast::BranchStmt::Kind kind );
+ast::Stmt * build_computedgoto( ExpressionNode * ctl );
+ast::Stmt * build_return( const CodeLocation &, ExpressionNode * ctl );
+ast::Stmt * build_throw( const CodeLocation &, ExpressionNode * ctl );
+ast::Stmt * build_resume( const CodeLocation &, ExpressionNode * ctl );
+ast::Stmt * build_resume_at( ExpressionNode * ctl , ExpressionNode * target );
+ast::Stmt * build_try( const CodeLocation &, StatementNode * try_, StatementNode * catch_, StatementNode * finally_ );
+ast::CatchClause * build_catch( const CodeLocation &, ast::ExceptionKind kind, DeclarationNode * decl, ExpressionNode * cond, StatementNode * body );
+ast::FinallyClause * build_finally( const CodeLocation &, StatementNode * stmt );
+ast::Stmt * build_compound( const CodeLocation &, StatementNode * first );
+StatementNode * maybe_build_compound( const CodeLocation &, StatementNode * first );
+ast::Stmt * build_asm( const CodeLocation &, bool voltile, ast::Expr * instruction, ExpressionNode * output = nullptr, ExpressionNode * input = nullptr, ExpressionNode * clobber = nullptr, LabelNode * gotolabels = nullptr );
+ast::Stmt * build_directive( const CodeLocation &, std::string * directive );
+ast::SuspendStmt * build_suspend( const CodeLocation &, StatementNode *, ast::SuspendStmt::Kind );
+ast::WaitForStmt * build_waitfor( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt );
+ast::WaitForStmt * build_waitfor_else( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, StatementNode * stmt );
+ast::WaitForStmt * build_waitfor_timeout( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt );
+ast::Stmt * build_with( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt );
+ast::Stmt * build_mutex( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt );
Index: src/Parser/TypeData.cc
===================================================================
--- src/Parser/TypeData.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/TypeData.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -24,6 +24,6 @@
 #include "Common/SemanticError.h"  // for SemanticError
 #include "Common/utility.h"        // for splice, spliceBegin
-#include "Parser/parserutility.h"  // for maybeCopy, maybeBuild, maybeMoveB...
-#include "Parser/ParseNode.h"      // for DeclarationNode, ExpressionNode
+#include "Parser/ExpressionNode.h" // for ExpressionNode
+#include "Parser/StatementNode.h"  // for StatementNode
 
 class Attribute;
@@ -1397,5 +1397,5 @@
 		std::move( attributes ),
 		funcSpec,
-		isVarArgs
+		(isVarArgs) ? ast::VariableArgs : ast::FixedArgs
 	);
 	buildList( td->function.withExprs, decl->withExprs );
Index: src/Parser/TypeData.h
===================================================================
--- src/Parser/TypeData.h	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/TypeData.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -16,10 +16,10 @@
 #pragma once
 
-#include <iosfwd>										// for ostream
-#include <list>											// for list
-#include <string>										// for string
+#include <iosfwd>                                   // for ostream
+#include <list>                                     // for list
+#include <string>                                   // for string
 
-#include "AST/Type.hpp"									// for Type
-#include "ParseNode.h"									// for DeclarationNode, DeclarationNode::Ag...
+#include "AST/Type.hpp"                             // for Type
+#include "DeclarationNode.h"                        // for DeclarationNode
 
 struct TypeData {
Index: src/Parser/TypedefTable.cc
===================================================================
--- src/Parser/TypedefTable.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/TypedefTable.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -16,29 +16,34 @@
 
 #include "TypedefTable.h"
-#include <cassert>										// for assert
-#include <iostream>
+
+#include <cassert>                                // for assert
+#include <string>                                 // for string
+#include <iostream>                               // for iostream
+
+#include "ExpressionNode.h"                       // for LabelNode
+#include "ParserTypes.h"                          // for Token
+#include "StatementNode.h"                        // for CondCtl, ForCtrl
+// This (generated) header must come late as it is missing includes.
+#include "parser.hh"              // for IDENTIFIER, TYPEDEFname, TYPEGENname
+
 using namespace std;
 
 #if 0
 #define debugPrint( code ) code
+
+static const char *kindName( int kind ) {
+	switch ( kind ) {
+	case IDENTIFIER: return "identifier";
+	case TYPEDIMname: return "typedim";
+	case TYPEDEFname: return "typedef";
+	case TYPEGENname: return "typegen";
+	default:
+		cerr << "Error: cfa-cpp internal error, invalid kind of identifier" << endl;
+		abort();
+	} // switch
+} // kindName
 #else
 #define debugPrint( code )
 #endif
-
-using namespace std;									// string, iostream
-
-debugPrint(
-	static const char *kindName( int kind ) {
-		switch ( kind ) {
-		case IDENTIFIER: return "identifier";
-		case TYPEDIMname: return "typedim";
-		case TYPEDEFname: return "typedef";
-		case TYPEGENname: return "typegen";
-		default:
-			cerr << "Error: cfa-cpp internal error, invalid kind of identifier" << endl;
-			abort();
-		} // switch
-	} // kindName
-);
 
 TypedefTable::~TypedefTable() {
@@ -78,4 +83,8 @@
 		typedefTable.addToEnclosingScope( name, kind, "MTD" );
 	} // if
+} // TypedefTable::makeTypedef
+
+void TypedefTable::makeTypedef( const string & name ) {
+	return makeTypedef( name, TYPEDEFname );
 } // TypedefTable::makeTypedef
 
Index: src/Parser/TypedefTable.h
===================================================================
--- src/Parser/TypedefTable.h	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/TypedefTable.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -19,11 +19,9 @@
 
 #include "Common/ScopedMap.h"							// for ScopedMap
-#include "ParserTypes.h"
-#include "parser.hh"									// for IDENTIFIER, TYPEDEFname, TYPEGENname
 
 class TypedefTable {
 	struct Note { size_t level; bool forall; };
 	typedef ScopedMap< std::string, int, Note > KindTable;
-	KindTable kindTable;	
+	KindTable kindTable;
 	unsigned int level = 0;
   public:
@@ -33,5 +31,6 @@
 	bool existsCurr( const std::string & identifier ) const;
 	int isKind( const std::string & identifier ) const;
-	void makeTypedef( const std::string & name, int kind = TYPEDEFname );
+	void makeTypedef( const std::string & name, int kind );
+	void makeTypedef( const std::string & name );
 	void addToScope( const std::string & identifier, int kind, const char * );
 	void addToEnclosingScope( const std::string & identifier, int kind, const char * );
Index: src/Parser/lex.ll
===================================================================
--- src/Parser/lex.ll	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/lex.ll	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -44,6 +44,13 @@
 
 #include "config.h"										// configure info
+#include "DeclarationNode.h"                            // for DeclarationNode
+#include "ExpressionNode.h"                             // for LabelNode
+#include "InitializerNode.h"                            // for InitializerNode
 #include "ParseNode.h"
+#include "ParserTypes.h"                                // for Token
+#include "StatementNode.h"                              // for CondCtl, ForCtrl
 #include "TypedefTable.h"
+// This (generated) header must come late as it is missing includes.
+#include "parser.hh"                                    // generated info
 
 string * build_postfix_name( string * name );
Index: src/Parser/module.mk
===================================================================
--- src/Parser/module.mk	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/module.mk	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -21,6 +21,9 @@
 SRC += \
        Parser/DeclarationNode.cc \
+       Parser/DeclarationNode.h \
        Parser/ExpressionNode.cc \
+       Parser/ExpressionNode.h \
        Parser/InitializerNode.cc \
+       Parser/InitializerNode.h \
        Parser/lex.ll \
        Parser/ParseNode.cc \
@@ -33,4 +36,5 @@
        Parser/RunParser.hpp \
        Parser/StatementNode.cc \
+       Parser/StatementNode.h \
        Parser/TypeData.cc \
        Parser/TypeData.h \
Index: src/Parser/parser.yy
===================================================================
--- src/Parser/parser.yy	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/Parser/parser.yy	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -48,9 +48,12 @@
 using namespace std;
 
-#include "SynTree/Declaration.h"
-#include "ParseNode.h"
+#include "SynTree/Type.h"                               // for Type
+#include "DeclarationNode.h"                            // for DeclarationNode, ...
+#include "ExpressionNode.h"                             // for ExpressionNode, ...
+#include "InitializerNode.h"                            // for InitializerNode, ...
+#include "ParserTypes.h"
+#include "StatementNode.h"                              // for build_...
 #include "TypedefTable.h"
 #include "TypeData.h"
-#include "SynTree/LinkageSpec.h"
 #include "Common/SemanticError.h"						// error_str
 #include "Common/utility.h"								// for maybeMoveBuild, maybeBuild, CodeLo...
Index: src/ResolvExpr/CurrentObject.cc
===================================================================
--- src/ResolvExpr/CurrentObject.cc	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/ResolvExpr/CurrentObject.cc	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -9,7 +9,7 @@
 // Author           : Rob Schluntz
 // Created On       : Tue Jun 13 15:28:32 2017
-// Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Jul  1 09:16:01 2022
-// Update Count     : 15
+// Last Modified By : Andrew Beach
+// Last Modified On : Mon Apr 10  9:40:00 2023
+// Update Count     : 18
 //
 
@@ -593,4 +593,43 @@
 
 namespace ast {
+	/// Iterates members of a type by initializer.
+	class MemberIterator {
+	public:
+		virtual ~MemberIterator() {}
+
+		/// Internal set position based on iterator ranges.
+		virtual void setPosition(
+			std::deque< ptr< Expr > >::const_iterator it,
+			std::deque< ptr< Expr > >::const_iterator end ) = 0;
+
+		/// Walks the current object using the given designators as a guide.
+		void setPosition( const std::deque< ptr< Expr > > & designators ) {
+			setPosition( designators.begin(), designators.end() );
+		}
+
+		/// Retrieve the list of possible (Type,Designation) pairs for the
+		/// current position in the current object.
+		virtual std::deque< InitAlternative > operator* () const = 0;
+
+		/// True if the iterator is not currently at the end.
+		virtual operator bool() const = 0;
+
+		/// Moves the iterator by one member in the current object.
+		virtual MemberIterator & bigStep() = 0;
+
+		/// Moves the iterator by one member in the current subobject.
+		virtual MemberIterator & smallStep() = 0;
+
+		/// The type of the current object.
+		virtual const Type * getType() = 0;
+
+		/// The type of the current subobject.
+		virtual const Type * getNext() = 0;
+
+		/// Helper for operator*; aggregates must add designator to each init
+		/// alternative, but adding designators in operator* creates duplicates.
+		virtual std::deque< InitAlternative > first() const = 0;
+	};
+
 	/// create a new MemberIterator that traverses a type correctly
 	MemberIterator * createMemberIterator( const CodeLocation & loc, const Type * type );
@@ -684,6 +723,6 @@
 
 		void setPosition(
-			std::deque< ptr< Expr > >::const_iterator begin,
-			std::deque< ptr< Expr > >::const_iterator end
+			std::deque<ast::ptr<ast::Expr>>::const_iterator begin,
+			std::deque<ast::ptr<ast::Expr>>::const_iterator end
 		) override {
 			if ( begin == end ) return;
@@ -898,25 +937,84 @@
 	};
 
-	class TupleIterator final : public AggregateIterator {
-	public:
-		TupleIterator( const CodeLocation & loc, const TupleType * inst )
-		: AggregateIterator(
-			loc, "TupleIterator", toString("Tuple", inst->size()), inst, inst->members
-		) {}
+	/// Iterates across the positions in a tuple:
+	class TupleIterator final : public MemberIterator {
+		CodeLocation location;
+		ast::TupleType const * const tuple;
+		size_t index = 0;
+		size_t size = 0;
+		std::unique_ptr<MemberIterator> sub_iter;
+
+		const ast::Type * typeAtIndex() const {
+			assert( index < size );
+			return tuple->types[ index ].get();
+		}
+
+	public:
+		TupleIterator( const CodeLocation & loc, const TupleType * type )
+		: location( loc ), tuple( type ), size( type->size() ) {
+			PRINT( std::cerr << "Creating tuple iterator: " << type << std::endl; )
+			sub_iter.reset( createMemberIterator( loc, typeAtIndex() ) );
+		}
+
+		void setPosition( const ast::Expr * expr ) {
+			auto arg = eval( expr );
+			index = arg.first;
+		}
+
+		void setPosition(
+				std::deque< ptr< Expr > >::const_iterator begin,
+				std::deque< ptr< Expr > >::const_iterator end ) {
+			if ( begin == end ) return;
+
+			setPosition( *begin );
+			sub_iter->setPosition( ++begin, end );
+		}
+
+		std::deque< InitAlternative > operator*() const override {
+			return first();
+		}
 
 		operator bool() const override {
-			return curMember != members.end() || (memberIter && *memberIter);
+			return index < size;
 		}
 
 		TupleIterator & bigStep() override {
-			PRINT( std::cerr << "bigStep in " << kind << std::endl; )
-			atbegin = false;
-			memberIter = nullptr;
-			curType = nullptr;
-			while ( curMember != members.end() ) {
-				++curMember;
-				if ( init() ) return *this;
-			}
+			++index;
+			sub_iter.reset( index < size ?
+				createMemberIterator( location, typeAtIndex() ) : nullptr );
 			return *this;
+		}
+
+		TupleIterator & smallStep() override {
+			if ( sub_iter ) {
+				PRINT( std::cerr << "has member iter: " << *sub_iter << std::endl; )
+				sub_iter->smallStep();
+				if ( !sub_iter ) {
+					PRINT( std::cerr << "has valid member iter" << std::endl; )
+					return *this;
+				}
+			}
+			return bigStep();
+		}
+
+		const ast::Type * getType() override {
+			return tuple;
+		}
+
+		const ast::Type * getNext() override {
+			return ( sub_iter && *sub_iter ) ? sub_iter->getType() : nullptr;
+		}
+
+		std::deque< InitAlternative > first() const override {
+			PRINT( std::cerr << "first in TupleIterator (" << index << "/" << size << ")" << std::endl; )
+			if ( sub_iter && *sub_iter ) {
+				std::deque< InitAlternative > ret = sub_iter->first();
+				for ( InitAlternative & alt : ret ) {
+					alt.designation.get_and_mutate()->designators.emplace_front(
+						ConstantExpr::from_ulong( location, index ) );
+				}
+				return ret;
+			}
+			return {};
 		}
 	};
Index: src/ResolvExpr/CurrentObject.h
===================================================================
--- src/ResolvExpr/CurrentObject.h	(revision 2b01f8eb0956d35d26d892237add4fca234e1762)
+++ src/ResolvExpr/CurrentObject.h	(revision a0854709c4af89b15e20a76740a80ac044d10228)
@@ -9,7 +9,7 @@
 // Author           : Rob Schluntz
 // Created On       : Thu Jun  8 11:07:25 2017
-// Last Modified By : Peter A. Buhr
-// Last Modified On : Sat Jul 22 09:36:48 2017
-// Update Count     : 3
+// Last Modified By : Andrew Beach
+// Last Modified On : Thu Apr  6 16:14:00 2023
+// Update Count     : 4
 //
 
@@ -65,41 +65,5 @@
 
 	/// Iterates members of a type by initializer
-	class MemberIterator {
-	public:
-		virtual ~MemberIterator() {}
-
-		/// Internal set position based on iterator ranges
-		virtual void setPosition( 
-			std::deque< ptr< Expr > >::const_iterator it, 
-			std::deque< ptr< Expr > >::const_iterator end ) = 0;
-
-		/// walks the current object using the given designators as a guide
-		void setPosition( const std::deque< ptr< Expr > > & designators ) {
-			setPosition( designators.begin(), designators.end() );
-		}
-
-		/// retrieve the list of possible (Type,Designation) pairs for the current position in the 
-		/// current object
-		virtual std::deque< InitAlternative > operator* () const = 0;
-
-		/// true if the iterator is not currently at the end
-		virtual operator bool() const = 0;
-
-		/// moves the iterator by one member in the current object
-		virtual MemberIterator & bigStep() = 0;
-
-		/// moves the iterator by one member in the current subobject
-		virtual MemberIterator & smallStep() = 0;
-
-		/// the type of the current object
-		virtual const Type * getType() = 0;
-
-		/// the type of the current subobject
-		virtual const Type * getNext() = 0;
-	
-		/// helper for operator*; aggregates must add designator to each init alternative, but 
-		/// adding designators in operator* creates duplicates
-		virtual std::deque< InitAlternative > first() const = 0;
-	};
+	class MemberIterator;
 
 	/// Builds initializer lists in resolution
