Index: doc/theses/aaron_moss_PhD/comp_II/.gitignore
===================================================================
--- doc/theses/aaron_moss_PhD/comp_II/.gitignore	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/comp_II/.gitignore	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,4 @@
+# generated by latex
+build/*
+*.pdf
+*.ps
Index: doc/theses/aaron_moss_PhD/comp_II/Makefile
===================================================================
--- doc/theses/aaron_moss_PhD/comp_II/Makefile	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/comp_II/Makefile	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,81 @@
+## Define the configuration variables.
+
+Build = build
+Figures = figures
+Macros = ../../../LaTeXmacros
+TeXLIB = .:${Macros}:${Build}:../../../bibliography:
+LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
+BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
+
+MAKEFLAGS = --no-print-directory --silent #
+VPATH = ${Build} ${Figures}
+
+## Define the text source files.
+
+SOURCES = ${addsuffix .tex, \
+comp_II \
+}
+
+FIGURES = ${addsuffix .tex, \
+}
+
+PICTURES = ${addsuffix .pstex, \
+}
+
+PROGRAMS = ${addsuffix .tex, \
+}
+
+GRAPHS = ${addsuffix .tex, \
+}
+
+## Define the documents that need to be made.
+
+DOCUMENT = comp_II.pdf
+BASE = ${basename ${DOCUMENT}}
+
+# Directives #
+
+.PHONY : all clean					# not file names
+
+all : ${DOCUMENT}
+
+clean :
+	@rm -frv ${DOCUMENT} ${BASE}.ps ${Build}
+
+# File Dependencies #
+
+${DOCUMENT} : ${BASE}.ps
+	ps2pdf $<
+
+${BASE}.ps : ${BASE}.dvi
+	dvips ${Build}/$< -o $@
+
+${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
+		${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib | ${Build}
+	# Must have *.aux file containing citations for bibtex
+	if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
+	-${BibTeX} ${Build}/${basename $@}
+	# Some citations reference others so run again to resolve these citations
+	${LaTeX} ${basename $@}.tex
+	-${BibTeX} ${Build}/${basename $@}
+	# Run again to finish citations
+	${LaTeX} ${basename $@}.tex
+
+## Define the default recipes.
+
+${Build}:
+	mkdir -p ${Build}
+
+%.tex : %.fig | ${Build}
+	fig2dev -L eepic $< > ${Build}/$@
+
+%.ps : %.fig | ${Build}
+	fig2dev -L ps $< > ${Build}/$@
+
+%.pstex : %.fig | ${Build}
+	fig2dev -L pstex $< > ${Build}/$@
+	fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
+
+# Local Variables: #
+# compile-command: "make" #
+# End: #
Index: doc/theses/aaron_moss_PhD/comp_II/comp_II.tex
===================================================================
--- doc/theses/aaron_moss_PhD/comp_II/comp_II.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/comp_II/comp_II.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,644 @@
+\documentclass[twoside,11pt]{article}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Latex packages used in the document (copied from CFA user manual).
+\usepackage[T1]{fontenc}                                % allow Latin1 (extended ASCII) characters
+\usepackage{textcomp}
+\usepackage[latin1]{inputenc}
+\usepackage{fullpage,times,comment}
+\usepackage{epic,eepic}
+\usepackage{upquote}									% switch curled `'" to straight
+\usepackage{calc}
+\usepackage{xspace}
+\usepackage{graphicx}
+\usepackage{varioref}									% extended references
+\usepackage{listings}									% format program code
+\usepackage[flushmargin]{footmisc}						% support label/reference in footnote
+\usepackage{latexsym}                                   % \Box glyph
+\usepackage{mathptmx}                                   % better math font with "times"
+\usepackage[usenames]{color}
+\input{common}                                          % bespoke macros used in the document
+\usepackage[dvips,plainpages=false,pdfpagelabels,pdfpagemode=UseNone,colorlinks=true,pagebackref=true,linkcolor=blue,citecolor=blue,urlcolor=blue,pagebackref=true,breaklinks=true]{hyperref}
+\usepackage{breakurl}
+\renewcommand{\UrlFont}{\small\sf}
+
+\usepackage[pagewise]{lineno}
+\renewcommand{\linenumberfont}{\scriptsize\sffamily}
+
+% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
+% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
+% AFTER HYPERREF.
+\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
+
+\setlength{\topmargin}{-0.45in}							% move running title into header
+\setlength{\headsep}{0.25in}
+
+\CFAStyle												% use default CFA format-style
+
+% inline code ©...© (copyright symbol) emacs: C-q M-)
+% red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
+% blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
+% green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
+% LaTex escape §...§ (section symbol) emacs: C-q M-'
+% keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
+% math escape $...$ (dollar symbol)
+
+\usepackage{caption}
+\usepackage{subcaption}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\newsavebox{\LstBox}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\title{
+\Huge \vspace*{1in} Efficient Type Resolution in \CFA \\
+\huge \vspace*{0.25in} PhD Comprehensive II Research Proposal
+\vspace*{1in}
+}
+
+\author{
+\huge Aaron Moss \\
+\Large \vspace*{0.1in} \texttt{a3moss@uwaterloo.ca} \\
+\Large Cheriton School of Computer Science \\
+\Large University of Waterloo
+}
+
+\date{
+\today
+}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\newcommand{\bigO}[1]{O\!\left( #1 \right)}
+
+\begin{document}
+\pagestyle{headings}
+% changed after setting pagestyle
+\renewcommand{\sectionmark}[1]{\markboth{\thesection\quad #1}{\thesection\quad #1}}
+\renewcommand{\subsectionmark}[1]{\markboth{\thesubsection\quad #1}{\thesubsection\quad #1}}
+\pagenumbering{roman}
+\linenumbers                                            % comment out to turn off line numbering
+
+\maketitle
+\thispagestyle{empty}
+
+\clearpage
+\thispagestyle{plain}
+\pdfbookmark[1]{Contents}{section}
+\tableofcontents
+
+\clearpage
+\thispagestyle{plain}
+\pagenumbering{arabic}
+
+\section{Introduction}
+
+\CFA\footnote{Pronounced ``C-for-all'', and written \CFA or \CFL.} is an evolutionary modernization of the C programming language currently being designed and built at the University of Waterloo by a team led by Peter Buhr. 
+\CFA both fixes existing design problems and adds multiple new features to C, including name overloading, user-defined operators, parametric-polymorphic routines, and type constructors and destructors, among others. 
+The new features make \CFA more powerful and expressive than C, but impose a compile-time cost, particularly in the expression resolver, which must evaluate the typing rules of a significantly more complex type-system.
+
+The primary goal of this research project is to develop a sufficiently performant expression resolution algorithm, experimentally validate its performance, and integrate it into CFA, the \CFA reference compiler.
+Secondary goals of this project include the development of various new language features for \CFA: parametric-polymorphic (``generic'') types have already been designed and implemented, and reference types and user-defined conversions are under design consideration. 
+An experimental performance-testing architecture for resolution algorithms is under development to determine the relative performance of different expression resolution algorithms, as well as the compile-time cost of adding various new features to the \CFA type-system. 
+More broadly, this research should provide valuable data for implementers of compilers for other programming languages with similarly powerful static type-systems.
+
+\section{\CFA}
+
+To make the scope of the proposed expression resolution problem more explicit, it is necessary to define the features of both C and \CFA (both current and proposed) that affect this algorithm. 
+In some cases the interactions of multiple features make expression resolution a significantly more complex problem than any individual feature would; in other cases a feature that does not by itself add any complexity to expression resolution triggers previously rare edge cases more frequently.
+
+It is important to note that \CFA is not an object-oriented language.
+\CFA does have a system of (possibly implicit) type conversions derived from C's type conversions; while these conversions may be thought of as something like an inheritance hierarchy, the underlying semantics are significantly different and such an analogy is loose at best. 
+Particularly, \CFA has no concept of ``subclass'', and thus no need to integrate an inheritance-based form of polymorphism with its parametric and overloading-based polymorphism. 
+The graph structure of the \CFA type conversions is also markedly different than an inheritance graph; it has neither a top nor a bottom type, and does not satisfy the lattice properties typical of inheritance graphs.
+
+\subsection{Polymorphic Functions}
+The most significant feature \CFA adds is parametric-polymorphic functions. 
+Such functions are written using a ©forall© clause (which gives the language its name):
+\begin{lstlisting}
+®forall(otype T)®
+T identity(T x) {
+    return x;
+}
+
+int forty_two = identity(42); // T is bound to int, forty_two == 42
+\end{lstlisting}
+The ©identity© function above can be applied to any complete object type (or ``©otype©''). 
+The type variable ©T© is transformed into a set of additional implicit parameters to ©identity©, which encode sufficient information about ©T© to create and return a variable of that type. 
+The current \CFA implementation passes the size and alignment of the type represented by an ©otype© parameter, as well as an assignment operator, constructor, copy constructor and destructor. 
+Here, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; preliminary experiments have shown this overhead to be similar to \CC virtual function calls. 
+Determining if packaging all polymorphic arguments to a function into a virtual function table would reduce the runtime overhead of polymorphic calls is an open research question. 
+
+Since bare polymorphic types do not provide a great range of available operations, \CFA provides a \emph{type assertion} mechanism to provide further information about a type:
+\begin{lstlisting}
+forall(otype T ®| { T twice(T); }®)
+T four_times(T x) {
+    return twice( twice(x) );
+}
+
+double twice(double d) { return d * 2.0; } // (1)
+
+double magic = four_times(10.5); // T is bound to double, uses (1) to satisfy type assertion
+\end{lstlisting}
+These type assertions may be either variable or function declarations that depend on a polymorphic type variable. 
+©four_times© can only be called with an argument for which there exists a function named ©twice© that can take that argument and return another value of the same type; a pointer to the appropriate ©twice© function is passed as an additional implicit parameter to the call of ©four_times©.
+
+Monomorphic specializations of polymorphic functions can themselves be used to satisfy type assertions. 
+For instance, ©twice© could have been defined using the \CFA syntax for operator overloading as:
+\begin{lstlisting}
+forall(otype S | { ®S ?+?(S, S);® })
+S twice(S x) { return x + x; }  // (2)
+\end{lstlisting} 
+This version of ©twice© works for any type ©S© that has an addition operator defined for it, and it could have been used to satisfy the type assertion on ©four_times©. 
+The compiler accomplishes this by creating a wrapper function calling ©twice // (2)© with ©S© bound to ©double©, then providing this wrapper function to ©four_times©\footnote{©twice // (2)© could also have had a type parameter named ©T©; \CFA specifies renaming of the type parameters, which would avoid the name conflict with the type variable ©T© of ©four_times©.}. 
+
+Finding appropriate functions to satisfy type assertions is essentially a recursive case of expression resolution, as it takes a name (that of the type assertion) and attempts to match it to a suitable declaration \emph{in the current scope}. 
+If a polymorphic function can be used to satisfy one of its own type assertions, this recursion may not terminate, as it is possible that function is examined as a candidate for its own type assertion unboundedly repeatedly. 
+To avoid infinite loops, the current CFA compiler imposes a fixed limit on the possible depth of recursion, similar to that employed by most \CC compilers for template expansion; this restriction means that there are some semantically well-typed expressions that cannot be resolved by CFA. 
+One area of potential improvement this project proposes to investigate is the possibility of using the compiler's knowledge of the current set of declarations to more precisely determine when further type assertion satisfaction recursion does not produce a well-typed expression.
+
+\subsubsection{Traits}
+\CFA provides \emph{traits} as a means to name a group of type assertions, as in the example below:
+\begin{lstlisting}
+®trait has_magnitude(otype T)® {
+    bool ?<?(T, T);        // comparison operator for T
+    T -?(T);               // negation operator for T
+    void ?{}(T*, zero_t);  // constructor from 0 literal
+};
+
+forall(otype M | has_magnitude(M))
+M abs( M m ) {
+    M zero = 0;  // uses zero_t constructor from trait
+    return m < zero ? -m : m;
+}
+
+forall(otype M | has_magnitude(M))
+M max_magnitude( M a, M b ) {
+    return abs(a) < abs(b) ? b : a; 
+}
+\end{lstlisting}
+
+Semantically, traits are simply named lists of type assertions, but they may be used for many of the same purposes that interfaces in Java or abstract base classes in \CC are used for.
+Unlike Java interfaces or \CC base classes, \CFA types do not explicitly state any inheritance relationship to traits they satisfy; this can be considered a form of structural inheritance, similar to implementation of an interface in Go, as opposed to the nominal inheritance model of Java and \CC. 
+Nominal inheritance can be simulated with traits using marker variables or functions:
+\begin{lstlisting}
+trait nominal(otype T) {
+    ®T is_nominal;®
+};
+
+int is_nominal;  // int now satisfies the nominal trait
+{
+    char is_nominal; // char satisfies the nominal trait
+}
+// char no longer satisfies the nominal trait here  
+\end{lstlisting}
+
+Traits, however, are significantly more powerful than nominal-inheritance interfaces; firstly, due to the scoping rules of the declarations that satisfy a trait's type assertions, a type may not satisfy a trait everywhere that the type is declared, as with ©char© and the ©nominal© trait above. 
+Secondly, traits may be used to declare a relationship among multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems:
+\begin{lstlisting}
+trait pointer_like(®otype Ptr, otype El®) {
+    lvalue El *?(Ptr); // Ptr can be dereferenced into a modifiable value of type El
+}
+
+struct list {
+    int value;
+    list *next;  // may omit "struct" on type names
+};
+
+typedef list *list_iterator;
+
+lvalue int *?( list_iterator it ) {
+    return it->value;
+}
+\end{lstlisting}
+
+In the example above, ©(list_iterator, int)© satisfies ©pointer_like© by the user-defined dereference function, and ©(list_iterator, list)© also satisfies ©pointer_like© by the built-in dereference operator for pointers. 
+Given a declaration ©list_iterator it©, ©*it© can be either an ©int© or a ©list©, with the meaning disambiguated by context (\eg ©int x = *it;© interprets ©*it© as an ©int©, while ©(*it).value = 42;© interprets ©*it© as a ©list©).
+While a nominal-inheritance system with associated types could model one of those two relationships by making ©El© an associated type of ©Ptr© in the ©pointer_like© implementation, few such systems could model both relationships simultaneously.
+
+The flexibility of \CFA's implicit trait-satisfaction mechanism provides programmers with a great deal of power, but also blocks some optimization approaches for expression resolution. 
+The ability of types to begin to or cease to satisfy traits when declarations go into or out of scope makes caching of trait satisfaction judgements difficult, and the ability of traits to take multiple type parameters can lead to a combinatorial explosion of work in any attempt to pre-compute trait satisfaction relationships. 
+On the other hand, the addition of a nominal inheritance mechanism to \CFA's type system or replacement of \CFA's trait satisfaction system with a more object-oriented inheritance model and investigation of possible expression resolution optimizations for such a system may be an interesting avenue of further research.
+
+\subsection{Name Overloading}
+In C, no more than one variable or function in the same scope may share the same name\footnote{Technically, C has multiple separated namespaces, one holding ©struct©, ©union©, and ©enum© tags, one holding labels, one holding typedef names, variable, function, and enumerator identifiers, and one for each ©struct© or ©union© type holding the field names.}, and variable or function declarations in inner scopes with the same name as a declaration in an outer scope hide the outer declaration. 
+This restriction makes finding the proper declaration to match to a variable expression or function application a simple matter of symbol-table lookup, which can be easily and efficiently implemented. 
+\CFA, on the other hand, allows overloading of variable and function names, so long as the overloaded declarations do not have the same type, avoiding the multiplication of variable and function names for different types common in the C standard library, as in the following example:
+\begin{lstlisting}
+#include <limits.h>
+
+int max(int a, int b) { return a < b ? b : a; }  // (1)
+double max(double a, double b) { return a < b ? b : a; }  // (2)
+
+int max = INT_MAX;     // (3)
+double max = DBL_MAX;  // (4)
+
+max(7, -max);   // uses (1) and (3), by matching int type of the constant 7 
+max(max, 3.14); // uses (2) and (4), by matching double type of the constant 3.14
+
+max(max, -max);  // ERROR: ambiguous
+int m = max(max, -max); // uses (1) once and (3) twice, by matching return type
+\end{lstlisting}
+
+The presence of name overloading in \CFA means that simple table lookup is insufficient to match identifiers to declarations, and a type matching algorithm must be part of expression resolution.
+
+\subsection{Implicit Conversions}
+In addition to the multiple interpretations of an expression produced by name overloading, \CFA must support all of the implicit conversions present in C for backward compatibility, producing further candidate interpretations for expressions. 
+C does not have an inheritance hierarchy of types, but the C standard's rules for the ``usual arithmetic conversions'' define which of the built-in types are implicitly convertible to which other types, and the relative cost of any pair of such conversions from a single source type. 
+\CFA adds to the usual arithmetic conversions rules defining the cost of binding a polymorphic type variable in a function call; such bindings are cheaper than any \emph{unsafe} (narrowing) conversion, \eg ©int© to ©char©, but more expensive than any \emph{safe} (widening) conversion, \eg ©int© to ©double©. 
+
+The expression resolution problem, then, is to find the unique minimal-cost interpretation of each expression in the program, where all identifiers must be matched to a declaration, and implicit conversions or polymorphic bindings of the result of an expression may increase the cost of the expression. 
+Note that which subexpression interpretation is minimal-cost may require contextual information to disambiguate. 
+For instance, in the example in the previous subsection, ©max(max, -max)© cannot be unambiguously resolved, but ©int m = max(max, -max)© has a single minimal-cost resolution. 
+While the interpretation ©int m = (int)max((double)max, -(double)max)© is also a valid interpretation, it is not minimal-cost due to the unsafe cast from the ©double© result of ©max© to ©int© (the two ©double© casts function as type ascriptions selecting ©double max© rather than casts from ©int max© to ©double©, and as such are zero-cost).
+
+\subsubsection{User-generated Implicit Conversions}
+One possible additional feature to \CFA included in this research proposal is \emph{user-generated implicit conversions}. 
+Such a conversion system should be simple for programmers to utilize, and fit naturally with the existing design of implicit conversions in C; ideally it would also be sufficiently powerful to encode C's usual arithmetic conversions itself, so that \CFA only has one set of rules for conversions. 
+
+Ditchfield~\cite{Ditchfield:conversions} laid out a framework for using polymorphic-conversion-constructor functions to create a directed acyclic graph (DAG) of conversions. 
+A monomorphic variant of these functions can be used to mark a conversion arc in the DAG as only usable as the final step in a conversion. 
+With these two types of conversion arcs, separate DAGs can be created for the safe and the unsafe conversions, and conversion cost can be represented as the length of the shortest path through the DAG from one type to another. 
+\begin{figure}[h]
+\centering
+\includegraphics{conversion_dag}
+\caption{A portion of the implicit conversion DAG for built-in types.}\label{fig:conv_dag}
+\end{figure}
+As can be seen in Figure~\ref{fig:conv_dag}, there are either safe or unsafe paths between each of the arithmetic types listed; the ``final'' arcs are important both to avoid creating cycles in the signed-unsigned conversions, and to disambiguate potential diamond conversions (\eg, if the ©int© to ©unsigned int© conversion was not marked final there would be two length-two paths from ©int© to ©unsigned long©, making it impossible to choose which one; however, since the ©unsigned int© to ©unsigned long© arc can not be traversed after the final ©int© to ©unsigned int© arc, there is a single unambiguous conversion path from ©int© to ©unsigned long©).
+
+Open research questions on this topic include:
+\begin{itemize}
+\item Can a conversion graph be generated that represents each allowable conversion in C with a unique minimal-length path such that the path lengths accurately represent the relative costs of the conversions?
+\item Can such a graph representation be usefully augmented to include user-defined types as well as built-in types?
+\item Can the graph be efficiently represented and used in the expression resolver?
+\end{itemize}
+
+\subsection{Constructors and Destructors}
+Rob Shluntz, a current member of the \CFA research team, has added constructors and destructors to \CFA. 
+Each type has an overridable default-generated zero-argument constructor, copy constructor, assignment operator, and destructor.
+For ©struct© types these functions each call their equivalents on each field of the ©struct©. 
+This feature affects expression resolution because an ©otype© type variable ©T© implicitly adds four type assertions, one for each of these four functions, so assertion resolution is pervasive in \CFA polymorphic functions, even those without any explicit type assertions. 
+The following example shows the implicitly-generated code in green:
+\begin{lstlisting}
+struct kv {
+    int key;
+    char *value;
+};
+
+¢void ?{}(kv *this) {  // default constructor
+    ?{}(&(this->key));  // call recursively on members
+    ?{}(&(this->value));
+}
+void ?{}(kv *this, kv that) {  // copy constructor
+    ?{}(&(this->key), that.key);
+    ?{}(&(this->value), that.value);
+}
+kv ?=?(kv *this, kv that) {  // assignment operator
+    ?=?(&(this->key), that.key);
+    ?=?(&(this->value), that.value);
+    return *this;
+}
+void ^?{}(kv *this) {  // destructor
+    ^?{}(&(this->key));
+    ^?{}(&(this->value));
+}¢
+
+forall(otype T ¢| { void ?{}(T*); void ?{}(T*, T); T ?=?(T*, T); void ^?{}(T*); }¢)
+void foo(T);
+\end{lstlisting}
+
+\subsection{Generic Types}
+I have already added a generic type capability to \CFA, designed to efficiently and naturally integrate with \CFA's existing polymorphic functions. 
+A generic type can be declared by placing a ©forall© specifier on a ©struct© or ©union© declaration, and instantiated using a parenthesized list of types after the type name:
+\begin{lstlisting}
+forall(otype R, otype S) struct pair {
+    R first;
+    S second;
+};
+
+forall(otype T)
+T value( pair(const char*, T) *p ) { return p->second; }
+
+pair(const char*, int) p = { "magic", 42 };
+int magic = value( &p );
+\end{lstlisting}
+For \emph{concrete} generic types, that is, those where none of the type parameters depend on polymorphic type variables (like ©pair(const char*, int)© above), the struct is essentially template expanded to a new struct type; for \emph{polymorphic} generic types (such as ©pair(const char*, T)© above), member access is handled by a runtime calculation of the field offset, based on the size and alignment information of the polymorphic parameter type. 
+The default-generated constructors, destructor and assignment operator for a generic type are polymorphic functions with the same list of type parameters as the generic type definition.
+
+Aside from giving users the ability to create more parameterized types than just the built-in pointer, array and function types, the combination of generic types with polymorphic functions and implicit conversions makes the edge case where the resolver may enter an infinite loop much more common, as in the following code example: 
+\begin{lstlisting}
+forall(otype T) struct box { T x; };
+
+void f(void*); // (1)
+
+forall(otype S)
+void f(box(S)* b) { // (2)
+	f(®(void*)0®);
+}
+\end{lstlisting}
+
+The loop in the resolver happens as follows:
+\begin{itemize} 
+\item Since there is an implicit conversion from ©void*© to any pointer type, the highlighted expression can be interpreted as either a ©void*©, matching ©f // (1)©, or a ©box(S)*© for some type ©S©, matching ©f // (2)©.
+\item To determine the cost of the ©box(S)© interpretation, a type must be found for ©S© that satisfies the ©otype© implicit type assertions (assignment operator, default and copy constructors, and destructor); one option is ©box(S2)© for some type ©S2©.
+\item The assignment operator, default and copy constructors, and destructor of ©box(T)© are also polymorphic functions, each of which require the type parameter ©T© to have an assignment operator, default and copy constructors, and destructor. When choosing an interpretation for ©S2©, one option is ©box(S3)©, for some type ©S3©.
+\item The previous step repeats until stopped, with four times as much work performed at each step.
+\end{itemize}
+This problem can occur in any resolution context where a polymorphic function that can satisfy its own type assertions is required for a possible interpretation of an expression with no constraints on its type, and is thus not limited to combinations of generic types with ©void*© conversions.
+However, constructors for generic types often satisfy their own assertions and a polymorphic conversion such as the ©void*© conversion to a polymorphic variable is a common way to create an expression with no constraints on its type. 
+As discussed above, the \CFA expression resolver must handle this possible infinite recursion somehow, and it occurs fairly naturally in code like the above that uses generic types. 
+
+\subsection{Tuple Types}
+\CFA adds \emph{tuple types} to C, a syntactic facility for referring to lists of values anonymously or with a single identifier. 
+An identifier may name a tuple, and a function may return one. 
+Particularly relevantly for resolution, a tuple may be implicitly \emph{destructured} into a list of values, as in the call to ©swap©:
+\begin{lstlisting}
+[char, char] x = [ '!', '?' ];  // (1)
+int x = 42;  // (2)
+
+forall(otype T) [T, T] swap( T a, T b ) { return [b, a]; }  // (3)
+
+x = swap( x ); // destructure [char, char] x into two elements of parameter list
+// cannot use int x for parameter, not enough arguments to swap
+
+void swap( int, char, char ); // (4)
+
+swap( x, x ); // resolved as (4) on (2) and (1)
+// (3) on (2) and (2) is close, but the polymorphic binding makes it not minimal-cost
+\end{lstlisting}
+Tuple destructuring means that the mapping from the position of a subexpression in the argument list to the position of a parameter in the function declaration is not straightforward, as some arguments may be expandable to different numbers of parameters, like ©x© above. 
+In the second example, the second ©x© argument can be resolved starting at the second or third parameter of ©swap©, depending which interpretation of ©x© was chosen for the first argument.
+
+\subsection{Reference Types}
+I have been designing \emph{reference types} for \CFA, in collaboration with the rest of the \CFA research team. 
+Given some type ©T©, a ©T&© (``reference to ©T©'') is essentially an automatically dereferenced pointer; with these semantics most of the C standard's discussions of lvalues can be expressed in terms of references instead, with the benefit of being able to express the difference between the reference and non-reference version of a type in user code. 
+References preserve C's existing qualifier-dropping lvalue-to-rvalue conversion (\eg a ©const volatile int&© can be implicitly converted to a bare ©int©).
+The reference proposal also adds a rvalue-to-lvalue conversion to \CFA, implemented by storing the value in a new compiler-generated temporary and passing a reference to the temporary. 
+These two conversions can chain, producing a qualifier-dropping conversion for references, for instance converting a reference to a ©const int© into a reference to a non-©const int© by copying the originally referred-to value into a fresh temporary and taking a reference to this temporary, as in:
+\begin{lstlisting} 
+const int magic = 42;
+
+void print_inc( int& x ) { printf("%d\n", ++x); }
+
+print_inc( magic ); // legal; implicitly generated code in green below:
+
+¢int tmp = magic;¢ // to safely strip const-qualifier
+¢print_inc( tmp );¢ // tmp is incremented, magic is unchanged
+\end{lstlisting}
+These reference conversions may also chain with the other implicit type-conversions. 
+The main implication of the reference conversions for expression resolution is the multiplication of available implicit conversions, though given the restricted context reference conversions may be able to be treated efficiently as a special case of implicit conversions.
+
+\subsection{Special Literal Types}
+Another proposal currently under consideration for the \CFA type-system is assigning special types to the literal values ©0© and ©1©. 
+Implicit conversions from these types allow ©0© and ©1© to be considered as values of many different types, depending on context, allowing expression desugarings like ©if ( x ) {}© $\Rightarrow$ ©if ( x != 0 ) {}© to be implemented efficiently and precisely. 
+This approach is a generalization of C's existing behaviour of treating ©0© as either an integer zero or a null pointer constant, and treating either of those values as boolean false. 
+The main implication for expression resolution is that the frequently encountered expressions ©0© and ©1© may have a large number of valid interpretations.
+
+\subsection{Deleted Function Declarations}
+One final proposal for \CFA with an impact on the expression resolver is \emph{deleted function declarations}; in \CCeleven, a function declaration can be deleted as below:
+\begin{lstlisting}
+int somefn(char) = delete;
+\end{lstlisting}
+This feature is typically used in \CCeleven to make a type non-copyable by deleting its copy constructor and assignment operator\footnote{In previous versions of \CC a type could be made non-copyable by declaring a private copy constructor and assignment operator, but not defining either. This idiom is well-known, but depends on some rather subtle and \CC-specific rules about private members and implicitly-generated functions; the deleted-function form is both clearer and less verbose.}, or forbidding some interpretations of a polymorphic function by specifically deleting the forbidden overloads\footnote{Specific polymorphic function overloads can also be forbidden in previous \CC versions through use of template metaprogramming techniques, though this advanced usage is beyond the skills of many programmers. A similar effect can be produced on an ad-hoc basis at the appropriate call sites through use of casts to determine the function type. In both cases, the deleted-function form is clearer and more concise.}. 
+To add a similar feature to \CFA involves including the deleted function declarations in expression resolution along with the normal declarations, but producing a compiler error if the deleted function is the best resolution. 
+How conflicts should be handled between resolution of an expression to both a deleted and a non-deleted function is a small but open research question.
+
+\section{Expression Resolution}
+\subsection{Analysis}
+The expression resolution problem is determining an optimal match between some combination of argument interpretations and the parameter list of some overloaded instance of a function; the argument interpretations are produced by recursive invocations of expression resolution, where the base case is zero-argument functions (which are, for purposes of this discussion, semantically equivalent to named variables or constant literal expressions). 
+Assuming that the matching between a function's parameter list and a combination of argument interpretations can be done in $\bigO{p^k}$ time, where $p$ is the number of parameters and $k$ is some positive number, if there are $\bigO{i}$ valid interpretations for each subexpression, there will be $\bigO{i}$ candidate functions and $\bigO{i^p}$ possible argument combinations for each expression, so for a single recursive call expression resolution takes $\bigO{i^{p+1} \cdot p^k}$ time if it must compare all combinations, or $\bigO{i(p+1) \cdot p^k}$ time if argument-parameter matches can be chosen independently of each other. 
+Given these bounds, resolution of a single top-level expression tree of depth $d$ takes $\bigO{i^{p+1} \cdot p^{k \cdot d}}$ time under full-combination matching, or $\bigO{i(p+1) \cdot p^{k \cdot d}}$ time for independent-parameter matching\footnote{A call tree has leaves at depth $\bigO{d}$, and each internal node has $\bigO{p}$ fan-out, producing $\bigO{p^d}$ total recursive calls.}.
+
+Expression resolution is somewhat unavoidably exponential in $d$, the depth of the expression tree, and if arguments cannot be matched to parameters independently of each other, expression resolution is also exponential in $p$. 
+However, both $d$ and $p$ are fixed by the programmer, and generally bounded by reasonably small constants. 
+$k$, on the other hand, is mostly dependent on the representation of types in the system and the efficiency of type assertion checking; if a candidate argument combination can be compared to a function parameter list in linear time in the length of the list (\ie $k = 1$), then the $p^{k \cdot d}$ factor is linear in the input size of the source code for the expression, otherwise the resolution algorithm exhibits sub-linear performance scaling on code containing more-deeply nested expressions.
+The number of valid interpretations of any subexpression, $i$, is bounded by the number of types in the system, which is possibly infinite, though practical resolution algorithms for \CFA must be able to place some finite bound on $i$, possibly at the expense of type-system completeness. 
+
+\subsection{Expression Costs}
+The expression resolution problem involves minimization of a cost function; loosely defined, this cost function is the number of implicit conversions in the top-level expression interpretation. 
+With more specificity, the \emph{cost} of a particular expression interpretation is a lexicographically-ordered tuple, where each element of the tuple corresponds to a particular kind of conversion. 
+In \CFA today, cost is a three-tuple including the number of unsafe conversions, the number of polymorphic parameter bindings, and the number of safe conversions. 
+These counts include conversions used in subexpression interpretations, as well as those necessary to satisfy the type assertions of any polymorphic functions included in the interpretation. 
+
+\begin{lstlisting}
+void f(char, long);  // $f_1$ - cost (2, 0, 1)
+forall(otype T) void f(T, long); // $f_2$ - cost (0, 1, 1)
+void f(long, long); // $f_{3a}$ - cost (0, 0, 2)
+void f(int, float); // $f_{3b}$ - cost (0, 0, 2)
+void f(int, long);  // $f_4$ - cost (0, 0, 1)
+
+f(7, 11);
+\end{lstlisting}
+
+In the example above, the expression resolves to $f_4$. 
+$f_1$ has an unsafe conversion (from ©int© to ©char©), and is thus the highest cost, followed by $f_2$, which has a polymorphic binding (from ©int© to ©T©). 
+Neither $f_{3a}$, $f_{3b}$, or $f_4$ match exactly with the type of the call expression (©void (*)(int, int)©), each involving safe conversions, but in this case $f_4$ is cheaper than $f_{3a}$, because it converts fewer arguments, and is also cheaper than $f_{3b}$, because ©long© is a closer match for ©int© than ©float© is. 
+If the declaration of $f_4$ was missing, the expression would be ambiguous, because the two single-step ©int©-to-©long© conversions in $f_{3a}$ cost the same as the one double-step ©int©-to-©float© conversion in $f_{3b}$.
+
+In the course of this project I may modify the cost tuple,\footnote{I have considered adding an element to distinguish between cast expressions used as conversions and those used as type ascriptions, and another element to differentiate interpretations based on closer qualifier matches. The existing costing of polymorphic functions could also be made more precise than a bare count of parameter bindings.} but the essential nature of the cost calculation should remain the same.
+
+\subsection{Objectives}
+The research goal of this project is to develop a performant expression resolver for \CFA; this analysis suggests three primary areas of investigation to accomplish that end. 
+The first area of investigation is efficient argument-parameter matching; Bilson~\cite{Bilson03} mentions significant optimization opportunities available in the current literature to improve on the existing CFA compiler.
+%TODO: look up and lit review 
+The second area of investigation is minimizing dependencies between argument-parameter matches; the current CFA compiler attempts to match entire argument combinations against functions at once, potentially attempting to match the same argument against the same parameter multiple times. 
+Whether the feature set of \CFA admits an expression resolution algorithm where arguments can be matched to parameters independently of other arguments in the same function application is an area of open research; polymorphic type parameters produce enough cross-argument dependencies that the problem is not trivial. 
+If cross-argument resolution dependencies cannot be completely eliminated, effective caching strategies to reduce duplicated work between equivalent argument-parameter matches in different combinations may mitigate the asymptotic deficits of the whole-combination matching approach. 
+The final area of investigation is heuristics and algorithmic approaches to reduce the number of argument interpretations considered in the common case; if argument-parameter matches cannot be made independent, even small reductions in $i$ should yield significant reductions in the $i^{p+1}$ resolver runtime factor. 
+
+The discussion below presents a number of largely orthogonal axes for expression resolution algorithm design to be investigated, noting prior work where applicable. 
+Though some of the proposed improvements to the expression resolution algorithm are based on heuristics rather than asymptotically superior algorithms, it should be noted that programmers often employ idioms and other programming patterns to reduce the mental burden of producing correct code, and if these patterns can be identified and exploited by the compiler then the significant reduction in expression resolution time for common, idiomatic expressions should result in lower total compilation time even for code including difficult-to-resolve expressions that push the expression resolver to its theoretical worst case.
+
+\subsection{Argument-Parameter Matching}
+The first axis for consideration is the argument-parameter matching direction --- whether the type matching for a candidate function to a set of candidate arguments is directed by the argument types or the parameter types. 
+For programming languages without implicit conversions, argument-parameter matching is essentially the entirety of the expression resolution problem, and is generally referred to as ``overload resolution'' in the literature.
+All expression-resolution algorithms form a DAG of interpretations, some explicitly, some implicitly; in this DAG, arcs point from function-call interpretations to argument interpretations, as in Figure~\ref{fig:res_dag}:
+\begin{figure}[h]
+\centering
+\begin{subfigure}[h]{2in}
+\begin{lstlisting}
+int *p;  // $p_i$
+char *p; // $p_c$ 
+
+double *f(int*, int*); // $f_d$
+char *f(char*, int*); // $f_c$
+
+f( f( p, p ), p );
+\end{lstlisting}
+\end{subfigure}~\begin{subfigure}[h]{2in}
+\includegraphics{resolution_dag}
+\end{subfigure}
+\caption{Resolution DAG for a simple expression. Functions that do not have a valid argument matching are covered with an \textsf{X}.}\label{fig:res_dag}
+\end{figure}
+
+Note that some interpretations may be part of more than one super-interpretation, as with the second $p_i$ in the bottom row, while some valid subexpression interpretations, like $f_d$ in the middle row, are not used in any interpretation of their superexpression.
+
+\subsubsection{Argument-directed (Bottom-up)}
+Baker's algorithm for expression resolution~\cite{Baker82} pre-computes argument candidates, from the leaves of the expression tree up.
+For each candidate function, Baker attempts to match argument types to parameter types in sequence, failing if any parameter cannot be matched.
+
+Bilson~\cite{Bilson03} similarly pre-computes argument candidates in the original \CFA compiler, but then explicitly enumerates all possible argument combinations for a multi-parameter function; these argument combinations are matched to the parameter types of the candidate function as a unit rather than individual arguments.
+This approach is less efficient than Baker's approach, as the same argument may be compared to the same parameter many times, but allows a more straightforward handling of polymorphic type-binding and multiple return-types.
+It is possible the efficiency losses here relative to Baker could be significantly reduced by keeping a memoized cache of argument-parameter type comparisons and reading previously-seen argument-parameter matches from this cache rather than recomputing them.
+
+\subsubsection{Parameter-directed (Top-down)}
+Unlike Baker and Bilson, Cormack's algorithm~\cite{Cormack81} requests argument candidates that match the type of each parameter of each candidate function, from the top-level expression down; memoization of these requests is presented as an optimization.
+As presented, this algorithm requires the result of the expression to have a known type, though an algorithm based on Cormack's could reasonably request a candidate set of any return type, although such a set may be quite large.
+
+\subsubsection{Hybrid}
+This proposal includes the investigation of hybrid top-down/bottom-up argument-parameter matching.
+A reasonable hybrid approach might take a top-down approach when the expression to be matched has a fixed type, and a bottom-up approach in untyped contexts.
+This approach may involve switching from one type to another at different levels of the expression tree. 
+For instance, in:
+\begin{lstlisting}
+forall(otype T)
+int f(T x);  // (1)
+
+void* f(char y);  // (2)
+
+int x = f( f( '!' ) );
+\end{lstlisting}
+the outer call to ©f© must have a return type that is (implicitly convertible to) ©int©, so a top-down approach is used to select \textit{(1)} as the proper interpretation of ©f©. \textit{(1)}'s parameter ©x©, however, is an unbound type variable, and can thus take a value of any complete type, providing no guidance for the choice of candidate for the inner call to ©f©. The leaf expression ©'!'©, however, determines a zero-cost interpretation of the inner ©f© as \textit{(2)}, providing a minimal-cost expression resolution where ©T© is bound to ©void*©.
+
+Deciding when to switch between bottom-up and top-down resolution to minimize wasted work in a hybrid algorithm is a necessarily heuristic process, and finding good heuristics for which subexpressions to switch matching strategies on is an open question.
+One reasonable approach might be to set a threshold $t$ for the number of candidate functions, and to use top-down resolution for any subexpression with fewer than $t$ candidate functions, to minimize the number of unmatchable argument interpretations computed, but to use bottom-up resolution for any subexpression with at least $t$ candidate functions, to reduce duplication in argument interpretation computation between the different candidate functions. 
+
+Ganzinger and Ripken~\cite{Ganzinger80} propose an approach (later refined by Pennello~\etal~\cite{Pennello80}) that uses a top-down filtering pass followed by a bottom-up filtering pass to reduce the number of candidate interpretations; they prove that for the Ada programming language a small number of such iterations is sufficient to converge to a solution for the expression resolution problem. 
+Persch~\etal~\cite{PW:overload} developed a similar two-pass approach where the bottom-up pass is followed by the top-down pass. 
+These algorithms differ from the hybrid approach under investigation in that they take multiple passes over the expression tree to yield a solution, and that they also apply both filtering heuristics to all expression nodes; \CFA's polymorphic functions and implicit conversions make the approach of filtering out invalid types taken by all of these algorithms infeasible.
+
+\subsubsection{Common Subexpression Caching}
+With any of these argument-parameter approaches, it may be a useful optimization to cache the resolution results for common subexpressions; in Figure~\ref{fig:res_dag} this optimization would result in the list of interpretations $[p_c, p_i]$ for ©p© only being calculated once, and re-used for each of the three instances of ©p©.
+
+\subsection{Implicit Conversion Application}
+With the exception of Bilson, the authors mentioned above do not account for implicit conversions in their algorithms\footnote{Baker does briefly comment on an approach for handling implicit conversions, but does not provide an implementable algorithm.}; all assume that there is at most one valid interpretation of a given expression for each distinct type. 
+Integrating implicit conversion handling into the presented argument-parameter matching algorithms thus provides some choice of implementation approach.
+
+Inference of polymorphic type variables can be considered a form of implicit conversion application, where monomorphic types are implicitly converted to instances of some polymorphic type\footnote{This ``conversion'' may not be implemented in any explicit way at runtime, but does need to be handled by the expression resolver as an inexact match between argument and parameter types.}. 
+This form of implicit conversion is particularly common in functional languages; Haskell's type classes~\cite{typeclass} are a particularly well-studied variant of this inference. 
+However, type classes arguably do not allow name overloading, as (at least in the Haskell implementation) identifiers belonging to type classes may not be overloaded in any other context than an implementation of that type class; this provides a single (possibly polymorphic) interpretation of any identifier, simplifying the expression resolution problem relative to \CFA. 
+\CC~\cite{ANSI98:C++} includes both name overloading and implicit conversions in its expression resolution specification, though unlike \CFA it does complete type-checking on a generated monomorphization of template functions, where \CFA simply checks a list of type constraints. 
+The upcoming Concepts standard~\cite{C++concepts} defines a system of type constraints similar in principle to \CFA's.
+Cormack and Wright~\cite{Cormack90} present an algorithm that integrates overload resolution with a polymorphic type inference approach very similar to \CFA's.
+However, their algorithm does not account for implicit conversions other than polymorphic type binding and their discussion of their overload resolution algorithm is not sufficiently detailed to classify it with the other argument-parameter matching approaches\footnote{Their overload resolution algorithm is possibly a variant of Ganzinger and Ripken~\cite{Ganzinger80} or Pennello~\etal~\cite{Pennello80}, modified to allow for polymorphic type binding.}.
+
+\subsubsection{On Parameters}
+Bilson does account for implicit conversions in his algorithm, but it is unclear if the approach is optimal. 
+His algorithm integrates checking for valid implicit conversions into the argument-parameter-matching step, essentially trading more expensive matching for a smaller number of argument interpretations. 
+This approach may result in the same subexpression being checked for a type match with the same type multiple times, though again memoization may mitigate this cost; however, this approach does not generate implicit conversions that are not useful to match the containing function.
+
+\subsubsection{On Arguments}
+Another approach is to generate a set of possible implicit conversions for each set of interpretations of a given argument. 
+This approach has the benefit of detecting ambiguous interpretations of arguments at the level of the argument rather than its containing call, never finds more than one interpretation of the argument with a given type, and re-uses calculation of implicit conversions between function candidates. 
+On the other hand, this approach may unnecessarily generate argument interpretations that never match any parameter, wasting work. 
+Furthermore, in the presence of tuple types, this approach may lead to a combinatorial explosion of argument interpretations considered, unless the tuple can be considered as a sequence of elements rather than a unified whole. 
+
+\subsection{Candidate Set Generation}
+All the algorithms discussed to this point generate the complete set of candidate argument interpretations before attempting to match the containing function-call expression. 
+However, given that the top-level expression interpretation that is ultimately chosen is the minimal-cost valid interpretation, any consideration of non-minimal-cost interpretations is wasted work.
+Under the assumption that programmers generally write function calls with relatively low-cost interpretations, a possible work-saving heuristic is to generate only the lowest-cost argument interpretations first, attempt to find a valid top-level interpretation using them, and only if that fails generate the next higher-cost argument interpretations.
+
+\subsubsection{Eager}
+Within the eager approach taken by the existing top-down and bottom-up algorithms, there are still variants to explore. 
+Cormack and Baker do not account for implicit conversions, and thus do not account for the possibility of multiple valid interpretations with distinct costs; Bilson, on the other hand, sorts the list of interpretations to aid in finding minimal-cost interpretations. 
+Sorting the lists of argument or function call interpretations by cost at some point during resolution may provide useful opportunities to short-circuit expression evaluation when a minimal-cost interpretation is found, though it is unclear if this short-circuiting behaviour justifies the cost of the sort.
+
+\subsubsection{Lazy}
+In the presence of implicit conversions, many argument interpretations may match a given parameter by application of an appropriate implicit conversion. 
+However, if programmers actually use relatively few implicit conversions, then the ``on arguments'' approach to implicit conversions generates a large number of high-cost interpretations that may never be used. 
+Even if the ``on parameters'' approach to implicit conversions is used, eager generation of interpretations spends extra time attempting possibly expensive polymorphic or conversion-based matches in cases where an exact monomorphic interpretation exists. 
+
+The essence of the lazy approach to candidate set generation is to wrap the matching algorithm into the element generator of a lazy list, generating only as many elements at a time as are needed to ensure the next-smallest-cost interpretation has been generated. 
+Assuming argument interpretations are provided to the parameter matching algorithm in sorted order, a sorted list of function call interpretations can be produced by generating combinations of arguments sorted by total cost\footnote{I have already developed a lazy $n$-way combination generation algorithm to perform this task.}, then generating function call interpretations in the order suggested by this list. 
+The function call interpretation chosen may have costs of its own, for instance polymorphic type binding, so in some cases a number of argument combinations (any combination whose marginal cost does not exceed the cost of the function call interpretation itself) may need to be considered to determine the next-smallest-cost function call interpretation.
+Ideally, this candidate generation approach leads to very few unused candidates being generated (in the expected case where the programmer has, in fact, provided a validly-typable program), but it is an open question whether or not the overheads of lazy generation exceed the benefit produced from considering fewer interpretations.
+
+\subsubsection{Stepwise Lazy}
+As a compromise between the trade-offs of the eager and lazy approaches, I also propose to investigate a ``stepwise lazy'' approach, where all the interpretations for some ``step'' are eagerly generated, then the interpretations in the later steps are only generated on demand. 
+Under this approach the \CFA resolver could, for instance, try expression interpretations in the following order:
+\begin{enumerate}
+\item Interpretations with no polymorphic type binding or implicit conversions.
+\item Interpretations containing no polymorphic type binding and at least one safe implicit conversion.
+\item Interpretations containing polymorphic type binding, but only safe implicit conversions.
+\item Interpretations containing at least one unsafe implicit conversion.
+\end{enumerate} 
+If a valid expression interpretation is found in one step, it is guaranteed to be lower-cost than any interpretation in a later step (by the structure of \CFA interpretation costs), so no further steps need be considered.
+This approach may save significant amounts of work, especially given that the first steps avoid potentially expensive handling of implicit conversions and type assertion satisfaction entirely, and cover a large proportion of common monomorphic code.
+
+%\subsection{Parameter-Directed}
+%\textbf{TODO: Richard's algorithm isn't Baker (Cormack?), disentangle from this section \ldots}. 
+%The expression resolution algorithm used by the existing iteration of CFA is based on Baker's\cite{Baker82} algorithm for overload resolution in Ada. 
+%The essential idea of this algorithm is to first find the possible interpretations of the most deeply nested subexpressions, then to use these interpretations to recursively generate valid interpretations of their superexpressions. 
+%To simplify matters, the only expressions considered in this discussion of the algorithm are function application and literal expressions; other expression types can generally be considered to be variants of one of these for the purposes of the resolver, \eg variables are essentially zero-argument functions. 
+%If we consider expressions as graph nodes with arcs connecting them to their subexpressions, these expressions form a DAG, generated by the algorithm from the bottom up.
+%Literal expressions are represented by leaf nodes, annotated with the type of the expression, while a function application will have a reference to the function declaration chosen, as well as arcs to the interpretation nodes for its argument expressions; functions are annotated with their return type (or types, in the case of multiple return values).
+%
+%\textbf{TODO: Figure}
+%
+%Baker's algorithm was designed to account for name overloading; Richard Bilson\cite{Bilson03} extended this algorithm to also handle polymorphic functions, implicit conversions and multiple return types when designing the original \CFA compiler. 
+%The core of the algorithm is a function which Baker refers to as $gen\_calls$. 
+%$gen\_calls$ takes as arguments the name of a function $f$ and a list containing the set of possible subexpression interpretations $S_j$ for each argument of the function and returns a set of possible interpretations of calling that function on those arguments. 
+%The subexpression interpretations are generally either singleton sets generated by the single valid interpretation of a literal expression, or the results of a previous call to $gen\_calls$. 
+%If there are no valid interpretations of an expression, the set returned by $gen\_calls$ will be empty, at which point resolution can cease, since each subexpression must have at least one valid interpretation to produce an interpretation of the whole expression. 
+%On the other hand, if for some type $T$ there is more than one valid interpretation of an expression with type $T$, all interpretations of that expression with type $T$ can be collapsed into a single \emph{ambiguous expression} of type $T$, since the only way to disambiguate expressions is by their return types. 
+%If a subexpression interpretation is ambiguous, than any expression interpretation containing it will also be ambiguous. 
+%In the variant of this algorithm including implicit conversions, the interpretation of an expression as type $T$ is ambiguous only if there is more than one \emph{minimal-cost} interpretation of the expression as type $T$, as cheaper expressions are always chosen in preference to more expensive ones.
+%
+%Given this description of the behaviour of $gen\_calls$, its implementation is quite straightforward: for each function declaration $f_i$ matching the name of the function, consider each of the parameter types $p_j$ of $f_i$, attempting to match the type of an element of $S_j$ to $p_j$ (this may include checking of implicit conversions).
+%If no such element can be found, there is no valid interpretation of the expression using $f_i$, while if more than one such (minimal-cost) element is found than an ambiguous interpretation with the result type of $f_i$ is produced. 
+%In the \CFA variant, which includes polymorphic functions, it is possible that a single polymorphic function definition $f_i$ can produce multiple valid interpretations by different choices of type variable bindings; these interpretations are unambiguous so long as the return type of $f_i$ is different for each type binding. 
+%If all the parameters $p_j$ of $f_i$ can be uniquely matched to a candidate interpretation, then a valid interpretation based on $f_i$ and those $p_j$ is produced. 
+%$gen\_calls$ collects the produced interpretations for each $f_i$ and returns them; a top level expression is invalid if this list is empty, ambiguous if there is more than one (minimal-cost) result, or if this single result is ambiguous, and valid otherwise.
+%
+%In this implementation, resolution of a single top-level expression takes time $O(\ldots)$, where \ldots. \textbf{TODO:} \textit{Look at 2.3.1 in Richard's thesis when working out complexity; I think he does get the Baker algorithm wrong on combinations though, maybe\ldots}
+%
+%\textbf{TODO: Basic Lit Review} \textit{Look at 2.4 in Richard's thesis for any possible more-recent citations of Baker\ldots} \textit{Look back at Baker's related work for other papers that look similar to what you're doing, then check their citations as well\ldots} \textit{Look at Richard's citations in 2.3.2 w.r.t. type data structures\ldots}
+%\textit{CormackWright90 seems to describe a solution for the same problem, mostly focused on how to find the implicit parameters}
+
+\section{Proposal}
+Baker~\cite{Baker82} discussed various expression resolution algorithms that can handle name overloading, but left experimental comparison of those algorithms to future work; Bilson~\cite{Bilson03} described one extension of Baker's algorithm to handle implicit conversions, but did not fully explore the space of algorithmic approaches to handle both overloaded names and implicit conversions. 
+This project is intended to experimentally test a number of expression resolution algorithms that are powerful enough to handle the \CFA type-system, including both name overloading and implicit conversions. 
+This comparison closes Baker's open research question, as well as potentially improving Bilson's \CFA compiler.
+
+Rather than testing all of these algorithms in-place in the \CFA compiler, a resolver prototype is being developed that acts on a simplified input language encapsulating the essential details of the \CFA type-system\footnote{Note this simplified input language is not a usable programming language.}. 
+Multiple variants of this resolver prototype will be implemented, each encapsulating a different expression resolution variant, sharing as much code as feasible. 
+These variants will be instrumented to test runtime performance, and run on a variety of input files; the input files may be generated programmatically or from existing code in \CFA or similar languages.
+These experimental results should make it possible to determine the algorithm likely to be most performant in practical use, and replace CFA's existing expression resolver. 
+
+The experimental results will also provide some empirical sense of the compile-time cost of various language features by comparing the results of the most performant resolver variant that supports a feature with the most performant resolver variant that does not support that feature, a useful capability to guide language design. 
+As an example, there are currently multiple open proposals for how implicit conversions should interact with polymorphic type binding in \CFA, each with distinct levels of expressive power; if the resolver prototype is modified to support each proposal, the optimal algorithm for each proposal can be compared, providing an empirical demonstration of the trade-off between expressive power and compiler runtime. 
+
+This proposed project should provide valuable data on how to implement a performant compiler for programming languages such as \CFA with powerful static type-systems, specifically targeting the feature interaction between name overloading and implicit conversions. 
+This work is not limited in applicability to \CFA, but may also be useful for supporting efficient compilation of the upcoming Concepts standard~\cite{C++concepts} for \CC template constraints, for instance. 
+
+\appendix
+\section{Completion Timeline}
+The following is a preliminary estimate of the time necessary to complete the major components of this research project:
+\begin{center}
+\begin{tabular}{ | r @{--} l | p{4in} | }
+\hline       May 2015 & April 2016   & Project familiarization and generic types design and implementation. \\
+\hline       May 2016 & April 2017   & Design and implement resolver prototype and run performance experiments. \\
+\hline       May 2017 & August 2017  & Integrate new language features and best-performing resolver prototype into CFA. \\
+\hline September 2017 & January 2018 & Thesis writing and defense. \\
+\hline
+\end{tabular}
+\end{center}
+
+\addcontentsline{toc}{section}{\refname}
+\bibliographystyle{plain}
+\bibliography{pl}
+
+%\addcontentsline{toc}{section}{\indexname} % add index name to table of contents
+%\begin{theindex}
+%Italic page numbers give the location of the main entry for the referenced term.
+%Plain page numbers denote uses of the indexed term.
+%Entries for grammar non-terminals are italicized.
+%A typewriter font is used for grammar terminals and program identifiers.
+%\indexspace
+%\input{comp_II.ind}
+%\end{theindex}
+
+\end{document}
Index: doc/theses/aaron_moss_PhD/phd/.gitignore
===================================================================
--- doc/theses/aaron_moss_PhD/phd/.gitignore	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/.gitignore	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,9 @@
+templates/
+thesis.pdf
+thesis.aux
+thesis.bbl
+thesis.blg
+thesis.log
+thesis.out
+thesis.toc
+thesis.lot
Index: doc/theses/aaron_moss_PhD/phd/Makefile
===================================================================
--- doc/theses/aaron_moss_PhD/phd/Makefile	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/Makefile	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,38 @@
+LATEX = pdflatex -interaction=nonstopmode
+BIBTEX = bibtex
+
+BASE = thesis
+DOCUMENT = ${BASE}.pdf
+AUX = ${BASE}.aux ${BASE}.bbl ${BASE}.blg ${BASE}.log ${BASE}.out ${BASE}.toc
+
+SOURCES = ${addsuffix .tex, \
+thesis \
+macros \
+cfa-macros \
+frontpgs \
+introduction \
+background \
+type-environment \
+resolution-heuristics \
+conclusion \
+}
+
+.PHONY : all rebuild-refs clean wc
+
+all : ${DOCUMENT}
+
+clean : 
+	@rm -frv ${DOCUMENT} ${AUX}
+
+wc :
+	wc ${SOURCES}
+
+${DOCUMENT} : ${SOURCES}
+	${LATEX} ${BASE}
+	${LATEX} ${BASE}
+
+rebuild-refs : ${SOURCES} aaron-thesis.bib
+	${LATEX} ${BASE}
+	${BIBTEX} ${BASE}
+	${LATEX} ${BASE}
+	${LATEX} ${BASE}
Index: doc/theses/aaron_moss_PhD/phd/aaron-thesis.bib
===================================================================
--- doc/theses/aaron_moss_PhD/phd/aaron-thesis.bib	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/aaron-thesis.bib	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,97 @@
+%    Predefined journal names:
+%  acmcs: Computing Surveys		acta: Acta Informatica
+@string{acta="Acta Informatica"}
+%  cacm: Communications of the ACM
+%  ibmjrd: IBM J. Research & Development ibmsj: IBM Systems Journal
+%  ieeese: IEEE Trans. on Soft. Eng.	ieeetc: IEEE Trans. on Computers
+%  ieeetcad: IEEE Trans. on Computer-Aided Design of Integrated Circuits
+%  ipl: Information Processing Letters	jacm: Journal of the ACM
+%  jcss: J. Computer & System Sciences	scp: Science of Comp. Programming
+%  sicomp: SIAM J. on Computing		tocs: ACM Trans. on Comp. Systems
+%  tods: ACM Trans. on Database Sys.	tog: ACM Trans. on Graphics
+%  toms: ACM Trans. on Math. Software	toois: ACM Trans. on Office Info. Sys.
+%  toplas: ACM Trans. on Prog. Lang. & Sys.
+%  tcs: Theoretical Computer Science
+@string{ieeepds="IEEE Transactions on Parallel and Distributed Systems"}
+@string{ieeese="IEEE Transactions on Software Engineering"}
+@string{spe="Software---\-Practice and Experience"}
+@string{ccpe="Concurrency and Computation: Practice and Experience"}
+@string{sigplan="SIGPLAN Notices"}
+@string{joop="Journal of Object-Oriented Programming"}
+@string{popl="Conference Record of the ACM Symposium on Principles of Programming Languages"}
+@string{osr="Operating Systems Review"}
+@string{pldi="Programming Language Design and Implementation"}
+@string{toplas="Transactions on Programming Languages and Systems"}
+@string{mathann="Mathematische Annalen"}
+
+@mastersthesis{Bilson03,
+    keywords	= {Cforall, parametric polymorphism, overloading},
+    contributer	= {pabuhr@plg},
+    author	= {Richard C. Bilson},
+    title	= {Implementing Overloading and Polymorphism in \textsf{C}$\mathbf{\forall}$},
+    school	= {School of Computer Science, University of Waterloo},
+    year	= 2003,
+    address	= {Waterloo, Ontario, Canada, N2L 3G1},
+    note	= {\href{http://plg.uwaterloo.ca/theses/BilsonThesis.pdf}{http://\-plg.uwaterloo.ca/\-theses/\-BilsonThesis.pdf}},
+}
+
+@article{Buhr94a,
+    keywords	= {assignment, parameter passing, multiple assignment},
+    contributer	= {pabuhr@plg},
+    author	= {P. A. Buhr and David Till and C. R. Zarnke},
+    title	= {Assignment as the Sole Means of Updating Objects},
+    journal	= spe,
+    month	= sep,
+    year	= 1994,
+    volume	= 24,
+    number	= 9,
+    pages	= {835-870},
+}
+
+@mastersthesis{Delisle18,
+    author	= {Thierry Delisle},
+    title	= {Concurrency in \textsf{C}$\mathbf{\forall}$},
+    school	= {School of Computer Science, University of Waterloo},
+    year	= 2018,
+    address	= {Waterloo, Ontario, Canada, N2L 3G1},
+    note	= {\href{https://uwspace.uwaterloo.ca/handle/10012/12888}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-12888}},
+}
+
+@phdthesis{Ditchfield92,
+    keywords	= {C, parametric polymorphism, overloading},
+    contributer	= {pabuhr@plg},
+    author	= {Glen Jeffrey Ditchfield},
+    title	= {Contextual Polymorphism},
+    school	= {Department of Computer Science, University of Waterloo},
+    year	= 1992,
+    address	= {Waterloo, Ontario, Canada, N2L 3G1},
+    note	= {\href{http://plg.uwaterloo.ca/theses/DitchfieldThesis.pdf}{http://\-plg.uwaterloo.ca/\-theses/\-DitchfieldThesis.pdf}}
+}
+
+@article{Moss18,
+    keywords	= {concurrency, C++},
+    contributer	= {pabuhr@plg},
+    author	= {Aaron Moss and Robert Schluntz and Peter A. Buhr},
+    title	= {\textsf{C}$\mathbf{\forall}$ : Adding Modern Programming Language Features to C},
+    year	= 2018,
+    journal	= spe,
+    note	= {Accepted, to appear},
+}
+
+@mastersthesis{Schluntz17,
+    keywords 	= {constructors, destructors, tuples},
+    author	= {Robert Schluntz},
+    title	= {Resource Management and Tuples in \textsf{C}$\mathbf{\forall}$},
+    school	= {School of Computer Science, University of Waterloo},
+    year	= 2017,
+    address	= {Waterloo, Ontario, Canada, N2L 3G1},
+    note	= {\href{https://uwspace.uwaterloo.ca/handle/10012/11830}{https://\-uwspace.uwaterloo.ca/\-handle/\-10012/\-11830}},
+}
+
+@misc{TIOBE,
+    contributer	= {pabuhr@plg},
+    key		= {TIOBE Index},
+    title	= {{TIOBE} Index},
+    howpublished= {\href{http://www.tiobe.com/tiobe_index}{http://\-www.tiobe.com/\-tiobe\_index}},
+    optnote	= {Accessed: 2018-08},
+}
Index: doc/theses/aaron_moss_PhD/phd/background.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/background.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/background.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,221 @@
+\chapter{Background}
+
+\CFA{} adds a number of features to C, some of them providing significant increases to the expressive power of the language, but all designed to maintain the existing procedural programming paradigm of C and to be as orthogonal as possible to each other. 
+To provide background for the contributions in subsequent chapters, this chapter provides a summary of the features of \CFA{} at the time this work was conducted.
+
+The core design of \CFA{} is laid out in Glen Ditchfield's 1992 PhD thesis, \emph{Contextual Polymorphism}\cite{Ditchfield92}; in that thesis, Ditchfield presents the theoretical underpinnings of the \CFA{} polymorphism model. 
+Building on Ditchfield's design for contextual polymorphism as well as KW-C\cite{Buhr94a}, an earlier set of (largely syntactic) extensions to C, Richard Bilson\cite{Bilson03} built the first version of the \CFA{} compiler, \CFACC{}, in the early 2000's. 
+This early \CFACC{} provided basic functionality, but incorporated a number of poor algorithmic choices due to a rushed implementation time frame, and as such lacked the runtime performance required for practical use; this thesis is substantially concerned with rectifying those deficits. 
+
+The \CFA{} project was revived in 2015 with the intention of building a production-ready language and compiler; at the time of this writing, both \CFA{} and \CFACC{} have been under active development continuously since. 
+As this development has been proceeding concurrently with the work described in this thesis, the state of \CFA{} has been somewhat of a moving target; however, Moss~\etal\cite{Moss18} provides a reasonable summary of the current design. 
+Notable features added during this period include generic types (Chapter~\ref{generic-chap}), constructors and destructors\cite{Schluntz17}, improved support for tuples\cite{Schluntz17}, reference types\cite{Moss18}, first-class concurrent and parallel programming support\cite{Delisle18}, as well as numerous pieces of syntactic sugar and the start of an idiomatic standard library\cite{Moss18}.
+
+\section{\CFA{} Features}
+
+The selection of features presented in this chapter are chosen to elucidate the design constraints of the work presented in this thesis. 
+In some cases the interactions of multiple features make this design a significantly more complex problem than any individual feature would; in other cases a feature that does not by itself add any complexity to expression resolution triggers previously rare edge cases more frequently.
+
+\subsection{Procedural Paradigm}
+
+It is important to note that \CFA{} is not an object-oriented language. 
+This is a deliberate choice intended to maintain the applicability of the mental model and language idioms already possessed by C programmers. 
+This choice is in marked contrast to \CC{}, which, though it has backward-compatibility with C on the source code level, is a much larger and more complex language, and requires extensive developer re-training before they can write idiomatic, efficient code in \CC{}'s object-oriented paradigm.
+
+\CFA{} does have a system of implicit type conversions derived from C's ``usual arithmetic conversions''; while these conversions may be thought of as something like an inheritance hierarchy, the underlying semantics are significantly different and such an analogy is loose at best. 
+Particularly, \CFA{} has no concept of \emph{subclass}, and thus no need to integrate an inheritance-based form of polymorphism with its parametric and overloading-based polymorphism. 
+The graph structure of the \CFA{} type conversions is also markedly different from an inheritance hierarchy; it has neither a top nor a bottom type, and does not satisfy the lattice properties typical of inheritance hierarchies. 
+
+Another aspect of \CFA{}'s procedural paradigm is that it retains C's translation-unit-based encapsulation model, rather than class-based encapsulation such as in \CC{}. 
+This choice implies that separate compilation must be maintained to allow headers to act as an encapsulation boundary, rather than the header-only libraries used by \CC{} templates.
+
+\subsection{Name Overloading} \label{overloading-sec}
+
+In C, no more than one variable or function in the same scope may share the same name\footnote{Technically, C has multiple separated namespaces, one holding \lstinline{struct}, \lstinline{union}, and \lstinline{enum} tags, one holding labels, one holding \lstinline{typedef} names, variable, function, and enumerator identifiers, and one for each \lstinline{struct} and \lstinline{union} type holding the field names\cit{}.}, and variable or function declarations in inner scopes with the same name as a declaration in an outer scope hide the outer declaration. 
+This restriction makes finding the proper declaration to match to a variable expression or function application a simple matter of symbol-table lookup, which can be easily and efficiently implemented. 
+\CFA{}, on the other hand, allows overloading of variable and function names so long as the overloaded declarations do not have the same type, avoiding the multiplication of variable and function names for different types common in the C standard library, as in the following example: 
+
+\begin{cfa}
+#include <limits.h>
+
+const int max = INT_MAX;  $\C[1.75in]{// (1)}$
+const double max = DBL_MAX;  $\C[1.75in]{// (2)}$
+int max(int a, int b) { return a < b ? b : a; }  $\C[1.75in]{// (3)}$
+double max(double a, double b) { return a < b ? b : a; }  $\C[1.75in]{// (4)}$
+
+max( 7, -max );  $\C[3.75in]{// uses (3) and (1), by matching int from 7}$
+max( max, 3.14 );  $\C[3.75in]{// uses (4) and (2), by matching double from 3.14}$
+max( max, -max );  $\C[3.75in]{// ERROR, ambiguous}$
+int m = max( max, -max );  $\C[3.75in]{// uses (3) and (1) twice, by matching return type}$
+\end{cfa}
+
+While the wisdom of giving both the maximum value of a type and the function to take the maximum of two values the same name is debatable, \eg{} some developers may prefer !MAX! for the former, the guiding philosophy of \CFA{} is ``describe, don't prescribe'' --- we prefer to trust programmers with powerful tools, and there is no technical reason to restrict overloading between variables and functions. 
+However, the expressivity of \CFA{}'s name overloading has the consequence that simple table lookup is insufficient to match identifiers to declarations, and a type-matching algorithm must be part of expression resolution.
+
+\subsubsection{Operator Overloading}
+
+C does allow name overloading in one context: operator overloading. 
+From the perspective of the type system, there is nothing special about operators as opposed to other functions, nor is it desirable to restrict the clear and readable syntax of operators to only the built-in types. 
+For these reasons, \CFA{} also allows overloading of operators by writing specially-named functions where !?! stands in for the operands\footnote{This example uses \CFA{}'s reference types, described in Section~\ref{type-features-sec}}:
+
+\begin{cfa}
+struct counter { int x; };
+
+counter& `++?`(counter& c) { ++c.x; return c; }  $\C{// pre-increment}$
+counter `?++`(counter& c) {  $\C{// post-increment}$
+	counter tmp = c; ++c; return tmp;
+}
+bool `?<?`(const counter& a, const counter& b) {  $\C{// comparison}$
+	return a.x < b.x;
+}
+\end{cfa}
+
+Together, \CFA{}'s backward-compatibility with C and the inclusion of this operator overloading feature imply that \CFA{} must select among function overloads using a method compatible with C's ``usual arithmetic conversions''\cit{}, so as to present user programmers with only a single set of overloading rules.
+
+\subsection{Polymorphic Functions}
+
+The most significant feature \CFA{} adds is parametric-polymorphic functions. 
+Such functions are written using a !forall! clause (which gives the language its name):
+
+\begin{cfa}
+`forall(otype T)`
+T identity(T x) { return x; }
+\end{cfa}
+
+The !identity! function above can be applied to any complete object type (or ``!otype!''). 
+The type variable !T! is transformed into a set of additional implicit parameters to !identity!, which encode sufficient information about !T! to create and return a variable of that type. 
+\CFA{} passes the size and alignment of the type represented by an !otype! parameter, as well as a default constructor, copy constructor, assignment operator, and destructor. 
+Types which do not have one or more of these operators visible cannot be bound to !otype! parameters. 
+In this design, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; experiments have shown this overhead to be similar to \CC{} virtual function calls. \TODO{rerun experiments, possibly look at vtable variant}
+
+One benefit of this design is that it allows polymorphic functions to be separately compiled. 
+The forward declaration !forall(otype T) T identity(T);! uniquely defines a single callable function, which may be implemented in a different file. 
+The fact that there is only one implementation of each polymorphic function also reduces compile times relative to the template-expansion approach taken by \CC{}, as well as reducing binary sizes and runtime pressure on the instruction cache by re-using a single version of each function.
+
+\subsubsection{Type Assertions}
+
+Since bare polymorphic types do not provide a great range of available operations, \CFA{} provides a \emph{type assertion} mechanism to provide further information about a type:
+
+\begin{cfa}
+forall(otype T `| { T twice(T); }`)
+T four_times(T x) { return twice( twice(x) ); }
+double twice(double d) { return d * 2.0; }  $\C[2.75in]{// (1)}$
+
+double ans = four_times( 10.5 );  $\C[2.75in]{// T bound to double, ans == 42.0}$
+\end{cfa}
+
+These type assertions may be either variable or function declarations that depend on a polymorphic type variable. 
+!four_times! may only be called with an argument for which there exists a function named !twice! that can take that argument and return another value of the same type; a pointer to the appropriate function is passed as an additional implicit parameter of the call to !four_times!.
+
+Monomorphic specializations of polymorphic functions can themselves be used to satisfy type assertions. 
+For instance, !twice! could have been defined like this:
+
+\begin{cfa}
+forall(otype S | { S ?+?(S, S); })
+S twice(S x) { return x + x; }  $\C[2.75in]{// (2)}$
+\end{cfa}
+
+This version of !twice! works for any type !S! that has an addition operator defined for it, and it could be used to satisfy the type assertion on !four_times!. 
+\CFACC{} accomplishes this by creating a wrapper function calling !twice // (2)! with !S! bound to !double!, then providing this wrapper function to !four_times!\footnote{\lstinline{twice // (2)} could also have had a type parameter named \lstinline{T}; \CFA{} specifies renaming of the type parameters, which would avoid the name conflict with the type variable \lstinline{T} of \lstinline{four_times}}.
+
+Finding appropriate functions to satisfy type assertions is essentially a recursive case of expression resolution, as it takes a name (that of the type assertion) and attempts to match it to a suitable declaration \emph{in the current scope}. 
+If a polymorphic function can be used to satisfy one of its own type assertions, this recursion may not terminate, as it is possible that that function is examined as a candidate for its own type assertion an unbounded number of times. 
+To avoid such infinite loops, \CFACC{} imposes a fixed limit on the possible depth of recursion, similar to that employed by most \CC{} compilers for template expansion; this restriction means that there are some semantically well-typed expressions that cannot be resolved by \CFACC{}.
+\TODO{Update this with final state} One contribution made in the course of this thesis was modifying \CFACC{} to use the more flexible expression resolution algorithm for assertion matching, rather than the previous simpler approach of unification on the types of the functions.
+
+\subsubsection{Deleted Declarations}
+
+Particular type combinations can be exempted from matching a given polymorphic function through use of a \emph{deleted function declaration}:
+
+\begin{cfa}
+int somefn(char) = void;
+\end{cfa}
+
+This feature is based on a \CCeleven{} feature typically used to make a type non-copyable by deleting its copy constructor and assignment operator\footnote{In previous versions of \CC{}, a type could be made non-copyable by declaring a private copy constructor and assignment operator, but not defining either. This idiom is well-known, but depends on some rather subtle and \CC{}-specific rules about private members and implicitly-generated functions; the deleted function form is both clearer and less verbose.} or forbidding some interpretations of a polymorphic function by specifically deleting the forbidden overloads\footnote{Specific polymorphic function overloads can also be forbidden in previous \CC{} versions through use of template metaprogramming techniques, though this advanced usage is beyond the skills of many programmers. A similar effect can be produced on an ad-hoc basis at the appropriate call sites through use of casts to determine the function type. In both cases, the deleted-function form is clearer and more concise.}. 
+Deleted function declarations are implemented in \CFACC{} by adding them to the symbol table as usual, but with a flag set that indicates that the function is deleted. 
+If this deleted declaration is selected as the unique minimal-cost interpretation of an expression then an error is produced. \TODO{Check this is implemented at print.}
+
+\subsubsection{Traits}
+
+\CFA{} provides \emph{traits} as a means to name a group of type assertions, as in the example below\footnote{This example uses \CFA{}'s reference types, constructors, and zero type, described in Section~\ref{type-features-sec}.}:
+
+\begin{cfa}
+`trait has_magnitude(otype T)` {
+	bool ?<?(T, T);  $\C[4in]{// comparison operator}$
+	T -?(T);  $\C[4in]{// negation operator}$
+	void ?{}(T&, zero_t);  $\C[4in]{// constructor from 0}$
+};
+
+forall(otype M | `has_magnitude(M)`)
+M abs(M m) { return m < (M){0} ? -m : m; }
+
+forall(otype M | `has_magnitude(M)`)
+M max_magnitude(M a, M b) { return abs(a) < abs(b) ? b : a; }
+\end{cfa}
+
+Semantically, traits are simply a named list of type assertions, but they may be used for many of the same purposes that interfaces in Java or abstract base classes in \CC{} are used for. 
+Unlike Java interfaces or \CC{} base classes, \CFA{} types do not explicitly state any inheritance relationship to traits they satisfy; this can be considered a form of structural inheritance, similar to interface implementation in Go, as opposed to the nominal inheritance model of Java and \CC{}. 
+Nominal inheritance can be simulated in \CFA{} using marker variables in traits:
+
+\begin{cfa}
+trait nominal(otype T) {
+	`T is_nominal;`
+};
+
+int is_nominal;  $\C{// int now satisfies nominal}$
+{
+	char is_nominal;  $\C{// char only satisfies nominal in this scope}$
+}
+\end{cfa}
+
+Traits, however, are significantly more powerful than nominal-inheritance interfaces; firstly, due to the scoping rules of the declarations that satisfy a trait's type assertions, a type may not satisfy a trait everywhere that that type is declared, as with !char! and the !nominal! trait above. 
+Secondly, because \CFA{} is not object-oriented and types do not have a closed set of methods, existing C library types can be extended to implement a trait simply by writing the requisite functions. 
+Finally, traits may be used to declare a relationship among multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems\footnote{This example uses \CFA{}'s reference types, described in Section~\ref{type-features-sec}.}: 
+
+\begin{cfa}
+trait pointer_like(`otype Ptr, otype El`) {
+	El& *?(Ptr);  $\C{// Ptr can be dereferenced to El}$
+};
+
+struct list {
+	int value;
+	list* next; $\C{// may omit struct on type names}$
+};
+
+typedef list* list_iterator;
+
+int& *?(list_iterator it) {
+	return it->value;
+}
+\end{cfa}
+
+In the example above, !(list_iterator, int)! satisfies !pointer_like! by the user-defined dereference function, and !(list_iterator, list)! also satisfies !pointer_like! by the built-in dereference operator for pointers. 
+Given a declaration !list_iterator it!, !*it! can be either an !int! or a !list!, with the meaning disambiguated by context (\eg{} !int x = *it;! interprets !*it! as !int!, while !(*it).value = 42;! interprets !*it! as !list!). 
+While a nominal-inheritance system with associated types could model one of those two relationships by making !El! an associated type of !Ptr! in the !pointer_like! implementation, few such systems could model both relationships simultaneously. 
+
+The flexibility of \CFA{}'s implicit trait-satisfaction mechanism provides programmers with a great deal of power, but also blocks some optimization approaches for expression resolution. 
+The ability of types to begin or cease to satisfy traits when declarations go into or out of scope makes caching of trait satisfaction judgements difficult, and the ability of traits to take multiple type parameters can lead to a combinatorial explosion of work in any attempt to pre-compute trait satisfaction relationships. 
+
+\subsection{Implicit Conversions}
+
+In addition to the multiple interpretations of an expression produced by name overloading and polymorphic functions, for backward compatibility \CFA{} must support all of the implicit conversions present in C, producing further candidate interpretations for expressions. 
+As mentioned above, C does not have an inheritance hierarchy of types, but the C standard's rules for the ``usual arithmetic conversions''\cit{} define which of the built-in types are implicitly convertible to which other types, and the relative cost of any pair of such conversions from a single source type. 
+\CFA{} adds to the usual arithmetic conversions rules defining the cost of binding a polymorphic type variable in a function call; such bindings are cheaper than any \emph{unsafe} (narrowing) conversion, \eg{} !int! to !char!, but more expensive than any \emph{safe} (widening) conversion, \eg{} !int! to !double!. 
+One contribution of this thesis, discussed in Section \TODO{add to resolution chapter}, is a number of refinements to this cost model to more efficiently resolve polymorphic function calls. 
+
+The expression resolution problem which is the focus of Chapter~\ref{resolution-chap} is to find the unique minimal-cost interpretation of each expression in the program, where all identifiers must be matched to a declaration, and implicit conversions or polymorphic bindings of the result of an expression may increase the cost of the expression. 
+While semantically valid \CFA{} code always has such a unique minimal-cost interpretation, \CFACC{} must also be able to detect and report as errors expressions which have either no interpretation or multiple ambiguous minimal-cost interpretations. 
+Note that which subexpression interpretation is minimal-cost may require contextual information to disambiguate. 
+For instance, in the example in Section~\ref{overloading-sec}, !max(max, -max)! cannot be unambiguously resolved, but !int m = max(max, -max)! has a single minimal-cost resolution. 
+While the interpretation !int m = (int)max((double)max, -(double)max)! is also a valid interpretation, it is not minimal-cost due to the unsafe cast from the !double! result of !max! to the !int!\footnote{The two \lstinline{double} casts function as type ascriptions selecting \lstinline{double max} rather than casts from \lstinline{int max} to \lstinline{double}, and as such are zero-cost.}. 
+These contextual effects make the expression resolution problem for \CFA{} both theoretically and practically difficult, but the observation driving the work in Chapter~\ref{resolution-chap} is that of the many top-level expressions in a given program, most will likely be straightforward and idiomatic so that programmers writing and maintaining the code can easily understand them; it follows that effective heuristics for common cases can bring down compiler runtime enough that a small proportion of harder-to-resolve expressions should not increase compiler runtime or memory usage inordinately.
+
+\subsection{Type Features} \label{type-features-sec}
+
+\subsubsection{Reference Types}
+
+% TODO mention contribution on reference rebind
+
+\subsubsection{Lifetime Management}
+
+\subsubsection{0 and 1 Literals}
Index: doc/theses/aaron_moss_PhD/phd/cfa-macros.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/cfa-macros.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/cfa-macros.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,69 @@
+\usepackage{listings}
+
+% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
+% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
+% AFTER HYPERREF.
+%\DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}
+\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
+
+\renewcommand*{\thefootnote}{\Alph{footnote}} % hack because fnsymbol does not work
+%\renewcommand*{\thefootnote}{\fnsymbol{footnote}}
+
+\makeatletter
+% parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for
+% use rather than use \parident directly.
+\newlength{\parindentlnth}
+\setlength{\parindentlnth}{\parindent}
+
+\newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{\lst@basicstyle{#1}}}}
+\newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}}
+\newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}}
+
+\newcommand{\C}[2][2in]{\hfill\makebox[#1][l]{\LstCommentStyle{#2}}}
+
+% CFA programming language, based on ANSI C (with some gcc additions)
+\lstdefinelanguage{CFA}[ANSI]{C}{
+	morekeywords={
+		_Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, __attribute, __attribute__,
+		auto, bool, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__,
+		coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally,
+		__float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,
+		inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or,
+		otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread,
+		_Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__,
+		virtual, __volatile, __volatile__, waitfor, when, with, zero_t},
+	moredirectives={defined,include_next}%
+}
+
+\lstset{
+language=CFA,
+columns=fullflexible,
+basicstyle=\linespread{0.9}\sf,							% reduce line spacing and use sanserif font
+stringstyle=\tt,										% use typewriter font
+tabsize=5,												% N space tabbing
+xleftmargin=\parindentlnth,								% indent code to paragraph indentation
+%mathescape=true,										% LaTeX math escape in CFA code $...$
+escapechar=\$,											% LaTeX escape in CFA code
+keepspaces=true,										%
+showstringspaces=false,									% do not show spaces with cup
+showlines=true,											% show blank lines at end of code
+aboveskip=4pt,											% spacing above/below code block
+belowskip=3pt,
+% replace/adjust listing characters that look bad in sanserif
+literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1
+	{~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
+	{<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1
+	{<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2,
+moredelim=**[is][\color{red}]{`}{`},
+}% lstset
+
+\lstnewenvironment{cfa}[1][]
+{\lstset{#1}}
+{}
+\lstnewenvironment{C++}[1][]                            % use C++ style
+{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
+{}
+
+% inline code !...!
+\lstMakeShortInline!
+
Index: doc/theses/aaron_moss_PhD/phd/conclusion.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/conclusion.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/conclusion.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,3 @@
+\chapter{Conclusion}
+
+Wrap it up --- Done, done done.
Index: doc/theses/aaron_moss_PhD/phd/frontpgs.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/frontpgs.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/frontpgs.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,174 @@
+% T I T L E   P A G E
+% -------------------
+
+% The title page is counted as page `i' but we need to suppress the
+% page number. Also, we don't want any headers or footers.
+\pagestyle{empty}
+\pagenumbering{roman}
+
+% The contents of the title page are specified in the "titlepage"
+% environment.
+\begin{titlepage}
+	\begin{center}
+	\vspace*{1.0cm}
+
+	\Huge
+	{\bf \CFA{} Type System Implementation }
+
+	\vspace*{1.0cm}
+
+	\normalsize
+	by \\
+
+	\vspace*{1.0cm}
+
+	\Large
+	Aaron Moss \\
+
+	\vspace*{3.0cm}
+
+	\normalsize
+	A thesis \\
+	presented to the University of Waterloo \\ 
+	in fulfillment of the \\
+	thesis requirement for the degree of \\
+	Doctor of Philosophy \\
+	in \\
+	Computer Science \\
+
+	\vspace*{2.0cm}
+
+	Waterloo, Ontario, Canada, 2019 \\
+
+	\vspace*{1.0cm}
+
+	\copyright\ Aaron Moss 2019 \\
+	\end{center}
+\end{titlepage}
+
+% The rest of the front pages should contain no headers and be numbered using Roman numerals starting with `ii'
+\pagestyle{plain}
+\setcounter{page}{2}
+
+\cleardoublepage % Ends the current page and causes all figures and tables that have so far appeared in the input to be printed.
+% In a two-sided printing style, it also makes the next page a right-hand (odd-numbered) page, producing a blank page if necessary.
+
+% E X A M I N I N G   C O M M I T T E E
+% -------------------------------------
+
+\begin{center}\textbf{Examining Committee Membership}\end{center}
+	\noindent
+  The following served on the Examining Committee for this thesis. The decision of the Examining Committee is by majority vote.
+	\bigskip
+	
+% 	\noindent
+%   \begin{tabbing}
+%   Internal-External Member: \=  \kill % using longest text to define tab length
+%   External Examiner: \>  Bruce Bruce \\ 
+%   \> Professor, Dept. of Philosophy of Zoology, University of Wallamaloo \\
+%   \end{tabbing} 
+% 	\bigskip
+	
+	\noindent
+  \begin{tabbing}
+  Internal-External Member: \=  \kill % using longest text to define tab length
+  Supervisor: \> Peter Buhr \\
+  \> Professor, School of Computer Science, University of Waterloo \\
+  \end{tabbing}
+	\bigskip
+	
+	\noindent
+  \begin{tabbing}
+  Internal-External Member: \=  \kill % using longest text to define tab length
+  Internal Members: \> Gregor Richards \\
+  \> Professor, School of Computer Science, University of Waterloo \\
+  \> Ond\v{r}ej Lhot\a'ak \\
+  \> Professor, School of Computer Science, University of Waterloo \\
+  \end{tabbing}
+% 	\bigskip
+	
+% 	\noindent
+%   \begin{tabbing}
+%   Internal-External Member: \=  \kill % using longest text to define tab length
+%   Internal-External Member: \> Deepa Thotta \\
+%   \> Professor, Dept. of Philosophy, University of Waterloo \\
+%   \end{tabbing}
+% 	\bigskip
+	
+% 	\noindent
+%   \begin{tabbing}
+%   Internal-External Member: \=  \kill % using longest text to define tab length
+%   Other Member(s): \> Leeping Fang \\
+%   \> Professor, Dept. of Fine Art, University of Waterloo \\
+%   \end{tabbing}
+  
+  \cleardoublepage
+
+% D E C L A R A T I O N   P A G E
+% -------------------------------
+  % The following is a sample Declaration Page as provided by the GSO
+  % December 13th, 2006.  It is designed for an electronic thesis.
+  \noindent
+I hereby declare that I am the sole author of this thesis. This is a true copy of the thesis, including any required final revisions, as accepted by my examiners.
+
+  \bigskip
+  
+  \noindent
+I understand that my thesis may be made electronically available to the public.
+
+\cleardoublepage
+
+% A B S T R A C T
+% ---------------
+
+\begin{center}\textbf{Abstract}\end{center}
+
+This is the abstract.
+
+\cleardoublepage
+
+% A C K N O W L E D G E M E N T S
+% -------------------------------
+
+% \begin{center}\textbf{Acknowledgements}\end{center}
+
+% I would like to thank all the little people who made this thesis possible.
+% \cleardoublepage
+
+% D E D I C A T I O N
+% -------------------
+
+% \begin{center}\textbf{Dedication}\end{center}
+
+% This is dedicated to the one I love.
+% \cleardoublepage
+
+% T A B L E   O F   C O N T E N T S
+% ---------------------------------
+\renewcommand\contentsname{Table of Contents}
+\tableofcontents
+\cleardoublepage
+\phantomsection    % allows hyperref to link to the correct page
+
+% L I S T   O F   T A B L E S
+% ---------------------------
+\addcontentsline{toc}{chapter}{List of Tables}
+\listoftables
+\cleardoublepage
+\phantomsection		% allows hyperref to link to the correct page
+
+% L I S T   O F   F I G U R E S
+% -----------------------------
+% \addcontentsline{toc}{chapter}{List of Figures}
+% \listoffigures
+% \cleardoublepage
+% \phantomsection		% allows hyperref to link to the correct page
+
+% GLOSSARIES (Lists of definitions, abbreviations, symbols, etc. provided by the glossaries-extra package)
+% -----------------------------
+% \printglossaries
+% \cleardoublepage
+% \phantomsection		% allows hyperref to link to the correct page
+
+% Change page numbering back to Arabic numerals
+\pagenumbering{arabic}
Index: doc/theses/aaron_moss_PhD/phd/generic-types.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/generic-types.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/generic-types.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,8 @@
+\chapter{Generic Types}
+\label{generic-chap}
+
+Talk about generic types. Pull from Moss~\etal~\cite{Moss18}.
+
+% TODO mention impetus for zero_t design
+
+% TODO mention use in tuple-type implementation
Index: doc/theses/aaron_moss_PhD/phd/introduction.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/introduction.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/introduction.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,37 @@
+\chapter{Introduction}
+
+The C programming language has had a wide-ranging impact on the design of software and programming languages. 
+In the 30 years since its first standardization, it has consistently been one of the most popular programming languages, with millions of lines of C code still in active use, and tens of thousands of trained programmers producing it. The TIOBE index~\cite{TIOBE} tracks popularity of programming languages over time, and C has never dropped below second place:
+
+\begin{table}[h]
+\caption[TIOBE index over time]{Current top 5 places in the TIOBE index averaged over years}
+\label{tiobe-table}
+
+\centering
+\begin{tabular}{@{}cccccccc@{}}
+	& 2018	& 2013	& 2008	& 2003	& 1998	& 1993	& 1988	\\
+Java		& 1		& 2		& 1		& 1		& 18	& --	& --	\\
+\textbf{C}	& \textbf{2} & \textbf{1} & \textbf{2} & \textbf{2} & \textbf{1} & \textbf{1} & \textbf{1} \\
+\CC{}		& 3		& 4		& 3		& 3		& 2		& 2		& 5		\\
+Python		& 4		& 7		& 6		& 11	& 22	& 17	& --	\\
+\Csharp{}	& 5		& 5		& 7		& 8		& --	& --	& --	\\
+\end{tabular}
+\end{table}
+
+The impact of C on programming language design is also obvious from Table~\ref{tiobe-table}; with the exception of Python, all of the top five languages use C-like syntax and procedural control structures. 
+\CC{} is even a largely backwards-compatible extension of C, with development dating back nearly as far as C itself. 
+Though its lasting popularity and wide impact on programming language design point to the continued relevance of C, they also highlight the widespread desire of programmers for languages with more expressive power and programmer-friendly features; accommodating both low-impact maintenance of legacy C code and low-effort development of the software of the future is a difficult task for a single programming language.
+
+\CFA{}\footnote{Pronounced ``C-for-all'', and written \CFA{} or \CFL{}.} is an evolutionary modernization of the C programming language which aims to fulfill both these ends well. 
+\CFA{} both fixes existing design problems and adds multiple new features to C, including name overloading, user-defined operators, parametric-polymorphic routines, and type constructors and destructors, among others. 
+The new features make \CFA{} more powerful and expressive than C, while maintaining strong backward-compatibility with both C code and the procedural paradigm expected by C programmers. 
+However, these new features do impose a compile-time cost, particularly in the expression resolver, which must evaluate the typing rules of a significantly more complex type-system.
+
+This thesis is focused on making \CFA{} a more powerful and expressive language, both by adding new features to the \CFA{} type system and ensuring that both added and existing features can be efficiently implemented in \CFACC{}, the \CFA{} reference compiler. 
+Particular contributions of this work include design and implementation of 
+parametric-polymorphic (``generic'') types in a manner compatible with the existing polymorphism design of \CFA{} (Chapter~\ref{generic-chap}), a type environment data structure based on a novel variant of the union-find algorithm (Chapter~\ref{env-chap}), and a new expression resolution algorithm designed to quickly locate the optimal declarations for a \CFA{} expression (Chapter~\ref{resolution-chap}). 
+This expression resolution algorithm was designed with the aid of a purpose-built prototype system which encapsulates the essential aspects of the \CFA{} type system without incurring the technical debt of the existing system or the friction-inducing necessity of maintaining a working compiler; the goal of this prototype system was to discover effective heuristics to avoid performing unnecessary work in the process of locating the optimal \CFA{} expression resolution.
+
+Though the direction and validation of this work was fairly narrowly focused on the \CFA{} programming language, the tools used and results obtained should be of interest to a wider compiler and programming language design community. 
+In particular, with the addition of \emph{concepts} in \CCtwenty{}, conforming \CC{} compilers must support a model of type assertions very similar to that in \CFA{}, and the algorithmic techniques used in the expression resolution algorithm presented here may prove useful. 
+Type environments are also widely modelled in compiler implementations, particularly of functional languages, though also increasingly commonly in other languages (such as Rust) which perform type inference; the type environment presented here may be useful to those language implementers.
Index: doc/theses/aaron_moss_PhD/phd/macros.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/macros.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/macros.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,22 @@
+% Common macros for this thesis
+% Based on LaTeXmacros/common.tex
+
+\newcommand{\CFAIcon}{\textsf{C}\raisebox{\depth}{\rotatebox{180}{\textsf{A}}}} % Cforall symbolic name
+\newcommand{\CFA}{\protect\CFAIcon}	% safe for section/caption
+\newcommand{\CFL}{\textrm{Cforall}} % Cforall symbolic name
+\newcommand{\CFACC}{\texttt{cfa-cc}} % Cforall compiler
+\newcommand{\Celeven}{\textrm{C11}} % C11 symbolic name
+\newcommand{\CC}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}} % C++ symbolic name
+\newcommand{\CCeleven}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}11} % C++11 symbolic name
+\newcommand{\CCfourteen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}14} % C++14 symbolic name
+\newcommand{\CCseventeen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}17} % C++17 symbolic name
+\newcommand{\CCtwenty}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}20} % C++20 symbolic name
+\newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}} % C# symbolic name
+
+\newcommand{\ie}{\textit{i.e.}}
+\newcommand{\eg}{\textit{e.g.}}
+\newcommand{\etc}{\textit{etc.}}
+\newcommand{\etal}{\textit{et~al.}}
+
+\newcommand{\TODO}[1]{\textbf{TODO:} \textit{#1}}
+\newcommand{\cit}{\textsuperscript{[citation needed]}}
Index: doc/theses/aaron_moss_PhD/phd/resolution-heuristics.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/resolution-heuristics.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/resolution-heuristics.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,6 @@
+\chapter{Resolution Heuristics}
+\label{resolution-chap}
+
+Talk about the resolution heuristics. This is the bulk of the thesis.
+
+% Discuss changes to cost model, as promised in Ch. 2
Index: doc/theses/aaron_moss_PhD/phd/thesis.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/thesis.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/thesis.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,151 @@
+% Specify the document class, default style attributes, and page dimensions
+% For hyperlinked PDF, suitable for viewing on a computer, use this:
+\documentclass[letterpaper,12pt,titlepage,oneside,final]{book}
+
+% For PDF, suitable for double-sided printing, change the PrintVersion variable below
+% to "true" and use this \documentclass line instead of the one above:
+%\documentclass[letterpaper,12pt,titlepage,openright,twoside,final]{book}
+
+% common macros for this thesis
+\input{macros}
+
+\newcommand{\href}[1]{#1} % does nothing, but defines the command so the
+% print-optimized version will ignore \href tags (redefined by hyperref pkg).
+
+% This package allows if-then-else control structures.
+\usepackage{ifthen}
+\newboolean{PrintVersion}
+\setboolean{PrintVersion}{false} 
+% CHANGE THIS VALUE TO "true" as necessary, to improve printed results for hard copies
+% by overriding some options of the hyperref package below.
+
+\usepackage{amsmath,amssymb,amstext} % Lots of math symbols and environments
+\usepackage[pdftex]{graphicx} % For including graphics N.B. pdftex graphics driver 
+
+% Hyperlinks make it very easy to navigate an electronic document.
+% In addition, this is where you should specify the thesis title
+% and author as they appear in the properties of the PDF document.
+% Use the "hyperref" package 
+% N.B. HYPERREF MUST BE THE LAST PACKAGE LOADED; ADD ADDITIONAL PKGS ABOVE
+\usepackage[pdftex,pagebackref=false]{hyperref} % with basic options
+% N.B. pagebackref=true provides links back from the References to the body text. This can cause trouble for printing.
+
+\hypersetup{
+	plainpages=false,       % needed if Roman numbers in frontpages
+	unicode=false,          % non-Latin characters in Acrobat’s bookmarks
+	pdftoolbar=true,        % show Acrobat’s toolbar?
+	pdfmenubar=true,        % show Acrobat’s menu?
+	pdffitwindow=false,     % window fit to page when opened
+	pdfstartview={FitH},    % fits the width of the page to the window
+	pdftitle={Cforall\ Type\ System\ Implementation},    % title
+    pdfauthor={Aaron\ Moss}, % author
+    pdfsubject={Cforall},  % subject
+%    pdfkeywords={keyword1} {key2} {key3}, % list of keywords, and uncomment this line if desired
+	pdfnewwindow=true,      % links in new window
+	colorlinks=true,        % false: boxed links; true: colored links
+	linkcolor=blue,         % color of internal links
+	citecolor=green,        % color of links to bibliography
+	filecolor=magenta,      % color of file links
+	urlcolor=cyan           % color of external links
+}
+\ifthenelse{\boolean{PrintVersion}}{   % for improved print quality, change some hyperref options
+\hypersetup{	% override some previously defined hyperref options
+%    colorlinks,%
+	citecolor=black,%
+	filecolor=black,%
+	linkcolor=black,%
+	urlcolor=black}
+}{} % end of ifthenelse (no else)
+
+\input{cfa-macros} % must be loaded after hyperref
+
+% \usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package
+
+% Setting up the page margins...
+% uWaterloo thesis requirements specify a minimum of 1 inch (72pt) margin at the
+% top, bottom, and outside page edges and a 1.125 in. (81pt) gutter
+% margin (on binding side). While this is not an issue for electronic
+% viewing, a PDF may be printed, and so we have the same page layout for
+% both printed and electronic versions, we leave the gutter margin in.
+% Set margins to minimum permitted by uWaterloo thesis regulations:
+\setlength{\marginparwidth}{0pt} % width of margin notes
+% N.B. If margin notes are used, you must adjust \textwidth, \marginparwidth
+% and \marginparsep so that the space left between the margin notes and page
+% edge is less than 15 mm (0.6 in.)
+\setlength{\marginparsep}{0pt} % width of space between body text and margin notes
+\setlength{\evensidemargin}{0.125in} % Adds 1/8 in. to binding side of all 
+% even-numbered pages when the "twoside" printing option is selected
+\setlength{\oddsidemargin}{0.125in} % Adds 1/8 in. to the left of all pages
+% when "oneside" printing is selected, and to the left of all odd-numbered
+% pages when "twoside" printing is selected
+\setlength{\textwidth}{6.375in} % assuming US letter paper (8.5 in. x 11 in.) and 
+% side margins as above
+\raggedbottom
+
+% The following statement specifies the amount of space between
+% paragraphs. Other reasonable specifications are \bigskipamount and \smallskipamount.
+\setlength{\parskip}{\medskipamount}
+
+% The following statement controls the line spacing.  The default
+% spacing corresponds to good typographic conventions and only slight
+% changes (e.g., perhaps "1.2"), if any, should be made.
+\renewcommand{\baselinestretch}{1} % this is the default line space setting
+
+% By default, each chapter will start on a recto (right-hand side)
+% page.  We also force each section of the front pages to start on 
+% a recto page by inserting \cleardoublepage commands.
+% In many cases, this will require that the verso page be
+% blank and, while it should be counted, a page number should not be
+% printed.  The following statements ensure a page number is not
+% printed on an otherwise blank verso page.
+\let\origdoublepage\cleardoublepage
+\newcommand{\clearemptydoublepage}{%
+  \clearpage{\pagestyle{empty}\origdoublepage}}
+\let\cleardoublepage\clearemptydoublepage
+
+%======================================================================
+%   L O G I C A L    D O C U M E N T
+%======================================================================
+\begin{document}
+
+%----------------------------------------------------------------------
+% FRONT MATERIAL
+%----------------------------------------------------------------------
+\input{frontpgs}
+
+%----------------------------------------------------------------------
+% MAIN BODY
+%----------------------------------------------------------------------
+\input{introduction}
+\input{background}
+\input{generic-types}
+\input{type-environment}
+\input{resolution-heuristics}
+\input{conclusion}
+
+% B I B L I O G R A P H Y
+% -----------------------
+
+% The following statement selects the style to use for references.  It controls the sort order of the entries in the bibliography and also the formatting for the in-text labels.
+\bibliographystyle{plain}
+% This specifies the location of the file containing the bibliographic information.  
+% It assumes you're using BibTeX (if not, why not?).
+\cleardoublepage % This is needed if the book class is used, to place the anchor in the correct page,
+                 % because the bibliography will start on its own page.
+                 % Use \clearpage instead if the document class uses the "oneside" argument
+\phantomsection  % With hyperref package, enables hyperlinking from the table of contents to bibliography             
+% The following statement causes the title "References" to be used for the bibliography section:
+\renewcommand*{\bibname}{References}
+
+% Add the References to the Table of Contents
+\addcontentsline{toc}{chapter}{\textbf{References}}
+
+\bibliography{aaron-thesis}
+% Tip 5: You can create multiple .bib files to organize your references. 
+% Just list them all in the \bibliography command, separated by commas (no spaces).
+
+% The following statement causes the specified references to be added to the bibliography even if they were not
+% cited in the text. The asterisk is a wildcard that causes all entries in the bibliographic database to be included (optional).
+% \nocite{*}
+
+\end{document}
Index: doc/theses/aaron_moss_PhD/phd/type-environment.tex
===================================================================
--- doc/theses/aaron_moss_PhD/phd/type-environment.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
+++ doc/theses/aaron_moss_PhD/phd/type-environment.tex	(revision 67982887d6d6ffea6e1c510c7ded3be0a1acea1f)
@@ -0,0 +1,4 @@
+\chapter{Type Environment}
+\label{env-chap}
+
+Talk about the type environment data structure. Pull from your presentation.
