source: doc/papers/general/Paper.tex @ 119bb6a

1\documentclass{article}
2
3\usepackage{fullpage}
4\usepackage{epic,eepic}
5\usepackage{xspace,calc,comment}
6\usepackage{upquote}                                                                    % switch curled `'" to straight
7\usepackage{listings}                                                                   % format program code
8\usepackage{rotating}
9\usepackage[usenames]{color}
10\usepackage{pslatex}                                    % reduce size of san serif font
11\usepackage[plainpages=false,pdfpagelabels,pdfpagemode=UseNone,pagebackref=true,breaklinks=true,colorlinks=true,linkcolor=blue,citecolor=blue,urlcolor=blue]{hyperref}
12
13\setlength{\textheight}{9in}
14%\oddsidemargin 0.0in
15\renewcommand{\topfraction}{0.8}                % float must be greater than X of the page before it is forced onto its own page
16\renewcommand{\bottomfraction}{0.8}             % float must be greater than X of the page before it is forced onto its own page
17\renewcommand{\floatpagefraction}{0.8}  % float must be greater than X of the page before it is forced onto its own page
18\renewcommand{\textfraction}{0.0}               % the entire page maybe devoted to floats with no text on the page at all
19
20\lefthyphenmin=4                                                % hyphen only after 4 characters
21\righthyphenmin=4
22
23% Names used in the document.
24
25\newcommand{\CFAIcon}{\textsf{C}\raisebox{\depth}{\rotatebox{180}{\textsf{A}}}\xspace} % Cforall symbolic name
26\newcommand{\CFA}{\protect\CFAIcon} % safe for section/caption
27\newcommand{\CFL}{\textrm{Cforall}\xspace} % Cforall symbolic name
28\newcommand{\Celeven}{\textrm{C11}\xspace} % C11 symbolic name
29\newcommand{\CC}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}\xspace} % C++ symbolic name
30\newcommand{\CCeleven}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}11\xspace} % C++11 symbolic name
31\newcommand{\CCfourteen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}14\xspace} % C++14 symbolic name
32\newcommand{\CCseventeen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name
33\newcommand{\CCtwenty}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name
34\newcommand{\CCV}{\rm C\kern-.1em\hbox{+\kern-.25em+}obj\xspace} % C++ virtual symbolic name
35\newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} % C# symbolic name
36
37%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
38
39\newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}}
40\newcommand{\TODO}[1]{\textbf{TODO}: {\itshape #1}} % TODO included
41%\newcommand{\TODO}[1]{} % TODO elided
42
43% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
44% removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR
45% AFTER HYPERREF.
46%\DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}
47\renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}}
48
49\makeatletter
50% parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for
51% use rather than use \parident directly.
52\newlength{\parindentlnth}
53\setlength{\parindentlnth}{\parindent}
54
55\newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}}
56\newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}}
57
58\newlength{\gcolumnposn}                                % temporary hack because lstlisting does not handle tabs correctly
59\newlength{\columnposn}
60\setlength{\gcolumnposn}{2.75in}
61\setlength{\columnposn}{\gcolumnposn}
62\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}}
63\newcommand{\CRT}{\global\columnposn=\gcolumnposn}
64
65% Denote newterms in particular font and index them without particular font and in lowercase, e.g., \newterm{abc}.
66% The option parameter provides an index term different from the new term, e.g., \newterm[\texttt{abc}]{abc}
67% The star version does not lowercase the index information, e.g., \newterm*{IBM}.
68\newcommand{\newtermFontInline}{\emph}
69\newcommand{\newterm}{\@ifstar\@snewterm\@newterm}
70\newcommand{\@newterm}[2][\@empty]{\lowercase{\def\temp{#2}}{\newtermFontInline{#2}}\ifx#1\@empty\index{\temp}\else\index{#1@{\protect#2}}\fi}
71\newcommand{\@snewterm}[2][\@empty]{{\newtermFontInline{#2}}\ifx#1\@empty\index{#2}\else\index{#1@{\protect#2}}\fi}
72
73% Latin abbreviation
74\newcommand{\abbrevFont}{\textit}       % set empty for no italics
75\newcommand{\EG}{\abbrevFont{e}.\abbrevFont{g}.}
76\newcommand*{\eg}{%
77        \@ifnextchar{,}{\EG}%
78                {\@ifnextchar{:}{\EG}%
79                        {\EG,\xspace}}%
80}%
81\newcommand{\IE}{\abbrevFont{i}.\abbrevFont{e}.}
82\newcommand*{\ie}{%
83        \@ifnextchar{,}{\IE}%
84                {\@ifnextchar{:}{\IE}%
85                        {\IE,\xspace}}%
86}%
87\newcommand{\ETC}{\abbrevFont{etc}}
88\newcommand*{\etc}{%
89        \@ifnextchar{.}{\ETC}%
90        {\ETC\xspace}%
91}%
92\newcommand{\ETAL}{\abbrevFont{et}\hspace{2pt}\abbrevFont{al}}
93\newcommand*{\etal}{%
94        \@ifnextchar{.}{\protect\ETAL}%
95                {\abbrevFont{\protect\ETAL}.\xspace}%
96}%
97\newcommand{\VIZ}{\abbrevFont{viz}}
98\newcommand*{\viz}{%
99        \@ifnextchar{.}{\VIZ}%
100                {\abbrevFont{\VIZ}.\xspace}%
101}%
102\makeatother
103
104% CFA programming language, based on ANSI C (with some gcc additions)
105\lstdefinelanguage{CFA}[ANSI]{C}{
106        morekeywords={
107                _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, _At, __attribute,
108                __attribute__, auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__,
109                __const, __const__, disable, dtype, enable, __extension__, fallthrough, fallthru,
110                finally, forall, ftype, _Generic, _Imaginary, inline, __label__, lvalue, _Noreturn, one_t,
111                otype, restrict, _Static_assert, throw, throwResume, trait, try, ttype, typeof, __typeof,
112                __typeof__, virtual, with, zero_t},
113        moredirectives={defined,include_next}%
114}%
115
116\lstset{
117language=CFA,
118columns=fullflexible,
119basicstyle=\linespread{0.9}\sf,                                                 % reduce line spacing and use sanserif font
120stringstyle=\tt,                                                                                % use typewriter font
121tabsize=5,                                                                                              % N space tabbing
122xleftmargin=\parindentlnth,                                                             % indent code to paragraph indentation
123%mathescape=true,                                                                               % LaTeX math escape in CFA code $...$
124escapechar=\$,                                                                                  % LaTeX escape in CFA code
125keepspaces=true,                                                                                %
126showstringspaces=false,                                                                 % do not show spaces with cup
127showlines=true,                                                                                 % show blank lines at end of code
128aboveskip=4pt,                                                                                  % spacing above/below code block
129belowskip=3pt,
130% replace/adjust listing characters that look bad in sanserif
131literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1
132        {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
133        {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex\textgreater}2,
134moredelim=**[is][\color{red}]{`}{`},
135}% lstset
136
137% inline code @...@
138\lstMakeShortInline@%
139
140\lstnewenvironment{cfa}[1][]
141{\lstset{#1}}
142{}
143\lstnewenvironment{C++}[1][]                            % use C++ style
144{\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
145{}
146
147
148\title{Generic and Tuple Types with Efficient Dynamic Layout in \protect\CFA}
149
150\author{Aaron Moss, Robert Schluntz, Peter Buhr}
151% \email{a3moss@uwaterloo.ca}
152% \email{rschlunt@uwaterloo.ca}
153% \email{pabuhr@uwaterloo.ca}
154% \affiliation{%
155%       \institution{University of Waterloo}
156%       \department{David R. Cheriton School of Computer Science}
157%       \streetaddress{Davis Centre, University of Waterloo}
158%       \city{Waterloo}
159%       \state{ON}
160%       \postcode{N2L 3G1}
161%       \country{Canada}
162% }
163
164%\terms{generic, tuple, variadic, types}
165%\keywords{generic types, tuple types, variadic types, polymorphic functions, C, Cforall}
166
167\begin{document}
168\maketitle
169
170
171\begin{abstract}
172The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects.
173This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
174Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive.
175The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers.
176Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers.
177Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers.
178This paper describes two \CFA extensions, generic and tuple types, details how their design avoids shortcomings of similar features in C and other C-like languages, and presents experimental results validating the design.
179\end{abstract}
180
181
182\section{Introduction and Background}
183
184The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects.
185This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
186The TIOBE index~\cite{TIOBE} ranks the top 5 most popular programming languages as: Java 16\%, \Textbf{C 7\%}, \Textbf{\CC 5\%}, \Csharp 4\%, and Python 4\%, for a combined 36\%, where each of the next 50 languages is below 3\%, with a long tail.
187The top 3 rankings over the past 30 years are:
188\lstDeleteShortInline@%
189\begin{center}
190\setlength{\tabcolsep}{10pt}
191\begin{tabular}{@{}rccccccc@{}}
192                & 2017  & 2012  & 2007  & 2002  & 1997  & 1992  & 1987          \\ \hline
193Java    & 1             & 1             & 1             & 1             & 12    & -             & -                     \\
194\Textbf{C}      & \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{1}& \Textbf{1}& \Textbf{1}    \\
195\CC             & 3             & 3             & 3             & 3             & 2             & 2             & 4                     \\
196\end{tabular}
197\end{center}
198\lstMakeShortInline@%
199Love it or hate it, C is extremely popular, highly used, and one of the few systems languages.
200In many cases, \CC is used solely as a better C.
201Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive.
202
203\CFA (pronounced ``C-for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers.
204The four key design goals for \CFA~\cite{Bilson03} are:
205(1) The behaviour of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler;
206(2) Standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler;
207(3) \CFA code must be at least as portable as standard C code;
208(4) Extensions introduced by \CFA must be translated in the most efficient way possible.
209These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used.
210\CC is used similarly, but has the disadvantages of multiple legacy design-choices that cannot be updated and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project.
211
212\CFA is currently implemented as a source-to-source translator from \CFA to the GCC-dialect of C~\cite{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)--(3).
213Ultimately, a compiler is necessary for advanced features and optimal performance.
214
215This paper identifies shortcomings in existing approaches to generic and variadic data types in C-like languages and presents a design for generic and variadic types avoiding those shortcomings.
216Specifically, the solution is both reusable and type-checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions.
217The new constructs are empirically compared with both standard C and \CC; the results show the new design is comparable in performance.
218
219
220\subsection{Polymorphic Functions}
221\label{sec:poly-fns}
222
223\CFA{}\hspace{1pt}'s polymorphism was originally formalized by Ditchfield~\cite{Ditchfield92}, and first implemented by Bilson~\cite{Bilson03}.
224The signature feature of \CFA is parametric-polymorphic functions~\cite{forceone:impl,Cormack90,Duggan96} with functions generalized using a @forall@ clause (giving the language its name):
225\begin{lstlisting}
226`forall( otype T )` T identity( T val ) { return val; }
227int forty_two = identity( 42 );                         $\C{// T is bound to int, forty\_two == 42}$
228\end{lstlisting}
229The @identity@ function above can be applied to any complete \emph{object type} (or @otype@).
230The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type.
231The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor.
232If this extra information is not needed, \eg for a pointer, the type parameter can be declared as a \emph{data type} (or @dtype@).
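For illustration, the following sketch suggests the kind of C code the translator could generate for @identity@ and a call with @T@ bound to @int@; the identifier names are purely illustrative, and the constructor, copy-constructor, destructor, and assignment parameters normally passed for an @otype@ are elided for brevity:
\begin{lstlisting}
// hypothetical generated C (sketch); assumes <stddef.h> and <string.h>
void _identity( void * _retval, void * _val, size_t _sizeof_T, size_t _alignof_T ) {
	memcpy( _retval, _val, _sizeof_T );     // copy a value of statically unknown type T
}
void _caller( void ) {
	int forty_two, _tmp = 42;
	_identity( &forty_two, &_tmp, sizeof(int), _Alignof(int) );     // T bound to int at the call site
}
\end{lstlisting}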
233
234In \CFA, the polymorphism runtime-cost is spread over each polymorphic call, due to passing more arguments to polymorphic functions;
235the experiments in Section~\ref{sec:eval} show this overhead is similar to \CC virtual-function calls.
236A design advantage is that, unlike \CC template-functions, \CFA polymorphic-functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat.
237
238Since bare polymorphic-types provide a restricted set of available operations, \CFA provides a \emph{type assertion}~\cite[pp.~37-44]{Alphard} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable.
239For example, the function @twice@ can be defined using the \CFA syntax for operator overloading:
240\begin{lstlisting}
241forall( otype T `| { T ?+?(T, T); }` ) T twice( T x ) { return x + x; } $\C{// ? denotes operands}$
242int val = twice( twice( 3.7 ) );
243\end{lstlisting}
244which works for any type @T@ with a matching addition operator.
245The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@.
246There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@, because \CFA uses the return type~\cite{Cormack81,Baker82,Ada} in its type analysis.
247The first approach has a late conversion from @double@ to @int@ on the final assignment, while the second has an eager conversion to @int@.
248\CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition.
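The following sketch suggests how the generated C might pass such a wrapper as an explicit assertion parameter; the names are illustrative, the result is returned through a caller-allocated buffer because the size of @T@ is not known statically, and the implicit size, alignment, and lifetime parameters are elided:
\begin{lstlisting}
// hypothetical generated C (sketch); illustrative names only
void _add_double( void * _ret, void * _l, void * _r ) {        // wrapper for ?+? with T bound to double
	*(double *)_ret = *(double *)_l + *(double *)_r;
}
void _twice( void * _ret, void * _x, void (* _add)( void *, void *, void * ) ) {
	_add( _ret, _x, _x );                                       // assertion parameter supplies +
}
void _caller( void ) {
	double _t1 = 3.7, _t2, _t3;
	_twice( &_t2, &_t1, _add_double );                          // inner twice
	_twice( &_t3, &_t2, _add_double );                          // outer twice
	int val = (int)_t3;                                         // late conversion on the final assignment
}
\end{lstlisting}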
249
250Crucial to the design of a new programming language are the libraries to access thousands of external software features.
251Like \CC, \CFA inherits a massive compatible library-base, where other programming languages must rewrite or provide fragile inter-language communication with C.
252A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted floating-point array:
253\begin{lstlisting}
254void * bsearch( const void * key, const void * base, size_t nmemb, size_t size,
255                                int (* compar)( const void *, const void * ));
256int comp( const void * t1, const void * t2 ) { return *(double *)t1 < *(double *)t2 ? -1 :
257                                *(double *)t2 < *(double *)t1 ? 1 : 0; }
258double key = 5.0, vals[10] = { /* 10 sorted floating-point values */ };
259double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp );      $\C{// search sorted array}$
260\end{lstlisting}
261which can be augmented simply with generalized, type-safe, \CFA-overloaded wrappers:
262\begin{lstlisting}
263forall( otype T | { int ?<?( T, T ); } ) T * bsearch( T key, const T * arr, size_t size ) {
264        int comp( const void * t1, const void * t2 ) { /* as above with double changed to T */ }
265        return (T *)bsearch( &key, arr, size, sizeof(T), comp ); }
266forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) {
267        T * result = bsearch( key, arr, size ); $\C{// call first version}$
268        return result ? result - arr : size; }  $\C{// pointer subtraction includes sizeof(T)}$
269double * val = bsearch( 5.0, vals, 10 );        $\C{// selection based on return type}$
270int posn = bsearch( 5.0, vals, 10 );
271\end{lstlisting}
272The nested function @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result.
273Providing a hidden @comp@ function in \CC is awkward as lambdas do not use C calling-conventions and template declarations cannot appear at block scope.
274As well, an alternate kind of return is made available: position versus pointer to found element.
275\CC's type-system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a templated @bsearch@.
276
277\CFA has replacement libraries condensing hundreds of existing C functions into tens of \CFA overloaded functions, all without rewriting the actual computations.
278For example, it is possible to write a type-safe \CFA wrapper @malloc@ based on the C @malloc@:
279\begin{lstlisting}
280forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)malloc( sizeof(T) ); }
281int * ip = malloc();                                            $\C{// select type and size from left-hand side}$
282double * dp = malloc();
283struct S {...} * sp = malloc();
284\end{lstlisting}
285where the return type supplies the type/size of the allocation, which is impossible in most type systems.
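Because the only type information this wrapper needs is the size of @T@, the generated C can be as simple as the following sketch, where the return type chosen at each call site becomes an explicit size argument (names illustrative):
\begin{lstlisting}
// hypothetical generated C (sketch); assumes <stdlib.h>
void * _malloc_T( size_t _sizeof_T, size_t _alignof_T ) { return malloc( _sizeof_T ); }
void _caller( void ) {
	int * ip = _malloc_T( sizeof(int), _Alignof(int) );             // int * ip = malloc();
	double * dp = _malloc_T( sizeof(double), _Alignof(double) );    // double * dp = malloc();
}
\end{lstlisting}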
286
287Call-site inferencing and nested functions provide a localized form of inheritance.
288For example, the \CFA @qsort@ only sorts in ascending order using @<@.
289However, it is trivial to locally change this behaviour:
290\begin{lstlisting}
291forall( otype T | { int ?<?( T, T ); } ) void qsort( const T * arr, size_t size ) { /* use C qsort */ }
292{       int ?<?( double x, double y ) { return x `>` y; }       $\C{// locally override behaviour}$
293        qsort( vals, size );                                    $\C{// descending sort}$
294}
295\end{lstlisting}
296Within the block, the nested version of @?<?@ performs @?>?@ and this local version overrides the built-in @?<?@ so it is passed to @qsort@.
297Hence, programmers can easily form local environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types.
298
299Finally, \CFA allows variable overloading:
300\begin{lstlisting}
301short int MAX = ...;   int MAX = ...;  double MAX = ...;
302short int s = MAX;    int i = MAX;    double d = MAX;   $\C{// select correct MAX}$
303\end{lstlisting}
304Here, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@.
305
306\subsection{Traits}
307
308\CFA provides \emph{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each function declaration:
309\begin{lstlisting}
310trait summable( otype T ) {
311        void ?{}( T *, zero_t );                                $\C{// constructor from 0 literal}$
312        T ?+?( T, T );                                                  $\C{// assortment of additions}$
313        T ?+=?( T *, T );
314        T ++?( T * );
315        T ?++( T * ); };
316forall( otype T `| summable( T )` ) T sum( T a[$\,$], size_t size ) {  // use trait
317        `T` total = { `0` };                                    $\C{// instantiate T from 0 by calling its constructor}$
318        for ( unsigned int i = 0; i < size; i += 1 ) total `+=` a[i]; $\C{// select appropriate +}$
319        return total; }
320\end{lstlisting}
321
322In fact, the set of @summable@ trait operators is incomplete, as it is missing assignment for type @T@, but @otype@ is syntactic sugar for the following implicit trait:
323\begin{lstlisting}
324trait otype( dtype T | sized(T) ) {  // sized is a pseudo-trait for types with known size and alignment
325        void ?{}( T * );                                                $\C{// default constructor}$
326        void ?{}( T *, T );                                             $\C{// copy constructor}$
327        void ?=?( T *, T );                                             $\C{// assignment operator}$
328        void ^?{}( T * ); };                                    $\C{// destructor}$
329\end{lstlisting}
330Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete type: stack-allocatable, default or copy-initialized, assigned, and deleted.
331
332In summation, the \CFA type-system uses \emph{nominal typing} for concrete types, matching with the C type-system, and \emph{structural typing} for polymorphic types.
333Hence, trait names play no part in type equivalence;
334the names are simply macros for a list of polymorphic assertions, which are expanded at usage sites.
335Nevertheless, trait names form a logical subtype-hierarchy with @dtype@ at the top, where traits often contain overlapping assertions, \eg operator @+@.
336Traits are used like interfaces in Java or abstract base-classes in \CC, but without the nominal inheritance-relationships.
337Instead, each polymorphic function (or generic type) defines the structural type needed for its execution (polymorphic type-key), and this key is fulfilled at each call site from the lexical environment, which is similar to Go~\cite{Go} interfaces.
338Hence, new lexical scopes and nested functions are used extensively to create local subtypes, as in the @qsort@ example, without having to manage a nominal-inheritance hierarchy.
339(Nominal inheritance can be approximated with traits using marker variables or functions, as is done in Go.)
340
341% Nominal inheritance can be simulated with traits using marker variables or functions:
342% \begin{lstlisting}
343% trait nominal(otype T) {
344%     T is_nominal;
345% };
346% int is_nominal;                                                               $\C{// int now satisfies the nominal trait}$
347% \end{lstlisting}
348%
349% Traits, however, are significantly more powerful than nominal-inheritance interfaces; most notably, traits may be used to declare a relationship \emph{among} multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems:
350% \begin{lstlisting}
351% trait pointer_like(otype Ptr, otype El) {
352%     lvalue El *?(Ptr);                                                $\C{// Ptr can be dereferenced into a modifiable value of type El}$
353% }
354% struct list {
355%     int value;
356%     list * next;                                                              $\C{// may omit "struct" on type names as in \CC}$
357% };
358% typedef list * list_iterator;
359%
360% lvalue int *?( list_iterator it ) { return it->value; }
361% \end{lstlisting}
362% In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@).
363% While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously.
364
365
366\section{Generic Types}
367
368One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms.
369Broadly speaking, there are three approaches to implement abstract data-structures in C.
370One approach is to write bespoke data-structures for each context in which they are needed.
371While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures.
372A second approach is to use @void *@--based polymorphism, \eg the C standard-library functions @bsearch@ and @qsort@; an approach which does allow reuse of code for common functionality.
373However, basing all polymorphism on @void *@ eliminates the type-checker's ability to ensure that argument types are properly matched, often requiring a number of extra function parameters, pointer indirection, and dynamic allocation that would not otherwise be needed.
374A third approach to generic code is to use preprocessor macros, which does allow the generated code to be both generic and type-checked, but errors may be difficult to interpret.
375Furthermore, writing and using preprocessor macros can be unnatural and inflexible.
376
377\CC, Java, and other languages use \emph{generic types} to produce type-safe abstract data-types.
378\CFA also implements generic types that integrate efficiently and naturally with the existing polymorphic functions, while retaining backwards compatibility with C and providing separate compilation.
379However, for known concrete parameters, the generic-type definition can be inlined, like \CC templates.
380
381A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name:
382\begin{lstlisting}
383forall( otype R, otype S ) struct pair {
384        R first;
385        S second;
386};
387forall( otype T ) T value( pair( const char *, T ) p ) { return p.second; }
388forall( dtype F, otype T ) T value_p( pair( F *, T * ) p ) { return * p.second; }
389pair( const char *, int ) p = { "magic", 42 };
390int magic = value( p );
391pair( void *, int * ) q = { 0, &p.second };
392magic = value_p( q );
393double d = 1.0;
394pair( double *, double * ) r = { &d, &d };
395d = value_p( r );
396\end{lstlisting}
397
398\CFA classifies generic types as either \emph{concrete} or \emph{dynamic}.
399Concrete types have a fixed memory layout regardless of type parameters, while dynamic types vary in memory layout depending on their type parameters.
400A type may have polymorphic parameters but still be concrete, called \emph{dtype-static}.
401Polymorphic pointers are an example of dtype-static types, \eg @forall(dtype T) T *@ is a polymorphic type, but for any @T@, @T *@ is a fixed-size pointer and can therefore be represented by a @void *@ in code generation.
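As a result, a polymorphic function whose @dtype@ parameters appear only behind pointers needs no implicit parameters at all; a possible lowering to C is sketched below for a hypothetical declaration (names illustrative):
\begin{lstlisting}
// hypothetical CFA declaration: forall( dtype T ) T * select( int pick, T * a, T * b );
// sketch of generated C: every T * has the same layout, so it is represented as void *
void * _select( int pick, void * a, void * b ) { return pick ? a : b; }
void _caller( void ) {
	int x = 1, y = 2;
	int * p = _select( 0, &x, &y );         // translator-inserted casts elided
}
\end{lstlisting}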
402
403\CFA generic types also allow checked argument-constraints.
404For example, the following declaration of a sorted set-type ensures the set key supports equality and relational comparison:
405\begin{lstlisting}
406forall( otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); } ) struct sorted_set;
407\end{lstlisting}
408
409
410\subsection{Concrete Generic-Types}
411
412The \CFA translator template-expands concrete generic-types into new structure types, affording maximal inlining.
413To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated structure declarations where appropriate.
414A function declaration that accepts or returns a concrete generic-type produces a declaration for the instantiated structure in the same scope, which all callers may reuse.
415For example, the concrete instantiation for @pair( const char *, int )@ is:
416\begin{lstlisting}
417struct _pair_conc1 {
418        const char * first;
419        int second;
420};
421\end{lstlisting}
422
423A concrete generic-type with dtype-static parameters is also expanded to a structure type, but this type is used for all matching instantiations.
424In the above example, the @pair( F *, T * )@ parameter to @value_p@ is such a type; its expansion is below and it is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate:
425\begin{lstlisting}
426struct _pair_conc0 {
427        void * first;
428        void * second;
429};
430\end{lstlisting}
431
432
433\subsection{Dynamic Generic-Types}
434
435Though \CFA implements concrete generic-types efficiently, it also has a fully general system for dynamic generic types.
436As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller.
437Dynamic generic-types also have an \emph{offset array} containing structure-member offsets.
438A dynamic generic-union needs no such offset array, as all members are at offset 0, but size and alignment are still necessary.
439Access to members of a dynamic structure is provided at runtime via base-displacement addressing with the structure pointer and the member offset (similar to the @offsetof@ macro), moving a compile-time offset calculation to runtime.
440
441The offset arrays are statically generated where possible.
442If a dynamic generic-type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume the generic type is complete (\ie has a known layout) at any call-site, and the offset array is passed from the caller;
443if the generic type is concrete at the call site, the elements of this offset array can even be statically generated using the C @offsetof@ macro.
444As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void *@, and @_offsetof_pair@ is the offset array passed into @value@ for @pair( const char *, T )@.
445The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) }@.
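Putting these pieces together, the following sketch suggests the C that could be generated for @value@ and the call @value( p )@ above; @_value@ and the other identifiers are illustrative, and the implicit alignment argument plus the lifetime functions for @T@ are elided:
\begin{lstlisting}
// hypothetical generated C (sketch); assumes <stddef.h> and <string.h>
void _value( void * _ret, void * _p, size_t * _offsetof_pair, size_t _sizeof_T ) {
	// p.second becomes base-displacement addressing through the offset array
	memcpy( _ret, (char *)_p + _offsetof_pair[1], _sizeof_T );
}
void _caller( struct _pair_conc1 p ) {          // concrete instantiation pair( const char *, int )
	size_t _offsetof_pair[] = { offsetof( struct _pair_conc1, first ),
				offsetof( struct _pair_conc1, second ) };
	int magic;
	_value( &magic, &p, _offsetof_pair, sizeof(int) );      // int magic = value( p );
}
\end{lstlisting}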
446
447In some cases the offset arrays cannot be statically generated.
448For instance, modularity is generally provided in C by including an opaque forward-declaration of a structure and associated accessor and mutator functions in a header file, with the actual implementations in a separately-compiled @.c@ file.
449\CFA supports this pattern for generic types, but the caller does not know the actual layout or size of the dynamic generic-type, and only holds it by a pointer.
450The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed into a function from that function's caller.
451These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic structure (un@sized@ parameters are forbidden from being used in a context that affects layout).
452Results of these layout functions are cached so that they are only computed once per type per function. %, as in the example below for @pair@.
453Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature.
454For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values.
455This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function.
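A sketch of what such a layout function could look like for @pair@ follows; the name and exact calling convention are illustrative of the scheme rather than the translator's actual output:
\begin{lstlisting}
// hypothetical layout function for pair( R, S ) (sketch); assumes <stddef.h>
void _layoutof_pair( size_t * _size, size_t * _align, size_t * _offsets,
			size_t _sizeof_R, size_t _alignof_R, size_t _sizeof_S, size_t _alignof_S ) {
	size_t off = 0;
	_offsets[0] = off;  off += _sizeof_R;                           // member first
	off = ( off + _alignof_S - 1 ) / _alignof_S * _alignof_S;       // align member second
	_offsets[1] = off;  off += _sizeof_S;
	*_align = _alignof_R > _alignof_S ? _alignof_R : _alignof_S;    // struct alignment is the maximum
	*_size = ( off + *_align - 1 ) / *_align * *_align;             // pad size to a multiple of alignment
}
\end{lstlisting}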
456
457Whether a type is concrete, dtype-static, or dynamic is decided solely on the @forall@'s type parameters.
458This design allows opaque forward declarations of generic types, \eg @forall(otype T)@ @struct Box@ -- like in C, all uses of @Box(T)@ can be separately compiled, and callers from other translation units know the proper calling conventions to use.
459If the definition of a structure type is included in deciding whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T)@ @struct unique_ptr { T * p; }@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off.
460
461
462\subsection{Applications}
463\label{sec:generic-apps}
464
465The reuse of dtype-static structure instantiations enables useful programming patterns at zero runtime cost.
466The most important such pattern is using @forall(dtype T) T *@ as a type-checked replacement for @void *@, \eg creating a lexicographic comparison for pairs of pointers used by @bsearch@ or @qsort@:
467\begin{lstlisting}
468forall(dtype T) int lexcmp( pair( T *, T * ) * a, pair( T *, T * ) * b, int (* cmp)( T *, T * ) ) {
469        return cmp( a->first, b->first ) ? : cmp( a->second, b->second );
470}
471\end{lstlisting}
472Since @pair(T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@, so the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type-checked to ensure the fields of both pairs and the arguments to the comparison function match in type.
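For comparison, a hand-written standard-C version over the expanded structure @_pair_conc0@ has the same shape but provides no checking that the pair elements and the comparison function agree in type (the name @lexcmp_c@ is illustrative):
\begin{lstlisting}
// hand-written standard C over void *: identical code shape, but unchecked
int lexcmp_c( struct _pair_conc0 * a, struct _pair_conc0 * b, int (* cmp)( void *, void * ) ) {
	int c = cmp( a->first, b->first );
	return c ? c : cmp( a->second, b->second );
}
\end{lstlisting}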
473
474Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \emph{tag-structures}.
475Sometimes information is only used for type-checking and can be omitted at runtime, \eg:
476\begin{lstlisting}
477forall(dtype Unit) struct scalar { unsigned long value; };
478struct metres {};
479struct litres {};
480
481forall(dtype U) scalar(U) ?+?( scalar(U) a, scalar(U) b ) {
482        return (scalar(U)){ a.value + b.value };
483}
484scalar(metres) half_marathon = { 21093 };
485scalar(litres) swimming_pool = { 2500000 };
486scalar(metres) marathon = half_marathon + half_marathon;
487scalar(litres) two_pools = swimming_pool + swimming_pool;
488marathon + swimming_pool;                                       $\C{// compilation ERROR}$
489\end{lstlisting}
490@scalar@ is a dtype-static type, so all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@.
491These implementations may even be separately compiled, unlike \CC template functions.
492However, the \CFA type-checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume.
493
494
495\section{Tuples}
496\label{sec:tuples}
497
498In many languages, functions can return at most one value;
499however, many operations have multiple outcomes, some exceptional.
500Consider C's @div@ and @remquo@ functions, which return the quotient and remainder for a division of integer and floating-point values, respectively.
501\begin{lstlisting}
502typedef struct { int quo, rem; } div_t;         $\C{// from include stdlib.h}$
503div_t div( int num, int den );
504double remquo( double num, double den, int * quo );
505div_t qr = div( 13, 5 );                                        $\C{// return quotient/remainder aggregate}$
506int q;
507double r = remquo( 13.5, 5.2, &q );                     $\C{// return remainder, alias quotient}$
508\end{lstlisting}
509@div@ aggregates the quotient/remainder in a structure, while @remquo@ returns the remainder and passes back the quotient through a pointer parameter.
510Both approaches are awkward.
511Alternatively, a programming language can directly support returning multiple values, \eg in \CFA:
512\begin{lstlisting}
513[ int, int ] div( int num, int den );           $\C{// return two integers}$
514[ double, double ] div( double num, double den ); $\C{// return two doubles}$
515int q, r;                                                                       $\C{// overloaded variable names}$
516double q, r;
517[ q, r ] = div( 13, 5 );                                        $\C{// select appropriate div and q, r}$
518[ q, r ] = div( 13.5, 5.2 );                            $\C{// assign into tuple}$
519\end{lstlisting}
520Clearly, this approach is straightforward to understand and use;
521therefore, why do so few programming languages support this obvious feature, or provide it only awkwardly?
522The answer is that there are complex consequences that cascade through multiple aspects of the language, especially the type-system.
523This section shows these consequences and how \CFA handles them.
524
525
526\subsection{Tuple Expressions}
527
528The addition of multiple-return-value functions (MRVF) is useless without a syntax for accepting multiple values at the call-site.
529The simplest mechanism for capturing the return values is variable assignment, allowing the values to be retrieved directly.
530As such, \CFA allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions (as above), called a \emph{tuple}.
531
532However, functions also use \emph{composition} (nested calls), with the direct consequence that MRVFs must also support composition to be orthogonal with single-returning-value functions (SRVF), \eg:
533\begin{lstlisting}
534printf( "%d %d\n", div( 13, 5 ) );                      $\C{// return values separated into arguments}$
535\end{lstlisting}
536Here, the values returned by @div@ are composed with the call to @printf@ by flattening the tuple into separate arguments.
537However, the \CFA type-system must support significantly more complex composition:
538\begin{lstlisting}
539[ int, int ] foo$\(_1\)$( int );                        $\C{// overloaded foo functions}$
540[ double ] foo$\(_2\)$( int );
541void bar( int, double, double );
542bar( foo( 3 ), foo( 3 ) );
543\end{lstlisting}
544The type-resolver only has the tuple return-types to resolve the call to @bar@ as the @foo@ parameters are identical, which involves unifying the possible @foo@ functions with @bar@'s parameter list.
545No combination of @foo@s is an exact match with @bar@'s parameters, so the resolver applies C conversions.
546The minimal cost is @bar( foo@$_1$@( 3 ), foo@$_2$@( 3 ) )@, giving (@int@, {\color{ForestGreen}@int@}, @double@) to (@int@, {\color{ForestGreen}@double@}, @double@) with one {\color{ForestGreen}safe} (widening) conversion from @int@ to @double@ versus ({\color{red}@double@}, {\color{ForestGreen}@int@}, {\color{ForestGreen}@int@}) to ({\color{red}@int@}, {\color{ForestGreen}@double@}, {\color{ForestGreen}@double@}) with one {\color{red}unsafe} (narrowing) conversion from @double@ to @int@ and two safe conversions.
547
548
549\subsection{Tuple Variables}
550
551An important observation from function composition is that new variable names are not required to initialize parameters from an MRVF.
552\CFA also allows declaration of tuple variables that can be initialized from an MRVF, since it can be awkward to declare multiple variables of different types, \eg:
553\begin{lstlisting}
554[ int, int ] qr = div( 13, 5 );                         $\C{// tuple-variable declaration and initialization}$
555[ double, double ] qr = div( 13.5, 5.2 );
556\end{lstlisting}
557where the tuple variable-name serves the same purpose as the parameter name(s).
558Tuple variables can be composed of any types, except for array types, since array sizes are generally unknown in C.
559
560One way to access the tuple-variable components is with assignment or composition:
561\begin{lstlisting}
562[ q, r ] = qr;                                                          $\C{// access tuple-variable components}$
563printf( "%d %d\n", qr );
564\end{lstlisting}
565\CFA also supports \emph{tuple indexing} to access single components of a tuple expression:
566\begin{lstlisting}
567[int, int] * p = &qr;                                           $\C{// tuple pointer}$
568int rem = qr`.1`;                                                       $\C{// access remainder}$
569int quo = div( 13, 5 )`.0`;                                     $\C{// access quotient}$
570p`->0` = 5;                                                                     $\C{// change quotient}$
571bar( qr`.1`, qr );                                                      $\C{// pass remainder and quotient/remainder}$
572rem = [div( 13, 5 ), 42]`.0.1`;                         $\C{// access 2nd component of 1st component of tuple expression}$
573\end{lstlisting}
574
575
576\subsection{Flattening and Restructuring}
577
578In function call contexts, tuples support implicit flattening and restructuring conversions.
579Tuple flattening recursively expands a tuple into the list of its basic components.
580Tuple structuring packages a list of expressions into a value of tuple type, \eg:
581%\lstDeleteShortInline@%
582%\par\smallskip
583%\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}}
584\begin{lstlisting}
585int f( int, int );
586int g( [int, int] );
587int h( int, [int, int] );
588[int, int] x;
589int y;
590f( x );                 $\C{// flatten}$
591g( y, 10 );             $\C{// structure}$
592h( x, y );              $\C{// flatten and structure}$
593\end{lstlisting}
594%\end{lstlisting}
595%&
596%\begin{lstlisting}
597%\end{tabular}
598%\smallskip\par\noindent
599%\lstMakeShortInline@%
600In the call to @f@, @x@ is implicitly flattened so the components of @x@ are passed as the two arguments.
601In the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the parameter type of @g@.
602Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@.
603The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both SRVF and MRVF, and with any number of arguments of arbitrarily complex structure.
604
605
606\subsection{Tuple Assignment}
607
608An assignment where the left side is a tuple type is called \emph{tuple assignment}.
609There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple} and \emph{mass assignment}, respectively.
610%\lstDeleteShortInline@%
611%\par\smallskip
612%\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}}
613\begin{lstlisting}
614int x = 10;
615double y = 3.5;
616[int, double] z;
617z = [x, y];                                                                     $\C{// multiple assignment}$
618[x, y] = z;                                                                     $\C{// multiple assignment}$
619z = 10;                                                                         $\C{// mass assignment}$
620[y, x] = 3.14;                                                          $\C{// mass assignment}$
621\end{lstlisting}
622%\end{lstlisting}
623%&
624%\begin{lstlisting}
625%\end{tabular}
626%\smallskip\par\noindent
627%\lstMakeShortInline@%
628Both kinds of tuple assignment have parallel semantics, so that each value on the left and right side is evaluated before any assignments occur.
629As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function, \eg, @[x, y] = [y, x]@.
630This semantics means mass assignment differs from C cascading assignment (\eg @a = b = c@) in that conversions are applied in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment.
631For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, yielding @y == 3.14@ and @x == 3@;
632whereas, C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, yielding @3@ in @y@ and @x@.
633Finally, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C.
634This example shows mass, multiple, and cascading assignment used in one expression:
635\begin{lstlisting}
636void f( [int, int] );
637f( [x, y] = z = 1.5 );                                          $\C{// assignments in parameter list}$
638\end{lstlisting}
639
640
641\subsection{Member Access}
642
643It is also possible to access multiple fields from a single expression using a \emph{member-access}.
644The result is a single tuple-valued expression whose type is the tuple of the types of the members, \eg:
645\begin{lstlisting}
646struct S { int x; double y; char * z; } s;
647s.[x, y, z] = 0;
648\end{lstlisting}
649Here, the mass assignment sets all members of @s@ to zero.
650Since tuple-index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange, drop, and duplicate components).
651%\lstDeleteShortInline@%
652%\par\smallskip
653%\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}}
654\begin{lstlisting}
655[int, int, long, double] x;
656void f( double, long );
657x.[0, 1] = x.[1, 0];                                            $\C{// rearrange: [x.0, x.1] = [x.1, x.0]}$
658f( x.[0, 3] );                                                          $\C{// drop: f(x.0, x.3)}$
659[int, int, int] y = x.[2, 0, 2];                        $\C{// duplicate: [y.0, y.1, y.2] = [x.2, x.0, x.2]}$
660\end{lstlisting}
661%\end{lstlisting}
662%&
663%\begin{lstlisting}
664%\end{tabular}
665%\smallskip\par\noindent
666%\lstMakeShortInline@%
667It is also possible for a member access to contain other member accesses, \eg:
668\begin{lstlisting}
669struct A { double i; int j; };
670struct B { int * k; short l; };
671struct C { int x; A y; B z; } v;
672v.[x, y.[i, j], z.k];                                           $\C{// [v.x, [v.y.i, v.y.j], v.z.k]}$
673\end{lstlisting}
674
675
676\begin{comment}
677\subsection{Casting}
678
679In C, the cast operator is used to explicitly convert between types.
680In \CFA, the cast operator has a secondary use as type ascription.
681That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:
682\begin{lstlisting}
683int f();     // (1)
684double f()// (2)
685
686f();       // ambiguous - (1),(2) both equally viable
687(int)f()// choose (2)
688\end{lstlisting}
689
690Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples.
691Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:
692\begin{lstlisting}
693int f();
694void g();
695
696(void)f()// (1)
697(int)g()// (2)
698\end{lstlisting}
699In C, (1) is a valid cast, which calls @f@ and discards its result.
700On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical.
701Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid.
702
703Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$.
704Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression.
705This approach follows naturally from the way that a cast to @void@ works in C.
706
707For example, in
708\begin{lstlisting}
709[int, int, int] f();
710[int, [int, int], int] g();
711
712([int, double])f();           $\C{// (1)}$
713([int, int, int])g();         $\C{// (2)}$
714([void, [int, int]])g();      $\C{// (3)}$
715([int, int, int, int])g();    $\C{// (4)}$
716([int, [int, int, int]])g()$\C{// (5)}$
717\end{lstlisting}
718
719(1) discards the last element of the return value and converts the second element to @double@.
720Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@.
721If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@.
722Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@).
723
724Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}.
725As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3.
726Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid.
727That is, it is invalid to cast @[int, int]@ to @[int, int, int]@.
728\end{comment}
729
730
731\subsection{Polymorphism}
732
733Tuples also integrate with \CFA polymorphism as a kind of generic type.
734Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types, \eg:
735\begin{lstlisting}
736forall(otype T, dtype U) void f( T x, U * y );
737f( [5, "hello"] );
738\end{lstlisting}
739where @[5, "hello"]@ is flattened, giving argument list @5, "hello"@, and @T@ binds to @int@ and @U@ binds to @const char@.
740Tuples, however, may contain polymorphic components.
741For example, a plus operator can be written to add two triples together.
742\begin{lstlisting}
743forall(otype T | { T ?+?( T, T ); }) [T, T, T] ?+?( [T, T, T] x, [T, T, T] y ) {
744        return [x.0 + y.0, x.1 + y.1, x.2 + y.2];
745}
746[int, int, int] x;
747int i1, i2, i3;
748[i1, i2, i3] = x + ([10, 20, 30]);
749\end{lstlisting}
750
751Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions.
752\begin{lstlisting}
753int f( [int, double], double );
754forall(otype T, otype U | { T f( T, U, U ); }) void g( T, U );
755g( 5, 10.21 );
756\end{lstlisting}
757Hence, function parameter and return lists are flattened for the purposes of type unification, allowing the example to pass expression resolution.
758This relaxation is possible by extending the thunk scheme described by Bilson~\cite{Bilson03}.
759Whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function:
760\begin{lstlisting}
761int _thunk( int _p0, double _p1, double _p2 ) { return f( [_p0, _p1], _p2 ); }
762\end{lstlisting}
763so the thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism.
764These thunks take advantage of GCC C nested-functions to produce closures that have the usual function-pointer signature.
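A sketch of the underlying GCC mechanism in plain C follows; the tuple parameter of @f@ is shown lowered to a structure, and the names are illustrative:
\begin{lstlisting}
// GCC C sketch: a nested function yields a closure with an ordinary function-pointer signature
struct _tuple2_int_double { int field_0; double field_1; };         // lowered [int, double]
int f( struct _tuple2_int_double, double );                         // function found by resolution
void g( int (* fp)( int, double, double ) );                        // assertion wants flattened parameters
void _caller( void ) {
	int _thunk( int _p0, double _p1, double _p2 ) {                 // GCC nested function (non-standard C)
		return f( (struct _tuple2_int_double){ _p0, _p1 }, _p2 );   // restructure into the tuple
	}
	g( _thunk );                                                     // passed as a plain function pointer
}
\end{lstlisting}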
765
766
767\subsection{Variadic Tuples}
768\label{sec:variadic-tuples}
769
770To define variadic functions, \CFA adds a new kind of type parameter, @ttype@ (tuple type).
771Matching against a @ttype@ parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types.
772In a given parameter list, there must be at most one @ttype@ parameter, and it must occur last, which matches normal variadic semantics and strongly resembles \CCeleven variadic templates.
773As such, @ttype@ variables are also called \emph{argument packs}.
774
775Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is via recursion.
776Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful.
777Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled.
778For example, a generalized @sum@ function written using @ttype@:
779\begin{lstlisting}
780int sum$\(_0\)$() { return 0; }
781forall(ttype Params | { int sum( Params ); } ) int sum$\(_1\)$( int x, Params rest ) {
782        return x + sum( rest );
783}
784sum( 10, 20, 30 );
785\end{lstlisting}
786Since @sum@\(_0\) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@.
787In order to call @sum@\(_1\), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list, binding @Params@ to @[20, 30]@.
788The process continues until @Params@ is bound to @[]@, requiring an assertion @int sum()@, which matches @sum@\(_0\) and terminates the recursion.
789Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10 + sum(20, 30)@ $\rightarrow$ @10 + (20 + sum(30))@ $\rightarrow$ @10 + (20 + (30 + sum()))@ $\rightarrow$ @10 + (20 + (30 + 0))@.
790
791It is reasonable to take the @sum@ function a step further to enforce a minimum number of arguments:
792\begin{lstlisting}
793int sum( int x, int y ) { return x + y; }
794forall(ttype Params | { int sum( int, Params ); } ) int sum( int x, int y, Params rest ) {
795        return sum( x + y, rest );
796}
797\end{lstlisting}
798One more step permits the summation of any summable type with all arguments of the same type:
799\begin{lstlisting}
800trait summable(otype T) {
801        T ?+?( T, T );
802};
803forall(otype R | summable( R ) ) R sum( R x, R y ) {
804        return x + y;
805}
806forall(otype R, ttype Params | summable(R) | { R sum(R, Params); } ) R sum(R x, R y, Params rest) {
807        return sum( x + y, rest );
808}
809\end{lstlisting}
Unlike C variadic functions, it is unnecessary to hard-code the number and expected types of the parameters.
811Furthermore, this code is extendable for any user-defined type with a @?+?@ operator.
812Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators.
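For example, a minimal sketch of such a heterogeneous @sum@ (illustrative only, not \CFA library code) adds a second type parameter for the next argument and the corresponding addition assertion:
\begin{lstlisting}
forall(otype R, otype S | { R ?+?( R, S ); }) R sum( R x, S y ) {
	return x + y;
}
forall(otype R, otype S, ttype Params | { R ?+?( R, S ); R sum( R, Params ); })
R sum( R x, S y, Params rest ) {
	return sum( x + y, rest );
}
\end{lstlisting}
Resolution of the recursive @sum@ assertion then proceeds exactly as in the earlier homogeneous examples.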
813
814It is also possible to write a type-safe variadic print function to replace @printf@:
815\begin{lstlisting}
816struct S { int x, y; };
817forall(otype T, ttype Params | { void print(T); void print(Params); }) void print(T arg, Params rest) {
818        print(arg);  print(rest);
819}
820void print( char * x ) { printf( "%s", x ); }
821void print( int x ) { printf( "%d", x ); }
822void print( S s ) { print( "{ ", s.x, ",", s.y, " }" ); }
823print( "s = ", (S){ 1, 2 }, "\n" );
824\end{lstlisting}
825This example showcases a variadic-template-like decomposition of the provided argument list.
826The individual @print@ functions allow printing a single element of a type.
The polymorphic @print@ allows printing any list of types, provided each individual type has a @print@ function.
The individual print functions can be used to build up more complicated @print@ functions, such as the one for @S@, which cannot be done with @printf@ in C.
829
830Finally, it is possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions.
831For example, it is possible to write @new@ as a library function:
832\begin{lstlisting}
833forall( otype R, otype S ) void ?{}( pair(R, S) *, R, S );
834forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params ); } ) T * new( Params p ) {
835        return ((T *)malloc()){ p };                    $\C{// construct into result of malloc}$
836}
837pair( int, char ) * x = new( 42, '!' );
838\end{lstlisting}
839The @new@ function provides the combination of type-safe @malloc@ with a \CFA constructor call, making it impossible to forget constructing dynamically allocated objects.
840This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference.
841
842
843\subsection{Implementation}
844
845Tuples are implemented in the \CFA translator via a transformation into \emph{generic types}.
For each $N$, the first time an $N$-tuple is seen in a scope, a generic type with $N$ type parameters is generated, \eg:
847\begin{lstlisting}
848[int, int] f() {
849        [double, double] x;
850        [int, double, int] y;
851}
852\end{lstlisting}
853is transformed into:
854\begin{lstlisting}
855forall(dtype T0, dtype T1 | sized(T0) | sized(T1)) struct _tuple2 {
856        T0 field_0;                                                             $\C{// generated before the first 2-tuple}$
857        T1 field_1;
858};
859_tuple2(int, int) f() {
860        _tuple2(double, double) x;
861        forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2)) struct _tuple3 {
862                T0 field_0;                                                     $\C{// generated before the first 3-tuple}$
863                T1 field_1;
864                T2 field_2;
865        };
866        _tuple3(int, double, int) y;
867}
868\end{lstlisting}
869\begin{sloppypar}
870Tuple expressions are then simply converted directly into compound literals, \eg @[5, 'x', 1.24]@ becomes @(_tuple3(int, char, double)){ 5, 'x', 1.24 }@.
871\end{sloppypar}
872
873\begin{comment}
874Since tuples are essentially structures, tuple indexing expressions are just field accesses:
875\begin{lstlisting}
876void f(int, [double, char]);
877[int, double] x;
878
879x.0+x.1;
880printf("%d %g\n", x);
881f(x, 'z');
882\end{lstlisting}
883Is transformed into:
884\begin{lstlisting}
885void f(int, _tuple2(double, char));
886_tuple2(int, double) x;
887
888x.field_0+x.field_1;
889printf("%d %g\n", x.field_0, x.field_1);
890f(x.field_0, (_tuple2){ x.field_1, 'z' });
891\end{lstlisting}
892Note that due to flattening, @x@ used in the argument position is converted into the list of its fields.
893In the call to @f@, the second and third argument components are structured into a tuple argument.
894Similarly, tuple member expressions are recursively expanded into a list of member access expressions.
895
896Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion.
897Each unique expression is assigned an identifier and is guaranteed to be executed exactly once:
898\begin{lstlisting}
899void g(int, double);
900[int, double] h();
901g(h());
902\end{lstlisting}
903Internally, this expression is converted to two variables and an expression:
904\begin{lstlisting}
905void g(int, double);
906[int, double] h();
907
908_Bool _unq0_finished_ = 0;
909[int, double] _unq0;
910g(
911        (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0,
912        (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1,
913);
914\end{lstlisting}
915Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order.
916The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true.
917Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression.
918Tuple member expressions also take advantage of unique expressions in the case of possible impurity.
919
920Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure.
921This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions.
922
923The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions.
924A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, \eg in a unique expression.
925The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language.
926However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new.
927\end{comment}
928
929
930\section{Control Structures}
931
932
933\subsection{\texorpdfstring{Labelled \LstKeywordStyle{continue} / \LstKeywordStyle{break}}{Labelled continue / break}}
934
935While C provides @continue@ and @break@ statements for altering control flow, both are restricted to one level of nesting for a particular control structure.
936Unfortunately, this restriction forces programmers to use @goto@ to achieve the equivalent control-flow for more than one level of nesting.
937To prevent having to switch to the @goto@, \CFA extends the @continue@ and @break@ with a target label to support static multi-level exit~\cite{Buhr85}, as in Java.
938For both @continue@ and @break@, the target label must be directly associated with a @for@, @while@ or @do@ statement;
939for @break@, the target label can also be associated with a @switch@, @if@ or compound (@{}@) statement.
940Figure~\ref{f:MultiLevelExit} shows @continue@ and @break@ indicating the specific control structure, and the corresponding C program using only @goto@ and labels.
941The innermost loop has 7 exit points, which cause continuation or termination of one or more of the 7 nested control-structures.
942
943\begin{figure}
944\lstDeleteShortInline@%
945\begin{tabular}{@{\hspace{\parindentlnth}}l@{\hspace{\parindentlnth}}l@{\hspace{\parindentlnth}}l@{}}
946\multicolumn{1}{@{\hspace{\parindentlnth}}c@{\hspace{\parindentlnth}}}{\textbf{\CFA}}   & \multicolumn{1}{@{\hspace{\parindentlnth}}c}{\textbf{C}}      \\
947\begin{cfa}
948`LC:` {
949        ... $declarations$ ...
950        `LS:` switch ( ... ) {
951          case 3:
952                `LIF:` if ( ... ) {
953                        `LF:` for ( ... ) {
954                                `LW:` while ( ... ) {
955                                        ... break `LC`; ...
956                                        ... break `LS`; ...
957                                        ... break `LIF`; ...
958                                        ... continue `LF;` ...
959                                        ... break `LF`; ...
960                                        ... continue `LW`; ...
961                                        ... break `LW`; ...
962                                } // while
963                        } // for
964                } else {
965                        ... break `LIF`; ...
966                } // if
967        } // switch
968} // compound
969\end{cfa}
970&
971\begin{cfa}
972{
973        ... $declarations$ ...
974        switch ( ... ) {
975          case 3:
976                if ( ... ) {
977                        for ( ... ) {
978                                while ( ... ) {
979                                        ... goto `LC`; ...
980                                        ... goto `LS`; ...
981                                        ... goto `LIF`; ...
982                                        ... goto `LFC`; ...
983                                        ... goto `LFB`; ...
984                                        ... goto `LWC`; ...
985                                        ... goto `LWB`; ...
986                                  `LWC`: ; } `LWB:` ;
987                          `LFC:` ; } `LFB:` ;
988                } else {
989                        ... goto `LIF`; ...
		} `LIF:` ;
991        } `LS:` ;
992} `LC:` ;
993\end{cfa}
994&
995\begin{cfa}
996
997
998
999
1000
1001
1002
1003// terminate compound
1004// terminate switch
1005// terminate if
1006// continue loop
1007// terminate loop
1008// continue loop
1009// terminate loop
1010
1011
1012
1013// terminate if
1014
1015
1016
1017\end{cfa}
1018\end{tabular}
1019\lstMakeShortInline@%
1020\caption{Multi-level Exit}
1021\label{f:MultiLevelExit}
1022\end{figure}
1023
1024Both labelled @continue@ and @break@ are a @goto@ restricted in the following ways:
1025\begin{itemize}
1026\item
1027They cannot create a loop, which means only the looping constructs cause looping.
1028This restriction means all situations resulting in repeated execution are clearly delineated.
1029\item
1030They cannot branch into a control structure.
1031This restriction prevents missing declarations and/or initializations at the start of a control structure resulting in undefined behaviour.
1032\end{itemize}
1033The advantage of the labelled @continue@/@break@ is allowing static multi-level exits without having to use the @goto@ statement, and tying control flow to the target control structure rather than an arbitrary point in a program.
1034Furthermore, the location of the label at the \emph{beginning} of the target control structure informs the reader (eye candy) that complex control-flow is occurring in the body of the control structure.
1035With @goto@, the label is at the end of the control structure, which fails to convey this important clue early enough to the reader.
1036Finally, using an explicit target for the transfer instead of an implicit target allows new constructs to be added or removed without affecting existing constructs.
1037The implicit targets of the current @continue@ and @break@, \ie the closest enclosing loop or @switch@, change as certain constructs are added or removed.
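For instance, the following sketch (hypothetical code, not from Figure~\ref{f:MultiLevelExit}) shows how the implicit target of an unlabelled @break@ silently changes when an enclosing construct is added during maintenance:
\begin{cfa}
while ( ... ) {
	... break; ...					$\C{// implicit target: the while loop}$
}
while ( ... ) {
	switch ( ... ) {				$\C{// enclosing switch added later}$
	  case 3:
		... break; ...				$\C{// implicit target silently becomes the switch}$
	}
}
\end{cfa}
A labelled @break@ naming the loop is unaffected by such a change.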
1038
1039\TODO{choose and fallthrough here as well?}
1040
1041
1042\subsection{\texorpdfstring{\LstKeywordStyle{with} Clause / Statement}{with Clause / Statement}}
1043\label{s:WithClauseStatement}
1044
Grouping heterogeneous data into \newterm{aggregate}s (structure/union) is a common programming practice, and an aggregate can be further organized into more complex structures, such as arrays and containers:
1046\begin{cfa}
1047struct S {                                                                      $\C{// aggregate}$
1048        char c;                                                                 $\C{// fields}$
1049        int i;
1050        double d;
1051};
1052S s, as[10];
1053\end{cfa}
1054However, routines manipulating aggregates must repeat the aggregate name to access its containing fields:
1055\begin{cfa}
1056void f( S s ) {
1057        `s.`c; `s.`i; `s.`d;                                    $\C{// access containing fields}$
1058}
1059\end{cfa}
1060A similar situation occurs in object-oriented programming, \eg \CC:
1061\begin{C++}
1062class C {
1063        char c;                                                                 $\C{// fields}$
1064        int i;
1065        double d;
1066        int mem() {                                                             $\C{// implicit "this" parameter}$
1067                `this->`c; `this->`i; `this->`d;        $\C{// access containing fields}$
1068        }
1069}
1070\end{C++}
1071Nesting of member routines in a \lstinline[language=C++]@class@ allows eliding \lstinline[language=C++]@this->@ because of lexical scoping.
1072However, for other aggregate parameters, qualification is necessary:
1073\begin{cfa}
1074struct T { double m, n; };
1075int C::mem( T & t ) {                                           $\C{// multiple aggregate parameters}$
1076        c; i; d;                                                                $\C{\color{red}// this-\textgreater.c, this-\textgreater.i, this-\textgreater.d}$
1077        `t.`m; `t.`n;                                                   $\C{// must qualify}$
1078}
1079\end{cfa}
1080
1081% In object-oriented programming, there is an implicit first parameter, often names @self@ or @this@, which is elided.
1082% In any programming language, some functions have a naturally close relationship with a particular data type.
1083% Object-oriented programming allows this close relationship to be codified in the language by making such functions \emph{class methods} of their related data type.
1084% Class methods have certain privileges with respect to their associated data type, notably un-prefixed access to the fields of that data type.
1085% When writing C functions in an object-oriented style, this un-prefixed access is swiftly missed, as access to fields of a @Foo* f@ requires an extra three characters @f->@ every time, which disrupts coding flow and clutters the produced code.
1086%
1087% \TODO{Fill out section. Be sure to mention arbitrary expressions in with-blocks, recent change driven by Thierry to prioritize field name over parameters.}
1088
1089To simplify the programmer experience, \CFA provides a @with@ clause/statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate qualification to fields by opening a scope containing the field identifiers.
Hence, the qualified fields become variables with the side-effect that it is easier to optimize field references in a block.
1091\begin{cfa}
1092void f( S s ) `with( s )` {                                     $\C{// with clause}$
1093        c; i; d;                                                                $\C{\color{red}// s.c, s.i, s.d}$
1094}
1095\end{cfa}
1096and the equivalence for object-style programming is:
1097\begin{cfa}
1098int mem( S & this ) `with( this )` {            $\C{// with clause}$
1099        c; i; d;                                                                $\C{\color{red}// this.c, this.i, this.d}$
1100}
1101\end{cfa}
1102with the generality of opening multiple aggregate-parameters:
1103\begin{cfa}
1104int mem( S & s, T & t ) `with( s, t )` {        $\C{// multiple aggregate parameters}$
1105        c; i; d;                                                                $\C{\color{red}// s.c, s.i, s.d}$
1106        m; n;                                                                   $\C{\color{red}// t.m, t.n}$
1107}
1108\end{cfa}
1109
1110In detail, the @with@ clause/statement has the form:
1111\begin{cfa}
1112$\emph{with-statement}$:
1113        'with' '(' $\emph{expression-list}$ ')' $\emph{compound-statement}$
1114\end{cfa}
1115and may appear as the body of a routine or nested within a routine body.
1116Each expression in the expression-list provides a type and object.
1117The type must be an aggregate type.
1118(Enumerations are already opened.)
1119The object is the implicit qualifier for the open structure-fields.
1120
1121All expressions in the expression list are open in ``parallel'' within the compound statement.
1122This semantic is different from Pascal, which nests the openings.
1123The difference between parallel and nesting occurs for fields with the same name but different type:
1124\begin{cfa}
1125struct S { int i; int j; double m; } s, w;
struct T { int i; int k; int m; } t, w;
1127with( s, t ) {
	j + k;									$\C{// unambiguous, s.j + t.k}$
	m = 5.0;								$\C{// unambiguous, s.m = 5.0}$
	m = 1;									$\C{// unambiguous, t.m = 1}$
	int a = s.i + m;						$\C{// unambiguous, a = s.i + t.m}$
1132        int b = s.i + t.i;                                              $\C{// unambiguous, qualification}$
1133        sout | (double)m | endl;                                $\C{// unambiguous, cast}$
1134        i;                                                                              $\C{// ambiguous}$
1135}
1136\end{cfa}
\CFA's ability to overload variables means fields with the same name can often be automatically disambiguated, eliminating most qualification.
When automatic disambiguation fails, qualification or a cast is used to disambiguate.
A cast may also be necessary to disambiguate among the overloaded variables in a @with@ expression:
1140\begin{cfa}
1141with( w ) { ... }                                                       $\C{// ambiguous, same name and no context}$
1142with( (S)w ) { ... }                                            $\C{// unambiguous}$
1143\end{cfa}

The opened object can also be accessed through a reference or a dereferenced pointer, with nested @with@ statements scoping in the usual lexical manner:
1145\begin{cfa}
1146struct S { int i, j; } sv;
1147with( sv ) {
1148        S & sr = sv;
1149        with( sr ) {
1150                S * sp = &sv;
1151                with( *sp ) {
1152                        i = 3; j = 4;                                   $\C{\color{red}// sp-{\textgreater}i, sp-{\textgreater}j}$
1153                }
1154                i = 3; j = 4;                                           $\C{\color{red}// sr.i, sr.j}$
1155        }
1156        i = 3; j = 4;                                                   $\C{\color{red}// sv.i, sv.j}$
1157}
1158\end{cfa}
1159
1160The statement form is used within a block:
1161\begin{cfa}
1162int foo() {
1163        struct S1 { ... } s1;
1164        struct S2 { ... } s2;
1165        `with( s1 )` {                                                  $\C{// with statement}$
1166                // access fields of s1 without qualification
1167                `with( s2 )` {                                          $\C{// nesting}$
1168                        // access fields of s1 and s2 without qualification
1169                }
1170        }
1171        `with( s1, s2 )` {
1172                // access unambiguous fields of s1 and s2 without qualification
1173        }
1174}
1175\end{cfa}
1176
1177
1178\subsection{Exception Handling ???}
1179
1180
1181\section{Declarations}
1182
1183It is important to the design team that \CFA subjectively ``feel like'' C to user programmers.
1184An important part of this subjective feel is maintaining C's procedural programming paradigm, as opposed to the object-oriented paradigm of other systems languages such as \CC and Rust.
1185Maintaining this procedural paradigm means that coding patterns that work in C will remain not only functional but idiomatic in \CFA, reducing the mental burden of retraining C programmers and switching between C and \CFA development.
Nonetheless, some features of object-oriented languages are undeniably convenient, and the \CFA design team has attempted to adapt them to a procedural paradigm so as to incorporate their benefits into \CFA; two of these features are resource management and name scoping.
1187
1188
1189\subsection{Alternative Declaration Syntax}
1190
1191\newcommand{\R}[1]{\Textbf{#1}}
1192\newcommand{\B}[1]{{\Textbf[blue]{#1}}}
1193\newcommand{\G}[1]{{\Textbf[OliveGreen]{#1}}}
1194
1195C declaration syntax is notoriously confusing and error prone.
1196For example, many C programmers are confused by a declaration as simple as:
1197\begin{flushleft}
1198\lstDeleteShortInline@%
1199\begin{tabular}{@{}ll@{}}
1200\begin{cfa}
1201int * x[5]
1202\end{cfa}
1203&
1204\raisebox{-0.75\totalheight}{\input{Cdecl}}
1205\end{tabular}
1206\lstMakeShortInline@%
1207\end{flushleft}
1208Is this an array of 5 pointers to integers or a pointer to an array of 5 integers?
1209The fact this declaration is unclear to many C programmers means there are productivity and safety issues even for basic programs.
1210Another example of confusion results from the fact that a routine name and its parameters are embedded within the return type, mimicking the way the return value is used at the routine's call site.
1211For example, a routine returning a pointer to an array of integers is defined and used in the following way:
1212\begin{cfa}
1213int `(*`f`())[`5`]` {...};                              $\C{// definition}$
1214 ... `(*`f`())[`3`]` += 1;                              $\C{// usage}$
1215\end{cfa}
1216Essentially, the return type is wrapped around the routine name in successive layers (like an onion).
1217While attempting to make the two contexts consistent is a laudable goal, it has not worked out in practice.
1218
1219\CFA provides its own type, variable and routine declarations, using a different syntax.
1220The new declarations place qualifiers to the left of the base type, while C declarations place qualifiers to the right of the base type.
In the following example, \R{red} is the base type and \B{blue} is the qualifiers.
1222The \CFA declarations move the qualifiers to the left of the base type, \ie move the blue to the left of the red, while the qualifiers have the same meaning but are ordered left to right to specify a variable's type.
1223\begin{quote}
1224\lstDeleteShortInline@%
1225\lstset{moredelim=**[is][\color{blue}]{+}{+}}
1226\begin{tabular}{@{}l@{\hspace{3em}}l@{}}
1227\multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c}{\textbf{C}}        \\
1228\begin{cfa}
1229+[5] *+ `int` x1;
1230+* [5]+ `int` x2;
1231+[* [5] int]+ f`( int p )`;
1232\end{cfa}
1233&
1234\begin{cfa}
1235`int` +*+ x1 +[5]+;
1236`int` +(*+x2+)[5]+;
1237+int (*+f`( int p )`+)[5]+;
1238\end{cfa}
1239\end{tabular}
1240\lstMakeShortInline@%
1241\end{quote}
1242The only exception is bit field specification, which always appear to the right of the base type.
1243% Specifically, the character ©*© is used to indicate a pointer, square brackets ©[©\,©]© are used to represent an array or function return value, and parentheses ©()© are used to indicate a routine parameter.
1244However, unlike C, \CFA type declaration tokens are distributed across all variables in the declaration list.
For instance, variables @x@ and @y@ of type pointer to integer are defined in \CFA as follows:
1246\begin{quote}
1247\lstDeleteShortInline@%
1248\begin{tabular}{@{}l@{\hspace{3em}}l@{}}
1249\multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c}{\textbf{C}}        \\
1250\begin{cfa}
1251`*` int x, y;
1252\end{cfa}
1253&
1254\begin{cfa}
1255int `*`x, `*`y;
1256\end{cfa}
1257\end{tabular}
1258\lstMakeShortInline@%
1259\end{quote}
1260The downside of this semantics is the need to separate regular and pointer declarations:
1261\begin{quote}
1262\lstDeleteShortInline@%
1263\begin{tabular}{@{}l@{\hspace{3em}}l@{}}
1264\multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c}{\textbf{C}}        \\
1265\begin{cfa}
1266`*` int x;
1267int y;
1268\end{cfa}
1269&
1270\begin{cfa}
1271int `*`x, y;
1272
1273\end{cfa}
1274\end{tabular}
1275\lstMakeShortInline@%
1276\end{quote}
which arguably provides a safety benefit.
1278Other examples are:
1279\begin{quote}
1280\lstDeleteShortInline@%
1281\begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{2em}}l@{}}
1282\multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{C}} \\
1283\begin{cfa}
1284[ 5 ] int z;
1285[ 5 ] * char w;
1286* [ 5 ] double v;
1287struct s {
1288        int f0:3;
1289        * int f1;
1290        [ 5 ] * int f2;
1291};
1292\end{cfa}
1293&
1294\begin{cfa}
1295int z[ 5 ];
1296char * w[ 5 ];
1297double (* v)[ 5 ];
1298struct s {
1299        int f0:3;
1300        int * f1;
	int * f2[ 5 ];
1302};
1303\end{cfa}
1304&
1305\begin{cfa}
1306// array of 5 integers
1307// array of 5 pointers to char
1308// pointer to array of 5 doubles
1309
1310// common bit field syntax
1311
1312
1313
1314\end{cfa}
1315\end{tabular}
1316\lstMakeShortInline@%
1317\end{quote}
1318
All type qualifiers, \eg @const@, @volatile@, etc., are used in the normal way with the new declarations and also appear left to right, \eg:
1320\begin{quote}
1321\lstDeleteShortInline@%
1322\begin{tabular}{@{}l@{\hspace{1em}}l@{\hspace{1em}}l@{}}
1323\multicolumn{1}{c@{\hspace{1em}}}{\textbf{\CFA}}        & \multicolumn{1}{c@{\hspace{1em}}}{\textbf{C}} \\
1324\begin{cfa}
1325const * const int x;
1326const * [ 5 ] const int y;
1327\end{cfa}
1328&
1329\begin{cfa}
1330int const * const x;
const int (* const y)[ 5 ];
1332\end{cfa}
1333&
1334\begin{cfa}
1335// const pointer to const integer
1336// const pointer to array of 5 const integers
1337\end{cfa}
1338\end{tabular}
1339\lstMakeShortInline@%
1340\end{quote}
All declaration qualifiers, \eg @extern@, @static@, etc., are used in the normal way with the new declarations but can only appear at the start of a \CFA routine declaration,\footnote{\label{StorageClassSpecifier}
1342The placement of a storage-class specifier other than at the beginning of the declaration specifiers in a declaration is an obsolescent feature.~\cite[\S~6.11.5(1)]{C11}} \eg:
1343\begin{quote}
1344\lstDeleteShortInline@%
1345\begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{2em}}l@{}}
1346\multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{C}} \\
1347\begin{cfa}
1348extern [ 5 ] int x;
1349static * const int y;
1350\end{cfa}
1351&
1352\begin{cfa}
1353int extern x[ 5 ];
1354const int static * y;
1355\end{cfa}
1356&
1357\begin{cfa}
1358// externally visible array of 5 integers
1359// internally visible pointer to constant int
1360\end{cfa}
1361\end{tabular}
1362\lstMakeShortInline@%
1363\end{quote}
1364
The new declaration syntax can be used in other contexts where types are required, \eg casts and the pseudo-routine @sizeof@:
1366\begin{quote}
1367\lstDeleteShortInline@%
1368\begin{tabular}{@{}l@{\hspace{3em}}l@{}}
1369\multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c}{\textbf{C}}        \\
1370\begin{cfa}
1371y = (`* int`)x;
1372i = sizeof(`[ 5 ] * int`);
1373\end{cfa}
1374&
1375\begin{cfa}
1376y = (`int *`)x;
1377i = sizeof(`int * [ 5 ]`);
1378\end{cfa}
1379\end{tabular}
1380\lstMakeShortInline@%
1381\end{quote}
1382
1383Finally, new \CFA declarations may appear together with C declarations in the same program block, but cannot be mixed within a specific declaration.
1384Therefore, a programmer has the option of either continuing to use traditional C declarations or take advantage of the new style.
1385Clearly, both styles need to be supported for some time due to existing C-style header-files, particularly for UNIX systems.
1386
1387
1388\subsection{References}
1389
1390All variables in C have an \emph{address}, a \emph{value}, and a \emph{type}; at the position in the program's memory denoted by the address, there exists a sequence of bits (the value), with the length and semantic meaning of this bit sequence defined by the type.
The C type system does not always track the relationship between a value and its address; a value that does not have a corresponding address is called an \emph{rvalue} (for ``right-hand value''), while a value that does have an address is called an \emph{lvalue} (for ``left-hand value''); in @int x; x = 42;@ the variable expression @x@ on the left-hand-side of the assignment is an lvalue, while the constant expression @42@ on the right-hand-side of the assignment is an rvalue.
1392Which address a value is located at is sometimes significant; the imperative programming paradigm of C relies on the mutation of values at specific addresses.
Within a lexical scope, lvalue expressions can be used in either their \emph{address interpretation} to determine where a mutated value should be stored or in their \emph{value interpretation} to refer to their stored value; in @x = y;@ in @{ int x, y = 7; x = y; }@, @x@ is used in its address interpretation, while @y@ is used in its value interpretation.
1394Though this duality of interpretation is useful, C lacks a direct mechanism to pass lvalues between contexts, instead relying on \emph{pointer types} to serve a similar purpose.
In C, for any type @T@ there is a pointer type @T*@, the value of which is the address of a value of type @T@; a pointer rvalue can be explicitly \emph{dereferenced} to the pointed-to lvalue with the dereference operator @*?@, while the rvalue representing the address of an lvalue can be obtained with the address-of operator @&?@.
1396
1397\begin{cfa}
1398int x = 1, y = 2, * p1, * p2, ** p3;
1399p1 = &x;  $\C{// p1 points to x}$
1400p2 = &y;  $\C{// p2 points to y}$
1401p3 = &p1;  $\C{// p3 points to p1}$
1402*p2 = ((*p1 + *p2) * (**p3 - *p1)) / (**p3 - 15);
1403\end{cfa}
1404
1405Unfortunately, the dereference and address-of operators introduce a great deal of syntactic noise when dealing with pointed-to values rather than pointers, as well as the potential for subtle bugs.
1406For both brevity and clarity, it would be desirable to have the compiler figure out how to elide the dereference operators in a complex expression such as the assignment to @*p2@ above.
1407However, since C defines a number of forms of \emph{pointer arithmetic}, two similar expressions involving pointers to arithmetic types (\eg @*p1 + x@ and @p1 + x@) may each have well-defined but distinct semantics, introducing the possibility that a user programmer may write one when they mean the other, and precluding any simple algorithm for elision of dereference operators.
1408To solve these problems, \CFA introduces reference types @T&@; a @T&@ has exactly the same value as a @T*@, but where the @T*@ takes the address interpretation by default, a @T&@ takes the value interpretation by default, as below:
1409
1410\begin{cfa}
int x = 1, y = 2, & r1, & r2, && r3;
1412&r1 = &x;  $\C{// r1 points to x}$
1413&r2 = &y;  $\C{// r2 points to y}$
&&r3 = &&r1;  $\C{// r3 points to r1}$
1415r2 = ((r1 + r2) * (r3 - r1)) / (r3 - 15);  $\C{// implicit dereferencing}$
1416\end{cfa}
1417
1418Except for auto-dereferencing by the compiler, this reference example is exactly the same as the previous pointer example.
1419Hence, a reference behaves like a variable name -- an lvalue expression which is interpreted as a value, but also has the type system track the address of that value.
1420One way to conceptualize a reference is via a rewrite rule, where the compiler inserts a dereference operator before the reference variable for each reference qualifier in the reference variable declaration, so the previous example implicitly acts like:
1421
1422\begin{cfa}
1423`*`r2 = ((`*`r1 + `*`r2) * (`**`r3 - `*`r1)) / (`**`r3 - 15);
1424\end{cfa}
1425
1426References in \CFA are similar to those in \CC, but with a couple important improvements, both of which can be seen in the example above.
1427Firstly, \CFA does not forbid references to references, unlike \CC.
1428This provides a much more orthogonal design for library implementors, obviating the need for workarounds such as @std::reference_wrapper@.
1429
1430Secondly, unlike the references in \CC which always point to a fixed address, \CFA references are rebindable.
1431This allows \CFA references to be default-initialized (\eg to a null pointer), and also to point to different addresses throughout their lifetime.
1432This rebinding is accomplished without adding any new syntax to \CFA, but simply by extending the existing semantics of the address-of operator in C.
In C, the address of an lvalue is always an rvalue, as in general that address is not stored anywhere in memory, and does not itself have an address.
In \CFA, the address of a @T&@ is an lvalue @T*@, as the address of the underlying @T@ is stored in the reference, and can thus be mutated there.
1435The result of this rule is that any reference can be rebound using the existing pointer assignment semantics by assigning a compatible pointer into the address of the reference, \eg @&r1 = &x;@ above.
This rebinding can occur to an arbitrary depth of reference nesting; loosely speaking, nested address-of operators produce an lvalue pointer nested as deeply as the reference they are applied to.
1437These explicit address-of operators can be thought of as ``cancelling out'' the implicit dereference operators, \eg @(&`*`)r1 = &x@ or @(&(&`*`)`*`)r3 = &(&`*`)r1@ or even @(&`*`)r2 = (&`*`)`*`r3@ for @&r2 = &r3@.
1438More precisely:
1439\begin{itemize}
1440        \item
	if @R@ is an rvalue of type {@T &@$_1 \cdots$@ &@$_r$} where $r \ge 1$ references (@&@ symbols) then @&R@ has type {@T `*`&@$_{\color{red}2} \cdots$@ &@$_{\color{red}r}$}, \\ \ie @T@ pointer with $r-1$ references (@&@ symbols).
1442       
1443        \item
1444        if @L@ is an lvalue of type {@T &@$_1 \cdots$@ &@$_l$} where $l \ge 0$ references (@&@ symbols) then @&L@ has type {@T `*`&@$_{\color{red}1} \cdots$@ &@$_{\color{red}l}$}, \\ \ie @T@ pointer with $l$ references (@&@ symbols).
1445\end{itemize}
1446
1447Since pointers and references share the same internal representation, code using either is equally performant; in fact the \CFA compiler converts references to pointers internally, and the choice between them in user code can be made based solely on convenience.
1448By analogy to pointers, \CFA references also allow cv-qualifiers:
1449
1450\begin{cfa}
int x = 0;						$\C{// a mutable variable}$
const int cx = 5;				$\C{// cannot change cx}$
1452const int & cr = cx;    $\C{// cannot change cr's referred value}$
1453&cr = &cx;                              $\C{// rebinding cr allowed}$
1454cr = 7;                                 $\C{// ERROR, cannot change cr}$
1455int & const rc = x;             $\C{// must be initialized, like in \CC}$
1456&rc = &x;                               $\C{// ERROR, cannot rebind rc}$
1457rc = 7;                                 $\C{// x now equal to 7}$
1458\end{cfa}
1459
Given that a reference is meant to represent an lvalue, \CFA provides some syntactic shortcuts when initializing references.
1461There are three initialization contexts in \CFA: declaration initialization, argument/parameter binding, and return/temporary binding.
1462In each of these contexts, the address-of operator on the target lvalue may (in fact, must) be elided.
1463The syntactic motivation for this is clearest when considering overloaded operator-assignment, \eg @int ?+=?(int &, int)@; given @int x, y@, the expected call syntax is @x += y@, not @&x += y@.
1464
More generally, this initialization of references from lvalues rather than pointers is an instance of an ``lvalue-to-reference'' conversion rather than an elision of the address-of operator; this conversion can actually be used in any context in \CFA where an implicit conversion would be allowed.
Similarly, use of the value pointed to by a reference in an rvalue context can be thought of as a ``reference-to-rvalue'' conversion, and \CFA also includes a qualifier-adding ``reference-to-reference'' conversion, analogous to the @T *@ to @const T *@ conversion in standard C.
1467The final reference conversion included in \CFA is ``rvalue-to-reference'' conversion, implemented by means of an implicit temporary.
1468When an rvalue is used to initialize a reference, it is instead used to initialize a hidden temporary value with the same lexical scope as the reference, and the reference is initialized to the address of this temporary.
1469This allows complex values to be succinctly and efficiently passed to functions, without the syntactic overhead of explicit definition of a temporary variable or the runtime cost of pass-by-value.
1470\CC allows a similar binding, but only for @const@ references; the more general semantics of \CFA are an attempt to avoid the \emph{const hell} problem, in which addition of a @const@ qualifier to one reference requires a cascading chain of added qualifiers.
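For example, a minimal sketch of these conversions, where @f@ is a hypothetical routine taking a reference parameter:
\begin{cfa}
void f( int & i );
int x = 2;
f( x );							$\C{// lvalue-to-reference: i refers to x}$
f( x + 3 );						$\C{// rvalue-to-reference: i refers to a hidden temporary holding 5}$
\end{cfa}
In \CC, the second call is only permitted when the parameter is declared @const int &@.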
1471
1472\subsection{Constructors and Destructors}
1473
1474One of the strengths of C is the control over memory management it gives programmers, allowing resource release to be more consistent and precisely timed than is possible with garbage-collected memory management.
1475However, this manual approach to memory management is often verbose, and it is useful to manage resources other than memory (\eg file handles) using the same mechanism as memory.
\CC is well-known for an approach to manual memory management that addresses both these issues, Resource Acquisition Is Initialization (RAII), implemented by means of special \emph{constructor} and \emph{destructor} functions; we have implemented a similar feature in \CFA.
1477While RAII is a common feature of object-oriented programming languages, its inclusion in \CFA does not violate the design principle that \CFA retain the same procedural paradigm as C.
1478In particular, \CFA does not implement class-based encapsulation: neither the constructor nor any other function has privileged access to the implementation details of a type, except through the translation-unit-scope method of opaque structs provided by C.
1479
1480In \CFA, a constructor is a function named @?{}@, while a destructor is a function named @^?{}@; like other \CFA operators, these names represent the syntax used to call the constructor or destructor, \eg @x{ ... };@ or @^x{};@.
1481Every constructor and destructor must have a return type of @void@, and its first parameter must have a reference type whose base type is the type of the object the function constructs or destructs.
1482This first parameter is informally called the @this@ parameter, as in many object-oriented languages, though a programmer may give it an arbitrary name.
1483Destructors must have exactly one parameter, while constructors allow passing of zero or more additional arguments along with the @this@ parameter.
1484
1485\begin{cfa}
1486struct Array {
1487        int * data;
1488        int len;
1489};
1490
1491void ?{}( Array& arr ) {
1492        arr.len = 10;
1493        arr.data = calloc( arr.len, sizeof(int) );
1494}
1495
1496void ^?{}( Array& arr ) {
1497        free( arr.data );
1498}
1499
1500{
1501        Array x;
1502        `?{}(x);`       $\C{// implicitly compiler-generated}$
1503        // ... use x
1504        `^?{}(x);`      $\C{// implicitly compiler-generated}$
1505}
1506\end{cfa}
1507
1508In the example above, a \emph{default constructor} (\ie one with no parameters besides the @this@ parameter) and destructor are defined for the @Array@ struct, a dynamic array of @int@.
1509@Array@ is an example of a \emph{managed type} in \CFA, a type with a non-trivial constructor or destructor, or with a field of a managed type.
1510As in the example, all instances of managed types are implicitly constructed upon allocation, and destructed upon deallocation; this ensures proper initialization and cleanup of resources contained in managed types, in this case the @data@ array on the heap.
The exact details of the placement of these implicit constructor and destructor calls are omitted here for brevity; the interested reader should consult \cite{Schluntz17}.
1512
1513Constructor calls are intended to seamlessly integrate with existing C initialization syntax, providing a simple and familiar syntax to veteran C programmers and allowing constructor calls to be inserted into legacy C code with minimal code changes.
1514As such, \CFA also provides syntax for \emph{copy initialization} and \emph{initialization parameters}:
1515
1516\begin{cfa}
1517void ?{}( Array& arr, Array other );
1518
1519void ?{}( Array& arr, int size, int fill );
1520
1521Array y = { 20, 0xDEADBEEF }, z = y;
1522\end{cfa}
1523
1524Copy constructors have exactly two parameters, the second of which has the same type as the base type of the @this@ parameter; appropriate care is taken in the implementation to avoid recursive calls to the copy constructor when initializing this second parameter.
1525Other constructor calls look just like C initializers, except rather than using field-by-field initialization (as in C), an initialization which matches a defined constructor will call the constructor instead.
1526
1527In addition to initialization syntax, \CFA provides two ways to explicitly call constructors and destructors.
1528Explicit calls to constructors double as a placement syntax, useful for construction of member fields in user-defined constructors and reuse of large storage allocations.
1529While the existing function-call syntax works for explicit calls to constructors and destructors, \CFA also provides a more concise \emph{operator syntax} for both:
1530
1531\begin{cfa}
1532Array a, b;
1533a{};                            $\C{// default construct}$
1534b{ a };                         $\C{// copy construct}$
1535^a{};                           $\C{// destruct}$
1536a{ 5, 0xFFFFFFFF };     $\C{// explicit constructor call}$
1537\end{cfa}
1538
1539To provide a uniform type interface for @otype@ polymorphism, the \CFA compiler automatically generates a default constructor, copy constructor, assignment operator, and destructor for all types.
1540These default functions can be overridden by user-generated versions of them.
1541For compatibility with the standard behaviour of C, the default constructor and destructor for all basic, pointer, and reference types do nothing, while the copy constructor and assignment operator are bitwise copies; if default zero-initialization is desired, the default constructors can be overridden.
1542For user-generated types, the four functions are also automatically generated.
1543@enum@ types are handled the same as their underlying integral type, and unions are also bitwise copied and no-op initialized and destructed.
1544For compatibility with C, a copy constructor from the first union member type is also defined.
1545For @struct@ types, each of the four functions are implicitly defined to call their corresponding functions on each member of the struct.
1546To better simulate the behaviour of C initializers, a set of \emph{field constructors} is also generated for structures.
1547A constructor is generated for each non-empty prefix of a structure's member-list which copy-constructs the members passed as parameters and default-constructs the remaining members.
1548To allow users to limit the set of constructors available for a type, when a user declares any constructor or destructor, the corresponding generated function and all field constructors for that type are hidden from expression resolution; similarly, the generated default constructor is hidden upon declaration of any constructor.
These semantics closely mirror the rule for implicit declaration of constructors in \CC~\cite[p.~186]{ANSI98:C++}.
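As an illustration, the following sketch shows the field constructors generated for a simple structure (the generated signatures appear as comments, since they are produced by the compiler rather than written by the programmer):
\begin{cfa}
struct Point { int x, y; };
// generated field constructors (sketch):
//   void ?{}( Point &, int );			-- initializes x, default-constructs y
//   void ?{}( Point &, int, int );		-- initializes x and y
Point p1 = { 3 };				$\C{// calls the one-argument field constructor}$
Point p2 = { 3, 4 };			$\C{// calls the two-argument field constructor}$
\end{cfa}
Declaring any constructor for @Point@ hides these generated field constructors.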
1550
1551In rare situations user programmers may not wish to have constructors and destructors called; in these cases, \CFA provides an ``escape hatch'' to not call them.
1552If a variable is initialized using the syntax \lstinline|S x @= {}| it will be an \emph{unmanaged object}, and will not have constructors or destructors called.
1553Any C initializer can be the right-hand side of an \lstinline|@=| initializer, \eg  \lstinline|Array a @= { 0, 0x0 }|, with the usual C initialization semantics.
1554In addition to the expressive power, \lstinline|@=| provides a simple path for migrating legacy C code to \CFA, by providing a mechanism to incrementally convert initializers; the \CFA design team decided to introduce a new syntax for this escape hatch because we believe that our RAII implementation will handle the vast majority of code in a desirable way, and we wished to maintain familiar syntax for this common case.
1555
1556\subsection{Default Parameters}
1557
1558
1559\section{Literals}
1560
1561C already includes limited polymorphism for literals -- @0@ can be either an integer or a pointer literal, depending on context, while the syntactic forms of literals of the various integer and floating-point types are very similar, differing from each other only in suffix.
1562In keeping with the general \CFA approach of adding features while respecting ``the C way'' of doing things, we have extended both C's polymorphic zero and typed literal syntax to interoperate with user-defined types, while maintaining a backwards-compatible semantics.
1563
1564\subsection{0/1}
1565
1566In C, @0@ has the special property that it is the only ``false'' value; by the standard, any value which compares equal to @0@ is false, while any value that compares unequal to @0@ is true.
1567As such, an expression @x@ in any boolean context (such as the condition of an @if@ or @while@ statement, or the arguments to an @&&@, @||@, or ternary operator) can be rewritten as @x != 0@ without changing its semantics.
1568The operator overloading feature of \CFA provides a natural means to implement this truth value comparison for arbitrary types, but the C type system is not precise enough to distinguish an equality comparison with @0@ from an equality comparison with an arbitrary integer or pointer.
To provide this precision, \CFA introduces a new type @zero_t@ as the type of the literal @0@ (somewhat analogous to @nullptr_t@ and @nullptr@ in \CCeleven); @zero_t@ can only take the value @0@, but has implicit conversions to the integer and pointer types so that standard C code involving @0@ continues to work properly.
1570With this addition, the \CFA compiler rewrites @if (x)@ and similar expressions to @if (x != 0)@ or the appropriate analogue, and any type @T@ can be made ``truthy'' by defining a single function @int ?!=?(T, zero_t)@.
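For instance, a minimal sketch, where @Queue@ is a made-up type used only for illustration:
\begin{lstlisting}
struct Queue { int size; };
int ?!=?( Queue q, zero_t ) { return q.size != 0; }	$\C{// truth-value comparison}$
Queue q = { 3 };
if ( q ) { ... }										$\C{// rewritten as if ( q != 0 )}$
\end{lstlisting}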
1571
1572\TODO{Clean up and integrate this paragraph} As well, restricted constant overloading is allowed for the values @0@ and @1@, which have special status in C, \eg the value @0@ is both an integer and a pointer literal, so its meaning depends on context.
In addition, several operations are defined in terms of the values @0@ and @1@, \eg:
1574\begin{lstlisting}
1575int x;
if (x) x++;								$\C{// if (x != 0) x += 1;}$
1577\end{lstlisting}
1578Every @if@ and iteration statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result.
1579Due to these rewrite rules, the values @0@ and @1@ have the types @zero_t@ and @one_t@ in \CFA, which allows overloading various operations for new types that seamlessly connect to all special @0@ and @1@ contexts.
1580The types @zero_t@ and @one_t@ have special built in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal.
1581
1582
1583\subsection{Units}
1584
\CFA provides an alternative call syntax (the literal argument precedes the routine name) to convert basic literals into user-defined literals.
1586
1587{\lstset{language=CFA,deletedelim=**[is][]{`}{`},moredelim=**[is][\color{red}]{@}{@}}
1588\begin{cfa}
1589struct Weight { double stones; };
1590
1591void ?{}( Weight & w ) { w.stones = 0; } $\C{// operations}$
1592void ?{}( Weight & w, double w ) { w.stones = w; }
1593Weight ?+?( Weight l, Weight r ) { return (Weight){ l.stones + r.stones }; }
1594
1595Weight @?`st@( double w ) { return (Weight){ w }; } $\C{// backquote for units}$
1596Weight @?`lb@( double w ) { return (Weight){ w / 14.0 }; }
1597Weight @?`kg@( double w ) { return (Weight) { w * 0.1575}; }
1598
1599int main() {
1600        Weight w, hw = { 14 };                  $\C{// 14 stone}$
1601        w = 11@`st@ + 1@`lb@;
1602        w = 70.3@`kg@;
1603        w = 155@`lb@;
1604        w = 0x_9b_u@`lb@;                               $\C{// hexadecimal unsigned weight (155)}$
1605        w = 0_233@`lb@;                                 $\C{// octal weight (155)}$
1606        w = 5@`st@ + 8@`kg@ + 25@`lb@ + hw;
1607}
1608\end{cfa}
1609}%
1610
1611\section{Evaluation}
1612\label{sec:eval}
1613
1614Though \CFA provides significant added functionality over C, these features have a low runtime penalty.
1615In fact, \CFA's features for generic programming can enable faster runtime execution than idiomatic @void *@-based C code.
1616This claim is demonstrated through a set of generic-code-based micro-benchmarks in C, \CFA, and \CC (see stack implementations in Appendix~\ref{sec:BenchmarkStackImplementation}).
1617Since all these languages share a subset essentially comprising standard C, maximal-performance benchmarks would show little runtime variance, other than in length and clarity of source code.
1618A more illustrative benchmark measures the costs of idiomatic usage of each language's features.
1619Figure~\ref{fig:BenchmarkTest} shows the \CFA benchmark tests for a generic stack based on a singly linked-list, a generic pair-data-structure, and a variadic @print@ routine similar to that in Section~\ref{sec:variadic-tuples}.
1620The benchmark test is similar for C and \CC.
1621The experiment uses element types @int@ and @pair(_Bool, char)@, and pushes $N=40M$ elements on a generic stack, copies the stack, clears one of the stacks, finds the maximum value in the other stack, and prints $N/2$ (to reduce graph height) constants.
1622
1623\begin{figure}
1624\begin{lstlisting}[xleftmargin=3\parindentlnth,aboveskip=0pt,belowskip=0pt]
1625int main( int argc, char * argv[] ) {
1626        FILE * out = fopen( "cfa-out.txt", "w" );
1627        int maxi = 0, vali = 42;
1628        stack(int) si, ti;
1629
1630        REPEAT_TIMED( "push_int", N, push( &si, vali ); )
1631        TIMED( "copy_int", ti = si; )
1632        TIMED( "clear_int", clear( &si ); )
1633        REPEAT_TIMED( "pop_int", N,
1634                int xi = pop( &ti ); if ( xi > maxi ) { maxi = xi; } )
1635        REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); )
1636
1637        pair(_Bool, char) maxp = { (_Bool)0, '\0' }, valp = { (_Bool)1, 'a' };
1638        stack(pair(_Bool, char)) sp, tp;
1639
1640        REPEAT_TIMED( "push_pair", N, push( &sp, valp ); )
1641        TIMED( "copy_pair", tp = sp; )
1642        TIMED( "clear_pair", clear( &sp ); )
1643        REPEAT_TIMED( "pop_pair", N,
1644                pair(_Bool, char) xp = pop( &tp ); if ( xp > maxp ) { maxp = xp; } )
1645        REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); )
1646        fclose(out);
1647}
1648\end{lstlisting}
1649\caption{\protect\CFA Benchmark Test}
1650\label{fig:BenchmarkTest}
1651\end{figure}
1652
1653The structure of each benchmark implemented is: C with @void *@-based polymorphism, \CFA with the presented features, \CC with templates, and \CC using only class inheritance for polymorphism, called \CCV.
1654The \CCV variant illustrates an alternative object-oriented idiom where all objects inherit from a base @object@ class, mimicking a Java-like interface;
1655hence runtime checks are necessary to safely down-cast objects.
1656The most notable difference among the implementations is in memory layout of generic types: \CFA and \CC inline the stack and pair elements into corresponding list and pair nodes, while C and \CCV lack such a capability and instead must store generic objects via pointers to separately-allocated objects.
1657For the print benchmark, idiomatic printing is used: the C and \CFA variants used @stdio.h@, while the \CC and \CCV variants used @iostream@; preliminary tests show this distinction has negligible runtime impact.
1658Note, the C benchmark uses unchecked casts as there is no runtime mechanism to perform such checks, while \CFA and \CC provide type-safety statically.
1659
1660Figure~\ref{fig:eval} and Table~\ref{tab:eval} show the results of running the benchmark in Figure~\ref{fig:BenchmarkTest} and its C, \CC, and \CCV equivalents.
1661The graph plots the median of 5 consecutive runs of each program, with an initial warm-up run omitted.
1662All code is compiled at \texttt{-O2} by GCC or G++ 6.2.0, with all \CC code compiled as \CCfourteen.
1663The benchmarks are run on an Ubuntu 16.04 workstation with 16 GB of RAM and a 6-core AMD FX-6300 CPU with 3.5 GHz maximum clock frequency.
1664
1665\begin{figure}
1666\centering
1667\input{timing}
1668\caption{Benchmark Timing Results (smaller is better)}
1669\label{fig:eval}
1670\end{figure}
1671
1672\begin{table}
1673\caption{Properties of benchmark code}
1674\label{tab:eval}
1675\newcommand{\CT}[1]{\multicolumn{1}{c}{#1}}
1676\begin{tabular}{rrrrr}
1677                                                                        & \CT{C}        & \CT{\CFA}     & \CT{\CC}      & \CT{\CCV}             \\ \hline
1678maximum memory usage (MB)                       & 10001         & 2502          & 2503          & 11253                 \\
1679source code size (lines)                        & 247           & 222           & 165           & 339                   \\
1680redundant type annotations (lines)      & 39            & 2                     & 2                     & 15                    \\
1681binary size (KB)                                        & 14            & 229           & 18            & 38                    \\
1682\end{tabular}
1683\end{table}
1684
1685The C and \CCV variants are generally the slowest with the largest memory footprint, because of their less-efficient memory layout and the pointer-indirection necessary to implement generic types;
1686this inefficiency is exacerbated by the second level of generic types in the pair-based benchmarks.
1687By contrast, the \CFA and \CC variants run in roughly equivalent time for both the integer and pair of @_Bool@ and @char@ because the storage layout is equivalent, with the inlined libraries (\ie no separate compilation) and greater maturity of the \CC compiler contributing to its lead.
\CCV is slower than C largely due to the cost of runtime type-checking of down-casts (implemented with @dynamic_cast@).
1689There are two outliers in the graph for \CFA: all prints and pop of @pair@.
Both of these cases result from the complexity of the C-generated polymorphic code, which prevents the GCC compiler from optimizing some dead code and condensing nested calls.
1691A compiler designed for \CFA could easily perform these optimizations.
1692Finally, the binary size for \CFA is larger because of static linking with the \CFA libraries.
1693
\CFA is also competitive in terms of source code size, measured as a proxy for programmer effort.
The line counts in Table~\ref{tab:eval} include implementations of @pair@ and @stack@ types for all four languages for purposes of direct comparison, though it should be noted that \CFA and \CC have pre-written data structures in their standard libraries that programmers would generally use instead.
Use of these standard library types has minimal impact on the performance benchmarks, but shrinks the \CFA and \CC benchmarks to 73 and 54 lines, respectively.
On the other hand, C does not have a generic collections-library in its standard distribution, resulting in frequent reimplementation of such collection types by C programmers.
\CCV does not use the \CC standard template library by construction, and in fact includes the definition of @object@ and wrapper classes for @bool@, @char@, @int@, and @const char *@ in its line count, which inflates this count somewhat, as an actual object-oriented language would include these in the standard library;
with their omission, the \CCV line count is similar to C.
We justify the given line count by noting that many object-oriented languages do not allow implementing new interfaces on library types without subclassing or wrapper types, which may be similarly verbose.

Raw line-count, however, is a fairly rough measure of code complexity;
another important factor is how much type information the programmer must manually specify, especially where that information is not checked by the compiler.
Such unchecked type information produces a heavier documentation burden and increased potential for runtime bugs, and is much less common in \CFA than C, with its manually specified function-pointer arguments and format codes, or \CCV, with its extensive use of un-type-checked downcasts (\eg @object@ to @integer@ when popping a stack, or @object@ to @printable@ when printing the elements of a @pair@).
To quantify this, the ``redundant type annotations'' line in Table~\ref{tab:eval} counts the number of lines on which the type of a known variable is re-specified, either as a format specifier, explicit downcast, type-specific function, or by name in a @sizeof@, struct literal, or @new@ expression.
The \CC benchmark uses two redundant type annotations to create new stack nodes, while the C and \CCV benchmarks have several such annotations spread throughout their code.
The two instances in which the \CFA benchmark still uses redundant type specifiers are to cast the result of a polymorphic @malloc@ call (the @sizeof@ argument is inferred by the compiler).
These uses are similar to the @new@ expressions in \CC, though the \CFA compiler's type resolver should shortly render even these type casts superfluous.
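For concreteness, a minimal sketch of what is and is not counted (the fragments and variable names are illustrative, not drawn from the benchmark sources): in the C fragment, the element type of @ip@ is re-specified in both the @sizeof@ argument and the @printf@ format code, so both lines count, whereas the \CFA polymorphic @malloc@ infers its allocation size from the type of @iq@, so the allocation carries no counted annotation.
\begin{lstlisting}
// C: element type repeated in the sizeof argument and in the format code
int * ip = malloc( sizeof(int) );
*ip = 42;
printf( "%d\n", *ip );

// Cforall: allocation size inferred from the declared type of iq
int * iq = malloc();
*iq = 42;
\end{lstlisting}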


\section{Related Work}


\subsection{Polymorphism}

\CC is the most similar language to \CFA;
both are extensions to C with source and runtime backwards compatibility.
The fundamental difference is in their engineering approach to C compatibility and programmer expectation.
While \CC provides good backwards compatibility with C, it has a steep learning curve for many of its extensions.
For example, polymorphism is provided via three disjoint mechanisms: overloading, inheritance, and templates.
Overloading is restricted because resolution does not use the return type; inheritance requires learning object-oriented programming and coping with a restricted nominal-inheritance hierarchy; templates cannot be separately compiled, resulting in compilation/code bloat and poor error messages; and determining how these mechanisms interact and which to use is confusing.
In contrast, \CFA has a single facility for polymorphic code supporting type-safe separate-compilation of polymorphic functions and generic (opaque) types, which uniformly leverage the C procedural paradigm.
The key mechanism to support separate compilation is \CFA's \emph{explicit} use of assumed properties for a type.
Until \CC concepts~\cite{C++Concepts} are standardized (anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors during template expansion;
furthermore, \CC concepts are restricted to template polymorphism.
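A minimal sketch of the \CFA approach (the generic @min@ is illustrative rather than drawn from the benchmark): the assumed comparison is stated in the @forall@ clause, so the definition can be type-checked and compiled separately from its uses, whereas the equivalent \CC template states no requirement and only reports a missing comparison operator when an instantiation fails.
\begin{lstlisting}
forall( otype T | { int ?<?( T, T ); } )  // required operation is part of the interface
T min( T x, T y ) { return x < y ? x : y; }

int i = min( 2, 3 );         // T bound to int
double d = min( 2.5, 1.5 );  // T bound to double
\end{lstlisting}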

Cyclone~\cite{Grossman06} also provides capabilities for polymorphic functions and existential types, similar to \CFA's @forall@ functions and generic types.
Cyclone existential types can include function pointers in a construct similar to a virtual function-table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process.
Furthermore, Cyclone's polymorphic functions and types are restricted to abstraction over types with the same layout and calling convention as @void *@, \ie only pointer types and @int@.
In \CFA terms, all Cyclone polymorphism must be dtype-static.
While the Cyclone design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, it is more restrictive than \CFA's general model.
Smith and Volpano~\cite{Smith98} present Polymorphic C, an ML dialect with polymorphic functions, C-like syntax, and pointer types; it lacks many of C's features, however, most notably structure types, and so is not a practical C replacement.

Objective-C~\cite{obj-c-book} is an industrially successful extension to C.
However, Objective-C is a radical departure from C, using an object-oriented model with message-passing.
Objective-C did not support type-checked generics until recently \cite{xcode7}, historically using less-efficient runtime checking of object types.
The GObject~\cite{GObject} framework also adds object-oriented programming with runtime type-checking and reference-counting garbage-collection to C;
these features are more intrusive additions than those provided by \CFA, in addition to the runtime overhead of reference-counting.
Vala~\cite{Vala} compiles to GObject-based C, adding the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code-bases.
Java~\cite{Java8} included generic types in Java~5, which are type-checked at compilation and type-erased at runtime, similar to \CFA's.
However, in Java, each object carries its own table of method pointers, while \CFA passes the method pointers separately to maintain a C-compatible layout.
Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens.

D~\cite{D}, Go, and Rust~\cite{Rust} are modern, compiled languages with abstraction features similar to \CFA traits: \emph{interfaces} in D and Go, and \emph{traits} in Rust.
However, each language represents a significant departure from C in terms of language model, and none has the same level of compatibility with C as \CFA.
D and Go are garbage-collected languages, imposing the associated runtime overhead.
The necessity of accounting for data transfer between managed runtimes and the unmanaged C runtime complicates foreign-function interfaces to C.
Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more.
D restricts garbage collection to its own heap by default, while Rust is not garbage-collected, and thus has a lighter-weight runtime more interoperable with C.
Rust also possesses much more powerful abstraction capabilities for writing generic code than Go.
On the other hand, Rust's borrow-checker provides strong safety guarantees but is complex and difficult to learn and imposes a distinctly idiomatic programming style.
\CFA, with its more modest safety features, allows direct ports of C code while maintaining the idiomatic style of the original source.


\subsection{Tuples/Variadics}

Many programming languages have some form of tuple construct and/or variadic functions, \eg SETL, C, KW-C, \CC, D, Go, Java, ML, and Scala.
SETL~\cite{SETL} is a high-level mathematical programming language, with tuples being one of the primary data types.
Tuples in SETL allow subscripting, dynamic expansion, and multiple assignment.
C provides variadic functions through @va_list@ objects, but the programmer is responsible for managing the number of arguments and their types, so the mechanism is type unsafe.
KW-C~\cite{Buhr94a}, a predecessor of \CFA, introduced tuples to C as an extension of the C syntax, taking much of its inspiration from SETL.
The main contributions of that work were adding MRVF, tuple mass and multiple assignment, and record-field access.
\CCeleven introduced @std::tuple@ as a library variadic template structure.
Tuples are a generalization of @std::pair@, in that they allow for arbitrary length, fixed-size aggregation of heterogeneous values.
Operations include @std::get<N>@ to extract values, @std::tie@ to create a tuple of references used for assignment, and lexicographic comparisons.
\CCseventeen proposes \emph{structured bindings}~\cite{Sutter15} to eliminate pre-declaring variables and use of @std::tie@ for binding the results.
This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must be documented with some other mechanism.
Furthermore, structured bindings are not a full replacement for @std::tie@, as they always declare new variables.
Like \CC, D provides tuples through a library variadic-template structure.
Go does not have tuples but supports MRVF.
Java's variadic functions appear similar to C's but are type-safe using homogeneous arrays, which are less useful than \CFA's heterogeneously-typed variadic functions.
Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML~\cite{sml} and Scala~\cite{Scala}, which decompose tuples using pattern matching.
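For comparison with @std::tie@ and structured bindings, a small sketch in \CFA tuple syntax (the @div_mod@ function is illustrative, not part of the benchmark): the multiple return values have explicitly declared types and are decomposed by multiple assignment, with no auxiliary library type.
\begin{lstlisting}
[ int, int ] div_mod( int num, int den ) {  // multiple-return-value function
        return [ num / den, num % den ];
}
int q, r;
[ q, r ] = div_mod( 13, 5 );                // multiple assignment: q == 2, r == 3
\end{lstlisting}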


\section{Conclusion and Future Work}

The goal of \CFA is to provide an evolutionary pathway for large C development-environments to be more productive and safer, while respecting the talent and skill of C programmers.
While other programming languages purport to be a better C, they are in fact new and interesting languages in their own right, but not C extensions.
The purpose of this paper is to introduce \CFA, and showcase language features that illustrate the \CFA type-system and approaches taken to achieve the goal of evolutionary C extension.
The contributions are a powerful type-system using parametric polymorphism and overloading, generic types, and tuples, which all have complex interactions.
The work is a challenging design, engineering, and implementation exercise.
On the surface, the project may appear as a rehash of similar mechanisms in \CC.
However, every \CFA feature is different from its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supporting separate compilation.
All of these new features are being used by the \CFA development-team to build the \CFA runtime-system.
Finally, we demonstrate that \CFA performance for some idiomatic cases is better than C and close to \CC, showing the design is practically applicable.

There is ongoing work on a wide range of \CFA feature extensions, including arrays with size, exceptions, concurrent primitives, modules, and user-defined conversions.
(While all examples in the paper compile and run, a public beta-release of \CFA will take another 8--12 months to finalize these additional extensions.)
In addition, there are interesting future directions for the polymorphism design.
Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions.
\CFA polymorphic functions use dynamic virtual-dispatch;
the runtime overhead of this approach is low, but not as low as inlining, and it may be beneficial to provide a mechanism for performance-sensitive code.
Two promising approaches are an @inline@ annotation at polymorphic function call sites to create a template-specialization of the function (provided the code is visible), and an @inline@ annotation on polymorphic function-definitions to instantiate a specialized version for some set of types (\CC template specialization).
These approaches are not mutually exclusive and allow performance optimizations to be applied only when necessary, without suffering global code-bloat.
In general, we believe separate compilation, producing smaller code, works well with loaded hardware-caches, which may offset the benefit of larger inlined-code.
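As a purely hypothetical sketch of the first approach (no such annotation currently exists in \CFA), a call-site marker might request that a visible polymorphic definition be specialized for the concrete argument types:
\begin{lstlisting}
forall( otype T ) void push( stack(T) * s, T value );  // normally dynamically dispatched

stack(int) s;
inline push( &s, 42 );  // hypothetical: instantiate and inline push for T == int at this call
\end{lstlisting}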


\section{Acknowledgments}

The authors would like to recognize the design assistance of Glen Ditchfield, Richard Bilson, and Thierry Delisle on the features described in this paper, and thank Magnus Madsen and the three anonymous reviewers for valuable feedback.
%This work is supported in part by a corporate partnership with \grantsponsor{Huawei}{Huawei Ltd.}{http://www.huawei.com}, and Aaron Moss and Peter Buhr are funded by the \grantsponsor{Natural Sciences and Engineering Research Council} of Canada.
% the first author's \grantsponsor{NSERC-PGS}{NSERC PGS D}{http://www.nserc-crsng.gc.ca/Students-Etudiants/PG-CS/BellandPostgrad-BelletSuperieures_eng.asp} scholarship.


\bibliographystyle{plain}
\bibliography{pl}


\appendix

\section{Benchmark Stack Implementation}
\label{sec:BenchmarkStackImplementation}

\lstset{basicstyle=\linespread{0.9}\sf\small}

Throughout, @/***/@ designates a counted redundant type annotation.

\smallskip\noindent
\CFA
\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
forall(otype T) struct stack_node {
        T value;
        stack_node(T) * next;
};
forall(otype T) void ?{}(stack(T) * s) { (&s->head){ 0 }; }
forall(otype T) void ?{}(stack(T) * s, stack(T) t) {
        stack_node(T) ** crnt = &s->head;
        for ( stack_node(T) * next = t.head; next; next = next->next ) {
                *crnt = ((stack_node(T) *)malloc()){ next->value }; /***/
                stack_node(T) * acrnt = *crnt;
                crnt = &acrnt->next;
        }
        *crnt = 0;
}
forall(otype T) stack(T) ?=?(stack(T) * s, stack(T) t) {
        if ( s->head == t.head ) return *s;
        clear(s);
        s{ t };
        return *s;
}
forall(otype T) void ^?{}(stack(T) * s) { clear(s); }
forall(otype T) _Bool empty(const stack(T) * s) { return s->head == 0; }
forall(otype T) void push(stack(T) * s, T value) {
        s->head = ((stack_node(T) *)malloc()){ value, s->head }; /***/
}
forall(otype T) T pop(stack(T) * s) {
        stack_node(T) * n = s->head;
        s->head = n->next;
        T x = n->value;
        ^n{};
        free(n);
        return x;
}
forall(otype T) void clear(stack(T) * s) {
        for ( stack_node(T) * next = s->head; next; ) {
                stack_node(T) * crnt = next;
                next = crnt->next;
                delete(crnt);
        }
        s->head = 0;
}
\end{lstlisting}

\medskip\noindent
\CC
\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
template<typename T> class stack {
        struct node {
                T value;
                node * next;
                node( const T & v, node * n = nullptr ) : value(v), next(n) {}
        };
        node * head;
        void copy(const stack<T>& o) {
                node ** crnt = &head;
                for ( node * next = o.head; next; next = next->next ) {
                        *crnt = new node{ next->value }; /***/
                        crnt = &(*crnt)->next;
                }
                *crnt = nullptr;
        }
  public:
        stack() : head(nullptr) {}
        stack(const stack<T>& o) { copy(o); }
        stack(stack<T> && o) : head(o.head) { o.head = nullptr; }
        ~stack() { clear(); }
        stack & operator= (const stack<T>& o) {
                if ( this == &o ) return *this;
                clear();
                copy(o);
                return *this;
        }
        stack & operator= (stack<T> && o) {
                if ( this == &o ) return *this;
                head = o.head;
                o.head = nullptr;
                return *this;
        }
        bool empty() const { return head == nullptr; }
        void push(const T & value) { head = new node{ value, head };  /***/ }
        T pop() {
                node * n = head;
                head = n->next;
                T x = std::move(n->value);
                delete n;
                return x;
        }
        void clear() {
                for ( node * next = head; next; ) {
                        node * crnt = next;
                        next = crnt->next;
                        delete crnt;
                }
                head = nullptr;
        }
};
\end{lstlisting}

\medskip\noindent
C
\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
struct stack_node {
        void * value;
        struct stack_node * next;
};
struct stack new_stack() { return (struct stack){ NULL }; /***/ }
void copy_stack(struct stack * s, const struct stack * t, void * (*copy)(const void *)) {
        struct stack_node ** crnt = &s->head;
        for ( struct stack_node * next = t->head; next; next = next->next ) {
                *crnt = malloc(sizeof(struct stack_node)); /***/
                **crnt = (struct stack_node){ copy(next->value) }; /***/
                crnt = &(*crnt)->next;
        }
        *crnt = 0;
}
_Bool stack_empty(const struct stack * s) { return s->head == NULL; }
void push_stack(struct stack * s, void * value) {
        struct stack_node * n = malloc(sizeof(struct stack_node)); /***/
        *n = (struct stack_node){ value, s->head }; /***/
        s->head = n;
}
void * pop_stack(struct stack * s) {
        struct stack_node * n = s->head;
        s->head = n->next;
        void * x = n->value;
        free(n);
        return x;
}
void clear_stack(struct stack * s, void (*free_el)(void *)) {
        for ( struct stack_node * next = s->head; next; ) {
                struct stack_node * crnt = next;
                next = crnt->next;
                free_el(crnt->value);
                free(crnt);
        }
        s->head = NULL;
}
\end{lstlisting}

\medskip\noindent
\CCV
\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
stack::node::node( const object & v, node * n ) : value( v.new_copy() ), next( n ) {}
void stack::copy(const stack & o) {
        node ** crnt = &head;
        for ( node * next = o.head; next; next = next->next ) {
                *crnt = new node{ *next->value };
                crnt = &(*crnt)->next;
        }
        *crnt = nullptr;
}
stack::stack() : head(nullptr) {}
stack::stack(const stack & o) { copy(o); }
stack::stack(stack && o) : head(o.head) { o.head = nullptr; }
stack::~stack() { clear(); }
stack & stack::operator= (const stack & o) {
        if ( this == &o ) return *this;
        clear();
        copy(o);
        return *this;
}
stack & stack::operator= (stack && o) {
        if ( this == &o ) return *this;
        head = o.head;
        o.head = nullptr;
        return *this;
}
bool stack::empty() const { return head == nullptr; }
void stack::push(const object & value) { head = new node{ value, head }; /***/ }
ptr<object> stack::pop() {
        node * n = head;
        head = n->next;
        ptr<object> x = std::move(n->value);
        delete n;
        return x;
}
void stack::clear() {
        for ( node * next = head; next; ) {
                node * crnt = next;
                next = crnt->next;
                delete crnt;
        }
        head = nullptr;
}
\end{lstlisting}


\begin{comment}

\subsubsection{bench.h}
(\texttt{bench.hpp} is similar.)

\lstinputlisting{evaluation/bench.h}

\subsection{C}

\subsubsection{c-stack.h} ~

\lstinputlisting{evaluation/c-stack.h}

\subsubsection{c-stack.c} ~

\lstinputlisting{evaluation/c-stack.c}

\subsubsection{c-pair.h} ~

\lstinputlisting{evaluation/c-pair.h}

\subsubsection{c-pair.c} ~

\lstinputlisting{evaluation/c-pair.c}

\subsubsection{c-print.h} ~

\lstinputlisting{evaluation/c-print.h}

\subsubsection{c-print.c} ~

\lstinputlisting{evaluation/c-print.c}

\subsubsection{c-bench.c} ~

\lstinputlisting{evaluation/c-bench.c}

\subsection{\CFA}

\subsubsection{cfa-stack.h} ~

\lstinputlisting{evaluation/cfa-stack.h}

\subsubsection{cfa-stack.c} ~

\lstinputlisting{evaluation/cfa-stack.c}

\subsubsection{cfa-print.h} ~

\lstinputlisting{evaluation/cfa-print.h}

\subsubsection{cfa-print.c} ~

\lstinputlisting{evaluation/cfa-print.c}

\subsubsection{cfa-bench.c} ~

\lstinputlisting{evaluation/cfa-bench.c}

\subsection{\CC}

\subsubsection{cpp-stack.hpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-stack.hpp}

\subsubsection{cpp-print.hpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-print.hpp}

\subsubsection{cpp-bench.cpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-bench.cpp}

\subsection{\CCV}

\subsubsection{object.hpp} ~

\lstinputlisting[language=c++]{evaluation/object.hpp}

\subsubsection{cpp-vstack.hpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-vstack.hpp}

\subsubsection{cpp-vstack.cpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-vstack.cpp}

\subsubsection{cpp-vprint.hpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-vprint.hpp}

\subsubsection{cpp-vbench.cpp} ~

\lstinputlisting[language=c++]{evaluation/cpp-vbench.cpp}
\end{comment}

\end{document}

% Local Variables: %
% tab-width: 4 %
% compile-command: "make" %
% End: %