source: doc/generic_types/generic_types.tex @ 3720bf8f

[95448f1e]1% take off review (for line numbers) and anonymous (for anonymization) on submission
[e6dceef]2\documentclass[format=acmlarge,anonymous,review]{acmart}
3% \documentclass[format=acmlarge,review]{acmart}
[06cf47f]4
5\usepackage{xspace,calc,comment}
6\usepackage{upquote}                                                                    % switch curled `'" to straight
7\usepackage{listings}                                                                   % format program code
8
9\makeatletter
10% parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for
11% use rather than use \parident directly.
12\newlength{\parindentlnth}
13\setlength{\parindentlnth}{\parindent}
14
\newlength{\gcolumnposn}                                % temporary hack because lstlisting does not handle tabs correctly
16\newlength{\columnposn}
17\setlength{\gcolumnposn}{2.75in}
18\setlength{\columnposn}{\gcolumnposn}
19\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@commentstyle{#2}}}
20\newcommand{\CRT}{\global\columnposn=\gcolumnposn}
[3720bf8f]21
22\newcommand{\TODO}[1]{\textbf{TODO}: {\itshape #1}} % TODO included
23%\newcommand{\TODO}[1]{} % TODO elided
24% Latin abbreviation
25\newcommand{\abbrevFont}{\textit}       % set empty for no italics
26\newcommand*{\eg}{%
27        \@ifnextchar{,}{\abbrevFont{e}.\abbrevFont{g}.}%
28                {\@ifnextchar{:}{\abbrevFont{e}.\abbrevFont{g}.}%
29                        {\abbrevFont{e}.\abbrevFont{g}.,\xspace}}%
30}%
31\newcommand*{\ie}{%
32        \@ifnextchar{,}{\abbrevFont{i}.\abbrevFont{e}.}%
33                {\@ifnextchar{:}{\abbrevFont{i}.\abbrevFont{e}.}%
34                        {\abbrevFont{i}.\abbrevFont{e}.,\xspace}}%
35}%
36\newcommand*{\etc}{%
37        \@ifnextchar{.}{\abbrevFont{etc}}%
38        {\abbrevFont{etc}.\xspace}%
39}%
40\newcommand{\etal}{%
41        \@ifnextchar{.}{\abbrevFont{et~al}}%
42                {\abbrevFont{et al}.\xspace}%
43}%
44% \newcommand{\eg}{\textit{e}.\textit{g}.,\xspace}
45% \newcommand{\ie}{\textit{i}.\textit{e}.,\xspace}
46% \newcommand{\etc}{\textit{etc}.,\xspace}
[06cf47f]47\makeatother
[1c2c253]48
49% Useful macros
[06cf47f]50\newcommand{\CFA}{C$\mathbf\forall$\xspace} % Cforall symbolic name
51\newcommand{\CC}{\rm C\kern-.1em\hbox{+\kern-.25em+}\xspace} % C++ symbolic name
52\newcommand{\CCeleven}{\rm C\kern-.1em\hbox{+\kern-.25em+}11\xspace} % C++11 symbolic name
53\newcommand{\CCfourteen}{\rm C\kern-.1em\hbox{+\kern-.25em+}14\xspace} % C++14 symbolic name
54\newcommand{\CCseventeen}{\rm C\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name
55\newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name
[4fc45ff]56\newcommand{\CS}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace}
57\newcommand{\Textbf}[1]{{\color{red}\textbf{#1}}}
[a53e10a]58
[1c2c253]59% CFA programming language, based on ANSI C (with some gcc additions)
60\lstdefinelanguage{CFA}[ANSI]{C}{
61        morekeywords={_Alignas,_Alignof,__alignof,__alignof__,asm,__asm,__asm__,_At,_Atomic,__attribute,__attribute__,auto,
[06cf47f]62                _Bool,catch,catchResume,choose,_Complex,__complex,__complex__,__const,__const__,disable,dtype,enable,__extension__,
63                fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue,_Noreturn,one_t,otype,restrict,_Static_assert,
[3ed64ff]64                _Thread_local,throw,throwResume,trait,try,ttype,typeof,__typeof,__typeof__,zero_t},
[1c2c253]65}%
66
67\lstset{
68language=CFA,
69columns=fullflexible,
70basicstyle=\linespread{0.9}\sf,                                                 % reduce line spacing and use sanserif font
71stringstyle=\tt,                                                                                % use typewriter font
72tabsize=4,                                                                                              % 4 space tabbing
[115a868]73xleftmargin=\parindentlnth,                                                             % indent code to paragraph indentation
[06cf47f]74%mathescape=true,                                                                               % LaTeX math escape in CFA code $...$
75escapechar=\$,                                                                                  % LaTeX escape in CFA code
[ea73caf]76keepspaces=true,                                                                                %
[1c2c253]77showstringspaces=false,                                                                 % do not show spaces with cup
78showlines=true,                                                                                 % show blank lines at end of code
79aboveskip=4pt,                                                                                  % spacing above/below code block
80belowskip=3pt,
81% replace/adjust listing characters that look bad in sanserif
82literate={-}{\raisebox{-0.15ex}{\texttt{-}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1
[06cf47f]83        {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 {_}{\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
84        {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2,
85moredelim=**[is][\color{red}]{`}{`},
[1c2c253]86}% lstset
87
88% inline code @...@
89\lstMakeShortInline@
90
91% ACM Information
92\citestyle{acmauthoryear}
[a53e10a]93
94\acmJournal{PACMPL}
95
[06cf47f]96\title{Generic and Tuple Types with Efficient Dynamic Layout in \CFA}
[a53e10a]97
98\author{Aaron Moss}
99\email{a3moss@uwaterloo.ca}
[06ccbc7]100\author{Robert Schluntz}
101\email{rschlunt@uwaterloo.ca}
102\author{Peter Buhr}
[06cf47f]103\email{pabuhr@uwaterloo.ca}
[06ccbc7]104\affiliation{%
105        \institution{University of Waterloo}
106        \department{David R. Cheriton School of Computer Science}
107        \streetaddress{Davis Centre, University of Waterloo}
108        \city{Waterloo}
109        \state{ON}
110        \postcode{N2L 3G1}
111        \country{Canada}
112}
113
[ae6cc8b]114\terms{generic, tuple, variadic, types}
115\keywords{generic types, tuple types, variadic types, polymorphic functions, C, Cforall}
[a53e10a]116
117\begin{CCSXML}
118<ccs2012>
119<concept>
120<concept_id>10011007.10011006.10011008.10011024.10011025</concept_id>
121<concept_desc>Software and its engineering~Polymorphism</concept_desc>
122<concept_significance>500</concept_significance>
123</concept>
124<concept>
125<concept_id>10011007.10011006.10011008.10011024.10011028</concept_id>
126<concept_desc>Software and its engineering~Data types and structures</concept_desc>
127<concept_significance>500</concept_significance>
128</concept>
129<concept>
130<concept_id>10011007.10011006.10011041.10011047</concept_id>
131<concept_desc>Software and its engineering~Source code generation</concept_desc>
132<concept_significance>300</concept_significance>
133</concept>
134</ccs2012>
135\end{CCSXML}
136
137\ccsdesc[500]{Software and its engineering~Polymorphism}
138\ccsdesc[500]{Software and its engineering~Data types and structures}
139\ccsdesc[300]{Software and its engineering~Source code generation}
140
141\begin{abstract}
[ae6cc8b]142The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers. Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers. Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers. This paper describes two \CFA extensions, generic and tuple types, details how their design avoids shortcomings of similar features in C and other C-like languages, and presents experimental results validating the design.
[a53e10a]143\end{abstract}
144
145\begin{document}
146\maketitle
147
148
[4fc45ff]149\section{Introduction and Background}
150
151The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
The \citet{TIOBE} ranks the top 5 most popular programming languages as: Java 16\%, \Textbf{C 7\%}, \Textbf{\CC 5\%}, \CS 4\%, and Python 4\%, for a combined 36\%; the next 50 languages are each below 3\%, with a long tail. The top 3 rankings over the past 30 years are:
[4fc45ff]153\lstDeleteShortInline@
154\begin{center}
155\setlength{\tabcolsep}{10pt}
156\begin{tabular}{@{}r|c|c|c|c|c|c|c@{}}
157                & 2017  & 2012  & 2007  & 2002  & 1997  & 1992  & 1987          \\
158\hline
[a0fc78a]159Java    & 1             & 1             & 1             & 1             & 12    & -             & -                     \\
[4fc45ff]160\hline
[a0fc78a]161\Textbf{C}      & \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{1}& \Textbf{1}& \Textbf{1}    \\
[4fc45ff]162\hline
163\CC             & 3             & 3             & 3             & 3             & 2             & 2             & 4                     \\
164\end{tabular}
165\end{center}
166\lstMakeShortInline@
Love it or hate it, C is extremely popular, widely used, and one of the few systems languages.
In many cases, \CC is used solely as a better C.
Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive.
170
[a95310e3]171\CFA (pronounced ``C-for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers. The four key design goals for \CFA~\citep{Bilson03} are:
[4fc45ff]172(1) The behaviour of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler;
173(2) Standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler;
174(3) \CFA code must be at least as portable as standard C code;
175(4) Extensions introduced by \CFA must be translated in the most efficient way possible.
[b39e3dae]176These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used.
Unfortunately, \CC is actively diverging from C, so incremental additions require significant effort and training, and \CC is burdened with multiple legacy design-choices that cannot be updated.
[b39e3dae]178
179\CFA is currently implemented as a source-to-source translator from \CFA to the GCC-dialect of C~\citep{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)-(3). Ultimately, a compiler is necessary for advanced features and optimal performance.
[06cf47f]180
[a95310e3]181This paper identifies shortcomings in existing approaches to generic and variadic data types in C-like languages and presents a design for generic and variadic types avoiding those shortcomings. Specifically, the solution is both reusable and type-checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions. The new constructs are empirically compared with both standard C and \CC; the results show the new design is comparable in performance.
[4fc45ff]182
[1c2c253]183
184\subsection{Polymorphic Functions}
[1ca52db]185\label{sec:poly-fns}
[1c2c253]186
[4fc45ff]187\CFA's polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}. The signature feature of \CFA is parametric-polymorphic functions where functions are generalized using a @forall@ clause (giving the language its name):
[1c2c253]188\begin{lstlisting}
[06cf47f]189`forall( otype T )` T identity( T val ) { return val; }
190int forty_two = identity( 42 );                         $\C{// T is bound to int, forty\_two == 42}$
[1c2c253]191\end{lstlisting}
[115a868]192The @identity@ function above can be applied to any complete \emph{object type} (or @otype@). The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type. The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor. If this extra information is not needed, \eg for a pointer, the type parameter can be declared as a \emph{data type} (or @dtype@).
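For instance, a function that only manipulates pointers to @T@ never creates or copies a @T@, so a @dtype@ parameter suffices (an illustrative sketch, not a library function):
\begin{lstlisting}
forall( dtype T ) T * first_nonnull( T * a, T * b ) {   $\C{// no size, alignment, or lifetime functions needed}$
	return a ? a : b;                                   $\C{// only pointer values are manipulated}$
}
\end{lstlisting}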
[06ccbc7]193
[b39e3dae]194In \CFA, the polymorphism runtime-cost is spread over each polymorphic call, due to passing more arguments to polymorphic functions; preliminary experiments show this overhead is similar to \CC virtual-function calls. An advantage of this design is that, unlike \CC template-functions, \CFA polymorphic-functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat.
[1c2c253]195
[4fc45ff]196Since bare polymorphic-types provide only a narrow set of available operations, \CFA provides a \emph{type assertion} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable. For example, the function @twice@ can be defined using the \CFA syntax for operator overloading:
[1c2c253]197\begin{lstlisting}
[4fc45ff]198forall( otype T `| { T ?+?(T, T); }` ) T twice( T x ) { return x + x; } $\C{// ? denotes operands}$
[06cf47f]199int val = twice( twice( 3.7 ) );
200\end{lstlisting}
[3720bf8f]201which works for any type @T@ with a matching addition operator. The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@. There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type, as in~\cite{Ada}, in its type analysis.
[a0fc78a]202The first approach has a late conversion from @double@ to @int@ on the final assignment, while the second has an eager conversion to @int@. \CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition.
[1c2c253]203
[115a868]204Crucial to the design of a new programming language are the libraries to access thousands of external software features.
[b39e3dae]205Like \CC, \CFA inherits a massive compatible library-base, where other programming languages must rewrite or provide fragile inter-language communication with C.
[115a868]206A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted floating-point array:
[06cf47f]207\begin{lstlisting}
208void * bsearch( const void * key, const void * base, size_t nmemb, size_t size,
[3720bf8f]209                                int (* compar)( const void *, const void * ));
[06cf47f]210int comp( const void * t1, const void * t2 ) { return *(double *)t1 < *(double *)t2 ? -1 :
[ea73caf]211                                *(double *)t2 < *(double *)t1 ? 1 : 0; }
[115a868]212double vals[10] = { /* 10 floating-point values */ };
[ea73caf]213double key = 5.0;
[115a868]214double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp );      $\C{// search sorted array}$
[06cf47f]215\end{lstlisting}
[115a868]216which can be augmented simply with a generalized, type-safe, \CFA-overloaded wrappers:
[06cf47f]217\begin{lstlisting}
[36e05a2]218forall( otype T | { int ?<?( T, T ); } ) T * bsearch( T key, const T * arr, size_t size ) {
[06cf47f]219        int comp( const void * t1, const void * t2 ) { /* as above with double changed to T */ }
[115a868]220        return (T *)bsearch( &key, arr, size, sizeof(T), comp ); }
[36e05a2]221forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) {
222        T *result = bsearch( key, arr, size );  $\C{// call first version}$
[115a868]223        return result ? result - arr : size; }  $\C{// pointer subtraction includes sizeof(T)}$
[ea73caf]224double * val = bsearch( 5.0, vals, 10 );        $\C{// selection based on return type}$
[06cf47f]225int posn = bsearch( 5.0, vals, 10 );
226\end{lstlisting}
[3720bf8f]227The nested function @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result.
228Providing a hidden @comp@ function in \CC is awkward as lambdas do not use C calling-conventions and template declarations cannot appear at block scope.
[4fc45ff]229As well, an alternate kind of return is made available: position versus pointer to found element.
[06cf47f]230\CC's type-system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a templated @bsearch@.
[1c2c253]231
[115a868]232\CFA has replacement libraries condensing hundreds of existing C functions into tens of \CFA overloaded functions, all without rewriting the actual computations.
233For example, it is possible to write a type-safe \CFA wrapper @malloc@ based on the C @malloc@:
[06cf47f]234\begin{lstlisting}
[115a868]235forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)(void *)malloc( (size_t)sizeof(T) ); }
236int * ip = malloc();                                            $\C{// select type and size from left-hand side}$
237double * dp = malloc();
238struct S {...} * sp = malloc();
239\end{lstlisting}
240where the return type supplies the type/size of the allocation, which is impossible in most type systems.
241
242Call-site inferencing and nested functions provide a localized form of inheritance. For example, the \CFA @qsort@ only sorts in ascending order using @<@. However, it is trivial to locally change this behaviour:
243\begin{lstlisting}
244forall( otype T | { int ?<?( T, T ); } ) void qsort( const T * arr, size_t size ) { /* use C qsort */ }
245{       int ?<?( double x, double y ) { return x `>` y; }       $\C{// locally override behaviour}$
[06cf47f]246        qsort( vals, size );                                    $\C{// descending sort}$
247}
[1c2c253]248\end{lstlisting}
[06cf47f]249Within the block, the nested version of @<@ performs @>@ and this local version overrides the built-in @<@ so it is passed to @qsort@.
[32bcef7]250Hence, programmers can easily form local environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types.
[1c2c253]251
[8396044]252Finally, \CFA allows variable overloading:
[ea73caf]253\lstDeleteShortInline@
254\par\smallskip
255\begin{tabular}{@{}l@{\hspace{\parindent}}|@{\hspace{\parindent}}l@{}}
[06cf47f]256\begin{lstlisting}
257short int MAX = ...;
258int MAX = ...;
259double MAX = ...;
260\end{lstlisting}
[ea73caf]261&
[1c2c253]262\begin{lstlisting}
[06cf47f]263short int s = MAX;  // select correct MAX
264int i = MAX;
265double d = MAX;
[6a86a0a]266\end{lstlisting}
[ea73caf]267\end{tabular}
268\lstMakeShortInline@
269\smallskip\par\noindent
270Hence, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@.
[115a868]271As well, restricted constant overloading is allowed for the values @0@ and @1@, which have special status in C, \eg the value @0@ is both an integer and a pointer literal, so its meaning depends on context.
In addition, several operations are defined in terms of the values @0@ and @1@, \eg:
[115a868]273\begin{lstlisting}
274int x;
275if (x)        // if (x != 0)
276        x++;    //   x += 1;
277\end{lstlisting}
Every @if@ statement in C compares its condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result.
Due to these rewrite rules, the values @0@ and @1@ have the types @zero_t@ and @one_t@ in \CFA, which allows overloading various operations for new types that seamlessly connect to all special @0@ and @1@ contexts.
The types @zero_t@ and @one_t@ have special built-in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal.
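For instance, a user-defined type can overload a comparison with @zero_t@ so that it works directly in conditional contexts (an illustrative sketch):
\begin{lstlisting}
struct fraction { int num, den; };
int ?!=?( fraction f, zero_t z ) { return f.num != 0; } $\C{// enables if (f), while (f), etc.}$
fraction f = { 0, 7 };
if ( f ) { ... }                                        $\C{// rewritten as f != 0, selecting the overload above}$
\end{lstlisting}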
[1c2c253]281
[4fc45ff]282
[1c2c253]283\subsection{Traits}
284
[4fc45ff]285\CFA provides \emph{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each function declaration:
[1c2c253]286\begin{lstlisting}
[ee89ad43]287trait summable( otype T ) {
[a95310e3]288        void ?{}( T *, zero_t );                                $\C{// constructor from 0 literal}$
[06cf47f]289        T ?+?( T, T );                                                  $\C{// assortment of additions}$
290        T ?+=?( T *, T );
291        T ++?( T * );
[115a868]292        T ?++( T * ); };
[a95310e3]293forall( otype T `| summable( T )` ) T sum( T a[$\,$], size_t size ) {  // use trait
294        `T` total = { `0` };                                    $\C{// instantiate T from 0 by calling its constructor}$
[b39e3dae]295        for ( unsigned int i = 0; i < size; i += 1 ) total `+=` a[i]; $\C{// select appropriate +}$
[115a868]296        return total; }
[1c2c253]297\end{lstlisting}
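As a usage sketch, assuming the built-in @int@ operations and @zero_t@ constructor satisfy @summable@:
\begin{lstlisting}
int a[] = { 1, 2, 3, 4 };
int total = sum( a, 4 );                                $\C{// total == 10}$
\end{lstlisting}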
298
[3720bf8f]299In fact, the set of trait operators is incomplete, as there is no assignment requirement for type @T@, but @otype@ is syntactic sugar for the following implicit trait:
[06ccbc7]300\begin{lstlisting}
[4fc45ff]301trait otype( dtype T | sized(T) ) {  // sized is a pseudo-trait for types with known size and alignment
[06cf47f]302        void ?{}( T * );                                                $\C{// default constructor}$
303        void ?{}( T *, T );                                             $\C{// copy constructor}$
304        void ?=?( T *, T );                                             $\C{// assignment operator}$
[115a868]305        void ^?{}( T * ); };                                    $\C{// destructor}$
[06ccbc7]306\end{lstlisting}
[4fc45ff]307Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete type: stack-allocatable, default or copy-initialized, assigned, and deleted.
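For instance, a generic swap needs only the operations implied by @otype@ (a minimal sketch):
\begin{lstlisting}
forall( otype T ) void swap( T * a, T * b ) {
	T tmp = *a;                                         $\C{// copy construct tmp from *a}$
	*a = *b;                                            $\C{// assign}$
	*b = tmp;                                           $\C{// assign; tmp destroyed at end of scope}$
}
\end{lstlisting}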
308% As an example, the @sum@ function produces generated code something like the following (simplified for clarity and brevity)\TODO{fix example, maybe elide, it's likely too long with the more complicated function}:
309% \begin{lstlisting}
310% void abs( size_t _sizeof_M, size_t _alignof_M,
311%               void (*_ctor_M)(void*), void (*_copy_M)(void*, void*),
312%               void (*_assign_M)(void*, void*), void (*_dtor_M)(void*),
313%               _Bool (*_lt_M)(void*, void*), void (*_neg_M)(void*, void*),
314%       void (*_ctor_M_zero)(void*, int),
315%               void* m, void* _rtn ) {                         $\C{// polymorphic parameter and return passed as void*}$
316%                                                                                       $\C{// M zero = { 0 };}$
317%       void* zero = alloca(_sizeof_M);                 $\C{// stack allocate zero temporary}$
318%       _ctor_M_zero(zero, 0);                                  $\C{// initialize using zero\_t constructor}$
319%                                                                                       $\C{// return m < zero ? -m : m;}$
320%       void *_tmp = alloca(_sizeof_M);
321%       _copy_M( _rtn,                                                  $\C{// copy-initialize return value}$
322%               _lt_M( m, zero ) ?                                      $\C{// check condition}$
323%                (_neg_M(m, _tmp), _tmp) :                      $\C{// negate m}$
324%                m);
325%       _dtor_M(_tmp); _dtor_M(zero);                   $\C{// destroy temporaries}$
326% }
327% \end{lstlisting}
328
In summary, the \CFA type-system uses \emph{nominal typing} for concrete types, matching the C type-system, and \emph{structural typing} for polymorphic types.
[a0fc78a]330Hence, trait names play no part in type equivalence;
331the names are simply macros for a list of polymorphic assertions, which are expanded at usage sites.
332Nevertheless, trait names form a logical subtype-hierarchy with @dtype@ at the top, where traits often contain overlapping assertions.
333Traits are used like interfaces in Java or abstract base-classes in \CC, but without the nominal inheritance-relationships.
334Instead, each polymorphic function (or generic type) defines the structural type needed for its execution (polymorphic type-key), and this key is fulfilled at each call site from the lexical environment, which is similar to Go~\citep{Go} interfaces.
335Hence, new lexical scopes and nested functions are used extensively to create local subtypes, as in the @qsort@ example, without having to manage a nominal-inheritance hierarchy.
336(Nominal inheritance can be approximated with traits using marker variables or functions, as is done in Go.)
[06ccbc7]337
[a0fc78a]338% Nominal inheritance can be simulated with traits using marker variables or functions:
339% \begin{lstlisting}
340% trait nominal(otype T) {
341%     T is_nominal;
342% };
343% int is_nominal;                                                               $\C{// int now satisfies the nominal trait}$
344% \end{lstlisting}
345%
346% Traits, however, are significantly more powerful than nominal-inheritance interfaces; most notably, traits may be used to declare a relationship \emph{among} multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems:
347% \begin{lstlisting}
348% trait pointer_like(otype Ptr, otype El) {
349%     lvalue El *?(Ptr);                                                $\C{// Ptr can be dereferenced into a modifiable value of type El}$
350% }
351% struct list {
352%     int value;
353%     list *next;                                                               $\C{// may omit "struct" on type names as in \CC}$
354% };
355% typedef list *list_iterator;
356%
357% lvalue int *?( list_iterator it ) { return it->value; }
358% \end{lstlisting}
359% In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@).
360% While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously.
[1c2c253]361
[a53e10a]362
[06ccbc7]363\section{Generic Types}
364
One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms. Broadly speaking, there are three approaches to creating data structures in C. One approach is to write bespoke data structures for each context in which they are needed. While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures.
A second approach is @void *@-based polymorphism, \eg the C standard-library functions @bsearch@ and @qsort@, which does allow the reuse of common code for common functionality. However, basing all polymorphism on @void *@ eliminates the type-checker's ability to ensure argument types are properly matched, and often requires extra function parameters, pointer indirection, and dynamic allocation that would not otherwise be needed.
A third approach is preprocessor macros, which do allow the generated code to be both generic and type-checked, but errors in the expanded code may be difficult to interpret, and writing and using preprocessor macros is unnatural and inflexible.
[cb4d825]368
[a0fc78a]369Other languages use \emph{generic types}, \eg \CC and Java, to produce type-safe abstract data-types. \CFA also implements generic types that integrate efficiently and naturally with the existing polymorphic functions, while retaining backwards compatibility with C and providing separate compilation. However, for known concrete parameters, the generic type can be inlined, like \CC templates.
[06ccbc7]370
371A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name:
372\begin{lstlisting}
[a0fc78a]373forall( otype R, otype S ) struct pair {
374        R first;
375        S second;
[06ccbc7]376};
[a0fc78a]377forall( otype T ) T value( pair( const char *, T ) p ) { return p.second; }
378forall( dtype F, otype T ) T value_p( pair( F *, T * ) p ) { return *p.second; }
[06ccbc7]379
[a0fc78a]380pair( const char *, int ) p = { "magic", 42 };
[231f01c]381int magic = value( p );
[a0fc78a]382pair( void *, int * ) q = { 0, &p.second };
[06ccbc7]383magic = value_p( q );
384double d = 1.0;
[a0fc78a]385pair( double *, double * ) r = { &d, &d };
[06ccbc7]386d = value_p( r );
387\end{lstlisting}
388
\CFA classifies generic types as either \emph{concrete} or \emph{dynamic}. Concrete types have a fixed memory layout regardless of their type parameters, while dynamic types vary in memory layout depending on their type parameters. A type may have polymorphic parameters but still be concrete, called \emph{dtype-static}. Polymorphic pointers are an example of dtype-static types, \eg @forall(dtype T) T *@ is a polymorphic type, but for any @T@, @T *@ is a fixed-sized pointer, and therefore can be represented by a @void *@ in code generation.
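As an illustrative sketch of this classification, using the @pair@ type above:
\begin{lstlisting}
pair( const char *, int ) p;                    $\C{// concrete: type arguments known, layout fixed at compile time}$
forall( otype T ) void f( pair( T, int ) );     $\C{// dynamic: layout of pair(T, int) depends on T}$
forall( dtype T ) void g( pair( T *, T * ) );   $\C{// dtype-static: all pointers share one fixed layout}$
\end{lstlisting}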
[06ccbc7]390
[a0fc78a]391\CFA generic types also allow checked argument-constraints. For example, the following declaration of a sorted set-type ensures the set key supports equality and relational comparison:
[c1fb1f2f]392\begin{lstlisting}
[a0fc78a]393forall( otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); } ) struct sorted_set;
[c1fb1f2f]394\end{lstlisting}
395
[41cd57c0]396
[a0fc78a]397\subsection{Concrete Generic-Types}
398
The \CFA translator template-expands concrete generic-types into new structure types, affording maximal inlining. To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated structure declarations where appropriate. For example, a function declaration that accepts or returns a concrete generic-type produces a declaration for the instantiated struct in the same scope, which all callers may reuse. The concrete instantiation for @pair( const char *, int )@ is:
[06ccbc7]400\begin{lstlisting}
401struct _pair_conc1 {
[a0fc78a]402        const char * first;
[06ccbc7]403        int second;
404};
405\end{lstlisting}
406
[3720bf8f]407A concrete generic-type with dtype-static parameters is also expanded to a structure type, but this type is used for all matching instantiations. In the above example, the @pair( F *, T * )@ parameter to @value_p@ is such a type; its expansion is below and it is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate:
[06ccbc7]408\begin{lstlisting}
409struct _pair_conc0 {
[a0fc78a]410        void * first;
411        void * second;
[06ccbc7]412};
413\end{lstlisting}
414
[115a868]415
[a0fc78a]416\subsection{Dynamic Generic-Types}
[41cd57c0]417
[3720bf8f]418Though \CFA implements concrete generic-types efficiently, it also has a fully general system for dynamic generic types.
419As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller.
420Dynamic generic-types also have an \emph{offset array} containing structure member-offsets.
421A dynamic generic-union needs no such offset array, as all members are at offset 0 but size and alignment are still necessary.
422Access to members of a dynamic structure is provided at runtime via base-displacement addressing with the structure pointer and the member offset (similar to the @offsetof@ macro), moving a compile-time offset calculation to runtime.
423
424The offset arrays are statically generated where possible.
425If a dynamic generic-type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume the generic type is complete (\ie has a known layout) at any call-site, and the offset array is passed from the caller;
426if the generic type is concrete at the call site, the elements of this offset array can even be statically generated using the C @offsetof@ macro.
427As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void *@, and @_offsetof_pair@ is the offset array passed into @value@ for @pair( const char *, T )@.
428The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) }@.
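A hedged sketch of this lowering, with illustrative generated names:
\begin{lstlisting}
size_t _offsetof_pair[] = { offsetof( _pair_conc1, first ), offsetof( _pair_conc1, second ) };
void * p_second = (char *)p + _offsetof_pair[1];        $\C{// p.second via base-displacement addressing}$
\end{lstlisting}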
429
430In some cases the offset arrays cannot be statically generated. For instance, modularity is generally provided in C by including an opaque forward-declaration of a structure and associated accessor and mutator functions in a header file, with the actual implementations in a separately-compiled @.c@ file.
431\CFA supports this pattern for generic types, but the caller does not know the actual layout or size of the dynamic generic-type, and only holds it by a pointer.
432The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed into a function from that function's caller.
433These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic structure (un@sized@ parameters are forbidden from being used in a context that affects layout).
434Results of these layout functions are cached so that they are only computed once per type per function. %, as in the example below for @pair@.
[231f01c]435% \begin{lstlisting}
[6a86a0a]436% static inline void _layoutof_pair(size_t* _szeof_pair, size_t* _alignof_pair, size_t* _offsetof_pair,
[231f01c]437%               size_t _szeof_R, size_t _alignof_R, size_t _szeof_S, size_t _alignof_S) {
438%     *_szeof_pair = 0; // default values
439%     *_alignof_pair = 1;
[a0fc78a]440%
[231f01c]441%       // add offset, size, and alignment of first field
442%     _offsetof_pair[0] = *_szeof_pair;
443%     *_szeof_pair += _szeof_R;
444%     if ( *_alignof_pair < _alignof_R ) *_alignof_pair = _alignof_R;
[a0fc78a]445%
[231f01c]446%       // padding, offset, size, and alignment of second field
447%     if ( *_szeof_pair & (_alignof_S - 1) )
448%               *_szeof_pair += (_alignof_S - ( *_szeof_pair & (_alignof_S - 1) ) );
449%     _offsetof_pair[1] = *_szeof_pair;
450%     *_szeof_pair += _szeof_S;
451%     if ( *_alignof_pair < _alignof_S ) *_alignof_pair = _alignof_S;
[a0fc78a]452%
[231f01c]453%       // pad to struct alignment
454%     if ( *_szeof_pair & (*_alignof_pair - 1) )
455%               *_szeof_pair += ( *_alignof_pair - ( *_szeof_pair & (*_alignof_pair - 1) ) );
456% }
457% \end{lstlisting}
[3720bf8f]458Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature.
459For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values.
460This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function.
[1ca52db]461
Whether a type is concrete, dtype-static, or dynamic is decided solely by the type parameters and @forall@ clause on a declaration.
This design allows opaque forward declarations of generic types, \eg @forall(otype T) struct Box@; as in C, all uses of @Box(T)@ can be separately compiled, and callers from other translation units know the proper calling conventions to use.
If the definition of a structure type were considered in deciding whether a generic type is dynamic or concrete, some further types could be recognized as dtype-static (\eg @forall(otype T) struct unique_ptr { T * p; }@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}); however, preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off.
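A sketch of the opaque pattern just described, with hypothetical accessor names for @Box@ (interface in a header, layout only in the implementation file):
\begin{lstlisting}
forall( otype T ) struct Box;                   $\C{// header: opaque forward declaration}$
forall( otype T ) Box(T) * new_box( T value );  $\C{// callers hold a Box(T) only by pointer}$
forall( otype T ) T get( Box(T) * b );
\end{lstlisting}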
[a0fc78a]465
[1ca52db]466
[41cd57c0]467\subsection{Applications}
[c1fb1f2f]468\label{sec:generic-apps}
[41cd57c0]469
[a0fc78a]470The reuse of dtype-static structure instantiations enables useful programming patterns at zero runtime cost. The most important such pattern is using @forall(dtype T) T *@ as a type-checked replacement for @void *@, \eg creating a lexicographic comparison for pairs of pointers used by @bsearch@ or @qsort@:
[06ccbc7]471\begin{lstlisting}
[3720bf8f]472forall(dtype T) int lexcmp( pair( T *, T * ) * a, pair( T *, T * ) * b, int (* cmp)( T *, T * ) ) {
[a0fc78a]473        int c = cmp( a->first, b->first );
474        if ( c == 0 ) c = cmp( a->second, b->second );
[06ccbc7]475        return c;
476}
477\end{lstlisting}
[3720bf8f]478Since @pair(T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@, so the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type-checked to ensure the fields of both pairs and the arguments to the comparison function match in type.
[06ccbc7]479
[3720bf8f]480Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \emph{tag-structures}.
481Sometimes information is only used for type-checking and can be omitted at runtime, \eg:
[41c25b8]482\begin{lstlisting}
483forall(dtype Unit) struct scalar { unsigned long value; };
484struct metres {};
485struct litres {};
486
[a0fc78a]487forall(dtype U) scalar(U) ?+?( scalar(U) a, scalar(U) b ) {
[41c25b8]488        return (scalar(U)){ a.value + b.value };
489}
490scalar(metres) half_marathon = { 21093 };
491scalar(litres) swimming_pool = { 2500000 };
492scalar(metres) marathon = half_marathon + half_marathon;
493scalar(litres) two_pools = swimming_pool + swimming_pool;
[a0fc78a]494marathon + swimming_pool;                       $\C{// compilation ERROR}$
[41c25b8]495\end{lstlisting}
[3720bf8f]496@scalar@ is a dtype-static type, so all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@.
497These implementations may even be separately compiled, unlike \CC template functions.
498However, the \CFA type-checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume.
[06ccbc7]499
500\section{Tuples}
[3ed64ff]501\label{sec:tuples}
[06ccbc7]502
[06cf47f]503The @pair(R, S)@ generic type used as an example in the previous section can be considered a special case of a more general \emph{tuple} data structure. The authors have implemented tuples in \CFA, with a design particularly motivated by two use cases: \emph{multiple-return-value functions} and \emph{variadic functions}.
[cb4d825]504
In standard C, functions can return at most one value. This restriction results in code that emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}. In the former situation, the function designer creates a record type that combines all of the return values into a single type. Unfortunately, the designer must invent a name for the return type and for each of its fields. Unnecessary naming is a common programming-language issue, introducing verbosity and complicating the user's mental model. As such, this technique is effective when used sparingly, but can quickly get out of hand if many functions need to return different combinations of types. In the latter approach, the designer simulates multiple return values by passing the additional return values as pointer parameters. The pointer parameters are assigned inside the function body to emulate a return. With this approach, the caller is directly responsible for allocating storage for the additional temporary return values, which complicates the call site with a sequence of variable declarations leading up to the call. Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from the function signature alone whether the callee expects such a parameter to be initialized before the call. Furthermore, while many C functions that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, many other C functions are not null-safe. Finally, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes it more difficult for the compiler to ensure that a value is actually returned.
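A brief sketch of the two C workarounds just described (names are illustrative):
\begin{lstlisting}
struct div2_t { int quot, rem; };               $\C{// aggregation: a one-off record type must be named}$
struct div2_t div2( int a, int b ) { return (struct div2_t){ a / b, a % b }; }
void div3( int a, int b, int * quot, int * rem ) {      $\C{// aliasing: out-parameters}$
	*quot = a / b; *rem = a % b;                $\C{// caller must declare and pass storage}$
}
\end{lstlisting}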
[cb4d825]506
[3ed64ff]507C does provide a mechanism for variadic functions through manipulation of @va_list@ objects, but it is notoriously type-unsafe. A variadic function is one that contains at least one parameter, followed by @...@ as the last token in the parameter list. In particular, some form of \emph{argument descriptor} is needed to inform the function of the number of arguments and their types, commonly a format string or counter parameter. It is important to note that both of these mechanisms are inherently redundant, because they require the user to specify information that the compiler knows explicitly. This required repetition is error prone, because it is easy for the user to add or remove arguments without updating the argument descriptor. In addition, C requires the programmer to hard code all of the possible expected types. As a result, it is cumbersome to write a variadic function that is open to extension. For example, consider a simple function that sums $N$ @int@s:
[cb4d825]508\begin{lstlisting}
509int sum(int N, ...) {
510  va_list args;
511  va_start(args, N);  // must manually specify last non-variadic argument
512  int ret = 0;
513  while(N) {
[a0fc78a]514        ret += va_arg(args, int);  // must specify type
515        N--;
[cb4d825]516  }
517  va_end(args);
518  return ret;
519}
[95448f1e]520
[cb4d825]521sum(3, 10, 20, 30);  // must keep initial counter argument in sync
522\end{lstlisting}
523
[06cf47f]524The @va_list@ type is a special C data type that abstracts variadic argument manipulation. The @va_start@ macro initializes a @va_list@, given the last named parameter. Each use of the @va_arg@ macro allows access to the next variadic argument, given a type. Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call. As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures. In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined~\citep{C11}. Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body. Since they rely on programmer convention rather than compile-time checks, variadic functions are inherently unsafe.
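For instance, both of the following calls to @sum@ compile without complaint but have undefined behaviour:
\begin{lstlisting}
sum( 3, 10, 3.14, 30 );                         $\C{// va\_arg(args, int) reads a double: undefined}$
sum( 4, 10, 20, 30 );                           $\C{// counter out of sync: fourth va\_arg access is undefined}$
\end{lstlisting}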
[cb4d825]525
[3ed64ff]526In practice, compilers can provide warnings to help mitigate some of the problems. For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers. Unfortunately, this attribute does not permit extensions to the format string syntax, so a programmer cannot extend it to warn for mismatches with custom types.
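For example, a @printf@-style wrapper can be annotated so GCC checks its arguments against the format string; the wrapper below is illustrative:
\begin{lstlisting}
void log_msg( const char * fmt, ... ) __attribute__(( format( printf, 1, 2 ) ));
log_msg( "%d items", 3 );                       $\C{// checked: ok}$
log_msg( "%d items", "three" );                 $\C{// warning: \%d expects int, argument is char *}$
\end{lstlisting}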
[cb4d825]527
[3720bf8f]528
[cb4d825]529\subsection{Tuple Expressions}
530
[06cf47f]531The tuple extensions in \CFA can express multiple return values and variadic function parameters in an efficient and type-safe manner. \CFA introduces \emph{tuple expressions} and \emph{tuple types}. A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types. The type of a tuple expression is the tuple of the subexpression types, or a \emph{tuple type}. In \CFA, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets. For example, the expression @[5, 'x', 10.5]@ has type @[int, char, double]@. The previous expression has three \emph{components}. Each component in a tuple expression can be any \CFA expression, including another tuple expression. The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}.
[cb4d825]532
[06cf47f]533\CFA allows declaration of \emph{tuple variables}, variables of tuple type. For example:
[cb4d825]534\begin{lstlisting}
[3720bf8f]535[int, char] most_frequent(const char * );
[cb4d825]536
537const char* str = "hello, world!";
538[int, char] freq = most_frequent(str);
539printf("%s -- %d %c\n", str, freq);
540\end{lstlisting}
541In this example, the type of the @freq@ and the return type of @most_frequent@ are both tuple types. Also of note is how the tuple expression @freq@ is implicitly flattened into separate @int@ and @char@ arguments to @printf@; this code snippet could have been shortened by replacing the last two lines with @printf("%s -- %d %c\n", str, most_frequent(str));@ using exactly the same mechanism.
542
543In addition to variables of tuple type, it is also possible to have pointers to tuples, and arrays of tuples. Tuple types can be composed of any types, except for array types, since arrays are not of fixed size, which makes tuple assignment difficult when a tuple contains an array.
\begin{lstlisting}
[double, int] di;
[double, int] * pdi;
[double, int] adi[10];
\end{lstlisting}
[3ed64ff]549This example declares a variable of type @[double, int]@, a variable of type pointer to @[double, int]@, and an array of ten @[double, int]@.
[cb4d825]550
551\subsection{Flattening and Restructuring}
552
553In function call contexts, tuples support implicit flattening and restructuring conversions. Tuple flattening recursively expands a tuple into the list of its basic components. Tuple structuring packages a list of expressions into a value of tuple type.
554\begin{lstlisting}
555int f(int, int);
556int g([int, int]);
557int h(int, [int, int]);
558[int, int] x;
559int y;
560
561f(x);      // flatten
562g(y, 10);  // structure
563h(x, y);   // flatten & structure
564\end{lstlisting}
In \CFA, each of these calls is valid. In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function-call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure.
[cb4d825]566
[fc19129]567% In {K-W C} \citep{Buhr94a,Till89}, a precursor to \CFA, there were 4 tuple coercions: opening, closing, flattening, and structuring. Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. Flattening coerces a nested tuple into a flat tuple, \ie it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. Structuring moves in the opposite direction, \ie it takes a flat tuple value and provides structure by introducing nested tuple components.
[cb4d825]568
[06cf47f]569In \CFA, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. Specifically, the expression resolution algorithm examines all of the possible alternatives for an expression to determine the best match. In resolving a function call expression, each combination of function value and list of argument alternatives is examined. Given a particular argument list and function value, the list of argument alternatives is flattened to produce a list of non-tuple valued expressions. Then the flattened list of expressions is compared with each value in the function's parameter list. If the parameter's type is not a tuple type, then the current argument value is unified with the parameter type, and on success the next argument and parameter are examined. If the parameter's type is a tuple type, then the structuring conversion takes effect, recursively applying the parameter matching algorithm using the tuple's component types as the parameter list types. Assuming a successful unification, eventually the algorithm gets to the end of the tuple type, which causes all of the matching expressions to be consumed and structured into a tuple expression. For example, in
[cb4d825]570\begin{lstlisting}
571int f(int, [double, int]);
572f([5, 10.2], 4);
573\end{lstlisting}
there is only a single definition of @f@, and 3 arguments, each with a single interpretation. First, the argument alternative list @[5, 10.2], 4@ is flattened to produce the argument list @5, 10.2, 4@. Next, the parameter matching algorithm begins, with $P =~$@int@ and $A =~$@int@, which unifies exactly. Moving to the next parameter and argument, $P =~$@[double, int]@ and $A =~$@double@. This time, the parameter is a tuple type, so the algorithm applies recursively with $P' =~$@double@ and $A =~$@double@, which unifies exactly. Then $P' =~$@int@ and $A =~$@int@, which also unifies exactly. At this point, the end of $P'$ has been reached, so the arguments @10.2, 4@ are structured into the tuple expression @[10.2, 4]@. Finally, the end of the parameter list $P$ has also been reached, so the final expression is @f(5, [10.2, 4])@.
575
576\subsection{Member Access}
577
578At times, it is desirable to access a single component of a tuple-valued expression without creating unnecessary temporary variables to assign to. Given a tuple-valued expression @e@ and a compile-time constant integer $i$ where $0 \leq i < n$, where $n$ is the number of components in @e@, @e.i@ accesses the $i$\textsuperscript{th} component of @e@. For example,
579\begin{lstlisting}
580[int, double] x;
581[char *, int] f();
582void g(double, int);
583[int, double] * p;
584
585int y = x.0;  // access int component of x
586y = f().1;  // access int component of f
587p->0 = 5;  // access int component of tuple pointed-to by p
588g(x.1, x.0);  // rearrange x to pass to g
589double z = [x, f()].0.1;  // access second component of first component of tuple expression
590\end{lstlisting}
[06cf47f]591As seen above, tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple. This feature was proposed for {K-W C}, but never implemented~\citep[p.~45]{Till89}.
[cb4d825]592
593It is possible to access multiple fields from a single expression using a \emph{member-access tuple expression}. The result is a single tuple expression whose type is the tuple of the types of the members. For example,
594\begin{lstlisting}
595struct S { int x; double y; char * z; } s;
596s.[x, y, z];
597\end{lstlisting}
Here, the type of @s.[x, y, z]@ is @[int, double, char *]@. A member tuple expression has the form @a.[x, y, z];@, where @a@ is an expression with type @T@ that supports member-access expressions, and @x, y, z@ are all members of @T@ with types @T$_x$@, @T$_y$@, and @T$_z$@ respectively. Then the type of @a.[x, y, z]@ is @[T$_x$, T$_y$, T$_z$]@.
[cb4d825]599
[06cf47f]600Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange components, drop components, duplicate components, etc.):
[cb4d825]601\begin{lstlisting}
602[int, int, long, double] x;
603void f(double, long);
604
605f(x.[0, 3]);          // f(x.0, x.3)
606x.[0, 1] = x.[1, 0];  // [x.0, x.1] = [x.1, x.0]
607[long, int, long] y = x.[2, 0, 2];
608\end{lstlisting}
609
610It is possible for a member tuple expression to contain other member access expressions:
611\begin{lstlisting}
612struct A { double i; int j; };
613struct B { int * k; short l; };
614struct C { int x; A y; B z; } v;
615v.[x, y.[i, j], z.k];
616\end{lstlisting}
This expression is equivalent to @[v.x, [v.y.i, v.y.j], v.z.k]@. That is, the aggregate expression is effectively distributed across the tuple, which allows simple access to multiple components of an aggregate without repetition. It is guaranteed that the aggregate expression to the left of the @.@ in a member tuple expression is evaluated exactly once. As such, it is safe to use member tuple expressions on the result of a side-effecting function, as sketched below.
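For example, given a hypothetical side-effecting factory function returning the aggregate @C@ above:
\begin{lstlisting}
C make_C( void );                               $\C{// hypothetical side-effecting factory}$
[int, int *] t = make_C().[x, z.k];             $\C{// make\_C() is evaluated exactly once}$
\end{lstlisting}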

\subsection{Tuple Assignment}

In addition to tuple-index expressions, individual components of tuples can be accessed by a \emph{destructuring assignment}, which has a tuple expression with lvalue components on its left-hand side. More generally, an assignment where the left-hand side of the assignment operator has a tuple type is called \emph{tuple assignment}. There are two kinds of tuple assignment depending on whether the right-hand side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple assignment} and \emph{mass assignment}, respectively.
\begin{lstlisting}
int x;
double y;
[int, double] z;
[y, x] = 3.14;  // mass assignment
[x, y] = z;     // multiple assignment
z = 10;         // mass assignment
z = [x, y];     // multiple assignment
\end{lstlisting}
Let $L_i$ for $i$ in $[0, n)$ represent each component of the flattened left-hand side, $R_i$ represent each component of the flattened right-hand side of a multiple assignment, and $R$ represent the right-hand side of a mass assignment.

For a multiple assignment to be valid, both tuples must have the same number of components when flattened. Multiple assignment assigns $R_i$ to $L_i$ for each $i$; that is, @?=?(&$L_i$, $R_i$)@ must be a well-typed expression. In the previous example, @[x, y] = z@, @z@ is flattened into @z.0, z.1@, and the assignments @x = z.0@ and @y = z.1@ are executed.
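Because both sides are flattened, their nesting structure need not match exactly. For instance, the following sketch (consistent with the flattening rule above) assigns across differently nested tuples:
\begin{lstlisting}
[int, [double, int]] a;
[int, double, int] b;
a = b;  // flattened: a.0 = b.0, a.1.0 = b.1, a.1.1 = b.2
\end{lstlisting}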

A mass assignment assigns the value $R$ to each $L_i$. For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. This rule differs from C cascading assignment (\eg @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well.

Both kinds of tuple assignment have parallel semantics, such that each value on the left side and right side is evaluated \emph{before} any assignments occur. As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function:
\begin{lstlisting}
int x = 10, y = 20;
[x, y] = [y, x];
\end{lstlisting}
After executing this code, @x@ has the value @20@ and @y@ has the value @10@.

Tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C. This definition allows cascading tuple assignment and use of tuple assignment in other expression contexts, an occasionally useful idiom to keep code succinct and reduce repetition.
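For instance, the following sketch cascades a tuple assignment and then destructures its result:
\begin{lstlisting}
[int, double] w;
[int, double] z;
int x;  double y;
[x, y] = z = w = [5, 3.14];  // w = [5, 3.14], then z = w, then [x, y] = z
\end{lstlisting}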
% In \CFA, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, as in normal assignment. That is, a tuple assignment produces the value of the left-hand side after assignment. These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. These semantics are a change from the original tuple design in {K-W C}~\citep{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. This decision was made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position. While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA without affecting backwards compatibility with C. Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. In another language, tuple assignment as a statement could be reasonable, but it would be inconsistent for tuple assignment to be the only kind of assignment in \CFA that is not an expression.

\subsection{Casting}

In C, the cast operator is used to explicitly convert between types. In \CFA, the cast operator has a secondary use as type ascription. That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:
\begin{lstlisting}
int f();     // (1)
double f();  // (2)

f();       // ambiguous - (1),(2) both equally viable
(int)f();  // choose (1)
\end{lstlisting}

Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples. Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:
\begin{lstlisting}
int f();
void g();

(void)f();  // (1)
(int)g();  // (2)
\end{lstlisting}
In C, (1) is a valid cast, which calls @f@ and discards its result. On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical. Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or a smaller number of components may be valid.

Formally, a cast from a source tuple type with $m$ components to a target tuple type with $n$ components is valid when $n \leq m$ and, for each $i$ in $[0, n)$, the source component $S_i$ can be cast to the target component $T_i$. Excess source components ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded and not included in the result expression. This approach follows naturally from the way that a cast to @void@ works in C.

For example, in
\begin{lstlisting}
  [int, int, int] f();
  [int, [int, int], int] g();

  ([int, double])f();           $\C{// (1)}$
  ([int, int, int])g();         $\C{// (2)}$
  ([void, [int, int]])g();      $\C{// (3)}$
  ([int, int, int, int])g();    $\C{// (4)}$
  ([int, [int, int, int]])g();  $\C{// (5)}$
\end{lstlisting}

(1) discards the last element of the return value and converts the second element to @double@. Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@. If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@.
Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@.

Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@.

\subsection{Polymorphism}

Tuples also integrate with \CFA polymorphism as a special sort of generic type. Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types.
\begin{lstlisting}
forall(otype T, dtype U)
void f(T x, U * y);

f([5, "hello"]);
\end{lstlisting}
In this example, @[5, "hello"]@ is flattened, so that the argument list appears as @5, "hello"@. The argument matching algorithm binds @T@ to @int@ and @U@ to @const char@, and calls the function as normal.

Tuples, however, may contain polymorphic components. For example, a plus operator can be written to add two triples of a type together.
\begin{lstlisting}
forall(otype T | { T ?+?(T, T); })
[T, T, T] ?+?([T, T, T] x, [T, T, T] y) {
  return [x.0+y.0, x.1+y.1, x.2+y.2];
}
[int, int, int] x;
int i1, i2, i3;
[i1, i2, i3] = x + ([10, 20, 30]);
\end{lstlisting}

Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. Previously in \CFA, assertion arguments had to match the assertion parameter type exactly, modulo polymorphic specialization (\ie no implicit conversions are applied to assertion arguments). Consider the example below:
\begin{lstlisting}
int f([int, double], double);
forall(otype T, otype U | { T f(T, U, U); })
void g(T, U);
g(5, 10.21);
\end{lstlisting}
If assertion arguments must match exactly, then the call to @g@ cannot be resolved, since the expected type of @f@ is flat, while the only @f@ in scope requires a tuple type. Since tuples are implicitly flattened and structured elsewhere in the language, this exact-match requirement reduces the usability of tuples in polymorphic code. To ease this pain point, function parameter and return lists are flattened for the purposes of type unification, which allows the previous example to pass expression resolution.

This relaxation is made possible by extending the existing thunk-generation scheme, as described by \citet{Bilson03}. Whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function:
\begin{lstlisting}
int _thunk(int _p0, double _p1, double _p2) {
  return f([_p0, _p1], _p2);
}
\end{lstlisting}
Essentially, this thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism. These thunks are implemented using GCC nested functions, producing closures that have the usual function-pointer signature.

\subsection{Variadic Tuples}

To define variadic functions, \CFA adds a new kind of type parameter, @ttype@. Matching against a @ttype@ (``tuple type'') parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types. In a given parameter list, there should be at most one @ttype@ parameter, and it must occur last; otherwise, given the previous rule, the call can never resolve. This idea essentially matches normal variadic semantics, with a strong resemblance to \CCeleven variadic templates. As such, @ttype@ variables are also referred to as \emph{argument} or \emph{parameter packs} in this paper.
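For illustration, in the following sketch (declaration only), the first argument matches the non-@ttype@ parameter and the parameter pack consumes whatever remains:
\begin{lstlisting}
forall(otype T, ttype Params)
void f(T arg, Params rest);

f(1, 2.0, 3);  // T bound to int, Params bound to [double, int]
f(1);          // T bound to int, Params bound to [] (the empty pack)
\end{lstlisting}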

Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is through recursion. Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled.

For example, the C @sum@ function at the beginning of Section~\ref{sec:tuples} could be written using @ttype@ as:
\begin{lstlisting}
int sum(){ return 0; }        // (0)
forall(ttype Params | { int sum(Params); })
int sum(int x, Params rest) { // (1)
  return x+sum(rest);
}
sum(10, 20, 30);
\end{lstlisting}
Since (0) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@.
In order to call (1), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list, binding @Params@ to @[int, int]@.
In order to finish the resolution of @sum@, an assertion parameter that matches @int sum(int, int)@ is required.
Like in the previous iteration, (0) is not a valid candidate, so (1) is examined with @Params@ bound to @[int]@, requiring the assertion @int sum(int)@.
Next, (0) fails, and to satisfy (1) @Params@ is bound to @[]@, requiring an assertion @int sum()@.
Finally, (0) matches and (1) fails, which terminates the recursion.
Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10+sum(20, 30)@ $\rightarrow$ @10+(20+sum(30))@ $\rightarrow$ @10+(20+(30+sum()))@ $\rightarrow$ @10+(20+(30+0))@.

As a point of note, this version does not require any form of argument descriptor, since the \CFA type system keeps track of all of these details. It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments:
\begin{lstlisting}
int sum(int x, int y){
  return x+y;
}
forall(ttype Params | { int sum(int, Params); })
int sum(int x, int y, Params rest) {
  return sum(x+y, rest);
}
\end{lstlisting}
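As a usage sketch (assuming only these two definitions of @sum@ are in scope), the recursion now bottoms out at the two-argument base case:
\begin{lstlisting}
sum(10, 20);          // base case: 30
sum(10, 20, 30, 40);  // sum(30, 30, 40), then sum(60, 40), result 100
// sum(10);           // does not resolve: no candidate accepts a single argument
\end{lstlisting}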

One more iteration permits the summation of any summable type, as long as all arguments are the same type:
\begin{lstlisting}
trait summable(otype T) {
  T ?+?(T, T);
};
forall(otype R | summable(R))
R sum(R x, R y){
  return x+y;
}
forall(otype R, ttype Params
  | summable(R)
  | { R sum(R, Params); })
R sum(R x, R y, Params rest) {
  return sum(x+y, rest);
}
\end{lstlisting}
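For example, the generic version might be used as follows (a sketch, assuming only the generic definitions are in scope):
\begin{lstlisting}
double d = sum(1.5, 2.0, 2.5);  // R bound to double, result 6.0
int i = sum(1, 2, 3, 4);        // R bound to int, result 10
\end{lstlisting}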
Unlike C, it is not necessary to hard-code the expected type. This code is naturally open to extension, in that any user-defined type with a @?+?@ operator can automatically be used with the @sum@ function. That is to say, the programmer who writes @sum@ does not need full program knowledge of every possible data type, unlike what is necessary to write an equivalent function using the standard C mechanisms. Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators.
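One possible shape for such a heterogeneous @sum@ is sketched below; the mixed-type addition assertion @R ?+?(R, S)@ is an assumption about the argument types rather than part of the library code above:
\begin{lstlisting}
forall(otype R, otype S | { R ?+?(R, S); })
R sum(R x, S y) {
  return x+y;
}
forall(otype R, otype S, ttype Params | { R ?+?(R, S); R sum(R, Params); })
R sum(R x, S y, Params rest) {
  return sum(x+y, rest);
}
\end{lstlisting}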

It is also possible to write a type-safe variadic print function that can replace @printf@:
\begin{lstlisting}
struct S { int x, y; };
forall(otype T, ttype Params |
  { void print(T); void print(Params); })
void print(T arg, Params rest) {
  print(arg);
  print(rest);
}
void print(char * x) { printf("%s", x); }
void print(int x) { printf("%d", x);  }
void print(S s) { print("{ ", s.x, ",", s.y, " }"); }

print("s = ", (S){ 1, 2 }, "\n");
\end{lstlisting}
This example showcases a variadic-template-like decomposition of the provided argument list. The individual @print@ functions print a single value of a particular type. The polymorphic @print@ prints any list of arguments, as long as each argument type has a matching @print@ function. The individual print functions can be used to build up more complicated @print@ functions, such as the one for @S@, which is something that cannot be done with @printf@ in C.

It is also possible to use @ttype@ polymorphism to provide arbitrary argument-forwarding functions. For example, @new@ can be written as a library function:
\begin{lstlisting}
struct pair(otype R, otype S);
forall(otype R, otype S)
void ?{}(pair(R, S) *, R, S);  // (1)

forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); })
T * new(Params p) {
  return ((T*)malloc( sizeof(T) )){ p }; // construct into result of malloc
}

pair(int, char) * x = new(42, '!');
\end{lstlisting}
The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects. This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference.

In the call to @new@, @pair(int, char)@ is selected to match @T@, and @Params@ is expanded to match @[int, char]@. The constructor (1) may be specialized to satisfy the assertion for a constructor with an interface compatible with @void ?{}(pair(int, char) *, int, char)@.
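As another usage sketch, the allocated type is always inferred from the declaration, so changing the declared type simply changes which constructor assertion must be satisfied:
\begin{lstlisting}
pair(double, char) * y = new(3.14, '!');  // T is pair(double, char), Params is [double, char]
\end{lstlisting}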

\subsection{Implementation}

Tuples are implemented in the \CFA translator via a transformation into generic types. For each $N$, the first time an $N$-tuple is seen in a scope, a generic type with $N$ type parameters is generated. For example:
\begin{lstlisting}
[int, int] f() {
  [double, double] x;
  [int, double, int] y;
}
\end{lstlisting}
Is transformed into:
\begin{lstlisting}
forall(dtype T0, dtype T1 | sized(T0) | sized(T1))
struct _tuple2 {  // generated before the first 2-tuple
  T0 field_0;
  T1 field_1;
};
_tuple2(int, int) f() {
  _tuple2(double, double) x;
  forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2))
  struct _tuple3 {  // generated before the first 3-tuple
    T0 field_0;
    T1 field_1;
    T2 field_2;
  };
  _tuple3(int, double, int) y;
}
\end{lstlisting}

Tuple expressions are then simply converted directly into compound literals:
\begin{lstlisting}
[5, 'x', 1.24];
\end{lstlisting}
Becomes:
\begin{lstlisting}
(_tuple3(int, char, double)){ 5, 'x', 1.24 };
\end{lstlisting}

Since tuples are essentially structures, tuple indexing expressions are just field accesses:
\begin{lstlisting}
void f(int, [double, char]);
[int, double] x;

x.0+x.1;
printf("%d %g\n", x);
f(x, 'z');
\end{lstlisting}
Is transformed into:
\begin{lstlisting}
void f(int, _tuple2(double, char));
_tuple2(int, double) x;

x.field_0+x.field_1;
printf("%d %g\n", x.field_0, x.field_1);
f(x.field_0, (_tuple2(double, char)){ x.field_1, 'z' });
\end{lstlisting}
Note that due to flattening, @x@ used in the argument position is converted into the list of its fields. In the call to @f@, the second and third argument components are structured into a tuple argument. Similarly, tuple member expressions are recursively expanded into a list of member access expressions.

Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion. Each unique expression is assigned an identifier and is guaranteed to be executed exactly once:
\begin{lstlisting}
void g(int, double);
[int, double] h();
g(h());
\end{lstlisting}
Internally, this expression is converted to two variables and an expression:
\begin{lstlisting}
void g(int, double);
[int, double] h();

_Bool _unq0_finished_ = 0;
[int, double] _unq0;
g(
  (_unq0_finished_ ? _unq0 : (_unq0 = h(), _unq0_finished_ = 1, _unq0)).0,
  (_unq0_finished_ ? _unq0 : (_unq0 = h(), _unq0_finished_ = 1, _unq0)).1
);
\end{lstlisting}
Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order. The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true. Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. Tuple member expressions also take advantage of unique expressions in the case of possible impurity.

Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and the translator could analyze function bodies, when they are available, to recursively detect purity and thereby eliminate some unique expressions.

The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple times, \eg in a unique expression. The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new.
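As an illustrative sketch only (not the exact code emitted by the translator), the swap @[x, y] = [y, x]@ from the earlier example might expand to a GNU C statement expression along the following lines:
\begin{lstlisting}
({
  int _tmp0 = y, _tmp1 = x;      // right-hand side evaluated before any assignment
  x = _tmp0;  y = _tmp1;         // component-wise assignments
  (_tuple2(int, int)){ x, y };   // result: the left-hand side after assignment
});
\end{lstlisting}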

\section{Evaluation}

\TODO{Magnus suggests we need some graphs, it's kind of a done thing that the reviewers will be looking for. Also, we've made some unsubstantiated claims about the runtime performance of \CFA, which some micro-benchmarks could help with. I'm thinking a simple stack push and pop, with an idiomatic \lstinline@void*@, \CFA, \CC template and \CC virtual inheritance versions (the void* and virtual inheritance versions likely need to be linked lists, or clumsy in their API -- possibly both versions) to test generics, and variadic print to test tuples. We measure SLOC, runtime performance, executable size (making sure to include benchmarks for multiple types in the executable), and possibly manually count the number of places where the programmer must provide un-type-checked type information. Appendices don't count against our page limit, so we might want to include the source code for the benchmarks (or at least the relevant implementation details) in one.}

\section{Related Work}

\CC is the most natural existing language to compare \CFA to, as they are both more modern extensions to C with backwards source compatibility. The most fundamental difference between \CC and \CFA is their approach to C compatibility. \CC does provide fairly strong source backwards compatibility with C, but is a dramatically more complex language than C, and imposes a steep learning curve to use many of its extension features. For instance, in a break from general C practice, template code is typically written in header files, with a variety of subtle restrictions implied on its use by this choice, while the other polymorphism mechanism made available by \CC, class inheritance, requires programmers to learn an entirely new object-oriented programming paradigm; the interaction between templates and inheritance is also quite complex. \CFA, by contrast, has a single facility for polymorphic code, one that supports separate compilation and the existing procedural paradigm of C code. A major difference between the approaches of \CC and \CFA to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA. One of the major limiting factors of \CC's approach is that templates cannot be separately compiled, and, until concepts~\citep{C++Concepts} are standardized (currently anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors for template expansion failures. By contrast, the explicit nature of assertions in \CFA allows polymorphic functions to be separately compiled, and their requirements to be checked by the compiler; similarly, \CFA generic types may be opaque, unlike \CC template classes.

Cyclone also provides capabilities for polymorphic functions and existential types~\citep{Grossman06}, similar in concept to \CFA's @forall@ functions and generic types. Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process. Furthermore, Cyclone's polymorphic functions and types are restricted in that they may only abstract over types with the same layout and calling convention as @void*@, in practice only pointer types and @int@; in \CFA terms, all Cyclone polymorphism must be dtype-static. This design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, but is more restrictive than \CFA's more general model.

Apple's Objective-C \citep{obj-c-book} is another industrially successful set of extensions to C. The Objective-C language model is a fairly radical departure from C, adding object-orientation and message-passing. Objective-C implements variadic functions using the C @va_arg@ mechanism, and did not support type-checked generics until recently \citep{xcode7}, historically using less-efficient and more error-prone runtime checking of object types instead. The GObject framework \citep{GObject} also adds object-orientation with runtime type-checking and reference-counting garbage-collection to C; these are much more intrusive feature additions than those provided by \CFA, in addition to the runtime overhead of reference-counting. The Vala programming language \citep{Vala} compiles to GObject-based C, and so adds the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code-bases. Java \citep{Java8} has had generic types and variadic functions since Java~5; Java's generic types are type-checked at compilation and type-erased at runtime, similar to \CFA's, though in Java each object carries its own table of method pointers, while \CFA passes the method pointers separately so as to maintain a C-compatible struct layout. Java variadic functions are simply syntactic sugar for an array of a single type, and therefore less useful than \CFA's heterogeneously-typed variadic functions. Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens.

D \citep{D}, Go \citep{Go}, and Rust \citep{Rust} are modern, compiled languages with abstraction features similar to \CFA traits: \emph{interfaces} in D and Go, and \emph{traits} in Rust. However, each language represents a dramatic departure from C in terms of language model, and none has the same level of compatibility with C as \CFA. D and Go are garbage-collected languages, imposing the associated runtime overhead. The necessity of accounting for data transfer between the managed Go runtime and the unmanaged C runtime complicates the foreign-function interface between Go and C. Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. D restricts garbage collection to its own heap by default, while Rust is not garbage-collected, and thus has a lighter-weight runtime that is more easily interoperable with C. Rust also possesses much more powerful abstraction capabilities for writing generic code than Go. On the other hand, Rust's borrow-checker, while it does provide strong safety guarantees, is complex and difficult to learn, and imposes a distinctly idiomatic programming style on Rust. \CFA, with its more modest safety features, is significantly easier to port C code to, while maintaining the idiomatic style of the original source.

\section{Conclusion \& Future Work}

There is ongoing work on a wide range of \CFA feature extensions, including reference types, exceptions, and concurrent programming primitives. In addition to this work, there are some interesting future directions the polymorphism design could take. Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. \CFA polymorphic functions, by contrast, use an approach that is essentially dynamic virtual dispatch. The runtime overhead of this approach is low, but not as low as \CC template functions, and it may be beneficial to provide a mechanism for particularly performance-sensitive code to close this gap. Further research is needed, but two promising approaches are to allow an annotation on polymorphic function call sites that tells the translator to create a template specialization of the function (provided the code is visible in the current translation unit), or to place an annotation on polymorphic function definitions that instantiates a version of the polymorphic function specialized to some set of types. These approaches are not mutually exclusive, and would allow these performance optimizations to be applied only where most useful to increase performance, without suffering the code bloat or loss of generality of a template expansion approach where it is unnecessary.

In conclusion, the authors' design for generic types and tuples, unlike those available in existing work, is both reusable and type-checked, while still supporting a full range of C features, including separately-compiled modules. We have experimentally validated the performance of our design against both \CC and standard C, showing it is \TODO{shiny, cap'n}.

\begin{acks}
The authors would like to thank Magnus Madsen for valuable editorial feedback.

This work is supported in part by a corporate partnership with \grantsponsor{Huawei}{Huawei Ltd.}{http://www.huawei.com}\ and the first author's \grantsponsor{NSERC-PGS}{NSERC PGS D}{http://www.nserc-crsng.gc.ca/Students-Etudiants/PG-CS/BellandPostgrad-BelletSuperieures_eng.asp} scholarship.
\end{acks}

\bibliographystyle{ACM-Reference-Format}
\bibliography{cfa}

\end{document}

% Local Variables: %
% tab-width: 4 %
% compile-command: "make" %
% End: %