\documentclass[AMA,STIX1COL]{WileyNJD-v2} \setlength\typewidth{170mm} \setlength\textwidth{170mm} \articletype{RESEARCH ARTICLE}% \received{12 March 2018} \revised{8 May 2018} \accepted{28 June 2018} \setlength\typewidth{168mm} \setlength\textwidth{168mm} \raggedbottom %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Latex packages used in the document. \usepackage{epic,eepic} \usepackage{xspace} \usepackage{comment} \usepackage{upquote} % switch curled `'" to straight \usepackage{listings} % format program code \captionsetup{justification=raggedright,singlelinecheck=false} %\usepackage{enumitem} %\setlist[itemize]{topsep=3pt,itemsep=2pt,parsep=0pt}% global %\usepackage{rotating} \hypersetup{breaklinks=true} \definecolor{ForestGreen}{cmyk}{1, 0, 0.99995, 0} \usepackage[pagewise]{lineno} \renewcommand{\linenumberfont}{\scriptsize\sffamily} \lefthyphenmin=3 % hyphen only after 4 characters \righthyphenmin=3 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Names used in the document. \newcommand{\CFAIcon}{\textsf{C}\raisebox{\depth}{\rotatebox{180}{\textsf{A}}}\xspace} % Cforall symbolic name \newcommand{\CFA}{\protect\CFAIcon} % safe for section/caption \newcommand{\CFL}{\textrm{Cforall}\xspace} % Cforall symbolic name \newcommand{\Celeven}{\textrm{C11}\xspace} % C11 symbolic name \newcommand{\CC}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}\xspace} % C++ symbolic name \newcommand{\CCeleven}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}11\xspace} % C++11 symbolic name \newcommand{\CCfourteen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}14\xspace} % C++14 symbolic name \newcommand{\CCseventeen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name \newcommand{\CCtwenty}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name \newcommand{\CCV}{\rm C\kern-.1em\hbox{+\kern-.25em+}obj\xspace} % C++ virtual symbolic name \newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} % C# symbolic name %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}} %\newcommand{\TODO}[1]{\textbf{TODO}: {\itshape #1}} % TODO included \newcommand{\TODO}[1]{} % TODO elided %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore % removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR % AFTER HYPERREF. %\DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}} \renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}} \renewcommand*{\thefootnote}{\Alph{footnote}} % hack because fnsymbol does not work %\renewcommand*{\thefootnote}{\fnsymbol{footnote}} \makeatletter % parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for % use rather than use \parident directly. 
\newlength{\parindentlnth} \setlength{\parindentlnth}{\parindent} \newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{\lst@basicstyle{#1}}}} \newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}} \newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}} \newlength{\gcolumnposn} % temporary hack because lstlisting does not handle tabs correctly \newlength{\columnposn} \setlength{\gcolumnposn}{3.5in} \setlength{\columnposn}{\gcolumnposn} \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}} \newcommand{\CRT}{\global\columnposn=\gcolumnposn} % Denote newterms in particular font and index them without particular font and in lowercase, e.g., \newterm{abc}. % The option parameter provides an index term different from the new term, e.g., \newterm[\texttt{abc}]{abc} % The star version does not lowercase the index information, e.g., \newterm*{IBM}. \newcommand{\newtermFontInline}{\emph} \newcommand{\newterm}{\@ifstar\@snewterm\@newterm} \newcommand{\@newterm}[2][\@empty]{\lowercase{\def\temp{#2}}{\newtermFontInline{#2}}\ifx#1\@empty\index{\temp}\else\index{#1@{\protect#2}}\fi} \newcommand{\@snewterm}[2][\@empty]{{\newtermFontInline{#2}}\ifx#1\@empty\index{#2}\else\index{#1@{\protect#2}}\fi} % Latin abbreviation \newcommand{\abbrevFont}{\textit} % set empty for no italics \@ifundefined{eg}{ \newcommand{\EG}{\abbrevFont{e}\abbrevFont{g}} \newcommand*{\eg}{% \@ifnextchar{,}{\EG}% {\@ifnextchar{:}{\EG}% {\EG,\xspace}}% }}{}% \@ifundefined{ie}{ \newcommand{\IE}{\abbrevFont{i}\abbrevFont{e}} \newcommand*{\ie}{% \@ifnextchar{,}{\IE}% {\@ifnextchar{:}{\IE}% {\IE,\xspace}}% }}{}% \@ifundefined{etc}{ \newcommand{\ETC}{\abbrevFont{etc}} \newcommand*{\etc}{% \@ifnextchar{.}{\ETC}% {\ETC.\xspace}% }}{}% \@ifundefined{etal}{ \newcommand{\ETAL}{\abbrevFont{et}~\abbrevFont{al}} \newcommand*{\etal}{% \@ifnextchar{.}{\protect\ETAL}% {\protect\ETAL.\xspace}% }}{}% \@ifundefined{viz}{ \newcommand{\VIZ}{\abbrevFont{viz}} \newcommand*{\viz}{% \@ifnextchar{.}{\VIZ}% {\VIZ.\xspace}% }}{}% \makeatother \newenvironment{cquote}{% \list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}% \item\relax }{% \endlist }% cquote % CFA programming language, based on ANSI C (with some gcc additions) \lstdefinelanguage{CFA}[ANSI]{C}{ morekeywords={ _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, __attribute, __attribute__, auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__, coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally, __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__, inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or, otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread, _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__, virtual, __volatile, __volatile__, waitfor, when, with, zero_t}, moredirectives={defined,include_next}% } \lstset{ language=CFA, columns=fullflexible, basicstyle=\linespread{0.9}\sf, % reduce line spacing and use sanserif font stringstyle=\tt, % use typewriter font tabsize=5, % N space tabbing xleftmargin=\parindentlnth, % indent code to paragraph indentation %mathescape=true, % LaTeX math escape in 
CFA code $...$ escapechar=\$, % LaTeX escape in CFA code keepspaces=true, % showstringspaces=false, % do not show spaces with cup showlines=true, % show blank lines at end of code aboveskip=4pt, % spacing above/below code block belowskip=3pt, % replace/adjust listing characters that look bad in sanserif literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2, moredelim=**[is][\color{red}]{`}{`}, }% lstset \lstnewenvironment{cfa}[1][] {\lstset{#1}} {} \lstnewenvironment{C++}[1][] % use C++ style {\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}} {} % inline code @...@ \lstMakeShortInline@% \let\OLDthebibliography\thebibliography \renewcommand\thebibliography[1]{ \OLDthebibliography{#1} \setlength{\parskip}{0pt} \setlength{\itemsep}{4pt plus 0.3ex} } \title{\texorpdfstring{\protect\CFA : Adding modern programming language features to C}{Cforall : Adding modern programming language features to C}} \author[1]{Aaron Moss} \author[1]{Robert Schluntz} \author[1]{Peter A. Buhr} \authormark{MOSS \textsc{et al}} \address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, Ontario}, \country{Canada}}} \corres{Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}} \fundingInfo{Natural Sciences and Engineering Research Council of Canada} \abstract[Summary]{ The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating systems. This installation base and the programmers producing it represent a massive software engineering investment spanning decades and likely to continue for decades more. Nevertheless, C, which was first standardized almost 30 years ago, lacks many features that make programming in more modern languages safer and more productive. The goal of the \CFA project (pronounced ``C for all'') is to create an extension of C that provides modern safety and productivity features while still ensuring strong backward compatibility with C and its programmers. Prior projects have attempted similar goals but failed to honor the C programming style; for instance, adding object-oriented or functional programming with garbage collection is a nonstarter for many C developers. Specifically, \CFA is designed to have an orthogonal feature set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and programmers. This paper presents a quick tour of \CFA features, showing how their design avoids shortcomings of similar features in C and other C-like languages. Experimental results are presented to validate several of the new features. 
}%

\keywords{C, Cforall, generic types, polymorphic functions, tuple types, variadic types}

\begin{document}
%\linenumbers % comment out to turn off line numbering

\maketitle

\vspace*{-10pt}
\section{Introduction}

The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating systems.
This installation base and the programmers producing it represent a massive software engineering investment spanning decades and likely to continue for decades more.
The TIOBE index~\cite{TIOBE} ranks the top five most \emph{popular} programming languages as Java 15\%, \Textbf{C 12\%}, \Textbf{\CC 5.5\%}, Python 5\%, and \Csharp 4.5\%, summing to 42\%, where each of the next 50 languages is below 4\%, with a long tail.
The top three rankings over the past 30 years are as follows.
\begin{center}
\setlength{\tabcolsep}{10pt}
\fontsize{9bp}{11bp}\selectfont
\lstDeleteShortInline@%
\begin{tabular}{@{}cccccccc@{}}
			& 2018	& 2013	& 2008	& 2003	& 1998	& 1993	& 1988	\\
Java		& 1		& 2		& 1		& 1		& 18	& --	& --	\\
\Textbf{C}	& \Textbf{2} & \Textbf{1} & \Textbf{2} & \Textbf{2} & \Textbf{1} & \Textbf{1} & \Textbf{1} \\
\CC			& 3		& 4		& 3		& 3		& 2		& 2		& 5		\\
\end{tabular}
\lstMakeShortInline@%
\end{center}
Love it or hate it, C is extremely popular, highly used, and one of the few systems languages.
In many cases, \CC is used solely as a better C.
Nevertheless, C, which was first standardized almost 30 years ago~\cite{ANSI89:C}, lacks many features that make programming in more modern languages safer and more productive.

\CFA (pronounced ``C for all'' and written \CFA or Cforall) is an evolutionary extension of the C programming language that adds modern language features to C, while maintaining source and runtime compatibility in the familiar C programming model.
The four key design goals for \CFA~\cite{Bilson03} are as follows:
(1) the behavior of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler;
(2) the standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler;
(3) the \CFA code must be at least as portable as standard C code;
(4) extensions introduced by \CFA must be translated in the most efficient way possible.
These goals ensure that existing C code bases can be converted into \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used.
\CC is used similarly but has the disadvantages of multiple legacy design choices that cannot be updated and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project.

All language features discussed in this paper are working, except some advanced exception-handling features.
Not discussed in this paper are the integrated concurrency constructs and user-level threading library~\cite{Delisle18}.
\CFA is an \emph{open-source} project implemented as a source-to-source translator from \CFA to the gcc-dialect of C~\cite{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by gcc, meeting goals (1)--(3).
% @plg2[9]% cd cfa-cc/src; cloc ArgTweak CodeGen CodeTools Common Concurrency ControlStruct Designators GenPoly InitTweak MakeLibCfa.cc MakeLibCfa.h Parser ResolvExpr SymTab SynTree Tuples driver prelude main.cc % ------------------------------------------------------------------------------- % Language files blank comment code % ------------------------------------------------------------------------------- % C++ 108 5420 5232 34961 % C/C++ Header 86 2379 2450 8464 % Teamcenter def 2 115 65 1387 % make 5 168 87 1052 % C 20 109 403 488 % awk 1 12 26 121 % sed 1 0 0 6 % ------------------------------------------------------------------------------- % SUM: 223 8203 8263 46479 % ------------------------------------------------------------------------------- The \CFA translator is 200+ files and 46\,000+ lines of code written in C/\CC. A translator versus a compiler makes it easier and faster to generate and debug the C object code rather than the intermediate, assembler, or machine code; ultimately, a compiler is necessary for advanced features and optimal performance. % The translator design is based on the \emph{visitor pattern}, allowing multiple passes over the abstract code-tree, which works well for incrementally adding new feature through additional visitor passes. Two key translator components are expression analysis, determining expression validity and what operations are required for its implementation, and code generation, dealing with multiple forms of overloading, polymorphism, and multiple return values by converting them into the C code for a C compiler that supports none of these features. Details of these components are available in chapters 2 and 3 in the work of Bilson~\cite{Bilson03} and form the base for the current \CFA translator. % @plg2[8]% cd cfa-cc/src; cloc libcfa % ------------------------------------------------------------------------------- % Language files blank comment code % ------------------------------------------------------------------------------- % C 35 1256 1240 9116 % C/C++ Header 54 358 1106 1198 % make 2 201 325 1167 % C++ 3 18 17 124 % Assembly 3 56 97 111 % Bourne Shell 2 2 0 25 % awk 1 4 0 22 % ------------------------------------------------------------------------------- % SUM: 100 1895 2785 11763 % ------------------------------------------------------------------------------- The \CFA runtime system is 100+ files and 11\,000+ lines of code, written in \CFA. Currently, the \CFA runtime is the largest \emph{user} of \CFA providing a vehicle to test the language features and implementation. % @plg2[6]% cd cfa-cc/src; cloc tests examples benchmark % ------------------------------------------------------------------------------- % Language files blank comment code % ------------------------------------------------------------------------------- % C 237 12260 2869 23286 % make 8 464 245 2838 % C/C++ Header 22 225 175 785 % Python 5 131 93 420 % C++ 10 48 5 201 % Lua 2 31 4 126 % Java 4 5 0 80 % Go 2 11 9 40 % ------------------------------------------------------------------------------- % SUM: 290 13175 3400 27776 % ------------------------------------------------------------------------------- % The \CFA tests are 290+ files and 27,000+ lines of code. % The tests illustrate syntactic and semantic features in \CFA, plus a growing number of runtime benchmarks. % The tests check for correctness and are used for daily regression testing of 3800+ commits. Finally, it is impossible to describe a programming language without usage before definition. 
Therefore, syntax and semantics appear before explanations; hence, patience is necessary until sufficient details are presented and discussed. Similarly, a detailed comparison with other programming languages is postponed until Section~\ref{s:RelatedWork}. \vspace*{-6pt} \section{Polymorphic Functions} \CFA introduces both ad hoc and parametric polymorphism to C, with a design originally formalized by Ditchfield~\cite{Ditchfield92} and first implemented by Bilson~\cite{Bilson03}. Shortcomings are identified in the existing approaches to generic and variadic data types in C-like languages and how these shortcomings are avoided in \CFA. Specifically, the solution is both reusable and type checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions. The new constructs are empirically compared with C and \CC approaches via performance experiments in Section~\ref{sec:eval}. \vspace*{-6pt} \subsection{Name overloading} \label{s:NameOverloading} \begin{quote} ``There are only two hard things in Computer Science: cache invalidation and \emph{naming things}.''---Phil Karlton \end{quote} \vspace{-9pt} C already has a limited form of ad hoc polymorphism in its basic arithmetic operators, which apply to a variety of different types using identical syntax. \CFA extends the built-in operator overloading by allowing users to define overloads for any function, not just operators, and even any variable; Section~\ref{sec:libraries} includes a number of examples of how this overloading simplifies \CFA programming relative to C. Code generation for these overloaded functions and variables is implemented by the usual approach of mangling the identifier names to include a representation of their type, while \CFA decides which overload to apply based on the same ``usual arithmetic conversions'' used in C to disambiguate operator overloads. \newpage \begin{cfa} int max = 2147483647; $\C[4in]{// (1)}$ double max = 1.7976931348623157E+308; $\C{// (2)}$ int max( int a, int b ) { return a < b ? b : a; } $\C{// (3)}$ double max( double a, double b ) { return a < b ? b : a; } $\C{// (4)}\CRT$ max( 7, -max ); $\C[3in]{// uses (3) and (1), by matching int from constant 7}$ max( max, 3.14 ); $\C{// uses (4) and (2), by matching double from constant 3.14}$ max( max, -max ); $\C{// ERROR, ambiguous}$ int m = max( max, -max ); $\C{// uses (3) and (1) twice, by matching return type}\CRT$ \end{cfa} \CFA maximizes the ability to reuse names to aggressively address the naming problem. In some cases, hundreds of names can be reduced to tens, resulting in a significant cognitive reduction. In the above, the name @max@ has a consistent meaning, and a programmer only needs to remember the single concept: maximum. To prevent significant ambiguities, \CFA uses the return type in selecting overloads, \eg in the assignment to @m@, the compiler uses @m@'s type to unambiguously select the most appropriate call to function @max@ (as does Ada). As is shown later, there are a number of situations where \CFA takes advantage of available type information to disambiguate, where other programming languages generate ambiguities. \Celeven added @_Generic@ expressions (see section~6.5.1.1 of the ISO/IEC 9899~\cite{C11}), which is used with preprocessor macros to provide ad hoc polymorphism; however, this polymorphism is both functionally and ergonomically inferior to \CFA name overloading. 
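For comparison, a \Celeven approximation of the two @max@ functions requires a dispatch macro over distinctly named implementations; the names @max_int@ and @max_double@ below are illustrative only.
\begin{cfa}
int max_int( int a, int b ) { return a < b ? b : a; }
double max_double( double a, double b ) { return a < b ? b : a; }
#define max( a, b ) _Generic( (a), int: max_int, double: max_double )( (a), (b) )
max( 7, 3 );		$\C{// dispatches on the first argument to max\_int}$
max( 7.0, 3.14 );	$\C{// dispatches on the first argument to max\_double}$
\end{cfa}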
The macro wrapping the generic expression imposes some limitations, for instance, it cannot implement the example above, because the variables @max@ are ambiguous with the functions @max@. Ergonomic limitations of @_Generic@ include the necessity to put a fixed list of supported types in a single place and manually dispatch to appropriate overloads, as well as possible namespace pollution from the dispatch functions, which must all have distinct names. \CFA supports @_Generic@ expressions for backward compatibility, but it is an unnecessary mechanism. % http://fanf.livejournal.com/144696.html % http://www.robertgamble.net/2012/01/c11-generic-selections.html % https://abissell.com/2014/01/16/c11s-_generic-keyword-macro-applications-and-performance-impacts/ \vspace*{-10pt} \subsection{\texorpdfstring{\protect\lstinline{forall} functions}{forall functions}} \label{sec:poly-fns} The signature feature of \CFA is parametric-polymorphic functions~\cite{forceone:impl,Cormack90,Duggan96} with functions generalized using a @forall@ clause (giving the language its name). \begin{cfa} `forall( otype T )` T identity( T val ) { return val; } int forty_two = identity( 42 ); $\C{// T is bound to int, forty\_two == 42}$ \end{cfa} This @identity@ function can be applied to any complete \newterm{object type} (or @otype@). The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type. The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor, and destructor. If this extra information is not needed, for instance, for a pointer, the type parameter can be declared as a \newterm{data type} (or @dtype@). In \CFA, the polymorphic runtime cost is spread over each polymorphic call, because more arguments are passed to polymorphic functions; the experiments in Section~\ref{sec:eval} show this overhead is similar to \CC virtual function calls. A design advantage is that, unlike \CC template functions, \CFA polymorphic functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat. Since bare polymorphic types provide a restricted set of available operations, \CFA provides a \newterm{type assertion}~\cite[pp.~37-44]{Alphard} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type variable. For example, the function @twice@ can be defined using the \CFA syntax for operator overloading. \begin{cfa} forall( otype T `| { T ?+?(T, T); }` ) T twice( T x ) { return x `+` x; } $\C{// ? denotes operands}$ int val = twice( twice( 3.7 ) ); $\C{// val == 14}$ \end{cfa} This works for any type @T@ with a matching addition operator. The polymorphism is achieved by creating a wrapper function for calling @+@ with the @T@ bound to @double@ and then passing this function to the first call of @twice@. There is now the option of using the same @twice@ and converting the result into @int@ on assignment or creating another @twice@ with the type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Cormack81,Baker82,Ada} in its type analysis. The first approach has a late conversion from @double@ to @int@ on the final assignment, whereas the second has an early conversion to @int@. 
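Written out explicitly, the two possible resolutions of the initialization correspond to the following sketch.
\begin{cfa}
int val = (int)twice( twice( 3.7 ) );	$\C{// T bound to double, late conversion at the assignment}$
int val = twice( twice( (int)3.7 ) );	$\C{// T bound to int, early conversion of the constant}$
\end{cfa}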
\CFA minimizes the number of conversions and their potential to lose information; hence, it selects the first approach, which corresponds with C programmer intuition.

Crucial to the design of a new programming language are the libraries to access thousands of external software features.
Like \CC, \CFA inherits a massive compatible library base, where other programming languages must rewrite or provide fragile interlanguage communication with C.
A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted float array.
\begin{cfa}
void * bsearch( const void * key, const void * base, size_t nmemb, size_t size,
				int (* compar)( const void *, const void * ));
int comp( const void * t1, const void * t2 ) {
	return *(double *)t1 < *(double *)t2 ? -1 : *(double *)t2 < *(double *)t1 ? 1 : 0;
}
double key = 5.0, vals[10] = { /* 10 sorted float values */ };
double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp );	$\C{// search sorted array}$
\end{cfa}
This can be augmented simply with generalized, type-safe, \CFA-overloaded wrappers.
\begin{cfa}
forall( otype T | { int ?<?( T, T ); } ) T * bsearch( T key, const T * arr, size_t size ) {
	int comp( const void * t1, const void * t2 ) {
		return *(T *)t1 < *(T *)t2 ? -1 : *(T *)t2 < *(T *)t1 ? 1 : 0;
	}
	return (T *)bsearch( &key, arr, size, sizeof(T), comp );
}
double * val = bsearch( 5.0, vals, 10 );	$\C{// search sorted array}$
\end{cfa}
The nested function @comp@ provides the hidden interface from typed \CFA to the type-unsafe (@void *@) C @bsearch@.
Call-site inferencing and nested functions also provide a localized form of inheritance; for example, the \CFA @qsort@ only sorts in ascending order using @<@, but this behavior is easily overridden locally.
\begin{cfa}
forall( otype T | { int ?<?( T, T ); } ) void qsort( const T * arr, size_t size ) { /* use C qsort */ }
{
	int ?<?( double x, double y ) { return x `>` y; }	$\C{// locally override behavior}$
	qsort( vals, 10 );	$\C{// descending sort}$
}
\end{cfa}
The local version of @?<?@ overriding the built-in @?<?@ is passed to @qsort@; hence, the array is sorted in descending order.
% In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers.
% Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@).
% While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously.

\section{Generic Types}

A significant shortcoming of standard C is the lack of reusable type-safe abstractions for generic data structures and algorithms.
Broadly speaking, there are three approaches to implement abstract data structures in C.
One approach is to write bespoke data structures for each context in which they are needed.
While this approach is flexible and supports integration with the C type checker and tooling, it is also tedious and error prone, especially for more complex data structures.
A second approach is to use @void *@-based polymorphism, \eg the C standard library functions @bsearch@ and @qsort@, which allow for the reuse of code with common functionality.
However, basing all polymorphism on @void *@ eliminates the type checker's ability to ensure that argument types are properly matched, often requiring a number of extra function parameters, pointer indirection, and dynamic allocation that is otherwise not needed.
A third approach to generic code is to use preprocessor macros, which does allow the generated code to be both generic and type checked, but errors may be difficult to interpret.
Furthermore, writing and using preprocessor macros is unnatural and inflexible.

\CC, Java, and other languages use \newterm{generic types} to produce type-safe abstract data types.
\CFA generic types integrate efficiently and naturally with the existing polymorphic functions, while retaining backward compatibility with C and providing separate compilation.
However, for known concrete parameters, the generic-type definition can be inlined, like \CC templates.

A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration and instantiated using a parenthesized list of types after the type name.
\begin{cquote}
\lstDeleteShortInline@%
\begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}}
\begin{cfa}
`forall( otype R, otype S )` struct pair {
	R first;	S second;
};
`forall( otype T )` // dynamic
T value( pair(const char *, T) p ) { return p.second; }
`forall( dtype F, otype T )` // dtype-static (concrete)
T value( pair(F *, T * ) p) { return *p.second; }
\end{cfa}
&
\begin{cfa}
pair(const char *, int) p = {"magic", 42}; // concrete
int i = value( p );
pair(void *, int *) q = { 0, &p.second }; // concrete
i = value( q );
double d = 1.0;
pair(double *, double *) r = { &d, &d }; // concrete
d = value( r );
\end{cfa}
\end{tabular}
\lstMakeShortInline@%
\end{cquote}
\CFA classifies generic types as either \newterm{concrete} or \newterm{dynamic}.
Concrete types have a fixed memory layout regardless of type parameters, whereas dynamic types vary in memory layout depending on their type parameters.
A \newterm{dtype-static} type has polymorphic parameters but is still concrete.
Polymorphic pointers are an example of dtype-static types; given some type variable @T@, @T@ is a polymorphic type, as is @T *@, but @T *@ has a fixed size and can, therefore, be represented by @void *@ in code generation.

\CFA generic types also allow checked argument constraints.
For example, the following declaration of a sorted set type ensures the set key supports equality and relational comparison.
\begin{cfa}
forall( otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); } ) struct sorted_set;
\end{cfa}
For example, the following function @lexcmp@ performs a lexicographic comparison of two pairs of pointers, using a comparison function supplied by the caller for the pointed-at values.
\begin{cfa}
forall( dtype T ) int lexcmp( pair( T *, T * ) * a, pair( T *, T * ) * b, int (* cmp)( const T *, const T * ) ) {
	return cmp( a->first, b->first ) ? : cmp( a->second, b->second );
}
\end{cfa}
Since @pair( T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@; hence, the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type checked to ensure members of both pairs and arguments to the comparison function match in type.

Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \newterm{tag structures}.
Sometimes, information is only used for type checking and can be omitted at runtime.
\begin{cquote}
\lstDeleteShortInline@%
\begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}}
\begin{cfa}
forall( dtype Unit ) struct scalar { unsigned long value; };
struct metres {};
struct litres {};
forall( dtype U ) scalar(U) ?+?( scalar(U) a, scalar(U) b ) {
	return (scalar(U)){ a.value + b.value };
}
\end{cfa}
&
\begin{cfa}
scalar(metres) half_marathon = { 21_098 };
scalar(litres) pool = { 2_500_000 };
scalar(metres) marathon = half_marathon + half_marathon;
scalar(litres) two_pools = pool + pool;
`marathon + pool;`	// ERROR, mismatched types
\end{cfa}
\end{tabular}
\lstMakeShortInline@%
\end{cquote}
Here, @scalar@ is a dtype-static type; hence, all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@.
These implementations may even be separately compiled, unlike \CC template functions.
However, the \CFA type checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume.

\section{Tuples}
\label{sec:tuples}

In many languages, functions can return, at most, one value; however, many operations have multiple outcomes, some exceptional.
Consider C's @div@ and @remquo@ functions, which return the quotient and remainder for a division of integer and float values, respectively.
\begin{cfa}
typedef struct { int quo, rem; } div_t;	$\C{// from include stdlib.h}$
div_t div( int num, int den );
double remquo( double num, double den, int * quo );
div_t qr = div( 13, 5 );	$\C{// return quotient/remainder aggregate}$
int q;
double r = remquo( 13.5, 5.2, &q );	$\C{// return remainder, alias quotient}$
\end{cfa}
Here, @div@ aggregates the quotient/remainder in a structure, whereas @remquo@ aliases a parameter to an argument.
Both approaches are awkward.
% FIX
Alternatively, a programming language can directly support returning multiple values, \eg \CFA provides the following.
\begin{cfa}
[ int, int ] div( int num, int den );	$\C{// return two integers}$
[ double, double ] div( double num, double den );	$\C{// return two doubles}$
int q, r;	$\C{// overloaded variable names}$
double q, r;
[ q, r ] = div( 13, 5 );	$\C{// select appropriate div and q, r}$
[ q, r ] = div( 13.5, 5.2 );	$\C{// assign into tuple}$
\end{cfa}
This approach is straightforward to understand and use; therefore, why do so few programming languages support this obvious feature, or provide it only awkwardly?
To answer, there are complex consequences that cascade through multiple aspects of the language, especially the type system.
This section shows these consequences and how \CFA handles them.

\subsection{Tuple Expressions}

The addition of multiple-return-value functions (MRVFs) is \emph{useless} without a syntax for accepting multiple values at the call site.
The simplest mechanism for capturing the return values is variable assignment, allowing the values to be retrieved directly.
As such, \CFA allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions (as above), called a \newterm{tuple}.

However, functions also use \newterm{composition} (nested calls), with the direct consequence that MRVFs must also support composition to be orthogonal with single-return-value functions (SRVFs), \eg, \CFA provides the following.
\begin{cfa}
printf( "%d %d\n", div( 13, 5 ) );	$\C{// return values separated into arguments}$
\end{cfa}
Here, the values returned by @div@ are composed with the call to @printf@ by flattening the tuple into separate arguments.
However, the \CFA type-system must support significantly more complex composition.
\begin{cfa}
[ int, int ] foo$\(_1\)$( int );	$\C{// overloaded foo functions}$
[ double ] foo$\(_2\)$( int );
void bar( int, double, double );
`bar`( foo( 3 ), foo( 3 ) );
\end{cfa}
The type resolver only has the tuple return types to resolve the call to @bar@ as the @foo@ parameters are identical, which involves unifying the possible @foo@ functions with @bar@'s parameter list.
No combination of @foo@s is an exact match with @bar@'s parameters; thus, the resolver applies C conversions.
% FIX
The minimal cost is @bar( foo@$_1$@( 3 ), foo@$_2$@( 3 ) )@, giving (@int@, {\color{ForestGreen}@int@}, @double@) to (@int@, {\color{ForestGreen}@double@}, @double@) with one {\color{ForestGreen}safe} (widening) conversion from @int@ to @double@ versus ({\color{red}@double@}, {\color{ForestGreen}@int@}, {\color{ForestGreen}@int@}) to ({\color{red}@int@}, {\color{ForestGreen}@double@}, {\color{ForestGreen}@double@}) with one {\color{red}unsafe} (narrowing) conversion from @double@ to @int@ and two safe conversions.
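Spelling out the selected interpretation with explicit variables gives the following sketch, which is for illustration only and is not the translator's generated code.
\begin{cfa}
int i1, i2;   double d;
[ i1, i2 ] = foo( 3 );	$\C{// selects the foo returning [int, int]}$
d = foo( 3 );	$\C{// selects the foo returning [double]}$
bar( i1, i2, d );	$\C{// one safe conversion: i2 widens to double}$
\end{cfa}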
\subsection{Tuple variables} An important observation from function composition is that new variable names are not required to initialize parameters from an MRVF. \CFA also allows declaration of tuple variables that can be initialized from an MRVF, since it can be awkward to declare multiple variables of different types. \newpage \begin{cfa} [ int, int ] qr = div( 13, 5 ); $\C{// tuple-variable declaration and initialization}$ [ double, double ] qr = div( 13.5, 5.2 ); \end{cfa} Here, the tuple variable name serves the same purpose as the parameter name(s). Tuple variables can be composed of any types, except for array types, since array sizes are generally unknown in C. One way to access the tuple variable components is with assignment or composition. \begin{cfa} [ q, r ] = qr; $\C{// access tuple-variable components}$ printf( "%d %d\n", qr ); \end{cfa} \CFA also supports \newterm{tuple indexing} to access single components of a tuple expression. \begin{cfa} [int, int] * p = &qr; $\C{// tuple pointer}$ int rem = qr`.1`; $\C{// access remainder}$ int quo = div( 13, 5 )`.0`; $\C{// access quotient}$ p`->0` = 5; $\C{// change quotient}$ bar( qr`.1`, qr ); $\C{// pass remainder and quotient/remainder}$ rem = [div( 13, 5 ), 42]`.0.1`; $\C{// access 2nd component of 1st component}$ \end{cfa} \subsection{Flattening and restructuring} In function call contexts, tuples support implicit flattening and restructuring conversions. Tuple flattening recursively expands a tuple into the list of its basic components. Tuple structuring packages a list of expressions into a value of tuple type. \begin{cfa} int f( int, int ); [int] g( [int, int] ); [int] h( int, [int, int] ); [int, int] x; int y; f( x ); $\C{// flatten}$ g( y, 10 ); $\C{// structure}$ h( x, y ); $\C{// flatten and structure}$ \end{cfa} In the call to @f@, @x@ is implicitly flattened so the components of @x@ are passed as two arguments. In the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the parameter type of @g@. Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both SRVFs and MRVFs with any number of arguments of arbitrarily complex structure. \subsection{Tuple assignment} \enlargethispage{-10pt} An assignment where the left side is a tuple type is called \newterm{tuple assignment}. There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a nontuple type, called \newterm{multiple} and \newterm{mass assignment}, respectively. \begin{cfa} int x = 10; double y = 3.5; [int, double] z; z = [x, y]; $\C{// multiple assignment}$ [x, y] = z; $\C{// multiple assignment}$ z = 10; $\C{// mass assignment}$ [y, x] = 3.14; $\C{// mass assignment}$ \end{cfa} Both kinds of tuple assignment have parallel semantics, so that each value on the left and right sides is evaluated before any assignments occur. As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function, \eg, @[x, y] = [y, x]@. 
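Conceptually, the parallel semantics first evaluate all right-hand values into temporaries and only then perform the assignments, as in the following sketch of the swap for the @int x@/@double y@ example above; the temporaries @t0@ and @t1@ are illustrative, not the translator's actual code.
\begin{cfa}
double t0 = y;  int t1 = x;	$\C{// evaluate all right-hand values first}$
x = t0;  y = t1;	$\C{// then perform the assignments}$
\end{cfa}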
This semantics means mass assignment differs from C cascading assignment (\eg @a = b = c@) in that conversions are applied in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment.
For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, yielding @y == 3.14@ and @x == 3@, whereas C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, yielding @3@ in @y@ and @x@.
Finally, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C.
This example shows mass, multiple, and cascading assignment used in one expression.
\begin{cfa}
[void] f( [int, int] );
f( [x, y] = z = 1.5 );	$\C{// assignments in parameter list}$
\end{cfa}

\subsection{Member access}

It is also possible to access multiple members from a single expression using a \newterm{member access}.
The result is a single tuple-valued expression whose type is the tuple of the types of the members.
\begin{cfa}
struct S { int x; double y; char * z; } s;
s.[x, y, z] = 0;
\end{cfa}
Here, the mass assignment sets all members of @s@ to zero.
Since tuple-index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member-tuple expressions to manually restructure a tuple (\eg rearrange, drop, and duplicate components).
\begin{cfa}
[int, int, long, double] x;
void f( double, long );
x.[0, 1] = x.[1, 0];	$\C{// rearrange: [x.0, x.1] = [x.1, x.0]}$
f( x.[0, 3] );	$\C{// drop: f(x.0, x.3)}$
[int, int, int] y = x.[2, 0, 2];	$\C{// duplicate: [y.0, y.1, y.2] = [x.2, x.0, x.2]}$
\end{cfa}
It is also possible for a member access to contain other member accesses.
\begin{cfa}
struct A { double i; int j; };
struct B { int * k; short l; };
struct C { int x; A y; B z; } v;
v.[x, y.[i, j], z.k];	$\C{// [v.x, [v.y.i, v.y.j], v.z.k]}$
\end{cfa}

\begin{comment}
\subsection{Casting}
In C, the cast operator is used to explicitly convert between types.
In \CFA, the cast operator has a secondary use as type ascription.
That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:
\begin{cfa}
int f();		// (1)
double f();		// (2)
f();			// ambiguous - (1),(2) both equally viable
(int)f();		// choose (2)
\end{cfa}
Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples.
Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:
\begin{cfa}
int f();
void g();
(void)f();		// (1)
(int)g();		// (2)
\end{cfa}
In C, (1) is a valid cast, which calls @f@ and discards its result.
On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical.
Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid.
Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$.
Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression.
This approach follows naturally from the way that a cast to @void@ works in C. For example, in \begin{cfa} [int, int, int] f(); [int, [int, int], int] g(); ([int, double])f(); $\C{// (1)}$ ([int, int, int])g(); $\C{// (2)}$ ([void, [int, int]])g(); $\C{// (3)}$ ([int, int, int, int])g(); $\C{// (4)}$ ([int, [int, int, int]])g(); $\C{// (5)}$ \end{cfa} (1) discards the last element of the return value and converts the second element to @double@. Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@. If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@. Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@). Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions requires more precise matching of types than allowed for function arguments and parameters.}. As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@. \end{comment} \subsection{Polymorphism} Tuples also integrate with \CFA polymorphism as a kind of generic type. Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with nontuple types. \begin{cfa} forall( otype T, dtype U ) void f( T x, U * y ); f( [5, "hello"] ); \end{cfa} Here, @[5, "hello"]@ is flattened, giving argument list @5, "hello"@, and @T@ binds to @int@ and @U@ binds to @const char@. Tuples, however, may contain polymorphic components. For example, a plus operator can be written to sum two triples. \begin{cfa} forall( otype T | { T ?+?( T, T ); } ) [T, T, T] ?+?( [T, T, T] x, [T, T, T] y ) { return [x.0 + y.0, x.1 + y.1, x.2 + y.2]; } [int, int, int] x; int i1, i2, i3; [i1, i2, i3] = x + ([10, 20, 30]); \end{cfa} Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. \begin{cfa} [int] f( [int, double], double ); forall( otype T, otype U | { T f( T, U, U ); } ) void g( T, U ); g( 5, 10.21 ); \end{cfa} \newpage Hence, function parameter and return lists are flattened for the purposes of type unification allowing the example to pass expression resolution. This relaxation is possible by extending the thunk scheme described by Bilson~\cite{Bilson03}. % Whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function: % \begin{cfa} % int _thunk( int _p0, double _p1, double _p2 ) { return f( [_p0, _p1], _p2 ); } % \end{cfa} % so the thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism. % These thunks are generated locally using gcc nested-functions, rather hoisting them to the external scope, so they can easily access local state. 
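For instance, for the @g( 5, 10.21 )@ call above, the inferred @f@ does not match the assertion's flattened parameter list exactly, so a thunk along the following lines can bridge the two; the name @_thunk@ is illustrative.
\begin{cfa}
int _thunk( int _p0, double _p1, double _p2 ) { return f( [_p0, _p1], _p2 ); }	$\C{// restructure arguments}$
\end{cfa}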
\subsection{Variadic tuples} \label{sec:variadic-tuples} To define variadic functions, \CFA adds a new kind of type parameter, \ie @ttype@ (tuple type). Matching against a @ttype@ parameter consumes all the remaining argument components and packages them into a tuple, binding to the resulting tuple of types. In a given parameter list, there must be, at most, one @ttype@ parameter that occurs last, which matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates. As such, @ttype@ variables are also called \newterm{argument packs}. Like variadic templates, @ttype@ polymorphic functions are primarily manipulated via recursion. Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled. For example, the following is a generalized @sum@ function. \begin{cfa} int sum$\(_0\)$() { return 0; } forall( ttype Params | { int sum( Params ); } ) int sum$\(_1\)$( int x, Params rest ) { return x + sum( rest ); } sum( 10, 20, 30 ); \end{cfa} Since @sum@\(_0\) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@. In order to call @sum@\(_1\), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list, and @Params@ is bound to @[20, 30]@. The process continues until @Params@ is bound to @[]@, requiring an assertion @int sum()@, which matches @sum@\(_0\) and terminates the recursion. Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10 + sum(20, 30)@ $\rightarrow$ @10 + (20 + sum(30))@ $\rightarrow$ @10 + (20 + (30 + sum()))@ $\rightarrow$ @10 + (20 + (30 + 0))@. It is reasonable to take the @sum@ function a step further to enforce a minimum number of arguments. \begin{cfa} int sum( int x, int y ) { return x + y; } forall( ttype Params | { int sum( int, Params ); } ) int sum( int x, int y, Params rest ) { return sum( x + y, rest ); } \end{cfa} One more step permits the summation of any sumable type with all arguments of the same type. \begin{cfa} trait sumable( otype T ) { T ?+?( T, T ); }; forall( otype R | sumable( R ) ) R sum( R x, R y ) { return x + y; } forall( otype R, ttype Params | sumable(R) | { R sum(R, Params); } ) R sum(R x, R y, Params rest) { return sum( x + y, rest ); } \end{cfa} Unlike C variadic functions, it is unnecessary to hard code the number and expected types. Furthermore, this code is extendable for any user-defined type with a @?+?@ operator. Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators. It is also possible to write a type-safe variadic print function to replace @printf@: \begin{cfa} struct S { int x, y; }; forall( otype T, ttype Params | { void print(T); void print(Params); } ) void print(T arg, Params rest) { print(arg); print(rest); } void print( const char * x ) { printf( "%s", x ); } void print( int x ) { printf( "%d", x ); } void print( S s ) { print( "{ ", s.x, ",", s.y, " }" ); } print( "s = ", (S){ 1, 2 }, "\n" ); \end{cfa} This example showcases a variadic-template-like decomposition of the provided argument list. The individual @print@ functions allow printing a single element of a type. The polymorphic @print@ allows printing any list of types, where each individual type has a @print@ function. 
The individual print functions can be used to build up more complicated @print@ functions, such as @S@, which cannot be done with @printf@ in C. This mechanism is used to seamlessly print tuples in the \CFA I/O library (see Section~\ref{s:IOLibrary}). Finally, it is possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions. For example, it is possible to write @new@ as a library function. \begin{cfa} forall( otype R, otype S ) void ?{}( pair(R, S) *, R, S ); forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params ); } ) T * new( Params p ) { return ((T *)malloc()){ p }; $\C{// construct into result of malloc}$ } pair( int, char ) * x = new( 42, '!' ); \end{cfa} The @new@ function provides the combination of type-safe @malloc@ with a \CFA constructor call, making it impossible to forget constructing dynamically allocated objects. This function provides the type safety of @new@ in \CC, without the need to specify the allocated type again, due to return-type inference. \subsection{Implementation} Tuples are implemented in the \CFA translator via a transformation into \newterm{generic types}. For each $N$, the first time an $N$-tuple is seen in a scope, a generic type with $N$ type parameters is generated. For example, the following \begin{cfa} [int, int] f() { [double, double] x; [int, double, int] y; } \end{cfa} is transformed into \begin{cfa} forall( dtype T0, dtype T1 | sized(T0) | sized(T1) ) struct _tuple2 { T0 member_0; T1 member_1; $\C{// generated before the first 2-tuple}$ }; _tuple2(int, int) f() { _tuple2(double, double) x; forall( dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2) ) struct _tuple3 { T0 member_0; T1 member_1; T2 member_2; $\C{// generated before the first 3-tuple}$ }; _tuple3(int, double, int) y; } \end{cfa} Tuple expressions are then converted directly into compound literals, \eg @[5, 'x', 1.24]@ becomes @(_tuple3(int, char,@ @double)){ 5, 'x', 1.24 }@. \begin{comment} Since tuples are essentially structures, tuple indexing expressions are just member accesses: \begin{cfa} void f(int, [double, char]); [int, double] x; x.0+x.1; printf("%d %g\n", x); f(x, 'z'); \end{cfa} Is transformed into: \begin{cfa} void f(int, _tuple2(double, char)); _tuple2(int, double) x; x.member_0+x.member_1; printf("%d %g\n", x.member_0, x.member_1); f(x.member_0, (_tuple2){ x.member_1, 'z' }); \end{cfa} Note that due to flattening, @x@ used in the argument position is converted into the list of its members. In the call to @f@, the second and third argument components are structured into a tuple argument. Similarly, tuple member expressions are recursively expanded into a list of member access expressions. Expressions that may contain side effects are made into \newterm{unique expressions} before being expanded by the flattening conversion. Each unique expression is assigned an identifier and is guaranteed to be executed exactly once: \begin{cfa} void g(int, double); [int, double] h(); g(h()); \end{cfa} Internally, this expression is converted to two variables and an expression: \begin{cfa} void g(int, double); [int, double] h(); _Bool _unq0_finished_ = 0; [int, double] _unq0; g( (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0, (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1, ); \end{cfa} Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order. 
The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true. Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. Tuple member expressions also take advantage of unique expressions in the case of possible impurity. Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions. The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. A variable is generated to store the value produced by a statement expression, since its members may need to be constructed with a nontrivial constructor and it may need to be referred to multiple time, \eg in a unique expression. The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new. \end{comment} \section{Control Structures} \CFA identifies inconsistent, problematic, and missing control structures in C, as well as extends, modifies, and adds control structures to increase functionality and safety. \subsection{\texorpdfstring{\protect\lstinline@if@ statement}{if statement}} The @if@ expression allows declarations, similar to the @for@ declaration expression. \begin{cfa} if ( int x = f() ) ... $\C{// x != 0}$ if ( int x = f(), y = g() ) ... $\C{// x != 0 \&\& y != 0}$ if ( int x = f(), y = g(); `x < y` ) ... $\C{// relational expression}$ \end{cfa} Unless a relational expression is specified, each variable is compared not equal to 0, which is the standard semantics for the @if@ expression, and the results are combined using the logical @&&@ operator.\footnote{\CC only provides a single declaration always compared not equal to 0.} The scope of the declaration(s) is local to the @if@ statement but exists within both the ``then'' and ``else'' clauses. \subsection{\texorpdfstring{\protect\lstinline@switch@ statement}{switch statement}} There are a number of deficiencies with the C @switch@ statements: enumerating @case@ lists, placement of @case@ clauses, scope of the switch body, and fall through between case clauses. C has no shorthand for specifying a list of case values, whether the list is noncontiguous or contiguous\footnote{C provides this mechanism via fall through.}. 
\CFA provides a shorthand for a noncontiguous list: \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} case 2, 10, 34, 42: \end{cfa} & \begin{cfa} case 2: case 10: case 34: case 42: \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} for a contiguous list:\footnote{gcc has the same mechanism but awkward syntax, \lstinline@2 ...42@, as a space is required after a number; otherwise, the first period is a decimal point.} \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} case 2~42: \end{cfa} & \begin{cfa} case 2: case 3: ... case 41: case 42: \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} and a combination: \begin{cfa} case -12~-4, -1~5, 14~21, 34~42: \end{cfa} C allows placement of @case@ clauses \emph{within} statements nested in the @switch@ body (called Duff's device~\cite{Duff83}); \begin{cfa} switch ( i ) { case 0: for ( int i = 0; i < 10; i += 1 ) { ... `case 1:` // no initialization of loop index ... } } \end{cfa} \CFA precludes this form of transfer \emph{into} a control structure because it causes an undefined behavior, especially with respect to missed initialization, and provides very limited functionality. C allows placement of declaration within the @switch@ body and unreachable code at the start, resulting in an undefined behavior. \begin{cfa} switch ( x ) { `int y = 1;` $\C{// unreachable initialization}$ `x = 7;` $\C{// unreachable code without label/branch}$ case 0: ... `int z = 0;` $\C{// unreachable initialization, cannot appear after case}$ z = 2; case 1: `x = z;` $\C{// without fall through, z is undefined}$ } \end{cfa} \CFA allows the declaration of local variables, \eg @y@, at the start of the @switch@ with scope across the entire @switch@ body, \ie all @case@ clauses. \CFA disallows the declaration of local variable, \eg @z@, directly within the @switch@ body, because a declaration cannot occur immediately after a @case@ since a label can only be attached to a statement, and the use of @z@ is undefined in @case 1@ as neither storage allocation nor initialization may have occurred. C @switch@ provides multiple entry points into the statement body, but once an entry point is selected, control continues across \emph{all} @case@ clauses until the end of the @switch@ body, called \newterm{fall through}; @case@ clauses are made disjoint by the @break@ \newpage \noindent statement. While fall through \emph{is} a useful form of control flow, it does not match well with programmer intuition, resulting in errors from missing @break@ statements. For backward compatibility, \CFA provides a \emph{new} control structure, \ie @choose@, which mimics @switch@, but reverses the meaning of fall through (see Figure~\ref{f:ChooseSwitchStatements}), similar to Go. 
\begin{figure} \centering \fontsize{9bp}{11bp}\selectfont \lstDeleteShortInline@% \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} \multicolumn{1}{@{}c|@{\hspace{\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} `choose` ( day ) { case Mon~Thu: // program case Fri: // program wallet += pay; `fallthrough;` case Sat: // party wallet -= party; case Sun: // rest default: // print error } \end{cfa} & \begin{cfa} switch ( day ) { case Mon: case Tue: case Wed: case Thu: // program `break;` case Fri: // program wallet += pay; case Sat: // party wallet -= party; `break;` case Sun: // rest `break;` default: // print error } \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{\lstinline|choose| versus \lstinline|switch| statements} \label{f:ChooseSwitchStatements} \vspace*{-11pt} \end{figure} Finally, Figure~\ref{f:FallthroughStatement} shows @fallthrough@ may appear in contexts other than terminating a @case@ clause and have an explicit transfer label allowing separate cases but common final code for a set of cases. The target label must be below the @fallthrough@ and may not be nested in a control structure, \ie @fallthrough@ cannot form a loop, and the target label must be at the same or higher level as the containing @case@ clause and located at the same level as a @case@ clause; the target label may be case @default@, but only associated with the current @switch@/@choose@ statement. \begin{figure} \centering \fontsize{9bp}{11bp}\selectfont \lstDeleteShortInline@% \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} \multicolumn{1}{@{}c|@{\hspace{\parindentlnth}}}{\textbf{non-terminator}} & \multicolumn{1}{c@{}}{\textbf{target label}} \\ \begin{cfa} choose ( ... ) { case 3: if ( ... ) { ... `fallthrough;` // goto case 4 } else { ... } // implicit break case 4: \end{cfa} & \begin{cfa} choose ( ... ) { case 3: ... `fallthrough common;` case 4: ... `fallthrough common;` `common`: // below fallthrough at same level as case clauses ... // common code for cases 3 and 4 // implicit break case 4: \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{\lstinline|fallthrough| statement} \label{f:FallthroughStatement} \vspace*{-11pt} \end{figure} \vspace*{-8pt} \subsection{\texorpdfstring{Labeled \protect\lstinline@continue@ / \protect\lstinline@break@}{Labeled continue / break}} While C provides @continue@ and @break@ statements for altering control flow, both are restricted to one level of nesting for a particular control structure. Unfortunately, this restriction forces programmers to use @goto@ to achieve the equivalent control flow for more than one level of nesting. To prevent having to switch to the @goto@, \CFA extends @continue@ and @break@ with a target label to support static multilevel exit~\cite{Buhr85}, as in Java. For both @continue@ and @break@, the target label must be directly associated with a @for@, @while@ or @do@ statement; for @break@, the target label can also be associated with a @switch@, @if@ or compound (@{}@) statement. Figure~\ref{f:MultiLevelExit} shows @continue@ and @break@ indicating the specific control structure and the corresponding C program using only @goto@ and labels. The innermost loop has seven exit points, which cause a continuation or termination of one or more of the seven nested control structures. 
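As a minimal sketch of the mechanism, labeled @continue@/@break@ on a pair of nested loops can be written as follows.
\begin{cfa}
`L:` for ( int i = 0; i < 10; i += 1 ) {
	for ( int j = 0; j < 10; j += 1 ) {
		if ( i == j ) continue `L`;		// start next iteration of the labeled (outer) loop
		if ( i + j > 15 ) break `L`;		// terminate the labeled loop, hence both loops
	}
}
\end{cfa}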
\begin{figure} \fontsize{9bp}{11bp}\selectfont \lstDeleteShortInline@% \begin{tabular}{@{\hspace{\parindentlnth}}l|@{\hspace{\parindentlnth}}l@{\hspace{\parindentlnth}}l@{}} \multicolumn{1}{@{\hspace{\parindentlnth}}c|@{\hspace{\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{@{\hspace{\parindentlnth}}c@{}}{\textbf{C}} \\ \begin{cfa} `LC:` { ... $declarations$ ... `LS:` switch ( ... ) { case 3: `LIF:` if ( ... ) { `LF:` for ( ... ) { ... break `LC`; ... ... break `LS`; ... ... break `LIF`; ... ... continue `LF;` ... ... break `LF`; ... } // for } else { ... break `LIF`; ... } // if } // switch } // compound \end{cfa} & \begin{cfa} { ... $declarations$ ... switch ( ... ) { case 3: if ( ... ) { for ( ... ) { ... goto `LC`; ... ... goto `LS`; ... ... goto `LIF`; ... ... goto `LFC`; ... ... goto `LFB`; ... `LFC:` ; } `LFB:` ; } else { ... goto `LIF`; ... } `LIF:` ; } `LS:` ; } `LC:` ; \end{cfa} & \begin{cfa} // terminate compound // terminate switch // terminate if // continue loop // terminate loop // terminate if \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{Multilevel exit} \label{f:MultiLevelExit} \vspace*{-5pt} \end{figure} With respect to safety, both labeled @continue@ and @break@ are @goto@ restricted in the following ways. \begin{list}{$\bullet$}{\topsep=4pt\itemsep=0pt\parsep=0pt} \item They cannot create a loop, which means only the looping constructs cause looping. This restriction means all situations resulting in repeated execution are clearly delineated. \item They cannot branch into a control structure. This restriction prevents missing declarations and/or initializations at the start of a control structure resulting in an undefined behavior. \end{list} The advantage of the labeled @continue@/@break@ is allowing static multilevel exits without having to use the @goto@ statement and tying control flow to the target control structure rather than an arbitrary point in a program. Furthermore, the location of the label at the \emph{beginning} of the target control structure informs the reader (eye candy) that complex control flow is occurring in the body of the control structure. With @goto@, the label is at the end of the control structure, which fails to convey this important clue early enough to the reader. Finally, using an explicit target for the transfer instead of an implicit target allows new constructs to be added or removed without affecting the existing constructs. Otherwise, the implicit targets of the current @continue@ and @break@, \ie the closest enclosing loop or @switch@, change as certain constructs are added or removed. \vspace*{-5pt} \subsection{Exception handling} The following framework for \CFA exception handling is in place, excluding some runtime type information and virtual functions. \CFA provides two forms of exception handling: \newterm{fix-up} and \newterm{recovery} (see Figure~\ref{f:CFAExceptionHandling})~\cite{Buhr92b,Buhr00a}. Both mechanisms provide dynamic call to a handler using dynamic name lookup, where fix-up has dynamic return and recovery has static return from the handler. \CFA restricts exception types to those defined by aggregate type @exception@. The form of the raise dictates the set of handlers examined during propagation: \newterm{resumption propagation} (@resume@) only examines resumption handlers (@catchResume@); \newterm{terminating propagation} (@throw@) only examines termination handlers (@catch@). 
If @resume@ or @throw@ has no exception type, it is a reresume/rethrow, which means that the current exception continues propagation. If there is no current exception, the reresume/rethrow results in a runtime error. \begin{figure} \fontsize{9bp}{11bp}\selectfont \lstDeleteShortInline@% \begin{cquote} \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} \multicolumn{1}{@{}c|@{\hspace{\parindentlnth}}}{\textbf{Resumption}} & \multicolumn{1}{c@{}}{\textbf{Termination}} \\ \begin{cfa} `exception R { int fix; };` void f() { R r; ... `resume( r );` ... ... r.fix // control returns here after handler } `try` { ... f(); ... } `catchResume( R r )` { ... r.fix = ...; // return correction to raise } // dynamic return to _Resume \end{cfa} & \begin{cfa} `exception T {};` void f() { ... `throw( T{} );` ... // control does NOT return here after handler } `try` { ... f(); ... } `catch( T t )` { ... // recover and continue } // static return to next statement \end{cfa} \end{tabular} \end{cquote} \lstMakeShortInline@% \caption{\CFA exception handling} \label{f:CFAExceptionHandling} \vspace*{-5pt} \end{figure} The set of exception types in a list of catch clauses may include both a resumption and a termination handler. \begin{cfa} try { ... resume( `R{}` ); ... } catchResume( `R` r ) { ... throw( R{} ); ... } $\C{\color{red}// H1}$ catch( `R` r ) { ... } $\C{\color{red}// H2}$ \end{cfa} The resumption propagation raises @R@ and the stack is not unwound; the exception is caught by the @catchResume@ clause and handler H1 is invoked. The termination propagation in handler H1 raises @R@ and the stack is unwound; the exception is caught by the @catch@ clause and handler H2 is invoked. The termination handler is available because the resumption propagation did not unwind the stack. An additional feature is conditional matching in a catch clause. \begin{cfa} try { ... write( `datafile`, ... ); ... $\C{// may throw IOError}$ ... write( `logfile`, ... ); ... } catch ( IOError err; `err.file == datafile` ) { ... } $\C{// handle datafile error}$ catch ( IOError err; `err.file == logfile` ) { ... } $\C{// handle logfile error}$ catch ( IOError err ) { ... } $\C{// handler error from other files}$ \end{cfa} Here, the throw inserts the failing file handle into the I/O exception. Conditional catch cannot be trivially mimicked by other mechanisms because once an exception is caught, handler clauses in that @try@ statement are no longer eligible. The resumption raise can specify an alternate stack on which to raise an exception, called a \newterm{nonlocal raise}. \begin{cfa} resume( $\emph{exception-type}$, $\emph{alternate-stack}$ ) resume( $\emph{alternate-stack}$ ) \end{cfa} These overloads of @resume@ raise the specified exception or the currently propagating exception (reresume) at another \CFA coroutine or task~\cite{Delisle18}. Nonlocal raise is restricted to resumption to provide the exception handler the greatest flexibility because processing the exception does not unwind its stack, allowing it to continue after the handler returns. To facilitate nonlocal raise, \CFA provides dynamic enabling and disabling of nonlocal exception propagation. The constructs for controlling propagation of nonlocal exceptions are the @enable@ and @disable@ blocks. 
\begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \begin{cfa} enable $\emph{exception-type-list}$ { // allow nonlocal raise } \end{cfa} & \begin{cfa} disable $\emph{exception-type-list}$ { // disallow nonlocal raise } \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The arguments for @enable@/@disable@ specify the exception types allowed to be propagated or postponed, respectively. Specifying no exception type is shorthand for specifying all exception types. Both @enable@ and @disable@ blocks can be nested, turning propagation on/off on block entry; on block exit, the specified exception types are restored to their prior state. Coroutines and tasks start with nonlocal exceptions disabled, allowing handlers to be put in place, before nonlocal exceptions are explicitly enabled. \begin{cfa} void main( mytask & t ) { $\C{// thread starts here}$ // nonlocal exceptions disabled try { $\C{// establish handlers for nonlocal exceptions}$ enable { $\C{// allow nonlocal exception delivery}$ // task body } // appropriate catchResume/catch handlers } } \end{cfa} Finally, \CFA provides a Java-like @finally@ clause after the catch clauses. \begin{cfa} try { ... f(); ... // catchResume or catch clauses } `finally` { // housekeeping } \end{cfa} The finally clause is always executed, \ie whether the try block ends normally or an exception is raised. If an exception is raised and caught, the handler is run before the finally clause. Like a destructor (see Section~\ref{s:ConstructorsDestructors}), a finally clause can raise an exception but not if there is an exception being propagated. Mimicking the @finally@ clause with mechanisms like Resource Acquisition Is Initialization (RAII) is nontrivial when there are multiple types and local accesses. \subsection{\texorpdfstring{\protect\lstinline{with} statement}{with statement}} \label{s:WithStatement} Heterogeneous data are often aggregated into a structure/union. To reduce syntactic noise, \CFA provides a @with@ statement (see section~4.F in the Pascal User Manual and Report~\cite{Pascal}) to elide aggregate member qualification by opening a scope containing the member identifiers. \begin{cquote} \vspace*{-\baselineskip}%??? \lstDeleteShortInline@% \begin{cfa} struct S { char c; int i; double d; }; struct T { double m, n; }; // multiple aggregate parameters \end{cfa} \begin{tabular}{@{}l@{\hspace{\parindentlnth}}|@{\hspace{\parindentlnth}}l@{}} \begin{cfa} void f( S & s, T & t ) { `s.`c; `s.`i; `s.`d; `t.`m; `t.`n; } \end{cfa} & \begin{cfa} void f( S & s, T & t ) `with ( s, t )` { c; i; d; // no qualification m; n; } \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} Object-oriented programming languages only provide implicit qualification for the receiver. In detail, the @with@ statement has the form \begin{cfa} $\emph{with-statement}$: 'with' '(' $\emph{expression-list}$ ')' $\emph{compound-statement}$ \end{cfa} and may appear as the body of a function or nested within a function body. Each expression in the expression list provides a type and object. The type must be an aggregate type. (Enumerations are already opened.) The object is the implicit qualifier for the open structure members. All expressions in the expression list are open in parallel within the compound statement, unlike Pascal, which nests the openings from left to right. The difference between parallel and nesting occurs for members with the same name and type.
\begin{cfa} struct S { int `i`; int j; double m; } s, w; $\C{// member i has same type in structure types S and T}$ struct T { int `i`; int k; int m; } t, w; with ( s, t ) { $\C{// open structure variables s and t in parallel}$ j + k; $\C{// unambiguous, s.j + t.k}$ m = 5.0; $\C{// unambiguous, s.m = 5.0}$ m = 1; $\C{// unambiguous, t.m = 1}$ int a = m; $\C{// unambiguous, a = t.m }$ double b = m; $\C{// unambiguous, b = s.m}$ int c = s.i + t.i; $\C{// unambiguous, qualification}$ (double)m; $\C{// unambiguous, cast s.m}$ } \end{cfa} For parallel semantics, both @s.i@ and @t.i@ are visible and, therefore, @i@ is ambiguous without qualification; for nested semantics, @t.i@ hides @s.i@ and, therefore, @i@ implies @t.i@. \CFA's ability to overload variables means members with the same name but different types are automatically disambiguated, eliminating most qualification when opening multiple aggregates. Qualification or a cast is used to disambiguate. There is an interesting problem between parameters and the function body @with@. \begin{cfa} void ?{}( S & s, int i ) with ( s ) { $\C{// constructor}$ `s.i = i;` j = 3; m = 5.5; $\C{// initialize members}$ } \end{cfa} Here, the assignment @s.i = i@ means @s.i = s.i@, which is meaningless, and there is no mechanism to qualify the parameter @i@, making the assignment impossible using the function body @with@. To solve this problem, parameters are treated like an initialized aggregate \begin{cfa} struct Params { S & s; int i; } params; \end{cfa} \newpage and implicitly opened \emph{after} a function body open, to give them higher priority \begin{cfa} void ?{}( S & s, int `i` ) with ( s ) `{` `with( $\emph{\color{red}params}$ )` { s.i = `i`; j = 3; m = 5.5; } `}` \end{cfa} Finally, a cast may be used to disambiguate among overload variables in a @with@ expression \begin{cfa} with ( w ) { ... } $\C{// ambiguous, same name and no context}$ with ( (S)w ) { ... } $\C{// unambiguous, cast}$ \end{cfa} and @with@ expressions may be complex expressions with type reference (see Section~\ref{s:References}) to aggregate \begin{cfa} struct S { int i, j; } sv; with ( sv ) { $\C{// implicit reference}$ S & sr = sv; with ( sr ) { $\C{// explicit reference}$ S * sp = &sv; with ( *sp ) { $\C{// computed reference}$ i = 3; j = 4; $\C{\color{red}// sp--{\textgreater}i, sp--{\textgreater}j}$ } i = 2; j = 3; $\C{\color{red}// sr.i, sr.j}$ } i = 1; j = 2; $\C{\color{red}// sv.i, sv.j}$ } \end{cfa} Collectively, these control-structure enhancements reduce programmer burden and increase readability and safety. \section{Declarations} Declarations in C have weaknesses and omissions. \CFA attempts to correct and add to C declarations, while ensuring \CFA subjectively ``feels like'' C. An important part of this subjective feel is maintaining C's syntax and procedural paradigm, as opposed to functional and object-oriented approaches in other systems languages such as \CC and Rust. Maintaining the C approach means that C coding patterns remain not only useable but idiomatic in \CFA, reducing the mental burden of retraining C programmers and switching between C and \CFA development. Nevertheless, some features from other approaches are undeniably convenient; \CFA attempts to adapt these features to the C paradigm. \subsection{Alternative declaration syntax} C declaration syntax is notoriously confusing and error prone. For example, many C programmers are confused by a declaration as simple as the following. 
\begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}ll@{}} \begin{cfa} int * x[5] \end{cfa} & \raisebox{-0.75\totalheight}{\input{Cdecl}} \end{tabular} \lstMakeShortInline@% \end{cquote} Is this an array of five pointers to integers or a pointer to an array of five integers? If there is any doubt, it implies productivity and safety issues even for basic programs. Another example of confusion results from the fact that a function name and its parameters are embedded within the return type, mimicking the way the return value is used at the function's call site. For example, a function returning a pointer to an array of integers is defined and used in the following way. \begin{cfa} int `(*`f`())[`5`]` {...}; $\C{// definition}$ ... `(*`f`())[`3`]` += 1; $\C{// usage}$ \end{cfa} Essentially, the return type is wrapped around the function name in successive layers (like an onion). While attempting to make the two contexts consistent is a laudable goal, it has not worked out in practice. \newpage \CFA provides its own type, variable, and function declarations, using a different syntax~\cite[pp.~856--859]{Buhr94a}. The new declarations place qualifiers to the left of the base type, whereas C declarations place qualifiers to the right. The qualifiers have the same meaning but are ordered left to right to specify a variable's type. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} `[5] *` int x1; `* [5]` int x2; `[* [5] int]` f( int p ); \end{cfa} & \begin{cfa} int `*` x1 `[5]`; int `(*`x2`)[5]`; `int (*`f( int p )`)[5]`; \end{cfa} & \begin{cfa} // array of 5 pointers to int // pointer to array of 5 int // function returning pointer to array of 5 int and taking int \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The only exception is bit-field specification, which always appears to the right of the base type. % Specifically, the character @*@ is used to indicate a pointer, square brackets @[@\,@]@ are used to represent an array or function return value, and parentheses @()@ are used to indicate a function parameter. However, unlike C, \CFA type declaration tokens are distributed across all variables in the declaration list. For instance, variables @x@ and @y@ of type pointer to integer are defined in \CFA as \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} `*` int x, y; int z; \end{cfa} & \begin{cfa} int `*`x, `*`y, z; \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} % The downside of the \CFA semantics is the need to separate regular and pointer declarations. The separation of regular and pointer declarations by \CFA declarations enforces greater clarity with only slightly more syntax. 
\begin{comment} Other examples are: \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{C}} \\ \begin{cfa} [ 5 ] int z; [ 5 ] * char w; * [ 5 ] double v; struct s { int f0:3; * int f1; [ 5 ] * int f2; }; \end{cfa} & \begin{cfa} int z[ 5 ]; char * w[ 5 ]; double (* v)[ 5 ]; struct s { int f0:3; int * f1; int * f2[ 5 ] }; \end{cfa} & \begin{cfa} // array of 5 integers // array of 5 pointers to char // pointer to array of 5 doubles // common bit-field syntax \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} \end{comment} All specifiers (@extern@, @static@, \etc) and qualifiers (@const@, @volatile@, \etc) are used in the normal way with the new declarations and also appear left to right. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{C}} \\ \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{12bp}\selectfont\sf] extern const * const int x; static const * [5] const int y; \end{cfa} & \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{12bp}\selectfont\sf] int extern const * const x; static const int (* const y)[5] \end{cfa} & \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{12bp}\selectfont\sf] // external const pointer to const int // internal const pointer to array of 5 const int \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} Specifiers must appear at the start of a \CFA function declaration\footnote{\label{StorageClassSpecifier} The placement of a storage-class specifier other than at the beginning of the declaration specifiers in a declaration is an obsolescent feature (see section~6.11.5(1) in ISO/IEC 9899~\cite{C11}).}. The new declaration syntax can be used in other contexts where types are required, \eg casts and the pseudo-function @sizeof@: \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} y = (* int)x; i = sizeof([ 5 ] * int); \end{cfa} & \begin{cfa} y = (int *)x; i = sizeof(int * [ 5 ]); \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The syntax of the new function-prototype declaration follows directly from the new function-definition syntax; also, parameter names are optional. \begin{cfa} [ int x ] f ( /* void */ ); $\C[2.5in]{// returning int with no parameters}$ [ int x ] f (...); $\C{// returning int with unknown parameters}$ [ * int ] g ( int y ); $\C{// returning pointer to int with int parameter}$ [ void ] h ( int, char ); $\C{// returning no result with int and char parameters}$ [ * int, int ] j ( int ); $\C{// returning pointer to int and int with int parameter}$ \end{cfa} This syntax allows a prototype declaration to be created by cutting and pasting the source text from the function-definition header (or vice versa). Like C, it is possible to declare multiple function prototypes in a single declaration, where the return type is distributed across \emph{all} function names in the declaration list. 
\begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} [double] foo(), foo( int ), foo( double ) {...} \end{cfa} & \begin{cfa} double foo1( void ), foo2( int ), foo3( double ); \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} Here, \CFA allows the last function in the list to define its body. The syntax for pointers to \CFA functions specifies the pointer name on the right. \begin{cfa} * [ int x ] () fp; $\C{// pointer to function returning int with no parameters}$ * [ * int ] ( int y ) gp; $\C{// pointer to function returning pointer to int with int parameter}$ * [ ] ( int, char ) hp; $\C{// pointer to function returning no result with int and char parameters}$ * [ * int, int ] ( int ) jp; $\C{// pointer to function returning pointer to int and int with int parameter}\CRT$ \end{cfa} \newpage \noindent Note that the name of the function pointer is specified last, as for other variable declarations. Finally, new \CFA declarations may appear together with C declarations in the same program block but cannot be mixed within a specific declaration. Therefore, a programmer has the option of either continuing to use traditional C declarations or taking advantage of the new style. Clearly, both styles need to be supported for some time due to existing C-style header files, particularly for UNIX-like systems. \subsection{References} \label{s:References} All variables in C have an \newterm{address}, a \newterm{value}, and a \newterm{type}; at the position in the program's memory denoted by the address, there exists a sequence of bits (the value), with the length and semantic meaning of this bit sequence defined by the type. The C type system does not always track the relationship between a value and its address; a value that does not have a corresponding address is called an \newterm{rvalue} (for ``right-hand value''), whereas a value that does have an address is called an \newterm{lvalue} (for ``left-hand value''). For example, in @int x; x = 42;@ the variable expression @x@ on the left-hand side of the assignment is an lvalue, whereas the constant expression @42@ on the right-hand side of the assignment is an rvalue. Despite the nomenclature of ``left-hand'' and ``right-hand'', an expression's classification as an lvalue or an rvalue is entirely dependent on whether it has an address or not; in imperative programming, the address of a value is used for both reading and writing (mutating) a value, and as such, lvalues can be converted into rvalues and read from, but rvalues cannot be mutated because they lack a location to store the updated value. Within a lexical scope, lvalue expressions have an \newterm{address interpretation} for writing a value or a \newterm{value interpretation} to read a value. For example, in @x = y@, @x@ has an address interpretation, whereas @y@ has a value interpretation. While this duality of interpretation is useful, C lacks a direct mechanism to pass lvalues between contexts, instead relying on \newterm{pointer types} to serve a similar purpose. In C, for any type @T@ there is a pointer type @T *@, the value of which is the address of a value of type @T@. A pointer rvalue can be explicitly \newterm{dereferenced} to the pointed-to lvalue with the dereference operator @*?@, whereas the rvalue representing the address of an lvalue can be obtained with the address-of operator @&?@. 
\begin{cfa} int x = 1, y = 2, * p1, * p2, ** p3; p1 = &x; $\C{// p1 points to x}$ p2 = &y; $\C{// p2 points to y}$ p3 = &p1; $\C{// p3 points to p1}$ *p2 = ((*p1 + *p2) * (**p3 - *p1)) / (**p3 - 15); \end{cfa} Unfortunately, the dereference and address-of operators introduce a great deal of syntactic noise when dealing with pointed-to values rather than pointers, as well as the potential for subtle bugs because of pointer arithmetic. For both brevity and clarity, it is desirable for the compiler to figure out how to elide the dereference operators in a complex expression such as the assignment to @*p2@ above. However, since C defines a number of forms of \newterm{pointer arithmetic}, two similar expressions involving pointers to arithmetic types (\eg @*p1 + x@ and @p1 + x@) may each have well-defined but distinct semantics, introducing the possibility that a programmer may write one when they mean the other and precluding any simple algorithm for elision of dereference operators. To solve these problems, \CFA introduces reference types @T &@; a @T &@ has exactly the same value as a @T *@, but where the @T *@ takes the address interpretation by default, a @T &@ takes the value interpretation by default, as below. \begin{cfa} int x = 1, y = 2, & r1, & r2, && r3; &r1 = &x; $\C{// r1 points to x}$ &r2 = &y; $\C{// r2 points to y}$ &&r3 = &&r1; $\C{// r3 points to r1}$ r2 = ((r1 + r2) * (r3 - r1)) / (r3 - 15); $\C{// implicit dereferencing}$ \end{cfa} Except for auto-dereferencing by the compiler, this reference example is exactly the same as the previous pointer example. Hence, a reference behaves like a variable name---an lvalue expression that is interpreted as a value---but also has the type system track the address of that value. One way to conceptualize a reference is via a rewrite rule, where the compiler inserts a dereference operator before the reference variable for each reference qualifier in the reference variable declaration; thus, the previous example implicitly acts like the following. \begin{cfa} `*`r2 = ((`*`r1 + `*`r2) * (`**`r3 - `*`r1)) / (`**`r3 - 15); \end{cfa} References in \CFA are similar to those in \CC, with important improvements, which can be seen in the example above. Firstly, \CFA does not forbid references to references. This provides a much more orthogonal design for library \mbox{implementors}, obviating the need for workarounds such as @std::reference_wrapper@. Secondly, \CFA references are rebindable, whereas \CC references have a fixed address. Rebinding allows \CFA references to be default initialized (\eg to a null pointer\footnote{ While effort has been put into non-null reference checking in \CC and Java, the exercise seems moot for any nonmanaged languages (C/\CC), given that it only handles one of many different error situations, \eg using a pointer after its storage is deleted.}) and point to different addresses throughout their lifetime, like pointers. Rebinding is accomplished by extending the existing syntax and semantics of the address-of operator in C. In C, the address of an lvalue is always an rvalue, as, in general, that address is not stored anywhere in memory and does not itself have an address. In \CFA, the address of a @T &@ is an lvalue @T *@, as the address of the underlying @T@ is stored in the reference and can thus be mutated there. The result of this rule is that any reference can be rebound using the existing pointer assignment semantics by assigning a compatible pointer into the address of the reference, \eg @&r1 = &x;@ above.
This rebinding occurs to an arbitrary depth of reference nesting; loosely speaking, nested address-of operators produce a nested lvalue pointer up to the depth of the reference. These explicit address-of operators can be thought of as ``cancelling out'' the implicit dereference operators, \eg @(&`*`)r1 = &x@ or @(&(&`*`)`*`)r3 = &(&`*`)r1@ or even @(&`*`)r2 = (&`*`)`*`r3@ for @&r2 = &r3@. The precise rules are \begin{itemize} \item If @R@ is an rvalue of type @T &@$_1\cdots$ @&@$_r$, where $r \ge 1$ references (@&@ symbols), then @&R@ has type @T `*`&@$_{\color{red}2}\cdots$ @&@$_{\color{red}r}$, \ie @T@ pointer with $r-1$ references (@&@ symbols). \item If @L@ is an lvalue of type @T &@$_1\cdots$ @&@$_l$, where $l \ge 0$ references (@&@ symbols), then @&L@ has type @T `*`&@$_{\color{red}1}\cdots$ @&@$_{\color{red}l}$, \ie @T@ pointer with $l$ references (@&@ symbols). \end{itemize} Since pointers and references share the same internal representation, code using either is equally performant; in fact, the \CFA compiler converts references into pointers internally, and the choice between them is made solely on convenience, \eg many pointer or value accesses. By analogy to pointers, \CFA references also allow cv-qualifiers such as @const@: \begin{cfa} const int cx = 5; $\C{// cannot change cx}$ const int & cr = cx; $\C{// cannot change cr's referred value}$ &cr = &cx; $\C{// rebinding cr allowed}$ cr = 7; $\C{// ERROR, cannot change cr}$ int & const rc = x; $\C{// must be initialized, like in \CC}$ &rc = &x; $\C{// ERROR, cannot rebind rc}$ rc = 7; $\C{// x now equal to 7}$ \end{cfa} Given that a reference is meant to represent an lvalue, \CFA provides some syntactic shortcuts when initializing references. There are three initialization contexts in \CFA: declaration initialization, argument/parameter binding, and return/temporary binding. In each of these contexts, the address-of operator on the target lvalue is elided. The syntactic motivation is clearest when considering overloaded operator assignment, \eg @int ?+=?(int &, int)@; given @int x, y@, the expected call syntax is @x += y@, not @&x += y@. More generally, this initialization of references from lvalues rather than pointers is an instance of an ``lvalue-to-reference'' conversion rather than an elision of the address-of operator; this conversion is used in any context in \CFA where an implicit conversion is allowed. Similarly, use of the value pointed to by a reference in an rvalue context can be thought of as a ``reference-to-rvalue'' conversion, and \CFA also includes a qualifier-adding ``reference-to-reference'' conversion, analogous to the @T *@ to @const T *@ conversion in standard C. The final reference conversion included in \CFA is an ``rvalue-to-reference'' conversion, implemented by means of an implicit temporary. When an rvalue is used to initialize a reference, it is instead used to initialize a hidden temporary value with the same lexical scope as the reference, and the reference is initialized to the address of this temporary. \begin{cfa} struct S { double x, y; }; int x, y; void f( int & i, int & j, S & s, int v[] ); f( 3, x + y, (S){ 1.0, 7.0 }, (int [3]){ 1, 2, 3 } ); $\C{// pass rvalue to lvalue \(\Rightarrow\) implicit temporary}$ \end{cfa} This allows complex values to be succinctly and efficiently passed to functions, without the syntactic overhead of the explicit definition of a temporary variable or the runtime cost of pass-by-value.
\CC allows a similar binding, but only for @const@ references; the more general semantics of \CFA are an attempt to avoid the \newterm{const poisoning} problem~\cite{Taylor10}, in which the addition of a @const@ qualifier to one reference requires a cascading chain of added qualifiers. \subsection{Type nesting} Nested types provide a mechanism to organize associated types and refactor a subset of members into a named aggregate (\eg subaggregates @name@, @address@, @department@, within aggregate @employee@). Java nested types are dynamic (apply to objects), \CC nested types are static (apply to the \lstinline[language=C++]@class@), and C hoists (refactors) nested types into the enclosing scope, which means there is no need for type qualification. Since \CFA is not object oriented, adopting dynamic scoping does not make sense; instead, \CFA adopts \CC static nesting, using the member-selection operator ``@.@'' for type qualification, as does Java, rather than the \CC type-selection operator ``@::@'' (see Figure~\ref{f:TypeNestingQualification}). In the C left example, types @C@, @U@ and @T@ are implicitly hoisted outside of type @S@ into the containing block scope. In the \CFA right example, the types are not hoisted and are accessible only through type qualification. \begin{figure} \centering \fontsize{9bp}{11bp}\selectfont\sf \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{3em}}l|l@{}} \multicolumn{1}{c@{\hspace{3em}}}{\textbf{C Type Nesting}} & \multicolumn{1}{c|}{\textbf{C Implicit Hoisting}} & \multicolumn{1}{c}{\textbf{\CFA}} \\ \begin{cfa} struct S { enum C { R, G, B }; struct T { union U { int i, j; }; enum C c; short int i, j; }; struct T t; } s; int rtn() { s.t.c = R; struct T t = { R, 1, 2 }; enum C c; union U u; } \end{cfa} & \begin{cfa} enum C { R, G, B }; union U { int i, j; }; struct T { enum C c; short int i, j; }; struct S { struct T t; } s; \end{cfa} & \begin{cfa} struct S { enum C { R, G, B }; struct T { union U { int i, j; }; enum C c; short int i, j; }; struct T t; } s; int rtn() { s.t.c = `S.`R; // type qualification struct `S.`T t = { `S.`R, 1, 2 }; enum `S.`C c; union `S.T.`U u; } \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{Type nesting / qualification} \label{f:TypeNestingQualification} \vspace*{-8pt} \end{figure} \vspace*{-8pt} \subsection{Constructors and destructors} \label{s:ConstructorsDestructors} One of the strengths (and weaknesses) of C is memory-management control, allowing resource release to be precisely specified versus unknown release with garbage-collected memory management. However, this manual approach is verbose, and it is useful to manage resources other than memory (\eg file handles) using the same mechanism as memory. \CC addresses these issues using RAII, implemented by means of \newterm{constructor} and \newterm{destructor} functions; \CFA adopts constructors and destructors (and @finally@) to facilitate RAII. While constructors and destructors are a common feature of object-oriented programming languages, they are an independent capability allowing \CFA to adopt them while retaining a procedural paradigm. Specifically, \CFA constructors and destructors are denoted by name and first parameter type versus name and nesting in an aggregate type. Constructor calls seamlessly integrate with existing C initialization syntax, providing a simple and familiar syntax to C programmers and allowing constructor calls to be inserted into legacy C code with minimal code changes.
In \CFA, a constructor is named @?{}@ and a destructor is named @^?{}@\footnote{% The symbol \lstinline+^+ is used for the destructor name because it was the last binary operator that could be used in a unary context.}. The name @{}@ comes from the syntax for the initializer: @struct S { int i, j; } s = `{` 2, 3 `}`@. Like other \CFA operators, these names represent the syntax used to explicitly call the constructor or destructor, \eg @s{...}@ or @^s{...}@. The constructor and destructor have return type @void@, and the first parameter is a reference to the object type to be constructed or destructed. While the first parameter is informally called the @this@ parameter, as in object-oriented languages, any variable name may be used. Both constructors and destructors allow additional parameters after the @this@ parameter for specifying values for initialization/deinitialization\footnote{ Destruction parameters are useful for specifying storage-management actions, such as deinitialize but not deallocate.}. \begin{cfa}[basicstyle=\linespread{0.9}\fontsize{9bp}{11bp}\selectfont\sf] struct VLA { int size, * data; }; $\C{// variable length array of integers}$ void ?{}( VLA & vla ) with ( vla ) { size = 10; data = alloc( size ); } $\C{// default constructor}$ void ^?{}( VLA & vla ) with ( vla ) { free( data ); } $\C{// destructor}$ { VLA x; $\C{// implicit:\ \ x\{\};}$ } $\C{// implicit:\ \textasciicircum{}x\{\};}$ \end{cfa} @VLA@ is a \newterm{managed type}\footnote{ A managed type affects the runtime environment versus a self-contained type.}: a type requiring a nontrivial constructor or destructor, or with a member of a managed type. A managed type is implicitly constructed at allocation and destructed at deallocation to ensure proper interaction with runtime resources, in this case, the @data@ array in the heap. For details of the code-generation placement of implicit constructor and destructor calls among complex executable statements, see section~2.2 in the work of Schluntz~\cite{Schluntz17}. \CFA also provides syntax for \newterm{initialization} and \newterm{copy}. \begin{cfa} void ?{}( VLA & vla, int size, char fill = '\0' ) { $\C{// initialization}$ vla.[ size, data ] = [ size, alloc( size, fill ) ]; } void ?{}( VLA & vla, VLA other ) { $\C{// copy, shallow}$ vla = other; } \end{cfa} (Note that the example is purposely simplified using shallow-copy semantics.) An initialization constructor call has the same syntax as a C initializer, except that the initialization values are passed as arguments to a matching constructor (number and type of parameters). \begin{cfa} VLA va = `{` 20, 0 `}`, * arr = alloc()`{` 5, 0 `}`; \end{cfa} Note the use of a \newterm{constructor expression} to initialize the storage returned by the dynamic storage allocation. Like \CC, the copy constructor has two parameters, the second of which is a value parameter with the same type as the first parameter; appropriate care is taken to not recursively call the copy constructor when initializing the second parameter. \CFA constructors may be explicitly called, like Java, and destructors may be explicitly called, like \CC. Explicit calls to constructors double as a \CC-style \emph{placement syntax}, useful for construction of members in user-defined constructors and reuse of existing storage allocations. Like the other operators in \CFA, there is a concise syntax for constructor/destructor function calls.
\begin{cfa} { VLA x, y = { 20, 0x01 }, z = y; $\C{// z points to y}$ // x{}; y{ 20, 0x01 }; z{ z, y }; ^x{}; $\C{// deallocate x}$ x{}; $\C{// reallocate x}$ z{ 5, 0xff }; $\C{// reallocate z, not pointing to y}$ ^y{}; $\C{// deallocate y}$ y{ x }; $\C{// reallocate y, points to x}$ x{}; $\C{// reallocate x, not pointing to y}$ } // ^z{}; ^y{}; ^x{}; \end{cfa} To provide a uniform type interface for @otype@ polymorphism, the \CFA compiler automatically generates a default constructor, copy constructor, assignment operator, and destructor for all types. These default functions can be overridden by user-generated versions. For compatibility with the standard behavior of C, the default constructor and destructor for all basic, pointer, and reference types do nothing, whereas the copy constructor and assignment operator are bitwise copies; if default zero initialization is desired, the default constructors can be overridden. For user-generated types, the four functions are also automatically generated. @enum@ types are handled the same as their underlying integral type, and unions are also bitwise copied and no-op initialized and destructed. For compatibility with C, a copy constructor from the first union member type is also defined. For @struct@ types, each of the four functions is implicitly defined to call their corresponding functions on each member of the struct. To better simulate the behavior of C initializers, a set of \newterm{member constructors} is also generated for structures. A constructor is generated for each nonempty prefix of a structure's member list to copy-construct the members passed as parameters and default-construct the remaining members. To allow users to limit the set of constructors available for a type, when a user declares any constructor or destructor, the corresponding generated function and all member constructors for that type are hidden from expression resolution; similarly, the generated default constructor is hidden upon the declaration of any constructor. These semantics closely mirror the rule for implicit declaration of constructors in \CC\cite[p.~186]{ANSI98:C++}. In some circumstance, programmers may not wish to have implicit constructor and destructor generation and calls. In these cases, \CFA provides the initialization syntax \lstinline|S x `@=` {}|, and the object becomes unmanaged; hence, implicit \mbox{constructor} and destructor calls are not generated. Any C initializer can be the right-hand side of an \lstinline|@=| initializer, \eg \lstinline|VLA a @= { 0, 0x0 }|, with the usual C initialization semantics. The same syntax can be used in a compound literal, \eg \lstinline|a = (VLA)`@`{ 0, 0x0 }|, to create a C-style literal. The point of \lstinline|@=| is to provide a migration path from legacy C code to \CFA, by providing a mechanism to incrementally convert into implicit initialization. % \subsection{Default Parameters} \section{Literals} C already includes limited polymorphism for literals---@0@ can be either an integer or a pointer literal, depending on context, whereas the syntactic forms of literals of the various integer and float types are very similar, differing from each other only in suffix. In keeping with the general \CFA approach of adding features while respecting the ``C style'' of doing things, C's polymorphic constants and typed literal syntax are extended to interoperate with user-defined types, while maintaining a backward-compatible semantics. 
A simple example is allowing the underscore, as in Ada, to separate prefixes, digits, and suffixes in all \CFA constants, \eg @0x`_`1.ffff`_`ffff`_`p`_`128`_`l@, where the underscore is also the standard separator in C identifiers. \CC uses a single quote as a separator, but it is restricted to appearing among digits, precluding its use in the literal prefix or suffix, \eg @0x1.ffff@@`'@@ffffp128l@, and causes problems with most integrated development environments (IDEs), which must be extended to deal with this alternate use of the single quote. \begin{comment} \subsection{Integral Suffixes} New integral suffixes @hh@ (half of half of @int@) for @char@, @h@ (half of @int@) for @short@, and @z@ for @size_t@, and length suffixes for 8, 16, 32, 64, and 128 bit integers. %Additional integral suffixes are added to cover all the integral types and lengths. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} \begin{cfa} 20_`hh` // signed char 21_`hh`u // unsigned char 22_`h` // signed short int 23_u`h` // unsigned short int 24`z` // size_t \end{cfa} & \begin{cfa} 20_`L8` // int8_t 21_u`l8` // uint8_t 22_`l16` // int16_t 23_u`l16` // uint16_t 24_`l32` // int32_t \end{cfa} & \begin{cfa} 25_u`l32` // uint32_t 26_`l64` // int64_t 27_`l64`u // uint64_t 26_`L128` // int128 27_`L128`u // unsigned int128 \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} \end{comment} \subsection{0/1} In C, @0@ has the special property that it is the only ``false'' value; by the standard, any value that compares equal to @0@ is false, whereas any value that compares unequal to @0@ is true. As such, an expression @x@ in any Boolean context (such as the condition of an @if@ or @while@ statement, or the arguments to @&&@, @||@, or @?:@\,) can be rewritten as @x != 0@ without changing its semantics. Operator overloading in \CFA provides a natural means to implement this truth-value comparison for arbitrary types, but the C type system is not precise enough to distinguish an equality comparison with @0@ from an equality comparison with an arbitrary integer or pointer. To provide this precision, \CFA introduces a new type @zero_t@ as the type of literal @0@ (somewhat analogous to @nullptr_t@ and @nullptr@ in \CCeleven); @zero_t@ can only take the value @0@, but has implicit conversions to the integer and pointer types so that C code involving @0@ continues to work. With this addition, \CFA rewrites @if (x)@ and similar expressions to @if ( (x) != 0 )@ or the appropriate analogue, and any type @T@ can be made ``truthy'' by defining an operator overload @int ?!=?( T, zero_t )@. \CC makes types truthy by adding a conversion to @bool@; prior to the addition of explicit cast operators in \CCeleven, this approach had the pitfall of making truthy types transitively convertible into any numeric type; \CFA avoids this issue. Similarly, \CFA also has a special type for @1@, @one_t@; like @zero_t@, @one_t@ has built-in implicit conversions to the various integral types so that @1@ maintains its expected semantics in legacy code for operations @++@ and @--@. The addition of @one_t@ allows generic algorithms to handle the unit value uniformly for types where it is meaningful.
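As a minimal sketch of the @zero_t@ mechanism, a hypothetical user type @Flags@ becomes truthy with a single overload.
\begin{cfa}
struct Flags { unsigned int bits; };					// hypothetical user type
int ?!=?( Flags f, zero_t ) { return f.bits != 0; }	// truth-value comparison with 0
void reset( Flags & f ) {
	if ( f ) f.bits = 0;								// treated as: if ( f != 0 )
}
\end{cfa}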
\TODO{Make this sentence true} For @one_t@ in particular, polymorphic functions in the \CFA prelude define @++x@ and @x++@ in terms of @x += 1@, allowing users to idiomatically define all forms of increment for a type @T@ by defining the single function @T & ?+=?(T &, one_t)@; analogous overloads for the decrement operators are present as well. \subsection{User literals} For readability, it is useful to associate units to scale literals, \eg weight (stone, pound, kilogram) or time (seconds, minutes, hours). The left of Figure~\ref{f:UserLiteral} shows the \CFA alternative call syntax (postfix: literal argument before function name), using the backquote, to convert basic literals into user literals. The backquote is a small character, making the unit (function name) predominate. For example, the multiprecision integer type in Section~\ref{s:MultiPrecisionIntegers} has the following user literals. {\lstset{language=CFA,moredelim=**[is][\color{red}]{|}{|},deletedelim=**[is][]{`}{`}} \begin{cfa} y = 9223372036854775807L|`mp| * 18446744073709551615UL|`mp|; y = "12345678901234567890123456789"|`mp| + "12345678901234567890123456789"|`mp|; \end{cfa} Because \CFA uses a standard function, all types and literals are applicable, as well as overloading and conversions, where @?`@ denotes a postfix-function name and @`@ denotes a postfix-function call. }% \begin{cquote} \lstset{language=CFA,moredelim=**[is][\color{red}]{|}{|},deletedelim=**[is][]{`}{`}} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{postfix function}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{constant}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{variable/expression}} & \multicolumn{1}{c@{}}{\textbf{postfix pointer}} \\ \begin{cfa} int |?`h|( int s ); int |?`h|( double s ); int |?`m|( char c ); int |?`m|( const char * s ); int |?`t|( int a, int b, int c ); \end{cfa} & \begin{cfa} 0 |`h|; 3.5|`h|; '1'|`m|; "123" "456"|`m|; [1,2,3]|`t|; \end{cfa} & \begin{cfa} int i = 7; i|`h|; (i + 3)|`h|; (i + 3.5)|`h|; \end{cfa} & \begin{cfa} int (* |?`p|)( int i ); |?`p| = |?`h|; 3|`p|; i|`p|; (i + 3)|`p|; \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The right of Figure~\ref{f:UserLiteral} shows the equivalent \CC version using the underscore for the call syntax. However, \CC restricts the parameter types, \eg @unsigned long long int@ and @long double@, to represent integral and floating literals, and user literals must then match exactly (no conversions); hence, it is necessary to overload the unit with all appropriate types.
\begin{figure} \centering \fontsize{9bp}{11bp}\selectfont \lstset{language=CFA,moredelim=**[is][\color{red}]{|}{|},deletedelim=**[is][]{`}{`}} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{1.25\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{1.25\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{\CC}} \\ \begin{cfa} struct W { double stones; }; void ?{}( W & w ) { w.stones = 0; } void ?{}( W & w, double w ) { w.stones = w; } W ?+?( W l, W r ) { return (W){ l.stones + r.stones }; } W |?`st|(double w) { return (W){ w }; } W |?`lb|(double w) { return (W){ w/14.0 }; } W |?`kg|(double w) { return (W){ w*0.16 }; } int main() { W w, heavy = { 20 }; w = 155|`lb|; w = 0b_1111|`st|; w = 0_233|`lb|; w = 0x_9b_u|`kg|; w = 5.5|`st| + 8|`kg| + 25.01|`lb| + heavy; } \end{cfa} & \begin{cfa} struct W { double stones; W() { stones = 0.0; } W( double w ) { stones = w; } }; W operator+( W l, W r ) { return W( l.stones + r.stones ); } W |operator""_st|(unsigned long long int w) {return W(w); } W |operator""_lb|(unsigned long long int w) {return W(w/14.0); } W |operator""_kg|(unsigned long long int w) {return W(w*0.16); } W |operator""_st|(long double w ) { return W( w ); } W |operator""_lb|(long double w ) { return W( w / 14.0 ); } W |operator""_kg|(long double w ) { return W( w * 0.16 ); } int main() { W w, heavy = { 20 }; w = 155|_lb|; // binary unsupported w = 0${\color{red}\LstBasicStyle{'}}$233|_lb|; // quote separator w = 0x9b|_kg|; w = 5.5d|_st| + 8|_kg| + 25.01|_lb| + heavy; } \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{User literal} \label{f:UserLiteral} \end{figure} \section{Libraries} \label{sec:libraries} As stated in Section~\ref{sec:poly-fns}, \CFA inherits a large corpus of library code, where other programming languages must rewrite or provide fragile interlanguage communication with C. \CFA has replacement libraries condensing hundreds of existing C names into tens of \CFA overloaded names, all without rewriting the actual computations. In many cases, the interface is an inline wrapper providing overloading during compilation but of zero cost at runtime. The following sections give a glimpse of the interface reduction to many C libraries. In many cases, @signed@/@unsigned@ @char@, @short@, and @_Complex@ functions are available (but not shown) to ensure expression computations remain in a single type, as conversions can distort results. \subsection{Limits} C library @limits.h@ provides lower and upper bound constants for the basic types. \CFA name overloading is used to condense these typed constants. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{Definition}} & \multicolumn{1}{c@{}}{\textbf{Usage}} \\ \begin{cfa} const short int `MIN` = -32768; const int `MIN` = -2147483648; const long int `MIN` = -9223372036854775808L; \end{cfa} & \begin{cfa} short int si = `MIN`; int i = `MIN`; long int li = `MIN`; \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The result is a significant reduction in names to access typed constants. 
\begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} MIN MAX PI E \end{cfa} & \begin{cfa} CHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, FLT_MIN, DBL_MIN, LDBL_MIN UCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, FLT_MAX, DBL_MAX, LDBL_MAX M_PI, M_PIl M_E, M_El \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} \subsection{Math} C library @math.h@ provides many mathematical functions. \CFA function overloading is used to condense these mathematical functions. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{Definition}} & \multicolumn{1}{c@{}}{\textbf{Usage}} \\ \begin{cfa} float `log`( float x ); double `log`( double ); double _Complex `log`( double _Complex x ); \end{cfa} & \begin{cfa} float f = `log`( 3.5 ); double d = `log`( 3.5 ); double _Complex dc = `log`( 3.5+0.5I ); \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The result is a significant reduction in names to access math functions. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} log sqrt sin \end{cfa} & \begin{cfa} logf, log, logl, clogf, clog, clogl sqrtf, sqrt, sqrtl, csqrtf, csqrt, csqrtl sinf, sin, sinl, csinf, csin, csinl \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} While \Celeven has type-generic math (see section~7.25 of the ISO/IEC 9899~\cite{C11}) in @tgmath.h@ to provide a similar mechanism, these macros are limited, matching a function name with a single set of floating type(s). For example, it is impossible to overload @atan@ for both one and two arguments; instead, the names @atan@ and @atan2@ are required (see Section~\ref{s:NameOverloading}). The key observation is that only a restricted set of type-generic macros is provided for a limited set of function names, which do not generalize across the type system, as in \CFA. \subsection{Standard} C library @stdlib.h@ provides many general functions. \CFA function overloading is used to condense these utility functions. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{Definition}} & \multicolumn{1}{c@{}}{\textbf{Usage}} \\ \begin{cfa} unsigned int `abs`( int ); double `abs`( double ); double `abs`( double _Complex ); \end{cfa} & \begin{cfa} unsigned int i = `abs`( -1 ); double d = `abs`( -1.5 ); double d = `abs`( -1.5+0.5I ); \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The result is a significant reduction in names to access the utility functions. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} abs strto random \end{cfa} & \begin{cfa} abs, labs, llabs, fabsf, fabs, fabsl, cabsf, cabs, cabsl strtol, strtoul, strtoll, strtoull, strtof, strtod, strtold srand48, mrand48, lrand48, drand48 \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} In addition, there are polymorphic functions, like @min@ and @max@, that work on any type with the comparison operator @?<?@.
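As a sketch of the underlying mechanism (not necessarily the library's exact definition), such a polymorphic @min@ needs only a single comparison assertion, and each call infers the type @T@ from its arguments.
\begin{cfa}
forall( otype T | { int ?<?( T, T ); } )	// any type with a less-than operator
T min( T x, T y ) { return x < y ? x : y; }
int i = min( 3, 7 );					// T inferred as int
double d = min( 3.5, 1.2 );				// T inferred as double
\end{cfa}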
The following shows one example where \CFA \emph{extends} an existing standard C interface to reduce complexity and provide safety. C/\Celeven provide a number of complex and overlapping storage-management operations to support the following capabilities. \begin{list}{}{\itemsep=0pt\parsep=0pt\labelwidth=0pt\leftmargin\parindent\itemindent-\leftmargin\let\makelabel\descriptionlabel} \item[fill] an allocation with a specified character. \item[resize] an existing allocation to decrease or increase its size. In either case, new storage may or may not be allocated, and if there is a new allocation, as much data from the existing allocation are copied. For an increase in storage size, new storage after the copied data may be filled. \newpage \item[align] an allocation on a specified memory boundary, \eg, an address multiple of 64 or 128 for cache-line purposes. \item[array] allocation with a specified number of elements. An array may be filled, resized, or aligned. \end{list} Table~\ref{t:StorageManagementOperations} shows the capabilities provided by C/\Celeven allocation functions and how all the capabilities can be combined into two \CFA functions. \CFA storage-management functions extend the C equivalents by overloading, providing shallow type safety, and removing the need to specify the base allocation size. Figure~\ref{f:StorageAllocation} contrasts \CFA and C storage allocation performing the same operations with the same type safety. \begin{table} \caption{Storage-management operations} \label{t:StorageManagementOperations} \centering \lstDeleteShortInline@% \lstMakeShortInline~% \begin{tabular}{@{}rrllll@{}} \multicolumn{1}{c}{}& & \multicolumn{1}{c}{fill} & resize & align & array \\ C & ~malloc~ & no & no & no & no \\ & ~calloc~ & yes (0 only) & no & no & yes \\ & ~realloc~ & no/copy & yes & no & no \\ & ~memalign~ & no & no & yes & no \\ & ~posix_memalign~ & no & no & yes & no \\ C11 & ~aligned_alloc~ & no & no & yes & no \\ \CFA & ~alloc~ & yes/copy & no/yes & no & yes \\ & ~align_alloc~ & yes & no & yes & yes \\ \end{tabular} \lstDeleteShortInline~% \lstMakeShortInline@% \end{table} \begin{figure} \centering \fontsize{9bp}{11bp}\selectfont \begin{cfa}[aboveskip=0pt,xleftmargin=0pt] size_t dim = 10; $\C{// array dimension}$ char fill = '\xff'; $\C{// initialization fill value}$ int * ip; \end{cfa} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa}[xleftmargin=-10pt] ip = alloc(); ip = alloc( fill ); ip = alloc( dim ); ip = alloc( dim, fill ); ip = alloc( ip, 2 * dim ); ip = alloc( ip, 4 * dim, fill ); ip = align_alloc( 16 ); ip = align_alloc( 16, fill ); ip = align_alloc( 16, dim ); ip = align_alloc( 16, dim, fill ); \end{cfa} & \begin{cfa} ip = (int *)malloc( sizeof(int) ); ip = (int *)malloc( sizeof(int) ); memset( ip, fill, sizeof(int) ); ip = (int *)malloc( dim * sizeof(int) ); ip = (int *)malloc( sizeof(int) ); memset( ip, fill, dim * sizeof(int) ); ip = (int *)realloc( ip, 2 * dim * sizeof(int) ); ip = (int *)realloc( ip, 4 * dim * sizeof(int) ); memset( ip, fill, 4 * dim * sizeof(int)); ip = memalign( 16, sizeof(int) ); ip = memalign( 16, sizeof(int) ); memset( ip, fill, sizeof(int) ); ip = memalign( 16, dim * sizeof(int) ); ip = memalign( 16, dim * sizeof(int) ); memset( ip, fill, dim * sizeof(int) ); \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{\CFA versus C storage allocation} \label{f:StorageAllocation} \end{figure} Variadic 
@new@ (see Section~\ref{sec:variadic-tuples}) cannot support the same overloading because extra parameters are for initialization. Hence, there are @new@ and @anew@ functions for single and array variables, and the fill value is replaced by the arguments to the constructor. \begin{cfa} struct S { int i, j; }; void ?{}( S & s, int i, int j ) { s.i = i; s.j = j; } S * s = new( 2, 3 ); $\C{// allocate storage and run constructor}$ S * as = anew( dim, 2, 3 ); $\C{// each array element initialized to 2, 3}$ \end{cfa} Note that \CC can only initialize array elements via the default constructor. Finally, the \CFA memory allocator has \newterm{sticky properties} for dynamic storage: fill and alignment are remembered with an object's storage in the heap. When a @realloc@ is performed, the sticky properties are respected, so that new storage is correctly aligned and initialized with the fill character. \subsection{I/O} \label{s:IOLibrary} The goal of \CFA I/O is to simplify the common cases, while fully supporting polymorphism and user-defined types in a consistent way. The approach combines ideas from \CC and Python. The \CFA header file for the I/O library is @fstream@. The common case is printing out a sequence of variables separated by whitespace. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{2\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{\CC}} \\ \begin{cfa} int x = 1, y = 2, z = 3; sout | x `|` y `|` z | endl; \end{cfa} & \begin{cfa} cout << x `<< " "` << y `<< " "` << z << endl; \end{cfa} \\ \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 1` `2` `3 \end{cfa} & \begin{cfa}[showspaces=true,aboveskip=0pt,belowskip=0pt] 1 2 3 \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} The \CFA form has half the characters of the \CC form and is similar to Python I/O with respect to implicit separators. Similar simplification occurs for tuple I/O, which prints all tuple values separated by ``\lstinline[showspaces=true]@, @''. \begin{cfa} [int, [ int, int ] ] t1 = [ 1, [ 2, 3 ] ], t2 = [ 4, [ 5, 6 ] ]; sout | t1 | t2 | endl; $\C{// print tuples}$ \end{cfa} \begin{cfa}[showspaces=true,aboveskip=0pt] 1`, `2`, `3 4`, `5`, `6 \end{cfa} Finally, \CFA uses the logical-or operator for I/O as it is the lowest-priority overloadable operator, other than assignment. Therefore, fewer output expressions require parentheses. \begin{cquote} \lstDeleteShortInline@% \begin{tabular}{@{}ll@{}} \textbf{\CFA:} & \begin{cfa} sout | x * 3 | y + 1 | z << 2 | x == y | (x | y) | (x || y) | (x > z ? 1 : 2) | endl; \end{cfa} \\ \textbf{\CC:} & \begin{cfa} cout << x * 3 << y + 1 << `(`z << 2`)` << `(`x == y`)` << (x | y) << (x || y) << (x > z ? 1 : 2) << endl; \end{cfa} \\ & \begin{cfa}[showspaces=true,aboveskip=0pt] 3 3 12 0 3 1 2 \end{cfa} \end{tabular} \lstMakeShortInline@% \end{cquote} There is a weak similarity between the \CFA logical-or operator and the Shell pipe operator for moving data, where data flow in the correct direction for input but in the opposite direction for output. \begin{comment} The implicit separator character (space/blank) is a separator, not a terminator. The rules for implicitly adding the separator are: \begin{itemize} \item A separator does not appear at the start or end of a line. \item A separator does not appear before or after a character literal or variable.
\item A separator does not appear before or after a null (empty) C string, which is a local mechanism to disable insertion of the separator character. \item A separator does not appear before a C string starting with the characters: \lstinline[mathescape=off,basicstyle=\tt]@([{=$@ \item A separator does not appear after a C string ending with the characters: \lstinline[basicstyle=\tt]@,.;!?)]}%@ \item {\lstset{language=CFA,deletedelim=**[is][]{`}{`}} A separator does not appear before or after a C string beginning/ending with the quote or whitespace characters: \lstinline[basicstyle=\tt,showspaces=true]@`'": \t\v\f\r\n@ }% \end{itemize} \end{comment} There are functions to set and get the separator string and manipulators to toggle separation on and off in the middle of output. \subsection{Multiprecision integers} \label{s:MultiPrecisionIntegers} \CFA has an interface to the GNU multiple precision (GMP) signed integers~\cite{GMP}, similar to the \CC interface provided by GMP. The \CFA interface wraps GMP functions into operator functions to make programming with multiprecision integers identical to using fixed-sized integers. The \CFA type name for multiprecision signed integers is @Int@ and the header file is @gmp@. Figure~\ref{f:GMPInterface} shows a multiprecision factorial program contrasting the GMP interface in \CFA and C. \begin{figure}[b] \centering \fontsize{9bp}{11bp}\selectfont \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}} \multicolumn{1}{@{}c@{\hspace{3\parindentlnth}}}{\textbf{\CFA}} & \multicolumn{1}{c@{}}{\textbf{C}} \\ \begin{cfa} #include <gmp> int main( void ) { sout | "Factorial Numbers" | endl; Int fact = 1; sout | 0 | fact | endl; for ( unsigned int i = 1; i <= 40; i += 1 ) { fact *= i; sout | i | fact | endl; } } \end{cfa} & \begin{cfa} #include <gmp.h> int main( void ) { `gmp_printf`( "Factorial Numbers\n" ); `mpz_t` fact; `mpz_init_set_ui`( fact, 1 ); `gmp_printf`( "%d %Zd\n", 0, fact ); for ( unsigned int i = 1; i <= 40; i += 1 ) { `mpz_mul_ui`( fact, fact, i ); `gmp_printf`( "%d %Zd\n", i, fact ); } } \end{cfa} \end{tabular} \lstMakeShortInline@% \caption{GMP interface \CFA versus C} \label{f:GMPInterface} \end{figure} \vspace{-4pt} \section{Polymorphism Evaluation} \label{sec:eval} \CFA adds parametric polymorphism to C. A runtime evaluation is performed to compare the cost of alternative styles of polymorphism. The goal is to compare just the underlying mechanism for implementing different kinds of polymorphism. % Though \CFA provides significant added functionality over C, these features have a low runtime penalty. % In fact, it is shown that \CFA's generic programming can enable faster runtime execution than idiomatic @void *@-based C code. The experiment is a set of generic-stack microbenchmarks~\cite{CFAStackEvaluation} in C, \CFA, and \CC (see implementations in Appendix~\ref{sec:BenchmarkStackImplementations}). Since all these languages share a subset essentially comprising standard C, maximal-performance benchmarks should show little runtime variance, differing only in length and clarity of source code. A more illustrative comparison measures the costs of idiomatic usage of each language's features. Figure~\ref{fig:BenchmarkTest} shows the \CFA benchmark tests for a generic stack based on a singly linked list. The benchmark test is similar for the other languages.
The experiment uses element types @int@ and @pair(short, char)@, and pushes $N=40M$ elements on a generic stack, copies the stack, clears one of the stacks, and finds the maximum value in the other stack. \begin{figure} \fontsize{9bp}{11bp}\selectfont \begin{cfa}[xleftmargin=3\parindentlnth,aboveskip=0pt,belowskip=0pt] int main() { int max = 0, val = 42; stack( int ) si, ti; REPEAT_TIMED( "push_int", N, push( si, val ); ) TIMED( "copy_int", ti{ si }; ) TIMED( "clear_int", clear( si ); ) REPEAT_TIMED( "pop_int", N, int x = pop( ti ); if ( x > max ) max = x; ) pair( short, char ) max = { 0h, '\0' }, val = { 42h, 'a' }; stack( pair( short, char ) ) sp, tp; REPEAT_TIMED( "push_pair", N, push( sp, val ); ) TIMED( "copy_pair", tp{ sp }; ) TIMED( "clear_pair", clear( sp ); ) REPEAT_TIMED( "pop_pair", N, pair(short, char) x = pop( tp ); if ( x > max ) max = x; ) } \end{cfa} \caption{\protect\CFA benchmark test} \label{fig:BenchmarkTest} \vspace*{-10pt} \end{figure} The structure of each benchmark implemented is C with @void *@-based polymorphism, \CFA with parametric polymorphism, \CC with templates, and \CC using only class inheritance for polymorphism, called \CCV. The \CCV variant illustrates an alternative object-oriented idiom where all objects inherit from a base @object@ class, mimicking a Java-like interface; hence, runtime checks are necessary to safely downcast objects. The most notable difference among the implementations is in memory layout of generic types: \CFA and \CC inline the stack and pair elements into corresponding list and pair nodes, whereas C and \CCV lack such capability and, instead, must store generic objects via pointers to separately allocated objects. Note that the C benchmark uses unchecked casts as C has no runtime mechanism to perform such checks, whereas \CFA and \CC provide type safety statically. Figure~\ref{fig:eval} and Table~\ref{tab:eval} show the results of running the benchmark in Figure~\ref{fig:BenchmarkTest} and its C, \CC, and \CCV equivalents. The graph plots the median of five consecutive runs of each program, with an initial warm-up run omitted. All code is compiled at \texttt{-O2} by gcc or g++ 6.4.0, with all \CC code compiled as \CCfourteen. The benchmarks are run on an Ubuntu 16.04 workstation with 16 GB of RAM and a 6-core AMD FX-6300 CPU with 3.5 GHz maximum clock frequency. \begin{figure} \centering \resizebox{0.7\textwidth}{!}{\input{timing}} \caption{Benchmark timing results (smaller is better)} \label{fig:eval} \vspace*{-10pt} \end{figure} \begin{table} \vspace*{-10pt} \caption{Properties of benchmark code} \label{tab:eval} \centering \vspace*{-4pt} \newcommand{\CT}[1]{\multicolumn{1}{c}{#1}} \begin{tabular}{lrrrr} & \CT{C} & \CT{\CFA} & \CT{\CC} & \CT{\CCV} \\ maximum memory usage (MB) & 10\,001 & 2\,502 & 2\,503 & 11\,253 \\ source code size (lines) & 201 & 191 & 125 & 294 \\ redundant type annotations (lines) & 27 & 0 & 2 & 16 \\ binary size (KB) & 14 & 257 & 14 & 37 \\ \end{tabular} \vspace*{-16pt} \end{table} \enlargethispage{-10pt} The C and \CCV variants are generally the slowest with the largest memory footprint, due to their less-efficient memory layout and the pointer indirection necessary to implement generic types; this inefficiency is exacerbated by the second level of generic types in the pair benchmarks. 
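To make the layout difference concrete, the following sketch abstracts the node declarations from the benchmark implementations in Appendix~\ref{sec:BenchmarkStackImplementations} (the name @c_node@ is only illustrative): the \CFA node stores the element value inline, whereas the C node stores a @void *@ to a separately allocated element, costing an extra allocation and a pointer indirection per element.
\begin{cfa}
forall( otype T ) struct node { T value; node(T) * next; };	// CFA: element stored inline in the node
struct c_node { void * value; struct c_node * next; };		// C: element boxed behind a void * (illustrative name)
\end{cfa}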
By contrast, the \CFA and \CC variants run in roughly equivalent time for both the integer and pair because of the equivalent storage layout, with the inlined libraries (\ie no separate compilation) and greater maturity of the \CC compiler contributing to its lead. \CCV is slower than C largely due to the cost of runtime type checking of downcasts (implemented with @dynamic_cast@). The outlier for \CFA, pop @pair@, results from the complexity of the generated-C polymorphic code. The gcc compiler is unable to optimize some dead code and condense nested calls; a compiler designed for \CFA could easily perform these optimizations. Finally, the binary size for \CFA is larger because of static linking with the \CFA libraries. \CFA is also competitive in terms of source code size, measured as a proxy for programmer effort. The line counts in Table~\ref{tab:eval} include implementations of @pair@ and @stack@ types for all four languages for purposes of direct comparison, although it should be noted that \CFA and \CC have prewritten data structures in their standard libraries that programmers would generally use instead. Use of these standard library types has minimal impact on the performance benchmarks, but shrinks the \CFA and \CC benchmarks to 39 and 42 lines, respectively. The difference between the \CFA and \CC line counts is primarily declaration duplication to implement separate compilation; a header-only \CFA library would be similar in length to the \CC version. On the other hand, C does not have a generic collections library in its standard distribution, resulting in frequent reimplementation of such collection types by C programmers. \CCV does not use the \CC standard template library by construction and, in fact, includes the definition of @object@ and wrapper classes for @char@, @short@, and @int@ in its line count, which inflates this count somewhat, as an actual object-oriented language would include these in the standard library; with their omission, the \CCV line count is similar to C. We justify the given line count by noting that many object-oriented languages do not allow implementing new interfaces on library types without subclassing or wrapper types, which may be similarly verbose. Line count is a fairly rough measure of code complexity; another important factor is how much type information the programmer must specify manually, especially where that information is not compiler checked. Such unchecked type information produces a heavier documentation burden and increased potential for runtime bugs and is much less common in \CFA than in C, with its manually specified function pointer arguments and format codes, or in \CCV, with its extensive use of un-type-checked downcasts, \eg @object@ to @integer@ when popping a stack. To quantify this manual typing, the ``redundant type annotations'' line in Table~\ref{tab:eval} counts the number of lines on which the type of a known variable is respecified, either as a format specifier, explicit downcast, type-specific function, or by name in a @sizeof@, struct literal, or @new@ expression. The \CC benchmark uses two redundant type annotations to create new stack nodes, whereas the C and \CCV benchmarks have several such annotations spread throughout their code. The \CFA benchmark is able to eliminate all redundant type annotations through use of the polymorphic @alloc@ function discussed in Section~\ref{sec:libraries}.
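As a small illustration, consider the node allocation in each @push@ operation (simplified from the stack implementations in Appendix~\ref{sec:BenchmarkStackImplementations}): the C version names the node type again inside @sizeof@, whereas the \CFA @alloc@ call infers the allocation type and size from the target pointer.
\begin{cfa}
node * n = malloc( sizeof(node) );	// C: node type respecified, counted as a redundant annotation
node(T) * n = alloc();				// CFA: type and size inferred from n, no annotation
\end{cfa}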
We conjecture that these results scale across most generic data types as the underlying polymorphism implementation is constant. \vspace*{-8pt} \section{Related Work} \label{s:RelatedWork} \subsection{Polymorphism} ML~\cite{ML} was the first language to support parametric polymorphism. Like \CFA, it supports universal type parameters, but not the use of assertions and traits to constrain type arguments. Haskell~\cite{Haskell10} combines ML-style polymorphism, polymorphic data types, and type inference with the notion of type classes, collections of overloadable methods that correspond in intent to traits in \CFA. Unlike \CFA, Haskell requires an explicit association between types and their classes that specifies the implementation of operations. These associations determine the functions that are assertion arguments for particular combinations of class and type, in contrast to \CFA where the assertion arguments are selected at function call sites based upon the set of operations in scope at that point. Haskell also severely restricts the use of overloading: an overloaded name can only be associated with a single class, and methods with overloaded names can only be defined as part of instance declarations. \CC provides three disjoint polymorphic extensions to C: overloading, inheritance, and templates. The overloading is restricted because resolution does not use the return type, inheritance requires learning object-oriented programming and coping with a restricted nominal-inheritance hierarchy, templates cannot be separately compiled, resulting in compilation/code bloat and poor error messages, and determining how these mechanisms interact and which to use is confusing. In contrast, \CFA has a single facility for polymorphic code supporting type-safe separate compilation of polymorphic functions and generic (opaque) types, which uniformly leverage the C procedural paradigm. The key mechanism to support separate compilation is \CFA's \emph{explicit} use of assumed type properties. Until \CC concepts~\cite{C++Concepts} are standardized (anticipated for \CCtwenty), \CC provides no way of specifying the requirements of a generic function beyond compilation errors during template expansion; furthermore, \CC concepts are restricted to template polymorphism. Cyclone~\cite{Grossman06} also provides capabilities for polymorphic functions and existential types, similar to \CFA's @forall@ functions and generic types. Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, which is a tedious and potentially error-prone process. Furthermore, Cyclone's polymorphic functions and types are restricted to abstraction over types with the same layout and calling convention as @void *@, \ie only pointer types and @int@. In \CFA terms, all Cyclone polymorphism must be dtype-static. While the Cyclone design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, it is more restrictive than \CFA's general model. Smith and Volpano~\cite{Smith98} present Polymorphic C, an ML dialect with polymorphic functions, C-like syntax, and pointer types; it lacks many of C's features, most notably structure types, and hence, is not a practical C replacement. Objective-C~\cite{obj-c-book} is an industrially successful extension to C. However, Objective-C is a radical departure from C, using an object-oriented model with message passing.
Objective-C did not support type-checked generics until recently \cite{xcode7}, historically using less-efficient runtime checking of object types. The GObject~\cite{GObject} framework also adds object-oriented programming with runtime type-checking and reference-counting garbage collection to C; these features are more intrusive additions than those provided by \CFA, in addition to the runtime overhead of reference counting. Vala~\cite{Vala} compiles to GObject-based C, adding the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code bases. Java~\cite{Java8} included generic types in Java~5, which are type checked at compilation and type erased at runtime, similar to \CFA's. However, in Java, each object carries its own table of method pointers, whereas \CFA passes the method pointers separately to maintain a C-compatible layout. Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens. D~\cite{D}, Go, and Rust~\cite{Rust} are modern compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Go, and \emph{traits} in Rust. However, each language represents a significant departure from C in terms of language model, and none has the same level of compatibility with C as \CFA. D and Go are garbage-collected languages, imposing the associated runtime overhead. The necessity of accounting for data transfer between managed runtimes and the unmanaged C runtime complicates foreign-function interfaces to C. Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. D restricts garbage collection to its own heap by default, whereas Rust is not garbage collected and, thus, has a lighter-weight runtime more interoperable with C. Rust also possesses much more powerful abstraction capabilities for writing generic code than Go. On the other hand, Rust's borrow checker provides strong safety guarantees but is complex and difficult to learn and imposes a distinctly idiomatic programming style. \CFA, with its more modest safety features, allows direct ports of C code while maintaining the idiomatic style of the original source. \vspace*{-18pt} \subsection{Tuples/variadics} \vspace*{-5pt} Many programming languages have some form of tuple construct and/or variadic functions, \eg SETL, C, KW-C, \CC, D, Go, Java, ML, and Scala. SETL~\cite{SETL} is a high-level mathematical programming language, with tuples being one of the primary data types. Tuples in SETL allow subscripting, dynamic expansion, and multiple assignment. C provides variadic functions through @va_list@ objects, but the programmer is responsible for managing the number of arguments and their types; thus, the mechanism is type unsafe. KW-C~\cite{Buhr94a}, a predecessor of \CFA, introduced tuples to C as an extension of the C syntax, taking much of its inspiration from SETL. The main contributions of that work were adding MRVF, tuple mass and multiple assignment, and record-member access. \CCeleven introduced @std::tuple@ as a library variadic-template structure. Tuples are a generalization of @std::pair@, in that they allow for arbitrary length, fixed-size aggregation of heterogeneous values. Operations include @std::get@ to extract values, @std::tie@ to create a tuple of references used for assignment, and lexicographic comparisons. 
\CCseventeen proposes \emph{structured bindings}~\cite{Sutter15} to eliminate predeclaring variables and the use of @std::tie@ for binding the results. This extension requires the use of @auto@ to infer the types of the new variables; hence, complicated expressions with a nonobvious type must be documented with some other mechanism. Furthermore, structured bindings are not a full replacement for @std::tie@, as they always declare new variables. Like \CC, D provides tuples through a library variadic-template structure. Go does not have tuples but supports MRVF. Java's variadic functions appear similar to C's but are type safe using homogeneous arrays, which are less useful than \CFA's heterogeneously typed variadic functions. Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML~\cite{sml}, Haskell, and Scala~\cite{Scala}, which decompose tuples using pattern matching. \vspace*{-18pt} \subsection{C Extensions} \vspace*{-5pt} \CC is the best known C-based language and is similar to \CFA in that both are extensions to C with source and runtime backward compatibility. Specific differences between \CFA and \CC have been identified in prior sections, with a final observation that \CFA requires equal or fewer tokens to express the same notion in many cases. The key difference in design philosophies is that \CFA is easier for C programmers to understand by maintaining a procedural paradigm and avoiding complex interactions among extensions. \CC, on the other hand, has multiple overlapping features (such as the three forms of polymorphism), many of which have complex interactions with its object-oriented design. As a result, \CC has a steep learning curve for even experienced C programmers, especially when attempting to maintain performance equivalent to C legacy code. There are several other C extension languages with less usage and even more dramatic changes than \CC. \mbox{Objective-C} and Cyclone are two other extensions to C with different design goals than \CFA, as discussed above. Other languages extend C with more focused features. $\mu$\CC~\cite{uC++book}, CUDA~\cite{Nickolls08}, ispc~\cite{Pharr12}, and Sierra~\cite{Leissa14} add concurrent or data-parallel primitives to C or \CC; data-parallel features have not yet been added to \CFA, but are easily incorporated within its design, whereas concurrency primitives similar to those in $\mu$\CC have already been added~\cite{Delisle18}. Finally, CCured~\cite{Necula02} and Ironclad \CC~\cite{DeLozier13} attempt to provide a more memory-safe C by annotating pointer types with garbage collection information; type-checked polymorphism in \CFA covers several of C's memory-safety issues, but more aggressive approaches such as annotating all pointer types with their nullability or requiring runtime garbage collection are contradictory to \CFA's backward compatibility goals. \section{Conclusion and Future Work} The goal of \CFA is to provide an evolutionary pathway for large C development environments to be more productive and safer, while respecting the talent and skill of C programmers. While other programming languages purport to be a better C, they are, in fact, new and interesting languages in their own right, but not C extensions. The purpose of this paper is to introduce \CFA, and showcase language features that illustrate the \CFA type system and approaches taken to achieve the goal of evolutionary C extension.
The contributions are a powerful type system using parametric polymorphism and overloading, generic types, tuples, advanced control structures, and extended declarations, which all have complex interactions. The work is a challenging design, engineering, and implementation exercise. On the surface, the project may appear as a rehash of similar mechanisms in \CC. However, every \CFA feature is different from its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supporting separate compilation. All of these new features are being used by the \CFA development team to build the \CFA runtime system. Finally, we demonstrate that \CFA performance for some idiomatic cases is better than C and close to \CC, showing the design is practically applicable. While all examples in the paper compile and run, there are ongoing efforts to reduce compilation time, provide better debugging, and add more libraries; when this work is complete in early 2019, a public beta release will be available at \url{https://github.com/cforall/cforall}. There is also new work on a number of \CFA features, including arrays with size, runtime type information, virtual functions, user-defined conversions, and modules. While \CFA polymorphic functions use dynamic virtual dispatch with low runtime overhead (see Section~\ref{sec:eval}), it is not as low as \CC template inlining. Hence, it may be beneficial to provide a mechanism for performance-sensitive code. Two promising approaches are an @inline@ annotation at polymorphic function call sites to create a template specialization of the function (provided the code is visible) or an @inline@ annotation on polymorphic function definitions to instantiate a specialized version for some set of types (\CC template specialization). These approaches are not mutually exclusive and allow performance optimizations to be applied only when necessary, without suffering global code bloat. In general, we believe separate compilation, producing smaller code, works well with loaded hardware caches, which may offset the benefit of larger inlined code. \section{Acknowledgments} The authors would like to recognize the design assistance of Glen Ditchfield, Richard Bilson, Thierry Delisle, Andrew Beach, and Brice Dobry on the features described in this paper and thank Magnus Madsen for feedback on the writing. Funding for this project was provided by Huawei Ltd (\url{http://www.huawei.com}), and Aaron Moss and Peter Buhr were partially funded by the Natural Sciences and Engineering Research Council of Canada. {% \fontsize{9bp}{12bp}\selectfont% \vspace*{-3pt} \bibliography{pl} }% \appendix \section{Benchmark Stack Implementations} \label{sec:BenchmarkStackImplementations} Throughout, @/***/@ designates a counted redundant type annotation; code reformatted slightly for brevity.
\subsection{C} \begin{flushleft} \lstDeleteShortInline@% \begin{tabular}{@{}l@{\hspace{1.8\parindentlnth}}|@{\hspace{\parindentlnth}}l@{}} \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] typedef struct node { void * value; struct node * next; } node; typedef struct stack { struct node * head; } stack; void copy_stack( stack * s, const stack * t, void * (*copy)( const void * ) ) { node ** cr = &s->head; for ( node * nx = t->head; nx; nx = nx->next ) { *cr = malloc( sizeof(node) ); /***/ (*cr)->value = copy( nx->value ); cr = &(*cr)->next; } *cr = NULL; } void clear_stack( stack * s, void (* free_el)( void * ) ) { for ( node * nx = s->head; nx; ) { node * cr = nx; nx = cr->next; free_el( cr->value ); free( cr ); } s->head = NULL; } \end{cfa} & \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] stack new_stack() { return (stack){ NULL }; /***/ } stack * assign_stack( stack * s, const stack * t, void * (*copy_el)( const void * ), void (*free_el)( void * ) ) { if ( s->head == t->head ) return s; clear_stack( s, free_el ); /***/ copy_stack( s, t, copy_el ); /***/ return s; } _Bool stack_empty( const stack * s ) { return s->head == NULL; } void push_stack( stack * s, void * v ) { node * n = malloc( sizeof(node) ); /***/ *n = (node){ v, s->head }; /***/ s->head = n; } void * pop_stack( stack * s ) { node * n = s->head; s->head = n->next; void * v = n->value; free( n ); return v; } \end{cfa} \end{tabular} \lstMakeShortInline@% \end{flushleft} \enlargethispage{1000pt} \subsection{\CFA} \label{s:CforallStack} \begin{flushleft} \lstDeleteShortInline@% \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] forall( otype T ) { struct node { T value; node(T) * next; }; struct stack { node(T) * head; }; void ?{}( stack(T) & s, stack(T) t ) { // copy node(T) ** cr = &s.head; for ( node(T) * nx = t.head; nx; nx = nx->next ) { *cr = alloc(); ((*cr)->value){ nx->value }; cr = &(*cr)->next; } *cr = 0; } void clear( stack(T) & s ) with( s ) { for ( node(T) * nx = head; nx; ) { node(T) * cr = nx; nx = cr->next; ^(*cr){}; free( cr ); } head = 0; } \end{cfa} & \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] void ?{}( stack(T) & s ) { (s.head){ 0 }; } void ^?{}( stack(T) & s ) { clear( s ); } stack(T) ?=?( stack(T) & s, stack(T) t ) { if ( s.head == t.head ) return s; clear( s ); s{ t }; return s; } _Bool empty( const stack(T) & s ) { return s.head == 0; } void push( stack(T) & s, T value ) with( s ) { node(T) * n = alloc(); (*n){ value, head }; head = n; } T pop( stack(T) & s ) with( s ) { node(T) * n = head; head = n->next; T v = n->value; ^(*n){}; free( n ); return v; } } \end{cfa} \end{tabular} \lstMakeShortInline@% \end{flushleft} \newpage \subsection{\CC} \begin{flushleft} \lstDeleteShortInline@% \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] template<typename T> struct stack { struct node { T value; node * next; node( const T & v, node * n = nullptr ) : value( v ), next( n ) {} }; node * head; void copy( const stack & o ) { node ** cr = &head; for ( node * nx = o.head; nx; nx = nx->next ) { *cr = new node{ nx->value }; /***/ cr = &(*cr)->next; } *cr = nullptr; } void clear() { for ( node * nx = head; nx; ) { node * cr = nx; nx = cr->next; delete cr; } head = nullptr; } \end{cfa} & \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] stack() : head( nullptr ) {} stack( const stack & o ) { copy( o ); } ~stack() { clear(); } stack & operator=( const stack & o ) { if ( this == &o )
return *this; clear(); copy( o ); return *this; } bool empty() const { return head == nullptr; } void push( const T & value ) { head = new node{ value, head }; /***/ } T pop() { node * n = head; head = n->next; T v = std::move( n->value ); delete n; return v; } }; \end{cfa} \end{tabular} \lstMakeShortInline@% \end{flushleft} \subsection{\CCV} \begin{flushleft} \lstDeleteShortInline@% \begin{tabular}{@{}l|@{\hspace{\parindentlnth}}l@{}} \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] struct stack { struct node { ptr value; node * next; node( const object & v, node * n = nullptr ) : value( v.new_copy() ), next( n ) {} }; node * head; void copy( const stack & o ) { node ** cr = &head; for ( node * nx = o.head; nx; nx = nx->next ) { *cr = new node{ *nx->value }; /***/ cr = &(*cr)->next; } *cr = nullptr; } void clear() { for ( node * nx = head; nx; ) { node * cr = nx; nx = cr->next; delete cr; } head = nullptr; } \end{cfa} & \begin{cfa}[xleftmargin=0pt,aboveskip=0pt,belowskip=0pt] stack() : head( nullptr ) {} stack( const stack & o ) { copy( o ); } ~stack() { clear(); } stack & operator=( const stack & o ) { if ( this == &o ) return *this; clear(); copy( o ); return *this; } bool empty() const { return head == nullptr; } void push( const object & value ) { head = new node{ value, head }; /***/ } ptr pop() { node * n = head; head = n->next; ptr v = std::move( n->value ); delete n; return v; } }; \end{cfa} \end{tabular} \lstMakeShortInline@% \end{flushleft} \end{document} % Local Variables: % % tab-width: 4 % % compile-command: "make" % % End: %