Changeset 727cf70f
- Timestamp: Apr 3, 2017, 12:00:50 PM (6 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: a6af031
- Parents: 23063ea (diff), 1d29d46 (diff)
- Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 added, 10 edited
doc/bibliography/cfa.bib
r23063ea r727cf70f 35 35 @string{osr="Operating Systems Review"} 36 36 @string{pldi="Programming Language Design and Implementation"} 37 @string{toplas="Transactions on Programming Languages and Systems"} 37 38 @string{mathann="Mathematische Annalen"} 38 39 % @string{mathann="Math. Ann."} … … 2718 2719 implementations are the same. 2719 2720 } 2721 } 2722 2723 @online{GCCExtensions, 2724 contributer = {a3moss@uwaterloo.ca}, 2725 key = {{GNU}}, 2726 title = {Extensions to the {C} Language Family}, 2727 year = 2014, 2728 url = {https://gcc.gnu.org/onlinedocs/gcc-4.7.2/gcc/C-Extensions.html}, 2729 urldate = {2017-04-02} 2720 2730 } 2721 2731 … … 5589 5599 keywords = {Cyclone, existential types, polymorphism, type variables}, 5590 5600 contributer = {a3moss@plg}, 5591 author = { Grossman, Dan},5601 author = {Dan Grossman}, 5592 5602 title = {Quantified Types in an Imperative Language}, 5593 5603 journal = toplas, … … 5596 5606 number = {3}, 5597 5607 month = may, 5598 year = {2006},5608 year = 2006, 5599 5609 issn = {0164-0925}, 5600 pages = {429--475}, 5601 numpages = {47}, 5610 pages = {429-475}, 5602 5611 url = {http://doi.acm.org.proxy.lib.uwaterloo.ca/10.1145/1133651.1133653}, 5603 5612 doi = {10.1145/1133651.1133653}, … … 5758 5767 month = dec, 5759 5768 year = 1988, 5769 } 5770 5771 @mastersthesis{Schluntz17, 5772 author = {Robert Schluntz}, 5773 title = {Resource Management and Tuples in C$\mathbf{\forall}$}, 5774 school = {School of Computer Science, University of Waterloo}, 5775 year = 2017, 5776 address = {Waterloo, Ontario, Canada, N2L 3G1}, 5777 note = {[[unpublished]]} 5760 5778 } 5761 5779 -
doc/generic_types/acmart.cls
r23063ea r727cf70f 370 370 \fi 371 371 \if@ACM@screen 372 \hypersetup{colorlinks,373 linkcolor=ACMRed,374 citecolor=ACMPurple,375 urlcolor=ACMDarkBlue,376 filecolor=ACMDarkBlue}372 % \hypersetup{colorlinks, 373 % linkcolor=ACMRed, 374 % citecolor=ACMPurple, 375 % urlcolor=ACMDarkBlue, 376 % filecolor=ACMDarkBlue} 377 377 \else 378 378 \hypersetup{hidelinks} … … 1830 1830 \newlength\ACM@linecount@bxht\setlength{\ACM@linecount@bxht}{-\baselineskip} 1831 1831 \@tempcnta\@ne\relax 1832 \loop{\color{ACMRed}\scriptsize\the\@tempcnta}\\ 1832 % \loop{\color{ACMRed}\scriptsize\the\@tempcnta}\\ 1833 \loop{\scriptsize\the\@tempcnta}\\ 1833 1834 \advance\@tempcnta by \@ne 1834 1835 \addtolength{\ACM@linecount@bxht}{\baselineskip} -
doc/generic_types/generic_types.tex
r23063ea r727cf70f 1 1 % take off review (for line numbers) and anonymous (for anonymization) on submission 2 2 % \documentclass[format=acmlarge, anonymous, review]{acmart} 3 \documentclass[format=acmlarge, review]{acmart} 4 5 \usepackage{listings} % For code listings 3 \documentclass[format=acmlarge,review]{acmart} 4 5 \usepackage{xspace,calc,comment} 6 \usepackage{upquote} % switch curled `'" to straight 7 \usepackage{listings} % format program code 8 9 \makeatletter 10 % parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for 11 % use rather than use \parident directly. 12 \newlength{\parindentlnth} 13 \setlength{\parindentlnth}{\parindent} 14 15 \newlength{\gcolumnposn} % temporary hack because lstlisting does handle tabs correctly 16 \newlength{\columnposn} 17 \setlength{\gcolumnposn}{2.75in} 18 \setlength{\columnposn}{\gcolumnposn} 19 \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@commentstyle{#2}}} 20 \newcommand{\CRT}{\global\columnposn=\gcolumnposn} 21 \makeatother 6 22 7 23 % Useful macros 8 \newcommand{\CFA}{C$\mathbf\forall$ } % Cforall symbolic name9 \newcommand{\CC}{\rm C\kern-.1em\hbox{+\kern-.25em+} } % C++ symbolic name10 \newcommand{\CCeleven}{\rm C\kern-.1em\hbox{+\kern-.25em+}11 } % C++11 symbolic name11 \newcommand{\CCfourteen}{\rm C\kern-.1em\hbox{+\kern-.25em+}14 } % C++14 symbolic name12 \newcommand{\CCseventeen}{\rm C\kern-.1em\hbox{+\kern-.25em+}17 } % C++17 symbolic name13 \newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20 } % C++20 symbolic name24 \newcommand{\CFA}{C$\mathbf\forall$\xspace} % Cforall symbolic name 25 \newcommand{\CC}{\rm C\kern-.1em\hbox{+\kern-.25em+}\xspace} % C++ symbolic name 26 \newcommand{\CCeleven}{\rm C\kern-.1em\hbox{+\kern-.25em+}11\xspace} % C++11 symbolic name 27 \newcommand{\CCfourteen}{\rm C\kern-.1em\hbox{+\kern-.25em+}14\xspace} % C++14 symbolic name 28 \newcommand{\CCseventeen}{\rm C\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name 29 \newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name 14 30 15 31 \newcommand{\TODO}{\textbf{TODO}} 16 \newcommand{\eg}{\textit{e}.\textit{g}. }17 \newcommand{\ie}{\textit{i}.\textit{e}. }18 \newcommand{\etc}{\textit{etc}. 
}32 \newcommand{\eg}{\textit{e}.\textit{g}.,\xspace} 33 \newcommand{\ie}{\textit{i}.\textit{e}.,\xspace} 34 \newcommand{\etc}{\textit{etc}.,\xspace} 19 35 20 36 % CFA programming language, based on ANSI C (with some gcc additions) 21 37 \lstdefinelanguage{CFA}[ANSI]{C}{ 22 38 morekeywords={_Alignas,_Alignof,__alignof,__alignof__,asm,__asm,__asm__,_At,_Atomic,__attribute,__attribute__,auto, 23 _Bool, bool,catch,catchResume,choose,_Complex,__complex,__complex__,__const,__const__,disable,dtype,enable,__extension__,24 fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue,_Noreturn,one_t,otype,restrict, size_t,sized,_Static_assert,39 _Bool,catch,catchResume,choose,_Complex,__complex,__complex__,__const,__const__,disable,dtype,enable,__extension__, 40 fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue,_Noreturn,one_t,otype,restrict,_Static_assert, 25 41 _Thread_local,throw,throwResume,trait,try,ttype,typeof,__typeof,__typeof__,zero_t}, 26 42 }% … … 33 49 tabsize=4, % 4 space tabbing 34 50 xleftmargin=\parindent, % indent code to paragraph indentation 35 % extendedchars=true, % allow ASCII characters in the range 128-255 36 % escapechar=§, % LaTeX escape in CFA code §...§ (section symbol), emacs: C-q M-' 37 mathescape=true, % LaTeX math escape in CFA code $...$ 51 %mathescape=true, % LaTeX math escape in CFA code $...$ 52 escapechar=\$, % LaTeX escape in CFA code 38 53 keepspaces=true, % 39 54 showstringspaces=false, % do not show spaces with cup … … 43 58 % replace/adjust listing characters that look bad in sanserif 44 59 literate={-}{\raisebox{-0.15ex}{\texttt{-}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1 45 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 {_}{\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}1 {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 46 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{$\rightarrow$}2, 47 % moredelim=**[is][\color{red}]{®}{®}, % red highlighting ®...® (registered trademark symbol) emacs: C-q M-. 48 % moredelim=**[is][\color{blue}]{ß}{ß}, % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_ 49 % moredelim=**[is][\color{OliveGreen}]{¢}{¢}, % green highlighting ¢...¢ (cent symbol) emacs: C-q M-" 50 % moredelim=[is][\lstset{keywords={}}]{¶}{¶}, % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^ 60 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 {_}{\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 61 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2, 62 moredelim=**[is][\color{red}]{`}{`}, 51 63 }% lstset 52 64 … … 59 71 \acmJournal{PACMPL} 60 72 61 \title{Generic and Tuple Types with Efficient Dynamic Layout in \CFA {}}73 \title{Generic and Tuple Types with Efficient Dynamic Layout in \CFA} 62 74 63 75 \author{Aaron Moss} 76 \email{a3moss@uwaterloo.ca} 77 \author{Robert Schluntz} 78 \email{rschlunt@uwaterloo.ca} 79 \author{Peter Buhr} 80 \email{pabuhr@uwaterloo.ca} 64 81 \affiliation{% 65 82 \institution{University of Waterloo} … … 71 88 \country{Canada} 72 89 } 73 \email{a3moss@uwaterloo.ca}74 75 \author{Robert Schluntz}76 \affiliation{%77 \institution{University of Waterloo}78 \department{David R. Cheriton School of Computer Science}79 \streetaddress{Davis Centre, University of Waterloo}80 \city{Waterloo}81 \state{ON}82 \postcode{N2L 3G1}83 \country{Canada}84 }85 \email{rschlunt@uwaterloo.ca}86 87 \author{Peter Buhr}88 \affiliation{%89 \institution{University of Waterloo}90 \department{David R. 
Cheriton School of Computer Science}91 \streetaddress{Davis Centre, University of Waterloo}92 \city{Waterloo}93 \state{ON}94 \postcode{N2L 3G1}95 \country{Canada}96 }97 \email{pabuhr@uwaterloo.ca}98 90 99 91 \terms{generic, tuple, types} … … 125 117 126 118 \begin{abstract} 127 The C programming language is a foundational technology for modern computing , with millions of lines of code implementing everything from commercial operating systems to hobby projects. This installed base of code and the programmers who produced it represent a massive software engineering investment spanning decades. Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. The goal of the \CFA{} project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C. Particularly, \CFA{} is designed to have an orthogonal feature set based closely on the C programming paradigm, so that \CFA{} features can be added incrementally to existing C code-bases, and C programmers can learn \CFA{} extensions on an as-needed basis, preserving investment in existing engineers and code. This paper describes how generic and tuple types are implemented in \CFA{}in accordance with these principles.119 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers. Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers. Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers. This paper describes only two \CFA extensions, generic and tuple types, and how they are implemented in accordance with these principles. 128 120 \end{abstract} 129 121 130 122 \begin{document} 131 132 123 \maketitle 133 124 134 125 \section{Introduction \& Background} 135 126 136 \CFA {}\footnote{Pronounced ``C-for-all'', and written \CFA{} or Cforall.} is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar mental model for programmers. Four key design goals were set out in the original design of \CFA{}\citep{Bilson03}:127 \CFA\footnote{Pronounced ``C-for-all'', and written \CFA or Cforall.} is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers. 
Four key design goals were set out in the original design of \CFA~\citep{Bilson03}: 137 128 \begin{enumerate} 138 \item The behaviour of standard C code must remain the same when translated by a \CFA {}compiler as when translated by a C compiler.139 \item Standard C code must be as fast and as small when translated by a \CFA {}compiler as when translated by a C compiler.140 \item \CFA {}code must be at least as portable as standard C code.141 \item Extensions introduced by \CFA {}must be translated in the most efficient way possible.129 \item The behaviour of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler. 130 \item Standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler. 131 \item \CFA code must be at least as portable as standard C code. 132 \item Extensions introduced by \CFA must be translated in the most efficient way possible. 142 133 \end{enumerate} 143 The purpose of these goals is to ensure that existing C code-bases can be converted to \CFA{} incrementally and with minimal effort, and that programmers who already know C can productively produce \CFA{} code without training in \CFA{} beyond the extension features they wish to employ. In its current implementation, \CFA{} is compiled by translating it to GCC-dialect C, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)-(3). 144 145 \CFA{} has been previously extended with polymorphic functions and name overloading (including operator overloading) \citep{Bilson03}, and deterministically-executed constructors and destructors \citep{Schluntz17}. This paper describes how generic and tuple types are designed and implemented in \CFA{} in accordance with both the backward compatibility goals and existing features described above. 134 These goals ensure existing C code-bases can be converted to \CFA incrementally and with minimal effort, and C programmers can productively generate \CFA code without training beyond the features they wish to employ. In its current implementation, \CFA is compiled by translating it to the GCC-dialect of C~\citep{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)-(3). Ultimately, a compiler is necessary for advanced features and optimal performance. 135 136 \CFA has been previously extended with polymorphic functions and name overloading (including operator overloading) by \citet{Bilson03}, and deterministically-executed constructors and destructors by \citet{Schluntz17}. This paper describes how generic and tuple types are designed and implemented in \CFA in accordance with both the backward compatibility goals and existing features described above. 137 146 138 147 139 \subsection{Polymorphic Functions} 148 140 \label{sec:poly-fns} 149 141 150 \CFA{}'s polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}. The signature feature of \CFA{} is parametric-polymorphic functions; such functions are written using a @forall@ clause (which gives the language its name): 151 \begin{lstlisting} 152 forall(otype T) 153 T identity(T x) { return x; } 154 155 int forty_two = identity(42); // T is bound to int, forty_two == 42 156 \end{lstlisting} 157 The @identity@ function above can be applied to any complete object type (or ``@otype@''). 
The type variable @T@ is transformed into a set of additional implicit parameters to @identity@, that encode sufficient information about @T@ to create and return a variable of that type. The \CFA{} implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor. If this extra information is not needed, the type parameter can be declared as @dtype T@, where @dtype@ is short for ``data type''. 158 159 Here, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; preliminary experiments have shown this overhead to be similar to \CC{} virtual function calls. An advantage of this design is that, unlike \CC{} template functions, \CFA{} @forall@ functions are compatible with separate compilation. 160 161 Since bare polymorphic types do not provide a great range of available operations, \CFA{} provides a \emph{type assertion} mechanism to provide further information about a type: 162 \begin{lstlisting} 163 forall(otype T | { T twice(T); }) 164 T four_times(T x) { return twice( twice(x) ); } 165 166 double twice(double d) { return d * 2.0; } // (1) 167 168 double magic = four_times(10.5); // T is bound to double, uses (1) to satisfy type assertion 169 \end{lstlisting} 170 These type assertions may be either variable or function declarations that depend on a polymorphic type variable. @four_times@ can only be called with an argument for which there exists a function named @twice@ that can take that argument and return another value of the same type; a pointer to the appropriate @twice@ function is passed as an additional implicit parameter to the call of @four_times@. 171 172 Monomorphic specializations of polymorphic functions can themselves be used to satisfy type assertions. For instance, @twice@ could have been defined using the \CFA{} syntax for operator overloading as: 173 \begin{lstlisting} 174 forall(otype S | { S ?+?(S, S); }) 175 S twice(S x) { return x + x; } // (2) 176 \end{lstlisting} 177 This version of @twice@ works for any type @S@ that has an addition operator defined for it, and it could have been used to satisfy the type assertion on @four_times@. 178 The translator accomplishes this polymorphism by creating a wrapper function calling @twice // (2)@ with @S@ bound to @double@, then providing this wrapper function to @four_times@\footnote{\lstinline@twice // (2)@ could also have had a type parameter named \lstinline@T@; \CFA{} specifies renaming of the type parameters, which would avoid the name conflict with the type variable \lstinline@T@ of \lstinline@four_times@.}. 142 \CFA's polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}. The signature feature of \CFA is parametric-polymorphic functions; such functions are written using a @forall@ clause (which gives the language its name): 143 \begin{lstlisting} 144 `forall( otype T )` T identity( T val ) { return val; } 145 int forty_two = identity( 42 ); $\C{// T is bound to int, forty\_two == 42}$ 146 \end{lstlisting} 147 The @identity@ function above can be applied to any complete object-type (or ``@otype@''). The type variable @T@ is transformed into a set of additional implicit parameters to @identity@ that encode sufficient information about @T@ to create and return a variable of that type. 
The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor. If this extra information is not needed, \eg for a pointer, the type parameter can be declared as @dtype T@, where @dtype@ is short for ``data type''. 148 149 Here, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; preliminary experiments have shown this overhead to be similar to \CC virtual function calls. An advantage of this design is that, unlike \CC template functions, \CFA @forall@ functions are compatible with C separate compilation. 150 151 Since bare polymorphic-types provide only a narrow set of available operations, \CFA provides a \emph{type assertion} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type variable. For instance, @twice@ can be defined using the \CFA syntax for operator overloading: 152 \begin{lstlisting} 153 forall( otype T | { T `?`+`?`(T, T); } ) $\C{// ? denotes operands}$ 154 T twice( T x ) { return x + x; } $\C{// (2)}$ 155 int val = twice( twice( 3.7 ) ); 156 \end{lstlisting} 157 which works for any type @T@ with an addition operator defined. The translator accomplishes this polymorphism by creating a wrapper function for calling @+@ with @T@ bound to @double@, then providing this function to the first call of @twice@. It then has the option of using the same @twice@ again and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type in its type analysis. The first approach has a late conversion from integer to floating-point on the final assignment, while the second has an eager conversion to integer. \CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach. 158 159 Monomorphic specializations of polymorphic functions can satisfy polymorphic type-assertions. 160 % \begin{lstlisting} 161 % forall(otype T `| { T twice(T); }`) $\C{// type assertion}$ 162 % T four_times(T x) { return twice( twice(x) ); } 163 % double twice(double d) { return d * 2.0; } $\C{// (1)}$ 164 % double magic = four_times(10.5); $\C{// T bound to double, uses (1) to satisfy type assertion}$ 165 % \end{lstlisting} 166 \begin{lstlisting} 167 forall( otype T `| { int ?<?( T, T ); }` ) $\C{// type assertion}$ 168 void qsort( const T * arr, size_t size ); 169 forall( otype T `| { int ?<?( T, T ); }` ) $\C{// type assertion}$ 170 T * bsearch( T key, const T * arr, size_t size ); 171 double vals[10] = { /* 10 floating-point values */ }; 172 qsort( vals, 10 ); $\C{// sort array}$ 173 double * val = bsearch( 5.0, vals, 10 ); $\C{// binary search sorted array for key}$ 174 \end{lstlisting} 175 @qsort@ and @bsearch@ can only be called with arguments for which there exists a function named @<@ taking two arguments of the same type and returning an @int@ value. 176 Here, the built-in monomorphic specialization of @<@ for type @double@ is passed as an additional implicit parameter to the calls of @qsort@ and @bsearch@. 177 178 Crucial to the design of a new programming language are the libraries to access thousands of external features. 179 \CFA inherits a massive compatible library-base, where other programming languages have to rewrite or provide fragile inter-language communication with C. 
180 A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@, shown here searching a floating-point array: 181 \begin{lstlisting} 182 void * bsearch( const void * key, const void * base, size_t nmemb, size_t size, 183 int (* compar)(const void *, const void *)); 184 int comp( const void * t1, const void * t2 ) { return *(double *)t1 < *(double *)t2 ? -1 : 185 *(double *)t2 < *(double *)t1 ? 1 : 0; } 186 double key = 5.0; 187 double * val = (double *)bsearch( &key, vals, size, sizeof(vals[0]), comp ); 188 \end{lstlisting} 189 but providing a type-safe \CFA overloaded wrapper. 190 \begin{lstlisting} 191 forall( otype T | { int ?<?( T, T ); } ) T * bsearch( T key, const T * arr, size_t size ) { 192 int comp( const void * t1, const void * t2 ) { /* as above with double changed to T */ } 193 return (T *)bsearch( &key, arr, size, sizeof(T), comp ); 194 } 195 forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) { 196 T *result = bsearch( key, arr, size ); $\C{// call first version}$ 197 return result ? result - arr : size; $\C{// pointer subtraction includes sizeof(T)}$ 198 } 199 double * val = bsearch( 5.0, vals, 10 ); $\C{// selection based on return type}$ 200 int posn = bsearch( 5.0, vals, 10 ); 201 \end{lstlisting} 202 The nested routine @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result. 203 As well, an alternate kind of return is made available, position versus pointer to found element. 204 \CC's type-system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a templated @bsearch@. 205 206 Call-site inferencing and nested functions provide a localized form of inheritance. For example, @qsort@ only sorts in ascending order using @<@. However, it is trivial to locally change this behaviour: 207 \begin{lstlisting} 208 { int ?<?( double x, double y ) { return x `>` y; } $\C{// override behaviour}$ 209 qsort( vals, size ); $\C{// descending sort}$ 210 } 211 \end{lstlisting} 212 Within the block, the nested version of @<@ performs @>@ and this local version overrides the built-in @<@ so it is passed to @qsort@. 213 Hence, programmers can easily form new local environments to maximize reuse of existing functions and types. 214 215 Finally, variables may be overloaded: 216 \lstDeleteShortInline@ 217 \par\smallskip 218 \begin{tabular}{@{}l@{\hspace{\parindent}}|@{\hspace{\parindent}}l@{}} 219 \begin{lstlisting} 220 short int MAX = ...; 221 int MAX = ...; 222 double MAX = ...; 223 \end{lstlisting} 224 & 225 \begin{lstlisting} 226 short int s = MAX; // select correct MAX 227 int i = MAX; 228 double d = MAX; 229 \end{lstlisting} 230 \end{tabular} 231 \lstMakeShortInline@ 232 \smallskip\par\noindent 233 Hence, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@. 
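As a rough, hypothetical illustration of the implicit-parameter mechanism described above, the following plain-C sketch shows how a call such as twice( 3.7 ) might be lowered. The names _add_double, _twice, and call_twice are invented for this sketch and are not the translator's actual output; the real lowering also passes the alignment, constructors, and destructor for the otype parameter, which are elided here.
\begin{lstlisting}
#include <stddef.h>
/* wrapper for the + operator with T bound to double */
static void _add_double( void * out, void * x, void * y ) {
	*(double *)out = *(double *)x + *(double *)y;
}
/* T twice( T x ) { return x + x; }  -- T becomes a size plus an addition wrapper */
static void _twice( size_t _sizeof_T, void (*_add_T)( void *, void *, void * ),
		void * x, void * _rtn ) {
	_add_T( _rtn, x, x );		/* return x + x */
}
double call_twice( double x ) {		/* monomorphic call site, for illustration only */
	double rtn;
	_twice( sizeof( double ), _add_double, &x, &rtn );
	return rtn;
}
\end{lstlisting}
This mirrors the generated code for the trait-constrained functions shown in the next subsection, where the size, alignment, and operator wrappers for the type parameter are passed explicitly at each call.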
179 234 180 235 \subsection{Traits} 181 236 182 \CFA{} provides \emph{traits} as a means to name a group of type assertions, as in the example below: 183 \begin{lstlisting} 184 trait has_magnitude(otype T) { 185 bool ?<?(T, T); // comparison operator for T 186 T -?(T); // negation operator for T 187 void ?{}(T*, zero_t); // constructor from 0 literal 237 \CFA provides \emph{traits} to name a group of type assertions: 238 % \begin{lstlisting} 239 % trait has_magnitude(otype T) { 240 % _Bool ?<?(T, T); $\C{// comparison operator for T}$ 241 % T -?(T); $\C{// negation operator for T}$ 242 % void ?{}(T*, zero_t); $\C{// constructor from 0 literal}$ 243 % }; 244 % forall(otype M | has_magnitude(M)) 245 % M abs( M m ) { 246 % M zero = { 0 }; $\C{// uses zero\_t constructor from trait}$ 247 % return m < zero ? -m : m; 248 % } 249 % forall(otype M | has_magnitude(M)) 250 % M max_magnitude( M a, M b ) { 251 % return abs(a) < abs(b) ? b : a; 252 % } 253 % \end{lstlisting} 254 \begin{lstlisting} 255 trait summable( otype T ) { 256 void ?{}(T*, zero_t); $\C{// constructor from 0 literal}$ 257 T ?+?( T, T ); $\C{// assortment of additions}$ 258 T ?+=?( T *, T ); 259 T ++?( T * ); 260 T ?++( T * ); 188 261 }; 189 190 forall(otype M | has_magnitude(M)) 191 M abs( M m ) { 192 M zero = { 0 }; // uses zero_t constructor from trait 193 return m < zero ? -m : m; 194 } 195 196 forall(otype M | has_magnitude(M)) 197 M max_magnitude( M a, M b ) { 198 return abs(a) < abs(b) ? b : a; 199 } 200 \end{lstlisting} 201 This capability allows specifying the same set of assertions in multiple locations, without the repetition and likelihood of mistakes that come with manually writing them out for each function declaration. 202 203 @otype@ is essentially syntactic sugar for the following trait: 204 \begin{lstlisting} 205 trait otype(dtype T | sized(T)) { 206 // sized is a compiler-provided pseudo-trait for types with known size & alignment 207 void ?{}(T*); // default constructor 208 void ?{}(T*, T); // copy constructor 209 void ?=?(T*, T); // assignment operator 210 void ^?{}(T*); // destructor 262 forall( otype T | summable( T ) ) 263 T sum( T a[$\,$], size_t size ) { 264 T total = { 0 }; $\C{// instantiate T from 0}$ 265 for ( unsigned int i = 0; i < size; i += 1 ) 266 total += a[i]; $\C{// select appropriate +}$ 267 return total; 268 } 269 \end{lstlisting} 270 The trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each function declaration. 271 272 In fact, the set of operators is incomplete, \eg no assignment, but @otype@ is syntactic sugar for the following implicit trait: 273 \begin{lstlisting} 274 trait otype( dtype T | sized(T) ) { 275 // sized is a compiler-provided pseudo-trait for types with known size and alignment} 276 void ?{}( T * ); $\C{// default constructor}$ 277 void ?{}( T *, T ); $\C{// copy constructor}$ 278 void ?=?( T *, T ); $\C{// assignment operator}$ 279 void ^?{}( T * ); $\C{// destructor}$ 211 280 }; 212 281 \end{lstlisting} 213 Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete struct type -- they can be stack-allocated using the @alloca@ compiler builtin, default or copy-initialized, assigned, and deleted. 
As an example, the @ abs@ function above produces generated code something like the following (simplified for clarity and brevity):282 Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete struct type -- they can be stack-allocated using the @alloca@ compiler builtin, default or copy-initialized, assigned, and deleted. As an example, the @sum@ function produces generated code something like the following (simplified for clarity and brevity) \TODO{} fix example, maybe elide, it's likely too long with the more complicated function: 214 283 \begin{lstlisting} 215 284 void abs( size_t _sizeof_M, size_t _alignof_M, 216 285 void (*_ctor_M)(void*), void (*_copy_M)(void*, void*), 217 286 void (*_assign_M)(void*, void*), void (*_dtor_M)(void*), 218 bool (*_lt_M)(void*, void*), void (*_neg_M)(void*, void*),287 _Bool (*_lt_M)(void*, void*), void (*_neg_M)(void*, void*), 219 288 void (*_ctor_M_zero)(void*, int), 220 void* m, void* _rtn ) { // polymorphic parameter and return passed as void*221 // M zero = { 0 };222 void* zero = alloca(_sizeof_M); // stack allocate zero temporary223 _ctor_M_zero(zero, 0); // initialize using zero_t constructor224 // return m < zero ? -m : m;289 void* m, void* _rtn ) { $\C{// polymorphic parameter and return passed as void*}$ 290 $\C{// M zero = { 0 };}$ 291 void* zero = alloca(_sizeof_M); $\C{// stack allocate zero temporary}$ 292 _ctor_M_zero(zero, 0); $\C{// initialize using zero\_t constructor}$ 293 $\C{// return m < zero ? -m : m;}$ 225 294 void *_tmp = alloca(_sizeof_M); 226 _copy_M( _rtn, // copy-initialize return value227 _lt_M( m, zero ) ? // check condition228 (_neg_M(m, _tmp), _tmp) : // negate m295 _copy_M( _rtn, $\C{// copy-initialize return value}$ 296 _lt_M( m, zero ) ? $\C{// check condition}$ 297 (_neg_M(m, _tmp), _tmp) : $\C{// negate m}$ 229 298 m); 230 _dtor_M(_tmp); _dtor_M(zero); // destroy temporaries231 } 232 \end{lstlisting} 233 234 Semantically, traits are simply a named lists of type assertions, but they may be used for many of the same purposes that interfaces in Java or abstract base classes in \CC {} are used for. Unlike Java interfaces or \CC{} base classes, \CFA{} types do not explicitly state any inheritance relationship to traits they satisfy; this can be considered a form of structural inheritance, similar to implementation of an interface in Go, as opposed to the nominal inheritance model of Java and \CC{}. Nominal inheritance can be simulated with traits using marker variables or functions:299 _dtor_M(_tmp); _dtor_M(zero); $\C{// destroy temporaries}$ 300 } 301 \end{lstlisting} 302 303 Semantically, traits are simply a named lists of type assertions, but they may be used for many of the same purposes that interfaces in Java or abstract base classes in \CC are used for. Unlike Java interfaces or \CC base classes, \CFA types do not explicitly state any inheritance relationship to traits they satisfy; this can be considered a form of structural inheritance, similar to implementation of an interface in Go, as opposed to the nominal inheritance model of Java and \CC. 
Nominal inheritance can be simulated with traits using marker variables or functions: 235 304 \begin{lstlisting} 236 305 trait nominal(otype T) { … … 238 307 }; 239 308 240 int is_nominal; // int now satisfies the nominal trait309 int is_nominal; $\C{// int now satisfies the nominal trait}$ 241 310 \end{lstlisting} 242 311 … … 244 313 \begin{lstlisting} 245 314 trait pointer_like(otype Ptr, otype El) { 246 lvalue El *?(Ptr); // Ptr can be dereferenced into a modifiable value of type El315 lvalue El *?(Ptr); $\C{// Ptr can be dereferenced into a modifiable value of type El}$ 247 316 } 248 317 249 318 struct list { 250 319 int value; 251 list *next; // may omit "struct" on type names320 list *next; $\C{// may omit "struct" on type names}$ 252 321 }; 253 322 … … 257 326 \end{lstlisting} 258 327 259 In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg {}@int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@).328 In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@). 260 329 While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously. 261 330 … … 264 333 One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms. Broadly speaking, there are three approaches to create data structures in C. One approach is to write bespoke data structures for each context in which they are needed. While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures. A second approach is to use @void*@-based polymorphism. This approach is taken by the C standard library functions @qsort@ and @bsearch@, and does allow the use of common code for common functionality. However, basing all polymorphism on @void*@ eliminates the type-checker's ability to ensure that argument types are properly matched, often requires a number of extra function parameters, and also adds pointer indirection and dynamic allocation to algorithms and data structures that would not otherwise require them. A third approach to generic code is to use pre-processor macros to generate it -- this approach does allow the generated code to be both generic and type-checked, though any errors produced may be difficult to interpret. Furthermore, writing and invoking C code as preprocessor macros is unnatural and somewhat inflexible. 265 334 266 Other C-like languages such as \CC {} and Java use \emph{generic types} to produce type-safe abstract data types. 
The authors have chosen to implement generic types as well, with some care taken that the generic types design for \CFA{} integrates efficiently and naturally with the existing polymorphic functions in \CFA{}while retaining backwards compatibility with C; maintaining separate compilation is a particularly important constraint on the design. However, where the concrete parameters of the generic type are known, there is not extra overhead for the use of a generic type.335 Other C-like languages such as \CC and Java use \emph{generic types} to produce type-safe abstract data types. The authors have chosen to implement generic types as well, with some care taken that the generic types design for \CFA integrates efficiently and naturally with the existing polymorphic functions in \CFA while retaining backwards compatibility with C; maintaining separate compilation is a particularly important constraint on the design. However, where the concrete parameters of the generic type are known, there is not extra overhead for the use of a generic type. 267 336 268 337 A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name: … … 289 358 \end{lstlisting} 290 359 291 \CFA {} classifies generic types as either \emph{concrete} or \emph{dynamic}. Dynamic generic types vary in their in-memory layout depending on their type parameters, while concrete generic types have a fixed memory layout regardless of type parameters. A type may have polymorphic parameters but still be concrete; in \CFA{}such types are called \emph{dtype-static}. Polymorphic pointers are an example of dtype-static types -- @forall(dtype T) T*@ is a polymorphic type, but for any @T@ chosen, @T*@ has exactly the same in-memory representation as a @void*@, and can therefore be represented by a @void*@ in code generation.292 293 \CFA {}generic types may also specify constraints on their argument type to be checked by the compiler. For example, consider the following declaration of a sorted set type, which ensures that the set key supports comparison and tests for equality:294 \begin{lstlisting} 295 forall(otype Key | { bool ?==?(Key, Key); bool ?<?(Key, Key); })360 \CFA classifies generic types as either \emph{concrete} or \emph{dynamic}. Dynamic generic types vary in their in-memory layout depending on their type parameters, while concrete generic types have a fixed memory layout regardless of type parameters. A type may have polymorphic parameters but still be concrete; in \CFA such types are called \emph{dtype-static}. Polymorphic pointers are an example of dtype-static types -- @forall(dtype T) T*@ is a polymorphic type, but for any @T@ chosen, @T*@ has exactly the same in-memory representation as a @void*@, and can therefore be represented by a @void*@ in code generation. 361 362 \CFA generic types may also specify constraints on their argument type to be checked by the compiler. For example, consider the following declaration of a sorted set type, which ensures that the set key supports comparison and tests for equality: 363 \begin{lstlisting} 364 forall(otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); }) 296 365 struct sorted_set; 297 366 \end{lstlisting} … … 299 368 \subsection{Concrete Generic Types} 300 369 301 The \CFA {}translator instantiates concrete generic types by template-expanding them to fresh struct types; concrete generic types can therefore be used with zero runtime overhead. 
To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated struct declarations where appropriate. For example, a function declaration that accepts or returns a concrete generic type produces a declaration for the instantiated struct in the same scope, which all callers that can see that declaration may reuse. As an example of the expansion, the concrete instantiation for @pair(const char*, int)@ looks like this:370 The \CFA translator instantiates concrete generic types by template-expanding them to fresh struct types; concrete generic types can therefore be used with zero runtime overhead. To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated struct declarations where appropriate. For example, a function declaration that accepts or returns a concrete generic type produces a declaration for the instantiated struct in the same scope, which all callers that can see that declaration may reuse. As an example of the expansion, the concrete instantiation for @pair(const char*, int)@ looks like this: 302 371 \begin{lstlisting} 303 372 struct _pair_conc1 { … … 317 386 \subsection{Dynamic Generic Types} 318 387 319 Though \CFA {} implements concrete generic types efficiently, it also has a fully general system for computing with dynamic generic types. As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. Dynamic generic structs have the same size and alignment parameter, and also an \emph{offset array} which contains the offsets of each member of the struct\footnote{Dynamic generic unions need no such offset array, as all members are at offset 0; the size and alignment parameters are still provided for dynamic unions, however.}. Access to members\footnote{The \lstinline@offsetof@ macro is implemented similarly.} of a dynamic generic struct is provided by adding the corresponding member of the offset array to the struct pointer at runtime, essentially moving a compile-time offset calculation to runtime where necessary.388 Though \CFA implements concrete generic types efficiently, it also has a fully general system for computing with dynamic generic types. As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. Dynamic generic structs also have implicit size and alignment parameters, and also an \emph{offset array} which contains the offsets of each member of the struct\footnote{Dynamic generic unions need no such offset array, as all members are at offset 0; the size and alignment parameters are still provided for dynamic unions, however.}. Access to members\footnote{The \lstinline@offsetof@ macro is implemented similarly.} of a dynamic generic struct is provided by adding the corresponding member of the offset array to the struct pointer at runtime, essentially moving a compile-time offset calculation to runtime where necessary. 320 389 321 390 These offset arrays are statically generated where possible. 
If a dynamic generic type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume that the generic type is complete (that is, has a known layout) at any call-site, and the offset array is passed from the caller; if the generic type is concrete at the call site the elements of this offset array can even be statically generated using the C @offsetof@ macro. As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void*@, and @_offsetof_pair@ is the offset array passed in to @value@ for @pair(const char*, T)@. The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) };@. 322 391 323 In some cases the offset arrays cannot be statically generated. For instance, modularity is generally provided in C by including an opaque forward-declaration of a struct and associated accessor and mutator routines in a header file, with the actual implementations in a separately-compiled \texttt{.c} file. \CFA {} supports this pattern for generic types, and in this instance the caller does not know the actual layout or size of the dynamic generic type, and only holds it by pointer. The \CFA{}translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed in to a function from that function's caller. These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic struct (un-@sized@ parameters are forbidden from the language from being used in a context that affects layout). Results of these layout functions are cached so that they are only computed once per type per function.%, as in the example below for @pair@.392 In some cases the offset arrays cannot be statically generated. For instance, modularity is generally provided in C by including an opaque forward-declaration of a struct and associated accessor and mutator routines in a header file, with the actual implementations in a separately-compiled \texttt{.c} file. \CFA supports this pattern for generic types, and in this instance the caller does not know the actual layout or size of the dynamic generic type, and only holds it by pointer. The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed in to a function from that function's caller. These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic struct (un-@sized@ parameters are forbidden from the language from being used in a context that affects layout). Results of these layout functions are cached so that they are only computed once per type per function.%, as in the example below for @pair@. 324 393 % \begin{lstlisting} 325 394 % static inline void _layoutof_pair(size_t* _szeof_pair, size_t* _alignof_pair, size_t* _offsetof_pair, … … 348 417 Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature. 
For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values. This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function. 349 418 350 Whether a type is concrete, dtype-static, or dynamic is decided based solely on the type parameters and @forall@ clause on the struct declaration. This design allows opaque forward declarations of generic types like @forall(otype T) struct Box;@ -- like in C, all uses of @Box(T)@ can be in a separately compiled translation unit, and callers from other translation units know the proper calling conventions to use. If the definition of a struct type was included in the decision of whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg {}@forall(otype T) struct unique_ptr { T* p };@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}.), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off.419 Whether a type is concrete, dtype-static, or dynamic is decided based solely on the type parameters and @forall@ clause on the struct declaration. This design allows opaque forward declarations of generic types like @forall(otype T) struct Box;@ -- like in C, all uses of @Box(T)@ can be in a separately compiled translation unit, and callers from other translation units know the proper calling conventions to use. If the definition of a struct type was included in the decision of whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T) struct unique_ptr { T* p };@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}.), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off. 351 420 352 421 \subsection{Applications} … … 362 431 } 363 432 \end{lstlisting} 364 Since @pair(T*, T*)@ is a concrete type, there are no added implicit parameters to @lexcmp@, so the code generated by \CFA {} is effectively identical to a version of this function written in standard C using @void*@, yet the \CFA{}version is type-checked to ensure that the fields of both pairs and the arguments to the comparison function match in type.433 Since @pair(T*, T*)@ is a concrete type, there are no added implicit parameters to @lexcmp@, so the code generated by \CFA is effectively identical to a version of this function written in standard C using @void*@, yet the \CFA version is type-checked to ensure that the fields of both pairs and the arguments to the comparison function match in type. 365 434 366 435 Another useful pattern enabled by reused dtype-static type instantiations is zero-cost ``tag'' structs. Sometimes a particular bit of information is only useful for type-checking, and can be omitted at runtime. 
Tag structs can be used to provide this information to the compiler without further runtime overhead, as in the following example: … … 383 452 marathon + swimming_pool; // ERROR -- caught by compiler 384 453 \end{lstlisting} 385 @scalar@ is a dtype-static type, so all uses of it use a single struct definition, containing only a single @unsigned long@, and can share the same implementations of common routines like @?+?@ -- these implementations may even be separately compiled, unlike \CC {} template functions. However, the \CFA{}type-checker ensures that matching types are used by all calls to @?+?@, preventing nonsensical computations like adding the length of a marathon to the volume of an olympic pool.454 @scalar@ is a dtype-static type, so all uses of it use a single struct definition, containing only a single @unsigned long@, and can share the same implementations of common routines like @?+?@ -- these implementations may even be separately compiled, unlike \CC template functions. However, the \CFA type-checker ensures that matching types are used by all calls to @?+?@, preventing nonsensical computations like adding the length of a marathon to the volume of an olympic pool. 386 455 387 456 \section{Tuples} 388 457 \label{sec:tuples} 389 458 390 The @pair(R, S)@ generic type used as an example in the previous section can be considered a special case of a more general \emph{tuple} data structure. The authors have implemented tuples in \CFA {}, with a design particularly motivated by two use cases: \emph{multiple-return-value functions} and \emph{variadic functions}.459 The @pair(R, S)@ generic type used as an example in the previous section can be considered a special case of a more general \emph{tuple} data structure. The authors have implemented tuples in \CFA, with a design particularly motivated by two use cases: \emph{multiple-return-value functions} and \emph{variadic functions}. 391 460 392 461 In standard C, functions can return at most one value. This restriction results in code that emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}. In the former situation, the function designer creates a record type that combines all of the return values into a single type. Unfortunately, the designer must come up with a name for the return type and for each of its fields. Unnecessary naming is a common programming language issue, introducing verbosity and a complication of the user's mental model. As such, this technique is effective when used sparingly, but can quickly get out of hand if many functions need to return different combinations of types. In the latter approach, the designer simulates multiple return values by passing the additional return values as pointer parameters. The pointer parameters are assigned inside of the routine body to emulate a return. Using this approach, the caller is directly responsible for allocating storage for the additional temporary return values. This responsibility complicates the call site with a sequence of variable declarations leading up to the call. Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from only the routine signature whether the callee expects such a parameter to be initialized before the call. Furthermore, while many C routines that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, there are many C routines that are not null-safe. 
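For concreteness, a small plain-C sketch of the two work-arounds just described, returning a quotient and a remainder from a single function; the type and function names are illustrative only.
\begin{lstlisting}
/* (1) aggregation: bundle both results in a named struct type */
struct div_result { int quot; int rem; };
struct div_result div_agg( int num, int den ) {
	struct div_result r = { num / den, num % den };
	return r;
}
/* (2) aliasing: return the extra values through pointer (out) parameters */
int div_out( int num, int den, int * quot, int * rem ) {
	if ( den == 0 ) return 0;	/* nothing in the signature marks quot/rem as outputs */
	*quot = num / den;  *rem = num % den;
	return 1;
}
\end{lstlisting}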
On a related note, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler. … … 409 478 \end{lstlisting} 410 479 411 The @va_list@ type is a special C data type that abstracts variadic argument manipulation. The @va_start@ macro initializes a @va_list@, given the last named parameter. Each use of the @va_arg@ macro allows access to the next variadic argument, given a type. Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call. As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures. In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined 480 The @va_list@ type is a special C data type that abstracts variadic argument manipulation. The @va_start@ macro initializes a @va_list@, given the last named parameter. Each use of the @va_arg@ macro allows access to the next variadic argument, given a type. Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call. As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures. In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined~\citep{C11}. Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body. Since they rely on programmer convention rather than compile-time checks, variadic functions are inherently unsafe. 412 481 413 482 In practice, compilers can provide warnings to help mitigate some of the problems. For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers. Unfortunately, this attribute does not permit extensions to the format string syntax, so a programmer cannot extend it to warn for mismatches with custom types. … … 415 484 \subsection{Tuple Expressions} 416 485 417 The tuple extensions in \CFA {} can express multiple return values and variadic function parameters in an efficient and type-safe manner. \CFA{} introduces \emph{tuple expressions} and \emph{tuple types}. A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types. The type of a tuple expression is the tuple of the subexpression types, or a \emph{tuple type}. In \CFA{}, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets. For example, the expression @[5, 'x', 10.5]@ has type @[int, char, double]@. The previous expression has three \emph{components}. Each component in a tuple expression can be any \CFA{}expression, including another tuple expression. The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. 
It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}.418 419 \CFA {}allows declaration of \emph{tuple variables}, variables of tuple type. For example:486 The tuple extensions in \CFA can express multiple return values and variadic function parameters in an efficient and type-safe manner. \CFA introduces \emph{tuple expressions} and \emph{tuple types}. A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types. The type of a tuple expression is the tuple of the subexpression types, or a \emph{tuple type}. In \CFA, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets. For example, the expression @[5, 'x', 10.5]@ has type @[int, char, double]@. The previous expression has three \emph{components}. Each component in a tuple expression can be any \CFA expression, including another tuple expression. The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}. 487 488 \CFA allows declaration of \emph{tuple variables}, variables of tuple type. For example: 420 489 \begin{lstlisting} 421 490 [int, char] most_frequent(const char*); … … 449 518 h(x, y); // flatten & structure 450 519 \end{lstlisting} 451 In \CFA {}, each of these calls is valid. In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. Finally, in the call to @h@, @y@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure.452 453 In {K-W C} \citep{Buhr94a,Till89}, a precursor to \CFA{}, there were 4 tuple coercions: opening, closing, flattening, and structuring. Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. Flattening coerces a nested tuple into a flat tuple, \ie{} it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. Structuring moves in the opposite direction, \ie{}it takes a flat tuple value and provides structure by introducing nested tuple components.454 455 In \CFA {}, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. Specifically, the expression resolution algorithm examines all of the possible alternatives for an expression to determine the best match. In resolving a function call expression, each combination of function value and list of argument alternatives is examined. 
Given a particular argument list and function value, the list of argument alternatives is flattened to produce a list of non-tuple valued expressions. Then the flattened list of expressions is compared with each value in the function's parameter list. If the parameter's type is not a tuple type, then the current argument value is unified with the parameter type, and on success the next argument and parameter are examined. If the parameter's type is a tuple type, then the structuring conversion takes effect, recursively applying the parameter matching algorithm using the tuple's component types as the parameter list types. Assuming a successful unification, eventually the algorithm gets to the end of the tuple type, which causes all of the matching expressions to be consumed and structured into a tuple expression. For example, in520 In \CFA, each of these calls is valid. In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. Finally, in the call to @h@, @y@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure. 521 522 % In {K-W C} \citep{Buhr94a,Till89}, a precursor to \CFA, there were 4 tuple coercions: opening, closing, flattening, and structuring. Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. Flattening coerces a nested tuple into a flat tuple, \ie it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. Structuring moves in the opposite direction, \ie it takes a flat tuple value and provides structure by introducing nested tuple components. 523 524 In \CFA, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. Specifically, the expression resolution algorithm examines all of the possible alternatives for an expression to determine the best match. In resolving a function call expression, each combination of function value and list of argument alternatives is examined. Given a particular argument list and function value, the list of argument alternatives is flattened to produce a list of non-tuple valued expressions. Then the flattened list of expressions is compared with each value in the function's parameter list. If the parameter's type is not a tuple type, then the current argument value is unified with the parameter type, and on success the next argument and parameter are examined. If the parameter's type is a tuple type, then the structuring conversion takes effect, recursively applying the parameter matching algorithm using the tuple's component types as the parameter list types. Assuming a successful unification, eventually the algorithm gets to the end of the tuple type, which causes all of the matching expressions to be consumed and structured into a tuple expression. 
For example, in 456 525 \begin{lstlisting} 457 526 int f(int, [double, int]); … … 475 544 double z = [x, f()].0.1; // access second component of first component of tuple expression 476 545 \end{lstlisting} 477 As seen above, tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple. This feature was proposed for {K-W C}, but never implemented 546 As seen above, tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple. This feature was proposed for {K-W C}, but never implemented~\citep[p.~45]{Till89}. 478 547 479 548 It is possible to access multiple fields from a single expression using a \emph{member-access tuple expression}. The result is a single tuple expression whose type is the tuple of the types of the members. For example, … … 484 553 Here, the type of @s.[x, y, z]@ is @[int, double, char *]@. A member tuple expression has the form @a.[x, y, z];@ where @a@ is an expression with type @T@, where @T@ supports member access expressions, and @x, y, z@ are all members of @T@ with types @T$_x$@, @T$_y$@, and @T$_z$@ respectively. Then the type of @a.[x, y, z]@ is @[T$_x$, T$_y$, T$_z$]@. 485 554 486 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg {}rearrange components, drop components, duplicate components, etc.):555 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange components, drop components, duplicate components, etc.): 487 556 \begin{lstlisting} 488 557 [int, int, long, double] x; … … 520 589 That is, @?=?(&$L_i$, $R_i$)@ must be a well-typed expression. In the previous example, @[x, y] = z@, @z@ is flattened into @z.0, z.1@, and the assignments @x = z.0@ and @y = z.1@ are executed. 521 590 522 A mass assignment assigns the value $R$ to each $L_i$. For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. This rule differs from C cascading assignment (\eg {}@a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well.591 A mass assignment assigns the value $R$ to each $L_i$. For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. This rule differs from C cascading assignment (\eg @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. 
On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well. 523 592 524 593 Both kinds of tuple assignment have parallel semantics, such that each value on the left side and right side is evaluated \emph{before} any assignments occur. As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function: … … 530 599 531 600 Tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C. This definition allows cascading tuple assignment and use of tuple assignment in other expression contexts, an occasionally useful idiom to keep code succinct and reduce repetition. 532 % In \CFA {}, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, as in normal assignment. That is, a tuple assignment produces the value of the left-hand side after assignment. These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. These semantics are a change from the original tuple design in {K-W C} \citep{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. This decision was made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position. While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA{} without affecting backwards compatibility with C. Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. In another language, tuple assignment as a statement could be reasonable, but it would be inconsistent for tuple assignment to be the only kind of assignment in \CFA{}that is not an expression.601 % In \CFA, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, as in normal assignment. That is, a tuple assignment produces the value of the left-hand side after assignment. These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. These semantics are a change from the original tuple design in {K-W C}~\citep{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. This decision was made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position. While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA without affecting backwards compatibility with C. Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. 
In another language, tuple assignment as a statement could be reasonable, but it would be inconsistent for tuple assignment to be the only kind of assignment in \CFA that is not an expression. 533 602 534 603 \subsection{Casting} 535 604 536 In C, the cast operator is used to explicitly convert between types. In \CFA {}, the cast operator has a secondary use as type ascription. That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:605 In C, the cast operator is used to explicitly convert between types. In \CFA, the cast operator has a secondary use as type ascription. That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function: 537 606 \begin{lstlisting} 538 607 int f(); // (1) … … 543 612 \end{lstlisting} 544 613 545 Since casting is a fundamental operation in \CFA {}, casts should be given a meaningful interpretation in the context of tuples. Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:614 Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples. Taking a look at standard C provides some guidance with respect to the way casts should work with tuples: 546 615 \begin{lstlisting} 547 616 int f(); … … 570 639 Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@). 571 640 572 Note that a cast is not a function call in \CFA {}, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@.641 Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@. 573 642 574 643 \subsection{Polymorphism} 575 644 576 Tuples also integrate with \CFA {}polymorphism as a special sort of generic type. Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types.645 Tuples also integrate with \CFA polymorphism as a special sort of generic type. Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types. 
577 646 \begin{lstlisting} 578 647 forall(otype T, dtype U) … … 594 663 \end{lstlisting} 595 664 596 Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. Previously in \CFA {}, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie{}no implicit conversions are applied to assertion arguments). In the example below:665 Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. Previously in \CFA, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie no implicit conversions are applied to assertion arguments). In the example below: 597 666 \begin{lstlisting} 598 667 int f([int, double], double); … … 613 682 \subsection{Variadic Tuples} 614 683 615 To define variadic functions, \CFA {} adds a new kind of type parameter, @ttype@. Matching against a @ttype@ (``tuple type'') parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types. In a given parameter list, there should be at most one @ttype@ parameter that must occur last, otherwise the call can never resolve, given the previous rule. This idea essentially matches normal variadic semantics, with a strong feeling of similarity to \CCeleven{}variadic templates. As such, @ttype@ variables are also referred to as \emph{argument} or \emph{parameter packs} in this paper.684 To define variadic functions, \CFA adds a new kind of type parameter, @ttype@. Matching against a @ttype@ (``tuple type'') parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types. In a given parameter list, there should be at most one @ttype@ parameter that must occur last, otherwise the call can never resolve, given the previous rule. This idea essentially matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates. As such, @ttype@ variables are also referred to as \emph{argument} or \emph{parameter packs} in this paper. 616 685 617 686 Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is through recursion. Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled. … … 634 703 Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10+sum(20, 30)@ $\rightarrow$ @10+(20+sum(30))@ $\rightarrow$ @10+(20+(30+sum()))@ $\rightarrow$ @10+(20+(30+0))@. 635 704 636 As a point of note, this version does not require any form of argument descriptor, since the \CFA {}type system keeps track of all of these details. It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments:705 As a point of note, this version does not require any form of argument descriptor, since the \CFA type system keeps track of all of these details. It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments: 637 706 \begin{lstlisting} 638 707 int sum(int x, int y){ … … 693 762 Pair(int, char) * x = new(42, '!'); 694 763 \end{lstlisting} 695 The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects. 
This function provides the type-safety of @new@ in \CC {}, without the need to specify the allocated type again, thanks to return-type inference.764 The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects. This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference. 696 765 697 766 In the call to @new@, @Pair(double, char)@ is selected to match @T@, and @Params@ is expanded to match @[double, char]@. The constructor (1) may be specialized to satisfy the assertion for a constructor with an interface compatible with @void ?{}(Pair(int, char) *, int, char)@. … … 701 770 \subsection{Implementation} 702 771 703 Tuples are implemented in the \CFA {}translator via a transformation into generic types. For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated. For example:772 Tuples are implemented in the \CFA translator via a transformation into generic types. For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated. For example: 704 773 \begin{lstlisting} 705 774 [int, int] f() { … … 776 845 Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order. The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true. Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. Tuple member expressions also take advantage of unique expressions in the case of possible impurity. 777 846 778 Currently, the \CFA {}translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions.779 780 The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, \eg {} in a unique expression. The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. However, there are other places where the \CFA{}translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new.847 Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions. 848 849 The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. 
A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple times, \eg in a unique expression. The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new. 781 850 782 851 \section{Related Work} 783 852 784 \CC {} is the existing language it is most natural to compare \CFA{} to, as they are both more modern extensions to C with backwards source compatibility. The most fundamental difference in approach between \CC{} and \CFA{} is their approach to this C compatibility. \CC{} does provide fairly strong source backwards compatibility with C, but is a dramatically more complex language than C, and imposes a steep learning curve to use many of its extension features. For instance, in a break from general C practice, template code is typically written in header files, with a variety of subtle restrictions implied on its use by this choice, while the other polymorphism mechanism made available by \CC{}, class inheritance, requires programmers to learn an entirely new object-oriented programming paradigm; the interaction between templates and inheritance is also quite complex. \CFA{}, by contrast, has a single facility for polymorphic code, one which supports separate compilation and the existing procedural paradigm of C code. A major difference between the approaches of \CC{} and \CFA{} to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA{}. One of the major limiting factors of \CC{}'s approach is that templates cannot be separately compiled, and, until concepts \citep{C++Concepts} are standardized (currently anticipated for \CCtwenty{}), \CC{} provides no way to specify the requirements of a generic function in code beyond compilation errors for template expansion failures. By contrast, the explicit nature of assertions in \CFA{} allows polymorphic functions to be separately compiled, and for their requirements to be checked by the compiler; similarly, \CFA{} generic types may be opaque, unlike \CC{}template classes.785 786 Cyclone also provides capabilities for polymorphic functions and existential types \citep{Gro06}, similar in concept to \CFA{}'s @forall@ functions and generic types. Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process. Furthermore, Cyclone's polymorphic functions and types are restricted in that they may only abstract over types with the same layout and calling convention as @void*@, in practice only pointer types and @int@ - in \CFA{} terms, all Cyclone polymorphism must be dtype-static. This design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, but is more restrictive than \CFA{}'s more general model.787 788 Go and Rust are both modern, compiled languages with abstraction features similar to \CFA {} traits, \emph{interfaces} in Go and \emph{traits} in Rust.
However, both languages represent dramatic departures from C in terms of language model, and neither has the same level of compatibility with C as \CFA{}. Go is a garbage-collected language, imposing the associated runtime overhead, and complicating foreign-function calls with the necessity of accounting for data transfer between the managed Go runtime and the unmanaged C runtime. Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. Rust is not garbage-collected, and thus has a lighter-weight runtime that is more easily interoperable with C. It also possesses much more powerful abstraction capabilities for writing generic code than Go. On the other hand, Rust's borrow-checker, while it does provide strong safety guarantees, is complex and difficult to learn, and imposes a distinctly idiomatic programming style on Rust. \CFA{}, with its more modest safety features, is significantly easier to port C code to, while maintaining the idiomatic style of the original source.853 \CC is the existing language it is most natural to compare \CFA to, as they are both more modern extensions to C with backwards source compatibility. The most fundamental difference in approach between \CC and \CFA is their approach to this C compatibility. \CC does provide fairly strong source backwards compatibility with C, but is a dramatically more complex language than C, and imposes a steep learning curve to use many of its extension features. For instance, in a break from general C practice, template code is typically written in header files, with a variety of subtle restrictions implied on its use by this choice, while the other polymorphism mechanism made available by \CC, class inheritance, requires programmers to learn an entirely new object-oriented programming paradigm; the interaction between templates and inheritance is also quite complex. \CFA, by contrast, has a single facility for polymorphic code, one which supports separate compilation and the existing procedural paradigm of C code. A major difference between the approaches of \CC and \CFA to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA. One of the major limiting factors of \CC's approach is that templates cannot be separately compiled, and, until concepts~\citep{C++Concepts} are standardized (currently anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors for template expansion failures. By contrast, the explicit nature of assertions in \CFA allows polymorphic functions to be separately compiled, and for their requirements to be checked by the compiler; similarly, \CFA generic types may be opaque, unlike \CC template classes. 854 855 Cyclone also provides capabilities for polymorphic functions and existential types~\citep{Grossman06}, similar in concept to \CFA's @forall@ functions and generic types. Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process. Furthermore, Cyclone's polymorphic functions and types are restricted in that they may only abstract over types with the same layout and calling convention as @void*@, in practice only pointer types and @int@ - in \CFA terms, all Cyclone polymorphism must be dtype-static. 
This design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, but is more restrictive than \CFA's more general model. 856 857 Go and Rust are both modern, compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in Go and \emph{traits} in Rust. However, both languages represent dramatic departures from C in terms of language model, and neither has the same level of compatibility with C as \CFA. Go is a garbage-collected language, imposing the associated runtime overhead, and complicating foreign-function calls with the necessity of accounting for data transfer between the managed Go runtime and the unmanaged C runtime. Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. Rust is not garbage-collected, and thus has a lighter-weight runtime that is more easily interoperable with C. It also possesses much more powerful abstraction capabilities for writing generic code than Go. On the other hand, Rust's borrow-checker, while it does provide strong safety guarantees, is complex and difficult to learn, and imposes a distinctly idiomatic programming style on Rust. \CFA, with its more modest safety features, is significantly easier to port C code to, while maintaining the idiomatic style of the original source. 789 858 790 859 \section{Conclusion \& Future Work} 791 860 792 In conclusion, the authors' design for generic types and tuples imposes minimal runtime overhead while still supporting a full range of C features, including separately-compiled modules. There is ongoing work on a wide range of \CFA {} feature extensions, including reference types, exceptions, and concurrent programming primitives. In addition to this work, there are some interesting future directions the polymorphism design could take. Notably, \CC{} template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. \CFA{} polymorphic functions, by contrast, use an approach that is essentially dynamic virtual dispatch. The runtime overhead of this approach is low, but not as low as \CC{}template functions, and it may be beneficial to provide a mechanism for particularly performance-sensitive code to close this gap. Further research is needed, but two promising approaches are to allow an annotation on polymorphic function call sites that tells the translator to create a template-specialization of the function (provided the code is visible in the current translation unit) or placing an annotation on polymorphic function definitions that instantiates a version of the polymorphic function specialized to some set of types. These approaches are not mutually exclusive, and would allow these performance optimizations to be applied only where most useful to increase performance, without suffering the code bloat or loss of generality of a template expansion approach where it is unnecessary.861 In conclusion, the authors' design for generic types and tuples imposes minimal runtime overhead while still supporting a full range of C features, including separately-compiled modules. There is ongoing work on a wide range of \CFA feature extensions, including reference types, exceptions, and concurrent programming primitives. In addition to this work, there are some interesting future directions the polymorphism design could take. 
Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. \CFA polymorphic functions, by contrast, use an approach that is essentially dynamic virtual dispatch. The runtime overhead of this approach is low, but not as low as \CC template functions, and it may be beneficial to provide a mechanism for particularly performance-sensitive code to close this gap. Further research is needed, but two promising approaches are to allow an annotation on polymorphic function call sites that tells the translator to create a template-specialization of the function (provided the code is visible in the current translation unit) or placing an annotation on polymorphic function definitions that instantiates a version of the polymorphic function specialized to some set of types. These approaches are not mutually exclusive, and would allow these performance optimizations to be applied only where most useful to increase performance, without suffering the code bloat or loss of generality of a template expansion approach where it is unnecessary. 793 862 794 863 \begin{acks} … … 797 866 798 867 \bibliographystyle{ACM-Reference-Format} 799 \bibliography{ generic_types}868 \bibliography{cfa} 800 869 801 870 \end{document} 871 872 % Local Variables: % 873 % tab-width: 4 % 874 % compile-command: "make" % 875 % End: % -
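The revised tuple sections above describe multiple assignment (the temporary-free swap) and mass assignment (a conversion applied in each individual assignment) in prose, with the accompanying listings collapsed in this diff view. The following is a minimal sketch, not part of the changeset, that exercises both behaviours exactly as the prose states them; the swap spelling [ x, y ] = [ y, x ] is reconstructed from the description, and the sout output style follows src/tests/searchsort.c below.

    #include <fstream>                      // CFA I/O: sout, endl (as used in src/tests/searchsort.c)

    int main( void ) {
        int x = 3, y = 7;
        [ x, y ] = [ y, x ];                // multiple assignment: parallel semantics swap without a temporary
        sout | x | ", " | y | endl;         // 7, 3

        double d;  int i;
        [ d, i ] = 3.14;                    // mass assignment: 3.14 converted separately for each target
        sout | d | ", " | i | endl;         // 3.14, 3, as described in the mass-assignment paragraph
    }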
src/GenPoly/InstantiateGeneric.cc
r23063ea r727cf70f 255 255 } 256 256 257 assert ( baseParam == baseParams.end() && param == params.end() &&"Type parameters should match type variables" );257 assertf( baseParam == baseParams.end() && param == params.end(), "Type parameters should match type variables" ); 258 258 return gt; 259 259 } -
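The assertf call introduced above passes the explanatory message as a separate argument instead of folding it onto the condition with &&, as the old assert call did. The translator's real assertf is not shown in this changeset; the macro below is only a hypothetical approximation in plain C, included to illustrate the separated condition/message idiom the new call site relies on, and the variables in main are illustrative only.

    #include <stdio.h>
    #include <stdlib.h>

    // Hypothetical stand-in for the translator's assertf: on failure, print the
    // printf-style message and abort. A sketch only; the real definition may differ.
    #define assertf( cond, ... ) \
        do { if ( !(cond) ) { fprintf( stderr, __VA_ARGS__ ); fputc( '\n', stderr ); abort(); } } while (0)

    int main( void ) {
        int baseParamsLeft = 0, paramsLeft = 0;        // illustrative values, not from the changeset
        assertf( baseParamsLeft == 0 && paramsLeft == 0, "Type parameters should match type variables" );
        return 0;
    }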
src/ResolvExpr/PtrsCastable.cc
r23063ea r727cf70f 68 68 return 1; 69 69 } 70 int functionCast( Type *src, const TypeEnvironment &env, const SymTab::Indexer &indexer ) { 71 return -1 * objectCast( src, env, indexer ); // reverse the sense of objectCast 72 } 70 73 71 74 int ptrsCastable( Type *src, Type *dest, const TypeEnvironment &env, const SymTab::Indexer &indexer ) { … … 106 109 107 110 void PtrsCastable::visit(FunctionType *functionType) { 108 result = -1; 111 // result = -1; 112 result = functionCast( dest, env, indexer ); 109 113 } 110 114 … … 136 140 137 141 void PtrsCastable::visit(TypeInstType *inst) { 138 result = objectCast( inst, env, indexer ) > 0 && objectCast( dest, env, indexer ) > 0 ? 1 : -1; 142 //result = objectCast( inst, env, indexer ) > 0 && objectCast( dest, env, indexer ) > 0 ? 1 : -1; 143 result = objectCast( inst, env, indexer ) == objectCast( dest, env, indexer ) ? 1 : -1; 139 144 } 140 145 -
src/ResolvExpr/Unify.cc
r23063ea r727cf70f 134 134 case TypeDecl::Ftype: 135 135 return isFtype( type, indexer ); 136 136 case TypeDecl::Ttype: 137 137 // ttype unifies with any tuple type 138 138 return dynamic_cast< TupleType * >( type ) || Tuples::isTtype( type ); … … 592 592 for ( ; it != params.end() && jt != otherParams.end(); ++it, ++jt ) { 593 593 TypeExpr *param = dynamic_cast< TypeExpr* >(*it); 594 assert (param &&"Aggregate parameters should be type expressions");594 assertf(param, "Aggregate parameters should be type expressions"); 595 595 TypeExpr *otherParam = dynamic_cast< TypeExpr* >(*jt); 596 assert(otherParam && "Aggregate parameters should be type expressions"); 597 598 if ( ! unifyExact( param->get_type(), otherParam->get_type(), env, needAssertions, haveAssertions, openVars, WidenMode(false, false), indexer ) ) { 596 assertf(otherParam, "Aggregate parameters should be type expressions"); 597 598 Type* paramTy = param->get_type(); 599 Type* otherParamTy = otherParam->get_type(); 600 601 bool tupleParam = Tuples::isTtype( paramTy ); 602 bool otherTupleParam = Tuples::isTtype( otherParamTy ); 603 604 if ( tupleParam && otherTupleParam ) { 605 ++it; ++jt; // skip ttype parameters for break 606 } else if ( tupleParam ) { 607 // bundle other parameters into tuple to match 608 TupleType* binder = new TupleType{ paramTy->get_qualifiers() }; 609 610 do { 611 binder->get_types().push_back( otherParam->get_type()->clone() ); 612 ++jt; 613 614 if ( jt == otherParams.end() ) break; 615 616 otherParam = dynamic_cast< TypeExpr* >(*jt); 617 assertf(otherParam, "Aggregate parameters should be type expressions"); 618 } while (true); 619 620 otherParamTy = binder; 621 ++it; // skip ttype parameter for break 622 } else if ( otherTupleParam ) { 623 // bundle parameters into tuple to match other 624 TupleType* binder = new TupleType{ otherParamTy->get_qualifiers() }; 625 626 do { 627 binder->get_types().push_back( param->get_type()->clone() ); 628 ++it; 629 630 if ( it == params.end() ) break; 631 632 param = dynamic_cast< TypeExpr* >(*it); 633 assertf(param, "Aggregate parameters should be type expressions"); 634 } while (true); 635 636 paramTy = binder; 637 ++jt; // skip ttype parameter for break 638 } 639 640 if ( ! unifyExact( paramTy, otherParamTy, env, needAssertions, haveAssertions, openVars, WidenMode(false, false), indexer ) ) { 599 641 result = false; 600 642 return; 601 643 } 644 645 // ttype parameter should be last 646 if ( tupleParam || otherTupleParam ) break; 602 647 } 603 648 result = ( it == params.end() && jt == otherParams.end() ); -
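The additions above teach unification about ttype (tuple) type parameters: when one side has a ttype parameter, the remaining parameters on the other side are bundled into a tuple so the two lists can match, as the inserted comments state. The user-level feature this serves is the variadic-tuple mechanism described in the paper portion of this changeset; the sketch below reconstructs its running sum example from the evaluation trace given there (sum(10, 20, 30) resolving to 10 + (20 + (30 + 0))). The exact listing is collapsed in this diff, so the spelling here is an approximation.

    int sum( void ) { return 0; }                    // base case: nothing left to add
    forall( ttype Params | { int sum( Params ); } )  // assertion: the remaining pack can itself be summed
    int sum( int x, Params rest ) {
        return x + sum( rest );                      // peel off one argument, recurse on the pack
    }

    int main( void ) {
        int total = sum( 10, 20, 30 );               // 10 + (20 + (30 + 0)) == 60, matching the trace in the text
        return total == 60 ? 0 : 1;
    }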
src/libcfa/stdlib
r23063ea r727cf70f 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Mar 4 22:03:54 201713 // Update Count : 10 212 // Last Modified On : Sat Apr 1 17:35:24 2017 13 // Update Count : 104 14 14 // 15 15 … … 84 84 forall( otype T | { int ?<?( T, T ); } ) 85 85 T * bsearch( T key, const T * arr, size_t dimension ); 86 forall( otype T | { int ?<?( T, T ); } ) 87 unsigned int bsearch( T key, const T * arr, size_t dimension ); 86 88 87 89 forall( otype T | { int ?<?( T, T ); } ) -
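The new declaration above adds a second bsearch overload that reports the position of the key as an unsigned int rather than returning a pointer; since both overloads take the same arguments, the caller's expected result type selects between them, exactly as the calls added to src/tests/searchsort.c later in this changeset do. The helper below is an illustrative sketch; the function name lookup and the literal key are not from the changeset.

    #include <stdlib>                                     // CFA qsort/bsearch declarations shown above

    void lookup( int * iarr, unsigned int size ) {
        qsort( iarr, size );                              // comparator-free sort from the same header
        int * v           = bsearch( 5, iarr, size );     // pointer-returning overload
        unsigned int posn = bsearch( 5, iarr, size );     // new overload, chosen by the declared result type
        // when the key is present, v points at the element and iarr[posn] is that same element
    }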
src/libcfa/stdlib.c
r23063ea r727cf70f 10 10 // Created On : Thu Jan 28 17:10:29 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Mar 4 22:02:22201713 // Update Count : 1 7212 // Last Modified On : Sat Apr 1 18:31:26 2017 13 // Update Count : 181 14 14 // 15 15 … … 228 228 229 229 forall( otype T | { int ?<?( T, T ); } ) 230 unsigned int bsearch( T key, const T * arr, size_t dimension ) { 231 int comp( const void * t1, const void * t2 ) { return *(T *)t1 < *(T *)t2 ? -1 : *(T *)t2 < *(T *)t1 ? 1 : 0; } 232 T *result = (T *)bsearch( &key, arr, dimension, sizeof(T), comp ); 233 return result ? result - arr : dimension; // pointer subtraction includes sizeof(T) 234 } // bsearch 235 236 forall( otype T | { int ?<?( T, T ); } ) 230 237 void qsort( const T * arr, size_t dimension ) { 231 238 int comp( const void * t1, const void * t2 ) { return *(T *)t1 < *(T *)t2 ? -1 : *(T *)t2 < *(T *)t1 ? 1 : 0; } -
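The implementation above returns dimension when the key is absent (its comment notes that the pointer subtraction is already scaled by sizeof(T)), so a position equal to the array size acts as the not-found indicator. Below is a small sketch of that check, assuming this convention is the intended interface; the report function and its messages are illustrative only.

    #include <fstream>
    #include <stdlib>

    void report( int key, int * arr, unsigned int size ) {
        unsigned int posn = bsearch( key, arr, size );    // index-returning overload defined above
        if ( posn == size ) {                             // dimension is returned when the key is not found
            sout | key | " not found" | endl;
        } else {
            sout | key | " at position " | posn | endl;
        }
    }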
src/tests/.expect/searchsort.txt
r23063ea r727cf70f 1 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,2 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,3 1 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 4 2 5 3 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 4 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 5 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 6 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 7 8 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 9 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 6 10 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 7 11 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, … … 10 14 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11 15 10.5, 9.5, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 16 10.5, 9.5, 8.5, 7.5, 6.5, 5.5, 4.5, 3.5, 2.5, 1.5, 12 17 13 18 10 11, 9 10, 8 9, 7 8, 6 7, 5 6, 4 5, 3 4, 2 3, 1 2, 14 19 1 2, 2 3, 3 4, 4 5, 5 6, 6 7, 7 8, 8 9, 9 10, 10 11, 15 20 10 11, 9 10, 8 9, 7 8, 6 7, 5 6, 4 5, 3 4, 2 3, 1 2, 21 10 11, 9 10, 8 9, 7 8, 6 7, 5 6, 4 5, 3 4, 2 3, 1 2, 16 22 -
src/tests/searchsort.c
r23063ea r727cf70f 10 10 // Created On : Thu Feb 4 18:17:50 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Jul 5 18:06:07 201613 // Update Count : 5612 // Last Modified On : Sun Apr 2 11:29:30 2017 13 // Update Count : 76 14 14 // 15 15 16 16 #include <fstream> 17 17 #include <stdlib> // bsearch, qsort 18 #include <stdlib.h> // C version of bsearch 19 20 int comp( const void * t1, const void * t2 ) { return *(int *)t1 < *(int *)t2 ? -1 : *(int *)t2 < *(int *)t1 ? 1 : 0; } 18 21 19 22 int main( void ) { … … 25 28 sout | iarr[i] | ", "; 26 29 } // for 27 sout | endl; 30 sout | endl | endl; 31 32 // ascending sort/search by changing < to > 28 33 qsort( iarr, size ); 29 34 for ( unsigned int i = 0; i < size; i += 1 ) { … … 31 36 } // for 32 37 sout | endl; 38 for ( unsigned int i = 0; i < size; i += 1 ) { // C version 39 int key = size - i; 40 int *v = bsearch( &key, iarr, size, sizeof( iarr[0] ), comp ); 41 sout | *v | ", "; 42 } // for 43 sout | endl; 33 44 for ( unsigned int i = 0; i < size; i += 1 ) { 34 45 int *v = bsearch( size - i, iarr, size ); 35 46 sout | *v | ", "; 47 } // for 48 sout | endl; 49 for ( unsigned int i = 0; i < size; i += 1 ) { 50 unsigned int posn = bsearch( size - i, iarr, size ); 51 sout | iarr[posn] | ", "; 36 52 } // for 37 53 sout | endl | endl; … … 54 70 sout | *v | ", "; 55 71 } // for 72 sout | endl; 73 for ( unsigned int i = 0; i < size; i += 1 ) { 74 unsigned int posn = bsearch( size - i, iarr, size ); 75 sout | iarr[posn] | ", "; 76 } // for 56 77 } 57 78 sout | endl | endl; … … 71 92 double *v = bsearch( size - i + 0.5, darr, size ); 72 93 sout | *v | ", "; 94 } // for 95 sout | endl; 96 for ( unsigned int i = 0; i < size; i += 1 ) { 97 unsigned int posn = bsearch( size - i + 0.5, darr, size ); 98 sout | darr[posn] | ", "; 73 99 } // for 74 100 sout | endl | endl; … … 93 119 sout | *v | ", "; 94 120 } // for 121 sout | endl; 122 for ( unsigned int i = 0; i < size; i += 1 ) { 123 S temp = { size - i, size - i + 1 }; 124 unsigned int posn = bsearch( temp, sarr, size ); 125 sout | sarr[posn] | ", "; 126 } // for 95 127 sout | endl | endl; 96 128 } // main