- Timestamp:
- Mar 27, 2017, 5:00:57 PM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 41cd57c0
- Parents:
- ae6f1ec, 656ee4f
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
- Location:
- doc
- Files:
- 1 added
- 4 edited
doc/generic_types/.gitignore
rae6f1ec r626da644
  *.ps
  *.toc
+ *.lof
+ *.lot
  *.synctex.gz
doc/generic_types/generic_types.bib
rae6f1ec r626da644
  address = {Waterloo, Ontario, Canada, N2L 3G1},
  note = {\href{http://plg.uwaterloo.ca/theses/BilsonThesis.pdf}{http://\-plg.uwaterloo.ca/\-theses/\-BilsonThesis.pdf}},
  }
+
+ @article{Buhr94a,
+     keywords = {assignment, parameter passing, multiple assignment},
+     contributer = {pabuhr@plg},
+     author = {P. A. Buhr and David Till and C. R. Zarnke},
+     title = {Assignment as the Sole Means of Updating Objects},
+     journal = spe,
+     month = sep,
+     year = 1994,
+     volume = 24,
+     number = 9,
+     pages = {835-870},
+ }
+
+ @techreport{C11,
+     type = {International Standard},
+     keywords = {ISO/IEC C 11},
+     contributer = {pabuhr@plg},
+     key = {{ISO/IEC} 9889-2011},
+     title = {American National Standard Information technology -- Programming Languages -- {C}},
+     institution = {International Standard Organization},
+     address = {http://www.iso.org},
+     year = 2012,
+ }
…
  note = {\href{http://plg.uwaterloo.ca/theses/DitchfieldThesis.pdf}{http://\-plg.uwaterloo.ca/\-theses/\-DitchfieldThesis.pdf}}
  }
+
+ @mastersthesis{Schluntz17,
+     author = {Schluntz, Robert},
+     title = {Resource Management and Tuples in C$\mathbf{\forall}$},
+     school = {School of Computer Science, University of Waterloo},
+     year = 2017,
+     address = {Waterloo, Ontario, Canada, N2L 3G1},
+     note = {[[unpublished]]}
+ }
+
+ @mastersthesis{Till89,
+     keywords = {C, multiple return values, tuples},
+     contributer = {pabuhr@plg},
+     author = {David W. Till},
+     title = {Tuples In Imperative Programming Languages},
+     school = {Department of Computer Science, University of Waterloo},
+     year = 1989,
+     address = {Waterloo, Ontario, Canada, N2L 3G1},
+ }
doc/generic_types/generic_types.tex
rae6f1ec r626da644
  morekeywords={_Alignas,_Alignof,__alignof,__alignof__,asm,__asm,__asm__,_At,_Atomic,__attribute,__attribute__,auto,
  _Bool,catch,catchResume,choose,_Complex,__complex,__complex__,__const,__const__,disable,dtype,enable,__extension__,
- fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue,_Noreturn,one_t,otype,restrict,_Static_assert,
+ fallthrough,fallthru,finally,forall,ftype,_Generic,_Imaginary,inline,__label__,lvalue,_Noreturn,one_t,otype,restrict,size_t,sized,_Static_assert,
  _Thread_local,throw,throwResume,trait,try,typeof,__typeof,__typeof__,zero_t},
  }%
…
  \acmJournal{PACMPL}

- \title{Generic Types with Efficient Dynamic Layout in \CFA{}}
+ \title{Generic and Tuple Types with Efficient Dynamic Layout in \CFA{}}

  \author{Aaron Moss}
…
  \email{pabuhr@uwaterloo.ca}

- \terms{generic, types}
- \keywords{generic types, polymorphic functions, Cforall}
+ \terms{generic, tuple, types}
+ \keywords{generic types, tuple types, polymorphic functions, C, Cforall}

  \begin{CCSXML}
…
  \section{Introduction \& Background}

- \CFA{}\footnote{Pronounced ``C-for-all'', and written \CFA{} or Cforall.} is an evolutionary extension of the C programming language which aims to add modern language features to C while maintaining both source compatibility with C and a familiar mental model for programmers. This paper describes how generic types are designed and implemented in \CFA{}, and how they interact with \CFA{}'s polymorphic functions.
+ \CFA{}\footnote{Pronounced ``C-for-all'', and written \CFA{} or Cforall.} is an evolutionary extension of the C programming language which aims to add modern language features to C while maintaining both source compatibility with C and a familiar mental model for programmers. Four key design goals were set out in the original design of \CFA{} \citep{Bilson03}:
+ \begin{enumerate}
+ \item The behaviour of standard C code must remain the same when translated by a \CFA{} compiler as when translated by a C compiler.
+ \item Standard C code must be as fast and as small when translated by a \CFA{} compiler as when translated by a C compiler.
+ \item \CFA{} code must be at least as portable as standard C code.
+ \item Extensions introduced by \CFA{} must be translated in the most efficient way possible.
+ \end{enumerate}
+ The purpose of these goals is to ensure that existing C codebases can be converted to \CFA{} incrementally and with minimal effort, and that programmers who already know C can productively produce \CFA{} code without extensive training beyond the extension features they wish to employ.
+
+ \CFA{} has been previously extended with polymorphic functions and name overloading (including operator overloading) \citep{Bilson03}, and deterministically-executed constructors and destructors \citep{Schluntz17}. This paper describes how generic and tuple types are designed and implemented in \CFA{} in accordance with both the backward compatibility goals and existing features described above.

  \subsection{Polymorphic Functions}
+ \label{sec:poly-fns}

  \CFA{}'s polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}.
  The signature feature of \CFA{} is parametric-polymorphic functions; such functions are written using a @forall@ clause (which gives the language its name):
…
  }
  \end{lstlisting}
+ This capability allows specifying the same set of assertions in multiple locations, without the repetition and likelihood of mistakes that come with manually writing them out for each function declaration.

  @otype@ is essentially syntactic sugar for the following trait:
…
  void ?{}(T*); // default constructor
  void ?{}(T*, T); // copy constructor
- T ?=?(T*, T); // assignment operator
+ void ?=?(T*, T); // assignment operator
  void ^?{}(T*); // destructor
  };
+ \end{lstlisting}
+ Given this information, variables of polymorphic type can be treated as if they were a complete struct type -- they can be stack-allocated using the @alloca@ compiler builtin, default or copy-initialized, assigned, and deleted. As an example, the @abs@ function above would produce generated code something like the following (simplified for clarity and brevity):
+ \begin{lstlisting}
+ void abs( size_t _sizeof_M, size_t _alignof_M,
+ 		void (*_ctor_M)(void*), void (*_copy_M)(void*, void*),
+ 		void (*_assign_M)(void*, void*), void (*_dtor_M)(void*),
+ 		bool (*_lt_M)(void*, void*), void (*_neg_M)(void*, void*),
+ 		void (*_ctor_M_zero)(void*, int),
+ 		void* m, void* _rtn ) { // polymorphic parameter and return passed as void*
+ 	// M zero = { 0 };
+ 	void* zero = alloca(_sizeof_M); // stack allocate 0 temporary
+ 	_ctor_M_zero(zero, 0); // initialize using zero_t constructor
+ 	// return m < zero ? -m : m;
+ 	void *_tmp = alloca(_sizeof_M);
+ 	_copy_M( _rtn, // copy-initialize return value
+ 		_lt_M( m, zero ) ? // check condition
+ 			(_neg_M(m, _tmp), _tmp) : // negate m
+ 			m);
+ 	_dtor_M(_tmp); _dtor_M(zero); // destroy temporaries
+ }
  \end{lstlisting}
…
  \section{Generic Types}

- The generic types design for \CFA{} must integrate efficiently and naturally with the existing polymorphic functions in \CFA{}, while retaining backwards compatibility with C; maintaining separate compilation is a particularly important constraint on the design. However, where the concrete parameters of the generic type are known, there should not be extra overhead for the use of a generic type.
+ One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms. Broadly speaking, there are three approaches to create data structures in C. One approach is to write bespoke data structures for each context in which they are needed. While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for data structures more complicated than a singly-linked list. A second approach is to use @void*@-based polymorphism. This approach is taken by the C standard library functions @qsort@ and @bsearch@, and does allow the use of common code for common functionality. However, basing all polymorphism on @void*@ eliminates the type-checker's ability to ensure that argument types are properly matched, as well as adding pointer indirection and dynamic allocation to algorithms and data structures which would not otherwise require them.
+ A third approach to generic code is to use pre-processor macros to generate it -- this approach does allow the generated code to be both generic and type-checked, though any errors produced may be difficult to read. Furthermore, writing and invoking C code as preprocessor macros is unnatural and somewhat inflexible.
+
+ Other C-like languages such as \CC{} and Java use \emph{generic types} to produce type-safe abstract data types. The \CFA{} team has chosen to implement generic types as well, with the constraints that the generic types design for \CFA{} must integrate efficiently and naturally with the existing polymorphic functions in \CFA{}, while retaining backwards compatibility with C; maintaining separate compilation is a particularly important constraint on the design. However, where the concrete parameters of the generic type are known, there should not be extra overhead for the use of a generic type.

  A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name:
…
  forall(otype T)
- T value( pair(const char*, T) *p ) { return p->second; }
+ T value( pair(const char*, T) p ) { return p.second; }

  forall(dtype F, otype T)
…
  pair(const char*, int) p = { "magic", 42 };
- int magic = value( &p );
+ int magic = value( p );

  pair(void*, int*) q = { 0, &p.second };
…
  \end{lstlisting}

- \TODO{} Maybe move this after the rest of the discussion.
- This re-use of dtype-static struct instantiations enables some useful programming patterns at zero runtime cost. The most important such pattern is using @forall(dtype T) T*@ as a type-checked replacement for @void*@, as in this example, which takes a @qsort@ or @bsearch@-compatible comparison routine and creates a similar lexicographic comparison for pairs of pointers:
+ Though \CFA{} implements concrete generic types efficiently, it also has a fully-general system for computing with dynamic generic types. As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. Dynamic generic structs also come with an \emph{offset array} which contains the offsets of each member of the struct\footnote{Dynamic generic unions need no such offset array, as all members are at offset 0.}. Access to members\footnote{The \lstinline@offsetof@ macro is implemented similarly.} of a dynamic generic struct is provided by adding the corresponding member of the offset array to the struct pointer at runtime, essentially moving a compile-time offset calculation to runtime where necessary.
+
+ These offset arrays are statically generated where possible. If a dynamic generic type is declared to be passed or returned by value from a polymorphic function, the compiler can safely assume that the generic type is complete (that is, has a known layout) at any call-site, and the offset array is passed from the caller; if the generic type is concrete at the call site this offset array can even be statically generated using the C @offsetof@ macro. As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void*@, and @_offsetof_pair@ is the offset array passed in to @value@ for @pair(const char*, T)@.
+ The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) };@.
+
+ In some cases the offset arrays cannot be statically generated. For instance, modularity is generally produced in C by including an opaque forward-declaration of a struct and associated accessor and mutator routines in a header file, with the actual implementations in a separately-compiled \texttt{.c} file. \CFA{} supports this pattern for generic types, and in this instance the caller does not know the actual layout or size of the dynamic generic type, and only holds it by pointer. The \CFA{} compiler automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed in to a function from that function's caller. These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic struct (un-@sized@ parameters are forbidden from being used in a context which affects layout). Results of these layout functions are cached so that they are only computed once per type per function.%, as in the example below for @pair@.
+ % \begin{lstlisting}
+ % static inline void _layoutof_pair(size_t* _szeof_pair, size_t* _alignof_pair, size_t* _offsetof_pair,
+ % 		size_t _szeof_R, size_t _alignof_R, size_t _szeof_S, size_t _alignof_S) {
+ % 	*_szeof_pair = 0; // default values
+ % 	*_alignof_pair = 1;
+
+ % 	// add offset, size, and alignment of first field
+ % 	_offsetof_pair[0] = *_szeof_pair;
+ % 	*_szeof_pair += _szeof_R;
+ % 	if ( *_alignof_pair < _alignof_R ) *_alignof_pair = _alignof_R;
+
+ % 	// padding, offset, size, and alignment of second field
+ % 	if ( *_szeof_pair & (_alignof_S - 1) )
+ % 		*_szeof_pair += (_alignof_S - ( *_szeof_pair & (_alignof_S - 1) ) );
+ % 	_offsetof_pair[1] = *_szeof_pair;
+ % 	*_szeof_pair += _szeof_S;
+ % 	if ( *_alignof_pair < _alignof_S ) *_alignof_pair = _alignof_S;
+
+ % 	// pad to struct alignment
+ % 	if ( *_szeof_pair & (*_alignof_pair - 1) )
+ % 		*_szeof_pair += ( *_alignof_pair - ( *_szeof_pair & (*_alignof_pair - 1) ) );
+ % }
+ % \end{lstlisting}
+
+ Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature. For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but internally may use some sort of @set(T)@ to test for duplicate values. This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function.
+
+ Whether a type is concrete, dtype-static, or dynamic is decided based solely on the type parameters and @forall@ clause on the struct declaration. This allows opaque forward declarations of generic types like @forall(otype T) struct Box;@ -- like in C, all uses of @Box(T)@ can be in a separately compiled translation unit, and callers from other translation units will know the proper calling conventions to use.
+ If the definition of a struct type was included in the determination of dynamic or concrete, some further types may be recognized as dtype-static (\eg, @forall(otype T) struct unique_ptr { T* p };@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}), but preserving separate compilation (and the associated C compatibility) in this way is judged to be an appropriate trade-off.
+
+ The re-use of dtype-static struct instantiations enables some useful programming patterns at zero runtime cost. The most important such pattern is using @forall(dtype T) T*@ as a type-checked replacement for @void*@, as in this example, which takes a @qsort@ or @bsearch@-compatible comparison routine and creates a similar lexicographic comparison for pairs of pointers:
  \begin{lstlisting}
  forall(dtype T)
…
  scalar(metres) marathon = half_marathon + half_marathon;
  scalar(litres) two_pools = swimming_pool + swimming_pool;
- marathon + swimming_pool; // ERROR v-- caught by compiler
+ marathon + swimming_pool; // ERROR -- caught by compiler
  \end{lstlisting}
  @scalar@ is a dtype-static type, so all uses of it will use a single struct definition, containing only a single @unsigned long@, and can share the same implementations of common routines like @?+?@ -- these implementations may even be separately compiled, unlike \CC{} template functions. However, the \CFA{} type-checker will ensure that matching types are used by all calls to @?+?@, preventing nonsensical computations like adding the length of a marathon to the volume of an olympic pool.
…
  \section{Tuples}

- \TODO{} Integrate Rob's work
+ The @pair(R, S)@ generic type used as an example in the previous section can be considered a special case of a more general \emph{tuple} data structure. The authors have implemented tuples in \CFA{} as an extension to C. The \CFA{} tuple design is particularly motivated by two use cases: \emph{multiple-return-value functions} and \emph{variadic functions}.
+
+ In standard C, functions can return at most one value. This restriction results in code which emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}. In the former situation, the function designer creates a record type that combines all of the return values into a single type. Of note, the designer must come up with a name for the return type and for each of its fields. Unnecessary naming is a common programming language issue, introducing verbosity and a complication of the user's mental model. As such, this technique is effective when used sparingly, but can quickly get out of hand if many functions need to return different combinations of types. In the latter approach, the designer simulates multiple return values by passing the additional return values as pointer parameters. The pointer parameters are assigned inside of the routine body to emulate a return. Notably, using this approach, the caller is directly responsible for allocating storage for the additional temporary return values. This complicates the call site with a sequence of variable declarations leading up to the call. Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from only the routine signature whether the callee expects such a parameter to be initialized before the call.
+ Furthermore, while many C routines that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, there are many C routines that are not null-safe. On a related note, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler.
+
+ C does provide a mechanism for variadic functions through manipulation of @va_list@ objects, but it is notoriously not type-safe. A variadic function is one which contains at least one parameter, followed by @...@ as the last token in the parameter list. In particular, some form of \emph{argument descriptor} is needed to inform the function of the number of arguments and their types, commonly a format string or counter parameter. It is important to note that both of these mechanisms are inherently redundant, because they require the user to specify information that the compiler knows explicitly. This required repetition is error prone, because it is easy for the user to add or remove arguments without updating the argument descriptor. In addition, C requires the programmer to hard code all of the possible expected types. As a result, it is cumbersome to write a variadic function that is open to extension. For example, consider a simple function which sums $N$ @int@s:
+ \begin{lstlisting}
+ int sum(int N, ...) {
+ 	va_list args;
+ 	va_start(args, N); // must manually specify last non-variadic argument
+ 	int ret = 0;
+ 	while(N) {
+ 		ret += va_arg(args, int); // must specify type
+ 		N--;
+ 	}
+ 	va_end(args);
+ 	return ret;
+ }
+ sum(3, 10, 20, 30); // must keep initial counter argument in sync
+ \end{lstlisting}
+
+ The @va_list@ type is a special C data type that abstracts variadic argument manipulation. The @va_start@ macro initializes a @va_list@, given the last named parameter. Each use of the @va_arg@ macro allows access to the next variadic argument, given a type. Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call. As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures. In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined \citep{C11}. Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body. Since they rely on programmer convention rather than compile-time checks, variadic functions are generally unsafe.
+
+ In practice, compilers can provide warnings to help mitigate some of the problems. For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers. Unfortunately, this does not permit extensions to the format string syntax, so a programmer cannot extend the attribute to warn for mismatches with custom types.
+
+ \subsection{Tuple Expressions}
+
+ The tuple extensions in \CFA{} can express multiple return values and variadic function parameters in an efficient and type-safe manner. \CFA{} introduces \emph{tuple expressions} and \emph{tuple types}.
+ A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types. The type of a tuple expression is the tuple of the subexpression types, or a \emph{tuple type}. In \CFA{}, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets. For example, the expression @[5, 'x', 10.5]@ has type @[int, char, double]@. The previous expression has three \emph{components}. Each component in a tuple expression can be any \CFA{} expression, including another tuple expression. The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}.
+
+ \CFA{} allows declaration of \emph{tuple variables}, variables of tuple type. For example:
+ \begin{lstlisting}
+ [int, char] most_frequent(const char*);
+
+ const char* str = "hello, world!";
+ [int, char] freq = most_frequent(str);
+ printf("%s -- %d %c\n", str, freq);
+ \end{lstlisting}
+ In this example, the type of @freq@ and the return type of @most_frequent@ are both tuple types. Also of note is how the tuple expression @freq@ is implicitly flattened into separate @int@ and @char@ arguments to @printf@; this code snippet could have been shortened by replacing the last two lines with @printf("%s -- %d %c\n", str, most_frequent(str));@ using exactly the same mechanism.
+
+ In addition to variables of tuple type, it is also possible to have pointers to tuples, and arrays of tuples. Tuple types can be composed of any types, except for array types, since arrays are not of fixed size, which makes tuple assignment difficult when a tuple contains an array.
+ \begin{lstlisting}
+ [double, int] di;
+ [double, int] * pdi;
+ [double, int] adi[10];
+ \end{lstlisting}
+ This example declares a variable of type @[double, int]@, a variable of type pointer to @[double, int]@, and an array of ten @[double, int]@.
+
+ \subsection{Flattening and Restructuring}
+
+ In function call contexts, tuples support implicit flattening and restructuring conversions. Tuple flattening recursively expands a tuple into the list of its basic components. Tuple structuring packages a list of expressions into a value of tuple type.
+ \begin{lstlisting}
+ int f(int, int);
+ int g([int, int]);
+ int h(int, [int, int]);
+ [int, int] x;
+ int y;
+
+ f(x); // flatten
+ g(y, 10); // structure
+ h(x, y); // flatten & structure
+ \end{lstlisting}
+ In \CFA{}, each of these calls is valid. In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure.
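As an illustrative aside (not part of this changeset), the flattening and structuring conversions above can be pictured in terms of the plain C calls a compiler might emit, assuming each tuple is lowered to a generated struct; the struct and field names in this sketch are hypothetical.
\begin{lstlisting}
// sketch only: assumes [int, int] is lowered to a generated struct
struct _tuple2_int_int { int field0, field1; };

int f(int, int);
int g(struct _tuple2_int_int);
int h(int, struct _tuple2_int_int);

void example(struct _tuple2_int_int x, int y) {
	f(x.field0, x.field1);                                 // flatten x into two scalar arguments
	g((struct _tuple2_int_int){ y, 10 });                  // structure y and 10 into one tuple argument
	h(x.field0, (struct _tuple2_int_int){ x.field1, y });  // flatten x, then structure x.1 and y
}
\end{lstlisting}
Under this reading, flattening and structuring are compile-time rewrites of the argument list, costing no more at runtime than ordinary struct passing.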
+ In {K-W C} \citep{Buhr94a,Till89} there were 4 tuple coercions: opening, closing, flattening, and structuring. Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. Flattening coerces a nested tuple into a flat tuple, i.e. it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. Structuring moves in the opposite direction, i.e. it takes a flat tuple value and provides structure by introducing nested tuple components.
+
+ In \CFA{}, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. Specifically, the expression resolution algorithm examines all of the possible alternatives for an expression to determine the best match. In resolving a function call expression, each combination of function value and list of argument alternatives is examined. Given a particular argument list and function value, the list of argument alternatives is flattened to produce a list of non-tuple valued expressions. Then the flattened list of expressions is compared with each value in the function's parameter list. If the parameter's type is not a tuple type, then the current argument value is unified with the parameter type, and on success the next argument and parameter are examined. If the parameter's type is a tuple type, then the structuring conversion takes effect, recursively applying the parameter matching algorithm using the tuple's component types as the parameter list types. Assuming a successful unification, eventually the algorithm gets to the end of the tuple type, which causes all of the matching expressions to be consumed and structured into a tuple expression. For example, in
+ \begin{lstlisting}
+ int f(int, [double, int]);
+ f([5, 10.2], 4);
+ \end{lstlisting}
+ there is only a single definition of @f@, and 3 arguments with only single interpretations. First, the argument alternative list @[5, 10.2], 4@ is flattened to produce the argument list @5, 10.2, 4@. Next, the parameter matching algorithm begins, with $P =~$@int@ and $A =~$@int@, which unifies exactly. Moving to the next parameter and argument, $P =~$@[double, int]@ and $A =~$@double@. This time, the parameter is a tuple type, so the algorithm applies recursively with $P' =~$@double@ and $A =~$@double@, which unifies exactly. Then $P' =~$@int@ and $A =~$@int@, which again unifies exactly. At this point, the end of $P'$ has been reached, so the arguments @10.2, 4@ are structured into the tuple expression @[10.2, 4]@. Finally, the end of the parameter list $P$ has also been reached, so the final expression is @f(5, [10.2, 4])@.
+
+ \subsection{Member Access}
+
+ At times, it is desirable to access a single component of a tuple-valued expression without creating unnecessary temporary variables to assign to. Given a tuple-valued expression @e@ and a compile-time constant integer $i$ where $0 \leq i < n$, where $n$ is the number of components in @e@, @e.i@ accesses the $i$\textsuperscript{th} component of @e@.
+ For example,
+ \begin{lstlisting}
+ [int, double] x;
+ [char *, int] f();
+ void g(double, int);
+ [int, double] * p;
+
+ int y = x.0; // access int component of x
+ y = f().1; // access int component of f
+ p->0 = 5; // access int component of tuple pointed-to by p
+ g(x.1, x.0); // rearrange x to pass to g
+ double z = [x, f()].0.1; // access second component of first component of tuple expression
+ \end{lstlisting}
+ As seen above, tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple. This feature was proposed for {K-W C}, a precursor of \CFA{}, but never implemented \citep[p.~45]{Till89}.
+
+ It is possible to access multiple fields from a single expression using a \emph{member-access tuple expression}. The result is a single tuple expression whose type is the tuple of the types of the members. For example,
+ \begin{lstlisting}
+ struct S { int x; double y; char * z; } s;
+ s.[x, y, z];
+ \end{lstlisting}
+ Here, the type of @s.[x, y, z]@ is @[int, double, char *]@. A member tuple expression has the form @a.[x, y, z];@ where @a@ is an expression with type @T@, where @T@ supports member access expressions, and @x, y, z@ are all members of @T@ with types @T$_x$@, @T$_y$@, and @T$_z$@ respectively. Then the type of @a.[x, y, z]@ is @[T_x, T_y, T_z]@.
+
+ Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (e.g. rearrange components, drop components, duplicate components, etc.):
+ \begin{lstlisting}
+ [int, int, long, double] x;
+ void f(double, long);
+
+ f(x.[0, 3]); // f(x.0, x.3)
+ x.[0, 1] = x.[1, 0]; // [x.0, x.1] = [x.1, x.0]
+ [long, int, long] y = x.[2, 0, 2];
+ \end{lstlisting}
+
+ It is possible for a member tuple expression to contain other member access expressions:
+ \begin{lstlisting}
+ struct A { double i; int j; };
+ struct B { int * k; short l; };
+ struct C { int x; A y; B z; } v;
+ v.[x, y.[i, j], z.k];
+ \end{lstlisting}
+ This expression is equivalent to @[v.x, [v.y.i, v.y.j], v.z.k]@. That is, the aggregate expression is effectively distributed across the tuple, which allows simple and easy access to multiple components in an aggregate, without repetition. It is guaranteed that the aggregate expression to the left of the @.@ in a member tuple expression is evaluated exactly once. As such, it is safe to use member tuple expressions on the result of a side-effecting function.
+
+ \subsection{Tuple Assignment}
+
+ In addition to tuple-index expressions, individual components of tuples can be accessed by a \emph{destructuring assignment} which has a tuple expression with lvalue components on its left-hand side. More generally, an assignment where the left-hand side of the assignment operator has a tuple type is called \emph{tuple assignment}. There are two kinds of tuple assignment depending on whether the right-hand side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple assignment} and \emph{mass assignment}, respectively.
+ \begin{lstlisting}
+ int x;
+ double y;
+ [int, double] z;
+ [y, x] = 3.14; // mass assignment
+ [x, y] = z; // multiple assignment
+ z = 10; // mass assignment
+ z = [x, y]; // multiple assignment
+ \end{lstlisting}
+ Let $L_i$ for $i$ in $[0, n)$ represent each component of the flattened left side, $R_i$ represent each component of the flattened right side of a multiple assignment, and $R$ represent the right side of a mass assignment.
+
+ For a multiple assignment to be valid, both tuples must have the same number of elements when flattened. Multiple assignment assigns $R_i$ to $L_i$ for each $i$.
+ That is, @?=?(&$L_i$, $R_i$)@ must be a well-typed expression. In the previous example, @[x, y] = z@, @z@ is flattened into @z.0, z.1@, and the assignments @x = z.0@ and @y = z.1@ are executed.
+
+ A mass assignment assigns the value $R$ to each $L_i$. For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. This differs from C cascading assignment (e.g. @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well.
+
+ Both kinds of tuple assignment have parallel semantics, such that each value on the left side and right side is evaluated \emph{before} any assignments occur. As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function:
+ \begin{lstlisting}
+ int x = 10, y = 20;
+ [x, y] = [y, x];
+ \end{lstlisting}
+ After executing this code, @x@ has the value @20@ and @y@ has the value @10@.
+
+ In \CFA{}, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, as in normal assignment. That is, a tuple assignment produces the value of the left-hand side after assignment. These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. These semantics are a change from the original tuple design in {K-W C} \citep{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. This decision was made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position. While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA{} without affecting backwards compatibility with C. Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. In another language, tuple assignment as a statement could be reasonable, but it would be inconsistent for tuple assignment to be the only kind of assignment in \CFA{} that is not an expression.
+
+ \subsection{Casting}
+
+ In C, the cast operator is used to explicitly convert between types.
+ In \CFA{}, the cast operator has a secondary use as type ascription. That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:
+ \begin{lstlisting}
+ int f(); // (1)
+ double f(); // (2)
+
+ f(); // ambiguous - (1),(2) both equally viable
+ (int)f(); // choose (1)
+ \end{lstlisting}
+
+ Since casting is a fundamental operation in \CFA{}, casts should be given a meaningful interpretation in the context of tuples. Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:
+ \begin{lstlisting}
+ int f();
+ void g();
+
+ (void)f(); // (1)
+ (int)g(); // (2)
+
+ struct A { int x; };
+ (struct A)f(); // (3)
+ \end{lstlisting}
+ In C, (1) is a valid cast, which calls @f@ and discards its result. On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical. Finally, (3) is also invalid, because in C casts only provide conversion between scalar types \cite{C11}. For consistency, this implies that any case wherein the number of components increases as a result of the cast is invalid, while casts which have the same or fewer number of components may be valid.
+
+ Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$. Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression. This discarding naturally follows the way that a cast to @void@ works in C.
+
+ For example, in
+ \begin{lstlisting}
+ [int, int, int] f();
+ [int, [int, int], int] g();
+
+ ([int, double])f(); // (1)
+ ([int, int, int])g(); // (2)
+ ([void, [int, int]])g(); // (3)
+ ([int, int, int, int])g(); // (4)
+ ([int, [int, int, int]])g(); // (5)
+ \end{lstlisting}
+
+ (1) discards the last element of the return value and converts the second element to @double@. Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@. If @g@ is free of side effects, this is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@.
+ Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@.
+
+ Note that a cast is not a function call in \CFA{}, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@.
+
+ \TODO{}
+
+ In \CFA{}, functions can be declared to return multiple values with an extension to the function declaration syntax.
+ Multiple return values are declared as a comma-separated list of types in square brackets in the same location that the return type appears in standard C function declarations. The ability to return multiple values from a function requires a new syntax for the @return@ statement. For consistency, the @return@ statement in \CFA{} accepts a comma-separated list of expressions in square brackets. The expression resolution phase of the \CFA{} compiler ensures that the correct form is used depending on the values being returned and the return type of the current function. A multiple-returning function with return type @T@ can return any expression that is implicitly convertible to @T@. As an example, a function returning the most frequent character in a string and its frequency can be written in \CFA{} as:
+ \begin{lstlisting}
+ [int, char] most_frequent(const char * str) { // tuple return type
+ 	char freqs [26] = { 0 };
+ 	int ret_freq = 0;
+ 	char ret_ch = 'a';
+ 	for (int i = 0; str[i] != '\0'; ++i) {
+ 		if (isalpha(str[i])) { // only count letters
+ 			int ch = tolower(str[i]); // convert to lower case
+ 			int idx = ch-'a';
+ 			if (++freqs[idx] > ret_freq) { // update on new max
+ 				ret_freq = freqs[idx];
+ 				ret_ch = ch;
+ 			}
+ 		}
+ 	}
+ 	return [ret_freq, ret_ch]; // return tuple expression
+ }
+ \end{lstlisting}
+ This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type.
+
+ The addition of multiple-return-value functions necessitates a syntax for accepting multiple values at the call-site. The simplest mechanism for retaining a return value in C is variable assignment. By assigning the return value into a variable, its value can be retrieved later at any point in the program. As such, \CFA{} allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions on the left side.
+ \begin{lstlisting}
+ const char * str = "hello world";
+ int freq;
+ char ch;
+ [freq, ch] = most_frequent(str); // assign into multiple variables
+ printf("%s -- %d %c\n", str, freq, ch);
+ \end{lstlisting}
+
+ It is also common to use a function's output as the input to another function. \CFA{} also allows this case, without any new syntax. When a function call is passed as an argument to another call, the expression resolver attempts to find the best match of actual arguments to formal parameters given all of the possible expression interpretations in the current scope \citep{Bilson03}. For example,
+ \begin{lstlisting}
+ void process(int); // (1)
+ void process(char); // (2)
+ void process(int, char); // (3)
+ void process(char, int); // (4)
+
+ process(most_frequent("hello world")); // selects (3)
+ \end{lstlisting}
+ In this case, there is only one option for a function named @most_frequent@ that takes a string as input. This function returns two values, one @int@ and one @char@. There are four options for a function named @process@, but only two which accept two arguments, and of those the best match is (3), which is also an exact match. This expression first calls @most_frequent("hello world")@, which produces the values @3@ and @'l'@, which are fed directly to the first and second parameters of (3), respectively.

  \TODO{} Check if we actually can use ttype parameters on generic types (if they set the complete flag, it should work, or nearly so).
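As an illustrative aside (not part of this changeset), one plausible C-level lowering of the @process(most_frequent("hello world"))@ call is sketched below, assuming the @[int, char]@ return value is carried in a generated struct; every lowered name here is hypothetical and not drawn from the \CFA{} compiler.
\begin{lstlisting}
// sketch only: hypothetical lowering of the tuple-returning call chain
struct _tuple2_int_char { int field0; char field1; };

struct _tuple2_int_char _most_frequent(const char* str); // lowered [int, char] most_frequent(const char*)
void _process_int_char(int, char);                       // lowered process(int, char), overload (3)

void example(void) {
	struct _tuple2_int_char _tmp = _most_frequent("hello world"); // call once, keep both results
	_process_int_char(_tmp.field0, _tmp.field1);                  // flatten the tuple into the two parameters
}
\end{lstlisting}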
…
  \section{Related Work}

- \TODO{} Talk about \CC{}, Cyclone, \etc{}
-
- \section{Conclusion}
-
- \TODO{}
+ \TODO{} Talk about \CC{}, Cyclone, maybe D, Rust, \etc{}
+
+ A major difference between the approaches of \CC{} and \CFA{} to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA{}. One of the major limiting factors of \CC{}'s approach is that templates cannot be separately compiled. In contrast, the explicit nature of assertions allows \CFA{}'s polymorphic functions to be separately compiled.
+
+ \section{Conclusion \& Future Work}
+
+ In conclusion, the authors' design for generic types and tuples imposes minimal runtime overhead while still supporting a full range of C features, including separately-compiled modules. There is ongoing work on a wide range of \CFA{} feature extensions, including reference types, exceptions, and concurrent programming primitives. In addition to this work, there are some interesting future directions the polymorphism design could take. Notably, \CC{} template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. \CFA{} polymorphic functions, by contrast, use an approach which is essentially dynamic virtual dispatch. The runtime overhead of this approach is low, but not as low as \CC{} template functions, and it may be beneficial to provide a mechanism for particularly performance-sensitive code to close this gap. Further research is needed, but two promising approaches are to allow an annotation on polymorphic function call sites that tells the compiler to create a template-specialization of the function (provided the code is visible in the current translation unit) or placing an annotation on polymorphic function definitions that instantiates a version of the polymorphic function specialized to some set of types. These approaches are not mutually exclusive, and would allow these performance optimizations to be applied only where most useful to increase performance, without suffering the code bloat or loss of generality of a template expansion approach where it is unnecessary.

  \bibliographystyle{ACM-Reference-Format}
doc/rob_thesis/tuples.tex
rae6f1ec r626da644
  Multiple return values are declared as a comma-separated list of types in square brackets in the same location that the return type appears in standard C function declarations.
  The ability to return multiple values from a function requires a new syntax for the return statement.
- For consistency, the return statement in \CFA accepts a common-separated list of expressions in square brackets.
+ For consistency, the return statement in \CFA accepts a comma-separated list of expressions in square brackets.
  The expression resolution phase of the \CFA translator ensures that the correct form is used depending on the values being returned and the return type of the current function.
  A multiple-returning function with return type @T@ can return any expression that is implicitly convertible to @T@.
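For illustration (this example is not from the thesis or the changeset), the return syntax described in this hunk can be exercised with a small tuple-returning function; @divmod@ is a made-up name.
\begin{lstlisting}
// minimal illustration of the square-bracket tuple return syntax
[int, int] divmod( int num, int den ) {
	return [ num / den, num % den ];   // comma-separated list of return expressions in square brackets
}

[int, int] qr = divmod( 13, 5 );       // qr holds [2, 3]
\end{lstlisting}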