Changeset 154fdc8


Ignore:
Timestamp:
Apr 19, 2017, 10:15:45 AM (8 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
cd348e7
Parents:
221c2de7 (diff), de4ce0e (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
34 added
1 deleted
42 edited

Legend:

Unmodified
Added
Removed
  • doc/LaTeXmacros/common.tex

    r221c2de7 r154fdc8  
     1
    12%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -*- Mode: Latex -*- %%%%%%%%%%%%%%%%%%%%%%%%%%%%
    23%%
     
    1112%% Created On       : Sat Apr  9 10:06:17 2016
    1213%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Wed Apr  5 23:19:42 2017
    14 %% Update Count     : 255
     14%% Last Modified On : Wed Apr 12 11:32:26 2017
     15%% Update Count     : 257
    1516%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1617
     
    4445\newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20} % C++20 symbolic name
    4546\newcommand{\Celeven}{C11\xspace}               % C11 symbolic name
    46 \newcommand{\Csharp}{C\raisebox{0.4ex}{\#}\xspace}      % C# symbolic name
     47\newcommand{\Csharp}{C\raisebox{-0.65ex}{\large$^\sharp$}\xspace}       % C# symbolic name
    4748
    4849%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  • doc/bibliography/cfa.bib

    r221c2de7 r154fdc8  
    99%    Predefined journal names:
    1010%  acmcs: Computing Surveys             acta: Acta Infomatica
     11@string{acta="Acta Infomatica"}
    1112%  cacm: Communications of the ACM
    1213%  ibmjrd: IBM J. Research & Development ibmsj: IBM Systems Journal
     
    433434    keywords    = {Parametric polymorphism, alphard, iterators, nested types},
    434435    contributer = {gjditchfield@plg},
     436    key         = {Alphard},
    435437    editor      = {Mary Shaw},
    436438    title       = {{ALPHARD}: Form and Content},
     
    861863
    862864@techreport{C11,
    863     type = {International Standard},
     865    type        = {International Standard},
    864866    keywords    = {ISO/IEC C 11},
    865867    contributer = {pabuhr@plg},
     
    872874
    873875@techreport{C++Concepts,
    874     type = {International Standard},
     876    type        = {International Standard},
    875877    keywords    = {ISO/IEC TS 19217:2015},
    876878    contributer = {a3moss@uwaterloo.ca},
    877879    key         = {{ISO/IEC} {TS} 19217},
     880    author      = {Concepts},
    878881    title       = {Information technology -- Programming languages -- {C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Extensions for concepts},
    879882    institution = {International Standard Organization},
     
    27222725
    27232726@online{GCCExtensions,
    2724     contributer = {a3moss@uwaterloo.ca},
    2725     key = {{GNU}},
    2726     title = {Extensions to the {C} Language Family},
    2727     year = 2014,
    2728     url = {https://gcc.gnu.org/onlinedocs/gcc-4.7.2/gcc/C-Extensions.html},
    2729     urldate = {2017-04-02}
     2727    contributer = {a3moss@uwaterloo.ca},
     2728    key         = {{GNU}},
     2729    author      = {{C Extensions}},
     2730    title       = {Extensions to the {C} Language Family},
     2731    year        = 2014,
     2732    note        = {\href{https://gcc.gnu.org/onlinedocs/gcc-4.7.2/gcc/C-Extensions.html}{https://\-gcc.gnu.org/\-onlinedocs/\-gcc-4.7.2/\-gcc/\-C\-Extensions.html}},
     2733    urldate     = {2017-04-02}
    27302734}
    27312735
     
    28232827    key         = {Fortran95},
    28242828    title       = {Fortran 95 Standard, ISO/IEC 1539},
    2825     organization = {Unicomp, Inc.},
     2829    organization= {Unicomp, Inc.},
    28262830    address     = {7660 E. Broadway, Tucson, Arizona, U.S.A, 85710},
    28272831    month       = jan,
     
    30603064
    30613065@online{GObject,
    3062     keywords = {GObject},
    3063     contributor = {a3moss@uwaterloo.ca},
    3064     author = {{The GNOME Project}},
    3065     title = {{GObject} Reference Manual},
    3066     year = 2014,
    3067     url = {https://developer.gnome.org/gobject/stable/},
    3068     urldate = {2017-04-04}
     3066    keywords    = {GObject},
     3067    contributor = {a3moss@uwaterloo.ca},
     3068    author      = {{GObject}},
     3069    organization= {The GNOME Project},
     3070    title       = {{GObject} Reference Manual},
     3071    year        = 2014,
     3072    url         = {https://developer.gnome.org/gobject/stable/},
     3073    urldate     = {2017-04-04}
    30693074}
    30703075
     
    36453650    contributer = {pabuhr@plg},
    36463651    author      = {James Gosling and Bill Joy and Guy Steele and Gilad Bracha and Alex Buckley},
    3647     title       = {{Java} Language Specification},
     3652    title       = {{Java} Language Spec.},
     3653    organization= {Oracle},
    36483654    publisher   = {Oracle},
    36493655    year        = 2015,
     
    45644570
    45654571@manual{obj-c-book,
    4566     keywords = {objective-c},
    4567     contributor = {a3moss@uwaterloo.ca},
    4568     author = {{Apple Computer Inc.}},
    4569     title = {The {Objective-C} Programming Language},
    4570     publisher = {Apple Computer Inc.},
    4571     address = {Cupertino, CA},
    4572     year = 2003
     4572    keywords    = {objective-c},
     4573    contributor = {a3moss@uwaterloo.ca},
     4574    author      = {{Objective-C}},
     4575    title       = {The {Objective-C} Programming Language},
     4576    organization= {Apple Computer Inc.},
     4577    address     = {Cupertino, CA},
     4578    year        = 2003
    45734579}
    45744580
     
    45764582    keywords    = {objective-c},
    45774583    contributor = {a3moss@uwaterloo.ca},
    4578     author      = {{Apple Computer Inc.}},
     4584    author      = {{Xcode}},
    45794585    title       = {{Xcode} 7 Release Notes},
    45804586    year        = 2015,
     
    48944900    year        = 1980,
    48954901    month       = nov, volume = 15, number = 11, pages = {47-56},
    4896     note        = {Proceedings of the ACM-SIGPLAN Symposium on the {Ada} Programming
    4897          Language},
     4902    note        = {Proceedings of the ACM-SIGPLAN Symposium on the {Ada} Programming Language},
    48984903    comment     = {
    48994904        The two-pass (bottom-up, then top-down) algorithm, with a proof
     
    53185323    title       = {Programming with Sets: An Introduction to {SETL}},
    53195324    publisher   = {Springer},
     5325    address     = {New York, NY, USA},
    53205326    year        = 1986,
    53215327}
     
    54635469    contributer = {pabuhr@plg},
    54645470    title       = {The Programming Language {Ada}: Reference Manual},
     5471    author      = {Ada},
    54655472    organization= {United States Department of Defense},
    54665473    edition     = {{ANSI/MIL-STD-1815A-1983}},
     
    58805887    keywords    = {Rust programming language},
    58815888    contributer = {pabuhr@plg},
     5889    author      = {{Rust}},
    58825890    title       = {The {Rust} Programming Language},
    58835891    organization= {The Rust Project Developers},
     
    58915899    keywords    = {Scala programming language},
    58925900    contributer = {pabuhr@plg},
     5901    author      = {{Scala}},
    58935902    title       = {{Scala} Language Specification, Version 2.11},
    58945903    organization= {\'{E}cole Polytechnique F\'{e}d\'{e}rale de Lausanne},
     
    62126221}
    62136222
     6223@article{Smith98,
     6224  keywords = {Polymorphic C},
     6225  contributor = {a3moss@uwaterloo.ca},
     6226  title={A sound polymorphic type system for a dialect of C},
     6227  author={Smith, Geoffrey and Volpano, Dennis},
     6228  journal={Science of computer programming},
     6229  volume={32},
     6230  number={1-3},
     6231  pages={49--72},
     6232  year={1998},
     6233  publisher={Elsevier}
     6234}
     6235
    62146236@book{Campbell74,
    62156237    keywords    = {path expressions},
     
    63086330    number      = 5,
    63096331    pages       = {341-346}
     6332}
     6333
     6334@online{Sutter15,
     6335    contributer = {pabuhr@plg},
     6336    author      = {Herb Sutter and Bjarne Stroustrup and Gabriel Dos Reis},
     6337    title       = {Structured bindings},
     6338    issue_date  = {2015-10-14},
     6339    month       = oct,
     6340    year        = 2015,
     6341    pages       = {1--6},
     6342    numpages    = {6},
     6343    note        = {\href{http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/p0144r0.pdf}{http://\-www.open-std.org/\-jtc1/\-sc22/\-wg21/\-docs/\-papers/\-2015/\-p0144r0.pdf}},
    63106344}
    63116345
     
    67506784    number      = 6,
    67516785    month       = jun,
     6786    publisher   = {ACM},
     6787    address     = {New York, NY, USA},
    67526788    year        = 1990,
    67536789    pages       = {127-136},
     
    69026938
    69036939@online{Vala,
    6904     keywords = {GObject, Vala},
    6905     contributor = {a3moss@uwaterloo.ca},
    6906     author = {{The GNOME Project}},
    6907     title = {Vala Reference Manual},
    6908     year = 2017,
    6909     url = {https://wiki.gnome.org/Projects/Vala/Manual},
    6910     urldate = {2017-04-04}
     6940    keywords    = {GObject, Vala},
     6941    contributor = {a3moss@uwaterloo.ca},
     6942    author      = {{Vala}},
     6943    organization= {The GNOME Project},
     6944    title       = {Vala Reference Manual},
     6945    year        = 2017,
     6946    url         = {https://wiki.gnome.org/Projects/Vala/Manual},
     6947    urldate     = {2017-04-04}
    69116948}
    69126949
  • doc/generic_types/.gitignore

    r221c2de7 r154fdc8  
    1717*.synctex.gz
    1818comment.cut
     19timing.tex
  • doc/generic_types/Makefile

    r221c2de7 r154fdc8  
    2121
    2222GRAPHS = ${addsuffix .tex, \
     23timing \
    2324}
    2425
     
    4546#${DOCUMENT} : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex \
    4647
    47 ${basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex \
    48                 ../LaTeXmacros/common.tex ../LaTeXmacros/indexstyle ../bibliography/cfa.bib
     48${basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex ../bibliography/cfa.bib
    4949        # Conditionally create an empty *.idx (index) file for inclusion until makeindex is run.
    5050        if [ ! -r ${basename $@}.idx ] ; then touch ${basename $@}.idx ; fi
     
    6666## Define the default recipes.
    6767
     68${GRAPHS} : evaluation/timing.gp evaluation/timing.dat
     69        gnuplot evaluation/timing.gp
     70
    6871%.tex : %.fig
    6972        fig2dev -L eepic $< > $@
  • doc/generic_types/acmart.cls

    r221c2de7 r154fdc8  
    401401       \immediate\write\@auxout{\string\bibstyle{#1}}%
    402402     \fi}}
    403 \RequirePackage{graphicx, xcolor}
    404 \definecolor[named]{ACMBlue}{cmyk}{1,0.1,0,0.1}
    405 \definecolor[named]{ACMYellow}{cmyk}{0,0.16,1,0}
    406 \definecolor[named]{ACMOrange}{cmyk}{0,0.42,1,0.01}
    407 \definecolor[named]{ACMRed}{cmyk}{0,0.90,0.86,0}
    408 \definecolor[named]{ACMLightBlue}{cmyk}{0.49,0.01,0,0}
    409 \definecolor[named]{ACMGreen}{cmyk}{0.20,0,1,0.19}
    410 \definecolor[named]{ACMPurple}{cmyk}{0.55,1,0,0.15}
    411 \definecolor[named]{ACMDarkBlue}{cmyk}{1,0.58,0,0.21}
     403%\RequirePackage{graphicx, xcolor}
     404%\definecolor[named]{ACMBlue}{cmyk}{1,0.1,0,0.1}
     405%\definecolor[named]{ACMYellow}{cmyk}{0,0.16,1,0}
     406%\definecolor[named]{ACMOrange}{cmyk}{0,0.42,1,0.01}
     407%\definecolor[named]{ACMRed}{cmyk}{0,0.90,0.86,0}
     408%\definecolor[named]{ACMLightBlue}{cmyk}{0.49,0.01,0,0}
     409%\definecolor[named]{ACMGreen}{cmyk}{0.20,0,1,0.19}
     410%\definecolor[named]{ACMPurple}{cmyk}{0.55,1,0,0.15}
     411%\definecolor[named]{ACMDarkBlue}{cmyk}{1,0.58,0,0.21}
    412412\RequirePackage{geometry}
    413413\ifcase\ACM@format@nr
  • doc/generic_types/evaluation/.gitignore

    r221c2de7 r154fdc8  
    11c-bench
    22cpp-bench
     3cpp-vbench
    34cfa-bench
    4 c2-bench
    5 cpp2-bench
    6 cfa2-bench
    75*.o
    86*.d
  • doc/generic_types/evaluation/Makefile

    r221c2de7 r154fdc8  
    1 CFA = my-cfa
     1CFA = cfa
    22DEPFLAGS = -MMD -MP
     3CFLAGS = -O2
     4ifdef N
     5CFLAGS += -DN=$(N)
     6endif
     7CXXFLAGS = $(CFLAGS) --std=c++14
    38
    4 .PHONY: all clean distclean bench
     9.PHONY: all clean distclean run-c run-cpp run-cfa run
    510
    6 all: c-bench cpp-bench cfa-bench c2-bench cpp2-bench cfa2-bench
     11all: c-bench cpp-bench cfa-bench cpp-vbench
    712
    813# rewrite object generation to auto-determine deps
     
    1318c-%.o : c-%.c
    1419c-%.o : c-%.c c-%.d
    15         $(COMPILE.c) -O0 $(OUTPUT_OPTION) -c $<
     20        $(COMPILE.c) $(OUTPUT_OPTION) -c $<
    1621
    1722cpp-%.o : cpp-%.cpp
    1823cpp-%.o : cpp-%.cpp cpp-%.d
    19         $(COMPILE.cpp) -O0 $(OUTPUT_OPTION) -c $<
     24        $(COMPILE.cpp) $(OUTPUT_OPTION) -c $<
    2025
    2126cfa-%.o : cfa-%.c
    2227cfa-%.o : cfa-%.c cfa-%.d
    23         $(COMPILE.cfa) -O0 $(OUTPUT_OPTION) -c $<
     28        $(COMPILE.cfa) $(OUTPUT_OPTION) -c $<
    2429
    25 c2-%.o : c-%.c
    26 c2-%.o : c-%.c c-%.d
    27         $(COMPILE.c) -O2 $(OUTPUT_OPTION) -c $<
     30COBJS = c-stack.o c-pair.o c-print.o
     31CPPOBJS =
     32CPPVOBJS = cpp-vstack.o
     33CFAOBJS = cfa-stack.o cfa-pair.o cfa-print.o
    2834
    29 cpp2-%.o : cpp-%.cpp
    30 cpp2-%.o : cpp-%.cpp cpp-%.d
    31         $(COMPILE.cpp) -O2 $(OUTPUT_OPTION) -c $<
    32 
    33 cfa2-%.o : cfa-%.c
    34 cfa2-%.o : cfa-%.c cfa-%.d
    35         $(COMPILE.cfa) -O2 $(OUTPUT_OPTION) -c $<
    36 
    37 COBJS = c-stack.o
    38 CPPOBJS =
    39 CFAOBJS = cfa-stack.o
    40 C2OBJS = $(patsubst c-%,c2-%, $(COBJS))
    41 CPP2OBJS = $(patsubst cpp-%,cpp2-%, $(CPPOBJS))
    42 CFA2OBJS = $(patsubst cfa-%,cfa2-%, $(CFAOBJS))
     35CFILES = c-bench.c bench.h $(COBJS:.o=.h) $(COBJS:.o=.c)
     36CPPFILES = cpp-bench.cpp bench.hpp cpp-stack.hpp cpp-pair.hpp cpp-print.hpp
     37CPPVFILES = cpp-vbench.cpp bench.hpp object.hpp $(CPPVOBJS:.o=.hpp) $(CPPVOBJS:.o=.cpp) cpp-vprint.hpp
     38CFAFILES = cfa-bench.c bench.h $(CFAOBJS:.o=.h) $(CFAOBJS:.o=.c)
    4339
    4440c-bench: c-bench.c c-bench.d $(COBJS)
    45         $(COMPILE.c) -O0 -o $@ $< $(COBJS) $(LDFLAGS)
     41        $(COMPILE.c) -o $@ $< $(COBJS) $(LDFLAGS)
    4642
    4743cpp-bench: cpp-bench.cpp cpp-bench.d $(CPPOBJS)
    48         $(COMPILE.cpp) -O0 -o $@ $< $(CPPOBJS) $(LDFLAGS)
     44        $(COMPILE.cpp) -o $@ $< $(CPPOBJS) $(LDFLAGS)
     45
     46cpp-vbench: cpp-vbench.cpp cpp-vbench.d $(CPPVOBJS)
     47        $(COMPILE.cpp) -o $@ $< $(CPPVOBJS) $(LDFLAGS)
    4948
    5049cfa-bench: cfa-bench.c cfa-bench.d $(CFAOBJS)
    51         $(COMPILE.cfa) -O0 -o $@ $< $(CFAOBJS) $(LDFLAGS)
    52 
    53 c2-bench: c-bench.c c-bench.d $(C2OBJS)
    54         $(COMPILE.c) -O2 -o $@ $< $(C2OBJS) $(LDFLAGS)
    55 
    56 cpp2-bench: cpp-bench.cpp cpp-bench.d $(CPP2OBJS)
    57         $(COMPILE.cpp) -O2 -o $@ $< $(CPP2OBJS) $(LDFLAGS)
    58 
    59 cfa2-bench: cfa-bench.c cfa-bench.d $(CFA2OBJS)
    60         $(COMPILE.cfa) -O2 -o $@ $< $(CFA2OBJS) $(LDFLAGS)
     50        $(COMPILE.cfa) -o $@ $< $(CFAOBJS) $(LDFLAGS)
    6151
    6252clean:
    6353        -rm $(COBJS) c-bench
    6454        -rm $(CPPOBJS) cpp-bench
     55        -rm $(CPPVOBJS) cpp-vbench
    6556        -rm $(CFAOBJS) cfa-bench
    66         -rm $(C2OBJS) c2-bench
    67         -rm $(CPP2OBJS) cpp2-bench
    68         -rm $(CFA2OBJS) cfa2-bench
    6957
    7058distclean: clean
    7159        -rm $(COBJS:.o=.d) c-bench.d
    7260        -rm $(CPPOBJS:.o=.d) cpp-bench.d
     61        -rm $(CPPVOBJS:.o=.d) cpp-vbench.d
    7362        -rm $(CFAOBJS:.o=.d) cfa-bench.d
    7463
    75 bench: c-bench cpp-bench cfa-bench c2-bench cpp2-bench cfa2-bench
     64run-c: c-bench
     65        @echo
    7666        @echo '## C ##'
    77         @./c-bench
     67        @/usr/bin/time -f 'max_memory:\t%M kilobytes' ./c-bench
     68        @printf 'source_size:\t%8d lines\n' `cat $(CFILES) | wc -l`
     69        @printf 'redundant_type_annotations:%8d lines\n' `cat $(CFILES) | fgrep '/***/' -c`
     70        @printf 'binary_size:\t%8d bytes\n' `stat -c %s c-bench`
     71
     72run-cfa: cfa-bench
     73        @echo
     74        @echo '## Cforall ##'
     75        @/usr/bin/time -f 'max_memory:\t %M kilobytes' ./cfa-bench
     76        @printf 'source_size:\t%8d lines\n' `cat $(CFAFILES) | wc -l`
     77        @printf 'redundant_type_annotations:%8d lines\n' `cat $(CFAFILES) | fgrep '/***/' -c`
     78        @printf 'binary_size:\t%8d bytes\n' `stat -c %s cfa-bench`
     79
     80run-cpp: cpp-bench
     81        @echo
    7882        @echo '## C++ ##'
    79         @./cpp-bench
    80         @echo '## Cforall ##'
    81         @./cfa-bench
    82         @echo '## C -O2 ##'
    83         @./c2-bench
    84         @echo '## C++ -O2 ##'
    85         @./cpp2-bench
    86         @echo '## Cforall -O2 ##'
    87         @./cfa2-bench
     83        @/usr/bin/time -f 'max_memory:\t %M kilobytes' ./cpp-bench
     84        @printf 'source_size:\t%8d lines\n' `cat $(CPPFILES) | wc -l`
     85        @printf 'redundant_type_annotations:%8d lines\n' `cat $(CPPFILES) | fgrep '/***/' -c`
     86        @printf 'binary_size:\t%8d bytes\n' `stat -c %s cpp-bench`
     87
     88run-cppv: cpp-vbench
     89        @echo
     90        @echo '## C++obj ##'
     91        @/usr/bin/time -f 'max_memory:\t%M kilobytes' ./cpp-vbench
     92        @printf 'source_size:\t%8d lines\n' `cat $(CPPVFILES) | wc -l`
     93        @printf 'redundant_type_annotations:%8d lines\n' `cat $(CPPVFILES) | fgrep '/***/' -c`
     94        @printf 'binary_size:\t%8d bytes\n' `stat -c %s cpp-vbench`
     95
     96run: run-c run-cfa run-cpp run-cppv
    8897
    8998# so make doesn't fail without dependency files
  • doc/generic_types/evaluation/bench.h

    r221c2de7 r154fdc8  
     1#pragma once
    12#include <stdio.h>
    23#include <time.h>
    34
    4 #define N 200000000
     5long ms_between(clock_t start, clock_t end) { return (end - start) / (CLOCKS_PER_SEC / 1000); }
    56
    6 long ms_between(clock_t start, clock_t end) {
    7         return (end - start) / (CLOCKS_PER_SEC / 1000);
     7#define N 40000000
     8#define TIMED(name, code) { \
     9        volatile clock_t _start, _end; \
     10        _start = clock(); \
     11        code \
     12        _end = clock(); \
     13        printf("%s:\t%8ld ms\n", name, ms_between(_start, _end)); \
    814}
    9 
    10 #define TIMED(name, code) { \
    11         clock_t start, end; \
    12         start = clock(); \
    13         code \
    14         end = clock(); \
    15         printf("%s:\t%7ld ms\n", name, ms_between(start, end)); \
    16 }
    17 
    18 #define REPEAT_TIMED(name, code) TIMED( name, for (int i = 0; i < N; ++i) { code } )
     15#define REPEAT_TIMED(name, n, code) TIMED( name, for (int _i = 0; _i < n; ++_i) { code } )
  • doc/generic_types/evaluation/c-bench.c

    r221c2de7 r154fdc8  
     1#include <stdio.h>
    12#include <stdlib.h>
    23#include "bench.h"
     4#include "c-pair.h"
    35#include "c-stack.h"
     6#include "c-print.h"
     7
     8_Bool* new_bool( _Bool b ) {
     9        _Bool* q = malloc(sizeof(_Bool)); /***/
     10        *q = b;
     11        return q;
     12}
     13
     14char* new_char( char c ) {
     15        char* q = malloc(sizeof(char)); /***/
     16        *q = c;
     17        return q;
     18}
     19
     20int* new_int( int i ) {
     21        int* q = malloc(sizeof(int)); /***/
     22        *q = i;
     23        return q;
     24}
     25
     26void* copy_bool( const void* p ) { return new_bool( *(const _Bool*)p ); } /***/
     27void* copy_char( const void* p ) { return new_char( *(const char*)p ); } /***/
     28void* copy_int( const void* p ) { return new_int( *(const int*)p ); } /***/
     29void* copy_pair_bool_char( const void* p ) { return copy_pair( p, copy_bool, copy_char ); } /***/
     30void free_pair_bool_char( void* p ) { free_pair( p, free, free ); } /***/
     31
     32int cmp_bool( const void* a, const void* b ) { /***/
     33        return *(const _Bool*)a == *(const _Bool*)b ? 0 : *(const _Bool*)a < *(const _Bool*)b ? -1 : 1;
     34}
     35
     36int cmp_char( const void* a, const void* b ) { /***/
     37        return *(const char*)a == *(const char*)b ? 0 : *(const char*)a < *(const char*)b ? -1 : 1;
     38}
    439
    540int main(int argc, char** argv) {
    6         srand(20171025);
     41        FILE * out = fopen("c-out.txt", "w");
     42        int maxi = 0, vali = 42;
     43        struct stack si = new_stack(), ti;
    744
    8         struct stack s = new_stack();
     45        REPEAT_TIMED( "push_int", N, push_stack( &si, new_int( vali ) ); )
     46        TIMED( "copy_int",      copy_stack( &ti, &si, copy_int ); /***/ )
     47        TIMED( "clear_int", clear_stack( &si, free ); /***/ )
     48        REPEAT_TIMED( "pop_int", N,
     49                int* xi = pop_stack( &ti );
     50                if ( *xi > maxi ) { maxi = *xi; }
     51                free(xi); )
     52        REPEAT_TIMED( "print_int", N/2, print( out, "dsds", vali, ":", vali, "\n" ); /***/ )
    953
    10         REPEAT_TIMED( "push_int",
    11                 int* x = malloc(sizeof(int));
    12                 *x = rand();
    13                 push_stack(&s, x);
    14         )
     54        struct pair * maxp = new_pair( new_bool(0), new_char('\0') ),
     55                * valp = new_pair( new_bool(1), new_char('a') );
     56        struct stack sp = new_stack(), tp;
    1557
    16         clear_stack(&s);
     58        REPEAT_TIMED( "push_pair", N, push_stack( &sp, copy_pair_bool_char( valp ) ); )
     59        TIMED( "copy_pair", copy_stack( &tp, &sp, copy_pair_bool_char ); /***/ )
     60        TIMED( "clear_pair", clear_stack( &sp, free_pair_bool_char ); /***/ )
     61        REPEAT_TIMED( "pop_pair", N,
     62                struct pair * xp = pop_stack( &tp );
     63                if ( cmp_pair( xp, maxp, cmp_bool, cmp_char /***/ ) > 0 ) {
     64                        free_pair_bool_char( maxp ); /***/
     65                        maxp = xp;
     66                } else {
     67                        free_pair_bool_char( xp ); /***/
     68                } )
     69        REPEAT_TIMED( "print_pair", N/2, print( out, "pbcspbcs", *valp, ":", *valp, "\n" ); /***/ )
     70        free_pair_bool_char( maxp ); /***/
     71        free_pair_bool_char( valp ); /***/
     72        fclose(out);
    1773}
  • doc/generic_types/evaluation/c-stack.c

    r221c2de7 r154fdc8  
    77};
    88
    9 struct stack new_stack() {
    10         return (struct stack){ NULL };
     9struct stack new_stack() { return (struct stack){ NULL }; /***/ }
     10
     11void copy_stack(struct stack* s, const struct stack* t, void* (*copy)(const void*)) {
     12        struct stack_node** crnt = &s->head;
     13        struct stack_node* next = t->head;
     14        while ( next ) {
     15                *crnt = malloc(sizeof(struct stack_node)); /***/
     16                **crnt = (struct stack_node){ copy(next->value) }; /***/
     17                crnt = &(*crnt)->next;
     18                next = next->next;
     19        }
     20        *crnt = 0;
    1121}
    1222
    13 void clear_stack(struct stack* s) {
     23void clear_stack(struct stack* s, void (*free_el)(void*)) {
    1424        struct stack_node* next = s->head;
    1525        while ( next ) {
    1626                struct stack_node* crnt = next;
    1727                next = crnt->next;
    18                 free(crnt->value);
     28                free_el(crnt->value);
    1929                free(crnt);
    2030        }
     31        s->head = NULL;
    2132}
    2233
    23 _Bool stack_empty(const struct stack* s) {
    24         return s->head == NULL;
    25 }
     34_Bool stack_empty(const struct stack* s) { return s->head == NULL; }
    2635
    2736void push_stack(struct stack* s, void* value) {
    28         struct stack_node* n = malloc(sizeof(struct stack_node));
    29         *n = (struct stack_node){ value, s->head };
     37        struct stack_node* n = malloc(sizeof(struct stack_node)); /***/
     38        *n = (struct stack_node){ value, s->head }; /***/
    3039        s->head = n;
    3140}
  • doc/generic_types/evaluation/c-stack.h

    r221c2de7 r154fdc8  
     1#pragma once
     2
    13struct stack_node;
    2 
    34struct stack {
    45        struct stack_node* head;
     
    67
    78struct stack new_stack();
    8 
    9 void clear_stack(struct stack* s);
     9void copy_stack(struct stack* dst, const struct stack* src, void* (*copy)(const void*));
     10void clear_stack(struct stack* s, void (*free_el)(void*));
    1011
    1112_Bool stack_empty(const struct stack* s);
    12 
    1313void push_stack(struct stack* s, void* value);
    14 
    1514void* pop_stack(struct stack* s);
  • doc/generic_types/evaluation/cfa-bench.c

    r221c2de7 r154fdc8  
    1 #include <stdlib.h>
     1#include <stdio.h>
    22#include "bench.h"
    33#include "cfa-stack.h"
     4#include "cfa-pair.h"
     5#include "cfa-print.h"
    46
    5 int main(int argc, char** argv) {
    6         srand(20171025);
     7int main( int argc, char *argv[] ) {
     8        FILE * out = fopen( "cfa-out.txt", "w" );
     9        int maxi = 0, vali = 42;
     10        stack(int) si, ti;
    711
    8         stack(int) s;
     12        REPEAT_TIMED( "push_int", N, push( &si, vali ); )
     13        TIMED( "copy_int", ti = si; )
     14        TIMED( "clear_int", clear( &si ); )
     15        REPEAT_TIMED( "pop_int", N,
     16                int xi = pop( &ti );
     17                if ( xi > maxi ) { maxi = xi; } )
     18        REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); )
    919
    10         REPEAT_TIMED( "push_int",
    11                 push( &s, rand() );
    12         )
     20        pair(_Bool, char) maxp = { (_Bool)0, '\0' }, valp = { (_Bool)1, 'a' };
     21        stack(pair(_Bool, char)) sp, tp;
     22
     23        REPEAT_TIMED( "push_pair", N, push( &sp, valp ); )
     24        TIMED( "copy_pair", tp = sp; )
     25        TIMED( "clear_pair", clear( &sp ); )
     26        REPEAT_TIMED( "pop_pair", N,
     27                pair(_Bool, char) xp = pop( &tp );
     28                if ( xp > maxp ) { maxp = xp; } )
     29        REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); )
     30        fclose(out);
    1331}
  • doc/generic_types/evaluation/cfa-stack.c

    r221c2de7 r154fdc8  
    77};
    88
    9 forall(otype T) void ?{}(stack(T)* s) {
    10         ?{}( &s->head, 0 );
     9forall(otype T) void ?{}(stack(T)* s) { (&s->head){ 0 }; }
     10
     11forall(otype T) void ?{}(stack(T)* s, stack(T) t) {
     12        stack_node(T)** crnt = &s->head;
     13        stack_node(T)* next = t.head;
     14        while ( next ) {
     15                *crnt = ((stack_node(T)*)malloc()){ next->value }; /***/
     16                stack_node(T)* acrnt = *crnt;
     17                crnt = &acrnt->next;
     18                next = next->next;
     19        }
     20        *crnt = 0;
    1121}
    1222
    13 forall(otype T) void ^?{}(stack(T)* s) {
     23forall(otype T) stack(T) ?=?(stack(T)* s, stack(T) t) {
     24        if ( s->head == t.head ) return *s;
     25        clear(s);
     26        s{ t };
     27        return *s;
     28}
     29
     30forall(otype T) void ^?{}(stack(T)* s) { clear(s); }
     31
     32forall(otype T) _Bool empty(const stack(T)* s) { return s->head == 0; }
     33
     34forall(otype T) void push(stack(T)* s, T value) {
     35        s->head = ((stack_node(T)*)malloc()){ value, s->head }; /***/
     36}
     37
     38forall(otype T) T pop(stack(T)* s) {
     39        stack_node(T)* n = s->head;
     40        s->head = n->next;
     41        T x = n->value;
     42        ^n{};
     43        free(n);
     44        return x;
     45}
     46
     47forall(otype T) void clear(stack(T)* s) {
    1448        stack_node(T)* next = s->head;
    1549        while ( next ) {
     
    1852                delete(crnt);
    1953        }
     54        s->head = 0;
    2055}
    21 
    22 forall(otype T) _Bool empty(const stack(T)* s) {
    23         return s->head == 0;
    24 }
    25 
    26 forall(otype T) void push(stack(T)* s, T value) {
    27         s->head = ((stack_node(T)*)malloc()){ value, s->head };
    28 }
    29 
    30 forall(otype T) T pop(stack(T)* s) {
    31         stack_node(T)* n = s->head;
    32         s->head = n->next;
    33         T x = n->value;
    34         delete(n);
    35         return x;
    36 }
  • doc/generic_types/evaluation/cfa-stack.h

    r221c2de7 r154fdc8  
     1#pragma once
     2
    13forall(otype T) struct stack_node;
    2 
    34forall(otype T) struct stack {
    45        stack_node(T)* head;
     
    67
    78forall(otype T) void ?{}(stack(T)* s);
    8 
     9forall(otype T) void ?{}(stack(T)* s, stack(T) t);
     10forall(otype T) stack(T) ?=?(stack(T)* s, stack(T) t);
    911forall(otype T) void ^?{}(stack(T)* s);
    1012
    1113forall(otype T) _Bool empty(const stack(T)* s);
    12 
    1314forall(otype T) void push(stack(T)* s, T value);
    14 
    1515forall(otype T) T pop(stack(T)* s);
     16forall(otype T) void clear(stack(T)* s);
  • doc/generic_types/evaluation/cpp-bench.cpp

    r221c2de7 r154fdc8  
    1 #include <stdlib.h>
    2 #include "bench.h"
    3 #include "cpp-stack.h"
     1#include <algorithm>
     2#include <fstream>
     3#include "bench.hpp"
     4#include "cpp-stack.hpp"
     5#include "cpp-pair.hpp"
     6#include "cpp-print.hpp"
    47
    58int main(int argc, char** argv) {
    6         srand(20171025);
     9        std::ofstream out{"cpp-out.txt"};
     10        int maxi = 0, vali = 42;
     11        stack<int> si, ti;
     12       
     13        REPEAT_TIMED( "push_int", N, si.push( vali ); )
     14        TIMED( "copy_int", ti = si; )
     15        TIMED( "clear_int", si.clear(); )
     16        REPEAT_TIMED( "pop_int", N, maxi = std::max( maxi, ti.pop() ); )
     17        REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); )
    718
    8         stack<int> s;
    9 
    10         REPEAT_TIMED( "push_int",
    11                 s.push( rand() );
    12         )
     19        pair<bool, char> maxp = { false, '\0' }, valp = { true, 'a' };
     20        stack<pair<bool, char>> sp, tp;
     21       
     22        REPEAT_TIMED( "push_pair", N, sp.push( valp ); )
     23        TIMED( "copy_pair", tp = sp; )
     24        TIMED( "clear_pair", sp.clear(); )
     25        REPEAT_TIMED( "pop_pair", N, maxp = std::max( maxp, tp.pop() ); )
     26        REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); )
    1327}
  • doc/generic_types/generic_types.tex

    r221c2de7 r154fdc8  
    11% take off review (for line numbers) and anonymous (for anonymization) on submission
    2 % \documentclass[format=acmlarge, anonymous, review]{acmart}
    3 \documentclass[format=acmlarge,review]{acmart}
     2\documentclass[format=acmlarge,anonymous,review]{acmart}
     3% \documentclass[format=acmlarge,review]{acmart}
    44
    55\usepackage{xspace,calc,comment}
    66\usepackage{upquote}                                                                    % switch curled `'" to straight
    77\usepackage{listings}                                                                   % format program code
     8\usepackage[usenames]{color}
    89
    910\makeatletter
     11% Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore
     12% removes it as a variable-name character so keyworks in variables are highlighted
     13\DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}
     14
    1015% parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for
    1116% use rather than use \parident directly.
     
    1318\setlength{\parindentlnth}{\parindent}
    1419
    15 \newlength{\gcolumnposn}                                % temporary hack because lstlisting does handle tabs correctly
     20\newlength{\gcolumnposn}                                % temporary hack because lstlisting does not handle tabs correctly
    1621\newlength{\columnposn}
    1722\setlength{\gcolumnposn}{2.75in}
     
    1924\newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@commentstyle{#2}}}
    2025\newcommand{\CRT}{\global\columnposn=\gcolumnposn}
     26
     27% Latin abbreviation
     28\newcommand{\abbrevFont}{\textit}       % set empty for no italics
     29\newcommand*{\eg}{%
     30        \@ifnextchar{,}{\abbrevFont{e}.\abbrevFont{g}.}%
     31                {\@ifnextchar{:}{\abbrevFont{e}.\abbrevFont{g}.}%
     32                        {\abbrevFont{e}.\abbrevFont{g}.,\xspace}}%
     33}%
     34\newcommand*{\ie}{%
     35        \@ifnextchar{,}{\abbrevFont{i}.\abbrevFont{e}.}%
     36                {\@ifnextchar{:}{\abbrevFont{i}.\abbrevFont{e}.}%
     37                        {\abbrevFont{i}.\abbrevFont{e}.,\xspace}}%
     38}%
     39\newcommand*{\etc}{%
     40        \@ifnextchar{.}{\abbrevFont{etc}}%
     41        {\abbrevFont{etc}.\xspace}%
     42}%
     43\newcommand{\etal}{%
     44        \@ifnextchar{.}{\abbrevFont{et~al}}%
     45                {\abbrevFont{et al}.\xspace}%
     46}%
    2147\makeatother
    2248
     
    2854\newcommand{\CCseventeen}{\rm C\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name
    2955\newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name
    30 \newcommand{\CS}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace}
     56\newcommand{\CCV}{\rm C\kern-.1em\hbox{+\kern-.25em+}obj\xspace} % C++ virtual symbolic name
     57\newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} % C# symbolic name
    3158\newcommand{\Textbf}[1]{{\color{red}\textbf{#1}}}
    32 
    3359\newcommand{\TODO}[1]{\textbf{TODO}: {\itshape #1}} % TODO included
    3460%\newcommand{\TODO}[1]{} % TODO elided
    35 \newcommand{\eg}{\textit{e}.\textit{g}.,\xspace}
    36 \newcommand{\ie}{\textit{i}.\textit{e}.,\xspace}
    37 \newcommand{\etc}{\textit{etc}.,\xspace}
    3861
    3962% CFA programming language, based on ANSI C (with some gcc additions)
     
    6083belowskip=3pt,
    6184% replace/adjust listing characters that look bad in sanserif
    62 literate={-}{\raisebox{-0.15ex}{\texttt{-}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1
    63         {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 {_}{\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
    64         {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2,
     85literate={-}{\makebox[1.4ex][c]{\raisebox{0.5ex}{\rule{1.2ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1
     86        {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
     87        {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1.4ex][c]{\raisebox{0.5ex}{\rule{1.2ex}{0.1ex}}}\kern-0.3ex\textgreater}2,
    6588moredelim=**[is][\color{red}]{`}{`},
    6689}% lstset
    6790
    6891% inline code @...@
    69 \lstMakeShortInline@
     92\lstMakeShortInline@%
    7093
    7194% ACM Information
     
    120143
    121144\begin{abstract}
    122 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers. Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers. Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers. This paper describes two \CFA extensions, generic and tuple types, details how their design avoids shortcomings of similar features in C and other C-like languages, and presents experimental results validating the design.
     145The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects.
     146This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
     147Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive.
     148The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers.
     149Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers.
     150Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers.
     151This paper describes two \CFA extensions, generic and tuple types, details how their design avoids shortcomings of similar features in C and other C-like languages, and presents experimental results validating the design.
    123152\end{abstract}
    124153
     
    129158\section{Introduction and Background}
    130159
    131 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
    132 The \citet{TIOBE} ranks the top 5 most popular programming languages as: Java 16\%, \Textbf{C 7\%}, \Textbf{\CC 5\%}, \CS 4\%, Python 4\% = 36\%, where the next 50 languages are less than 3\% each with a long tail. The top 3 rankings over the past 30 years are:
    133 \lstDeleteShortInline@
     160The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects.
     161This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
     162The \citet{TIOBE} ranks the top 5 most popular programming languages as: Java 16\%, \Textbf{C 7\%}, \Textbf{\CC 5\%}, \Csharp 4\%, Python 4\% = 36\%, where the next 50 languages are less than 3\% each with a long tail.
     163The top 3 rankings over the past 30 years are:
     164\lstDeleteShortInline@%
    134165\begin{center}
    135166\setlength{\tabcolsep}{10pt}
    136 \begin{tabular}{@{}r|c|c|c|c|c|c|c@{}}
    137                 & 2017  & 2012  & 2007  & 2002  & 1997  & 1992  & 1987          \\
    138 \hline
    139 Java    & 1             & 1             & 1             & 3             & 13    & -             & -                     \\
    140 \hline
    141 \Textbf{C}      & \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{1}& \Textbf{1}& \Textbf{1}& \Textbf{1}    \\
    142 \hline
     167\begin{tabular}{@{}rccccccc@{}}
     168                & 2017  & 2012  & 2007  & 2002  & 1997  & 1992  & 1987          \\ \hline
     169Java    & 1             & 1             & 1             & 1             & 12    & -             & -                     \\
     170\Textbf{C}      & \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{1}& \Textbf{1}& \Textbf{1}    \\
    143171\CC             & 3             & 3             & 3             & 3             & 2             & 2             & 4                     \\
    144172\end{tabular}
    145173\end{center}
    146 \lstMakeShortInline@
    147 Love it or hate it, C is extremely popular, highly used, and one of the few system's languages.
     174\lstMakeShortInline@%
     175Love it or hate it, C is extremely popular, highly used, and one of the few systems languages.
    148176In many cases, \CC is often used solely as a better C.
    149177Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive.
    150178
    151 \CFA (pronounced ``C-for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers. The four key design goals for \CFA~\citep{Bilson03} are:
     179\CFA (pronounced ``C-for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers.
     180The four key design goals for \CFA~\citep{Bilson03} are:
    152181(1) The behaviour of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler;
    153182(2) Standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler;
    154183(3) \CFA code must be at least as portable as standard C code;
    155184(4) Extensions introduced by \CFA must be translated in the most efficient way possible.
    156 These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used. In its current implementation, \CFA is compiled by translating it to the GCC-dialect of C~\citep{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)-(3). Ultimately, a compiler is necessary for advanced features and optimal performance.
    157 
    158 This paper identifies shortcomings in existing approaches to generic and variadic data types in C-like languages and presents a design for generic and variadic types avoiding those shortcomings. Specifically, the solution is both reusable and type-checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions. The new constructs are empirically compared with both standard C and \CC; the results show the new design is comparable in performance.
     185These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used.
     186\CC is used similarly, but has the disadvantages of multiple legacy design-choices that cannot be updated and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project.
     187
     188\CFA is currently implemented as a source-to-source translator from \CFA to the GCC-dialect of C~\citep{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)--(3).
     189Ultimately, a compiler is necessary for advanced features and optimal performance.
     190
     191This paper identifies shortcomings in existing approaches to generic and variadic data types in C-like languages and presents a design for generic and variadic types avoiding those shortcomings.
     192Specifically, the solution is both reusable and type-checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions.
     193The new constructs are empirically compared with both standard C and \CC; the results show the new design is comparable in performance.
    159194
    160195
     
    162197\label{sec:poly-fns}
    163198
    164 \CFA's polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}. The signature feature of \CFA is parametric-polymorphic functions where functions are generalized using a @forall@ clause (giving the language its name):
     199\CFA's polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}.
     200The signature feature of \CFA is parametric-polymorphic functions~\citep{forceone:impl,Cormack90,Duggan96} with functions generalized using a @forall@ clause (giving the language its name):
    165201\begin{lstlisting}
    166202`forall( otype T )` T identity( T val ) { return val; }
    167203int forty_two = identity( 42 );                         $\C{// T is bound to int, forty\_two == 42}$
    168204\end{lstlisting}
    169 The @identity@ function above can be applied to any complete \emph{object type} (or @otype@). The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type. The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor. If this extra information is not needed, \eg for a pointer, the type parameter can be declared as a \emph{data type} (or @dtype@).
    170 
    171 Here, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; preliminary experiments have shown this overhead is similar to \CC virtual function calls. An advantage of this design is that, unlike \CC template functions, \CFA polymorphic functions are compatible with C \emph{separate} compilation, preventing code bloat.
    172 
    173 Since bare polymorphic-types provide only a narrow set of available operations, \CFA provides a \emph{type assertion} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable. For example, the function @twice@ can be defined using the \CFA syntax for operator overloading:
     205The @identity@ function above can be applied to any complete \emph{object type} (or @otype@).
     206The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type.
     207The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor.
     208If this extra information is not needed, \eg for a pointer, the type parameter can be declared as a \emph{data type} (or @dtype@).
     209
     210In \CFA, the polymorphism runtime-cost is spread over each polymorphic call, due to passing more arguments to polymorphic functions;
     211the experiments in Section~\ref{sec:eval} show this overhead is similar to \CC virtual-function calls.
     212A design advantage is that, unlike \CC template-functions, \CFA polymorphic-functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat.
     213
     214Since bare polymorphic-types provide a restricted set of available operations, \CFA provides a \emph{type assertion}~\cite[pp.~37-44]{Alphard} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable.
     215For example, the function @twice@ can be defined using the \CFA syntax for operator overloading:
    174216\begin{lstlisting}
    175217forall( otype T `| { T ?+?(T, T); }` ) T twice( T x ) { return x + x; } $\C{// ? denotes operands}$
    176218int val = twice( twice( 3.7 ) );
    177219\end{lstlisting}
    178 which works for any type @T@ with a matching addition operator. The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@. There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Ada} in its type analysis. The first approach has a late conversion from @int@ to @double@ on the final assignment, while the second has an eager conversion to @int@. \CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition.
     220which works for any type @T@ with a matching addition operator.
     221The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@.
     222There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Cormack81,Baker82,Ada}, in its type analysis.
     223The first approach has a late conversion from @double@ to @int@ on the final assignment, while the second has an eager conversion to @int@.
     224\CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition.
    179225
    180226Crucial to the design of a new programming language are the libraries to access thousands of external software features.
    181 \CFA inherits a massive compatible library-base, where other programming languages must rewrite or provide fragile inter-language communication with C.
     227Like \CC, \CFA inherits a massive compatible library-base, where other programming languages must rewrite or provide fragile inter-language communication with C.
    182228A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted floating-point array:
    183229\begin{lstlisting}
    184230void * bsearch( const void * key, const void * base, size_t nmemb, size_t size,
    185                                 int (* compar)(const void *, const void *));
     231                                int (* compar)( const void *, const void * ));
    186232int comp( const void * t1, const void * t2 ) { return *(double *)t1 < *(double *)t2 ? -1 :
    187233                                *(double *)t2 < *(double *)t1 ? 1 : 0; }
    188 double vals[10] = { /* 10 floating-point values */ };
    189 double key = 5.0;
     234double key = 5.0, vals[10] = { /* 10 floating-point values */ };
    190235double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp );      $\C{// search sorted array}$
    191236\end{lstlisting}
     
    196241        return (T *)bsearch( &key, arr, size, sizeof(T), comp ); }
    197242forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) {
    198         T *result = bsearch( key, arr, size );  $\C{// call first version}$
     243        T * result = bsearch( key, arr, size ); $\C{// call first version}$
    199244        return result ? result - arr : size; }  $\C{// pointer subtraction includes sizeof(T)}$
    200245double * val = bsearch( 5.0, vals, 10 );        $\C{// selection based on return type}$
    201246int posn = bsearch( 5.0, vals, 10 );
    202247\end{lstlisting}
    203 The nested routine @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result.
     248The nested function @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result.
     249Providing a hidden @comp@ function in \CC is awkward as lambdas do not use C calling-conventions and template declarations cannot appear at block scope.
    204250As well, an alternate kind of return is made available: position versus pointer to found element.
    205251\CC's type-system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a templated @bsearch@.
     
    208254For example, it is possible to write a type-safe \CFA wrapper @malloc@ based on the C @malloc@:
    209255\begin{lstlisting}
    210 forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)(void *)malloc( (size_t)sizeof(T) ); }
     256forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)malloc( sizeof(T) ); }
    211257int * ip = malloc();                                            $\C{// select type and size from left-hand side}$
    212258double * dp = malloc();
     
    215261where the return type supplies the type/size of the allocation, which is impossible in most type systems.
    216262
    217 Call-site inferencing and nested functions provide a localized form of inheritance. For example, the \CFA @qsort@ only sorts in ascending order using @<@. However, it is trivial to locally change this behaviour:
     263Call-site inferencing and nested functions provide a localized form of inheritance.
     264For example, the \CFA @qsort@ only sorts in ascending order using @<@.
     265However, it is trivial to locally change this behaviour:
    218266\begin{lstlisting}
    219267forall( otype T | { int ?<?( T, T ); } ) void qsort( const T * arr, size_t size ) { /* use C qsort */ }
     
    223271\end{lstlisting}
    224272Within the block, the nested version of @<@ performs @>@ and this local version overrides the built-in @<@ so it is passed to @qsort@.
    225 Hence, programmers can easily form a local environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types.
     273Hence, programmers can easily form local environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types.
    226274
    227275Finally, \CFA allows variable overloading:
    228 \lstDeleteShortInline@
    229 \par\smallskip
    230 \begin{tabular}{@{}l@{\hspace{\parindent}}|@{\hspace{\parindent}}l@{}}
    231 \begin{lstlisting}
    232 short int MAX = ...;
    233 int MAX = ...;
    234 double MAX = ...;
    235 \end{lstlisting}
    236 &
    237 \begin{lstlisting}
    238 short int s = MAX;  // select correct MAX
    239 int i = MAX;
    240 double d = MAX;
    241 \end{lstlisting}
    242 \end{tabular}
    243 \lstMakeShortInline@
    244 \smallskip\par\noindent
    245 Hence, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@.
     276\begin{lstlisting}
     277short int MAX = ...;   int MAX = ...;  double MAX = ...;
     278short int s = MAX;    int i = MAX;    double d = MAX;   $\C{// select correct MAX}$
     279\end{lstlisting}
     280Here, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@.
    246281As well, restricted constant overloading is allowed for the values @0@ and @1@, which have special status in C, \eg the value @0@ is both an integer and a pointer literal, so its meaning depends on context.
    247 In addition, several operations are defined in terms values @0@ and @1@.
    248 For example,
     282In addition, several operations are defined in terms values @0@ and @1@, \eg:
    249283\begin{lstlisting}
    250284int x;
    251 if (x)        // if (x != 0)
    252         x++;    //   x += 1;
    253 \end{lstlisting}
    254 Every if statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result.
     285if (x) x++                                                                      $\C{// if (x != 0) x += 1;}$
     286\end{lstlisting}
     287Every if and iteration statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result.
    255288Due to these rewrite rules, the values @0@ and @1@ have the types @zero_t@ and @one_t@ in \CFA, which allows overloading various operations for new types that seamlessly connect to all special @0@ and @1@ contexts.
    256289The types @zero_t@ and @one_t@ have special built in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal.
     
    269302forall( otype T `| summable( T )` ) T sum( T a[$\,$], size_t size ) {  // use trait
    270303        `T` total = { `0` };                                    $\C{// instantiate T from 0 by calling its constructor}$
    271         for ( unsigned int i = 0; i < size; i += 1 )
    272                 total `+=` a[i];                                        $\C{// select appropriate +}$
     304        for ( unsigned int i = 0; i < size; i += 1 ) total `+=` a[i]; $\C{// select appropriate +}$
    273305        return total; }
    274306\end{lstlisting}
    275307
    276 In fact, the set of operators is incomplete, \eg no assignment, but @otype@ is syntactic sugar for the following implicit trait:
     308In fact, the set of @summable@ trait operators is incomplete, as it is missing assignment for type @T@, but @otype@ is syntactic sugar for the following implicit trait:
    277309\begin{lstlisting}
    278310trait otype( dtype T | sized(T) ) {  // sized is a pseudo-trait for types with known size and alignment
     
    283315\end{lstlisting}
    284316Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete type: stack-allocatable, default or copy-initialized, assigned, and deleted.
    285 % As an example, the @sum@ function produces generated code something like the following (simplified for clarity and brevity)\TODO{fix example, maybe elide, it's likely too long with the more complicated function}:
     317
     318In summation, the \CFA type-system uses \emph{nominal typing} for concrete types, matching with the C type-system, and \emph{structural typing} for polymorphic types.
     319Hence, trait names play no part in type equivalence;
     320the names are simply macros for a list of polymorphic assertions, which are expanded at usage sites.
     321Nevertheless, trait names form a logical subtype-hierarchy with @dtype@ at the top, where traits often contain overlapping assertions, \eg operator @+@.
     322Traits are used like interfaces in Java or abstract base-classes in \CC, but without the nominal inheritance-relationships.
     323Instead, each polymorphic function (or generic type) defines the structural type needed for its execution (polymorphic type-key), and this key is fulfilled at each call site from the lexical environment, which is similar to Go~\citep{Go} interfaces.
     324Hence, new lexical scopes and nested functions are used extensively to create local subtypes, as in the @qsort@ example, without having to manage a nominal-inheritance hierarchy.
     325(Nominal inheritance can be approximated with traits using marker variables or functions, as is done in Go.)
     326
     327% Nominal inheritance can be simulated with traits using marker variables or functions:
    286328% \begin{lstlisting}
    287 % void abs( size_t _sizeof_M, size_t _alignof_M,
    288 %               void (*_ctor_M)(void*), void (*_copy_M)(void*, void*),
    289 %               void (*_assign_M)(void*, void*), void (*_dtor_M)(void*),
    290 %               _Bool (*_lt_M)(void*, void*), void (*_neg_M)(void*, void*),
    291 %       void (*_ctor_M_zero)(void*, int),
    292 %               void* m, void* _rtn ) {                         $\C{// polymorphic parameter and return passed as void*}$
    293 %                                                                                       $\C{// M zero = { 0 };}$
    294 %       void* zero = alloca(_sizeof_M);                 $\C{// stack allocate zero temporary}$
    295 %       _ctor_M_zero(zero, 0);                                  $\C{// initialize using zero\_t constructor}$
    296 %                                                                                       $\C{// return m < zero ? -m : m;}$
    297 %       void *_tmp = alloca(_sizeof_M);
    298 %       _copy_M( _rtn,                                                  $\C{// copy-initialize return value}$
    299 %               _lt_M( m, zero ) ?                                      $\C{// check condition}$
    300 %                (_neg_M(m, _tmp), _tmp) :                      $\C{// negate m}$
    301 %                m);
    302 %       _dtor_M(_tmp); _dtor_M(zero);                   $\C{// destroy temporaries}$
     329% trait nominal(otype T) {
     330%     T is_nominal;
     331% };
     332% int is_nominal;                                                               $\C{// int now satisfies the nominal trait}$
     333% \end{lstlisting}
     334%
     335% Traits, however, are significantly more powerful than nominal-inheritance interfaces; most notably, traits may be used to declare a relationship \emph{among} multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems:
     336% \begin{lstlisting}
     337% trait pointer_like(otype Ptr, otype El) {
     338%     lvalue El *?(Ptr);                                                $\C{// Ptr can be dereferenced into a modifiable value of type El}$
    303339% }
     340% struct list {
     341%     int value;
     342%     list * next;                                                              $\C{// may omit "struct" on type names as in \CC}$
     343% };
     344% typedef list * list_iterator;
     345%
     346% lvalue int *?( list_iterator it ) { return it->value; }
    304347% \end{lstlisting}
    305 
    306 Traits may be used for many of the same purposes as interfaces in Java or abstract base classes in \CC. Unlike Java interfaces or \CC base classes, \CFA types do not explicitly state any inheritance relationship to traits they satisfy, which is a form of structural inheritance, similar to the implementation of an interface in Go~\citep{Go}, as opposed to the nominal inheritance model of Java and \CC.
    307 
    308 Nominal inheritance can be simulated with traits using marker variables or functions:
    309 \begin{lstlisting}
    310 trait nominal(otype T) {
    311     T is_nominal;
     348% In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@).
     349% While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously.
     350
     351
     352\section{Generic Types}
     353
     354One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms.
     355Broadly speaking, there are three approaches to implement abstract data-structures in C.
     356One approach is to write bespoke data structures for each context in which they are needed.
     357While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures.
     358A second approach is to use @void *@--based polymorphism, \eg the C standard-library functions @bsearch@ and @qsort@; an approach which does allow reuse of code for common functionality.
     359However, basing all polymorphism on @void *@ eliminates the type-checker's ability to ensure that argument types are properly matched, often requiring a number of extra function parameters, pointer indirection, and dynamic allocation that would not otherwise be needed.
     360A third approach to generic code is to use preprocessor macros, which does allow the generated code to be both generic and type-checked, but errors may be difficult to interpret.
     361Furthermore, writing and using preprocessor macros can be unnatural and inflexible.
     362
     363\CC, Java, and other languages use \emph{generic types} to produce type-safe abstract data-types.
     364\CFA also implements generic types that integrate efficiently and naturally with the existing polymorphic functions, while retaining backwards compatibility with C and providing separate compilation.
     365However, for known concrete parameters, the generic-type definition can be inlined, like \CC templates.
     366
     367A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name:
     368\begin{lstlisting}
     369forall( otype R, otype S ) struct pair {
     370        R first;
     371        S second;
    312372};
    313 int is_nominal;                                                         $\C{// int now satisfies the nominal trait}$
    314 \end{lstlisting}
    315 
    316 Traits, however, are significantly more powerful than nominal-inheritance interfaces; most notably, traits may be used to declare a relationship \emph{among} multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems:
    317 \begin{lstlisting}
    318 trait pointer_like(otype Ptr, otype El) {
    319     lvalue El *?(Ptr);                                          $\C{// Ptr can be dereferenced into a modifiable value of type El}$
    320 }
    321 struct list {
    322     int value;
    323     list *next;                                                         $\C{// may omit "struct" on type names as in \CC}$
    324 };
    325 typedef list *list_iterator;
    326 
    327 lvalue int *?( list_iterator it ) { return it->value; }
    328 \end{lstlisting}
    329 
    330 In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@).
    331 While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously.
    332 
    333 \section{Generic Types}
    334 
    335 One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms. Broadly speaking, there are three approaches to create data structures in C. One approach is to write bespoke data structures for each context in which they are needed. While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures. A second approach is to use @void*@-based polymorphism. This approach is taken by the C standard library functions @qsort@ and @bsearch@, and does allow the use of common code for common functionality. However, basing all polymorphism on @void*@ eliminates the type-checker's ability to ensure that argument types are properly matched, often requires a number of extra function parameters, and also adds pointer indirection and dynamic allocation to algorithms and data structures that would not otherwise require them. A third approach to generic code is to use pre-processor macros to generate it -- this approach does allow the generated code to be both generic and type-checked, though any errors produced may be difficult to interpret. Furthermore, writing and invoking C code as preprocessor macros is unnatural and somewhat inflexible.
    336 
     337 Other C-like languages such as \CC and Java use \emph{generic types} to produce type-safe abstract data types. \CFA implements generic types with some care taken that the generic-type design for \CFA integrates efficiently and naturally with the existing polymorphic functions in \CFA while retaining backwards compatibility with C; maintaining separate compilation is a particularly important constraint on the design. However, where the concrete parameters of the generic type are known, there is no extra overhead for the use of a generic type, as for \CC templates.
    338 
    339 A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name:
    340 \begin{lstlisting}
    341 forall(otype R, otype S) struct pair {
    342     R first;
    343     S second;
    344 };
    345 
    346 forall(otype T)
    347 T value( pair(const char*, T) p ) { return p.second; }
    348 
    349 forall(dtype F, otype T)
    350 T value_p( pair(F*, T*) p ) { return *p.second; }
    351 
    352 pair(const char*, int) p = { "magic", 42 };
     373forall( otype T ) T value( pair( const char *, T ) p ) { return p.second; }
     374forall( dtype F, otype T ) T value_p( pair( F *, T * ) p ) { return * p.second; }
     375pair( const char *, int ) p = { "magic", 42 };
    353376int magic = value( p );
    354 
    355 pair(void*, int*) q = { 0, &p.second };
     377pair( void *, int * ) q = { 0, &p.second };
    356378magic = value_p( q );
    357379double d = 1.0;
    358 pair(double*, double*) r = { &d, &d };
     380pair( double *, double * ) r = { &d, &d };
    359381d = value_p( r );
    360382\end{lstlisting}
    361383
    362 \CFA classifies generic types as either \emph{concrete} or \emph{dynamic}. Concrete generic types have a fixed memory layout regardless of type parameters, while dynamic generic types vary in their in-memory layout depending on their type parameters. A type may have polymorphic parameters but still be concrete; in \CFA such types are called \emph{dtype-static}. Polymorphic pointers are an example of dtype-static types -- @forall(dtype T) T*@ is a polymorphic type, but for any @T@ chosen, @T*@ has exactly the same in-memory representation as a @void*@, and can therefore be represented by a @void*@ in code generation.
    363 
    364 \CFA generic types may also specify constraints on their argument type to be checked by the compiler. For example, consider the following declaration of a sorted set-type, which ensures that the set key supports equality and relational comparison:
    365 \begin{lstlisting}
    366 forall(otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); })
    367   struct sorted_set;
    368 \end{lstlisting}
    369 
    370 \subsection{Concrete Generic Types}
    371 
    372 The \CFA translator instantiates concrete generic types by template-expanding them to fresh struct types; concrete generic types can therefore be used with zero runtime overhead. To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated struct declarations where appropriate. For example, a function declaration that accepts or returns a concrete generic type produces a declaration for the instantiated struct in the same scope, which all callers that can see that declaration may reuse. As an example of the expansion, the concrete instantiation for @pair(const char*, int)@ looks like this:
     384\CFA classifies generic types as either \emph{concrete} or \emph{dynamic}.
     385Concrete types have a fixed memory layout regardless of type parameters, while dynamic types vary in memory layout depending on their type parameters.
     386A type may have polymorphic parameters but still be concrete, called \emph{dtype-static}.
     387Polymorphic pointers are an example of dtype-static types, \eg @forall(dtype T) T *@ is a polymorphic type, but for any @T@, @T *@  is a fixed-sized pointer, and therefore, can be represented by a @void *@ in code generation.
     388
     389\CFA generic types also allow checked argument-constraints.
     390For example, the following declaration of a sorted set-type ensures the set key supports equality and relational comparison:
     391\begin{lstlisting}
     392forall( otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); } ) struct sorted_set;
     393\end{lstlisting}
     394
     395
     396\subsection{Concrete Generic-Types}
     397
     398The \CFA translator template-expands concrete generic-types into new structure types, affording maximal inlining.
     399To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated structure declarations where appropriate.
     400For example, a function declaration that accepts or returns a concrete generic-type produces a declaration for the instantiated struct in the same scope, which all callers may reuse.
     401For example, the concrete instantiation for @pair( const char *, int )@ is:
    373402\begin{lstlisting}
    374403struct _pair_conc1 {
    375         const char* first;
     404        const char * first;
    376405        int second;
    377406};
    378407\end{lstlisting}
    379408
    380 A concrete generic type with dtype-static parameters is also expanded to a struct type, but this struct type is used for all matching instantiations. In the example above, the @pair(F*, T*)@ parameter to @value_p@ is such a type; its expansion looks something like this, and is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate:
     409A concrete generic-type with dtype-static parameters is also expanded to a structure type, but this type is used for all matching instantiations.
     410In the above example, the @pair( F *, T * )@ parameter to @value_p@ is such a type; its expansion is below and it is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate:
    381411\begin{lstlisting}
    382412struct _pair_conc0 {
    383         void* first;
    384         void* second;
     413        void * first;
     414        void * second;
    385415};
    386416\end{lstlisting}
    387417
    388418
    389 \subsection{Dynamic Generic Types}
    390 
    391 Though \CFA implements concrete generic types efficiently, it also has a fully general system for computing with dynamic generic types. As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. Dynamic generic structs also have implicit size and alignment parameters, and also an \emph{offset array} which contains the offsets of each member of the struct\footnote{Dynamic generic unions need no such offset array, as all members are at offset 0; the size and alignment parameters are still provided for dynamic unions, however.}. Access to members\footnote{The \lstinline@offsetof@ macro is implemented similarly.} of a dynamic generic struct is provided by adding the corresponding member of the offset array to the struct pointer at runtime, essentially moving a compile-time offset calculation to runtime where necessary.
    392 
    393 These offset arrays are statically generated where possible. If a dynamic generic type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume that the generic type is complete (that is, has a known layout) at any call-site, and the offset array is passed from the caller; if the generic type is concrete at the call site the elements of this offset array can even be statically generated using the C @offsetof@ macro. As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void*@, and @_offsetof_pair@ is the offset array passed in to @value@ for @pair(const char*, T)@. The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) };@.
    394 
     396 In some cases the offset arrays cannot be statically generated. For instance, modularity is generally provided in C by including an opaque forward-declaration of a struct and associated accessor and mutator routines in a header file, with the actual implementations in a separately-compiled \texttt{.c} file. \CFA supports this pattern for generic types, and in this instance the caller does not know the actual layout or size of the dynamic generic type, and only holds it by pointer. The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed in to a function from that function's caller. These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic struct (un-@sized@ parameters are forbidden by the language from being used in a context that affects layout). Results of these layout functions are cached so that they are only computed once per type per function.%, as in the example below for @pair@.
    396 % \begin{lstlisting}
    397 % static inline void _layoutof_pair(size_t* _szeof_pair, size_t* _alignof_pair, size_t* _offsetof_pair,
    398 %               size_t _szeof_R, size_t _alignof_R, size_t _szeof_S, size_t _alignof_S) {
    399 %     *_szeof_pair = 0; // default values
    400 %     *_alignof_pair = 1;
    401 
    402 %       // add offset, size, and alignment of first field
    403 %     _offsetof_pair[0] = *_szeof_pair;
    404 %     *_szeof_pair += _szeof_R;
    405 %     if ( *_alignof_pair < _alignof_R ) *_alignof_pair = _alignof_R;
    406 
    407 %       // padding, offset, size, and alignment of second field
    408 %     if ( *_szeof_pair & (_alignof_S - 1) )
    409 %               *_szeof_pair += (_alignof_S - ( *_szeof_pair & (_alignof_S - 1) ) );
    410 %     _offsetof_pair[1] = *_szeof_pair;
    411 %     *_szeof_pair += _szeof_S;
    412 %     if ( *_alignof_pair < _alignof_S ) *_alignof_pair = _alignof_S;
    413 
    414 %       // pad to struct alignment
    415 %     if ( *_szeof_pair & (*_alignof_pair - 1) )
    416 %               *_szeof_pair += ( *_alignof_pair - ( *_szeof_pair & (*_alignof_pair - 1) ) );
    417 % }
    418 % \end{lstlisting}
    419 
    420 Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature. For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values. This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function.
    421 
     422 Whether a type is concrete, dtype-static, or dynamic is decided based solely on the type parameters and @forall@ clause on the struct declaration. This design allows opaque forward declarations of generic types like @forall(otype T) struct Box;@ -- like in C, all uses of @Box(T)@ can be in a separately compiled translation unit, and callers from other translation units know the proper calling conventions to use. If the definition of a struct type was included in the decision of whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T) struct unique_ptr { T* p };@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off.
     419\subsection{Dynamic Generic-Types}
     420
     421Though \CFA implements concrete generic-types efficiently, it also has a fully general system for dynamic generic types.
     422As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller.
     423Dynamic generic-types also have an \emph{offset array} containing structure-member offsets.
     424A dynamic generic-union needs no such offset array, as all members are at offset 0, but size and alignment are still necessary.
     425Access to members of a dynamic structure is provided at runtime via base-displacement addressing with the structure pointer and the member offset (similar to the @offsetof@ macro), moving a compile-time offset calculation to runtime.
     426
     427The offset arrays are statically generated where possible.
     428If a dynamic generic-type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume the generic type is complete (\ie has a known layout) at any call-site, and the offset array is passed from the caller;
     429if the generic type is concrete at the call site, the elements of this offset array can even be statically generated using the C @offsetof@ macro.
     430As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void *@, and @_offsetof_pair@ is the offset array passed into @value@ for @pair( const char *, T )@.
     431The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) }@.
     432
     433In some cases the offset arrays cannot be statically generated.
     434For instance, modularity is generally provided in C by including an opaque forward-declaration of a structure and associated accessor and mutator functions in a header file, with the actual implementations in a separately-compiled @.c@ file.
     435\CFA supports this pattern for generic types, but the caller does not know the actual layout or size of the dynamic generic-type, and only holds it by a pointer.
     436The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed into a function from that function's caller.
     437These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic structure (un@sized@ parameters are forbidden from being used in a context that affects layout).
     438Results of these layout functions are cached so that they are only computed once per type per function. %, as in the example below for @pair@.
     439Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature.
     440For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values.
     441This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function.
     442
     443Whether a type is concrete, dtype-static, or dynamic is decided solely on the type parameters and @forall@ clause on a declaration.
     444This design allows opaque forward declarations of generic types, \eg @forall(otype T) struct Box@ -- like in C, all uses of @Box(T)@ can be separately compiled, and callers from other translation units know the proper calling conventions to use.
      445If the definition of a structure type is included in deciding whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T) struct unique_ptr { T * p }@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off.
     446
    423447
    424448\subsection{Applications}
    425449\label{sec:generic-apps}
    426450
    427 The reuse of dtype-static struct instantiations enables some useful programming patterns at zero runtime cost. The most important such pattern is using @forall(dtype T) T*@ as a type-checked replacement for @void*@, as in this example, which takes a @qsort@ or @bsearch@-compatible comparison routine and creates a similar lexicographic comparison for pairs of pointers:
    428 \begin{lstlisting}
    429 forall(dtype T)
    430 int lexcmp( pair(T*, T*)* a, pair(T*, T*)* b, int (*cmp)(T*, T*) ) {
    431         int c = cmp(a->first, b->first);
    432         if ( c == 0 ) c = cmp(a->second, b->second);
    433         return c;
    434 }
    435 \end{lstlisting}
    436 Since @pair(T*, T*)@ is a concrete type, there are no added implicit parameters to @lexcmp@, so the code generated by \CFA is effectively identical to a version of this function written in standard C using @void*@, yet the \CFA version is type-checked to ensure that the fields of both pairs and the arguments to the comparison function match in type.
    437 
    438 Another useful pattern enabled by reused dtype-static type instantiations is zero-cost ``tag'' structs. Sometimes a particular bit of information is only useful for type-checking, and can be omitted at runtime. Tag structs can be used to provide this information to the compiler without further runtime overhead, as in the following example:
     451The reuse of dtype-static structure instantiations enables useful programming patterns at zero runtime cost.
     452The most important such pattern is using @forall(dtype T) T *@ as a type-checked replacement for @void *@, \eg creating a lexicographic comparison for pairs of pointers used by @bsearch@ or @qsort@:
     453\begin{lstlisting}
     454forall(dtype T) int lexcmp( pair( T *, T * ) * a, pair( T *, T * ) * b, int (* cmp)( T *, T * ) ) {
     455        return cmp( a->first, b->first ) ? : cmp( a->second, b->second );
     456}
     457\end{lstlisting}
     458Since @pair(T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@, so the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type-checked to ensure the fields of both pairs and the arguments to the comparison function match in type.
     459
     460Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \emph{tag-structures}.
     461Sometimes information is only used for type-checking and can be omitted at runtime, \eg:
    439462\begin{lstlisting}
    440463forall(dtype Unit) struct scalar { unsigned long value; };
    441 
    442464struct metres {};
    443465struct litres {};
    444466
    445 forall(dtype U)
    446 scalar(U) ?+?(scalar(U) a, scalar(U) b) {
     467forall(dtype U) scalar(U) ?+?( scalar(U) a, scalar(U) b ) {
    447468        return (scalar(U)){ a.value + b.value };
    448469}
    449 
    450470scalar(metres) half_marathon = { 21093 };
    451471scalar(litres) swimming_pool = { 2500000 };
    452 
    453472scalar(metres) marathon = half_marathon + half_marathon;
    454473scalar(litres) two_pools = swimming_pool + swimming_pool;
    455 marathon + swimming_pool; // ERROR -- caught by compiler
    456 \end{lstlisting}
    457 @scalar@ is a dtype-static type, so all uses of it use a single struct definition, containing only a single @unsigned long@, and can share the same implementations of common routines like @?+?@ -- these implementations may even be separately compiled, unlike \CC template functions. However, the \CFA type-checker ensures that matching types are used by all calls to @?+?@, preventing nonsensical computations like adding the length of a marathon to the volume of an olympic pool.
     474marathon + swimming_pool;                                       $\C{// compilation ERROR}$
     475\end{lstlisting}
     476@scalar@ is a dtype-static type, so all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@.
     477These implementations may even be separately compiled, unlike \CC template functions.
     478However, the \CFA type-checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume.
     479
    458480
    459481\section{Tuples}
    460482\label{sec:tuples}
    461483
    462 The @pair(R, S)@ generic type used as an example in the previous section can be considered a special case of a more general \emph{tuple} data structure. The authors have implemented tuples in \CFA, with a design particularly motivated by two use cases: \emph{multiple-return-value functions} and \emph{variadic functions}.
    463 
    464 In standard C, functions can return at most one value. This restriction results in code that emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}. In the former situation, the function designer creates a record type that combines all of the return values into a single type. Unfortunately, the designer must come up with a name for the return type and for each of its fields. Unnecessary naming is a common programming language issue, introducing verbosity and a complication of the user's mental model. As such, this technique is effective when used sparingly, but can quickly get out of hand if many functions need to return different combinations of types. In the latter approach, the designer simulates multiple return values by passing the additional return values as pointer parameters. The pointer parameters are assigned inside of the routine body to emulate a return. Using this approach, the caller is directly responsible for allocating storage for the additional temporary return values. This responsibility complicates the call site with a sequence of variable declarations leading up to the call. Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from only the routine signature whether the callee expects such a parameter to be initialized before the call. Furthermore, while many C routines that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, there are many C routines that are not null-safe. On a related note, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler.
    465 
    466 C does provide a mechanism for variadic functions through manipulation of @va_list@ objects, but it is notoriously type-unsafe. A variadic function is one that contains at least one parameter, followed by @...@ as the last token in the parameter list. In particular, some form of \emph{argument descriptor} is needed to inform the function of the number of arguments and their types, commonly a format string or counter parameter. It is important to note that both of these mechanisms are inherently redundant, because they require the user to specify information that the compiler knows explicitly. This required repetition is error prone, because it is easy for the user to add or remove arguments without updating the argument descriptor. In addition, C requires the programmer to hard code all of the possible expected types. As a result, it is cumbersome to write a variadic function that is open to extension. For example, consider a simple function that sums $N$ @int@s:
    467 \begin{lstlisting}
    468 int sum(int N, ...) {
    469   va_list args;
    470   va_start(args, N);  // must manually specify last non-variadic argument
    471   int ret = 0;
    472   while(N) {
    473     ret += va_arg(args, int);  // must specify type
    474     N--;
    475   }
    476   va_end(args);
    477   return ret;
    478 }
    479 
    480 sum(3, 10, 20, 30);  // must keep initial counter argument in sync
    481 \end{lstlisting}
    482 
    483 The @va_list@ type is a special C data type that abstracts variadic argument manipulation. The @va_start@ macro initializes a @va_list@, given the last named parameter. Each use of the @va_arg@ macro allows access to the next variadic argument, given a type. Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call. As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures. In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined~\citep{C11}. Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body. Since they rely on programmer convention rather than compile-time checks, variadic functions are inherently unsafe.
    484 
    485 In practice, compilers can provide warnings to help mitigate some of the problems. For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers. Unfortunately, this attribute does not permit extensions to the format string syntax, so a programmer cannot extend it to warn for mismatches with custom types.
     484In many languages, functions can return at most one value;
     485however, many operations have multiple outcomes, some exceptional.
     486Consider C's @div@ and @remquo@ functions, which return the quotient and remainder for a division of integer and floating-point values, respectively.
     487\begin{lstlisting}
     488typedef struct { int quo, rem; } div_t;         $\C{// from include stdlib.h}$
     489div_t div( int num, int den );
     490double remquo( double num, double den, int * quo );
     491div_t qr = div( 13, 5 );                                        $\C{// return quotient/remainder aggregate}$
     492int q;
     493double r = remquo( 13.5, 5.2, &q );                     $\C{// return remainder, alias quotient}$
     494\end{lstlisting}
     495@div@ aggregates the quotient/remainder in a structure, while @remquo@ aliases a parameter to an argument.
     496Both approaches are awkward.
     497Alternatively, a programming language can directly support returning multiple values, \eg in \CFA:
     498\begin{lstlisting}
     499[ int, int ] div( int num, int den );           $\C{// return two integers}$
     500[ double, double ] div( double num, double den ); $\C{// return two doubles}$
     501int q, r;                                                                       $\C{// overloaded variable names}$
     502double q, r;
     503[ q, r ] = div( 13, 5 );                                        $\C{// select appropriate div and q, r}$
     504[ q, r ] = div( 13.5, 5.2 );                            $\C{// assign into tuple}$
     505\end{lstlisting}
     506Clearly, this approach is straightforward to understand and use;
     507therefore, why do few programming languages support this obvious feature or provide it awkwardly?
     508The answer is that there are complex consequences that cascade through multiple aspects of the language, especially the type-system.
     509This section shows these consequences and how \CFA handles them.
     510
    486511
    487512\subsection{Tuple Expressions}
    488513
    489 The tuple extensions in \CFA can express multiple return values and variadic function parameters in an efficient and type-safe manner. \CFA introduces \emph{tuple expressions} and \emph{tuple types}. A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types. The type of a tuple expression is the tuple of the subexpression types, or a \emph{tuple type}. In \CFA, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets. For example, the expression @[5, 'x', 10.5]@ has type @[int, char, double]@. The previous expression has three \emph{components}. Each component in a tuple expression can be any \CFA expression, including another tuple expression. The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}.
    490 
    491 \CFA allows declaration of \emph{tuple variables}, variables of tuple type. For example:
    492 \begin{lstlisting}
    493 [int, char] most_frequent(const char*);
    494 
    495 const char* str = "hello, world!";
    496 [int, char] freq = most_frequent(str);
    497 printf("%s -- %d %c\n", str, freq);
    498 \end{lstlisting}
    499 In this example, the type of the @freq@ and the return type of @most_frequent@ are both tuple types. Also of note is how the tuple expression @freq@ is implicitly flattened into separate @int@ and @char@ arguments to @printf@; this code snippet could have been shortened by replacing the last two lines with @printf("%s -- %d %c\n", str, most_frequent(str));@ using exactly the same mechanism.
    500 
    501 In addition to variables of tuple type, it is also possible to have pointers to tuples, and arrays of tuples. Tuple types can be composed of any types, except for array types, since arrays are not of fixed size, which makes tuple assignment difficult when a tuple contains an array.
    502 \begin{lstlisting}
    503 [double, int] di;
    504 [double, int] * pdi
    505 [double, int] adi[10];
    506 \end{lstlisting}
    507 This example declares a variable of type @[double, int]@, a variable of type pointer to @[double, int]@, and an array of ten @[double, int]@.
     514The addition of multiple-return-value functions (MRVF) are useless without a syntax for accepting multiple values at the call-site.
     515The simplest mechanism for capturing the return values is variable assignment, allowing the values to be retrieved directly.
     516As such, \CFA allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions (as above), called a \emph{tuple}.
     517
     518However, functions also use \emph{composition} (nested calls), with the direct consequence that MRVFs must also support composition to be orthogonal with single-returning-value functions (SRVF), \eg:
     519\begin{lstlisting}
     520printf( "%d %d\n", div( 13, 5 ) );                      $\C{// return values separated into arguments}$
     521\end{lstlisting}
     522Here, the values returned by @div@ are composed with the call to @printf@ by flattening the tuple into separate arguments.
     523However, the \CFA type-system must support significantly more complex composition:
     524\begin{lstlisting}
     525[ int, int ] foo$\(_1\)$( int );
     526[ double ] foo$\(_2\)$( int );
     527void bar( int, double, double );
     528bar( foo( 3 ), foo( 3 ) );
     529\end{lstlisting}
     530The type-resolver only has the tuple return-types to resolve the call to @bar@ as the @foo@ parameters are identical, which involves unifying the possible @foo@ functions with @bar@'s parameter list.
     531No combination of @foo@s is an exact match with @bar@'s parameters, so the resolver applies C conversions.
     532The minimal cost is @bar( foo@$_1$@( 3 ), foo@$_2$@( 3 ) )@, giving (@int@, {\color{ForestGreen}@int@}, @double@) to (@int@, {\color{ForestGreen}@double@}, @double@) with one {\color{ForestGreen}safe} (widening) conversion from @int@ to @double@ versus ({\color{red}@double@}, {\color{ForestGreen}@int@}, {\color{ForestGreen}@int@}) to ({\color{red}@int@}, {\color{ForestGreen}@double@}, {\color{ForestGreen}@double@}) with one {\color{red}unsafe} (narrowing) conversion from @double@ to @int@ and two safe conversions.
     533
     534
     535\subsection{Tuple Variables}
     536
     537An important observation from function composition is that new variable names are not required to initialize parameters from an MRVF.
     538\CFA also allows declaration of tuple variables that can be initialized from an MRVF, since it can be awkward to declare multiple variables of different types, \eg:
     539\begin{lstlisting}
     540[ int, int ] qr = div( 13, 5 );                         $\C{// tuple-variable declaration and initialization}$
     541[ double, double ] qr = div( 13.5, 5.2 );
     542\end{lstlisting}
     543where the tuple variable-name serves the same purpose as the parameter name(s).
     544Tuple variables can be composed of any types, except for array types, since array sizes are generally unknown.
     545
     546One way to access the tuple-variable components is with assignment or composition:
     547\begin{lstlisting}
     548[ q, r ] = qr;                                                          $\C{// access tuple-variable components}$
     549printf( "%d %d\n", qr );
     550\end{lstlisting}
     551\CFA also supports \emph{tuple indexing} to access single components of a tuple expression:
     552\begin{lstlisting}
     553[int, int] * p = &qr;                                           $\C{// tuple pointer}$
     554int rem = qr.1;                                                         $\C{// access remainder}$
     555int quo = div( 13, 5 ).0;                                       $\C{// access quotient}$
     556p->0 = 5;                                                                       $\C{// change quotient}$
     557bar( qr.1, qr );                                                        $\C{// pass remainder and quotient/remainder}$
     558rem = [42, div( 13, 5 )].0.1;                           $\C{// access 2nd component of 1st component of tuple expression}$
     559\end{lstlisting}
     560
    508561
    509562\subsection{Flattening and Restructuring}
    510563
    511 In function call contexts, tuples support implicit flattening and restructuring conversions. Tuple flattening recursively expands a tuple into the list of its basic components. Tuple structuring packages a list of expressions into a value of tuple type.
    512 \begin{lstlisting}
    513 int f(int, int);
    514 int g([int, int]);
    515 int h(int, [int, int]);
     564In function call contexts, tuples support implicit flattening and restructuring conversions.
     565Tuple flattening recursively expands a tuple into the list of its basic components.
     566Tuple structuring packages a list of expressions into a value of tuple type, \eg:
     567%\lstDeleteShortInline@%
     568%\par\smallskip
     569%\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}}
     570\begin{lstlisting}
     571int f( int, int );
     572int g( [int, int] );
     573int h( int, [int, int] );
    516574[int, int] x;
    517575int y;
    518 
    519 f(x);      // flatten
    520 g(y, 10);  // structure
    521 h(x, y);   // flatten & structure
    522 \end{lstlisting}
    523 In \CFA, each of these calls is valid. In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. Finally, in the call to @h@, @y@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure.
    524 
    525 % In {K-W C} \citep{Buhr94a,Till89}, a precursor to \CFA, there were 4 tuple coercions: opening, closing, flattening, and structuring. Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. Flattening coerces a nested tuple into a flat tuple, \ie it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. Structuring moves in the opposite direction, \ie it takes a flat tuple value and provides structure by introducing nested tuple components.
    526 
    527 In \CFA, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. Specifically, the expression resolution algorithm examines all of the possible alternatives for an expression to determine the best match. In resolving a function call expression, each combination of function value and list of argument alternatives is examined. Given a particular argument list and function value, the list of argument alternatives is flattened to produce a list of non-tuple valued expressions. Then the flattened list of expressions is compared with each value in the function's parameter list. If the parameter's type is not a tuple type, then the current argument value is unified with the parameter type, and on success the next argument and parameter are examined. If the parameter's type is a tuple type, then the structuring conversion takes effect, recursively applying the parameter matching algorithm using the tuple's component types as the parameter list types. Assuming a successful unification, eventually the algorithm gets to the end of the tuple type, which causes all of the matching expressions to be consumed and structured into a tuple expression. For example, in
    528 \begin{lstlisting}
    529 int f(int, [double, int]);
    530 f([5, 10.2], 4);
    531 \end{lstlisting}
    532 There is only a single definition of @f@, and 3 arguments with only single interpretations. First, the argument alternative list @[5, 10.2], 4@ is flattened to produce the argument list @5, 10.2, 4@. Next, the parameter matching algorithm begins, with $P =~$@int@ and $A =~$@int@, which unifies exactly. Moving to the next parameter and argument, $P =~$@[double, int]@ and $A =~$@double@. This time, the parameter is a tuple type, so the algorithm applies recursively with $P' =~$@double@ and $A =~$@double@, which unifies exactly. Then $P' =~$@int@ and $A =~$@double@, which again unifies exactly. At this point, the end of $P'$ has been reached, so the arguments @10.2, 4@ are structured into the tuple expression @[10.2, 4]@. Finally, the end of the parameter list $P$ has also been reached, so the final expression is @f(5, [10.2, 4])@.
     576f( x );                 $\C{// flatten}$
     577g( y, 10 );             $\C{// structure}$
     578h( x, y );              $\C{// flatten and structure}$
     579\end{lstlisting}
     580%\end{lstlisting}
     581%&
     582%\begin{lstlisting}
     583%\end{tabular}
     584%\smallskip\par\noindent
     585%\lstMakeShortInline@%
     586In the call to @f@, @x@ is implicitly flattened so the components of @x@ are passed as the two arguments.
     587In the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the parameter type of @g@.
     588Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@.
     589The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both SRVF and MRVF, and with any number of arguments of arbitrarily complex structure.
     590
     591
     592\subsection{Tuple Assignment}
     593
     594An assignment where the left side is a tuple type is called \emph{tuple assignment}.
     595There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple} and \emph{mass assignment}, respectively.
     596%\lstDeleteShortInline@%
     597%\par\smallskip
     598%\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}}
     599\begin{lstlisting}
     600int x = 10;
     601double y = 3.5;
     602[int, double] z;
     603z = [x, y];                                                                     $\C{// multiple assignment}$
     604[x, y] = z;                                                                     $\C{// multiple assignment}$
     605z = 10;                                                                         $\C{// mass assignment}$
     606[y, x] = 3.14;                                                          $\C{// mass assignment}$
     607\end{lstlisting}
     608%\end{lstlisting}
     609%&
     610%\begin{lstlisting}
     611%\end{tabular}
     612%\smallskip\par\noindent
     613%\lstMakeShortInline@%
     614Both kinds of tuple assignment have parallel semantics, so that each value on the left and right side is evaluated before any assignments occur.
     615As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function, \eg, @[x, y] = [y, x]@.
     616This semantics means mass assignment differs from C cascading assignment (\eg @a = b = c@) in that conversions are applied in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment.
     617For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, yielding @y == 3.14@ and @x == 3@;
     618whereas C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, yielding @3@ in @y@ and @x@.
     619Finally, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C.
     620This example shows mass, multiple, and cascading assignment used in one expression:
     621\begin{lstlisting}
     622void f( [int, int] );
     623f( [x, y] = z = 1.5 );                                          $\C{// assignments in parameter list}$
     624\end{lstlisting}
     625
    533626
    534627\subsection{Member Access}
    535628
    536 At times, it is desirable to access a single component of a tuple-valued expression without creating unnecessary temporary variables to assign to. Given a tuple-valued expression @e@ and a compile-time constant integer $i$ where $0 \leq i < n$, where $n$ is the number of components in @e@, @e.i@ accesses the $i$\textsuperscript{th} component of @e@. For example,
    537 \begin{lstlisting}
    538 [int, double] x;
    539 [char *, int] f();
    540 void g(double, int);
    541 [int, double] * p;
    542 
    543 int y = x.0;  // access int component of x
    544 y = f().1;  // access int component of f
    545 p->0 = 5;  // access int component of tuple pointed-to by p
    546 g(x.1, x.0);  // rearrange x to pass to g
    547 double z = [x, f()].0.1;  // access second component of first component of tuple expression
    548 \end{lstlisting}
    549 As seen above, tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple. This feature was proposed for {K-W C}, but never implemented~\citep[p.~45]{Till89}.
    550 
    551 It is possible to access multiple fields from a single expression using a \emph{member-access tuple expression}. The result is a single tuple expression whose type is the tuple of the types of the members. For example,
     629It is also possible to access multiple fields from a single expression using a \emph{member-access}.
     630The result is a single tuple-valued expression whose type is the tuple of the types of the members, \eg:
    552631\begin{lstlisting}
    553632struct S { int x; double y; char * z; } s;
    554 s.[x, y, z];
    555 \end{lstlisting}
    556 Here, the type of @s.[x, y, z]@ is @[int, double, char *]@. A member tuple expression has the form @a.[x, y, z];@ where @a@ is an expression with type @T@, where @T@ supports member access expressions, and @x, y, z@ are all members of @T@ with types @T$_x$@, @T$_y$@, and @T$_z$@ respectively. Then the type of @a.[x, y, z]@ is @[T$_x$, T$_y$, T$_z$]@.
    557 
    558 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange components, drop components, duplicate components, etc.):
     633s.[x, y, z] = 0;
     634\end{lstlisting}
     635Here, the mass assignment sets all members of @s@ to zero.
     636Since tuple-index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange, drop, and duplicate components).
     637%\lstDeleteShortInline@%
     638%\par\smallskip
     639%\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}}
    559640\begin{lstlisting}
    560641[int, int, long, double] x;
    561 void f(double, long);
    562 
    563 f(x.[0, 3]);          // f(x.0, x.3)
    564 x.[0, 1] = x.[1, 0];  // [x.0, x.1] = [x.1, x.0]
    565 [long, int, long] y = x.[2, 0, 2];
    566 \end{lstlisting}
    567 
    568 It is possible for a member tuple expression to contain other member access expressions:
     642void f( double, long );
     643x.[0, 1] = x.[1, 0];                                            $\C{// rearrange: [x.0, x.1] = [x.1, x.0]}$
     644f( x.[0, 3] );                                                          $\C{// drop: f(x.0, x.3)}$
     645[int, int, int] y = x.[2, 0, 2];                        $\C{// duplicate: [y.0, y.1, y.2] = [x.2, x.0, x.2]}$
     646\end{lstlisting}
     647%\end{lstlisting}
     648%&
     649%\begin{lstlisting}
     650%\end{tabular}
     651%\smallskip\par\noindent
     652%\lstMakeShortInline@%
     653It is also possible for a member access to contain other member accesses, \eg:
    569654\begin{lstlisting}
    570655struct A { double i; int j; };
    571656struct B { int * k; short l; };
    572657struct C { int x; A y; B z; } v;
    573 v.[x, y.[i, j], z.k];
    574 \end{lstlisting}
    575 This expression is equivalent to @[v.x, [v.y.i, v.y.j], v.z.k]@. That is, the aggregate expression is effectively distributed across the tuple, which allows simple and easy access to multiple components in an aggregate, without repetition. It is guaranteed that the aggregate expression to the left of the @.@ in a member tuple expression is evaluated exactly once. As such, it is safe to use member tuple expressions on the result of a side-effecting function.
    576 
    577 \subsection{Tuple Assignment}
    578 
    579 In addition to tuple-index expressions, individual components of tuples can be accessed by a \emph{destructuring assignment} which has a tuple expression with lvalue components on its left-hand side. More generally, an assignment where the left-hand side of the assignment operator has a tuple type is called \emph{tuple assignment}. There are two kinds of tuple assignment depending on whether the right-hand side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple assignment} and \emph{mass assignment}, respectively.
    580 \begin{lstlisting}
    581 int x;
    582 double y;
    583 [int, double] z;
    584 [y, x] = 3.14;  // mass assignment
    585 [x, y] = z;     // multiple assignment
    586 z = 10;         // mass assignment
    587 z = [x, y];     // multiple assignment
    588 \end{lstlisting}
    589 Let $L_i$ for $i$ in $[0, n)$ represent each component of the flattened left-hand side, $R_i$ represent each component of the flattened right-hand side of a multiple assignment, and $R$ represent the right-hand side of a mass assignment.
    590 
    591 For a multiple assignment to be valid, both tuples must have the same number of elements when flattened. Multiple assignment assigns $R_i$ to $L_i$ for each $i$.
    592 That is, @?=?(&$L_i$, $R_i$)@ must be a well-typed expression. In the previous example, @[x, y] = z@, @z@ is flattened into @z.0, z.1@, and the assignments @x = z.0@ and @y = z.1@ are executed.
    593 
    594 A mass assignment assigns the value $R$ to each $L_i$. For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. This rule differs from C cascading assignment (\eg @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well.
    595 
    596 Both kinds of tuple assignment have parallel semantics, such that each value on the left side and right side is evaluated \emph{before} any assignments occur. As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function:
    597 \begin{lstlisting}
    598 int x = 10, y = 20;
    599 [x, y] = [y, x];
    600 \end{lstlisting}
    601 After executing this code, @x@ has the value @20@ and @y@ has the value @10@.
    602 
    603 Tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C. This definition allows cascading tuple assignment and use of tuple assignment in other expression contexts, an occasionally useful idiom to keep code succinct and reduce repetition.
    604 % In \CFA, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, as in normal assignment. That is, a tuple assignment produces the value of the left-hand side after assignment. These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. These semantics are a change from the original tuple design in {K-W C}~\citep{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. This decision was made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position. While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA without affecting backwards compatibility with C. Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. In another language, tuple assignment as a statement could be reasonable, but it would be inconsistent for tuple assignment to be the only kind of assignment in \CFA that is not an expression.
    605 
     658v.[x, y.[i, j], z.k];                                           $\C{// [v.x, [v.y.i, v.y.j], v.z.k]}$
     659\end{lstlisting}
     660
     661
     662\begin{comment}
    606663\subsection{Casting}
    607664
    608 In C, the cast operator is used to explicitly convert between types. In \CFA, the cast operator has a secondary use as type ascription. That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:
     665In C, the cast operator is used to explicitly convert between types.
     666In \CFA, the cast operator has a secondary use as type ascription.
     667That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function:
    609668\begin{lstlisting}
    610669int f();     // (1)
     
    615674\end{lstlisting}
    616675
    617 Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples. Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:
     676Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples.
     677Taking a look at standard C provides some guidance with respect to the way casts should work with tuples:
    618678\begin{lstlisting}
    619679int f();
     
    623683(int)g();  // (2)
    624684\end{lstlisting}
    625 In C, (1) is a valid cast, which calls @f@ and discards its result. On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical. Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid.
    626 
    627 Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$. Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression. This approach follows naturally from the way that a cast to @void@ works in C.
     685In C, (1) is a valid cast, which calls @f@ and discards its result.
     686On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical.
     687Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid.
     688
     689Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$.
     690Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression.
     691This approach follows naturally from the way that a cast to @void@ works in C.
    628692
    629693For example, in
    630694\begin{lstlisting}
    631   [int, int, int] f();
    632   [int, [int, int], int] g();
    633 
    634   ([int, double])f();           $\C{// (1)}$
    635   ([int, int, int])g();         $\C{// (2)}$
    636   ([void, [int, int]])g();      $\C{// (3)}$
    637   ([int, int, int, int])g();    $\C{// (4)}$
    638   ([int, [int, int, int]])g();  $\C{// (5)}$
    639 \end{lstlisting}
    640 
    641 (1) discards the last element of the return value and converts the second element to @double@. Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@. If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@.
     695[int, int, int] f();
     696[int, [int, int], int] g();
     697
     698([int, double])f();           $\C{// (1)}$
     699([int, int, int])g();         $\C{// (2)}$
     700([void, [int, int]])g();      $\C{// (3)}$
     701([int, int, int, int])g();    $\C{// (4)}$
     702([int, [int, int, int]])g();  $\C{// (5)}$
     703\end{lstlisting}
     704
     705(1) discards the last element of the return value and converts the second element to @double@.
     706Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@.
     707If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@.
     708Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@.
    643709
    644 Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@.
     710Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}.
     711As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3.
     712Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid.
     713That is, it is invalid to cast @[int, int]@ to @[int, int, int]@.
     714\end{comment}
     715
    645716
    646717\subsection{Polymorphism}
    647718
    648 Tuples also integrate with \CFA polymorphism as a special sort of generic type. Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types.
    649 \begin{lstlisting}
    650 forall(otype T, dtype U)
    651 void f(T x, U * y);
    652 
    653 f([5, "hello"]);
    654 \end{lstlisting}
    655 In this example, @[5, "hello"]@ is flattened, so that the argument list appears as @5, "hello"@. The argument matching algorithm binds @T@ to @int@ and @U@ to @const char*@, and calls the function as normal.
    656 
    657 Tuples, however, may contain polymorphic components. For example, a plus operator can be written to add two triples of a type together.
    658 \begin{lstlisting}
    659 forall(otype T | { T ?+?(T, T); })
    660 [T, T, T] ?+?([T, T, T] x, [T, T, T] y) {
    661   return [x.0+y.0, x.1+y.1, x.2+y.2];
     719Tuples also integrate with \CFA polymorphism as a kind of generic type.
     720Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types, \eg:
     721\begin{lstlisting}
     722forall(otype T, dtype U) void f( T x, U * y );
     723f( [5, "hello"] );
     724\end{lstlisting}
     725where @[5, "hello"]@ is flattened, giving argument list @5, "hello"@, and @T@ binds to @int@ and @U@ binds to @const char@.
     726Tuples, however, may contain polymorphic components.
     727For example, a plus operator can be written to add two triples together.
     728\begin{lstlisting}
     729forall(otype T | { T ?+?( T, T ); }) [T, T, T] ?+?( [T, T, T] x, [T, T, T] y ) {
     730        return [x.0 + y.0, x.1 + y.1, x.2 + y.2];
    662731}
    663732[int, int, int] x;
     
    666735\end{lstlisting}
    667736
    668 Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. Previously in \CFA, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie no implicit conversions are applied to assertion arguments). In the example below:
    669 \begin{lstlisting}
    670 int f([int, double], double);
    671 forall(otype T, otype U | { T f(T, U, U); })
    672 void g(T, U);
    673 g(5, 10.21);
    674 \end{lstlisting}
    675 If assertion arguments must match exactly, then the call to @g@ cannot be resolved, since the expected type of @f@ is flat, while the only @f@ in scope requires a tuple type. Since tuples are fluid, this requirement reduces the usability of tuples in polymorphic code. To ease this pain point, function parameter and return lists are flattened for the purposes of type unification, which allows the previous example to pass expression resolution.
    676 
    677 This relaxation is made possible by extending the existing thunk generation scheme, as described by \citet{Bilson03}. Now, whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function:
    678 \begin{lstlisting}
    679 int _thunk(int _p0, double _p1, double _p2) {
    680   return f([_p0, _p1], _p2);
    681 }
    682 \end{lstlisting}
    683 Essentially, this thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism. These thunks take advantage of GCC C nested functions to produce closures that have the usual function pointer signature.
     737Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions.
     738\begin{lstlisting}
     739int f( [int, double], double );
     740forall(otype T, otype U | { T f( T, U, U ); }) void g( T, U );
     741g( 5, 10.21 );
     742\end{lstlisting}
     743Hence, function parameter and return lists are flattened for the purposes of type unification allowing the example to pass expression resolution.
     744This relaxation is possible by extending the thunk scheme described by \citet{Bilson03}.
     745Whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function:
     746\begin{lstlisting}
     747int _thunk( int _p0, double _p1, double _p2 ) { return f( [_p0, _p1], _p2 ); }
     748\end{lstlisting}
     749so the thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism.
     750These thunks take advantage of GCC C nested-functions to produce closures that have the usual function pointer signature.
     751
    684752
    685753\subsection{Variadic Tuples}
    686 
    687 To define variadic functions, \CFA adds a new kind of type parameter, @ttype@. Matching against a @ttype@ (``tuple type'') parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types. In a given parameter list, there should be at most one @ttype@ parameter that must occur last, otherwise the call can never resolve, given the previous rule. This idea essentially matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates. As such, @ttype@ variables are also referred to as \emph{argument} or \emph{parameter packs} in this paper.
    688 
    689 Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is through recursion. Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled.
    690 
    691 For example, the C @sum@ function at the beginning of Section~\ref{sec:tuples} could be written using @ttype@ as:
    692 \begin{lstlisting}
    693 int sum(){ return 0; }        // (0)
    694 forall(ttype Params | { int sum(Params); })
    695 int sum(int x, Params rest) { // (1)
    696   return x+sum(rest);
    697 }
    698 sum(10, 20, 30);
    699 \end{lstlisting}
    700 Since (0) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@.
    701 In order to call (1), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@.
    702 In order to finish the resolution of @sum@, an assertion parameter that matches @int sum(int, int)@ is required.
    703 Like in the previous iteration, (0) is not a valid candidate, so (1) is examined with @Params@ bound to @[int]@, requiring the assertion @int sum(int)@.
    704 Next, (0) fails, and to satisfy (1) @Params@ is bound to @[]@, requiring an assertion @int sum()@.
    705 Finally, (0) matches and (1) fails, which terminates the recursion.
    706 Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10+sum(20, 30)@ $\rightarrow$ @10+(20+sum(30))@ $\rightarrow$ @10+(20+(30+sum()))@ $\rightarrow$ @10+(20+(30+0))@.
    707 
    708 As a point of note, this version does not require any form of argument descriptor, since the \CFA type system keeps track of all of these details. It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments:
    709 \begin{lstlisting}
    710 int sum(int x, int y){
    711   return x+y;
    712 }
    713 forall(ttype Params | { int sum(int, Params); })
    714 int sum(int x, int y, Params rest) {
    715   return sum(x+y, rest);
    716 }
    717 \end{lstlisting}
    718 
    719 One more iteration permits the summation of any summable type, as long as all arguments are the same type:
     754\label{sec:variadic-tuples}
     755
     756To define variadic functions, \CFA adds a new kind of type parameter, @ttype@ (tuple type).
     757Matching against a @ttype@ parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types.
     758In a given parameter list, there must be at most one @ttype@ parameter that occurs last, which matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates.
     759As such, @ttype@ variables are also called \emph{argument packs}.
     760
     761Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is via recursion.
     762Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful.
     763Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled.
     764For example, a generalized @sum@ function written using @ttype@:
     765\begin{lstlisting}
     766int sum$\(_0\)$() { return 0; }
     767forall(ttype Params | { int sum( Params ); } ) int sum$\(_1\)$( int x, Params rest ) {
     768        return x + sum( rest );
     769}
     770sum( 10, 20, 30 );
     771\end{lstlisting}
     772Since @sum@\(_0\) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@.
     773In order to call @sum@\(_1\), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@.
     774The process continues, @Params@ is bound to @[]@, requiring an assertion @int sum()@, which matches @sum@\(_0\) and terminates the recursion.
     775Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10 + sum(20, 30)@ $\rightarrow$ @10 + (20 + sum(30))@ $\rightarrow$ @10 + (20 + (30 + sum()))@ $\rightarrow$ @10 + (20 + (30 + 0))@.
     776
     777It is reasonable to take the @sum@ function a step further to enforce a minimum number of arguments:
     778\begin{lstlisting}
     779int sum( int x, int y ) { return x + y; }
     780forall(ttype Params | { int sum( int, Params ); } ) int sum( int x, int y, Params rest ) {
     781        return sum( x + y, rest );
     782}
     783\end{lstlisting}
     784One more step permits the summation of any summable type with all arguments of the same type:
    720785\begin{lstlisting}
    721786trait summable(otype T) {
    722   T ?+?(T, T);
     787        T ?+?( T, T );
    723788};
    724 forall(otype R | summable(R))
    725 R sum(R x, R y){
    726   return x+y;
    727 }
    728 forall(otype R, ttype Params
    729   | summable(R)
    730   | { R sum(R, Params); })
    731 R sum(R x, R y, Params rest) {
    732   return sum(x+y, rest);
    733 }
    734 \end{lstlisting}
    735 Unlike C, it is not necessary to hard code the expected type. This code is naturally open to extension, in that any user-defined type with a @?+?@ operator is automatically able to be used with the @sum@ function. That is to say, the programmer who writes @sum@ does not need full program knowledge of every possible data type, unlike what is necessary to write an equivalent function using the standard C mechanisms. Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators.
    736 
    737 It is also possible to write a type-safe variadic print routine which can replace @printf@:
     789forall(otype R | summable( R ) ) R sum( R x, R y ) {
     790        return x + y;
     791}
     792forall(otype R, ttype Params | summable(R) | { R sum(R, Params); } ) R sum(R x, R y, Params rest) {
     793        return sum( x + y, rest );
     794}
     795\end{lstlisting}
     796Unlike C variadic functions, it is unnecessary to hard code the number and expected types.
      797Furthermore, this code is extendable, as any user-defined type with a @?+?@ operator is automatically able to be used with the @sum@ function.
     798Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators.
     799
     800It is also possible to write a type-safe variadic print function to replace @printf@:
    738801\begin{lstlisting}
    739802struct S { int x, y; };
    740 forall(otype T, ttype Params |
    741   { void print(T); void print(Params); })
    742 void print(T arg, Params rest) {
    743   print(arg);
    744   print(rest);
    745 }
    746 void print(char * x) { printf("%s", x); }
    747 void print(int x) { printf("%d", x);  }
    748 void print(S s) { print("{ ", s.x, ",", s.y, " }"); }
    749 
    750 print("s = ", (S){ 1, 2 }, "\n");
    751 \end{lstlisting}
    752 This example routine showcases a variadic-template-like decomposition of the provided argument list. The individual @print@ routines allow printing a single element of a type. The polymorphic @print@ allows printing any list of types, as long as each individual type has a @print@ function. The individual print functions can be used to build up more complicated @print@ routines, such as for @S@, which is something that cannot be done with @printf@ in C.
    753 
    754 It is also possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions. For example, it is possible to write @new@ as a library function:
    755 \begin{lstlisting}
    756 struct Pair(otype R, otype S);
    757 forall(otype R, otype S)
    758 void ?{}(Pair(R, S) *, R, S);  // (1)
    759 
    760 forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); })
    761 T * new(Params p) {
    762   return ((T*)malloc( sizeof(T) )){ p }; // construct into result of malloc
    763 }
    764 
    765 Pair(int, char) * x = new(42, '!');
    766 \end{lstlisting}
    767 The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects. This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference.
    768 
    769 In the call to @new@, @Pair(double, char)@ is selected to match @T@, and @Params@ is expanded to match @[double, char]@. The constructor (1) may be specialized to  satisfy the assertion for a constructor with an interface compatible with @void ?{}(Pair(int, char) *, int, char)@.
    770 
    771 \TODO{Check if we actually can use ttype parameters on generic types (if they set the complete flag, it should work, or nearly so).}
     803forall(otype T, ttype Params | { void print(T); void print(Params); }) void print(T arg, Params rest) {
     804        print(arg);  print(rest);
     805}
     806void print( char * x ) { printf( "%s", x ); }
     807void print( int x ) { printf( "%d", x ); }
     808void print( S s ) { print( "{ ", s.x, ",", s.y, " }" ); }
     809print( "s = ", (S){ 1, 2 }, "\n" );
     810\end{lstlisting}
     811This example showcases a variadic-template-like decomposition of the provided argument list.
     812The individual @print@ functions allow printing a single element of a type.
      813The polymorphic @print@ allows printing any list of types, as long as each individual type has a @print@ function.
     814The individual print functions can be used to build up more complicated @print@ functions, such as @S@, which cannot be done with @printf@ in C.
     815
     816Finally, it is possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions.
     817For example, it is possible to write @new@ as a library function:
     818\begin{lstlisting}
     819forall( otype R, otype S ) void ?{}( pair(R, S) *, R, S );
     820forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params ); } ) T * new( Params p ) {
     821        return ((T *)malloc()){ p };                    $\C{// construct into result of malloc}$
     822}
     823pair( int, char ) * x = new( 42, '!' );
     824\end{lstlisting}
     825The @new@ function provides the combination of type-safe @malloc@ with a \CFA constructor call, making it impossible to forget constructing dynamically allocated objects.
     826This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference.
     827
    772828
    773829\subsection{Implementation}
    774830
    775 Tuples are implemented in the \CFA translator via a transformation into generic types. For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated. For example:
     831Tuples are implemented in the \CFA translator via a transformation into generic types.
     832For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated, \eg:
    776833\begin{lstlisting}
    777834[int, int] f() {
    778   [double, double] x;
    779   [int, double, int] y;
    780 }
    781 \end{lstlisting}
    782 Is transformed into:
    783 \begin{lstlisting}
    784 forall(dtype T0, dtype T1 | sized(T0) | sized(T1))
    785 struct _tuple2 {  // generated before the first 2-tuple
    786   T0 field_0;
    787   T1 field_1;
     835        [double, double] x;
     836        [int, double, int] y;
     837}
     838\end{lstlisting}
     839is transformed into:
     840\begin{lstlisting}
     841forall(dtype T0, dtype T1 | sized(T0) | sized(T1)) struct _tuple2 {
     842        T0 field_0;                                                             $\C{// generated before the first 2-tuple}$
     843        T1 field_1;
    788844};
    789845_tuple2(int, int) f() {
    790   _tuple2(double, double) x;
    791   forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2))
    792   struct _tuple3 {  // generated before the first 3-tuple
    793     T0 field_0;
    794     T1 field_1;
    795     T2 field_2;
    796   };
    797   _tuple3_(int, double, int) y;
    798 }
    799 \end{lstlisting}
    800 
    801 Tuple expressions are then simply converted directly into compound literals:
    802 \begin{lstlisting}
    803 [5, 'x', 1.24];
    804 \end{lstlisting}
    805 Becomes:
    806 \begin{lstlisting}
    807 (_tuple3(int, char, double)){ 5, 'x', 1.24 };
    808 \end{lstlisting}
    809 
     846        _tuple2(double, double) x;
     847        forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2)) struct _tuple3 {
     848                T0 field_0;                                                     $\C{// generated before the first 3-tuple}$
     849                T1 field_1;
     850                T2 field_2;
     851        };
     852        _tuple3(int, double, int) y;
     853}
     854\end{lstlisting}
     855Tuple expressions are then simply converted directly into compound literals, \eg @[5, 'x', 1.24]@ becomes @(_tuple3(int, char, double)){ 5, 'x', 1.24 }@.
     856
     857\begin{comment}
    810858Since tuples are essentially structures, tuple indexing expressions are just field accesses:
    811859\begin{lstlisting}
     
    826874f(x.field_0, (_tuple2){ x.field_1, 'z' });
    827875\end{lstlisting}
    828 Note that due to flattening, @x@ used in the argument position is converted into the list of its fields. In the call to @f@, the second and third argument components are structured into a tuple argument. Similarly, tuple member expressions are recursively expanded into a list of member access expressions.
    829 
    830 Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion. Each unique expression is assigned an identifier and is guaranteed to be executed exactly once:
     876Note that due to flattening, @x@ used in the argument position is converted into the list of its fields.
     877In the call to @f@, the second and third argument components are structured into a tuple argument.
     878Similarly, tuple member expressions are recursively expanded into a list of member access expressions.
     879
     880Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion.
     881Each unique expression is assigned an identifier and is guaranteed to be executed exactly once:
    831882\begin{lstlisting}
    832883void g(int, double);
     
    842893[int, double] _unq0;
    843894g(
    844   (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0,
    845   (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1,
     895        (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0,
     896        (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1,
    846897);
    847898\end{lstlisting}
    848 Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order. The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true. Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. Tuple member expressions also take advantage of unique expressions in the case of possible impurity.
    849 
    850 Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions.
    851 
    852 The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, \eg in a unique expression. The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new.
     899Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order.
     900The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true.
     901Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression.
     902Tuple member expressions also take advantage of unique expressions in the case of possible impurity.
     903
     904Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure.
     905This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions.
     906
     907The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions.
     908A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, \eg in a unique expression.
     909The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language.
     910However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new.
     911\end{comment}
     912
    853913
    854914\section{Evaluation}
    855 
    856 \TODO{Magnus suggests we need some graphs, it's kind of a done thing that the reviewers will be looking for. Also, we've made some unsubstantiated claims about the runtime performance of \CFA, which some micro-benchmarks could help with. I'm thinking a simple stack push and pop, with an idiomatic \lstinline@void*@, \CFA, \CC template and \CC virtual inheritance versions (the void* and virtual inheritance versions likely need to be linked lists, or clumsy in their API -- possibly both versions) to test generics, and variadic print to test tuples. We measure SLOC, runtime performance, executable size (making sure to include benchmarks for multiple types in the executable), and possibly manually count the number of places where the programmer must provide un-type-checked type information. Appendices don't count against our page limit, so we might want to include the source code for the benchmarks (or at least the relevant implementation details) in one.}
     915\label{sec:eval}
     916
     917Though \CFA provides significant added functionality over C, these features have a low runtime penalty.
     918In fact, \CFA's features for generic programming can enable faster runtime execution than idiomatic @void *@-based C code.
     919This claim is demonstrated through a set of generic-code-based micro-benchmarks in C, \CFA, and \CC (see stack implementations in Appendix~\ref{sec:BenchmarkStackImplementation}).
     920Since all these languages share a subset essentially comprising standard C, maximal-performance benchmarks would show little runtime variance, other than in length and clarity of source code.
     921A more illustrative benchmark measures the costs of idiomatic usage of each language's features.
     922Figure~\ref{fig:BenchmarkTest} shows the \CFA benchmark tests for a generic stack based on a singly linked-list, a generic pair-data-structure, and a variadic @print@ routine similar to that in Section~\ref{sec:variadic-tuples}.
     923The benchmark test is similar for C and \CC.
     924The experiment uses element types @int@ and @pair(_Bool, char)@, and pushes $N=40M$ elements on a generic stack, copies the stack, clears one of the stacks, finds the maximum value in the other stack, and prints $N/2$ (to reduce graph height) constants.
     925
     926\begin{figure}
     927\begin{lstlisting}[xleftmargin=3\parindentlnth,aboveskip=0pt,belowskip=0pt]
     928int main( int argc, char * argv[] ) {
     929        FILE * out = fopen( "cfa-out.txt", "w" );
     930        int maxi = 0, vali = 42;
     931        stack(int) si, ti;
     932
     933        REPEAT_TIMED( "push_int", N, push( &si, vali ); )
     934        TIMED( "copy_int", ti = si; )
     935        TIMED( "clear_int", clear( &si ); )
     936        REPEAT_TIMED( "pop_int", N,
     937                int xi = pop( &ti ); if ( xi > maxi ) { maxi = xi; } )
     938        REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); )
     939
     940        pair(_Bool, char) maxp = { (_Bool)0, '\0' }, valp = { (_Bool)1, 'a' };
     941        stack(pair(_Bool, char)) sp, tp;
     942
     943        REPEAT_TIMED( "push_pair", N, push( &sp, valp ); )
     944        TIMED( "copy_pair", tp = sp; )
     945        TIMED( "clear_pair", clear( &sp ); )
     946        REPEAT_TIMED( "pop_pair", N,
     947                pair(_Bool, char) xp = pop( &tp ); if ( xp > maxp ) { maxp = xp; } )
     948        REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); )
     949        fclose(out);
     950}
     951\end{lstlisting}
     952\caption{\CFA Benchmark Test}
     953\label{fig:BenchmarkTest}
     954\end{figure}
     955
     956The structure of each benchmark implemented is: C with @void *@-based polymorphism, \CFA with the presented features, \CC with templates, and \CC using only class inheritance for polymorphism, called \CCV.
     957The \CCV variant illustrates an alternative object-oriented idiom where all objects inherit from a base @object@ class, mimicking a Java-like interface;
     958hence runtime checks are necessary to safely down-cast objects.
     959The most notable difference among the implementations is in memory layout of generic types: \CFA and \CC inline the stack and pair elements into corresponding list and pair nodes, while C and \CCV lack such a capability and instead must store generic objects via pointers to separately-allocated objects.
     960For the print benchmark, idiomatic printing is used: the C and \CFA variants used @stdio.h@, while the \CC and \CCV variants used @iostream@; preliminary tests show this distinction has negligible runtime impact.
     961Note, the C benchmark uses unchecked casts as there is no runtime mechanism to perform such checks, while \CFA and \CC provide type-safety statically.
     962
     963Figure~\ref{fig:eval} and Table~\ref{tab:eval} show the results of running the benchmark in Figure~\ref{fig:BenchmarkTest} and its C, \CC, and \CCV equivalents.
     964The graph plots the median of 5 consecutive runs of each program, with an initial warm-up run omitted.
     965All code is compiled at \texttt{-O2} by GCC or G++ 6.2.0, with all \CC code compiled as \CCfourteen.
     966The benchmarks are run on an Ubuntu 16.04 workstation with 16 GB of RAM and a 6-core AMD FX-6300 CPU with 3.5 GHz maximum clock frequency.
     967
     968\begin{figure}
     969\centering
     970\input{timing}
     971\caption{Benchmark Timing Results (smaller is better)}
     972\label{fig:eval}
     973\end{figure}
     974
     975\begin{table}
     976\caption{Properties of benchmark code}
     977\label{tab:eval}
     978\newcommand{\CT}[1]{\multicolumn{1}{c}{#1}}
     979\begin{tabular}{rrrrr}
     980                                                                        & \CT{C}        & \CT{\CFA}     & \CT{\CC}      & \CT{\CCV}             \\ \hline
     981maximum memory usage (MB)                       & 10001         & 2502          & 2503          & 11253                 \\
     982source code size (lines)                        & 247           & 222           & 165           & 339                   \\
     983redundant type annotations (lines)      & 39            & 2                     & 2                     & 15                    \\
     984binary size (KB)                                        & 14            & 229           & 18            & 38                    \\
     985\end{tabular}
     986\end{table}
     987
     988The C and \CCV variants are generally the slowest with the largest memory footprint, because of their less-efficient memory layout and the pointer-indirection necessary to implement generic types;
     989this inefficiency is exacerbated by the second level of generic types in the pair-based benchmarks.
     990By contrast, the \CFA and \CC variants run in roughly equivalent time for both the integer and pair of @_Bool@ and @char@ because the storage layout is equivalent, with the inlined libraries (\ie no separate compilation) and greater maturity of the \CC compiler contributing to its lead.
      992\CCV is slower than C largely due to the cost of runtime type-checking of down-casts (implemented with @dynamic_cast@).
     992There are two outliers in the graph for \CFA: all prints and pop of @pair@.
     993Both of these cases result from the complexity of the C-generated polymorphic code, so that the GCC compiler is unable to optimize some dead code and condense nested calls.
     994A compiler designed for \CFA could easily perform these optimizations.
     995Finally, the binary size for \CFA is larger because of static linking with the \CFA libraries.
     996
     997\CFA is also competitive in terms of source code size, measured as a proxy for programmer effort. The line counts in Table~\ref{tab:eval} include implementations of @pair@ and @stack@ types for all four languages for purposes of direct comparison, though it should be noted that \CFA and \CC have pre-written data structures in their standard libraries that programmers would generally use instead. Use of these standard library types has minimal impact on the performance benchmarks, but shrinks the \CFA and \CC benchmarks to 73 and 54 lines, respectively.
     998On the other hand, C does not have a generic collections-library in its standard distribution, resulting in frequent reimplementation of such collection types by C programmers.
     999\CCV does not use the \CC standard template library by construction, and in fact includes the definition of @object@ and wrapper classes for @bool@, @char@, @int@, and @const char *@ in its line count, which inflates this count somewhat, as an actual object-oriented language would include these in the standard library;
     1000with their omission the \CCV line count is similar to C.
     1001We justify the given line count by noting that many object-oriented languages do not allow implementing new interfaces on library types without subclassing or wrapper types, which may be similarly verbose.
     1002
     1003Raw line-count, however, is a fairly rough measure of code complexity;
     1004another important factor is how much type information the programmer must manually specify, especially where that information is not checked by the compiler.
     1005Such unchecked type information produces a heavier documentation burden and increased potential for runtime bugs, and is much less common in \CFA than C, with its manually specified function pointers arguments and format codes, or \CCV, with its extensive use of un-type-checked downcasts (\eg @object@ to @integer@ when popping a stack, or @object@ to @printable@ when printing the elements of a @pair@).
     1006To quantify this, the ``redundant type annotations'' line in Table~\ref{tab:eval} counts the number of lines on which the type of a known variable is re-specified, either as a format specifier, explicit downcast, type-specific function, or by name in a @sizeof@, struct literal, or @new@ expression.
      1007The \CC benchmark uses two redundant type annotations to create new stack nodes, while the C and \CCV benchmarks have several such annotations spread throughout their code.
     1008The two instances in which the \CFA benchmark still uses redundant type specifiers are to cast the result of a polymorphic @malloc@ call (the @sizeof@ argument is inferred by the compiler).
     1009These uses are similar to the @new@ expressions in \CC, though the \CFA compiler's type resolver should shortly render even these type casts superfluous.
     1010
    8571011
    8581012\section{Related Work}
    8591013
    860 \CC is the existing language it is most natural to compare \CFA to, as they are both more modern extensions to C with backwards source compatibility. The most fundamental difference in approach between \CC and \CFA is their approach to this C compatibility. \CC does provide fairly strong source backwards compatibility with C, but is a dramatically more complex language than C, and imposes a steep learning curve to use many of its extension features. For instance, in a break from general C practice, template code is typically written in header files, with a variety of subtle restrictions implied on its use by this choice, while the other polymorphism mechanism made available by \CC, class inheritance, requires programmers to learn an entirely new object-oriented programming paradigm; the interaction between templates and inheritance is also quite complex. \CFA, by contrast, has a single facility for polymorphic code, one which supports separate compilation and the existing procedural paradigm of C code. A major difference between the approaches of \CC and \CFA to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA. One of the major limiting factors of \CC's approach is that templates cannot be separately compiled, and, until concepts~\citep{C++Concepts} are standardized (currently anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors for template expansion failures. By contrast, the explicit nature of assertions in \CFA allows polymorphic functions to be separately compiled, and for their requirements to be checked by the compiler; similarly, \CFA generic types may be opaque, unlike \CC template classes.
    861 
    862 Cyclone also provides capabilities for polymorphic functions and existential types~\citep{Grossman06}, similar in concept to \CFA's @forall@ functions and generic types. Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process. Furthermore, Cyclone's polymorphic functions and types are restricted in that they may only abstract over types with the same layout and calling convention as @void*@, in practice only pointer types and @int@ - in \CFA terms, all Cyclone polymorphism must be dtype-static. This design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, but is more restrictive than \CFA's more general model.
    863 
    864 Apple's Objective-C \citep{obj-c-book} is another industrially successful set of extensions to C. The Objective-C language model is a fairly radical departure from C, adding object-orientation and message-passing. Objective-C implements variadic functions using the C @va_arg@ mechanism, and did not support type-checked generics until recently \citep{xcode7}, historically using less-efficient and more error-prone runtime checking of object types instead. The GObject framework \citep{GObject} also adds object-orientation with runtime type-checking and reference-counting garbage-collection to C; these are much more intrusive feature additions than those provided by \CFA, in addition to the runtime overhead of reference-counting. The Vala programming language \citep{Vala} compiles to GObject-based C, and so adds the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code-bases. Java \citep{Java8} has had generic types and variadic functions since Java~5; Java's generic types are type-checked at compilation and type-erased at runtime, similar to \CFA's, though in Java each object carries its own table of method pointers, while \CFA passes the method pointers separately so as to maintain a C-compatible struct layout. Java variadic functions are simply syntactic sugar for an array of a single type, and therefore less useful than \CFA's heterogeneously-typed variadic functions. Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens.
    865 
    866 D \citep{D}, Go \citep{Go}, and Rust \citep{Rust} are modern, compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Go and \emph{traits} in Rust. However, each language represents dramatic departures from C in terms of language model, and none has the same level of compatibility with C as \CFA. D and Go are garbage-collected languages, imposing the associated runtime overhead. The necessity of accounting for data transfer between the managed Go runtime and the unmanaged C runtime complicates foreign-function interface between Go and C. Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. D restricts garbage collection to its own heap by default, while Rust is not garbage-collected, and thus has a lighter-weight runtime that is more easily interoperable with C. Rust also possesses much more powerful abstraction capabilities for writing generic code than Go. On the other hand, Rust's borrow-checker, while it does provide strong safety guarantees, is complex and difficult to learn, and imposes a distinctly idiomatic programming style on Rust. \CFA, with its more modest safety features, is significantly easier to port C code to, while maintaining the idiomatic style of the original source.
    867 
    868 \section{Conclusion \& Future Work}
    869 
    870 There is ongoing work on a wide range of \CFA feature extensions, including reference types, exceptions, and concurrent programming primitives. In addition to this work, there are some interesting future directions the polymorphism design could take. Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. \CFA polymorphic functions, by contrast, use an approach that is essentially dynamic virtual dispatch. The runtime overhead of this approach is low, but not as low as \CC template functions, and it may be beneficial to provide a mechanism for particularly performance-sensitive code to close this gap. Further research is needed, but two promising approaches are to allow an annotation on polymorphic function call sites that tells the translator to create a template-specialization of the function (provided the code is visible in the current translation unit) or placing an annotation on polymorphic function definitions that instantiates a version of the polymorphic function specialized to some set of types. These approaches are not mutually exclusive, and would allow these performance optimizations to be applied only where most useful to increase performance, without suffering the code bloat or loss of generality of a template expansion approach where it is unnecessary.
    871 
    872 In conclusion, the authors' design for generic types and tuples, unlike those available in existing work, is both reusable and type-checked, while still supporting a full range of C features, including separately-compiled modules. We have experimentally validated the performance of our design against both \CC and standard C, showing it is \TODO{shiny, cap'n}.
     1014
     1015\subsection{Polymorphism}
     1016
     1017\CC is the most similar language to \CFA;
     1018both are extensions to C with source and runtime backwards compatibility.
     1019The fundamental difference is in their engineering approach to C compatibility and programmer expectation.
     1020While \CC provides good backwards compatibility with C, it has a steep learning curve for many of its extensions.
     1021For example, polymorphism is provided via three disjoint mechanisms: overloading, inheritance, and templates.
      1022The overloading is restricted because resolution does not use the return type, inheritance requires learning object-oriented programming and coping with a restricted nominal-inheritance hierarchy, templates cannot be separately compiled resulting in compilation/code bloat and poor error messages, and determining how these mechanisms interact and which to use is confusing.
     1023In contrast, \CFA has a single facility for polymorphic code supporting type-safe separate-compilation of polymorphic functions and generic (opaque) types, which uniformly leverage the C procedural paradigm.
     1024The key mechanism to support separate compilation is \CFA's \emph{explicit} use of assumed properties for a type.
      1025Until \CC concepts~\citep{C++Concepts} are standardized (anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors during template expansion;
     1026furthermore, \CC concepts are restricted to template polymorphism.
     1027
     1028Cyclone~\citep{Grossman06} also provides capabilities for polymorphic functions and existential types, similar to \CFA's @forall@ functions and generic types.
     1029Cyclone existential types can include function pointers in a construct similar to a virtual function-table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process.
     1030Furthermore, Cyclone's polymorphic functions and types are restricted to abstraction over types with the same layout and calling convention as @void *@, \ie only pointer types and @int@.
     1031In \CFA terms, all Cyclone polymorphism must be dtype-static.
     1032While the Cyclone design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, it is more restrictive than \CFA's general model.
     1033\citet{Smith98} present Polymorphic C, an ML dialect with polymorphic functions and C-like syntax and pointer types; it lacks many of C's features, however, most notably structure types, and so is not a practical C replacement.
     1034
     1035\citet{obj-c-book} is an industrially successful extension to C.
     1036However, Objective-C is a radical departure from C, using an object-oriented model with message-passing.
     1037Objective-C did not support type-checked generics until recently \citet{xcode7}, historically using less-efficient runtime checking of object types.
     1038The~\citet{GObject} framework also adds object-oriented programming with runtime type-checking and reference-counting garbage-collection to C;
     1039these features are more intrusive additions than those provided by \CFA, in addition to the runtime overhead of reference-counting.
     1040\citet{Vala} compiles to GObject-based C, adding the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code-bases.
     1041Java~\citep{Java8} included generic types in Java~5, which are type-checked at compilation and type-erased at runtime, similar to \CFA's.
     1042However, in Java, each object carries its own table of method pointers, while \CFA passes the method pointers separately to maintain a C-compatible layout.
     1043Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens.
     1044
      1045D~\citep{D}, Go, and Rust~\citep{Rust} are modern, compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Go and \emph{traits} in Rust.
     1046However, each language represents a significant departure from C in terms of language model, and none has the same level of compatibility with C as \CFA.
     1047D and Go are garbage-collected languages, imposing the associated runtime overhead.
     1048The necessity of accounting for data transfer between managed runtimes and the unmanaged C runtime complicates foreign-function interfaces to C.
     1049Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more.
     1050D restricts garbage collection to its own heap by default, while Rust is not garbage-collected, and thus has a lighter-weight runtime more interoperable with C.
     1051Rust also possesses much more powerful abstraction capabilities for writing generic code than Go.
     1052On the other hand, Rust's borrow-checker provides strong safety guarantees but is complex and difficult to learn and imposes a distinctly idiomatic programming style.
     1053\CFA, with its more modest safety features, allows direct ports of C code while maintaining the idiomatic style of the original source.
     1054
     1055
     1056\subsection{Tuples/Variadics}
     1057
     1058Many programming languages have some form of tuple construct and/or variadic functions, \eg SETL, C, KW-C, \CC, D, Go, Java, ML, and Scala.
     1059SETL~\cite{SETL} is a high-level mathematical programming language, with tuples being one of the primary data types.
     1060Tuples in SETL allow subscripting, dynamic expansion, and multiple assignment.
     1061C provides variadic functions through @va_list@ objects, but the programmer is responsible for managing the number of arguments and their types, so the mechanism is type unsafe.
     1062KW-C~\cite{Buhr94a}, a predecessor of \CFA, introduced tuples to C as an extension of the C syntax, taking much of its inspiration from SETL.
     1063The main contributions of that work were adding MRVF, tuple mass and multiple assignment, and record-field access.
     1064\CCeleven introduced @std::tuple@ as a library variadic template structure.
     1065Tuples are a generalization of @std::pair@, in that they allow for arbitrary length, fixed-size aggregation of heterogeneous values.
      1066Operations include @std::get<N>@ to extract values, @std::tie@ to create a tuple of references used for assignment, and lexicographic comparisons.
     1067\CCseventeen proposes \emph{structured bindings}~\cite{Sutter15} to eliminate pre-declaring variables and use of @std::tie@ for binding the results.
     1068This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must be documented with some other mechanism.
     1069Furthermore, structured bindings are not a full replacement for @std::tie@, as it always declares new variables.
     1070Like \CC, D provides tuples through a library variadic-template structure.
     1071Go does not have tuples but supports MRVF.
     1072Java's variadic functions appear similar to C's but are type-safe using homogeneous arrays, which are less useful than \CFA's heterogeneously-typed variadic functions.
      1073Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML~\cite{sml} and Scala~\cite{Scala}, which decompose tuples using pattern matching.
     1074
     1075
     1076\section{Conclusion and Future Work}
     1077
     1078The goal of \CFA is to provide an evolutionary pathway for large C development-environments to be more productive and safer, while respecting the talent and skill of C programmers.
     1079While other programming languages purport to be a better C, they are in fact new and interesting languages in their own right, but not C extensions.
     1080The purpose of this paper is to introduce \CFA, and showcase two language features that illustrate the \CFA type-system and approaches taken to achieve the goal of evolutionary C extension.
     1081The contributions are a powerful type-system using parametric polymorphism and overloading, generic types, and tuples, which all have complex interactions.
     1082The work is a challenging design, engineering, and implementation exercise.
     1083On the surface, the project may appear as a rehash of similar mechanisms in \CC.
     1084However, every \CFA feature is different than its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supporting separate compilation.
     1085All of these new features are being used by the \CFA development-team to build the \CFA runtime-system.
     1086Finally, we demonstrate that \CFA performance for some idiomatic cases is better than C and close to \CC, showing the design is practically applicable.
     1087
     1088There is ongoing work on a wide range of \CFA feature extensions, including reference types, exceptions, concurrent primitives and modules.
     1089(While all examples in the paper compile and run, a public beta-release of \CFA will take another 8--12 months to finalize these additional extensions.)
     1090In addition, there are interesting future directions for the polymorphism design.
     1091Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions.
     1092\CFA polymorphic functions use dynamic virtual-dispatch;
     1093the runtime overhead of this approach is low, but not as low as inlining, and it may be beneficial to provide a mechanism for performance-sensitive code.
     1094Two promising approaches are an @inline@ annotation at polymorphic function call sites to create a template-specialization of the function (provided the code is visible) or placing an @inline@ annotation on polymorphic function-definitions to instantiate a specialized version for some set of types.
     1095These approaches are not mutually exclusive and allow performance optimizations to be applied only when necessary, without suffering global code-bloat.
     1096In general, we believe separate compilation, producing smaller code, works well with loaded hardware-caches, which may offset the benefit of larger inlined-code.
     1097
    8731098
    8741099\begin{acks}
    875 The authors would like to thank Magnus Madsen for valuable editorial feedback.
    876 
    877 This work is supported in part by a corporate partnership with \grantsponsor{Huawei}{Huawei Ltd.}{http://www.huawei.com}\ and the first author's \grantsponsor{NSERC-PGS}{NSERC PGS D}{http://www.nserc-crsng.gc.ca/Students-Etudiants/PG-CS/BellandPostgrad-BelletSuperieures_eng.asp} scholarship.
     1100The authors would like to recognize the design assistance of Glen Ditchfield, Richard Bilson, and Thierry Delisle on the features described in this paper, and thank Magnus Madsen and the three anonymous reviewers for valuable feedback.
     1101This work is supported in part by a corporate partnership with \grantsponsor{Huawei}{Huawei Ltd.}{http://www.huawei.com}, and Aaron Moss and Peter Buhr are funded by the \grantsponsor{Natural Sciences and Engineering Research Council} of Canada.
     1102% the first author's \grantsponsor{NSERC-PGS}{NSERC PGS D}{http://www.nserc-crsng.gc.ca/Students-Etudiants/PG-CS/BellandPostgrad-BelletSuperieures_eng.asp} scholarship.
    8781103\end{acks}
     1104
    8791105
    8801106\bibliographystyle{ACM-Reference-Format}
    8811107\bibliography{cfa}
     1108
     1109
     1110\appendix
     1111
     1112\section{Benchmark Stack Implementation}
     1113\label{sec:BenchmarkStackImplementation}
     1114
     1115\lstset{basicstyle=\linespread{0.9}\sf\small}
     1116
     1117Throughout, @/***/@ designates a counted redundant type annotation.
     1118
     1119\medskip\noindent
     1120\CFA
     1121\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
     1122forall(otype T) struct stack_node {
     1123        T value;
     1124        stack_node(T) * next;
     1125};
     1126forall(otype T) void ?{}(stack(T) * s) { (&s->head){ 0 }; }
     1127forall(otype T) void ?{}(stack(T) * s, stack(T) t) {
     1128        stack_node(T) ** crnt = &s->head;
     1129        for ( stack_node(T) * next = t.head; next; next = next->next ) {
     1130                *crnt = ((stack_node(T) *)malloc()){ next->value }; /***/
     1131                stack_node(T) * acrnt = *crnt;
     1132                crnt = &acrnt->next;
     1133        }
     1134        *crnt = 0;
     1135}
     1136forall(otype T) stack(T) ?=?(stack(T) * s, stack(T) t) {
     1137        if ( s->head == t.head ) return *s;
     1138        clear(s);
     1139        s{ t };
     1140        return *s;
     1141}
     1142forall(otype T) void ^?{}(stack(T) * s) { clear(s); }
     1143forall(otype T) _Bool empty(const stack(T) * s) { return s->head == 0; }
     1144forall(otype T) void push(stack(T) * s, T value) {
     1145        s->head = ((stack_node(T) *)malloc()){ value, s->head }; /***/
     1146}
     1147forall(otype T) T pop(stack(T) * s) {
     1148        stack_node(T) * n = s->head;
     1149        s->head = n->next;
     1150        T x = n->value;
     1151        ^n{};
     1152        free(n);
     1153        return x;
     1154}
     1155forall(otype T) void clear(stack(T) * s) {
     1156        for ( stack_node(T) * next = s->head; next; ) {
     1157                stack_node(T) * crnt = next;
     1158                next = crnt->next;
     1159                delete(crnt);
     1160        }
     1161        s->head = 0;
     1162}
     1163\end{lstlisting}
     1164
     1165\medskip\noindent
     1166\CC
     1167\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
     1168template<typename T> class stack {
     1169        struct node {
     1170                T value;
     1171                node * next;
     1172                node( const T & v, node * n = nullptr ) : value(v), next(n) {}
     1173        };
     1174        node * head;
     1175        void copy(const stack<T>& o) {
     1176                node ** crnt = &head;
      1177                for ( node * next = o.head; next; next = next->next ) {
     1178                        *crnt = new node{ next->value }; /***/
     1179                        crnt = &(*crnt)->next;
     1180                }
     1181                *crnt = nullptr;
     1182        }
     1183  public:
     1184        stack() : head(nullptr) {}
     1185        stack(const stack<T>& o) { copy(o); }
     1186        stack(stack<T> && o) : head(o.head) { o.head = nullptr; }
     1187        ~stack() { clear(); }
     1188        stack & operator= (const stack<T>& o) {
     1189                if ( this == &o ) return *this;
     1190                clear();
     1191                copy(o);
     1192                return *this;
     1193        }
     1194        stack & operator= (stack<T> && o) {
     1195                if ( this == &o ) return *this;
     1196                head = o.head;
     1197                o.head = nullptr;
     1198                return *this;
     1199        }
     1200        bool empty() const { return head == nullptr; }
     1201        void push(const T & value) { head = new node{ value, head };  /***/ }
     1202        T pop() {
     1203                node * n = head;
     1204                head = n->next;
     1205                T x = std::move(n->value);
     1206                delete n;
     1207                return x;
     1208        }
     1209        void clear() {
     1210                for ( node * next = head; next; ) {
     1211                        node * crnt = next;
     1212                        next = crnt->next;
     1213                        delete crnt;
     1214                }
     1215                head = nullptr;
     1216        }
     1217};
     1218\end{lstlisting}
     1219
     1220\medskip\noindent
     1221C
     1222\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
     1223struct stack_node {
     1224        void * value;
     1225        struct stack_node * next;
     1226};
     1227struct stack new_stack() { return (struct stack){ NULL }; /***/ }
     1228void copy_stack(struct stack * s, const struct stack * t, void * (*copy)(const void *)) {
     1229        struct stack_node ** crnt = &s->head;
     1230        for ( struct stack_node * next = t->head; next; next = next->next ) {
     1231                *crnt = malloc(sizeof(struct stack_node)); /***/
     1232                **crnt = (struct stack_node){ copy(next->value) }; /***/
     1233                crnt = &(*crnt)->next;
     1234        }
     1235        *crnt = 0;
     1236}
     1237_Bool stack_empty(const struct stack * s) { return s->head == NULL; }
     1238void push_stack(struct stack * s, void * value) {
     1239        struct stack_node * n = malloc(sizeof(struct stack_node)); /***/
     1240        *n = (struct stack_node){ value, s->head }; /***/
     1241        s->head = n;
     1242}
     1243void * pop_stack(struct stack * s) {
     1244        struct stack_node * n = s->head;
     1245        s->head = n->next;
     1246        void * x = n->value;
     1247        free(n);
     1248        return x;
     1249}
     1250void clear_stack(struct stack * s, void (*free_el)(void *)) {
     1251        for ( struct stack_node * next = s->head; next; ) {
     1252                struct stack_node * crnt = next;
     1253                next = crnt->next;
     1254                free_el(crnt->value);
     1255                free(crnt);
     1256        }
     1257        s->head = NULL;
     1258}
     1259\end{lstlisting}
     1260
     1261\medskip\noindent
     1262\CCV
     1263\begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt]
     1264stack::node::node( const object & v, node * n ) : value( v.new_copy() ), next( n ) {}
     1265void stack::copy(const stack & o) {
     1266        node ** crnt = &head;
     1267        for ( node * next = o.head; next; next = next->next ) {
     1268                *crnt = new node{ *next->value };
     1269                crnt = &(*crnt)->next;
     1270        }
     1271        *crnt = nullptr;
     1272}
     1273stack::stack() : head(nullptr) {}
     1274stack::stack(const stack & o) { copy(o); }
     1275stack::stack(stack && o) : head(o.head) { o.head = nullptr; }
     1276stack::~stack() { clear(); }
     1277stack & stack::operator= (const stack & o) {
     1278        if ( this == &o ) return *this;
     1279        clear();
     1280        copy(o);
     1281        return *this;
     1282}
     1283stack & stack::operator= (stack && o) {
     1284        if ( this == &o ) return *this;
     1285        head = o.head;
     1286        o.head = nullptr;
     1287        return *this;
     1288}
     1289bool stack::empty() const { return head == nullptr; }
     1290void stack::push(const object & value) { head = new node{ value, head }; /***/ }
     1291ptr<object> stack::pop() {
     1292        node * n = head;
     1293        head = n->next;
     1294        ptr<object> x = std::move(n->value);
     1295        delete n;
     1296        return x;
     1297}
     1298void stack::clear() {
     1299        for ( node * next = head; next; ) {
     1300                node * crnt = next;
     1301                next = crnt->next;
     1302                delete crnt;
     1303        }
     1304        head = nullptr;
     1305}
     1306\end{lstlisting}
     1307
     1308
     1309\begin{comment}
     1310
     1311\subsubsection{bench.h}
     1312(\texttt{bench.hpp} is similar.)
     1313
     1314\lstinputlisting{evaluation/bench.h}
     1315
     1316\subsection{C}
     1317
     1318\subsubsection{c-stack.h} ~
     1319
     1320\lstinputlisting{evaluation/c-stack.h}
     1321
     1322\subsubsection{c-stack.c} ~
     1323
     1324\lstinputlisting{evaluation/c-stack.c}
     1325
     1326\subsubsection{c-pair.h} ~
     1327
     1328\lstinputlisting{evaluation/c-pair.h}
     1329
     1330\subsubsection{c-pair.c} ~
     1331
     1332\lstinputlisting{evaluation/c-pair.c}
     1333
     1334\subsubsection{c-print.h} ~
     1335
     1336\lstinputlisting{evaluation/c-print.h}
     1337
     1338\subsubsection{c-print.c} ~
     1339
     1340\lstinputlisting{evaluation/c-print.c}
     1341
     1342\subsubsection{c-bench.c} ~
     1343
     1344\lstinputlisting{evaluation/c-bench.c}
     1345
     1346\subsection{\CFA}
     1347
     1348\subsubsection{cfa-stack.h} ~
     1349
     1350\lstinputlisting{evaluation/cfa-stack.h}
     1351
     1352\subsubsection{cfa-stack.c} ~
     1353
     1354\lstinputlisting{evaluation/cfa-stack.c}
     1355
     1356\subsubsection{cfa-print.h} ~
     1357
     1358\lstinputlisting{evaluation/cfa-print.h}
     1359
     1360\subsubsection{cfa-print.c} ~
     1361
     1362\lstinputlisting{evaluation/cfa-print.c}
     1363
     1364\subsubsection{cfa-bench.c} ~
     1365
     1366\lstinputlisting{evaluation/cfa-bench.c}
     1367
     1368\subsection{\CC}
     1369
     1370\subsubsection{cpp-stack.hpp} ~
     1371
     1372\lstinputlisting[language=c++]{evaluation/cpp-stack.hpp}
     1373
     1374\subsubsection{cpp-print.hpp} ~
     1375
     1376\lstinputlisting[language=c++]{evaluation/cpp-print.hpp}
     1377
     1378\subsubsection{cpp-bench.cpp} ~
     1379
     1380\lstinputlisting[language=c++]{evaluation/cpp-bench.cpp}
     1381
     1382\subsection{\CCV}
     1383
     1384\subsubsection{object.hpp} ~
     1385
     1386\lstinputlisting[language=c++]{evaluation/object.hpp}
     1387
     1388\subsubsection{cpp-vstack.hpp} ~
     1389
     1390\lstinputlisting[language=c++]{evaluation/cpp-vstack.hpp}
     1391
     1392\subsubsection{cpp-vstack.cpp} ~
     1393
     1394\lstinputlisting[language=c++]{evaluation/cpp-vstack.cpp}
     1395
     1396\subsubsection{cpp-vprint.hpp} ~
     1397
     1398\lstinputlisting[language=c++]{evaluation/cpp-vprint.hpp}
     1399
     1400\subsubsection{cpp-vbench.cpp} ~
     1401
     1402\lstinputlisting[language=c++]{evaluation/cpp-vbench.cpp}
     1403\end{comment}
    8821404
    8831405\end{document}
  • doc/proposals/concurrency/concurrency.tex

    r221c2de7 r154fdc8  
    6161\newcommand{\uC}{$\mu$\CC}
    6262\newcommand{\cit}{\textsuperscript{[Citation Needed]}\xspace}
    63 \newcommand{\code}[1]{\lstinline{#1}}
     63\newcommand{\code}[1]{\lstinline[language=CFA]{#1}}
    6464\newcommand{\pseudo}[1]{\lstinline[language=Pseudo]{#1}}
    6565
     
    160160Here, the constructor (\code{?\{\}}) uses the \code{nomutex} keyword to signify that it does not acquire the monitor mutual exclusion when constructing. This semantics exists because an object not yet constructed should never be shared and therefore does not require mutual exclusion. The prefix increment operator uses \code{mutex} to protect the incrementing process from race conditions. Finally, there is a conversion operator from \code{counter_t} to \code{size_t}. This conversion may or may not require the \code{mutex} keyword depending on whether or not reading a \code{size_t} is an atomic operation.
    161161
    162 Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without wualifiers \code{void foo(counter_t & this)} then one could argue that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. It can be argued that \code{nomutex} is the more "normal" behaviour, the \code{nomutex} keyword effectively stating explicitly that "this routine has nothing special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routine \code{void foo(counter_t & this)}. Mandatory keywords would also have the added benefice of being self-documented but at the cost of extra typing. In the end, which solution should be picked is still up for debate. For the reminder of this proposal, the explicit approach is used for clarity.
      162Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without qualifiers \code{void foo(counter_t & this)} then one could argue that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. It can be argued that \code{nomutex} is the more "normal" behaviour, the \code{nomutex} keyword effectively stating explicitly that "this routine has nothing special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routine \code{void foo(counter_t & this)}. Mandatory keywords would also have the added benefit of being self-documenting but at the cost of extra typing. In the end, which solution should be picked is still up for debate. For the remainder of this proposal, the explicit approach is used for clarity.
    163163
    164164The next semantic decision is to establish when mutex/nomutex may be used as a type qualifier. Consider the following declarations:
     
    350350
    351351\subsection{Internal scheduling} \label{insched}
    352 Monitors also need to schedule waiting threads internally as a mean of synchronization. Internal scheduling is one of the simple examples of such a feature. It allows users to declare condition variables and have threads wait and signaled from them. Here is a simple example of such a technique :
    353 
    354 \begin{lstlisting}
    355         mutex struct A {
    356                 condition e;
    357         }
    358 
    359         void foo(A & mutex a) {
    360                 //...
    361                 wait(a.e);
    362                 //...
    363         }
    364 
    365         void bar(A & mutex a) {
    366                 signal(a.e);
    367         }
    368 \end{lstlisting}
    369 
    370 Note that in \CFA, \code{condition} have no particular need to be stored inside a monitor, beyond any software engineering reasons. Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering. This semantic can easily be extended to multi-monitor calls by offering the same guarantee.
     352
    371353\begin{center}
    372354\begin{tabular}{ c @{\hskip 0.65in} c }
    373 Thread 1 & Thread 2 \\
    374 \begin{lstlisting}
    375 void foo(monitor & mutex a,
    376            monitor & mutex b) {
    377         //...
    378         wait(a.e);
    379         //...
    380 }
    381 
    382 foo(a, b);
    383 \end{lstlisting} &\begin{lstlisting}
    384 void bar(monitor & mutex a,
    385            monitor & mutex b) {
    386         signal(a.e);
    387 }
    388 
    389 
    390 
    391 bar(a, b);
     355\begin{lstlisting}[language=Pseudo]
     356acquire A
     357        wait A
     358release A
     359\end{lstlisting}&\begin{lstlisting}[language=Pseudo]
     360acquire A
     361        signal A
     362release A
    392363\end{lstlisting}
    393364\end{tabular}
    394365\end{center}
    395 A direct extension of the single monitor semantics is to release all locks when waiting and transferring ownership of all locks when signalling. However, for the purpose of synchronization it may be usefull to only release some of the locks but keep others. It is possible to support internal scheduling and \gls{group-acquire} without any extra syntax by relying on order of acquisition. Here is an example of the different contexts in which internal scheduling can be used. (Note that here the use of helper routines is irrelevant, only routines acquire mutual exclusion have an impact on internal scheduling):
     366
     367Easy : like uC++
    396368
    397369\begin{center}
    398 \begin{tabular}{|c|c|c|}
    399 Context 1 & Context 2 & Context 3 \\
    400 \hline
    401 \begin{lstlisting}
    402 condition e;
    403 
    404 //acquire a & b
    405 void foo(monitor & mutex a,
    406            monitor & mutex b) {
    407 
    408         wait(e); //release a & b
    409 }
    410 
    411 
    412 
    413 
    414 
    415 
    416 foo(a,b);
    417 \end{lstlisting} &\begin{lstlisting}
    418 condition e;
    419 
    420 //acquire a
    421 void bar(monitor & mutex a,
    422            monitor & nomutex b) {
    423         foo(a,b);
    424 }
    425 
    426 //acquire a & b
    427 void foo(monitor & mutex a,
    428            monitor & mutex b) {
    429         wait(e);  //release a & b
    430 }
    431 
    432 bar(a, b);
    433 \end{lstlisting} &\begin{lstlisting}
    434 condition e;
    435 
    436 //acquire a
    437 void bar(monitor & mutex a,
    438            monitor & nomutex b) {
    439         baz(a,b);
    440 }
    441 
    442 //acquire b
    443 void baz(monitor & nomutex a,
    444            monitor & mutex b) {
    445         wait(e);  //release b
    446 }
    447 
    448 bar(a, b);
     370\begin{tabular}{ c @{\hskip 0.65in} c }
     371\begin{lstlisting}[language=Pseudo]
     372acquire A
     373        acquire B
     374                wait B
     375        release B
     376release A
     377\end{lstlisting}&\begin{lstlisting}[language=Pseudo]
     378acquire A
     379        acquire B
     380                signal B
     381        release B
     382release A
    449383\end{lstlisting}
    450384\end{tabular}
    451385\end{center}
    452386
    453 Context 1 is the simplest way of acquiring more than one monitor (\gls{group-acquire}), using a routine with multiple parameters having the \code{mutex} keyword. Context 2 also uses \gls{group-acquire} as well in routine \code{foo}. However, the routine is called by routine \code{bar}, which only acquires monitor \code{a}. Since monitors can be acquired multiple times this does not cause a deadlock by itself but it does force the acquiring order to \code{a} then \code{b}. Context 3 also forces the acquiring order to be \code{a} then \code{b} but does not use \gls{group-acquire}. The previous example tries to illustrate the semantics that must be established to support releasing monitors in a \code{wait} statement. In all cases, the behavior of the wait statment is to release all the locks that were acquired my the inner-most monitor call. That is \code{a & b} in context 1 and 2 and \code{b} only in context 3. Here are a few other examples of this behavior.
    454 
     387Also easy : like uC++
    455388
    456389\begin{center}
    457 \begin{tabular}{|c|c|c|}
    458 \begin{lstlisting}
    459 condition e;
    460 
    461 //acquire b
    462 void foo(monitor & nomutex a,
    463            monitor & mutex b) {
    464         bar(a,b);
    465 }
    466 
    467 //acquire a
    468 void bar(monitor & mutex a,
    469            monitor & nomutex b) {
    470 
    471         wait(e); //release a
    472                   //keep b
    473 }
    474 
    475 foo(a, b);
    476 \end{lstlisting} &\begin{lstlisting}
    477 condition e;
    478 
    479 //acquire a & b
    480 void foo(monitor & mutex a,
    481            monitor & mutex b) {
    482         bar(a,b);
    483 }
    484 
    485 //acquire b
    486 void bar(monitor & mutex a,
    487            monitor & nomutex b) {
    488 
    489         wait(e); //release b
    490                   //keep a
    491 }
    492 
    493 foo(a, b);
    494 \end{lstlisting} &\begin{lstlisting}
    495 condition e;
    496 
    497 //acquire a & b
    498 void foo(monitor & mutex a,
    499            monitor & mutex b) {
    500         bar(a,b);
    501 }
    502 
    503 //acquire none
    504 void bar(monitor & nomutex a,
    505            monitor & nomutex b) {
    506 
    507         wait(e); //release a & b
    508                   //keep none
    509 }
    510 
    511 foo(a, b);
     390\begin{tabular}{ c @{\hskip 0.65in} c }
     391\begin{lstlisting}[language=Pseudo]
     392acquire A & B
     393        wait A & B
     394release A & B
     395\end{lstlisting}&\begin{lstlisting}[language=Pseudo]
     396acquire A & B
     397        signal A & B
     398release A & B
    512399\end{lstlisting}
    513400\end{tabular}
    514401\end{center}
    515 Note the right-most example is actually a trick pulled on the reader. Monitor state information is stored in thread local storage rather then in the routine context, which means that helper routines and other \code{nomutex} routines are invisible to the runtime system in regards to concurrency. This means that in the right-most example, the routine parameters are completly unnecessary. However, calling this routine from outside a valid monitor context is undefined.
    516 
    517 These semantics imply that in order to release of subset of the monitors currently held, users must write (and name) a routine that only acquires the desired subset and simply calls wait. While users can use this method, \CFA offers the \code{wait_release}\footnote{Not sure if an overload of \code{wait} would work...} which will release only the specified monitors. In the center previous examples, the code in the center uses the \code{bar} routine to only release monitor \code{b}. Using the \code{wait_release} helper, this can be rewritten without having the name two routines :
     402
     403Simplest extension : can be made like uC++ by tying B to A
     404
    518405\begin{center}
    519 \begin{tabular}{ c c c }
    520 \begin{lstlisting}
    521         condition e;
    522 
    523         //acquire a & b
    524         void foo(monitor & mutex a,
    525                    monitor & mutex b) {
    526                 bar(a,b);
    527         }
    528 
    529         //acquire b
    530         void bar(monitor & mutex a,
    531                    monitor & nomutex b) {
    532 
    533                 wait(e); //release b
    534                           //keep a
    535         }
    536 
    537         foo(a, b);
    538 \end{lstlisting} &\begin{lstlisting}
    539         =>
    540 \end{lstlisting} &\begin{lstlisting}
    541         condition e;
    542 
    543         //acquire a & b
    544         void foo(monitor & mutex a,
    545                    monitor & mutex b) {
    546                 wait_release(e,b); //release b
    547                                          //keep a
    548         }
    549 
    550         foo(a, b);
     406\begin{tabular}{ c @{\hskip 0.65in} c }
     407\begin{lstlisting}[language=Pseudo]
     408acquire A
     409        // Code Section 1
     410        acquire B
     411                // Code Section 2
     412                wait A & B
     413                // Code Section 3
     414        release B
     415        // Code Section 4
     416release A
     417\end{lstlisting}&\begin{lstlisting}[language=Pseudo]
     418acquire A
     419        // Code Section 5
     420        acquire B
     421                // Code Section 6
     422                signal A & B
     423                // Code Section 7
     424        release B
     425        // Code Section 8
     426release A
    551427\end{lstlisting}
    552428\end{tabular}
    553429\end{center}
    554430
    555 Regardless of the context in which the \code{wait} statement is used, \code{signal} must be called holding the same set of monitors. In all cases, signal only needs a single parameter, the condition variable that needs to be signalled. But \code{signal} needs to be called from the same monitor(s) that call to \code{wait}. Otherwise, mutual exclusion cannot be properly transferred back to the waiting monitor.
    556 
    557 Finally, an additional semantic which can be very usefull is the \code{signal_block} routine. This routine behaves like signal for all of the semantics discussed above, but with the subtelty that mutual exclusion is transferred to the waiting task immediately rather than wating for the end of the critical section.
    558 \\
     431Hard extension :
     432
     433Incorrect options for the signal :
     434
     435\begin{description}
     436 \item[-] Release B and baton pass after Code Section 8 : Passing b without having it
     437 \item[-] Keep B during Code Section 8 : Can lead to deadlocks since we secretly keep a lock longer than specified by the user
     438 \item[-] Instead of release B transfer A and B to waiter then try to reacquire A before running Code Section 8 : This allows barging
     439\end{description}
     440
      441Since we don't want barging, we need to pass A \& B and somehow block and get A back.
     442
     443\begin{center}
     444\begin{tabular}{ c @{\hskip 0.65in} c }
     445\begin{lstlisting}[language=Pseudo]
     446acquire A
     447        acquire B
     448                acquire C
     449                        wait A & B & C
     450                1: release C
     451        2: release B
     4523: release A
     453\end{lstlisting}&\begin{lstlisting}[language=Pseudo]
     454acquire A
     455        acquire B
     456                acquire C
     457                        signal A & B & C
     458                4: release C
     459        5: release B
     4606: release A
     461\end{lstlisting}
     462\end{tabular}
     463\end{center}
     464
     465To prevent barging :
     466
     467\begin{description}
     468 \item[-] When the signaller hits 4 : pass A, B, C to waiter
     469 \item[-] When the waiter hits 2 : pass A, B to signaller
     470 \item[-] When the signaller hits 5 : pass A to waiter
     471\end{description}
     472
     473
     474\begin{center}
     475\begin{tabular}{ c @{\hskip 0.65in} c }
     476\begin{lstlisting}[language=Pseudo]
     477acquire A
     478        acquire C
     479                acquire B
     480                        wait A & B & C
     481                1: release B
     482        2: release C
     4833: release A
     484\end{lstlisting}&\begin{lstlisting}[language=Pseudo]
     485acquire B
     486        acquire A
     487                acquire C
     488                        signal A & B & C
     489                4: release C
     490        5: release A
     4916: release B
     492\end{lstlisting}
     493\end{tabular}
     494\end{center}
     495
      496To prevent barging : When the signaller hits 4 : pass A, B, C to waiter. When the waiter hits 1, it must release B,
     497
     498\begin{description}
     499 \item[-]
     500 \item[-] When the waiter hits 1 : pass A, B to signaller
     501 \item[-] When the signaller hits 5 : pass A, B to waiter
     502 \item[-] When the waiter hits 2 : pass A to signaller
     503\end{description}
     504
     505% Monitors also need to schedule waiting threads internally as a mean of synchronization. Internal scheduling is one of the simple examples of such a feature. It allows users to declare condition variables and have threads wait and signaled from them. Here is a simple example of such a technique :
     506
     507% \begin{lstlisting}
     508%       mutex struct A {
     509%               condition e;
     510%       }
     511
     512%       void foo(A & mutex a) {
     513%               //...
     514%               wait(a.e);
     515%               //...
     516%       }
     517
     518%       void bar(A & mutex a) {
     519%               signal(a.e);
     520%       }
     521% \end{lstlisting}
     522
     523% Note that in \CFA, \code{condition} have no particular need to be stored inside a monitor, beyond any software engineering reasons. Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering.
     524
     525% As for simple mutual exclusion, these semantics must also be extended to include \gls{group-acquire} :
     526% \begin{center}
     527% \begin{tabular}{ c @{\hskip 0.65in} c }
     528% Thread 1 & Thread 2 \\
     529% \begin{lstlisting}
     530% void foo(A & mutex a,
     531%            A & mutex b) {
     532%       //...
     533%       wait(a.e);
     534%       //...
     535% }
     536
     537% foo(a, b);
     538% \end{lstlisting} &\begin{lstlisting}
     539% void bar(A & mutex a,
     540%            A & mutex b) {
     541%       signal(a.e);
     542% }
     543
     544
     545
     546% bar(a, b);
     547% \end{lstlisting}
     548% \end{tabular}
     549% \end{center}
     550
     551% To define the semantics of internal scheduling, it is important to look at nesting and \gls{group-acquire}. Indeed, beyond concerns about lock ordering, without scheduling the two following pseudo codes are mostly equivalent. In fact, if we assume monitors are ordered alphabetically, these two pseudo codes would probably lead to exactly the same implementation :
     552
     553% \begin{table}[h!]
     554% \centering
     555% \begin{tabular}{c c}
     556% \begin{lstlisting}[language=pseudo]
     557% monitor A, B, C
     558
     559% acquire A
     560%       acquire B & C
     561
     562%                       //Do stuff
     563
     564%       release B & C
     565% release A
     566% \end{lstlisting} &\begin{lstlisting}[language=pseudo]
     567% monitor A, B, C
     568
     569% acquire A
     570%       acquire B
     571%               acquire C
     572%                       //Do stuff
     573%               release C
     574%       release B
     575% release A
     576% \end{lstlisting}
     577% \end{tabular}
     578% \end{table}
     579
     580% Once internal scheduling is introduce however, semantics of \gls{group-acquire} become relevant. For example, let us look into the semantics of the following pseudo-code :
     581
     582% \begin{lstlisting}[language=Pseudo]
     583% 1: monitor A, B, C
     584% 2: condition c1
     585% 3:
     586% 4: acquire A
     587% 5:            acquire A & B & C
     588% 6:                            signal c1
     589% 7:            release A & B & C
     590% 8: release A
     591% \end{lstlisting}
     592
     593% Without \gls{group-acquire} signal simply baton passes the monitor lock on the next release. In the case above, we therefore need to indentify the next release. If line 8 is picked at the release point, then the signal will attempt to pass A \& B \& C, without having ownership of B \& C. Since this violates mutual exclusion, we conclude that line 7 is the only valid location where signalling can occur. The traditionnal meaning of signalling is to transfer ownership of the monitor(s) and immediately schedule the longest waiting task. However, in the discussed case, the signalling thread expects to maintain ownership of monitor A. This can be expressed in two differents ways : 1) the thread transfers ownership of all locks and reacquires A when it gets schedulled again or 2) it transfers ownership of all three monitors and then expects the ownership of A to be transferred back.
     594
     595% However, the question is does these behavior motivate supporting acquireing non-disjoint set of monitors. Indeed, if the previous example was modified to only acquire B \& C at line 5 (an release the accordingly) then in respects to scheduling, we could add the simplifying constraint that all monitors in a bulk will behave the same way, simplifying the problem back to a single monitor problem which has already been solved. For this constraint to be acceptble however, we need to demonstrate that in does not prevent any meaningful possibilities. And, indeed, we can look at the two previous interpretation of the above pseudo-code and conclude that supporting the acquiring of non-disjoint set of monitors does not add any expressiveness to the language.
     596
     597% Option 1 reacquires the lock after the signal statement, this can be rewritten as follows without the need for non-disjoint sets :
     598% \begin{lstlisting}[language=Pseudo]
     599% monitor A, B, C
     600% condition c1
     601
     602% acquire A & B & C
     603%       signal c1
     604% release A & B & C
     605% acquire A
     606
     607% release A
     608% \end{lstlisting}
     609
     610% This pseudo code has almost exaclty the same semantics as the code acquiring intersecting sets of monitors.
     611
     612% Option 2 uses two-way lock ownership transferring instead of reacquiring monitor A. Two-way monitor ownership transfer is normally done using signalBlock semantics, which immedietely transfers ownership of a monitor before getting the ownership back when the other thread no longer needs the monitor. While the example pseudo-code for Option 2 seems toe transfer ownership of A, B and C and only getting A back, this is not a requirement. Getting back all 3 monitors and releasing B and C differs only in performance. For this reason, the second option could arguably be rewritten as :
     613
     614% \begin{lstlisting}[language=Pseudo]
     615% monitor A, B, C
     616% condition c1
     617
     618% acquire A
     619%       acquire B & C
     620%               signalBlock c1
     621%       release B & C
     622% release A
     623% \end{lstlisting}
     624
     625% Obviously, the difference between these two snippets of pseudo code is that the first one transfers ownership of A, B and C while the second one only transfers ownership of B and C. However, this limitation can be removed by allowing user to release extra monitors when using internal scheduling, referred to as extended internal scheduling (pattent pending) from this point on. Extended internal scheduling means the two following pseudo-codes are functionnaly equivalent :
     626% \begin{table}[h!]
     627% \centering
     628% \begin{tabular}{c @{\hskip 0.65in} c}
     629% \begin{lstlisting}[language=pseudo]
     630% monitor A, B, C
     631% condition c1
     632
     633% acquire A
     634%       acquire B & C
     635%               signalBlock c1 with A
     636%       release B & C
     637% release A
     638% \end{lstlisting} &\begin{lstlisting}[language=pseudo]
     639% monitor A, B, C
     640% condition c1
     641
     642% acquire A
     643%       acquire A & B & C
     644%               signal c1
     645%       release A & B & C
     646% release A
     647% \end{lstlisting}
     648% \end{tabular}
     649% \end{table}
     650
     651% It must be stated that the extended internal scheduling only makes sense when using wait and signalBlock, since they need to prevent barging, which cannot be done in the context of signal since the ownership transfer is strictly one-directionnal.
     652
     653% One critic that could arise is that extended internal schedulling is not composable since signalBlock must be explicitly aware of which context it is in. However, this argument is not relevant since acquire A, B and C in a context where a subset of them is already acquired cannot be achieved without spurriously releasing some locks or having an oracle aware of all monitors. Therefore, composability of internal scheduling is no more an issue than composability of monitors in general.
     654
     655% The main benefit of using extended internal scheduling is that it offers the same expressiveness as intersecting monitor set acquiring but greatly simplifies the selection of a leader (or representative) for a group of monitor. Indeed, when using intersecting sets, it is not obvious which set intersects with other sets which means finding a leader representing only the smallest scope is a hard problem. Where as when using disjoint sets, any monitor that would be intersecting must be specified in the extended set, the leader can be chosen as any monitor in the primary set.
     656
     657% We need to make sure the semantics for internally scheduling N monitors are a natural extension of the single monitor semantics. For this reason, we introduce the concept of \gls{mon-ctx}. In terms of context internal scheduling means "releasing a \gls{mon-ctx} and waiting for an other thread to acquire the same \gls{mon-ctx} and baton-pass it back to the initial thread". This definitions requires looking into what a \gls{mon-ctx} is and what the semantics of waiting and baton-passing are.
     658
     659% \subsubsection{Internal scheduling: Context} \label{insched-context}
     660% Monitor scheduling operations are defined in terms of the context they are in. In languages that only supports operations on a single monitor at once, the context is completly defined by which most recently acquired monitors. Indeed, acquiring several monitors will form a stack of monitors which will be released in FILO order. In \CFA, a \gls{mon-ctx} cannot be simply defined by the last monitor that was acquired since \gls{group-acquire} means multiple monitors can be "the last monitor acquired". The \gls{mon-ctx} is therefore defined as the last set of monitors to have been acquired. This means taht when any new monitor is acquired, the group it belongs to is the new \gls{mon-ctx}. Correspondingly, if any monitor is released, the \gls{mon-ctx} reverts back to the context that was used prior to the monitor being acquired. In the most common case, \gls{group-acquire} means every monitor of a group will be acquired in released at the same time. However, since every monitor has its own recursion level, \gls{group-acquire} does not prevent users from reacquiring certain monitors while acquireing new monitors in the same operation. For example :
     661
     662% \begin{lstlisting}
     663% //Forward declarations
     664% monitor a, b, c
     665% void foo( monitor & mutex a,
     666%             monitor & mutex b);
     667% void bar( monitor & mutex a,
     668%             monitor & mutex b);
     669% void baz( monitor & mutex a,
     670%             monitor & mutex b,
     671%             monitor & mutex c);
     672
     673% //Routines defined inline to illustrate context changed compared to the stack
     674
     675% //main thread
     676% foo(a, b) {
     677%       //thread calls foo
     678%       //acquiring context a & b
     679
     680%       baz(a, b) {
     681%               //thread calls baz
     682%               //no context change
     683
     684%               bar(a, b, c) {
     685%                       //thread calls bar
     686%                       //acquiring context a & b & c
     687
     688%                       //Do stuff
     689
     690%                       return;             
     691%                       //call to bar returns
     692%               }
     693%               //context back to a & b
     694
     695%               return;
     696%               //call to baz returns
     697%       }
     698%       //no context change
     699
     700%       return;
     701%       //call to foo returns
     702% }
     703% //context back to initial state
     704
     705% \end{lstlisting}
     706
     707% As illustrated by the previous example, context changes can be caused by only one of the monitors comming into context or going out of context.
     708
     709% \subsubsection{Internal scheduling: Waiting} \label{insched-wait}
     710
     711% \subsubsection{Internal scheduling: Baton Passing} \label{insched-signal}
     712% Baton passing in internal scheduling is done in terms of \code{signal} and \code{signalBlock}\footnote{Arguably, \code{signal_now} is a more evocative name and \code{signal} could be changed appropriately. }. While \code{signalBlock} is the more straight forward way of baton passing, transferring ownership immediately, it must rely on \code{signal} which is why t is discussed first.
     713% \code{signal} has for effect to transfer the current context to another thread when the context would otherwise be released. This means that instead of releasing the concerned monitors, the first thread on the condition ready-queue is scheduled to run. The monitors are not released and when the signalled thread runs, it assumes it regained ownership of all the monitors it had in its context.
     714
     715% \subsubsection{Internal scheduling: Implementation} \label{insched-impl}
     716% Too implement internal scheduling, three things are need : a data structure for waiting tasks, a data structure for signalled task and a leaving procedure to run the signalled task. In the case of both data structures, it is desireable to have to use intrusive data structures in order to prevent the need for any dynamic allocation. However, in both cases being able to queue several items in the same position in a queue is non trivial, even more so in the presence of concurrency. However, within a given \gls{mon-ctx}, all monitors have exactly the same behavior in regards to scheduling. Therefore, the problem of queuing multiple monitors at once can be ignored by choosing one monitor to represent every monitor in a context. While this could prove difficult in other situations, \gls{group-acquire} requires that the monitors be sorted according to some stable predicate. Since monitors are sorted in all contexts, the representative can simply be the first in the list. Choosing a representative means a simple intrusive queue inside the condition is sufficient to implement the data structure for both waiting and signalled monitors.
     717
      718% Since \CFA monitors don't have a complete image of the \gls{mon-ctx}, choosing the representative and maintaining the current context information cannot easily be done by any single monitor. However, as discussed in section [Missing section here], monitor mutual exclusion is implemented using an RAII object which is already in charge of sorting monitors. This object has a complete picture of the \gls{mon-ctx} which means it is well suited to choose the representative and detect context changes.
     719
     720% \newpage
     721% \begin{lstlisting}
     722% void ctor( monitor ** _monitors, int _count ) {
     723%       bool ctx_changed = false;
     724%       for( mon in _monitors ) {
     725%               ctx_changed = acquire( mon ) || ctx_changed;
     726%       }
     727
     728%       if( ctx_changed ) {
     729%               set_representative();
     730%               set_context();
     731%       }
     732% }
     733
     734% void dtor( monitor ** _monitors, int _count ) {
     735%       if( context_will_exit( _monitors, count ) ) {
     736%               baton_pass();
     737%               return;
     738%       }
     739
     740%       for( mon in _monitors ) {
     741%               release( mon );
     742%       }
     743% }
     744
     745% \end{lstlisting}
     746
     747
     748
      749% A direct extension of the single monitor semantics is to release all locks when waiting and transferring ownership of all locks when signalling. However, for the purpose of synchronization it may be useful to only release some of the locks but keep others. It is possible to support internal scheduling and \gls{group-acquire} without any extra syntax by relying on order of acquisition. Here is an example of the different contexts in which internal scheduling can be used. (Note that here the use of helper routines is irrelevant, only routines acquiring mutual exclusion have an impact on internal scheduling):
     750
     751% \begin{table}[h!]
     752% \centering
     753% \begin{tabular}{|c|c|c|}
     754% Context 1 & Context 2 & Context 3 \\
     755% \hline
     756% \begin{lstlisting}
     757% condition e;
     758
     759% //acquire a & b
     760% void foo(monitor & mutex a,
     761%            monitor & mutex b) {
     762
     763%       wait(e); //release a & b
     764% }
     765
     766
     767
     768
     769
     770
     771% foo(a,b);
     772% \end{lstlisting} &\begin{lstlisting}
     773% condition e;
     774
     775% //acquire a
     776% void bar(monitor & mutex a,
     777%            monitor & nomutex b) {
     778%       foo(a,b);
     779% }
     780
     781% //acquire a & b
     782% void foo(monitor & mutex a,
     783%            monitor & mutex b) {
     784%       wait(e);  //release a & b
     785% }
     786
     787% bar(a, b);
     788% \end{lstlisting} &\begin{lstlisting}
     789% condition e;
     790
     791% //acquire a
     792% void bar(monitor & mutex a,
     793%            monitor & nomutex b) {
     794%       baz(a,b);
     795% }
     796
     797% //acquire b
     798% void baz(monitor & nomutex a,
     799%            monitor & mutex b) {
     800%       wait(e);  //release b
     801% }
     802
     803% bar(a, b);
     804% \end{lstlisting}
     805% \end{tabular}
     806% \end{table}
     807
      808% Context 1 is the simplest way of acquiring more than one monitor (\gls{group-acquire}), using a routine with multiple parameters having the \code{mutex} keyword. Context 2 also uses \gls{group-acquire} as well in routine \code{foo}. However, the routine is called by routine \code{bar}, which only acquires monitor \code{a}. Since monitors can be acquired multiple times this does not cause a deadlock by itself but it does force the acquiring order to \code{a} then \code{b}. Context 3 also forces the acquiring order to be \code{a} then \code{b} but does not use \gls{group-acquire}. The previous example tries to illustrate the semantics that must be established to support releasing monitors in a \code{wait} statement. In all cases, the behavior of the wait statement is to release all the locks that were acquired by the inner-most monitor call. That is \code{a & b} in context 1 and 2 and \code{b} only in context 3. Here are a few other examples of this behavior.
     809
     810
     811% \begin{center}
     812% \begin{tabular}{|c|c|c|}
     813% \begin{lstlisting}
     814% condition e;
     815
     816% //acquire b
     817% void foo(monitor & nomutex a,
     818%            monitor & mutex b) {
     819%       bar(a,b);
     820% }
     821
     822% //acquire a
     823% void bar(monitor & mutex a,
     824%            monitor & nomutex b) {
     825
     826%       wait(e); //release a
     827%                 //keep b
     828% }
     829
     830% foo(a, b);
     831% \end{lstlisting} &\begin{lstlisting}
     832% condition e;
     833
     834% //acquire a & b
     835% void foo(monitor & mutex a,
     836%            monitor & mutex b) {
     837%       bar(a,b);
     838% }
     839
     840% //acquire b
     841% void bar(monitor & mutex a,
     842%            monitor & nomutex b) {
     843
     844%       wait(e); //release b
     845%                 //keep a
     846% }
     847
     848% foo(a, b);
     849% \end{lstlisting} &\begin{lstlisting}
     850% condition e;
     851
     852% //acquire a & b
     853% void foo(monitor & mutex a,
     854%            monitor & mutex b) {
     855%       bar(a,b);
     856% }
     857
     858% //acquire none
     859% void bar(monitor & nomutex a,
     860%            monitor & nomutex b) {
     861
     862%       wait(e); //release a & b
     863%                 //keep none
     864% }
     865
     866% foo(a, b);
     867% \end{lstlisting}
     868% \end{tabular}
     869% \end{center}
      870% Note the right-most example is actually a trick pulled on the reader. Monitor state information is stored in thread local storage rather than in the routine context, which means that helper routines and other \code{nomutex} routines are invisible to the runtime system in regards to concurrency. This means that in the right-most example, the routine parameters are completely unnecessary. However, calling this routine from outside a valid monitor context is undefined.
     871
      872% These semantics imply that in order to release a subset of the monitors currently held, users must write (and name) a routine that only acquires the desired subset and simply calls wait. While users can use this method, \CFA offers the \code{wait_release}\footnote{Not sure if an overload of \code{wait} would work...} which will release only the specified monitors. In the previous examples, the code in the center uses the \code{bar} routine to only release monitor \code{b}. Using the \code{wait_release} helper, this can be rewritten without having to name two routines :
     873% \begin{center}
     874% \begin{tabular}{ c c c }
     875% \begin{lstlisting}
     876%       condition e;
     877
     878%       //acquire a & b
     879%       void foo(monitor & mutex a,
     880%                  monitor & mutex b) {
     881%               bar(a,b);
     882%       }
     883
     884%       //acquire b
     885%       void bar(monitor & mutex a,
     886%                  monitor & nomutex b) {
     887
     888%               wait(e); //release b
     889%                         //keep a
     890%       }
     891
     892%       foo(a, b);
     893% \end{lstlisting} &\begin{lstlisting}
     894%       =>
     895% \end{lstlisting} &\begin{lstlisting}
     896%       condition e;
     897
     898%       //acquire a & b
     899%       void foo(monitor & mutex a,
     900%                  monitor & mutex b) {
     901%               wait_release(e,b); //release b
     902%                                        //keep a
     903%       }
     904
     905%       foo(a, b);
     906% \end{lstlisting}
     907% \end{tabular}
     908% \end{center}
     909
      910% Regardless of the context in which the \code{wait} statement is used, \code{signal} must be called holding the same set of monitors. In all cases, signal only needs a single parameter, the condition variable that needs to be signalled. But \code{signal} needs to be called from the same monitor(s) that called \code{wait}. Otherwise, mutual exclusion cannot be properly transferred back to the waiting monitor.
     911
      912% Finally, an additional semantic which can be very useful is the \code{signal_block} routine. This routine behaves like signal for all of the semantics discussed above, but with the subtlety that mutual exclusion is transferred to the waiting task immediately rather than waiting for the end of the critical section.
     913% \\
    559914
    560915% ####### #     # #######         #####   #####  #     # ####### ######
  • doc/proposals/concurrency/glossary.tex

    r221c2de7 r154fdc8  
    1414
    1515\longnewglossaryentry{group-acquire}
    16 {name={bulked acquiring}}
     16{name={bulk acquiring}}
    1717{
    1818Implicitly acquiring several monitors when entering a monitor.
     19}
     20
     21\longnewglossaryentry{mon-ctx}
     22{name={monitor context}}
     23{
     24The state of the current thread regarding which monitors are owned.
    1925}
    2026
  • doc/proposals/concurrency/style.tex

    r221c2de7 r154fdc8  
    11\input{common}                                          % bespoke macros used in the document
     2
     3% \CFADefaultStyle
    24
    35\lstset{
  • doc/proposals/concurrency/version

    r221c2de7 r154fdc8  
    1 0.7.61
     10.7.141
  • doc/rob_thesis/cfa-format.tex

    r221c2de7 r154fdc8  
    7272  morecomment=[n]{/+}{+/},
    7373  morecomment=[n][\color{blue}]{/++}{+/},
     74  % Options
     75  sensitive=true
     76}
     77
     78\lstdefinelanguage{rust}{
     79  % Keywords
     80  morekeywords=[1]{
     81    abstract, alignof, as, become, box,
     82    break, const, continue, crate, do,
     83    else, enum, extern, false, final,
     84    fn, for, if, impl, in,
     85    let, loop, macro, match, mod,
     86    move, mut, offsetof, override, priv,
     87    proc, pub, pure, ref, return,
     88    Self, self, sizeof, static, struct,
     89    super, trait, true,  type, typeof,
     90    unsafe, unsized, use, virtual, where,
     91    while, yield
     92  },
     93  % Strings
     94  morestring=[b]{"},
     95  % Comments
     96  comment=[l]{//},
     97  morecomment=[s]{/*}{*/},
    7498  % Options
    7599  sensitive=true
     
    107131  style=defaultStyle
    108132}
    109 \lstMakeShortInline[basewidth=0.5em,breaklines=true]@  % single-character for \lstinline
     133\lstMakeShortInline[basewidth=0.5em,breaklines=true,basicstyle=\normalsize\ttfamily\color{basicCol}]@  % single-character for \lstinline
    110134
    111135\lstnewenvironment{cfacode}[1][]{
     
    155179  \lstset{
    156180    language = D,
     181    style=defaultStyle,
     182    #1
     183  }
     184}{}
     185
     186\lstnewenvironment{rustcode}[1][]{
     187  \lstset{
     188    language = rust,
    157189    style=defaultStyle,
    158190    #1
  • doc/rob_thesis/conclusions.tex

    r221c2de7 r154fdc8  
    33%======================================================================
    44
    5 Conclusion paragraphs.
     5Adding resource management and tuples to \CFA has been a challenging design, engineering, and implementation exercise.
     6On the surface, the work may appear as a rehash of similar mechanisms in \CC.
     7However, every added feature is different than its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supports separate compilation.
     8All of these new features are being used by the \CFA development-team to build the \CFA runtime system.
     9
     10\section{Constructors and Destructors}
     11\CFA supports the RAII idiom using constructors and destructors.
     12There are many engineering challenges in introducing constructors and destructors, partially since \CFA is not an object-oriented language.
     13By making use of managed types, \CFA programmers are afforded an extra layer of safety and ease of use in comparison to C programmers.
     14While constructors and destructors provide a sensible default behaviour, \CFA allows experienced programmers to declare unmanaged objects to take control of object management for performance reasons.
     15Constructors and destructors as named functions fit the \CFA polymorphism model perfectly, allowing polymorphic code to use managed types seamlessly.
     16
     17\section{Tuples}
     18\CFA can express functions with multiple return values in a way that is simple, concise, and safe.
     19The addition of multiple-return-value functions naturally requires a way to use multiple return values, which begets tuple types.
     20Tuples provide two useful notions of assignment: multiple assignment, allowing simple, yet expressive assignment between multiple variables, and mass assignment, allowing a lossless assignment of a single value across multiple variables.
     21Tuples have a flexible structure that allows the \CFA type-system to decide how to restructure tuples, making it syntactically simple to pass tuples between functions.
     22Tuple types can be combined with polymorphism and tuple conversions can apply during assertion inference to produce a cohesive feel.
     23
     24\section{Variadic Functions}
     25Type-safe variadic functions, with a similar feel to variadic templates, are added to \CFA.
     26The new variadic functions can express complicated recursive algorithms.
     27Unlike variadic templates, it is possible to write @new@ as a library routine and to separately compile @ttype@ polymorphic functions.
     28Variadic functions are statically type checked and provide a user experience that is consistent with that of tuples and polymorphic functions.
     29
     30\section{Future Work}
     31\subsection{Constructors and Destructors}
     32Both \CC and Rust support move semantics, which expand the user's control of memory management by providing the ability to transfer ownership of large data, rather than forcing potentially expensive copy semantics.
     33\CFA currently does not support move semantics, partially due to the complexity of the model.
     34The design space is currently being explored with the goal of finding an alternative to move semantics that provides necessary performance benefits, while reducing the amount of repetition required to create a new type, along with the cognitive burden placed on the user.
     35
     36% One technique being evaluated is whether named return-values can be used to eliminate unnecessary temporaries \cite{Buhr94a}.
     37% For example,
     38% \begin{cfacode}
     39% struct A { ... };
     40% [A x] f(A x);
     41% [A y] g(A y);
     42% [A z] h(A z);
     43
     44% struct A a1, a2;
     45% a2 = h(g(f(a1)));
     46% \end{cfacode}
     47% Here, since both @f@'s argument and return value have the same name and type, the compiler can infer that @f@ returns its argument.
     48% With this knowledge, the compiler can reuse the storage for the argument to @f@ as the argument to @g@.  % TODO: cite Till thesis?
     49
     50Exception handling is among the features expected to be added to \CFA in the near future.
     51For exception handling to properly interact with the rest of the language, it must ensure all RAII guarantees continue to be met.
     52That is, when an exception is raised, it must properly unwind the stack by calling the destructors for any objects that live between the raise and the handler.
     53This can be accomplished either by augmenting the translator to properly emit code that executes the destructors, or by switching destructors to hook into the GCC @cleanup@ attribute \cite[6.32.1]{GCCExtensions}.
     54
     55The @cleanup@ attribute, which is attached to a variable declaration, takes a function name as an argument and schedules that routine to be executed when the variable goes out of scope.
     56\begin{cfacode}
     57struct S { int x; };
     58void __dtor_S(struct S *);
     59{
     60  __attribute__((cleanup(__dtor_S))) struct S s;
     61} // calls __dtor_S(&s)
     62\end{cfacode}
     63This mechanism is known and understood by GCC, so that the destructor is properly called in any situation where a variable goes out of scope, including function returns, branches, and built-in GCC exception handling mechanisms using libunwind.
     64
     65A caveat of this approach is that the @cleanup@ attribute only permits a function that consumes a single argument of type @T *@ for a variable of type @T@.
     66This restriction means that any destructor that consumes multiple arguments (\eg, because it is polymorphic) or any destructor that is a function pointer (\eg, because it is an assertion parameter) must be called through a local thunk.
     67For example,
     68\begin{cfacode}
     69forall(otype T)
     70struct Box {
     71  T x;
     72};
     73forall(otype T) void ^?{}(Box(T) * x); // has implicit parameters
     74
     75forall(otype T)
     76void f(T x) {
     77  T y = x;  // destructor is a function-pointer parameter
     78  Box(T) z = { x }; // destructor has multiple parameters
     79}
     80\end{cfacode}
     81currently generates the following
     82\begin{cfacode}
     83void _dtor_BoxT(  // consumes more than 1 parameter due to assertions
     84  void (*_adapter_PTT)(void (*)(), void *, void *),
     85  void (*_adapter_T_PTT)(void (*)(), void *, void *, void *),
     86  long unsigned int _sizeof_T,
     87  long unsigned int _alignof_T,
     88  void *(*_assign_T_PTT)(void *, void *),
     89  void (*_ctor_PT)(void *),
     90  void (*_ctor_PTT)(void *, void *),
     91  void (*_dtor_PT)(void *),
     92  void *x
     93);
     94
     95void f(
     96  void (*_adapter_PTT)(void (*)(), void *, void *),
     97  void (*_adapter_T_PTT)(void (*)(), void *, void *, void *),
     98  long unsigned int _sizeof_T,
     99  long unsigned int _alignof_T,
     100  void *(*_assign_TT)(void *, void *),
     101  void (*_ctor_T)(void *),
     102  void (*_ctor_TT)(void *, void *),
     103  void (*_dtor_T)(void *),
     104  void *x
     105){
     106  void *y = __builtin_alloca(_sizeof_T);
     107  // constructor call elided
     108
     109  // generic layout computation elided
     110  long unsigned int _sizeof_BoxT = ...;
     111  void *z = __builtin_alloca(_sizeof_BoxT);
     112  // constructor call elided
     113
     114  _dtor_BoxT(  // ^?{}(&z); -- _dtor_BoxT has > 1 arguments
     115    _adapter_PTT,
     116    _adapter_T_PTT,
     117    _sizeof_T,
     118    _alignof_T,
     119    _assign_TT,
     120    _ctor_T,
     121    _ctor_TT,
     122    _dtor_T,
     123    z
     124  );
     125  _dtor_T(y);  // ^?{}(&y); -- _dtor_T is a function pointer
     126}
     127\end{cfacode}
     128Further to this point, every distinct array type will require a thunk for its destructor, where array destructor code is currently inlined, since array destructors hard code the length of the array.
     129
     130For function call temporaries, new scopes have to be added for destructor ordering to remain consistent.
     131In particular, the translator currently destroys argument and return value temporary objects as soon as the statement they were created for ends.
     132In order for this behaviour to be maintained, new scopes have to be added around every statement that contains a function call.
     133Since a nested expression can raise an exception, care must be taken when destroying temporary objects.
     134One way to achieve this is to split statements at every function call, to provide the correct scoping to destroy objects as necessary.
     135For example,
     136\begin{cfacode}
     137struct S { ... };
     138void ?{}(S *, S);
     139void ^?{}(S *);
     140
     141S f();
     142S g(S);
     143
     144g(f());
     145\end{cfacode}
     146would generate
     147\begin{cfacode}
     148struct S { ... };
     149void _ctor_S(struct S *, struct S);
     150void _dtor_S(struct S *);
     151
     152{
     153  __attribute__((cleanup(_dtor_S))) struct S _tmp1 = f();
     154  __attribute__((cleanup(_dtor_S))) struct S _tmp2 =
     155    (_ctor_S(&_tmp2, _tmp1), _tmp2);
     156  __attribute__((cleanup(_dtor_S))) struct S _tmp3 = g(_tmp2);
     157} // destroy _tmp3, _tmp2, _tmp1
     158\end{cfacode}
     159Note that destructors must be registered after the temporary is fully initialized, since it is possible for initialization expressions to raise exceptions, and a destructor should never be called on an uninitialized object.
     160This requires a slightly strange looking initializer for constructor calls, where a comma expression is used to produce the value of the object being initialized, after the constructor call, conceptually bitwise copying the initialized data into itself.
     161Since this copy is wholly unnecessary, it is easily optimized away.
     162
     163A second approach is to attach an accompanying boolean to every temporary that records whether the object contains valid data, and thus whether the value should be destructed.
     164\begin{cfacode}
     165struct S { ... };
     166void _ctor_S(struct S *, struct S);
     167void _dtor_S(struct S *);
     168
     169struct _tmp_bundle_S {
     170  bool valid;
     171  struct S value;
     172};
     173
     174void _dtor_tmpS(struct _tmp_bundle_S * ret) {
     175  if (ret->valid) {
     176    _dtor_S(&ret->value);
     177  }
     178}
     179
     180{
     181  __attribute__((cleanup(_dtor_tmpS))) struct _tmp_bundle_S _tmp1 = { 0 };
     182  __attribute__((cleanup(_dtor_tmpS))) struct _tmp_bundle_S _tmp2 = { 0 };
     183  __attribute__((cleanup(_dtor_tmpS))) struct _tmp_bundle_S _tmp3 = { 0 };
     184  _tmp2.value = g(
     185    (_ctor_S(
     186      &_tmp2.value,
     187      (_tmp1.value = f(), _tmp1.valid = 1, _tmp1.value)
     188    ), _tmp2.valid = 1, _tmp2.value)
     189  ), _tmp3.valid = 1, _tmp3.value;
     190} // destroy _tmp3, _tmp2, _tmp1
     191\end{cfacode}
     192In particular, the boolean is set immediately after argument construction and immediately after return value copy.
     193The boolean is checked as a part of the @cleanup@ routine, forwarding to the object's destructor if the object is valid.
     194One such type and @cleanup@ routine needs to be generated for every type used in a function parameter or return value.
     195
     196The former approach generates much simpler code, however splitting expressions requires care to ensure that expression evaluation order does not change.
     197Expression ordering has to be performed by a full compiler, so it is possible that the latter approach would be more suited to the \CFA prototype, whereas the former approach is clearly the better option in a full compiler.
     198More investigation is needed to determine whether the translator's current design can easily handle proper expression ordering.
     199
     200As discussed in Section \ref{s:implicit_copy_construction}, return values are destructed with a different @this@ pointer than they are constructed with.
     201This problem can be easily fixed once a full \CFA compiler is built, since it would have full control over the call/return mechanism.
     202In particular, since the callee is aware of where it needs to place the return value, it can construct the return value directly, rather than bitwise copy the internal data.
     203
     204Currently, the special functions are always auto-generated, except for generic types where the type parameter does not have assertions for the corresponding operation.
     205For example,
     206\begin{cfacode}
     207forall(dtype T | sized(T) | { void ?{}(T *); })
     208struct S { T x; };
     209\end{cfacode}
     210only auto-generates the default constructor for @S@, since the member @x@ is missing the other 3 special functions.
     211Once deleted functions have been added, function generation can make use of this information to disable generation of special functions when a member has a deleted function.
     212For example,
     213\begin{cfacode}
     214struct A {};
     215void ?{}(A *) = delete;
     216struct S { A x; };  // does not generate void ?{}(S *);
     217\end{cfacode}
     218
     219Unmanaged objects and their interactions with the managed \CFA environment are an open problem that deserves greater attention.
     220In particular, the interactions between unmanaged objects and copy semantics are subtle and can easily lead to errors.
     221It is possible that the compiler should mark some of these situations as errors by default, and possibly conditionally emit warnings for some situations.
     222Another possibility is to construct, destruct, and assign unmanaged objects using the intrinsic and auto-generated functions.
     223A more thorough examination of the design space for this problem is required.
     224
     225Currently, the \CFA translator does not support any warnings.
     226Ideally, the translator should support optional warnings in the case where it can detect that an object has been constructed twice.
     227For example, forwarding constructor calls are guaranteed to initialize the entire object, so redundant constructor calls can cause problems such as memory leaks, while looking innocuous to a novice user.
     228\begin{cfacode}
     229struct B { ... };
     230struct A {
     231  B x, y, z;
     232};
     233void ?{}(A * a, B x) {
     234  // y, z implicitly default constructed
     235  (&a->x){ ... }; // explicitly construct x
     236} // constructs an entire A
     237void ?{}(A * a) {
     238  (&a->y){}; // initialize y
     239  a{ (B){ ... } }; // forwarding constructor call
     240                   // initializes entire object, including y
     241}
     242\end{cfacode}
     243
     244Finally, while constructors provide a mechanism for establishing invariants, there is currently no mechanism for maintaining invariants without resorting to opaque types.
     245That is, structure fields can be accessed and modified by any block of code without restriction, so while it is possible to ensure that an object is initially set to a valid state, it is not possible to ensure that it remains in a consistent state throughout its lifetime.
     246A popular technique for ensuring consistency in object-oriented programming languages is to provide access modifiers such as @private@, which provides compile-time checks that only privileged code accesses private data.
     247This approach could be added to \CFA, but it requires an idiomatic way of specifying what code is privileged.
     248One possibility is to tie access control into an eventual module system.
     249
     250\subsection{Tuples}
     251Named result values are planned, but not yet implemented.
     252This feature ties nicely into named tuples, as seen in D and Swift.
     253
     254Currently, tuple flattening and structuring conversions are 0-cost.
     255This makes tuples conceptually very simple to work with, but easily causes unnecessary ambiguity in situations where the type system should be able to differentiate between alternatives.
     256Adding an appropriate cost function to tuple conversions will allow tuples to interact with the rest of the programming language more cohesively.
     257
     258\subsection{Variadic Functions}
     259Use of @ttype@ functions currently relies heavily on recursion.
     260\CC has opened variadic templates up so that recursion is not strictly necessary in some cases, and it would be interesting to see if any such cases can be applied to \CFA.
     261
     262\CC supports variadic templated data-types, making it possible to express arbitrary length tuples, arbitrary parameter function objects, and more with generic types.
     263Currently, \CFA does not support @ttype@-parameter generic-types, though there does not appear to be a technical reason that it cannot.
     264Notably, opening up support for this makes it possible to implement the exit form of scope guard (see section \ref{s:ResMgmt}), making it possible to call arbitrary functions at scope exit in idiomatic \CFA.
  • doc/rob_thesis/ctordtor.tex

    r221c2de7 r154fdc8  
    33%======================================================================
    44
    5 % TODO: discuss move semantics; they haven't been implemented, but could be. Currently looking at alternative models. (future work)
    6 
    7 % TODO: as an experiment, implement Andrei Alexandrescu's ScopeGuard http://www.drdobbs.com/cpp/generic-change-the-way-you-write-excepti/184403758?pgno=2
     5% TODO now: as an experiment, implement Andrei Alexandrescu's ScopeGuard http://www.drdobbs.com/cpp/generic-change-the-way-you-write-excepti/184403758?pgno=2
    86% doesn't seem possible to do this without allowing ttype on generic structs?
    97
    10 % If a Cforall constructor is in scope, C style initialization is
    11 % disabled by default.
    12 % * initialization rule: if any constructor is in scope for type T, try
    13 %   to find a matching constructor for the call. If there are no
    14 %   constructors in scope for type T, then attempt to fall back on
    15 %   C-style initialization.
    16 % + if this rule was not in place, it would be easy to accidentally
    17 %   use C-style initialization in certain cases, which could lead to
    18 %   subtle errors [2]
    19 % - this means we need special syntax if we want to allow users to force
    20 %   a C-style initialization (to give users more control)
    21 % - two different declarations in the same scope can be implicitly
    22 %   initialized differently. That is, there may be two objects of type
    23 %   T that are initialized differently because there is a constructor
    24 %   definition between them. This is not technically specific to
    25 %   constructors.
    26 
    27 % C-style initializers can be accessed with @= syntax
    28 % + provides a way to get around the requirement of using a constructor
    29 %   (for advanced programmers only)
    30 % - can break invariants in the type => unsafe
    31 % * provides a way of asserting that a variable is an instance of a
    32 %   C struct (i.e. a POD struct), and so will not be implicitly
    33 %   destructed (this can be useful at times, maybe mitigates the need
    34 %   for move semantics?) [3]
    35 % + can modernize a code base one step at a time
    36 
    37 % Cforall constructors can be used in expressions to initialize any
    38 % piece of memory.
    39 % + malloc() { ... } calls the appropriate constructor on the newly
    40 %   allocated space; the argument is moved into the constructor call
    41 %   without taking its address [4]
    42 % - with the above form, there is still no way to ensure that
    43 %   dynamically allocated objects are constructed. To resolve this,
    44 %   we might want a stronger "new" function which always calls the
    45 %   constructor, although how we accomplish that is currently still
    46 %   unresolved (compiler magic vs. better variadic functions?)
    47 % + This can be used as a placement syntax [5]
    48 % - can call the constructor on an object more than once, which could
    49 %   cause resource leaks and reinitialize const fields (can try to
    50 %   detect and prevent this in some cases)
    51 %   * compiler always tries to implicitly insert a ctor/dtor pair for
    52 %     non-@= objects.
    53 %     * For POD objects, this will resolve to an autogenerated or
    54 %       intrinsic function.
    55 %     * Intrinsic functions are not automatically called. Autogenerated
    56 %       are, because they may call a non-autogenerated function.
    57 %     * destructors are automatically inserted at appropriate branches
    58 %       (e.g. return, break, continue, goto) and at the end of the block
    59 %       in which they are declared.
    60 %   * For @= objects, the compiler never tries to interfere and insert
    61 %     constructor and destructor calls for that object. Copy constructor
    62 %     calls do not count, because the object is not the target of the copy
    63 %     constructor.
    64 
    65 % A constructor is declared with the name ?{}
    66 % + combines the look of C initializers with the precedent of ?() being
    67 %   the name for the function call operator
    68 % + it is possible to easily search for all constructors in a project
    69 %   and immediately know that a function is a constructor by seeing the
    70 %   name "?{}"
    71 
    72 % A destructor is declared with the name ^?{}
    73 % + name mirrors a constructor's name, with an extra symbol to
    74 %   distinguish it
    75 % - the symbol '~' cannot be used due to parsing conflicts with the
    76 %   unary '~' (bitwise negation) operator - this conflict exists because
    77 %   we want to allow users to write ^x{}; to destruct x, rather than
    78 %   ^?{}(&x);
    79 
    80 % The first argument of a constructor must be a pointer. The constructed
    81 % type is the base type of the pointer. E.g. void ?{}(T *) is a default
    82 % constructor for a T.
    83 % + can name the argument whatever you like, so not constrained by
    84 %   language keyword "this" or "self", etc.
    85 % - have to explicitly qualify all object members to initialize them
    86 %   (e.g. this->x = 0, rather than just x = 0)
    87 
    88 % Destructors can take arguments other than just the destructed pointer
    89 % * open research problem: not sure how useful this is
    90 
    91 % Pointer constructors
    92 % + can construct separately compiled objects (opaque types) [6]
    93 % + orthogonal design, follows directly from the definition of the first
    94 %   argument of a constructor
    95 % - may require copy constructor or move constructor (or equivalent)
    96 %   for correct implementation, which may not be obvious to everyone
    97 % + required feature for the prelude to specify as much behavior as possible
    98 %   (similar to pointer assignment operators in this respect)
    99 
    100 % Designations can only be used for C-style initialization
    101 % * designation for constructors is equivalent to designation for any
    102 %   general function call. Since a function prototype can be redeclared
    103 %   many times, with arguments named differently each time (or not at
    104 %   all!), this is considered to be an undesirable feature. We could
    105 %   construct some set of rules to allow this behaviour, but it is
    106 %   probably more trouble than it's worth, and no matter what we choose,
    107 %   it is not likely to be obvious to most people.
    108 
    109 % Constructing an anonymous member [7]
    110 % + same as with calling any other function on an anonymous member
    111 %   (implicit conversion by the compiler)
    112 % - may be some cases where this is ambiguous => clarify with a cast
    113 %   (try to design APIs to avoid sharing function signatures between
    114 %   composed types to avoid this)
    115 
    116 % Default Constructors and Destructors are called implicitly
    117 % + cannot forget to construct or destruct an object
    118 % - requires special syntax to specify that an object is not to be
    119 %   constructed (@=)
    120 % * an object will not be implicitly constructed OR destructed if
    121 %   explicitly initialized like a C object (@= syntax)
    122 % * an object will be destructed if there are no constructors in scope
    123 %   (even though it is initialized like a C object) [8]
    124 
    125 % An object which changes from POD type to non POD type will not change
    126 % the semantics of a type containing it by composition
    127 % * That is, constructors will not be regenerated at the point where
    128 %   an object changes from POD type to non POD type, because this could
    129 %   cause a cascade of constructors being regenerated for many other
    130 %   types. Further, there is precedence for this behaviour in other
    131 %   facets of Cforall's design, such as how nested functions interact.
    132 % * This behaviour can be simplified in a language without declaration
    133 %   before use, because a type can be classified as POD or non POD
    134 %   (rather than potentially changing between the two at some point) at
    135 %   at the global scope (which is likely the most common case)
    136 % * [9]
    137 
    138 % Changes to polymorphic type classes
    139 % * dtype and ftype remain the same
    140 % * forall(otype T) is currently essentially the same as
    141 %   forall(dtype T | { @size(T); void ?=?(T *, T); }).
    142 %   The big addition is that you can declare an object of type T, rather
    143 %   than just a pointer to an object of type T since you know the size,
    144 %   and you can assign into a T.
    145 %   * this definition is changed to add default constructor and
    146 %     destructor declarations, to remain consistent with what type meant
    147 %     before the introduction of constructors and destructors.
    148 %     * that is, forall(type T) is now essentially the same as
    149 %       forall(dtype T | { @size(T); void ?=?(T *, T);
    150 %                          void ?{}(T *); void ^?{}(T *); })
    151 %     + this is required to make generic types work correctly in
    152 %       polymorphic functions
    153 %     ? since declaring a constructor invalidates the autogenerated
    154 %       routines, it is possible for a type to have constructors, but
    155 %       not default constructors. That is, it might be the case that
    156 %       you want to write a polymorphic function for a type which has
    157 %       a size, but non-default constructors? Some options:
    158 %       * declaring a constructor as a part of the assertions list for
    159 %         a type declaration invalidates the default, so
    160 %         forall(otype T | { void ?{}(T *, int); })
    161 %         really means
    162 %         forall(dtype T | { @size(T); void ?=?(T *, T);
    163 %                            void ?{}(T *, int); void ^?{}(T *); })
    164 %       * force users to fully declare the assertions list like the
    165 %         above in this case (this seems very undesirable)
    166 %       * add another type class with the current desugaring of type
    167 %         (just size and assignment)
    168 %       * provide some way of subtracting from an existing assertions
    169 %         list (this might be useful to have in general)
    170 
    171 % Implementation issues:
    172 % Changes to prelude/autogen or built in defaults?
    173 % * pointer ctors/dtors [prelude]
    174 %   * other pointer type routines are declared in the prelude, and this
    175 %     doesn't seem like it should be any different
    176 % * basic type ctors/dtors [prelude]
    177 %   * other basic type routines are declared in the prelude, and this
    178 %     doesn't seem like it should be any different
    179 % ? aggregate types [undecided, but leaning towards autogenerate]
    180 %   * prelude
    181 %     * routines specific to aggregate types cannot be predeclared in
    182 %       the prelude because we don't know the name of every
    183 %       aggregate type in the entire program
    184 %   * autogenerate
    185 %     + default assignment operator is already autogenerated for
    186 %       aggregate types
    187 %       * this seems to lead us in the direction of autogenerating,
    188 %         because we may have a struct which contains other objects
    189 %         that require construction [10]. If we choose not to
    190 %         autogenerate in this case, then objects which are part of
    191 %         other objects by composition will not be constructed unless
    192 %         a constructor for the outer type is explicitly defined
    193 %       * in this case, we would always autogenerate the appropriate
    194 %         constructor(s) for an aggregate type, but just like with
    195 %         basic types, pointer types, and enum types, the constructor
    196 %         call can be elided when when it is not necessary.
    197 %     + constructors will have to be explicitly autogenerated
    198 %       in the case where they are required for a polymorphic function,
    199 %       when no user defined constructor is in scope, which may make it
    200 %       easiest to always autogenerate all appropriate constructors
    201 %     - n+2 constructors would have to be generated for a POD type
    202 %       * one constructor for each number of valid arguments [0, n],
    203 %         plus the copy constructor
    204 %         * this is taking a simplified approach: in C, it is possible
    205 %           to omit the enclosing braces in a declaration, which would
    206 %           lead to a combinatorial explosion of generated constructors.
    207 %           In the interest of keeping things tractable, Cforall may be
    208 %           incompatible with C in this case. [11]
    209 %       * for non-POD types, only autogenerate the default and copy
    210 %         constructors
    211 %       * alternative: generate only the default constructor and
    212 %         special case initialization for any other constructor when
    213 %         only the autogenerated one exists
    214 %         - this is not very sensible, as by the previous point, these
    215 %           constructors may be needed for polymorphic functions
    216 %           anyway.
    217 %     - must somehow distinguish in resolver between autogenerated and
    218 %       user defined constructors (autogenerated should never be chosen
    219 %       when a user defined option exists [check first parameter], even
    220 %       if full signature differs) (this may also have applications
    221 %       to other autogenerated routines?)
    222 %     - this scheme does not naturally support designation (i.e. general
    223 %       functions calls do not support designation), thus these cases
    224 %       will have to be treated specially in either case
    225 %   * defaults
    226 %     * i.e. hardcode a new set of rules for some "appropriate" default
    227 %       behaviour for
    228 %     + when resolving an initialization expression, explicitly check to
    229 %       see if any constructors are in scope. If yes, attempt to resolve
    230 %       to a constructor, and produce an error message if a match is not
    231 %       found. If there are no constructors in scope, resolve to
    232 %       initializing each field individually (C-style)
    233 %     + does not attempt to autogenerate constructors for POD types,
    234 %       which can be seen as a space optimization for the program
    235 %       binary
    236 %     - as stated previously, a polymorphic routine may require these
    237 %       autogenerated constructors, so this doesn't seem like a big win,
    238 %       because this leads to more complicated logic and tracking of
    239 %       which constructors have already been generated
    240 %     - even though a constructor is not explicitly declared or used
    241 %       polymorphically, we might still need one for all uses of a
    242 %       struct (e.g. in the case of composition).
    243 %   * the biggest tradeoff in autogenerating vs. defaulting appears to
    244 %     be in where and how the special code to check if constructors are
    245 %     present is handled. It appears that there are more reasons to
    246 %     autogenerate than not.
    247 
    248 % --- examples
    249 % [1] As an example of using constructors polymorphically, consider a
    250 % slight modification on the foldl example I put on the mailing list a
    251 % few months ago:
    252 
    253 % context iterable(type collection, type element, type iterator) {
    254 %   void ?{}(iterator *, collection); // used to be makeIterator, but can
    255 %                             // idiomatically use constructor
    256 %   int hasNext(iterator);
    257 %   iterator ++?(iterator *);
    258 %   lvalue element *?(iterator);
    259 % };
    260 
    261 
    262 % forall(type collection, type element, type result, type iterator
    263 %   | iterable(collection, element, iterator))
    264 % result foldl(collection c, result acc,
    265 %     result (*reduce)(result, element)) {
    266 %   iterator it = { c };
    267 %   while (hasNext(it)) {
    268 %     acc = reduce(acc, *it);
    269 %     ++it;
    270 %   }
    271 %   return acc;
    272 % }
    273 
    274 % Now foldl makes use of the knowledge that the iterator type has a
    275 % single argument constructor which takes the collection to iterate
    276 % over. This pattern allows polymorphic code to look more natural
    277 % (constructors are generally preferred to named initializer/creation
    278 % routines, e.g. "makeIterator")
    279 
    280 % [2] An example of some potentially dangerous code that we don't want
    281 % to let easily slip through the cracks - if this is really what you
    282 % want, then use @= syntax for the second declaration to quiet the
    283 % compiler.
    284 
    285 % struct A { int x, y, z; }
    286 % ?{}(A *, int);
    287 % ?{}(A *, int, int, int);
    288 
    289 % A a1 = { 1 };         // uses ?{}(A *, int);
    290 % A a2 = { 2, 3 };      // C-style initialization -> no invariants!
    291 % A a3 = { 4, 5, 6 };   // uses ?{}(A *, int, int, int);
    292 
    293 % [3] Since @= syntax creates a C object (essentially a POD, as far as
    294 % the compiler is concerned), the object will not be destructed
    295 % implicitly when it leaves scope, nor will it be copy constructed when
    296 % it is returned. In this case, a memcpy should be equivalent to a move.
    297 
    298 % // Box.h
    299 % struct Box;
     300 % void ?{}(Box **, int);
    301 % void ^?{}(Box **);
    302 % Box * make_fortytwo();
    303 
    304 % // Box.cfa
    305 % Box * make_fortytwo() {
    306 %   Box *b @= {};
    307 %   (&b){ 42 }; // construct explicitly
    308 %   return b; // no destruction, essentially a move?
    309 % }
    310 
    311 % [4] Cforall's typesafe malloc can be composed with constructor
    312 % expressions. It is possible for a user to define their own functions
    313 % similar to malloc and achieve the same effects (e.g. Aaron's example
    314 % of an arena allocator)
    315 
    316 % // CFA malloc
    317 % forall(type T)
    318 % T * malloc() { return (T *)malloc(sizeof(T)); }
    319 
    320 % struct A { int x, y, z; };
    321 % void ?{}(A *, int);
    322 
    323 % int foo(){
    324 %   ...
    325 %   // desugars to:
    326 %   // A * a = ?{}(malloc(), 123);
    327 %   A * a = malloc() { 123 };
    328 %   ...
    329 % }
    330 
    331 % [5] Aaron's example of combining function calls with constructor
    332 % syntax to perform an operation similar to C++'s std::vector::emplace
    333 % (i.e. to construct a new element in place, without the need to
    334 % copy)
    335 
    336 % forall(type T)
    337 % struct vector {
    338 %   T * elem;
    339 %   int len;
    340 %   ...
    341 % };
    342 
    343 % ...
    344 % forall(type T)
    345 % T * vector_new(vector(T) * v) {
    346 %   // reallocate if needed
     347 %   return &v->elem[v->len++];
    348 % }
    349 
    350 % int main() {
    351 %   vector(int) * v = ...
    352 %   vector_new(v){ 42 };  // add element to the end of vector
    353 % }
    354 
    355 % [6] Pointer Constructors. It could be useful to use the existing
    356 % constructor syntax even more uniformly for ADTs. With this, ADTs can
     357 % be initialized in the same manner as any other object in a polymorphic
    358 % function.
    359 
    360 % // vector.h
    361 % forall(type T) struct vector;
    362 % forall(type T) void ?{}(vector(T) **);
    363 % // adds an element to the end
    364 % forall(type T) vector(T) * ?+?(vector(T) *, T);
    365 
    366 % // vector.cfa
    367 % // don't want to expose the implementation to the user and/or don't
    368 % // want to recompile the entire program if the struct definition
    369 % // changes
    370 
    371 % forall(type T) struct vector {
    372 %   T * elem;
    373 %   int len;
    374 %   int capacity;
    375 % };
    376 
    377 % forall(type T) void resize(vector(T) ** v) { ... }
    378 
    379 % forall(type T) void ?{}(vector(T) ** v) {
    380 %   vector(T) * vect = *v = malloc();
    381 %   vect->capacity = 10;
    382 %   vect->len = 0;
    383 %   vect->elem = malloc(vect->capacity);
    384 % }
    385 
     386 % forall(type T) vector(T) * ?+?(vector(T) *v, T elem) {
     387 %   if (v->len == v->capacity) resize(&v);
     388 %   v->elem[v->len++] = elem;
     389 %   return v;
     390 % }
    390 
    391 % // main.cfa
    392 % #include "adt.h"
     393 % forall(type T | { T ?+?(T, int); })
    394 % T sumRange(int lower, int upper) {
    395 %   T x;    // default construct
    396 %   for (int i = lower; i <= upper; i++) {
    397 %     x = x + i;
    398 %   }
    399 %   return x;
    400 % }
    401 
    402 % int main() {
    403 %   vector(int) * numbers = sumRange(1, 10);
    404 %   // numbers is now a vector containing [1..10]
    405 
    406 %   int sum = sumRange(1, 10);
    407 %   // sum is now an int containing the value 55
    408 % }
    409 
    410 % [7] The current proposal is to use the plan 9 model of inheritance.
    411 % Under this model, all of the members of an unnamed struct instance
    412 % become members of the containing struct. In addition, an object
    413 % can be passed as an argument to a function expecting one of its
    414 % base structs.
    415 
    416 % struct Point {
    417 %   double x;
    418 %   double y;
    419 % };
    420 
    421 % struct ColoredPoint {
    422 %   Point;        // anonymous member (no identifier)
    423 %                 // => a ColoredPoint has an x and y of type double
    424 %   int color;
    425 % };
    426 
    427 % ColoredPoint cp = ...;
    428 % cp.x = 10.3;    // x from Point is accessed directly
    429 % cp.color = 0x33aaff; // color is accessed normally
    430 % foo(cp);        // cp can be used directly as a Point
    431 
    432 % void ?{}(Point *p, double x, double y) {
    433 %   p->x = x;
    434 %   p->y = y;
    435 % }
    436 
    437 % void ?{}(ColoredPoint *cp, double x, double y, int color) {
    438 %   (&cp){ x, y };  // unambiguous, no ?{}(ColoredPoint*,double,double)
    439 %   cp->color = color;
    440 % }
    441 
    442 % struct Size {
    443 %   double width;
    444 %   double height;
    445 % };
    446 
    447 % void ?{}(Size *s, double w, double h) {
     448 %   s->width = w;
     449 %   s->height = h;
    450 % }
    451 
    452 % struct Foo {
    453 %   Point;
    454 %   Size;
    455 % }
    456 
    457 % ?{}(Foo &f, double x, double y, double w, double h) {
     458 %   // (&f,x,y) is ambiguous => is it ?{}(Point*,double,double) or
     459 %   // ?{}(Size*,double,double)? Solve with a cast:
     460 %   ((Point*)&f){ x, y };
     461 %   ((Size*)&f){ w, h };
    462 % }
    463 
    464 % [8] Destructors will be called on objects that were not constructed.
    465 
    466 % struct A { ... };
    467 % ^?{}(A *);
    468 % {
    469 %   A x;
    470 %   A y @= {};
    471 % } // x is destructed, even though it wasn't constructed
    472 %   // y is not destructed, because it is explicitly a C object
    473 
    474 
    475 % [9] A type's constructor is generated at declaration time using
    476 % current information about an object's members. This is analogous to
    477 % the treatment of other operators. For example, an object's assignment
    478 % operator will not change to call the override of a member's assignment
    479 % operator unless the object's assignment is also explicitly overridden.
    480 % This problem can potentially be treated differently in Do, since each
    481 % compilation unit is passed over at least twice (once to gather
    482 % symbol information, once to generate code - this is necessary to
    483 % achieve the "No declarations" goal)
    484 
    485 % struct A { ... };
    486 % struct B { A x; };
    487 % ...
    488 % void ?{}(A *);  // from this point on, A objects will be constructed
    489 % B b1;           // b1 and b1.x are both NOT constructed, because B
    490 %                 // objects are not constructed
    491 % void ?{}(B *);  // from this point on, B objects will be constructed
    492 % B b2;           // b2 and b2.x are both constructed
    493 
    494 % struct C { A x; };
    495 % // implicit definition of ?{}(C*), because C is not a POD type since
    496 % // it contains a non-POD type by composition
    497 % C c;            // c and c.x are both constructed
    498 
    499 % [10] Requiring construction by composition
    500 
    501 % struct A {
    502 %   ...
    503 % };
    504 
    505 % // declared ctor disables default c-style initialization of
    506 % // A objects; A is no longer a POD type
    507 % void ?{}(A *);
    508 
    509 % struct B {
    510 %   A x;
    511 % };
    512 
    513 % // B objects can not be C-style initialized, because A objects
    514 % // must be constructed => B objects are transitively not POD types
    515 % B b; // b.x must be constructed, but B is not constructible
    516 %      // => must autogenerate ?{}(B *) after struct B definition,
    517 %      // which calls ?{}(&b.x)
    518 
    519 % [11] Explosion in the number of generated constructors, due to strange
    520 % C semantics.
    521 
    522 % struct A { int x, y; };
    523 % struct B { A u, v, w; };
    524 
    525 % A a = { 0, 0 };
    526 
    527 % // in C, you are allowed to do this
    528 % B b1 = { 1, 2, 3, 4, 5, 6 };
    529 % B b2 = { 1, 2, 3 };
    530 % B b3 = { a, a, a };
    531 % B b4 = { a, 5, 4, a };
    532 % B b5 = { 1, 2, a, 3 };
    533 
    534 % // we want to disallow b1, b2, b4, and b5 in Cforall.
    535 % // In particular, we will autogenerate these constructors:
    536 % void ?{}(A *);             // default/0 parameters
    537 % void ?{}(A *, int);        // 1 parameter
    538 % void ?{}(A *, int, int);   // 2 parameters
    539 % void ?{}(A *, const A *);  // copy constructor
    540 
    541 % void ?{}(B *);             // default/0 parameters
    542 % void ?{}(B *, A);          // 1 parameter
    543 % void ?{}(B *, A, A);       // 2 parameters
    544 % void ?{}(B *, A, A, A);    // 3 parameters
    545 % void ?{}(B *, const B *);  // copy constructor
    546 
    547 % // we will not generate constructors for every valid combination
    548 % // of members in C. For example, we will not generate
    549 % void ?{}(B *, int, int, int, int, int, int);   // b1 would need this
    550 % void ?{}(B *, int, int, int);                  // b2 would need this
    551 % void ?{}(B *, A, int, int, A);                 // b4 would need this
    552 % void ?{}(B *, int, int, A, int);               // b5 would need this
    553 % // and so on
    554 
    555 
    556 
    557 % TODO: talk somewhere about compound literals?
    558 
    5598Since \CFA is a true systems language, it does not provide a garbage collector.
    560 As well, \CFA is not an object-oriented programming language, i.e. structures cannot have routine members.
     9As well, \CFA is not an object-oriented programming language, \ie, structures cannot have routine members.
    56110Nevertheless, one important goal is to reduce programming complexity and increase safety.
    56211To that end, \CFA provides support for implicit pre/post-execution of routines for objects, via constructors and destructors.
    56312
    564 % TODO: this is old. remove or refactor
    565 % Manual resource management is difficult.
    566 % Part of the difficulty results from not having any guarantees about the current state of an object.
    567 % Objects can be internally composed of pointers that may reference resources which may or may not need to be manually released, and keeping track of that state for each object can be difficult for the end user.
    568 
    569 % Constructors and destructors provide a mechanism to bookend the lifetime of an object, allowing the designer of a type to establish invariants for objects of that type.
    570 % Constructors guarantee that object initialization code is run before the object can be used, while destructors provide a mechanism that is guaranteed to be run immediately before an object's lifetime ends.
    571 % Constructors and destructors can help to simplify resource management when used in a disciplined way.
    572 % In particular, when all resources are acquired in a constructor, and all resources are released in a destructor, no resource leaks are possible.
    573 % This pattern is a popular idiom in several languages, such as \CC, known as RAII (Resource Acquisition Is Initialization).
    574 
    57513This chapter details the design of constructors and destructors in \CFA, along with their current implementation in the translator.
    576 Generated code samples have been edited to provide comments for clarity and to save on space.
     14Generated code samples have been edited for clarity and brevity.
    57715
    57816\section{Design Criteria}
     
    59230Next, @x@ is assigned the value of @y@.
    59331In the last line, @z@ is implicitly initialized to 0 since it is marked @static@.
     594 The key difference between assignment and initialization is that assignment occurs on a live object (i.e. an object that contains data).
      32The key difference between assignment and initialization is that assignment occurs on a live object (\ie, an object that contains data).
    59533It is important to note that this means @x@ could have been used uninitialized prior to being assigned, while @y@ could not be used uninitialized.
    596 Use of uninitialized variables yields undefined behaviour, which is a common source of errors in C programs. % TODO: *citation*
    597 
    598 Declaration initialization is insufficient, because it permits uninitialized variables to exist and because it does not allow for the insertion of arbitrary code before the variable is live.
    599 Many C compilers give good warnings most of the time, but they cannot in all cases.
    600 \begin{cfacode}
    601 int f(int *);  // never reads the parameter, only writes
    602 int g(int *);  // reads the parameter - expects an initialized variable
     34Use of uninitialized variables yields undefined behaviour, which is a common source of errors in C programs.
     35
     36Initialization of a declaration is strictly optional, permitting uninitialized variables to exist.
     37Furthermore, declaration initialization is limited to expressions, so there is no way to insert arbitrary code before a variable is live, without delaying the declaration.
     38Many C compilers give good warnings for uninitialized variables most of the time, but they cannot in all cases.
     39\begin{cfacode}
     40int f(int *);  // output parameter: never reads, only writes
     41int g(int *);  // input parameter: never writes, only reads,
     42               // so requires initialized variable
    60343
    60444int x, y;
    60545f(&x);  // okay - only writes to x
    606 g(&y);  // will use y uninitialized
    607 \end{cfacode}
    608 Other languages are able to give errors in the case of uninitialized variable use, but due to backwards compatibility concerns, this cannot be the case in \CFA.
    609 
    610 In C, constructors and destructors are often mimicked by providing routines that create and teardown objects, where the teardown function is typically only necessary if the type modifies the execution environment.
     46g(&y);  // uses y uninitialized
     47\end{cfacode}
     48Other languages are able to give errors in the case of uninitialized variable use, but due to backwards compatibility concerns, this is not the case in \CFA.
     49
     50In C, constructors and destructors are often mimicked by providing routines that create and tear down objects, where the tear down function is typically only necessary if the type modifies the execution environment.
    61151\begin{cfacode}
    61252struct array_int {
     
    61454};
    61555struct array_int create_array(int sz) {
    616   return (struct array_int) { malloc(sizeof(int)*sz) };
      56  return (struct array_int) { calloc(sz, sizeof(int)) };
    61757}
    61858void destroy_rh(struct resource_holder * rh) {
     
    63474Furthermore, even with this idiom it is easy to make mistakes, such as forgetting to destroy an object or destroying it multiple times.
    63575
     636 A constructor provides a way of ensuring that the necessary aspects of object initialization are performed, from setting up invariants to providing compile-time checks for appropriate initialization parameters.
      76A constructor provides a way of ensuring that the necessary aspects of object initialization are performed, from setting up invariants to providing compile- and run-time checks for appropriate initialization parameters.
    63777This goal is achieved through a guarantee that a constructor is called implicitly after every object is allocated from a type with associated constructors, as part of an object's definition.
    63878Since a constructor is called on every object of a managed type, it is impossible to forget to initialize such objects, as long as all constructors perform some sensible form of initialization.
    63979
    64080In \CFA, a constructor is a function with the name @?{}@.
     81Like other operators in \CFA, the name represents the syntax used to call the constructor, \eg, @struct S = { ... };@.
    64182Every constructor must have a return type of @void@ and at least one parameter, the first of which is colloquially referred to as the \emph{this} parameter, as in many object-oriented programming-languages (however, a programmer can give it an arbitrary name).
    64283The @this@ parameter must have a pointer type, whose base type is the type of object that the function constructs.
     
    65596
    65697In C, if the user creates an @Array@ object, the fields @data@ and @len@ are uninitialized, unless an explicit initializer list is present.
    657 It is the user's responsibility to remember to initialize both of the fields to sensible values.
     98It is the user's responsibility to remember to initialize both of the fields to sensible values, since there are no implicit checks for invalid values or reasonable defaults.
    65899In \CFA, the user can define a constructor to handle initialization of @Array@ objects.
    659100
     
    671112This constructor initializes @x@ so that its @length@ field has the value 10, and its @data@ field holds a pointer to a block of memory large enough to hold 10 @int@s, and sets the value of each element of the array to 0.
    672113This particular form of constructor is called the \emph{default constructor}, because it is called on an object defined without an initializer.
    673 In other words, a default constructor is a constructor that takes a single argument, the @this@ parameter.
    674 
    675 In \CFA, a destructor is a function much like a constructor, except that its name is \lstinline!^?{}!.
    676 A destructor for the @Array@ type can be defined as such.
     114In other words, a default constructor is a constructor that takes a single argument: the @this@ parameter.
     115
     116In \CFA, a destructor is a function much like a constructor, except that its name is \lstinline!^?{}! and it takes only one argument.
     117A destructor for the @Array@ type can be defined as:
    677118\begin{cfacode}
    678119void ^?{}(Array * arr) {
     
    680121}
    681122\end{cfacode}
    682 Since the destructor is automatically called at deallocation for all objects of type @Array@, the memory associated with an @Array@ is automatically freed when the object's lifetime ends.
     123The destructor is automatically called at deallocation for all objects of type @Array@.
     124Hence, the memory associated with an @Array@ is automatically freed when the object's lifetime ends.
    683125The exact guarantees made by \CFA with respect to the calling of destructors are discussed in section \ref{sub:implicit_dtor}.
    684126
     
    691133\end{cfacode}
    692134By the previous definition of the default constructor for @Array@, @x@ and @y@ are initialized to valid arrays of length 10 after their respective definitions.
    693 On line 3, @z@ is initialized with the value of @x@, while on line @4@, @y@ is assigned the value of @x@.
     135On line 2, @z@ is initialized with the value of @x@, while on line 3, @y@ is assigned the value of @x@.
    694136The key distinction between initialization and assignment is that a value to be initialized does not hold any meaningful values, whereas an object to be assigned might.
    695137In particular, these cases cannot be handled the same way because in the former case @z@ does not currently own an array, while @y@ does.
     
    712154The first function is called a \emph{copy constructor}, because it constructs its argument by copying the values from another object of the same type.
    713155The second function is the standard copy-assignment operator.
    714 These four functions are special in that they control the state of most objects.
     156The four functions (default constructor, destructor, copy constructor, and assignment operator) are special in that they safely control the state of most objects.
    715157
    716158It is possible to define a constructor that takes any combination of parameters to provide additional initialization options.
    717 For example, a reasonable extension to the array type would be a constructor that allocates the array to a given initial capacity and initializes the array to a given @fill@ value.
     159For example, a reasonable extension to the array type would be a constructor that allocates the array to a given initial capacity and initializes the elements of the array to a given @fill@ value.
    718160\begin{cfacode}
    719161void ?{}(Array * arr, int capacity, int fill) {
     
    725167}
    726168\end{cfacode}
     169
    727170In \CFA, constructors are called implicitly in initialization contexts.
    728171\begin{cfacode}
    729172Array x, y = { 20, 0xdeadbeef }, z = y;
    730173\end{cfacode}
    731 In \CFA, constructor calls look just like C initializers, which allows them to be inserted into legacy C code with minimal code changes, and also provides a very simple syntax that veteran C programmers are familiar with.
    732 One downside of reusing C initialization syntax is that it isn't possible to determine whether an object is constructed just by looking at its declaration, since that requires knowledge of whether the type is managed at that point.
     174Constructor calls look just like C initializers, which allows them to be inserted into legacy C code with minimal code changes, and also provides a very simple syntax that veteran C programmers are familiar with.
     175One downside of reusing C initialization syntax is that it is not possible to determine whether an object is constructed just by looking at its declaration, since that requires knowledge of whether the type is managed at that point in the program.
    733176
    734177This example generates the following code
     
    748191Destructors are implicitly called in reverse declaration-order so that objects with dependencies are destructed before the objects they are dependent on.
    749192
    750 \subsection{Syntax}
    751 \label{sub:syntax} % TODO: finish this section
     193\subsection{Calling Syntax}
     194\label{sub:syntax}
    752195There are several ways to construct an object in \CFA.
    753196As previously introduced, every variable is automatically constructed at its definition, which is the most natural way to construct an object.
     
    773216A * y = malloc();  // copy construct: ?{}(&y, malloc())
    774217
    775 ?{}(&x);    // explicit construct x
    776 ?{}(y, x);  // explit construct y from x
    777 ^?{}(&x);   // explicit destroy x
     218?{}(&x);    // explicit construct x, second construction
      219?{}(y, x);  // explicit construct y from x, second construction
     220^?{}(&x);   // explicit destroy x, in different order
    778221^?{}(y);    // explicit destroy y
    779222
     
    781224// implicit ^?{}(&x);
    782225\end{cfacode}
    783 Calling a constructor or destructor directly is a flexible feature that allows complete control over the management of a piece of storage.
     226Calling a constructor or destructor directly is a flexible feature that allows complete control over the management of storage.
    784227In particular, constructors double as a placement syntax.
    785228\begin{cfacode}
     
    803246\end{cfacode}
    804247Finally, constructors and destructors support \emph{operator syntax}.
    805 Like other operators in \CFA, the function name mirrors the use-case, in that the first $N$ arguments fill in the place of the question mark.
     248Like other operators in \CFA, the function name mirrors the use-case, in that the question marks are placeholders for the first $N$ arguments.
     249This syntactic form is similar to the new initialization syntax in \CCeleven, except that it is used in expression contexts, rather than declaration contexts.
    806250\begin{cfacode}
    807251struct A { ... };
     
    822266Destructor operator syntax is actually a statement, and requires parentheses for symmetry with constructor syntax.
    823267
     268One of these three syntactic forms should appeal to either C or \CC programmers using \CFA.
     269
     270\subsection{Constructor Expressions}
     271In \CFA, it is possible to use a constructor as an expression.
     272Like other operators, the function name @?{}@ matches its operator syntax.
     273For example, @(&x){}@ calls the default constructor on the variable @x@, and produces @&x@ as a result.
     274A key example for this capability is the use of constructor expressions to initialize the result of a call to @malloc@.
     275\begin{cfacode}
     276struct X { ... };
     277void ?{}(X *, double);
     278X * x = malloc(){ 1.5 };
     279\end{cfacode}
     280In this example, @malloc@ dynamically allocates storage and initializes it using a constructor, all before assigning it into the variable @x@.
     281If this extension is not present, constructing dynamically allocated objects is much more cumbersome, requiring separate initialization of the pointer and initialization of the pointed-to memory.
     282\begin{cfacode}
     283X * x = malloc();
     284x{ 1.5 };
     285\end{cfacode}
     286Not only is this verbose, but it is also more error prone, since this form allows maintenance code to easily sneak in between the initialization of @x@ and the initialization of the memory that @x@ points to.
     287This feature is implemented via a transformation producing the value of the first argument of the constructor, since constructors do not themselves have a return value.
     288Since this transformation results in two instances of the subexpression, care is taken to allocate a temporary variable to hold the result of the subexpression in the case where the subexpression may contain side effects.
     289The previous example generates the following code.
     290\begin{cfacode}
     291struct X *_tmp_ctor;
     292struct X *x = ?{}(  // construct result of malloc
     293  _tmp_ctor=malloc_T( // store result of malloc
     294    sizeof(struct X),
     295    _Alignof(struct X)
     296  ),
     297  1.5
     298), _tmp_ctor; // produce constructed result of malloc
     299\end{cfacode}
     300It should be noted that this technique is not exclusive to @malloc@, and allows a user to write a custom allocator that can be idiomatically used in much the same way as a constructed @malloc@ call.
     301
      302While it is possible to use operator syntax with destructors, a destructor invalidates its argument; thus, destructor operator syntax forms a statement and does not produce a value.
     303
    824304\subsection{Function Generation}
    825 In \CFA, every type is defined to have the core set of four functions described previously.
     305In \CFA, every type is defined to have the core set of four special functions described previously.
    826306Having these functions exist for every type greatly simplifies the semantics of the language, since most operations can simply be defined directly in terms of function calls.
    827307In addition to simplifying the definition of the language, it also simplifies the analysis that the translator must perform.
     
    833313There are several options for user-defined types: structures, unions, and enumerations.
    834314To aid in ease of use, the standard set of four functions is automatically generated for a user-defined type after its definition is completed.
    835 By auto-generating these functions, it is ensured that legacy C code will continue to work correctly in every context where \CFA expects these functions to exist, since they are generated for every complete type.
     315By auto-generating these functions, it is ensured that legacy C code continues to work correctly in every context where \CFA expects these functions to exist, since they are generated for every complete type.
    836316
    837317The generated functions for enumerations are the simplest.
    838 Since enumerations in C are essentially just another integral type, the generated functions behave in the same way that the builtin functions for the basic types work.
    839 % TODO: examples for enums
     318Since enumerations in C are essentially just another integral type, the generated functions behave in the same way that the built-in functions for the basic types work.
    840319For example, given the enumeration
    841320\begin{cfacode}
     
    850329}
    851330void ?{}(enum Colour *_dst, enum Colour _src){
    852   (*_dst)=_src;  // bitwise copy
     331  *_dst=_src;  // bitwise copy
    853332}
    854333void ^?{}(enum Colour *_dst){
     
    856335}
    857336enum Colour ?=?(enum Colour *_dst, enum Colour _src){
    858   return (*_dst)=_src; // bitwise copy
     337  return *_dst=_src; // bitwise copy
    859338}
    860339\end{cfacode}
    861340In the future, \CFA will introduce strongly-typed enumerations, like those in \CC.
    862 The existing generated routines will be sufficient to express this restriction, since they are currently set up to take in values of that enumeration type.
     341The existing generated routines are sufficient to express this restriction, since they are currently set up to take in values of that enumeration type.
    863342Changes related to this feature only need to affect the expression resolution phase, where more strict rules will be applied to prevent implicit conversions from integral types to enumeration types, but should continue to permit conversions from enumeration types to @int@.
    864 In this way, it will still be possible to add an @int@ to an enumeration, but the resulting value will be an @int@, meaning that it won't be possible to reassign the value into an enumeration without a cast.
     343In this way, it is still possible to add an @int@ to an enumeration, but the resulting value is an @int@, meaning it cannot be reassigned to an enumeration without a cast.
    865344
    866345For structures, the situation is more complicated.
    867 For a structure @S@ with members @M$_0$@, @M$_1$@, ... @M$_{N-1}$@, each function @f@ in the standard set calls \lstinline{f(s->M$_i$, ...)} for each @$i$@.
    868 That is, a default constructor for @S@ default constructs the members of @S@, the copy constructor with copy construct them, and so on.
    869 For example given the struct definition
     346Given a structure @S@ with members @M$_0$@, @M$_1$@, ... @M$_{N-1}$@, each function @f@ in the standard set calls \lstinline{f(s->M$_i$, ...)} for each @$i$@.
     347That is, a default constructor for @S@ default constructs the members of @S@, the copy constructor copy constructs them, and so on.
     348For example, given the structure definition
    870349\begin{cfacode}
    871350struct A {
     
    893372}
    894373\end{cfacode}
    895 It is important to note that the destructors are called in reverse declaration order to resolve conflicts in the event there are dependencies among members.
     374It is important to note that the destructors are called in reverse declaration order to prevent conflicts in the event there are dependencies among members.
    896375
    897376In addition to the standard set, a set of \emph{field constructors} is also generated for structures.
    898 The field constructors are constructors that consume a prefix of the struct's member list.
     377The field constructors are constructors that consume a prefix of the structure's member-list.
    899378That is, $N$ constructors are built of the form @void ?{}(S *, T$_{\text{M}_0}$)@, @void ?{}(S *, T$_{\text{M}_0}$, T$_{\text{M}_1}$)@, ..., @void ?{}(S *, T$_{\text{M}_0}$, T$_{\text{M}_1}$, ..., T$_{\text{M}_{N-1}}$)@, where members are copy constructed if they have a corresponding positional argument and are default constructed otherwise.
    900 The addition of field constructors allows structs in \CFA to be used naturally in the same ways that they could be used in C (i.e. to initialize any prefix of the struct), e.g., @A a0 = { b }, a1 = { b, c }@.
     379The addition of field constructors allows structures in \CFA to be used naturally in the same ways as used in C (\ie, to initialize any prefix of the structure), \eg, @A a0 = { b }, a1 = { b, c }@.
    901380Extending the previous example, the following constructors are implicitly generated for @A@.
    902381\begin{cfacode}
     
    911390\end{cfacode}
    912391
    913 For unions, the default constructor and destructor do nothing, as it is not obvious which member if any should be constructed.
     392For unions, the default constructor and destructor do nothing, as it is not obvious which member, if any, should be constructed.
    914393For copy constructor and assignment operations, a bitwise @memcpy@ is applied.
    915394In standard C, a union can also be initialized using a value of the same type as its first member, and so a corresponding field constructor is generated to perform a bitwise @memcpy@ of the object.
    916 An alterantive to this design is to always construct and destruct the first member of a union, to match with the C semantics of initializing the first member of the union.
     395An alternative to this design is to always construct and destruct the first member of a union, to match with the C semantics of initializing the first member of the union.
    917396This approach ultimately feels subtle and unsafe.
    918397Another option is to, like \CC, disallow unions from containing members that are themselves managed types.
     
    947426
    948427% This feature works in the \CFA model, since constructors are simply special functions and can be called explicitly, unlike in \CC. % this sentence isn't really true => placement new
    949 In \CCeleven, this restriction has been loosened to allow unions with managed members, with the caveat that any if there are any members with a user-defined operation, then that operation is not implicitly defined, forcing the user to define the operation if necessary.
     428In \CCeleven, unions may have managed members, with the caveat that if there are any members with a user-defined operation, then that operation is not implicitly defined, forcing the user to define the operation if necessary.
    950429This restriction could easily be added into \CFA once \emph{deleted} functions are added.
    951430
    952431\subsection{Using Constructors and Destructors}
    953 Implicitly generated constructor and destructor calls ignore the outermost type qualifiers, e.g. @const@ and @volatile@, on a type by way of a cast on the first argument to the function.
     432Implicitly generated constructor and destructor calls ignore the outermost type qualifiers, \eg @const@ and @volatile@, on a type by way of a cast on the first argument to the function.
    954433For example,
    955434\begin{cfacode}
     
    970449Here, @&s@ and @&s2@ are cast to unqualified pointer types.
    971450This mechanism allows the same constructors and destructors to be used for qualified objects as for unqualified objects.
    972 Since this applies only to implicitly generated constructor calls, the language does not allow qualified objects to be re-initialized with a constructor without an explicit cast.
     451This rule applies only to implicitly generated constructor calls.
     452Hence, explicitly re-initializing qualified objects with a constructor requires an explicit cast.
     453
     454As discussed in Section \ref{sub:c_background}, compound literals create unnamed objects.
     455This mechanism can continue to be used seamlessly in \CFA with managed types to create temporary objects.
     456The object created by a compound literal is constructed using the provided brace-enclosed initializer-list, and is destructed at the end of the scope it is used in.
     457For example,
     458\begin{cfacode}
      459struct A { int x, y; };
     460void ?{}(A *, int, int);
     461{
     462  int x = (A){ 10, 20 }.x;
     463}
     464\end{cfacode}
     465is equivalent to
     466\begin{cfacode}
     467struct A { int x, y; };
     468void ?{}(A *, int, int);
     469{
     470  A _tmp;
     471  ?{}(&_tmp, 10, 20);
     472  int x = _tmp.x;
      473  ^?{}(&_tmp);
     474}
     475\end{cfacode}
    973476
    974477Unlike \CC, \CFA provides an escape hatch that allows a user to decide at an object's definition whether it should be managed or not.
     
    984487A a2 @= { 0 };  // unmanaged
    985488\end{cfacode}
    986 In this example, @a1@ is a managed object, and thus is default constructed and destructed at the end of @a1@'s lifetime, while @a2@ is an unmanaged object and is not implicitly constructed or destructed.
    987 Instead, @a2->x@ is initialized to @0@ as if it were a C object, due to the explicit initializer.
    988 Existing constructors are ignored when \ateq is used, so that any valid C initializer is able to initialize the object.
    989 
    990 In addition to freedom, \ateq provides a simple path to migrating legacy C code to Cforall, in that objects can be moved from C-style initialization to \CFA gradually and individually.
     489In this example, @a1@ is a managed object, and thus is default constructed and destructed at the start/end of @a1@'s lifetime, while @a2@ is an unmanaged object and is not implicitly constructed or destructed.
     490Instead, @a2->x@ is initialized to @0@ as if it were a C object, because of the explicit initializer.
     491
     492In addition to freedom, \ateq provides a simple path for migrating legacy C code to \CFA, in that objects can be moved from C-style initialization to \CFA gradually and individually.
    991493It is worth noting that the use of unmanaged objects can be tricky to get right, since there is no guarantee that the proper invariants are established on an unmanaged object.
    992494It is recommended that most objects be managed by sensible constructors and destructors, except where absolutely necessary.
    993495
    994 When the user declares any constructor or destructor, the corresponding intrinsic/generated function and all field constructors for that type are hidden, so that they will not be found during expression resolution unless the user-defined function goes out of scope.
    995 Furthermore, if the user declares any constructor, then the intrinsic/generated default constructor is also hidden, making it so that objects of a type may not be default constructable.
    996 This closely mirrors the rule for implicit declaration of constructors in \CC, wherein the default constructor is implicitly declared if there is no user-declared constructor. % TODO: cite C++98 page 186??
     496When a user declares any constructor or destructor, the corresponding intrinsic/generated function and all field constructors for that type are hidden, so that they are not found during expression resolution until the user-defined function goes out of scope.
     497Furthermore, if the user declares any constructor, then the intrinsic/generated default constructor is also hidden, precluding default construction.
     498These semantics closely mirror the rule for implicit declaration of constructors in \CC, wherein the default constructor is implicitly declared if there is no user-declared constructor \cite[p.~186]{ANSI98:C++}.
    997499\begin{cfacode}
    998500struct S { int x, y; };
     
    1001503  S s0, s1 = { 0 }, s2 = { 0, 2 }, s3 = s2;  // okay
    1002504  {
    1003     void ?{}(S * s, int i) { s->x = i*2; }
    1004     S s4;  // error
    1005     S s5 = { 3 };  // okay
    1006     S s6 = { 4, 5 };  // error
     505    void ?{}(S * s, int i) { s->x = i*2; } // locally hide autogen ctors
     506    S s4;  // error, no default constructor
     507    S s5 = { 3 };  // okay, local constructor
     508    S s6 = { 4, 5 };  // error, no field constructor
    1007509    S s7 = s5; // okay
    1008510  }
     
    1012514In this example, the inner scope declares a constructor from @int@ to @S@, which hides the default constructor and field constructors until the end of the scope.
    1013515
    1014 When defining a constructor or destructor for a struct @S@, any members that are not explicitly constructed or destructed are implicitly constructed or destructed automatically.
     516When defining a constructor or destructor for a structure @S@, any members that are not explicitly constructed or destructed are implicitly constructed or destructed automatically.
    1015517If an explicit call is present, then that call is taken in preference to any implicitly generated call.
    1016 A consequence of this rule is that it is possible, unlike \CC, to precisely control the order of construction and destruction of subobjects on a per-constructor basis, whereas in \CC subobject initialization and destruction is always performed based on the declaration order.
     518A consequence of this rule is that it is possible, unlike \CC, to precisely control the order of construction and destruction of sub-objects on a per-constructor basis, whereas in \CC sub-object initialization and destruction is always performed based on the declaration order.
    1017519\begin{cfacode}
    1018520struct A {
     
    1033535}
    1034536\end{cfacode}
    1035 Finally, it is illegal for a subobject to be explicitly constructed for the first time after it is used for the first time.
     537Finally, it is illegal for a sub-object to be explicitly constructed for the first time after it is used for the first time.
    1036538If the translator cannot be reasonably sure that an object is constructed prior to its first use, but is constructed afterward, an error is emitted.
    1037 More specifically, the translator searches the body of a constructor to ensure that every subobject is initialized.
     539More specifically, the translator searches the body of a constructor to ensure that every sub-object is initialized.
    1038540\begin{cfacode}
    1039541void ?{}(A * a, double x) {
     
    1042544}
    1043545\end{cfacode}
    1044 However, if the translator sees a subobject used within the body of a constructor, but does not see a constructor call that uses the subobject as the target of a constructor, then the translator assumes the object is to be implicitly constructed (copy constructed in a copy constructor and default constructed in any other constructor).
     546However, if the translator sees a sub-object used within the body of a constructor, but does not see a constructor call that uses the sub-object as the target of a constructor, then the translator assumes the object is to be implicitly constructed (copy constructed in a copy constructor and default constructed in any other constructor).
    1045547\begin{cfacode}
    1046548void ?{}(A * a) {
     
    1058560} // z, y, w implicitly destructed, in this order
    1059561\end{cfacode}
    1060 If at any point, the @this@ parameter is passed directly as the target of another constructor, then it is assumed that constructor handles the initialization of all of the object's members and no implicit constructor calls are added. % TODO: confirm that this is correct. It might be possible to get subtle errors if you initialize some members then call another constructor... -- in fact, this is basically always wrong. if anything, I should check that such a constructor does not initialize any members, otherwise it'll always initialize the member twice (once locally, once by the called constructor).
     562If at any point, the @this@ parameter is passed directly as the target of another constructor, then it is assumed that constructor handles the initialization of all of the object's members and no implicit constructor calls are added.
    1061563To override this rule, \ateq can be used to force the translator to trust the programmer's discretion.
    1062564This form of \ateq is not yet implemented.
     
    1064566Despite great effort, some forms of C syntax do not work well with constructors in \CFA.
    1065567In particular, constructor calls cannot contain designations (see \ref{sub:c_background}), since this is equivalent to allowing designations on the arguments to arbitrary function calls.
    1066 In C, function prototypes are permitted to have arbitrary parameter names, including no names at all, which may have no connection to the actual names used at function definition.
    1067 Furthermore, a function prototype can be repeated an arbitrary number of times, each time using different names.
    1068568\begin{cfacode}
    1069569// all legal forward declarations in C
     
    1076576f(b:10, a:20, c:30);  // which parameter is which?
    1077577\end{cfacode}
     578In C, function prototypes are permitted to have arbitrary parameter names, including no names at all, which may have no connection to the actual names used at function definition.
     579Furthermore, a function prototype can be repeated an arbitrary number of times, each time using different names.
    1078580As a result, it was decided that any attempt to resolve designated function calls with C's function prototype rules would be brittle, and thus it is not sensible to allow designations in constructor calls.
    1079 % Many other languages do allow named arguments, such as Python and Scala, but they do not allow multiple arbitrarily named forward declarations of a function.
    1080 
    1081 In addition, constructor calls cannot have a nesting depth greater than the number of array components in the type of the initialized object, plus one.
     581
     582\begin{sloppypar}
     583In addition, constructor calls do not support unnamed nesting.
     584\begin{cfacode}
     585struct B { int x; };
     586struct C { int y; };
     587struct A { B b; C c; };
     588void ?{}(A *, B);
     589void ?{}(A *, C);
     590
     591A a = {
     592  { 10 },  // construct B? - invalid
     593};
     594\end{cfacode}
     595In C, nesting initializers means that the programmer intends to initialize sub-objects with the nested initializers.
     596The reason for this omission is to both simplify the mental model for using constructors, and to make initialization simpler for the expression resolver.
     597If this were allowed, it would be necessary for the expression resolver to decide whether each argument to the constructor call could initialize to some argument in one of the available constructors, making the problem highly recursive and potentially much more expensive.
      598That is, in the previous example the line marked as an error could mean construct using @?{}(A *, B)@ or @?{}(A *, C)@, since the inner initializer @{ 10 }@ could be taken as an intermediate object of type @B@ or @C@.
     599In practice, however, there could be many objects that can be constructed from a given @int@ (or, indeed, any arbitrary parameter list), and thus a complete solution to this problem would require fully exploring all possibilities.
     600\end{sloppypar}
     601
     602More precisely, constructor calls cannot have a nesting depth greater than the number of array dimensions in the type of the initialized object, plus one.
    1082603For example,
    1083604\begin{cfacode}
     
    1091612  { {14 }, { 15 } }   // a2[1]
    1092613};
    1093 A a3[4] = {
    1094   { { 11 }, { 12 } },  // error
     614A a3[4] = { // 1 dimension => max depth 2
     615  { { 11 }, { 12 } },  // error, three levels deep
    1095616  { 80 }, { 90 }, { 100 }
    1096617}
    1097618\end{cfacode}
    1098 % TODO: in CFA if the array dimension is empty, no object constructors are added -- need to fix this.
    1099619The body of @A@ has been omitted, since only the constructor interfaces are important.
    1100 In C, having a greater nesting depth means that the programmer intends to initialize subobjects with the nested initializer.
    1101 The reason for this omission is to both simplify the mental model for using constructors, and to make initialization simpler for the expression resolver.
    1102 If this were allowed, it would be necessary for the expression resolver to decide whether each argument to the constructor call could initialize to some argument in one of the available constructors, making the problem highly recursive and potentially much more expensive.
    1103 That is, in the previous example the line marked as an error could mean construct using @?{}(A *, A, A)@, since the inner initializer @{ 11 }@ could be taken as an intermediate object of type @A@ constructed with @?{}(A *, int)@.
    1104 In practice, however, there could be many objects that can be constructed from a given @int@ (or, indeed, any arbitrary parameter list), and thus a complete solution to this problem would require fully exploring all possibilities.
     620
    1105621It should be noted that unmanaged objects can still make use of designations and nested initializers in \CFA.
     622It is simple to overcome this limitation for managed objects by making use of compound literals, so that the arguments to the constructor call are explicitly typed.
    1106623
    1107624\subsection{Implicit Destructors}
    1108625\label{sub:implicit_dtor}
    1109626Destructors are automatically called at the end of the block in which the object is declared.
    1110 In addition to this, destructors are automatically called when statements manipulate control flow to leave a block in which the object is declared, e.g., with return, break, continue, and goto statements.
     627In addition to this, destructors are automatically called when statements manipulate control flow to leave a block in which the object is declared, \eg, with return, break, continue, and goto statements.
    1111628The example below demonstrates a simple routine with multiple return statements.
    1112629\begin{cfacode}
     
    1127644    if (i == 2) return; // destruct x, y
    1128645  } // destruct y
    1129 }
    1130 \end{cfacode}
    1131 
    1132 %% having this feels excessive, but it's here if necessary
    1133 % This procedure generates the following code.
    1134 % \begin{cfacode}
    1135 % void f(int i){
    1136 %   struct A x;
    1137 %   ?{}(&x);
    1138 %   {
    1139 %     struct A y;
    1140 %     ?{}(&y);
    1141 %     {
    1142 %       struct A z;
    1143 %       ?{}(&z);
    1144 %       {
    1145 %         if ((i==0)!=0) {
    1146 %           ^?{}(&z);
    1147 %           ^?{}(&y);
    1148 %           ^?{}(&x);
    1149 %           return;
    1150 %         }
    1151 %       }
    1152 %       if (((i==1)!=0) {
    1153 %           ^?{}(&z);
    1154 %           ^?{}(&y);
    1155 %           ^?{}(&x);
    1156 %           return ;
    1157 %       }
    1158 %       ^?{}(&z);
    1159 %     }
    1160 
    1161 %     if ((i==2)!=0) {
    1162 %       ^?{}(&y);
    1163 %       ^?{}(&x);
    1164 %       return;
    1165 %     }
    1166 %     ^?{}(&y);
    1167 %   }
    1168 
    1169 %   ^?{}(&x);
    1170 % }
    1171 % \end{cfacode}
     646} // destruct x
     647\end{cfacode}
    1172648
    1173649The next example illustrates the use of simple continue and break statements and the manner that they interact with implicit destructors.
     
    1183659\end{cfacode}
    1184660Since a destructor call is automatically inserted at the end of the block, nothing special needs to happen to destruct @x@ in the case where control reaches the end of the loop.
    1185 In the case where @i@ is @2@, the continue statement runs the loop update expression and attemps to begin the next iteration of the loop.
    1186 Since continue is a C statement, which does not understand destructors, a destructor call is added just before the continue statement to ensure that @x@ is destructed.
     661In the case where @i@ is @2@, the continue statement runs the loop update expression and attempts to begin the next iteration of the loop.
     662Since continue is a C statement, which does not understand destructors, it is transformed into a @goto@ statement that branches to the end of the loop, just before the block's destructors, to ensure that @x@ is destructed.
    1187663When @i@ is @3@, the break statement moves control to just past the end of the loop.
    1188 Like the previous case, a destructor call for @x@ is inserted just before the break statement.
    1189 
    1190 \CFA also supports labelled break and continue statements, which allow more precise manipulation of control flow.
    1191 Labelled break and continue allow the programmer to specify which control structure to target by using a label attached to a control structure.
     664Unlike the previous case, the destructor for @x@ cannot be reused, so a destructor call for @x@ is inserted just before the break statement.
     665
     666\CFA also supports labeled break and continue statements, which allow more precise manipulation of control flow.
     667Labeled break and continue allow the programmer to specify which control structure to target by using a label attached to a control structure.
    1192668\begin{cfacode}[emph={L1,L2}, emphstyle=\color{red}]
    1193669L1: for (int i = 0; i < 10; i++) {
    1194670  A x;
    1195   L2: for (int j = 0; j < 10; j++) {
     671  for (int j = 0; j < 10; j++) {
    1196672    A y;
    1197     if (j == 0) {
    1198       continue;    // destruct y
    1199     } else if (j == 1) {
    1200       break;       // destruct y
    1201     } else if (i == 1) {
     673    if (i == 1) {
    1202674      continue L1; // destruct y
    1203675    } else if (i == 2) {
     
    1208680\end{cfacode}
    1209681The statement @continue L1@ begins the next iteration of the outer for-loop.
    1210 Since the semantics of continue require the loop update expression to execute, control branches to the \emph{end} of the outer for loop, meaning that the block destructor for @x@ can be reused, and it is only necessary to generate the destructor for @y@.
    1211 Break, on the other hand, requires jumping out of the loop, so the destructors for both @x@ and @y@ are generated and inserted before the @break L1@ statement.
     682Since the semantics of continue require the loop update expression to execute, control branches to the end of the outer for loop, meaning that the block destructor for @x@ can be reused, and it is only necessary to generate the destructor for @y@.
     683Break, on the other hand, requires jumping out of both loops, so the destructors for both @x@ and @y@ are generated and inserted before the @break L1@ statement.
    1212684
    1213685Finally, an example which demonstrates goto.
     
    1256728}
    1257729\end{cfacode}
    1258 Labelled break and continue are implemented in \CFA in terms of goto statements, so the more constrained forms are precisely goverened by these rules.
     730All break and continue statements are implemented in \CFA in terms of goto statements, so the more constrained forms are precisely governed by these rules.
    1259731
    1260732The next example demonstrates the error case.
     
    1273745
    1274746\subsection{Implicit Copy Construction}
     747\label{s:implicit_copy_construction}
    1275748When a function is called, the arguments supplied to the call are subject to implicit copy construction (and destruction of the generated temporary), and the return value is subject to destruction.
    1276749When a value is returned from a function, the copy constructor is called to pass the value back to the call site.
    1277 Exempt from these rules are intrinsic and builtin functions.
     750Exempt from these rules are intrinsic and built-in functions.
    1278751It should be noted that unmanaged objects are subject to copy constructor calls when passed as arguments to a function or when returned from a function, since they are not the \emph{target} of the copy constructor call.
    1279 This is an important detail to bear in mind when using unmanaged objects, and could produce unexpected results when mixed with objects that are explicitly constructed.
     752That is, since the parameter is not marked as an unmanaged object using \ateq, it is copy constructed if it is returned by value or passed as an argument to another function, so to guarantee consistent behaviour, unmanaged objects must be copy constructed when passed as arguments.
     753These semantics are important to bear in mind when using unmanaged objects, and could produce unexpected results when mixed with objects that are explicitly constructed.
    1280754\begin{cfacode}
    1281755struct A;
     
    1284758void ^?{}(A *);
    1285759
    1286 A f(A x) {
    1287   return x;
     760A identity(A x) { // pass by value => need local copy
     761  return x;       // return by value => make call-site copy
    1288762}
    1289763
    1290764A y, z @= {};
    1291 identity(y);
    1292 identity(z);
    1293 \end{cfacode}
    1294 Note that @z@ is copy constructed into a temporary variable to be passed as an argument, which is also destructed after the call.
    1295 A special syntactic form, such as a variant of \ateq, could be implemented to specify at the call site that an argument should not be copy constructed, to regain some control for the C programmer.
     765identity(y);  // copy construct y into x
     766identity(z);  // copy construct z into x
     767\end{cfacode}
     768Note that unmanaged argument @z@ is logically copy constructed into managed parameter @x@; however, the translator must copy construct into a temporary variable to be passed as an argument, which is also destructed after the call.
     769A compiler could by-pass the argument temporaries since it is in control of the calling conventions and knows exactly where the called-function's parameters live.
    1296770
    1297771This generates the following
    1298772\begin{cfacode}
    1299773struct A f(struct A x){
    1300   struct A _retval_f;
    1301   ?{}((&_retval_f), x);
     774  struct A _retval_f;    // return value
     775  ?{}((&_retval_f), x);  // copy construct return value
    1302776  return _retval_f;
    1303777}
    1304778
    1305779struct A y;
    1306 ?{}(&y);
    1307 struct A z = { 0 };
    1308 
    1309 struct A _tmp_cp1;     // argument 1
    1310 struct A _tmp_cp_ret0; // return value
    1311 _tmp_cp_ret0=f((?{}(&_tmp_cp1, y) , _tmp_cp1)), _tmp_cp_ret0;
    1312 ^?{}(&_tmp_cp_ret0);   // return value
    1313 ^?{}(&_tmp_cp1);       // argument 1
    1314 
    1315 struct A _tmp_cp2;     // argument 1
    1316 struct A _tmp_cp_ret1; // return value
    1317 _tmp_cp_ret1=f((?{}(&_tmp_cp2, z), _tmp_cp2)), _tmp_cp_ret1;
    1318 ^?{}(&_tmp_cp_ret1);   // return value
    1319 ^?{}(&_tmp_cp2);       // argument 1
     780?{}(&y);                 // default construct
     781struct A z = { 0 };      // C default
     782
     783struct A _tmp_cp1;       // argument 1
     784struct A _tmp_cp_ret0;   // return value
     785_tmp_cp_ret0=f(
     786  (?{}(&_tmp_cp1, y) , _tmp_cp1)  // argument is a comma expression
     787), _tmp_cp_ret0;         // return value for cascading
     788^?{}(&_tmp_cp_ret0);     // destruct return value
     789^?{}(&_tmp_cp1);         // destruct argument 1
     790
     791struct A _tmp_cp2;       // argument 1
     792struct A _tmp_cp_ret1;   // return value
     793_tmp_cp_ret1=f(
     794  (?{}(&_tmp_cp2, z), _tmp_cp2)  // argument is a comma expression
     795), _tmp_cp_ret1;         // return value for cascading
     796^?{}(&_tmp_cp_ret1);     // destruct return value
     797^?{}(&_tmp_cp2);         // destruct argument 1
    1320798^?{}(&y);
    1321799\end{cfacode}
    1322800
    1323 A known issue with this implementation is that the return value of a function is not guaranteed to have the same address for its entire lifetime.
    1324 Specifically, since @_retval_f@ is allocated and constructed in @f@ then returned by value, the internal data is bitwise copied into the caller's stack frame.
     801A special syntactic form, such as a variant of \ateq, can be implemented to specify at the call site that an argument should not be copy constructed, to regain some control for the C programmer.
     802\begin{cfacode}
     803identity(z@);  // do not copy construct argument
     804               // - will copy construct/destruct return value
     805A@ identity_nocopy(A @ x) {  // argument not copy constructed or destructed
     806  return x;  // not copy constructed
     807             // return type marked @ => not destructed
     808}
     809\end{cfacode}
     810It should be noted that reference types will allow specifying that a value does not need to be copied; however, reference types do not provide a means of preventing implicit copy construction from uses of the reference, so the problem is still present when passing or returning the reference by value.
     811
     812A known issue with this implementation is that the argument and return value temporaries are not guaranteed to have the same address for their entire lifetimes.
     813In the previous example, since @_retval_f@ is allocated and constructed in @f@, then returned by value, the internal data is bitwise copied into the caller's stack frame.
    1325814This approach works out most of the time, because typically destructors need to only access the fields of the object and recursively destroy.
    1326 It is currently the case that constructors and destructors which use the @this@ pointer as a unique identifier to store data externally will not work correctly for return value objects.
    1327 Thus is it not safe to rely on an object's @this@ pointer to remain constant throughout execution of the program.
     815It is currently the case that constructors and destructors that use the @this@ pointer as a unique identifier to store data externally do not work correctly for return value objects.
     816Thus, it is currently not safe to rely on an object's @this@ pointer to remain constant throughout execution of the program.
    1328817\begin{cfacode}
    1329818A * external_data[32];
     
    1341830  }
    1342831}
     832
     833A makeA() {
     834  A x;  // stores &x in external_data
     835  return x;
     836}
     837makeA();  // return temporary has a different address than x
     838// equivalent to:
     839//   A _tmp;
     840//   _tmp = makeA(), _tmp;
     841//   ^?{}(&_tmp);
    1343842\end{cfacode}
    1344843In the above example, a global array of pointers is used to keep track of all of the allocated @A@ objects.
    1345 Due to copying on return, the current object being destructed will not exist in the array if an @A@ object is ever returned by value from a function.
    1346 
    1347 This problem could be solved in the translator by mutating the function signatures so that the return value is moved into the parameter list.
     844Due to copying on return, the current object being destructed does not exist in the array if an @A@ object is ever returned by value from a function, such as in @makeA@.
     845
     846This problem could be solved in the translator by changing the function signatures so that the return value is moved into the parameter list.
    1348847For example, the translator could restructure the code like so
    1349848\begin{cfacode}
     
    1363862\end{cfacode}
    1364863This transformation provides @f@ with the address of the return variable so that it can be constructed into directly.
    1365 It is worth pointing out that this kind of signature rewriting already occurs in polymorphic functions which return by value, as discussed in \cite{Bilson03}.
    1366 A key difference in this case is that every function would need to be rewritten like this, since types can switch between managed and unmanaged at different scope levels, e.g.
     864It is worth pointing out that this kind of signature rewriting already occurs in polymorphic functions that return by value, as discussed in \cite{Bilson03}.
     865A key difference in this case is that every function would need to be rewritten like this, since types can switch between managed and unmanaged at different scope levels, \eg
    1367866\begin{cfacode}
    1368867struct A { int v; };
    1369 A x; // unmanaged
     868A x; // unmanaged, since only trivial constructors are available
    1370869{
    1371870  void ?{}(A * a) { ... }
     
    1375874A z; // unmanaged
    1376875\end{cfacode}
    1377 Hence there is not enough information to determine at function declaration to determine whether a type is managed or not, and thus it is the case that all signatures have to be rewritten to account for possible copy constructor and destructor calls.
     876Hence there is not enough information to determine at function declaration whether a type is managed or not, and thus it is the case that all signatures have to be rewritten to account for possible copy constructor and destructor calls.
    1378877Even with this change, it would still be possible to declare backwards compatible function prototypes with an @extern "C"@ block, which allows for the definition of C-compatible functions within \CFA code, however this would require actual changes to the way code inside of an @extern "C"@ function is generated as compared with normal code generation.
    1379 Furthermore, it isn't possible to overload C functions, so using @extern "C"@ to declare functions is of limited use.
    1380 
    1381 It would be possible to regain some control by adding an attribute to structs which specifies whether they can be managed or not (perhaps \emph{manageable} or \emph{unmanageable}), and to emit an error in the case that a constructor or destructor is declared for an unmanageable type.
    1382 Ideally, structs should be manageable by default, since otherwise the default case becomes more verbose.
     878Furthermore, it is not possible to overload C functions, so using @extern "C"@ to declare functions is of limited use.
     879
     880It would be possible to regain some control by adding an attribute to structures that specifies whether they can be managed or not (perhaps \emph{manageable} or \emph{unmanageable}), and to emit an error in the case that a constructor or destructor is declared for an unmanageable type.
     881Ideally, structures should be manageable by default, since otherwise the default case becomes more verbose.
    1383882This means that in general, function signatures would have to be rewritten, and in a select few cases the signatures would not be rewritten.
    1384883\begin{cfacode}
    1385 __attribute__((manageable)) struct A { ... };   // can declare constructors
    1386 __attribute__((unmanageable)) struct B { ... }; // cannot declare constructors
    1387 struct C { ... };                               // can declare constructors
     884__attribute__((manageable)) struct A { ... };   // can declare ctors
     885__attribute__((unmanageable)) struct B { ... }; // cannot declare ctors
     886struct C { ... };                               // can declare ctors
    1388887
    1389888A f();  // rewritten void f(A *);
     
    1391890C h();  // rewritten void h(C *);
    1392891\end{cfacode}
    1393 An alternative is to instead make the attribute \emph{identifiable}, which states that objects of this type use the @this@ parameter as an identity.
    1394 This strikes more closely to the visibile problem, in that only types marked as identifiable would need to have the return value moved into the parameter list, and every other type could remain the same.
     892An alternative is to make the attribute \emph{identifiable}, which states that objects of this type use the @this@ parameter as an identity.
     893This strikes more closely to the visible problem, in that only types marked as identifiable would need to have the return value moved into the parameter list, and every other type could remain the same.
    1395894Furthermore, no restrictions would need to be placed on whether objects can be constructed.
    1396895\begin{cfacode}
    1397 __attribute__((identifiable)) struct A { ... };  // can declare constructors
    1398 struct B { ... };                                // can declare constructors
     896__attribute__((identifiable)) struct A { ... };  // can declare ctors
     897struct B { ... };                                // can declare ctors
    1399898
    1400899A f();  // rewritten void f(A *);
     
    1402901\end{cfacode}
    1403902
    1404 Ultimately, this is the type of transformation that a real compiler would make when generating assembly code.
    1405 Since a compiler has full control over its calling conventions, it can seamlessly allow passing the return parameter without outwardly changing the signature of a routine.
    1406 As such, it has been decided that this issue is not currently a priority.
     903Ultimately, both of these are patchwork solutions.
     904Since a real compiler has full control over its calling conventions, it can seamlessly allow passing the return parameter without outwardly changing the signature of a routine.
     905As such, it has been decided that this issue is not currently a priority and will be fixed when a full \CFA compiler is implemented.
    1407906
    1408907\section{Implementation}
    1409908\subsection{Array Initialization}
    1410 Arrays are a special case in the C type system.
     909Arrays are a special case in the C type-system.
    1411910C arrays do not carry around their size, making it impossible to write a standalone \CFA function that constructs or destructs an array while maintaining the standard interface for constructors and destructors.
    1412911Instead, \CFA defines the initialization and destruction of an array recursively.
     
    15201019
    15211020\subsection{Global Initialization}
    1522 In standard C, global variables can only be initialized to compile-time constant expressions.
    1523 This places strict limitations on the programmer's ability to control the default values of objects.
     1021In standard C, global variables can only be initialized to compile-time constant expressions, which places strict limitations on the programmer's ability to control the default values of objects.
    15241022In \CFA, constructors and destructors are guaranteed to be run on global objects, allowing arbitrary code to be run before and after the execution of the main routine.
    15251023By default, objects within a translation unit are constructed in declaration order, and destructed in the reverse order.
    15261024The default order of construction of objects amongst translation units is unspecified.
    1527 % TODO: not yet implemented, but g++ provides attribute init_priority, which allows specifying the order of global construction on a per object basis
    1528 %   https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#C_002b_002b-Attributes
    1529 % suggestion: implement this in CFA by picking objects with a specified priority and pulling them into their own init functions (could even group them by priority level -> map<int, list<ObjectDecl*>>) and pull init_priority forward into constructor and destructor attributes with the same priority level
    1530 It is, however, guaranteed that any global objects in the standard library are initialized prior to the initialization of any object in the user program.
    1531 
    1532 This feature is implemented in the \CFA translator by grouping every global constructor call into a function with the GCC attribute \emph{constructor}, which performs most of the heavy lifting. % CITE: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes
     1025It is, however, guaranteed that any global objects in the standard library are initialized prior to the initialization of any object in a user program.
     1026
     1027This feature is implemented in the \CFA translator by grouping every global constructor call into a function with the GCC attribute \emph{constructor}, which performs most of the heavy lifting \cite[6.31.1]{GCCExtensions}.
    15331028A similar function is generated with the \emph{destructor} attribute, which handles all global destructor calls.
    15341029At the time of writing, initialization routines in the library are specified with priority \emph{101}, which is the highest priority level that GCC allows, whereas initialization routines in the user's code are implicitly given the default priority level, which ensures they have a lower priority than any code with a specified priority level.
    1535 This mechanism allows arbitrarily complicated initialization to occur before any user code runs, making it possible for library designers to initialize their modules without requiring the user to call specific startup or teardown routines.
     1030This mechanism allows arbitrarily complicated initialization to occur before any user code runs, making it possible for library designers to initialize their modules without requiring the user to call specific startup or tear-down routines.
    15361031
    15371032For example, given the following global declarations.
     
    15591054\end{cfacode}
    15601055
     1056%   https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#C_002b_002b-Attributes
     1057% suggestion: implement this in CFA by picking objects with a specified priority and pulling them into their own init functions (could even group them by priority level -> map<int, list<ObjectDecl*>>) and pull init_priority forward into constructor and destructor attributes with the same priority level
     1058GCC provides an attribute @init_priority@ in \CC, which allows specifying the relative priority for initialization of global objects on a per-object basis.
     1059A similar attribute can be implemented in \CFA by pulling marked objects into global constructor/destructor-attribute functions with the specified priority.
     1060For example,
     1061\begin{cfacode}
     1062struct A { ... };
     1063void ?{}(A *, int);
     1064void ^?{}(A *);
     1065__attribute__((init_priority(200))) A x = { 123 };
     1066\end{cfacode}
     1067would generate
     1068\begin{cfacode}
     1069A x;
     1070__attribute__((constructor(200))) __init_x() {
     1071  ?{}(&x, 123);  // construct x with priority 200
     1072}
     1073__attribute__((destructor(200))) __destroy_x() {
     1074  ^?{}(&x);      // destruct x with priority 200
     1075}
     1076\end{cfacode}
     1077
    15611078\subsection{Static Local Variables}
    15621079In standard C, it is possible to mark variables that are local to a function with the @static@ storage class.
    1563 Unlike normal local variables, a @static@ local variable is defined to live for the entire duration of the program, so that each call to the function has access to the same variable with the same address and value as it had in the previous call to the function. % TODO: mention dynamic loading caveat??
    1564 Much like global variables, in C @static@ variables must be initialized to a \emph{compile-time constant value} so that a compiler is able to create storage for the variable and initialize it before the program begins running.
     1080Unlike normal local variables, a @static@ local variable is defined to live for the entire duration of the program, so that each call to the function has access to the same variable with the same address and value as it had in the previous call to the function.
     1081Much like global variables, @static@ variables can only be initialized to a \emph{compile-time constant value} so that a compiler is able to create storage for the variable and initialize it at compile-time.
    15651082
    15661083Yet again, this rule is too restrictive for a language with constructors and destructors.
    1567 Instead, \CFA modifies the definition of a @static@ local variable so that objects are guaranteed to be live from the time control flow reaches their declaration, until the end of the program, since the initializer expression is not necessarily a compile-time constant, but can depend on the current execution state of the function.
    1568 Since standard C does not allow access to a @static@ local variable before the first time control flow reaches the declaration, this restriction does not preclude any valid C code.
     1084Since the initializer expression is not necessarily a compile-time constant and can depend on the current execution state of the function, \CFA modifies the definition of a @static@ local variable so that objects are guaranteed to be live from the time control flow reaches their declaration, until the end of the program.
     1085Since standard C does not allow access to a @static@ local variable before the first time control flow reaches the declaration, this change does not preclude any valid C code.
    15691086Local objects with @static@ storage class are only implicitly constructed and destructed once for the duration of the program.
    15701087The object is constructed when its declaration is reached for the first time.
     
    15731090Construction of @static@ local objects is implemented via an accompanying @static bool@ variable, which records whether the variable has already been constructed.
    15741091A conditional branch checks the value of the companion @bool@, and if the variable has not yet been constructed then the object is constructed.
    1575 The object's destructor is scheduled to be run when the program terminates using @atexit@, and the companion @bool@'s value is set so that subsequent invocations of the function will not reconstruct the object.
     1092The object's destructor is scheduled to be run when the program terminates using @atexit@\footnote{When using the dynamic linker, it is possible to dynamically load and unload a shared library. Since glibc 2.2.3 \cite{atexit}, functions registered with @atexit@ within the shared library are called when unloading the shared library. As such, static local objects can be destructed using this mechanism even in shared libraries on Linux systems.}, and the companion @bool@'s value is set so that subsequent invocations of the function do not reconstruct the object.
    15761093Since the parameter to @atexit@ is a parameter-less function, some additional tweaking is required.
    15771094First, the @static@ variable must be hoisted up to global scope and uniquely renamed to prevent name clashes with other global objects.
    1578 Second, a function is built which calls the destructor for the newly hoisted variable.
     1095If necessary, a local structure may need to be hoisted, as well.
     1096Second, a function is built that calls the destructor for the newly hoisted variable.
    15791097Finally, the newly generated function is registered with @atexit@, instead of registering the destructor directly.
    15801098Since @atexit@ calls functions in the reverse order in which they are registered, @static@ local variables are guaranteed to be destructed in the reverse order that they are constructed, which may differ between multiple executions of the same program.
    1581 
    15821099Extending the previous example
    15831100\begin{cfacode}
     
    16301147\end{cfacode}
    16311148
    1632 \subsection{Constructor Expressions}
    1633 In \CFA, it is possible to use a constructor as an expression.
    1634 Like other operators, the function name @?{}@ matches its operator syntax.
    1635 For example, @(&x){}@ calls the default constructor on the variable @x@, and produces @&x@ as a result.
    1636 The significance of constructors as expressions rather than as statements is that the result of a constructor expression can be used as part of a larger expression.
    1637 A key example is the use of constructor expressions to initialize the result of a call to standard C routine @malloc@.
    1638 \begin{cfacode}
    1639 struct X { ... };
    1640 void ?{}(X *, double);
    1641 X * x = malloc(sizeof(X)){ 1.5 };
    1642 \end{cfacode}
    1643 In this example, @malloc@ dynamically allocates storage and initializes it using a constructor, all before assigning it into the variable @x@.
    1644 If this extension is not present, constructing dynamically allocated objects is much more cumbersome, requiring separate initialization of the pointer and initialization of the pointed-to memory.
    1645 \begin{cfacode}
    1646 X * x = malloc(sizeof(X));
    1647 x{ 1.5 };
    1648 \end{cfacode}
    1649 Not only is this verbose, but it is also more error prone, since this form allows maintenance code to easily sneak in between the initialization of @x@ and the initialization of the memory that @x@ points to.
    1650 This feature is implemented via a transformation produceing the value of the first argument of the constructor, since constructors do not themslves have a return value.
    1651 Since this transformation results in two instances of the subexpression, care is taken to allocate a temporary variable to hold the result of the subexpression in the case where the subexpression may contain side effects.
    1652 The previous example generates the following code.
    1653 \begin{cfacode}
    1654 struct X *_tmp_ctor;
    1655 struct X *x = ?{}((_tmp_ctor=((_tmp_cp_ret0=
    1656   malloc(sizeof(struct X))), _tmp_cp_ret0))), 1.5), _tmp_ctor);
    1657 \end{cfacode}
    1658 It should be noted that this technique is not exclusive to @malloc@, and allows a user to write a custom allocator that can be idiomatically used in much the same way as a constructed @malloc@ call.
    1659 
    1660 It is also possible to use operator syntax with destructors.
    1661 Unlike constructors, operator syntax with destructors is a statement and thus does not produce a value, since the destructed object is invalidated by the use of a destructor.
    1662 For example, \lstinline!^(&x){}! calls the destructor on the variable @x@.
     1149\subsection{Polymorphism}
     1150As mentioned in section \ref{sub:polymorphism}, \CFA currently has 3 type-classes that are used to designate polymorphic data types: @otype@, @dtype@, and @ftype@.
     1151In previous versions of \CFA, @otype@ was syntactic sugar for @dtype@ with known size/alignment information and an assignment function.
     1152That is,
     1153\begin{cfacode}
     1154forall(otype T)
     1155void f(T);
     1156\end{cfacode}
     1157was equivalent to
     1158\begin{cfacode}
     1159forall(dtype T | sized(T) | { T ?=?(T *, T); })
     1160void f(T);
     1161\end{cfacode}
     1162This allows easily specifying constraints that are common to all complete object-types very simply.
     1163
     1164Now that \CFA has constructors and destructors, more of a complete object's behaviour can be specified than was previously possible.
     1165As such, @otype@ has been augmented to include assertions for a default constructor, copy constructor, and destructor.
     1166That is, the previous example is now equivalent to
     1167\begin{cfacode}
     1168forall(dtype T | sized(T) |
     1169  { T ?=?(T *, T); void ?{}(T *); void ?{}(T *, T); void ^?{}(T *); })
     1170void f(T);
     1171\end{cfacode}
     1172These additions allow @f@'s body to create and destroy objects of type @T@, and pass objects of type @T@ as arguments to other functions, following the normal \CFA rules.
      1173A point of note here is that objects can be missing default constructors (and eventually other functions through deleted functions), so it is important for \CFA programmers to think carefully about the operations needed by their function, so as not to over-constrain the acceptable parameter types and prevent potential reuse.
  • doc/rob_thesis/intro.tex

    r221c2de7 r154fdc8  
    55\section{\CFA Background}
    66\label{s:background}
    7 \CFA is a modern extension to the C programming language.
     7\CFA \footnote{Pronounced ``C-for-all'', and written \CFA or Cforall.} is a modern non-object-oriented extension to the C programming language.
    88As it is an extension of C, there is already a wealth of existing C code and principles that govern the design of the language.
    99Among the goals set out in the original design of \CFA, four points stand out \cite{Bilson03}.
     
    1616Therefore, these design principles must be kept in mind throughout the design and development of new language features.
    1717In order to appeal to existing C programmers, great care must be taken to ensure that new features naturally feel like C.
    18 The remainder of this section describes some of the important new features that currently exist in \CFA, to give the reader the necessary context in which the new features presented in this thesis must dovetail. % TODO: harmonize with?
     18These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used.
     19Unfortunately, \CC is actively diverging from C, so incremental additions require significant effort and training, coupled with multiple legacy design-choices that cannot be updated.
     20
     21The remainder of this section describes some of the important new features that currently exist in \CFA, to give the reader the necessary context in which the new features presented in this thesis must dovetail.
    1922
    2023\subsection{C Background}
     
    2932A a1 = { 1, .y:7, 6 };
    3033A a2[4] = { [2]:a0, [0]:a1, { .z:3 } };
    31 // equvialent to
     34// equivalent to
    3235// A a0 = { 0, 8, 0, 1 };
    3336// A a1 = { 1, 0, 7, 6 };
     
    3639Designations allow specifying the field to initialize by name, rather than by position.
    3740Any field not explicitly initialized is initialized as if it had static storage duration \cite[p.~141]{C11}.
    38 A designator specifies the current object for initialization, and as such any undesignated subobjects pick up where the last initialization left off.
    39 For example, in the initialization of @a1@, the initializer of @y@ is @7@, and the unnamed initializer @6@ initializes the next subobject, @z@.
    40 Later initializers override earlier initializers, so a subobject for which there is more than one initializer is only initailized by its last initializer.
    41 This can be seen in the initialization of @a0@, where @x@ is designated twice, and thus initialized to @8@.
    42 Note that in \CFA, designations use a colon separator, rather than an equals sign as in C.
     41A designator specifies the current object for initialization, and as such any undesignated sub-objects pick up where the last initialization left off.
     42For example, in the initialization of @a1@, the initializer of @y@ is @7@, and the unnamed initializer @6@ initializes the next sub-object, @z@.
     43Later initializers override earlier initializers, so a sub-object for which there is more than one initializer is only initialized by its last initializer.
     44These semantics can be seen in the initialization of @a0@, where @x@ is designated twice, and thus initialized to @8@.
     45Note that in \CFA, designations use a colon separator, rather than an equals sign as in C, because this syntax is one of the few places that conflicts with the new language features.
    4346
    4447C also provides \emph{compound literal} expressions, which provide a first-class mechanism for creating unnamed objects.
     
    5356\end{cfacode}
    5457Compound literals create an unnamed object, and result in an lvalue, so it is legal to assign a value into a compound literal or to take its address \cite[p.~86]{C11}.
    55 Syntactically, compound literals look like a cast operator followed by a brace-enclosed initializer, but semantically are different from a C cast, which only applies basic conversions and is never an lvalue.
     58Syntactically, compound literals look like a cast operator followed by a brace-enclosed initializer, but semantically are different from a C cast, which only applies basic conversions and coercions and is never an lvalue.
    5659
    5760\subsection{Overloading}
     
    5962Overloading is the ability to specify multiple entities with the same name.
    6063The most common form of overloading is function overloading, wherein multiple functions can be defined with the same name, but with different signatures.
    61 Like in \CC, \CFA allows overloading based both on the number of parameters and on the types of parameters.
     64C provides a small amount of built-in overloading, \eg + is overloaded for the basic types.
     65Like in \CC, \CFA allows user-defined overloading based both on the number of parameters and on the types of parameters.
    6266  \begin{cfacode}
    6367  void f(void);  // (1)
     
    9195
    9296There are times when a function should logically return multiple values.
    93 Since a function in standard C can only return a single value, a programmer must either take in additional return values by address, or the function's designer must create a wrapper structure t0 package multiple return-values.
     97Since a function in standard C can only return a single value, a programmer must either take in additional return values by address, or the function's designer must create a wrapper structure to package multiple return-values.
     98For example, the first approach:
    9499\begin{cfacode}
    95100int f(int * ret) {        // returns a value through parameter ret
     
    101106int res1 = g(&res2);      // explicitly pass storage
    102107\end{cfacode}
    103 The former solution is awkward because it requires the caller to explicitly allocate memory for $n$ result variables, even if they are only temporary values used as a subexpression, or even not used at all.
     108is awkward because it requires the caller to explicitly allocate memory for $n$ result variables, even if they are only temporary values used as a subexpression, or even not used at all.
     109The second approach:
    104110\begin{cfacode}
    105111struct A {
     
    112118... res3.x ... res3.y ... // use result values
    113119\end{cfacode}
    114 The latter approach requires the caller to either learn the field names of the structure or learn the names of helper routines to access the individual return values.
    115 Both solutions are syntactically unnatural.
    116 
    117 In \CFA, it is possible to directly declare a function returning mutliple values.
    118 This provides important semantic information to the caller, since return values are only for output.
    119 \begin{cfacode}
    120 [int, int] f() {       // don't need to create a new type
     120is awkward because the caller has to either learn the field names of the structure or learn the names of helper routines to access the individual return values.
     121Both approaches are syntactically unnatural.
     122
     123In \CFA, it is possible to directly declare a function returning multiple values.
     124This extension provides important semantic information to the caller, since return values are only for output.
     125\begin{cfacode}
     126[int, int] f() {       // no new type
    121127  return [123, 37];
    122128}
    123129\end{cfacode}
    124 However, the ability to return multiple values requires a syntax for accepting the results from a function.
     130However, the ability to return multiple values is useless without a syntax for accepting the results from the function.
     131
    125132In standard C, return values are most commonly assigned directly into local variables, or are used as the arguments to another function call.
    126133\CFA allows both of these contexts to accept multiple return values.
     
    148155  g(f());             // selects (2)
    149156  \end{cfacode}
    150 In this example, the only possible call to @f@ that can produce the two @int@s required by @g@ is the second option.
    151 A similar reasoning holds for assigning into multiple variables.
     157In this example, the only possible call to @f@ that can produce the two @int@s required for assigning into the variables @x@ and @y@ is the second option.
     158A similar reasoning holds calling the function @g@.
    152159
    153160In \CFA, overloading also applies to operator names, known as \emph{operator overloading}.
     
    163170  \begin{cfacode}
    164171  struct A { int i; };
    165   int ?+?(A x, A y);
     172  int ?+?(A x, A y);    // '?'s represent operands
    166173  bool ?<?(A x, A y);
    167174  \end{cfacode}
    168 Notably, the only difference in this example is syntax.
     175Notably, the only difference is syntax.
    169176Most of the operators supported by \CC for operator overloading are also supported in \CFA.
    170 Of notable exception are the logical operators (e.g. @||@), the sequence operator (i.e. @,@), and the member-access operators (e.g. @.@ and \lstinline{->}).
     177Of notable exception are the logical operators (\eg @||@), the sequence operator (\ie @,@), and the member-access operators (\eg @.@ and \lstinline{->}).
    171178
    172179Finally, \CFA also permits overloading variable identifiers.
    173180This feature is not available in \CC.
    174   \begin{cfacode} % TODO: pick something better than x? max, zero, one?
     181  \begin{cfacode}
    175182  struct Rational { int numer, denom; };
    176183  int x = 3;               // (1)
     
    186193In this example, there are three definitions of the variable @x@.
    187194Based on the context, \CFA attempts to choose the variable whose type best matches the expression context.
     195When used judiciously, this feature allows names like @MAX@, @MIN@, and @PI@ to apply across many types.
    188196
    189197Finally, the values @0@ and @1@ have special status in standard C.
     
    197205}
    198206\end{cfacode}
    199 Every if statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result.
     207Every if- and iteration-statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result.
    200208Due to these rewrite rules, the values @0@ and @1@ have the types \zero and \one in \CFA, which allow for overloading various operations that connect to @0@ and @1@ \footnote{In the original design of \CFA, @0@ and @1@ were overloadable names \cite[p.~7]{cforall}.}.
    201 The types \zero and \one have special built in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal.
     209The types \zero and \one have special built-in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal.
    202210  \begin{cfacode}
    203211  // lvalue is similar to returning a reference in C++
     
    240248  template<typename T>
    241249  T sum(T *arr, int n) {
    242     T t;
     250    T t;  // default construct => 0
    243251    for (; n > 0; n--) t += arr[n-1];
    244252    return t;
     
    258266  \end{cfacode}
    259267The first thing to note here is that immediately following the declaration of @otype T@ is a list of \emph{type assertions} that specify restrictions on acceptable choices of @T@.
    260 In particular, the assertions above specify that there must be a an assignment from \zero to @T@ and an addition assignment operator from @T@ to @T@.
     268In particular, the assertions above specify that there must be an assignment from \zero to @T@ and an addition assignment operator from @T@ to @T@.
    261269The existence of an assignment operator from @T@ to @T@ and the ability to create an object of type @T@ are assumed implicitly by declaring @T@ with the @otype@ type-class.
    262270In addition to @otype@, there are currently two other type-classes.
     
    278286A major difference between the approaches of \CC and \CFA to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA.
    279287One of the major limiting factors of \CC's approach is that templates cannot be separately compiled.
    280 In contrast, the explicit nature of assertions allows \CFA's polymorphic functions to be separately compiled.
     288In contrast, the explicit nature of assertions allows \CFA's polymorphic functions to be separately compiled, as the function prototype states all necessary requirements separate from the implementation.
     289For example, the prototype for the previous sum function is
     290  \begin{cfacode}
     291  forall(otype T | **R**{ T ?=?(T *, zero_t); T ?+=?(T *, T); }**R**)
     292  T sum(T *arr, int n);
     293  \end{cfacode}
     294With this prototype, a caller in another translation unit knows all of the constraints on @T@, and thus knows all of the operations that need to be made available to @sum@.
    281295
    282296In \CFA, a set of assertions can be factored into a \emph{trait}.
     
    293307This capability allows specifying the same set of assertions in multiple locations, without the repetition and likelihood of mistakes that come with manually writing them out for each function declaration.
    294308
     309An interesting application of return-type resolution and polymorphism is a type-safe version of @malloc@.
     310\begin{cfacode}
     311forall(dtype T | sized(T))
     312T * malloc() {
     313  return (T*)malloc(sizeof(T)); // call C malloc
     314}
     315int * x = malloc();     // malloc(sizeof(int))
     316double * y = malloc();  // malloc(sizeof(double))
     317
     318struct S { ... };
     319S * s = malloc();       // malloc(sizeof(S))
     320\end{cfacode}
     321The built-in trait @sized@ ensures that size and alignment information for @T@ is available in the body of @malloc@ through @sizeof@ and @_Alignof@ expressions respectively.
     322In calls to @malloc@, the type @T@ is bound based on call-site information, allowing \CFA code to allocate memory without the potential for errors introduced by manually specifying the size of the allocated block.
     323
    295324\section{Invariants}
    296 % TODO: discuss software engineering benefits of ctor/dtors: {pre/post} conditions, invariants
    297 % an important invariant is the state of the environment (memory, resources)
    298 % some objects pass their contract to the object user
    299 An \emph{invariant} is a logical assertion that true for some duration of a program's execution.
     325An \emph{invariant} is a logical assertion that is true for some duration of a program's execution.
    300326Invariants help a programmer to reason about code correctness and prove properties of programs.
    301327
     328\begin{sloppypar}
    302329In object-oriented programming languages, type invariants are typically established in a constructor and maintained throughout the object's lifetime.
    303 This is typically achieved through a combination of access control modifiers and a restricted interface.
     330These assertions are typically achieved through a combination of access-control modifiers and a restricted interface.
    304331Typically, data which requires the maintenance of an invariant is hidden from external sources using the \emph{private} modifier, which restricts reads and writes to a select set of trusted routines, including member functions.
    305332It is these trusted routines that perform all modifications to internal data in a way that is consistent with the invariant, by ensuring that the invariant holds true at the end of the routine call.
     333\end{sloppypar}
    306334
    307335In C, the @assert@ macro is often used to ensure invariants are true.
    308336Using @assert@, the programmer can check a condition and abort execution if the condition is not true.
    309 This is a powerful tool that forces the programmer to deal with logical inconsistencies as they occur.
     337This powerful tool forces the programmer to deal with logical inconsistencies as they occur.
    310338For production, assertions can be removed by simply defining the preprocessor macro @NDEBUG@, making it simple to ensure that assertions are 0-cost for a performance intensive application.
    311339\begin{cfacode}
     
    354382\end{dcode}
    355383The D compiler is able to assume that assertions and invariants hold true and perform optimizations based on those assumptions.
    356 
    357 An important invariant is the state of the execution environment, including the heap, the open file table, the state of global variables, etc.
    358 Since resources are finite, it is important to ensure that objects clean up properly when they are finished, restoring the execution environment to a stable state so that new objects can reuse resources.
     384Note, these invariants are internal to the type's correct behaviour.
     385
     386Types also have external invariants with the state of the execution environment, including the heap, the open-file table, the state of global variables, etc.
     387Since resources are finite and shared (concurrency), it is important to ensure that objects clean up properly when they are finished, restoring the execution environment to a stable state so that new objects can reuse resources.
    359388
    360389\section{Resource Management}
     
    366395The program stack grows and shrinks automatically with each function call, as needed for local variables.
    367396However, whenever a program needs a variable to outlive the block it is created in, the storage must be allocated dynamically with @malloc@ and later released with @free@.
    368 This pattern is extended to more complex objects, such as files and sockets, which also outlive the block where they are created, but at their core is resource management.
    369 Once allocated storage escapes a block, the responsibility for deallocating the storage is not specified in a function's type, that is, that the return value is owned by the caller.
     397This pattern is extended to more complex objects, such as files and sockets, which can also outlive the block where they are created, and thus require their own resource management.
     398Once allocated storage escapes\footnote{In garbage collected languages, such as Java, escape analysis \cite{Choi:1999:EAJ:320385.320386} is used to determine when dynamically allocated objects are strictly contained within a function, which allows the optimizer to allocate them on the stack.} a block, the responsibility for deallocating the storage is not specified in a function's type, that is, that the return value is owned by the caller.
    370399This implicit convention is provided only through documentation about the expectations of functions.
    371400
    372401In other languages, a hybrid situation exists where resources escape the allocation block, but ownership is precisely controlled by the language.
    373 This pattern requires a strict interface and protocol for a data structure, where the protocol consists of a pre-initialization and a post-termination call, and all intervening access is done via interface routines.
    374 This kind of encapsulation is popular in object-oriented programming languages, and like the stack, it contains a significant portion of resource management cases.
     402This pattern requires a strict interface and protocol for a data structure, consisting of a pre-initialization and a post-termination call, and all intervening access is done via interface routines.
     403This kind of encapsulation is popular in object-oriented programming languages, and like the stack, it takes care of a significant portion of resource-management cases.
    375404
    376405For example, \CC directly supports this pattern through class types and an idiom known as RAII \footnote{Resource Acquisition is Initialization} by means of constructors and destructors.
     
    380409On the other hand, destructors provide a simple mechanism for tearing down an object and resetting the environment in which the object lived.
    381410RAII ensures that if all resources are acquired in a constructor and released in a destructor, there are no resource leaks, even in exceptional circumstances.
    382 A type with at least one non-trivial constructor or destructor will henceforth be referred to as a \emph{managed type}.
    383 In the context of \CFA, a non-trivial constructor is either a user defined constructor or an auto generated constructor that calls a non-trivial constructor.
    384 
    385 For the remaining resource ownership cases, programmer must follow a brittle, explicit protocol for freeing resources or an implicit porotocol implemented via the programming language.
     411A type with at least one non-trivial constructor or destructor is henceforth referred to as a \emph{managed type}.
     412In the context of \CFA, a non-trivial constructor is either a user defined constructor or an auto-generated constructor that calls a non-trivial constructor.
     413
     414For the remaining resource ownership cases, a programmer must follow a brittle, explicit protocol for freeing resources or an implicit protocol enforced by the programming language.
    386415
    387416In garbage collected languages, such as Java, resources are largely managed by the garbage collector.
    388 Still, garbage collectors are typically focus only on memory management.
     417Still, garbage collectors typically focus only on memory management.
    389418There are many kinds of resources that the garbage collector does not understand, such as sockets, open files, and database connections.
    390419In particular, Java supports \emph{finalizers}, which are similar to destructors.
    391 Sadly, finalizers come with far fewer guarantees, to the point where a completely conforming JVM may never call a single finalizer. % TODO: citation JVM spec; http://stackoverflow.com/a/2506514/2386739
    392 Due to operating system resource limits, this is unacceptable for many long running tasks. % TODO: citation?
    393 Instead, the paradigm in Java requires programmers manually keep track of all resource \emph{except} memory, leading many novices and experts alike to forget to close files, etc.
    394 Complicating the picture, uncaught exceptions can cause control flow to change dramatically, leaking a resource which appears on first glance to be closed.
     420Unfortunately, finalizers are only guaranteed to be called before an object is reclaimed by the garbage collector \cite[p.~373]{Java8}, which may not happen if memory use is not contentious.
     421Due to operating-system resource-limits, this is unacceptable for many long running programs.
     422Instead, the paradigm in Java requires programmers to manually keep track of all resources \emph{except} memory, leading many novices and experts alike to forget to close files, etc.
     423Complicating the picture, uncaught exceptions can cause control flow to change dramatically, leaking a resource that appears on first glance to be released.
    395424\begin{javacode}
    396425void write(String filename, String msg) throws Exception {
     
    403432}
    404433\end{javacode}
    405 Any line in this program can throw an exception.
    406 This leads to a profusion of finally blocks around many function bodies, since it isn't always clear when an exception may be thrown.
     434Any line in this program can throw an exception, which leads to a profusion of finally blocks around many function bodies, since it is not always clear when an exception may be thrown.
    407435\begin{javacode}
    408436public void write(String filename, String msg) throws Exception {
     
    422450\end{javacode}
    423451In Java 7, a new \emph{try-with-resources} construct was added to alleviate most of the pain of working with resources, but ultimately it still places the burden squarely on the user rather than on the library designer.
    424 Furthermore, for complete safety this pattern requires nested objects to be declared separately, otherwise resources which can throw an exception on close can leak nested resources. % TODO: cite oracle article http://www.oracle.com/technetwork/articles/java/trywithresources-401775.html?
     452Furthermore, for complete safety this pattern requires nested objects to be declared separately, otherwise resources that can throw an exception on close can leak nested resources \cite{TryWithResources}.
    425453\begin{javacode}
    426454public void write(String filename, String msg) throws Exception {
    427   try (
     455  try (  // try-with-resources
    428456    FileOutputStream out = new FileOutputStream(filename);
    429457    FileOutputStream log = new FileOutputStream("log.txt");
     
    434462}
    435463\end{javacode}
    436 On the other hand, the Java compiler generates more code if more resources are declared, meaning that users must be more familiar with each type and library designers must provide better documentation.
     464Variables declared as part of a try-with-resources statement must conform to the @AutoClosable@ interface, and the compiler implicitly calls @close@ on each of the variables at the end of the block.
     465Depending on when the exception is raised, both @out@ and @log@ are null, @log@ is null, or both are non-null, therefore, the cleanup for these variables at the end is automatically guarded and conditionally executed to prevent null-pointer exceptions.
     466
     467While Rust \cite{Rust} does not enforce the use of a garbage collector, it does provide a manual memory management environment, with a strict ownership model that automatically frees allocated memory and prevents common memory management errors.
     468In particular, a variable has ownership over its associated value, which is freed automatically when the owner goes out of scope.
     469Furthermore, values are \emph{moved} by default on assignment, rather than copied, which invalidates the previous variable binding.
     470\begin{rustcode}
     471struct S {
     472  x: i32
     473}
     474let s = S { x: 123 };
     475let z = s;           // move, invalidate s
     476println!("{}", s.x); // error, s has been moved
     477\end{rustcode}
     478Types can be made copyable by implementing the @Copy@ trait.
     479
     480Rust allows multiple unowned views into an object through references, also known as borrows, provided that a reference does not outlive its referent.
     481A mutable reference is allowed only if it is the only reference to its referent, preventing data race errors and iterator invalidation errors.
     482\begin{rustcode}
     483let mut x = 10;
     484{
     485  let y = &x;
     486  let z = &x;
     487  println!("{} {}", y, z); // prints 10 10
     488}
     489{
     490  let y = &mut x;
     491  // let z1 = &x;     // not allowed, have mutable reference
     492  // let z2 = &mut x; // not allowed, have mutable reference
     493  *y = 5;
     494  println!("{}", y); // prints 5
     495}
     496println!("{}", x); // prints 5
     497\end{rustcode}
     498Since references are not owned, they do not release resources when they go out of scope.
     499There is no runtime cost imposed on these restrictions, since they are enforced at compile-time.
     500
     501Rust provides RAII through the @Drop@ trait, allowing arbitrary code to execute when the object goes out of scope, providing automatic clean up of auxiliary resources, much like a \CC program.
     502\begin{rustcode}
     503struct S {
     504  name: &'static str
     505}
     506
     507impl Drop for S {  // RAII for S
     508  fn drop(&mut self) {  // destructor
     509    println!("dropped {}", self.name);
     510  }
     511}
     512
     513{
     514  let x = S { name: "x" };
     515  let y = S { name: "y" };
     516} // prints "dropped y" "dropped x"
     517\end{rustcode}
    437518
    438519% D has constructors and destructors that are worth a mention (under classes) https://dlang.org/spec/spec.html
     
    442523The programming language, D, also manages resources with constructors and destructors \cite{D}.
    443524In D, @struct@s are stack allocated and managed via scoping like in \CC, whereas @class@es are managed automatically by the garbage collector.
    444 Like Java, using the garbage collector means that destructors may never be called, requiring the use of finally statements to ensure dynamically allocated resources that are not managed by the garbage collector, such as open files, are cleaned up.
     525Like Java, using the garbage collector means that destructors are called indeterminately, requiring the use of finally statements to ensure dynamically allocated resources that are not managed by the garbage collector, such as open files, are cleaned up.
    445526Since D supports RAII, it is possible to use the same techniques as in \CC to ensure that resources are released in a timely manner.
    446 Finally, D provides a scope guard statement, which allows an arbitrary statement to be executed at normal scope exit with \emph{success}, at exceptional scope exit with \emph{failure}, or at normal and exceptional scope exit with \emph{exit}. % cite? https://dlang.org/spec/statement.html#ScopeGuardStatement
    447 It has been shown that the \emph{exit} form of the scope guard statement can be implemented in a library in \CC. % cite: http://www.drdobbs.com/184403758
    448 
    449 % TODO: discussion of lexical scope vs. dynamic
    450 % see Peter's suggestions
    451 % RAII works in both cases. Guaranteed to work in stack case, works in heap case if root is deleted (but it's dangerous to rely on this, because of exceptions)
     527Finally, D provides a scope guard statement, which allows an arbitrary statement to be executed at normal scope exit with \emph{success}, at exceptional scope exit with \emph{failure}, or at normal and exceptional scope exit with \emph{exit}. % https://dlang.org/spec/statement.html#ScopeGuardStatement
     528It has been shown that the \emph{exit} form of the scope guard statement can be implemented in a library in \CC \cite{ExceptSafe}.
     529
     530To provide managed types in \CFA, new kinds of constructors and destructors are added to \CFA and discussed in Chapter 2.
    452531
    453532\section{Tuples}
    454533\label{s:Tuples}
    455 In mathematics, tuples are finite-length sequences which, unlike sets, allow duplicate elements.
    456 In programming languages, tuples are a construct that provide fixed-sized heterogeneous lists of elements.
     534In mathematics, tuples are finite-length sequences which, unlike sets, are ordered and allow duplicate elements.
     535In programming languages, tuples provide fixed-sized heterogeneous lists of elements.
    457536Many programming languages have tuple constructs, such as SETL, \KWC, ML, and Scala.
    458537
     
    462541Adding tuples to \CFA has previously been explored by Esteves \cite{Esteves04}.
    463542
    464 The design of tuples in \KWC took much of its inspiration from SETL.
     543The design of tuples in \KWC took much of its inspiration from SETL \cite{SETL}.
    465544SETL is a high-level mathematical programming language, with tuples being one of the primary data types.
    466545Tuples in SETL allow a number of operations, including subscripting, dynamic expansion, and multiple assignment.
     
    470549\begin{cppcode}
    471550tuple<int, int, int> triple(10, 20, 30);
    472 get<1>(triple); // access component 1 => 30
     551get<1>(triple); // access component 1 => 20
    473552
    474553tuple<int, double> f();
     
    482561Tuples are simple data structures with few specific operations.
    483562In particular, it is possible to access a component of a tuple using @std::get<N>@.
    484 Another interesting feature is @std::tie@, which creates a tuple of references, which allows assigning the results of a tuple-returning function into separate local variables, without requiring a temporary variable.
     563Another interesting feature is @std::tie@, which creates a tuple of references, allowing assignment of the results of a tuple-returning function into separate local variables, without requiring a temporary variable.
    485564Tuples also support lexicographic comparisons, making it simple to write aggregate comparators using @std::tie@.
    486565
    487 There is a proposal for \CCseventeen called \emph{structured bindings}, that introduces new syntax to eliminate the need to pre-declare variables and use @std::tie@ for binding the results from a function call. % TODO: cite http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/p0144r0.pdf
     566There is a proposal for \CCseventeen called \emph{structured bindings} \cite{StructuredBindings}, that introduces new syntax to eliminate the need to pre-declare variables and use @std::tie@ for binding the results from a function call.
    488567\begin{cppcode}
    489568tuple<int, double> f();
     
    492571tuple<int, int, int> triple(10, 20, 30);
    493572auto & [t1, t2, t3] = triple;
    494 t2 = 0; // changes triple
     573t2 = 0; // changes middle element of triple
    495574
    496575struct S { int x; double y; };
     
    498577auto [x, y] = s; // unpack s
    499578\end{cppcode}
    500 Structured bindings allow unpacking any struct with all public non-static data members into fresh local variables.
     579Structured bindings allow unpacking any structure with all public non-static data members into fresh local variables.
    501580The use of @&@ allows declaring new variables as references, which is something that cannot be done with @std::tie@, since \CC references do not support rebinding.
    502 This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must documented with some other mechanism.
     581This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must be documented with some other mechanism.
    503582Furthermore, structured bindings are not a full replacement for @std::tie@, as it always declares new variables.
    504583
    505 Like \CC, D provides tuples through a library variadic template struct.
     584Like \CC, D provides tuples through a library variadic-template structure.
    506585In D, it is possible to name the fields of a tuple type, which creates a distinct type.
    507 \begin{dcode} % TODO: cite http://dlang.org/phobos/std_typecons.html
     586% http://dlang.org/phobos/std_typecons.html
     587\begin{dcode}
    508588Tuple!(float, "x", float, "y") point2D;
    509 Tuple!(float, float) float2;  // different types
     589Tuple!(float, float) float2;  // different type from point2D
    510590
    511591point2D[0]; // access first element
     
    521601The @expand@ method produces the components of the tuple as a list of separate values, making it possible to call a function that takes $N$ arguments using a tuple with $N$ components.
    522602
    523 Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML.
     603Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML \cite{sml}.
    524604A function in SML always accepts exactly one argument.
    525605There are two ways to mimic multiple argument functions: the first through currying and the second by accepting tuple arguments.
     
    533613\end{smlcode}
    534614Here, the function @binco@ appears to take 2 arguments, but it actually takes a single argument which is implicitly decomposed via pattern matching.
    535 Tuples are a foundational tool in SML, allowing the creation of arbitrarily complex structured data types.
    536 
    537 Scala, like \CC, provides tuple types through the standard library.
     615Tuples are a foundational tool in SML, allowing the creation of arbitrarily-complex structured data-types.
     616
     617Scala, like \CC, provides tuple types through the standard library \cite{Scala}.
    538618Scala provides tuples of size 1 through 22 inclusive through generic data structures.
    539619Tuples support named access and subscript access, among a few other operations.
    540620\begin{scalacode}
    541 val a = new Tuple3[Int, String, Double](0, "Text", 2.1) // explicit creation
    542 val b = (6, 'a', 1.1f)       // syntactic sugar for Tuple3[Int, Char, Float]
     621val a = new Tuple3(0, "Text", 2.1) // explicit creation
     622val b = (6, 'a', 1.1f)       // syntactic sugar: Tuple3[Int, Char, Float]
    543623val (i, _, d) = triple       // extractor syntax, ignore middle element
    544624
     
    547627\end{scalacode}
    548628In Scala, tuples are primarily used as simple data structures for carrying around multiple values or for returning multiple values from a function.
    549 The 22-element restriction is an odd and arbitrary choice, but in practice it doesn't cause problems since large tuples are uncommon.
     629The 22-element restriction is an odd and arbitrary choice, but in practice it does not cause problems since large tuples are uncommon.
    550630Subscript access is provided through the @productElement@ method, which returns a value of the top-type @Any@, since it is impossible to receive a more precise type from a general subscripting method due to type erasure.
    551631The disparity between named access beginning at @_1@ and subscript access starting at @0@ is likewise an oddity, but subscript access is typically avoided since it discards type information.
     
    553633
    554634
    555 \Csharp has similarly strange limitations, allowing tuples of size up to 7 components. % TODO: cite https://msdn.microsoft.com/en-us/library/system.tuple(v=vs.110).aspx
     635\Csharp also has tuples, but has similarly strange limitations, allowing tuples of size up to 7 components. % https://msdn.microsoft.com/en-us/library/system.tuple(v=vs.110).aspx
    556636The officially supported workaround for this shortcoming is to nest tuples in the 8th component.
    557637\Csharp allows accessing a component of a tuple by using the field @Item$N$@ for components 1 through 7, and @Rest@ for the nested tuple.
    558638
    559 
    560 % TODO: cite 5.3 https://docs.python.org/3/tutorial/datastructures.html
    561 In Python, tuples are immutable sequences that provide packing and unpacking operations.
     639In Python \cite{Python}, tuples are immutable sequences that provide packing and unpacking operations.
    562640While the tuple itself is immutable, and thus does not allow the assignment of components, there is nothing preventing a component from being internally mutable.
    563641The components of a tuple can be accessed by unpacking into multiple variables, indexing, or via field name, like D.
    564642Tuples support multiple assignment through a combination of packing and unpacking, in addition to the common sequence operations.
    565643
    566 % TODO: cite https://developer.apple.com/library/content/documentation/Swift/Conceptual/Swift_Programming_Language/Types.html#//apple_ref/doc/uid/TP40014097-CH31-ID448
    567 Swift, like D, provides named tuples, with components accessed by name, index, or via extractors.
     644Swift \cite{Swift}, like D, provides named tuples, with components accessed by name, index, or via extractors.
    568645Tuples are primarily used for returning multiple values from a function.
    569646In Swift, @Void@ is an alias for the empty tuple, and there are no single element tuples.
     647
     648Tuples comparable to those described above are added to \CFA and discussed in Chapter 3.
    570649
    571650\section{Variadic Functions}
     
    581660printf("%d %g %c %s", 10, 3.5, 'X', "a string");
    582661\end{cfacode}
    583 Through the use of a format string, @printf@ allows C programmers to print any of the standard C data types.
     662Through the use of a format string, C programmers can communicate argument type information to @printf@, allowing C programmers to print any of the standard C data types.
    584663Still, @printf@ is extremely limited, since the format codes are specified by the C standard, meaning users cannot define their own format codes to extend @printf@ for new data types or new formatting rules.
    585664
     665\begin{sloppypar}
    586666C provides manipulation of variadic arguments through the @va_list@ data type, which abstracts details of the manipulation of variadic arguments.
    587667Since the variadic arguments are untyped, it is up to the function to interpret any data that is passed in.
    588668Additionally, the interface to manipulate @va_list@ objects is essentially limited to advancing to the next argument, without any built-in facility to determine when the last argument is read.
    589 This requires the use of an \emph{argument descriptor} to pass information to the function about the structure of the argument list, including the number of arguments and their types.
     669This limitation requires the use of an \emph{argument descriptor} to pass information to the function about the structure of the argument list, including the number of arguments and their types.
    590670The format string in @printf@ is one such example of an argument descriptor.
    591671\begin{cfacode}
     
    618698Furthermore, if the user makes a mistake, compile-time checking is typically restricted to standard format codes and their corresponding types.
    619699In general, this means that C's variadic functions are not type-safe, making them difficult to use properly.
     700\end{sloppypar}
    620701
    621702% When arguments are passed to a variadic function, they undergo \emph{default argument promotions}.
     
    641722A parameter pack matches 0 or more elements, which can be types or expressions depending on the context.
    642723Like other templates, variadic template functions rely on an implicit set of constraints on a type, in this example a @print@ routine.
    643 That is, it is possible to use the @f@ routine any any type provided there is a corresponding @print@ routine, making variadic templates fully open to extension, unlike variadic functions in C.
     724That is, it is possible to use the @f@ routine on any type provided there is a corresponding @print@ routine, making variadic templates fully open to extension, unlike variadic functions in C.
    644725
    645726Recent \CC standards (\CCfourteen, \CCseventeen) expand on the basic premise by allowing variadic template variables and providing convenient expansion syntax to remove the need for recursion in some cases, amongst other things.
     
    672753Unfortunately, Java's use of nominal inheritance means that types must explicitly inherit from classes or interfaces in order to be considered a subclass.
    673754The combination of these two issues greatly restricts the usefulness of variadic functions in Java.
     755
     756Type-safe variadic functions are added to \CFA and discussed in Chapter 4.
  • doc/rob_thesis/thesis-frontpgs.tex

    r221c2de7 r154fdc8  
    2424
    2525        \Large
    26         Rob Schluntz \\
     26        Robert Schluntz \\
    2727
    2828        \vspace*{3.0cm}
     
    4343        \vspace*{1.0cm}
    4444
    45         \copyright\ Rob Schluntz 2017 \\
     45        \copyright\ Robert Schluntz 2017 \\
    4646        \end{center}
    4747\end{titlepage}
     
    7676\begin{center}\textbf{Abstract}\end{center}
    7777
    78 % \CFA is a modern extension to the C programming language.
    79 % Some of the features of \CFA include parametric polymorphism, overloading, and .
    80 TODO
     78\CFA is a modern, non-object-oriented extension of the C programming language.
     79This thesis addresses several critical deficiencies of C, notably: resource management, a limited function-return mechanism, and unsafe variadic functions.
     80To solve these problems, two fundamental language features are introduced: tuples and constructors/destructors.
     81While these features exist in prior programming languages, the contribution of this work is engineering these features into a highly complex type system.
     82C is an established language with a dedicated user-base.
     83An important goal is to add new features in a way that naturally feels like C, to appeal to this core user-base, and due to huge amounts of legacy code, maintaining backwards compatibility is crucial.
    8184
    8285\cleardoublepage
    8386%\newpage
    8487
    85 % A C K N O W L E D G E M E N T S
    86 % -------------------------------
     88% % A C K N O W L E D G E M E N T S
     89% % -------------------------------
    8790
    88 \begin{center}\textbf{Acknowledgements}\end{center}
     91% \begin{center}\textbf{Acknowledgements}\end{center}
    8992
    90 % I would like to thank all the little people who made this possible.
    91 TODO
    92 \cleardoublepage
    93 %\newpage
     93% % I would like to thank all the little people who made this possible.
     94% TODO
     95% \cleardoublepage
     96% %\newpage
    9497
    95 % D E D I C A T I O N
    96 % -------------------
     98% % D E D I C A T I O N
     99% % -------------------
    97100
    98 \begin{center}\textbf{Dedication}\end{center}
     101% \begin{center}\textbf{Dedication}\end{center}
    99102
    100 % This is dedicated to the one I love.
    101 TODO
    102 \cleardoublepage
    103 %\newpage
     103% % This is dedicated to the one I love.
     104% TODO
     105% \cleardoublepage
     106% %\newpage
    104107
    105108% T A B L E   O F   C O N T E N T S
     
    119122%\newpage
    120123
    121 % L I S T   O F   F I G U R E S
    122 % -----------------------------
    123 \addcontentsline{toc}{chapter}{List of Figures}
    124 \listoffigures
    125 \cleardoublepage
    126 \phantomsection         % allows hyperref to link to the correct page
    127 %\newpage
     124% % L I S T   O F   F I G U R E S
     125% % -----------------------------
     126% \addcontentsline{toc}{chapter}{List of Figures}
     127% \listoffigures
     128% \cleardoublepage
     129% \phantomsection               % allows hyperref to link to the correct page
     130% %\newpage
    128131
    129132% L I S T   O F   S Y M B O L S
  • doc/rob_thesis/thesis.tex

    r221c2de7 r154fdc8  
    6868\documentclass[letterpaper,12pt,titlepage,oneside,final]{book}
    6969
     70% For PDF, suitable for double-sided printing, change the PrintVersion variable below
     71% to "true" and use this \documentclass line instead of the one above:
     72% \documentclass[letterpaper,12pt,titlepage,openright,twoside,final]{book}
     73
    7074\usepackage[T1]{fontenc}                                % allow Latin1 (extended ASCII) characters
    7175\usepackage{textcomp}
    7276% \usepackage[utf8]{inputenc}
    73 \usepackage[latin1]{inputenc}
     77% \usepackage[latin1]{inputenc}
    7478\usepackage{fullpage,times,comment}
    7579% \usepackage{epic,eepic}
     
    9397\interfootnotelinepenalty=10000
    9498
    95 % For PDF, suitable for double-sided printing, change the PrintVersion variable below
    96 % to "true" and use this \documentclass line instead of the one above:
    97 %\documentclass[letterpaper,12pt,titlepage,openright,twoside,final]{book}
    98 
    9999% Some LaTeX commands I define for my own nomenclature.
    100100% If you have to, it's better to change nomenclature once here than in a
     
    136136    pdffitwindow=false,     % window fit to page when opened
    137137    pdfstartview={FitH},    % fits the width of the page to the window
    138     pdftitle={uWaterloo\ LaTeX\ Thesis\ Template},    % title: CHANGE THIS TEXT!
     138    pdftitle={Resource Management and Tuples in \CFA},    % title: CHANGE THIS TEXT!
    139139    pdfauthor={Rob Schluntz},    % author: CHANGE THIS TEXT! and uncomment this line
    140140%    pdfsubject={Subject},  % subject: CHANGE THIS TEXT! and uncomment this line
     
    225225\input{tuples}
    226226
     227\input{variadic}
     228
    227229\input{conclusions}
    228230
     
    282284\addcontentsline{toc}{chapter}{\textbf{References}}
    283285
    284 \bibliography{cfa}
     286\bibliography{cfa,thesis}
    285287% Tip 5: You can create multiple .bib files to organize your references.
    286288% Just list them all in the \bibliogaphy command, separated by commas (no spaces).
  • doc/rob_thesis/tuples.tex

    r221c2de7 r154fdc8  
    22\chapter{Tuples}
    33%======================================================================
    4 
    5 \section{Introduction}
    6 % TODO: named return values are not currently implemented in CFA - tie in with named tuples? (future work)
    7 % TODO: no passing input parameters by assignment, instead will have reference types => this is not a very C-like model and greatly complicates syntax for likely little gain (and would cause confusion with already supported return-by-rerefence)
    8 % TODO: tuples are allowed in expressions, exact meaning is defined by operator overloading (e.g. can add tuples). An important caveat to note is that it is currently impossible to allow adding two triples but prevent adding a pair with a quadruple (single flattening/structuring conversions are implicit, only total number of components matters). May be able to solve this with more nuanced conversion rules (future work)
    9 % TODO: benefits (conclusion) by Till: reduced number of variables and statements; no specified order of execution for multiple assignment (more optimzation freedom); can store parameter lists in variable; MRV routines (natural code); more convenient assignment statements; simple and efficient access of record fields; named return values more legible and efficient in use of storage
    104
    115\section{Multiple-Return-Value Functions}
     
    148This restriction results in code which emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}.
    159In the former situation, the function designer creates a record type that combines all of the return values into a single type.
    16 For example, consider a function returning the most frequently occuring letter in a string, and its frequency.
    17 % TODO: consider simplifying the example!
    18 %   Two things I like about this example:
    19 %   * it uses different types to illustrate why an array is insufficient (this is not necessary, but is nice)
    20 %   * it's complicated enough to show the uninitialized pitfall that exists in the aliasing example.
    21 %   Still, it may be a touch too complicated. Is there a simpler example with these two properties?
     10For example, consider a function returning the most frequently occurring letter in a string, and its frequency.
     11This example is complex enough to illustrate that an array is insufficient, since arrays are homogeneous, and demonstrates a potential pitfall that exists with aliasing.
    2212\begin{cfacode}
    2313struct mf_ret {
     
    7363const char * str = "hello world";
    7464char ch;                            // pre-allocate return value
    75 int freq = most_frequent(str, &ch); // pass return value as parameter
     65int freq = most_frequent(str, &ch); // pass return value as out parameter
    7666printf("%s -- %d %c\n", str, freq, ch);
    7767\end{cfacode}
    78 Notably, using this approach, the caller is directly responsible for allocating storage for the additional temporary return values.
    79 This complicates the call site with a sequence of variable declarations leading up to the call.
     68Notably, using this approach, the caller is directly responsible for allocating storage for the additional temporary return values, which complicates the call site with a sequence of variable declarations leading up to the call.
    8069Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from only the routine signature whether the callee expects such a parameter to be initialized before the call.
    8170Furthermore, while many C routines that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, there are many C routines that are not null-safe.
    8271On a related note, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler.
    83 There is a subtle bug in the previous example, in that @ret_ch@ is never assigned for a string that does not contain any letters, which can lead to undefined behaviour.
     72Interestingly, there is a subtle bug in the previous example, in that @ret_ch@ is never assigned for a string that does not contain any letters, which can lead to undefined behaviour.
     73In this particular case, it turns out that the frequency return value also doubles as an error code, where a frequency of 0 means the character return value should be ignored.
     74Still, not every routine with multiple return values should be required to return an error code, and error codes are easily ignored, so this is not a satisfying solution.
    8475As with the previous approach, this technique can simulate multiple return values, but in practice it is verbose and error prone.
    8576
     
    9081The expression resolution phase of the \CFA translator ensures that the correct form is used depending on the values being returned and the return type of the current function.
    9182A multiple-returning function with return type @T@ can return any expression that is implicitly convertible to @T@.
    92 Using the running example, the @most_frequent@ function can be written in using multiple return values as such,
     83Using the running example, the @most_frequent@ function can be written using multiple return values as such,
    9384\begin{cfacode}
    9485[int, char] most_frequent(const char * str) {
    9586  char freqs [26] = { 0 };
    9687  int ret_freq = 0;
    97   char ret_ch = 'a';
     88  char ret_ch = 'a';  // arbitrary default value for consistent results
    9889  for (int i = 0; str[i] != '\0'; ++i) {
    9990    if (isalpha(str[i])) {        // only count letters
     
    109100}
    110101\end{cfacode}
    111 This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type.
     102This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type, which precludes the bug seen with out-parameters.
    112103
    113104The addition of multiple-return-value functions necessitates a syntax for accepting multiple values at the call-site.
     
    136127In this case, there is only one option for a function named @most_frequent@ that takes a string as input.
    137128This function returns two values, one @int@ and one @char@.
    138 There are four options for a function named @process@, but only two which accept two arguments, and of those the best match is (3), which is also an exact match.
     129There are four options for a function named @process@, but only two that accept two arguments, and of those the best match is (3), which is also an exact match.
    139130This expression first calls @most_frequent("hello world")@, which produces the values @3@ and @'l'@, which are fed directly to the first and second parameters of (3), respectively.
    140131
     
    148139The previous expression has 3 \emph{components}.
    149140Each component in a tuple expression can be any \CFA expression, including another tuple expression.
    150 % TODO: Tuple expressions can appear anywhere that any other expression can appear (...?)
    151141The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization.
    152142It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used.
    153143Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}.
    154 % TODO: does this statement still apply, and if so to what extent?
    155 %   Tuples are a compile-time phenomenon and have little to no run-time presence.
    156144
    157145\subsection{Tuple Variables}
     
    166154These variables can be used in any of the contexts where a tuple expression is allowed, such as in the @printf@ function call.
    167155As in the @process@ example, the components of the tuple value are passed as separate parameters to @printf@, allowing very simple printing of tuple expressions.
    168 If the individual components are required, they can be extracted with a simple assignment, as in previous examples.
     156One way to access the individual components is with a simple assignment, as in previous examples.
    169157\begin{cfacode}
    170158int freq;
     
    221209In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@.
    222210For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@.
    223 Finally, in the call to @h@, @y@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@.
    224 The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure.
     211Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@.
     212The flexible structure of tuples permits a simple and expressive function-call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure.
    225213
    226214In \KWC \cite{Buhr94a,Till89}, a precursor to \CFA, there were 4 tuple coercions: opening, closing, flattening, and structuring.
    227215Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value.
    228 Flattening coerces a nested tuple into a flat tuple, i.e. it takes a tuple with tuple components and expands it into a tuple with only non-tuple components.
    229 Structuring moves in the opposite direction, i.e. it takes a flat tuple value and provides structure by introducing nested tuple components.
     216Flattening coerces a nested tuple into a flat tuple, \ie it takes a tuple with tuple components and expands it into a tuple with only non-tuple components.
     217Structuring moves in the opposite direction, \ie it takes a flat tuple value and provides structure by introducing nested tuple components.
    230218
    231219In \CFA, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations.
     
    254242\label{s:TupleAssignment}
    255243An assignment where the left side of the assignment operator has a tuple type is called tuple assignment.
    256 There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called Multiple and Mass Assignment, respectively.
     244There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called \emph{Multiple} and \emph{Mass} Assignment, respectively.
    257245\begin{cfacode}
    258246int x;
     
    272260A mass assignment assigns the value $R$ to each $L_i$.
    273261For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression.
    274 This differs from C cascading assignment (e.g. @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment.
     262These semantics differ from C cascading assignment (\eg @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment.
    275263For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@.
    276264On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well.
     
    288276These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted.
    289277These semantics are a change from the original tuple design in \KWC \cite{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case.
    290 This decision wa made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position.
      278Restricting tuple assignment to statements was an attempt to fix what was seen as a problem with side-effects, wherein assignment can be used in many different locations, such as in function-call argument position.
    291279While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA without affecting backwards compatibility.
    292280Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user.
     
    303291\end{cfacode}
    304292The tuple expression begins with a mass assignment of @1.5@ into @[b, d]@, which assigns @1.5@ into @b@, which is truncated to @1@, and @1.5@ into @d@, producing the tuple @[1, 1.5]@ as a result.
    305 That tuple is used as the right side of the multiple assignment (i.e., @[c, a] = [1, 1.5]@) that assigns @1@ into @c@ and @1.5@ into @a@, which is truncated to @1@, producing the result @[1, 1]@.
     293That tuple is used as the right side of the multiple assignment (\ie, @[c, a] = [1, 1.5]@) that assigns @1@ into @c@ and @1.5@ into @a@, which is truncated to @1@, producing the result @[1, 1]@.
    306294Finally, the tuple @[1, 1]@ is used as an expression in the call to @f@.
    307295
     
    315303void ?{}(S *, S);      // (4)
    316304
    317 [S, S] x = [3, 6.28];  // uses (2), (3)
    318 [S, S] y;              // uses (1), (1)
    319 [S, S] z = x.0;        // uses (4), (4)
    320 \end{cfacode}
    321 In this example, @x@ is initialized by the multiple constructor calls @?{}(&x.0, 3)@ and @?{}(&x.1, 6.28)@, while @y@ is initilaized by two default constructor calls @?{}(&y.0)@ and @?{}(&y.1)@.
     305[S, S] x = [3, 6.28];  // uses (2), (3), specialized constructors
     306[S, S] y;              // uses (1), (1), default constructor
     307[S, S] z = x.0;        // uses (4), (4), copy constructor
     308\end{cfacode}
     309In this example, @x@ is initialized by the multiple constructor calls @?{}(&x.0, 3)@ and @?{}(&x.1, 6.28)@, while @y@ is initialized by two default constructor calls @?{}(&y.0)@ and @?{}(&y.1)@.
    322310@z@ is initialized by mass copy constructor calls @?{}(&z.0, x.0)@ and @?{}(&z.1, x.0)@.
    323 Finally, @x@, @y@, and @z@ are destructed, i.e. the calls @^?{}(&x.0)@, @^?{}(&x.1)@, @^?{}(&y.0)@, @^?{}(&y.1)@, @^?{}(&z.0)@, and @^?{}(&z.1)@.
     311Finally, @x@, @y@, and @z@ are destructed, \ie the calls @^?{}(&x.0)@, @^?{}(&x.1)@, @^?{}(&y.0)@, @^?{}(&y.1)@, @^?{}(&z.0)@, and @^?{}(&z.1)@.
    324312
    325313It is possible to define constructors and assignment functions for tuple types that provide new semantics, if the existing semantics do not fit the needs of an application.
     
    339327S s = t;
    340328\end{cfacode}
    341 The initialization of @s@ with @t@ works by default, because @t@ is flattened into its components, which satisfies the generated field constructor @?{}(S *, int, double)@ to initialize the first two values.
     329The initialization of @s@ with @t@ works by default because @t@ is flattened into its components, which satisfies the generated field constructor @?{}(S *, int, double)@ to initialize the first two values.
    342330
    343331\section{Member-Access Tuple Expression}
     
    354342Then the type of @a.[x, y, z]@ is @[T_x, T_y, T_z]@.
    355343
    356 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (e.g. rearrange components, drop components, duplicate components, etc.).
     344Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg, rearrange components, drop components, duplicate components, etc.).
    357345\begin{cfacode}
    358346[int, int, long, double] x;
     
    384372Since \CFA permits these tuple-access expressions using structures, unions, and tuples, \emph{member tuple expression} or \emph{field tuple expression} is more appropriate.
    385373
    386 It could be possible to extend member-access expressions further.
     374It is possible to extend member-access expressions further.
    387375Currently, a member-access expression whose member is a name requires that the aggregate is a structure or union, while a constant integer member requires the aggregate to be a tuple.
    388376In the interest of orthogonal design, \CFA could apply some meaning to the remaining combinations as well.
     
    398386z.y;  // ???
    399387\end{cfacode}
    400 One possiblity is for @s.1@ to select the second member of @s@.
     388One possibility is for @s.1@ to select the second member of @s@.
    401389Under this interpretation, it becomes possible to not only access members of a struct by name, but also by position.
    402390Likewise, it seems natural to open this mechanism to enumerations as well, wherein the left side would be a type, rather than an expression.
    403 One benefit of this interpretation is familiar, since it is extremely reminiscent of tuple-index expressions.
     391One benefit of this interpretation is familiarity, since it is extremely reminiscent of tuple-index expressions.
    404392On the other hand, it could be argued that this interpretation is brittle in that changing the order of members or adding new members to a structure becomes a brittle operation.
    405 This problem is less of a concern with tuples, since modifying a tuple affects only the code which directly uses that tuple, whereas modifying a structure has far reaching consequences with every instance of the structure.
    406 
    407 As for @z.y@, a natural interpretation would be to extend the meaning of member tuple expressions.
    408 That is, currently the tuple must occur as the member, i.e. to the right of the dot.
     393This problem is less of a concern with tuples, since modifying a tuple affects only the code that directly uses the tuple, whereas modifying a structure has far reaching consequences for every instance of the structure.
     394
     395As for @z.y@, one interpretation is to extend the meaning of member tuple expressions.
     396That is, currently the tuple must occur as the member, \ie to the right of the dot.
    409397Allowing tuples to the left of the dot could distribute the member across the elements of the tuple, in much the same way that member tuple expressions distribute the aggregate across the member tuple.
    410398In this example, @z.y@ expands to @[z.0.y, z.1.y]@, allowing what is effectively a very limited compile-time field-sections map operation, where the argument must be a tuple containing only aggregates having a member named @y@.
    411 It is questionable how useful this would actually be in practice, since generally structures are not designed to have names in common with other structures, and further this could cause maintainability issues in that it encourages programmers to adopt very simple naming conventions, to maximize the amount of overlap between different types.
     399It is questionable how useful this would actually be in practice, since structures often do not have names in common with other structures, and further this could cause maintainability issues in that it encourages programmers to adopt very simple naming conventions to maximize the amount of overlap between different types.
    412400Perhaps more useful would be to allow arrays on the left side of the dot, which would likewise allow mapping a field access across the entire array, producing an array of the contained fields.
    413401The immediate problem with this idea is that C arrays do not carry around their size, which would make it impossible to use this extension for anything other than a simple stack allocated array.
    414402
    415 Supposing this feature works as described, it would be necessary to specify an ordering for the expansion of member access expressions versus member tuple expressions.
     403Supposing this feature works as described, it would be necessary to specify an ordering for the expansion of member-access expressions versus member-tuple expressions.
    416404\begin{cfacode}
    417405struct { int x, y; };
     
    426414\end{cfacode}
    427415Depending on exactly how the two tuples are combined, different results can be achieved.
    428 As such, a specific ordering would need to be imposed in order for this feature to be useful.
    429 Furthermore, this addition moves a member tuple expression's meaning from being clear statically to needing resolver support, since the member name needs to be distributed appropriately over each member of the tuple, which could itself be a tuple.
    430 
    431 Ultimately, both of these extensions introduce complexity into the model, with relatively little peceived benefit.
     416As such, a specific ordering would need to be imposed to make this feature useful.
     417Furthermore, this addition moves a member-tuple expression's meaning from being clear statically to needing resolver support, since the member name needs to be distributed appropriately over each member of the tuple, which could itself be a tuple.
     418
     419A second possibility is for \CFA to have named tuples, as they exist in Swift and D.
     420\begin{cfacode}
     421typedef [int x, int y] Point2D;
     422Point2D p1, p2;
     423p1.x + p1.y + p2.x + p2.y;
     424p1.0 + p1.1 + p2.0 + p2.1;  // equivalent
     425\end{cfacode}
     426In this simpler interpretation, a tuple type carries with it a list of possibly empty identifiers.
     427This approach fits naturally with the named return-value feature, and would likely go a long way towards implementing it.
     428
     429Ultimately, the first two extensions introduce complexity into the model, with relatively little perceived benefit, and so were dropped from consideration.
     430Named tuples are a potentially useful addition to the language, provided they can be parsed with a reasonable syntax.
     431
    432432
    433433\section{Casting}
    434434In C, the cast operator is used to explicitly convert between types.
    435 In \CFA, the cast operator has a secondary use, which is type ascription.
      435In \CFA, the cast operator has a secondary use, which is type ascription, since it forces the expression resolution algorithm to choose the lowest cost conversion to the target type.
    436436That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function.
    437437\begin{cfacode}
     
    442442(int)f();  // choose (2)
    443443\end{cfacode}
    444 Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples.
     444Since casting is a fundamental operation in \CFA, casts need to be given a meaningful interpretation in the context of tuples.
    445445Taking a look at standard C provides some guidance with respect to the way casts should work with tuples.
    446446\begin{cfacode}[numbers=left]
     
    448448void g();
    449449
    450 (void)f();
    451 (int)g();
     450(void)f();  // valid, ignore results
     451(int)g();   // invalid, void cannot be converted to int
    452452
    453453struct A { int x; };
    454 (struct A)f();
     454(struct A)f();  // invalid, int cannot be converted to A
    455455\end{cfacode}
    456456In C, line 4 is a valid cast, which calls @f@ and discards its result.
    457457On the other hand, line 5 is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical.
    458 Finally, line 8 is also invalid, because in C casts only provide conversion between scalar types \cite{C11}.
    459 For consistency, this implies that any case wherein the number of components increases as a result of the cast is invalid, while casts which have the same or fewer number of components may be valid.
     458Finally, line 8 is also invalid, because in C casts only provide conversion between scalar types \cite[p.~91]{C11}.
      459For consistency, this implies that any case wherein the number of components increases as a result of the cast is invalid, while casts that have the same number or fewer components may be valid.
    460460
    461461Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$.
     
    468468  [int, [int, int], int] g();
    469469
    470   ([int, double])f();           // (1)
    471   ([int, int, int])g();         // (2)
    472   ([void, [int, int]])g();      // (3)
    473   ([int, int, int, int])g();    // (4)
    474   ([int, [int, int, int]])g();  // (5)
     470  ([int, double])f();           // (1) valid
     471  ([int, int, int])g();         // (2) valid
     472  ([void, [int, int]])g();      // (3) valid
     473  ([int, int, int, int])g();    // (4) invalid
     474  ([int, [int, int, int]])g();  // (5) invalid
    475475\end{cfacode}
    476476
     
    479479If @g@ is free of side effects, this is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@.
     481480Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@.
    481 
    482481% will this always hold true? probably, as constructors should give all of the conversion power we need. if casts become function calls, what would they look like? would need a way to specify the target type, which seems awkward. Also, C++ basically only has this because classes are closed to extension, while we don't have that problem (can have floating constructors for any type).
    483482Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions.
     
    509508\end{cfacode}
    510509Note that due to the implicit tuple conversions, this function is not restricted to the addition of two triples.
    511 A call to this plus operator type checks as long as a total of 6 non-tuple arguments are passed after flattening, and all of the arguments have a common type which can bind to @T@, with a pairwise @?+?@ over @T@.
    512 For example, these expressions will also succeed and produce the same value.
    513 \begin{cfacode}
    514 ([x.0, x.1]) + ([x.2, 10, 20, 30]);
    515 x.0 + ([x.1, x.2, 10, 20, 30]);
     510A call to this plus operator type checks as long as a total of 6 non-tuple arguments are passed after flattening, and all of the arguments have a common type that can bind to @T@, with a pairwise @?+?@ over @T@.
     511For example, these expressions also succeed and produce the same value.
     512\begin{cfacode}
     513([x.0, x.1]) + ([x.2, 10, 20, 30]);  // x + ([10, 20, 30])
     514x.0 + ([x.1, x.2, 10, 20, 30]);      // x + ([10, 20, 30])
    516515\end{cfacode}
    517516This presents a potential problem if structure is important, as these three expressions look like they should have different meanings.
    518 Further, these calls can be made ambiguous by adding seemingly different functions.
     517Furthermore, these calls can be made ambiguous by introducing seemingly different functions.
    519518\begin{cfacode}
    520519forall(otype T | { T ?+?(T, T); })
     
    524523\end{cfacode}
    525524It is also important to note that these calls could be disambiguated if the function return types were different, as they likely would be for a reasonable implementation of @?+?@, since the return type is used in overload resolution.
    526 Still, this is a deficiency of the current argument matching algorithm, and depending on the function, differing return values may not always be appropriate.
    527 It's possible that this could be rectified by applying an appropriate cost to the structuring and flattening conversions, which are currently 0-cost conversions.
     525Still, these semantics are a deficiency of the current argument matching algorithm, and depending on the function, differing return values may not always be appropriate.
     526These issues could be rectified by applying an appropriate cost to the structuring and flattening conversions, which are currently 0-cost conversions.
    528527Care would be needed in this case to ensure that exact matches do not incur such a cost.
    529528\begin{cfacode}
     
    536535\end{cfacode}
    537536
    538 Until this point, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (i.e. no implicit conversions are applied to assertion arguments).
     537Until this point, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie, no implicit conversions are applied to assertion arguments).
    539538This decision presents a conflict with the flexibility of tuples.
    540539\subsection{Assertion Inference}
     
    568567}
    569568\end{cfacode}
    570 Is transformed into
     569is transformed into
    571570\begin{cfacode}
    572571forall(dtype T0, dtype T1 | sized(T0) | sized(T1))
    573 struct _tuple2 {  // generated before the first 2-tuple
     572struct _tuple2_ {  // generated before the first 2-tuple
    574573  T0 field_0;
    575574  T1 field_1;
     
    578577  _tuple2_(double, double) x;
    579578  forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2))
    580   struct _tuple3 {  // generated before the first 3-tuple
     579  struct _tuple3_ {  // generated before the first 3-tuple
    581580    T0 field_0;
    582581    T1 field_1;
     
    591590[5, 'x', 1.24];
    592591\end{cfacode}
    593 Becomes
     592becomes
    594593\begin{cfacode}
    595594(_tuple3_(int, char, double)){ 5, 'x', 1.24 };
     
    605604f(x, 'z');
    606605\end{cfacode}
    607 Is transformed into
     606is transformed into
    608607\begin{cfacode}
    609608void f(int, _tuple2_(double, char));
     
    617616In the call to @f@, the second and third argument components are structured into a tuple argument.
    618617
    619 Expressions which may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion.
     618Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion.
    620619Each unique expression is assigned an identifier and is guaranteed to be executed exactly once.
    621620\begin{cfacode}
     
    624623g(h());
    625624\end{cfacode}
    626 Interally, this is converted to
     625Internally, this is converted to pseudo-\CFA
    627626\begin{cfacode}
    628627void g(int, double);
    629628[int, double] h();
    630 let unq<0> = f() : g(unq<0>.0, unq<0>.1);  // notation?
    631 \end{cfacode}
     629lazy [int, double] unq0 = h(); // deferred execution
     630g(unq0.0, unq0.1);             // execute h() once
     631\end{cfacode}
     632That is, the function @h@ is evaluated lazily and its result is stored for subsequent accesses.
    632633Ultimately, unique expressions are converted into two variables and an expression.
    633634\begin{cfacode}
     
    638639[int, double] _unq0;
    639640g(
    640   (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0,
    641   (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1,
     641  (_unq0_finished_ ? _unq0 : (_unq0 = h(), _unq0_finished_ = 1, _unq0)).0,
     642  (_unq0_finished_ ? _unq0 : (_unq0 = h(), _unq0_finished_ = 1, _unq0)).1,
    642643);
    643644\end{cfacode}
     
    646647Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression.
    647648
    648 Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure.
    649 This notion could be made more precise for certain intrinsic, autogenerated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions.
    650 It's possible that unique expressions could be exposed to the user through a language feature, but it's not immediately obvious that there is a benefit to doing so.
    651 
    652 Tuple member expressions are recursively expanded into a list of member access expressions.
     649Currently, the \CFA translator has a very broad, imprecise definition of impurity (side-effects), where every function call is assumed to be impure.
     650This notion could be made more precise for certain intrinsic, auto-generated, and built-in functions, and could analyze function bodies, when they are available, to recursively detect impurity, to eliminate some unique expressions.
     651It is possible that lazy evaluation could be exposed to the user through a lazy keyword with little additional effort.
     652
     653Tuple-member expressions are recursively expanded into a list of member-access expressions.
    653654\begin{cfacode}
     654655[int, [double, int, double], int] x;
    655656x.[0, 1.[0, 2]];
    656657\end{cfacode}
    657 Which becomes
     658becomes
    658659\begin{cfacode}
    659660[x.0, [x.1.0, x.1.2]];
    660661\end{cfacode}
    661 Tuple member expressions also take advantage of unique expressions in the case of possible impurity.
     662Tuple-member expressions also take advantage of unique expressions in the case of possible impurity.
    662663
    663664Finally, the various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions.
     
    670671[x, y, z] = 1.5;            // mass assignment
    671672\end{cfacode}
    672 Generates the following
     673generates the following
    673674\begin{cfacode}
    674675// [x, y, z] = 1.5;
    675676_tuple3_(int, double, int) _tmp_stmtexpr_ret0;
    676 ({
     677({ // GNU C statement expression
    677678  // assign LHS address temporaries
    678679  int *__massassign_L0 = &x;    // ?{}
     
    689690    int *__multassign_L2 = (int *)&_tmp_stmtexpr_ret0.2;       // ?{}
    690691
    691     // assign RHS value temporaries and perform mass assignment to L0, L1, L2
    692     int __multassign_R0 = (*__massassign_L0=(int)__massassign_R0);   // ?{}
    693     double __multassign_R1 = (*__massassign_L1=__massassign_R0);     // ?{}
    694     int __multassign_R2 = (*__massassign_L2=(int)__massassign_R0);   // ?{}
     692    // assign RHS value temporaries and mass-assign to L0, L1, L2
     693    int __multassign_R0 = (*__massassign_L0=(int)__massassign_R0); // ?{}
     694    double __multassign_R1 = (*__massassign_L1=__massassign_R0);   // ?{}
     695    int __multassign_R2 = (*__massassign_L2=(int)__massassign_R0); // ?{}
    695696
    696697    // perform construction of statement expr return variable using
     
    711712});
    712713\end{cfacode}
    713 A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, e.g. in a unique expression.
      714A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple times, \eg, in a unique expression.
    714715$N$ LHS variables are generated and constructed using the address of the tuple components, and a single RHS variable is generated to store the value of the RHS without any loss of precision.
    715716A nested statement expression is generated that performs the individual assignments and constructs the return value using the results of the individual assignments.
     
    720721[x, y, z] = [f(), 3];       // multiple assignment
    721722\end{cfacode}
    722 Generates
     723generates the following
    723724\begin{cfacode}
    724725// [x, y, z] = [f(), 3];
     
    741742      _tmp_cp_ret0 :
    742743      (_tmp_cp_ret0=f(), _unq0_finished_=1, _tmp_cp_ret0)).1; // ?{}
    743   ({ // tuple destruction - destruct f() return temporary - tuple destruction
     744  ({ // tuple destruction - destruct f() return temporary
    744745    // assign LHS address temporaries
    745746    double *__massassign_L3 = (double *)&_tmp_cp_ret0.0;  // ?{}
     
    757758    int *__multassign_L5 = (int *)&_tmp_stmtexpr_ret0.2;       // ?{}
    758759
    759     // assign RHS value temporaries and perform multiple assignment to L0, L1, L2
     760    // assign RHS value temporaries and multiple-assign to L0, L1, L2
    760761    int __multassign_R3 = (*__multassign_L0=(int)__multassign_R0);  // ?{}
    761762    double __multassign_R4 = (*__multassign_L1=__multassign_R1);    // ?{}
     
    785786The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language.
    786787There are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this is not a new restriction.
    787 
    788 \section{Variadic Functions}
    789 % TODO: should this maybe be its own chapter?
    790 C provides variadic functions through the manipulation of @va_list@ objects.
    791 A variadic function is one which contains at least one parameter, followed by @...@ as the last token in the parameter list.
    792 In particular, some form of \emph{argument descriptor} is needed to inform the function of the number of arguments and their types.
    793 Two common argument descriptors are format strings or and counter parameters.
    794 It's important to note that both of these mechanisms are inherently redundant, because they require the user to specify information that the compiler knows explicitly.
    795 This required repetition is error prone, because it's easy for the user to add or remove arguments without updating the argument descriptor.
    796 In addition, C requires the programmer to hard code all of the possible expected types.
    797 As a result, it is cumbersome to write a function that is open to extension.
    798 For example, a simple function which sums $N$ @int@s,
    799 \begin{cfacode}
    800 int sum(int N, ...) {
    801   va_list args;
    802   va_start(args, N);
    803   int ret = 0;
    804   while(N) {
    805     ret += va_arg(args, int);  // have to specify type
    806     N--;
    807   }
    808   va_end(args);
    809   return ret;
    810 }
    811 sum(3, 10, 20, 30);  // need to keep counter in sync
    812 \end{cfacode}
    813 The @va_list@ type is a special C data type that abstracts variadic argument manipulation.
    814 The @va_start@ macro initializes a @va_list@, given the last named parameter.
    815 Each use of the @va_arg@ macro allows access to the next variadic argument, given a type.
    816 Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call.
    817 As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures.
    818 In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined \cite{C11}.
    819 Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body.
    820 Since they rely on programmer convention rather than compile-time checks, variadic functions are generally unsafe.
    821 
    822 In practice, compilers can provide warnings to help mitigate some of the problems.
    823 For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers.
    824 Unfortunately, this does not permit extensions to the format string syntax, so a programmer cannot extend the attribute to warn for mismatches with custom types.
    825 
    826 Needless to say, C's variadic functions are a deficient language feature.
    827 Two options were examined to provide better, type-safe variadic functions in \CFA.
    828 \subsection{Whole Tuple Matching}
    829 Option 1 is to change the argument matching algorithm, so that type parameters can match whole tuples, rather than just their components.
    830 This option could be implemented with two phases of argument matching when a function contains type parameters and the argument list contains tuple arguments.
    831 If flattening and structuring fail to produce a match, a second attempt at matching the function and argument combination is made where tuple arguments are not expanded and structure must match exactly, modulo non-tuple implicit conversions.
    832 For example:
    833 \begin{cfacode}
    834   forall(otype T, otype U | { T g(U); })
    835   void f(T, U);
    836 
    837   [int, int] g([int, int, int, int]);
    838 
    839   f([1, 2], [3, 4, 5, 6]);
    840 \end{cfacode}
    841 With flattening and structuring, the call is first transformed into @f(1, 2, 3, 4, 5, 6)@.
    842 Since the first argument of type @T@ does not have a tuple type, unification decides that @T=int@ and @1@ is matched as the first parameter.
    843 Likewise, @U@ does not have a tuple type, so @U=int@ and @2@ is accepted as the second parameter.
    844 There are now no remaining formal parameters, but there are remaining arguments and the function is not variadic, so the match fails.
    845 
    846 With the addition of an exact matching attempt, @T=[int,int]@ and @U=[int,int,int,int]@ and so the arguments type check.
    847 Likewise, when inferring assertion @g@, an exact match is found.
    848 
    849 This approach is strict with respect to argument structure by nature, which makes it syntactically awkward to use in ways that the existing tuple design is not.
    850 For example, consider a @new@ function which allocates memory using @malloc@ and constructs the result, using arbitrary arguments.
    851 \begin{cfacode}
    852 struct Array;
    853 void ?{}(Array *, int, int, int);
    854 
    855 forall(dtype T, otype Params | sized(T) | { void ?{}(T *, Params); })
    856 T * new(Params p) {
    857   return malloc(){ p };
    858 }
    859 Array(int) * x = new([1, 2, 3]);
    860 \end{cfacode}
    861 The call to @new@ is not particularly appealing, since it requires the use of square brackets at the call-site, which is not required in any other function call.
    862 This shifts the burden from the compiler to the programmer, which is almost always wrong, and creates an odd inconsistency within the language.
    863 Similarly, in order to pass 0 variadic arguments, an explicit empty tuple must be passed into the argument list, otherwise the exact matching rule would not have an argument to bind against.
    864 
    865 It should be otherwise noted that the addition of an exact matching rule only affects the outcome for polymorphic type binding when tuples are involved.
    866 For non-tuple arguments, exact matching and flattening \& structuring are equivalent.
    867 For tuple arguments to a function without polymorphic formal parameters, flattening and structuring work whenever an exact match would have worked, since the tuple is flattened and implicitly restructured to its original structure.
    868 Thus there is nothing to be gained from permitting the exact matching rule to take effect when a function does not contain polymorphism and none of the arguments are tuples.
    869 
    870 Overall, this option takes a step in the right direction, but is contrary to the flexibility of the existing tuple design.
    871 
    872 \subsection{A New Typeclass}
    873 A second option is the addition of another kind of type parameter, @ttype@.
    874 Matching against a @ttype@ parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types.
    875 In a given parameter list, there should be at most one @ttype@ parameter that must occur last, otherwise the call can never resolve, given the previous rule.
    876 % TODO: a similar rule exists in C/C++ for "..."
    877 This idea essentially matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates.
    878 As such, @ttype@ variables will also be referred to as argument packs.
    879 This is the option that has been added to \CFA.
    880 
    881 Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is through recursion.
    882 Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful.
    883 Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled.
    884 
    885 For example, a simple translation of the C sum function using @ttype@ would look like
    886 \begin{cfacode}
    887 int sum(){ return 0; }        // (0)
    888 forall(ttype Params | { int sum(Params); })
    889 int sum(int x, Params rest) { // (1)
    890   return x+sum(rest);
    891 }
    892 sum(10, 20, 30);
    893 \end{cfacode}
    894 Since (0) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@.
    895 In order to call (1), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@.
    896 In order to finish the resolution of @sum@, an assertion parameter which matches @int sum(int, int)@ is required.
    897 Like in the previous iteration, (0) is not a valid candidate, so (1) is examined with @Params@ bound to @[int]@, requiring the assertion @int sum(int)@.
    898 Next, (0) fails, and to satisfy (1) @Params@ is bound to @[]@, requiring an assertion @int sum()@.
    899 Finally, (0) matches and (1) fails, which terminates the recursion.
    900 Effectively, this traces as @sum(10, 20, 30)@ $\rightarrow$ @10+sum(20, 30)@ $\rightarrow$ @10+(20+sum(30))@ $\rightarrow$ @10+(20+(30+sum()))@ $\rightarrow$ @10+(20+(30+0))@.
    901 
    902 A point of note is that this version does not require any form of argument descriptor, since the \CFA type system keeps track of all of these details.
    903 It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments, which could be done simply
    904 \begin{cfacode}
    905 int sum(int x, int y){
    906   return x+y;
    907 }
    908 forall(ttype Params | { int sum(int, Params); })
    909 int sum(int x, int y, Params rest) {
    910   return sum(x+y, rest);
    911 }
    912 sum(10, 20, 30);
    913 \end{cfacode}
    914 
    915 One more iteration permits the summation of any summable type, as long as all arguments are the same type.
    916 \begin{cfacode}
    917 trait summable(otype T) {
    918   T ?+?(T, T);
    919 };
    920 forall(otype R | summable(R))
    921 R sum(R x, R y){
    922   return x+y;
    923 }
    924 forall(otype R, ttype Params
    925   | summable(R)
    926   | { R sum(R, Params); })
    927 R sum(R x, R y, Params rest) {
    928   return sum(x+y, rest);
    929 }
    930 sum(3, 10, 20, 30);
    931 \end{cfacode}
    932 Unlike C, it is not necessary to hard code the expected type.
    933 This is naturally open to extension, in that any user-defined type with a @?+?@ operator is automatically able to be used with the @sum@ function.
    934 That is to say, the programmer who writes @sum@ does not need full program knowledge of every possible data type, unlike what is necessary to write an equivalent function using the standard C mechanisms.
    935 
    936 Going one last step, it is possible to achieve full generality in \CFA, allowing the summation of arbitrary lists of summable types.
    937 \begin{cfacode}
    938 trait summable(otype T1, otype T2, otype R) {
    939   R ?+?(T1, T2);
    940 };
    941 forall(otype T1, otype T2, otype R | summable(T1, T2, R))
    942 R sum(T1 x, T2 y) {
    943   return x+y;
    944 }
    945 forall(otype T1, otype T2, otype T3, ttype Params, otype R
    946   | summable(T1, T2, T3)
    947   | { R sum(T3, Params); })
    948 R sum(T1 x, T2 y, Params rest ) {
    949   return sum(x+y, rest);
    950 }
    951 sum(3, 10.5, 20, 30.3);
    952 \end{cfacode}
    953 The \CFA translator requires adding explicit @double ?+?(int, double)@ and @double ?+?(double, int)@ functions for this call to work, since implicit conversions are not supported for assertions.
    954 
    955 C variadic syntax and @ttype@ polymorphism probably should not be mixed, since it is not clear where to draw the line to decide which arguments belong where.
    956 Furthermore, it might be desirable to disallow polymorphic functions from using C variadic syntax to encourage a Cforall style.
    957 Aside from calling C variadic functions, it is not obvious that there is anything that can be done with C variadics that could not also be done with @ttype@ parameters.
    958 
    959 Variadic templates in \CC require an ellipsis token to express that a parameter is a parameter pack and to expand a parameter pack.
    960 \CFA does not need an ellipsis in either case, since the type class @ttype@ is only used for variadics.
    961 An alternative design could have used an ellipsis combined with an existing type class.
    962 This approach was not taken because the largest benefit of the ellipsis token in \CC is the ability to expand a parameter pack within an expression, e.g. in fold expressions, which requires compile-time knowledge of the structure of the parameter pack, which is not available in \CFA.
    963 \begin{cppcode}
    964 template<typename... Args>
    965 void f(Args &... args) {
    966   g(&args...);  // expand to addresses of pack elements
    967 }
    968 \end{cppcode}
    969 As such, the addition of an ellipsis token would be purely an aesthetic change in \CFA today.
    970 
    971 It is possible to write a type-safe variadic print routine, which can replace @printf@
    972 \begin{cfacode}
    973 struct S { int x, y; };
    974 forall(otype T, ttype Params |
    975   { void print(T); void print(Params); })
    976 void print(T arg, Params rest) {
    977   print(arg);
    978   print(rest);
    979 }
    980 void print(char * x) { printf("%s", x); }
    981 void print(int x) { printf("%d", x);  }
    982 void print(S s) { print("{ ", s.x, ",", s.y, " }"); }
    983 print("s = ", (S){ 1, 2 }, "\n");
    984 \end{cfacode}
    985 This example routine showcases a variadic-template-like decomposition of the provided argument list.
    986 The individual @print@ routines allow printing a single element of a type.
    987 The polymorphic @print@ allows printing any list of types, as long as each individual type has a @print@ function.
    988 The individual print functions can be used to build up more complicated @print@ routines, such as for @S@, which is something that cannot be done with @printf@ in C.
    989 
    990 It is also possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions.
    991 For example, it is possible to write @new@ as a library function, providing a type-safe combination of @malloc@ and constructor calls.
    993 \begin{cfacode}
    994 struct Array;
    995 void ?{}(Array *, int, int, int);
    996 
    997 forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); })
    998 T * new(Params p) {
    999   return malloc(){ p }; // construct result of malloc
    1000 }
    1001 Array * x = new(1, 2, 3);
    1002 \end{cfacode}
    1003 The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects.
    1004 This provides the type-safety of @new@ in \CC, without the need to specify the allocated type, thanks to return-type inference.
    1005 
    1006 In the call to @new@, @Array@ is selected to match @T@, and @Params@ is expanded to match @[int, int, int]@. To satisfy the assertions, a constructor with an interface compatible with @void ?{}(Array *, int, int, int)@ must exist in the current scope.
    1007 
    1008 \subsection{Implementation}
    1009 
    1010 The definition of @new@
    1011 \begin{cfacode}
    1012 forall(dtype T | sized(T)) T * malloc();
    1013 
    1014 forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); })
    1015 T * new(Params p) {
    1016   return malloc(){ p }; // construct result of malloc
    1017 }
    1018 \end{cfacode}
    1019 Generates the following
    1020 \begin{cfacode}
    1021 void *malloc(long unsigned int _sizeof_T, long unsigned int _alignof_T);
    1022 
    1023 void *new(
    1024   void (*_adapter_)(void (*)(), void *, void *),
    1025   long unsigned int _sizeof_T,
    1026   long unsigned int _alignof_T,
    1027   long unsigned int _sizeof_Params,
    1028   long unsigned int _alignof_Params,
    1029   void (* _ctor_T)(void *, void *),
    1030   void *p
    1031 ){
    1032   void *_retval_new;
    1033   void *_tmp_cp_ret0;
    1034   void *_tmp_ctor_expr0;
    1035   _retval_new=
    1036     (_adapter_(_ctor_T,
    1037       (_tmp_ctor_expr0=(_tmp_cp_ret0=malloc(_sizeof_2tT, _alignof_2tT),
    1038         _tmp_cp_ret0)),
    1039       p),
    1040     _tmp_ctor_expr0); // ?{}
    1041   *(void **)&_tmp_cp_ret0; // ^?{}
    1042   return _retval_new;
    1043 }
    1044 \end{cfacode}
    1045 The constructor for @T@ is called indirectly through the adapter function on the result of @malloc@ and the parameter pack.
    1046 The variable that was allocated and constructed is then returned from @new@.
    1047 
    1048 A call to @new@
    1049 \begin{cfacode}
    1050 struct S { int x, y; };
    1051 void ?{}(S *, int, int);
    1052 
    1053 S * s = new(3, 4);
    1054 \end{cfacode}
    1055 Generates the following
    1056 \begin{cfacode}
    1057 struct _tuple2_ {  // _tuple2_(T0, T1)
    1058   void *field_0;
    1059   void *field_1;
    1060 };
    1061 struct _conc__tuple2_0 {  // _tuple2_(int, int)
    1062   int field_0;
    1063   int field_1;
    1064 };
    1065 struct _conc__tuple2_0 _tmp_cp1;  // tuple argument to new
    1066 struct S *_tmp_cp_ret1;           // return value from new
    1067 void _thunk0(  // ?{}(S *, [int, int])
    1068   struct S *_p0,
    1069   struct _conc__tuple2_0 _p1
    1070 ){
    1071   _ctor_S(_p0, _p1.field_0, _p1.field_1);  // restructure tuple parameter
    1072 }
    1073 void _adapter(void (*_adaptee)(), void *_p0, void *_p1){
    1074   // apply adaptee to arguments after casting to actual types
    1075   ((void (*)(struct S *, struct _conc__tuple2_0))_adaptee)(
    1076     _p0,
    1077     *(struct _conc__tuple2_0 *)_p1
    1078   );
    1079 }
    1080 struct S *s = (struct S *)(_tmp_cp_ret1=
    1081   new(
    1082     _adapter,
    1083     sizeof(struct S),
    1084     __alignof__(struct S),
    1085     sizeof(struct _conc__tuple2_0),
    1086     __alignof__(struct _conc__tuple2_0),
    1087     (void (*)(void *, void *))&_thunk0,
    1088     (({ // copy construct tuple argument to new
    1089       int *__multassign_L0 = (int *)&_tmp_cp1.field_0;
    1090       int *__multassign_L1 = (int *)&_tmp_cp1.field_1;
    1091       int __multassign_R0 = 3;
    1092       int __multassign_R1 = 4;
    1093       ((*__multassign_L0=__multassign_R0 /* ?{} */) ,
    1094        (*__multassign_L1=__multassign_R1 /* ?{} */));
    1095     }), &_tmp_cp1)
    1096   ), _tmp_cp_ret1);
    1097 *(struct S **)&_tmp_cp_ret1; // ^?{}  // destroy return value from new
    1098 ({  // destroy argument temporary
    1099   int *__massassign_L0 = (int *)&_tmp_cp1.field_0;
    1100   int *__massassign_L1 = (int *)&_tmp_cp1.field_1;
    1101   ((*__massassign_L0 /* ^?{} */) , (*__massassign_L1 /* ^?{} */));
    1102 });
    1103 \end{cfacode}
    1104 Of note, @_thunk0@ is generated to translate calls to @?{}(S *, [int, int])@ into calls to @?{}(S *, int, int)@.
    1105 The call to @new@ constructs a tuple argument using the supplied arguments.
    1106 
    1107 The @print@ function
    1108 \begin{cfacode}
    1109 forall(otype T, ttype Params |
    1110   { void print(T); void print(Params); })
    1111 void print(T arg, Params rest) {
    1112   print(arg);
    1113   print(rest);
    1114 }
    1115 \end{cfacode}
    1116 Generates
    1117 \begin{cfacode}
    1118 void print_variadic(
    1119   void (*_adapterF_7tParams__P)(void (*)(), void *),
    1120   void (*_adapterF_2tT__P)(void (*)(), void *),
    1121   void (*_adapterF_P2tT2tT__MP)(void (*)(), void *, void *),
    1122   void (*_adapterF2tT_P2tT2tT_P_MP)(void (*)(), void *, void *, void *),
    1123   long unsigned int _sizeof_T,
    1124   long unsigned int _alignof_T,
    1125   long unsigned int _sizeof_Params,
    1126   long unsigned int _alignof_Params,
    1127   void *(*_assign_TT)(void *, void *),
    1128   void (*_ctor_T)(void *),
    1129   void (*_ctor_TT)(void *, void *),
    1130   void (*_dtor_T)(void *),
    1131   void (*print_T)(void *),
    1132   void (*print_Params)(void *),
    1133   void *arg,
    1134   void *rest
    1135 ){
    1136   void *_tmp_cp0 = __builtin_alloca(_sizeof_T);
    1137   _adapterF_2tT__P(  // print(arg)
    1138     ((void (*)())print_T),
    1139     (_adapterF_P2tT2tT__MP( // copy construct argument
    1140       ((void (*)())_ctor_TT),
    1141       _tmp_cp0,
    1142       arg
    1143     ), _tmp_cp0)
    1144   );
    1145   _dtor_T(_tmp_cp0);  // destroy argument temporary
    1146   _adapterF_7tParams__P(  // print(rest)
    1147     ((void (*)())print_Params),
    1148     rest
    1149   );
    1150 }
    1151 \end{cfacode}
    1152 The @print_T@ routine is called indirectly through an adapter function with a copy constructed argument, followed by an indirect call to @print_Params@.
    1153 
    1154 A call to print
    1155 \begin{cfacode}
    1156 void print(const char * x) { printf("%s", x); }
    1157 void print(int x) { printf("%d", x);  }
    1158 
    1159 print("x = ", 123, ".\n");
    1160 \end{cfacode}
    1161 Generates the following
    1162 \begin{cfacode}
    1163 void print_string(const char *x){
    1164   int _tmp_cp_ret0;
    1165   (_tmp_cp_ret0=printf("%s", x)) , _tmp_cp_ret0;
    1166   *(int *)&_tmp_cp_ret0; // ^?{}
    1167 }
    1168 void print_int(int x){
    1169   int _tmp_cp_ret1;
    1170   (_tmp_cp_ret1=printf("%d", x)) , _tmp_cp_ret1;
    1171   *(int *)&_tmp_cp_ret1; // ^?{}
    1172 }
    1173 
    1174 struct _tuple2_ {  // _tuple2_(T0, T1)
    1175   void *field_0;
    1176   void *field_1;
    1177 };
    1178 struct _conc__tuple2_0 {  // _tuple2_(int, const char *)
    1179   int field_0;
    1180   const char *field_1;
    1181 };
    1182 struct _conc__tuple2_0 _tmp_cp6;  // _tuple2_(int, const char *)
    1183 const char *_thunk0(const char **_p0, const char *_p1){
    1184         // const char * ?=?(const char **, const char *)
    1185   return *_p0=_p1;
    1186 }
    1187 void _thunk1(const char **_p0){ // void ?{}(const char **)
    1188   *_p0; // ?{}
    1189 }
    1190 void _thunk2(const char **_p0, const char *_p1){
    1191         // void ?{}(const char **, const char *)
    1192   *_p0=_p1; // ?{}
    1193 }
    1194 void _thunk3(const char **_p0){ // void ^?{}(const char **)
    1195   *_p0; // ^?{}
    1196 }
    1197 void _thunk4(struct _conc__tuple2_0 _p0){ // void print([int, const char *])
    1198   struct _tuple1_ { // _tuple1_(T0)
    1199     void *field_0;
    1200   };
    1201   struct _conc__tuple1_1 { // _tuple1_(const char *)
    1202     const char *field_0;
    1203   };
    1204   void _thunk5(struct _conc__tuple1_1 _pp0){ // void print([const char *])
    1205     print_string(_pp0.field_0);  // print(rest.0)
    1206   }
    1207   void _adapter_i_pii_(void (*_adaptee)(), void *_ret, void *_p0, void *_p1){
    1208     *(int *)_ret=((int (*)(int *, int))_adaptee)(_p0, *(int *)_p1);
    1209   }
    1210   void _adapter_pii_(void (*_adaptee)(), void *_p0, void *_p1){
    1211     ((void (*)(int *, int ))_adaptee)(_p0, *(int *)_p1);
    1212   }
    1213   void _adapter_i_(void (*_adaptee)(), void *_p0){
    1214     ((void (*)(int))_adaptee)(*(int *)_p0);
    1215   }
    1216   void _adapter_tuple1_5_(void (*_adaptee)(), void *_p0){
    1217     ((void (*)(struct _conc__tuple1_1 ))_adaptee)(*(struct _conc__tuple1_1 *)_p0);
    1218   }
    1219   print_variadic(
    1220     _adapter_tuple1_5_,
    1221     _adapter_i_,
    1222     _adapter_pii_,
    1223     _adapter_i_pii_,
    1224     sizeof(int),
    1225     __alignof__(int),
    1226     sizeof(struct _conc__tuple1_1),
    1227     __alignof__(struct _conc__tuple1_1),
    1228     (void *(*)(void *, void *))_assign_i,     // int ?=?(int *, int)
    1229     (void (*)(void *))_ctor_i,                // void ?{}(int *)
    1230     (void (*)(void *, void *))_ctor_ii,       // void ?{}(int *, int)
    1231     (void (*)(void *))_dtor_ii,               // void ^?{}(int *)
    1232     (void (*)(void *))print_int,              // void print(int)
    1233     (void (*)(void *))&_thunk5,               // void print([const char *])
    1234     &_p0.field_0,                             // rest.0
    1235     &(struct _conc__tuple1_1 ){ _p0.field_1 } // [rest.1]
    1236   );
    1237 }
    1238 struct _tuple1_ {  // _tuple1_(T0)
    1239   void *field_0;
    1240 };
    1241 struct _conc__tuple1_6 {  // _tuple_1(const char *)
    1242   const char *field_0;
    1243 };
    1244 const char *_temp0;
    1245 _temp0="x = ";
    1246 void _adapter_pstring_pstring_string_(
    1247   void (*_adaptee)(),
    1248   void *_ret,
    1249   void *_p0,
    1250   void *_p1
    1251 ){
    1252   *(const char **)_ret=
    1253     ((const char *(*)(const char **, const char *))_adaptee)(
    1254       _p0,
    1255       *(const char **)_p1
    1256     );
    1257 }
    1258 void _adapter_pstring_string_(void (*_adaptee)(), void *_p0, void *_p1){
    1259   ((void (*)(const char **, const char *))_adaptee)(_p0, *(const char **)_p1);
    1260 }
    1261 void _adapter_string_(void (*_adaptee)(), void *_p0){
    1262   ((void (*)(const char *))_adaptee)(*(const char **)_p0);
    1263 }
    1264 void _adapter_tuple2_0_(void (*_adaptee)(), void *_p0){
    1265   ((void (*)(struct _conc__tuple2_0 ))_adaptee)(*(struct _conc__tuple2_0 *)_p0);
    1266 }
    1267 print_variadic(
    1268   _adapter_tuple2_0_,
    1269   _adapter_string_,
    1270   _adapter_pstring_string_,
    1271   _adapter_pstring_pstring_string_,
    1272   sizeof(const char *),
    1273   __alignof__(const char *),
    1274   sizeof(struct _conc__tuple2_0 ),
    1275   __alignof__(struct _conc__tuple2_0 ),
    1276   (void *(*)(void *, void *))&_thunk0, // const char * ?=?(const char **, const char *)
    1277   (void (*)(void *))&_thunk1,          // void ?{}(const char **)
    1278   (void (*)(void *, void *))&_thunk2,  // void ?{}(const char **, const char *)
    1279   (void (*)(void *))&_thunk3,          // void ^?{}(const char **)
    1280   (void (*)(void *))print_string,      // void print(const char *)
    1281   (void (*)(void *))&_thunk4,          // void print([int, const char *])
    1282   &_temp0,                             // "x = "
    1283   (({  // copy construct tuple argument to print
    1284     int *__multassign_L0 = (int *)&_tmp_cp6.field_0;
    1285     const char **__multassign_L1 = (const char **)&_tmp_cp6.field_1;
    1286     int __multassign_R0 = 123;
    1287     const char *__multassign_R1 = ".\n";
    1288     ((*__multassign_L0=__multassign_R0 /* ?{} */),
    1289      (*__multassign_L1=__multassign_R1 /* ?{} */));
    1290   }), &_tmp_cp6)                        // [123, ".\n"]
    1291 );
    1292 ({  // destroy argument temporary
    1293   int *__massassign_L0 = (int *)&_tmp_cp6.field_0;
    1294   const char **__massassign_L1 = (const char **)&_tmp_cp6.field_1;
    1295   ((*__massassign_L0 /* ^?{} */) , (*__massassign_L1 /* ^?{} */));
    1296 });
    1297 \end{cfacode}
    1298 The type @_tuple2_@ is generated to allow passing the @rest@ argument to @print_variadic@.
    1299 Thunks 0 through 3 provide wrappers for the @otype@ parameters for @const char *@, while @_thunk4@ translates a call to @print([int, const char *])@ into a call to @print_variadic(int, [const char *])@.
    1300 This all builds to a call to @print_variadic@, with the appropriate copy construction of the tuple argument.
    1301 
    1302 \section{Future Work}
  • doc/user/user.tex

    r221c2de7 r154fdc8  
    1111%% Created On       : Wed Apr  6 14:53:29 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Wed Apr  5 23:19:40 2017
    14 %% Update Count     : 1412
     13%% Last Modified On : Wed Apr 12 12:18:58 2017
     14%% Update Count     : 1415
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    6464% Names used in the document.
    6565\newcommand{\Version}{\input{../../version}}
    66 \newcommand{\CS}{C\raisebox{-0.9ex}{\large$^\sharp$}\xspace}
    67 
    6866\newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}}
    6967\newcommand{\Emph}[2][red]{{\color{#1}\textbf{\emph{#2}}}}
     
    195193For system programming, where direct access to hardware and dealing with real-time issues is a requirement, C is usually the language of choice.
    196194As well, there are millions of lines of C legacy code, forming the base for many software development projects (especially on UNIX systems).
    197 The TIOBE index (\url{http://www.tiobe.com/tiobe_index}) for March 2016 shows programming-language popularity, with \Index*{Java} 20.5\%, C 14.5\%, \Index*[C++]{\CC} 6.7\%, \CS 4.3\%, \Index*{Python} 4.3\%, and all other programming languages below 3\%.
     195The TIOBE index (\url{http://www.tiobe.com/tiobe_index}) for March 2016 shows programming-language popularity, with \Index*{Java} 20.5\%, C 14.5\%, \Index*[C++]{\CC} 6.7\%, \Csharp 4.3\%, \Index*{Python} 4.3\%, and all other programming languages below 3\%.
    198196As well, for 30 years, C has been the number 1 and 2 most popular programming language:
    199197\begin{center}
  • src/ControlStruct/LabelGenerator.cc

    r221c2de7 r154fdc8  
    2020#include "SynTree/Label.h"
    2121#include "SynTree/Attribute.h"
     22#include "SynTree/Statement.h"
    2223
    2324namespace ControlStruct {
     
    3132        }
    3233
    33         Label LabelGenerator::newLabel( std::string suffix ) {
     34        Label LabelGenerator::newLabel( std::string suffix, Statement * stmt ) {
    3435                std::ostringstream os;
    3536                os << "__L" << current++ << "__" << suffix;
     37                if ( stmt && ! stmt->get_labels().empty() ) {
     38                        os << "_" << stmt->get_labels().front() << "__";
     39                }
    3640                std::string ret = os.str();
    3741                Label l( ret );
  • src/ControlStruct/LabelGenerator.h

    r221c2de7 r154fdc8  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // LabelGenerator.h -- 
     7// LabelGenerator.h --
    88//
    99// Author           : Rodolfo G. Esteves
     
    2424          public:
    2525                static LabelGenerator *getGenerator();
    26                 Label newLabel(std::string suffix = "");
     26                Label newLabel(std::string suffix, Statement * stmt = nullptr);
    2727                void reset() { current = 0; }
    2828                void rewind() { current--; }
  • src/ControlStruct/MLEMutator.cc

    r221c2de7 r154fdc8  
    5656                bool labeledBlock = !(cmpndStmt->get_labels().empty());
    5757                if ( labeledBlock ) {
    58                         Label brkLabel = generator->newLabel("blockBreak");
     58                        Label brkLabel = generator->newLabel("blockBreak", cmpndStmt);
    5959                        enclosingControlStructures.push_back( Entry( cmpndStmt, brkLabel ) );
    6060                } // if
     
    8080                // whether brkLabel and contLabel are used with branch statements and will recursively do the same to nested
    8181                // loops
    82                 Label brkLabel = generator->newLabel("loopBreak");
    83                 Label contLabel = generator->newLabel("loopContinue");
     82                Label brkLabel = generator->newLabel("loopBreak", loopStmt);
     83                Label contLabel = generator->newLabel("loopContinue", loopStmt);
    8484                enclosingControlStructures.push_back( Entry( loopStmt, brkLabel, contLabel ) );
    8585                loopStmt->set_body ( loopStmt->get_body()->acceptMutator( *this ) );
    8686
     87                assert( ! enclosingControlStructures.empty() );
    8788                Entry &e = enclosingControlStructures.back();
    8889                // sanity check that the enclosing loops have been popped correctly
     
    108109                bool labeledBlock = !(ifStmt->get_labels().empty());
    109110                if ( labeledBlock ) {
    110                         Label brkLabel = generator->newLabel("blockBreak");
     111                        Label brkLabel = generator->newLabel("blockBreak", ifStmt);
    111112                        enclosingControlStructures.push_back( Entry( ifStmt, brkLabel ) );
    112113                } // if
    113114
    114115                Parent::mutate( ifStmt );
    115                
     116
    116117                if ( labeledBlock ) {
    117118                        if ( ! enclosingControlStructures.back().useBreakExit().empty() ) {
     
    126127        Statement *MLEMutator::handleSwitchStmt( SwitchClass *switchStmt ) {
    127128                // generate a label for breaking out of a labeled switch
    128                 Label brkLabel = generator->newLabel("switchBreak");
     129                Label brkLabel = generator->newLabel("switchBreak", switchStmt);
    129130                enclosingControlStructures.push_back( Entry(switchStmt, brkLabel) );
    130131                mutateAll( switchStmt->get_statements(), *this );
     
    158159
    159160                std::list< Entry >::reverse_iterator targetEntry;
    160                 if ( branchStmt->get_type() == BranchStmt::Goto ) {
    161                         return branchStmt;
    162                 } else if ( branchStmt->get_type() == BranchStmt::Continue) {
    163                         // continue target must be a loop
    164                         if ( branchStmt->get_target() == "" ) {
    165                                 targetEntry = std::find_if( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), [](Entry &e) { return isLoop( e.get_controlStructure() ); } );
    166                         } else {
    167                                 // labelled continue - lookup label in table ot find attached control structure
    168                                 targetEntry = std::find( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), (*targetTable)[branchStmt->get_target()] );
    169                         } // if
    170                         if ( targetEntry == enclosingControlStructures.rend() || ! isLoop( targetEntry->get_controlStructure() ) ) {
    171                                 throw SemanticError( "'continue' target must be an enclosing loop: " + originalTarget );
    172                         } // if
    173                 } else if ( branchStmt->get_type() == BranchStmt::Break ) {
    174                         if ( enclosingControlStructures.empty() ) throw SemanticError( "'break' outside a loop, switch, or labelled block" );
    175                         targetEntry = enclosingControlStructures.rbegin();
    176                 } else {
    177                         assert( false );
    178                 } // if
    179 
    180                 if ( branchStmt->get_target() != "" && targetTable->find( branchStmt->get_target() ) == targetTable->end() ) {
    181                         throw SemanticError("The label defined in the exit loop statement does not exist: " + originalTarget );  // shouldn't happen (since that's already checked)
    182                 } // if
     161                switch ( branchStmt->get_type() ) {
     162                        case BranchStmt::Goto:
     163                                return branchStmt;
     164                        case BranchStmt::Continue:
     165                        case BranchStmt::Break: {
     166                                bool isContinue = branchStmt->get_type() == BranchStmt::Continue;
     167                                // unlabeled break/continue
     168                                if ( branchStmt->get_target() == "" ) {
     169                                        if ( isContinue ) {
     170                                                // continue target is outermost loop
     171                                                targetEntry = std::find_if( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), [](Entry &e) { return isLoop( e.get_controlStructure() ); } );
     172                                        } else {
     173                                                // break target is outmost control structure
     174                                                if ( enclosingControlStructures.empty() ) throw SemanticError( "'break' outside a loop, switch, or labelled block" );
     175                                                targetEntry = enclosingControlStructures.rbegin();
     176                                        } // if
     177                                } else {
     178                                        // labeled break/continue - lookup label in table to find attached control structure
     179                                        targetEntry = std::find( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), (*targetTable)[branchStmt->get_target()] );
     180                                } // if
     181                                // ensure that selected target is valid
     182                                if ( targetEntry == enclosingControlStructures.rend() || (isContinue && ! isLoop( targetEntry->get_controlStructure() ) ) ) {
     183                                        throw SemanticError( toString( (isContinue ? "'continue'" : "'break'"), " target must be an enclosing ", (isContinue ? "loop: " : "control structure: "), originalTarget ) );
     184                                } // if
     185                                break;
     186                        }
     187                        default:
     188                                assert( false );
     189                } // switch
    183190
    184191                // branch error checks, get the appropriate label name and create a goto
     
    197204                } // switch
    198205
    199                 if ( branchStmt->get_target() == "" && branchStmt->get_type() != BranchStmt::Continue ) {
    200                         // unlabelled break/continue - can keep branch as break/continue for extra semantic information, but add
    201                         // exitLabel as its destination so that label passes can easily determine where the break/continue goes to
    202                         branchStmt->set_target( exitLabel );
    203                         return branchStmt;
    204                 } else {
    205                         // labelled break/continue - can't easily emulate this with break and continue, so transform into a goto
    206                         delete branchStmt;
    207                         return new BranchStmt( std::list<Label>(), exitLabel, BranchStmt::Goto );
    208                 } // if
     206                // transform break/continue statements into goto to simplify later handling of branches
     207                delete branchStmt;
     208                return new BranchStmt( std::list<Label>(), exitLabel, BranchStmt::Goto );
    209209        }
    210210
  • src/GenPoly/Box.cc

    r221c2de7 r154fdc8  
    3434#include "Parser/ParseNode.h"
    3535
     36#include "SynTree/Attribute.h"
    3637#include "SynTree/Constant.h"
    3738#include "SynTree/Declaration.h"
     
    165166                        using Parent::mutate;
    166167
     168                        PolyGenericCalculator();
     169
    167170                        template< typename DeclClass >
    168171                        DeclClass *handleDecl( DeclClass *decl, Type *type );
     
    198201                        ScopedSet< std::string > knownLayouts;          ///< Set of generic type layouts known in the current scope, indexed by sizeofName
    199202                        ScopedSet< std::string > knownOffsets;          ///< Set of non-generic types for which the offset array exists in the current scope, indexed by offsetofName
     203                        UniqueName bufNamer;                           ///< Namer for VLA buffers
    200204                };
    201205
     
    14521456////////////////////////////////////////// PolyGenericCalculator ////////////////////////////////////////////////////
    14531457
     1458                PolyGenericCalculator::PolyGenericCalculator()
     1459                        : Parent(), knownLayouts(), knownOffsets(), bufNamer( "_buf" ) {}
     1460
    14541461                void PolyGenericCalculator::beginTypeScope( Type *ty ) {
    14551462                        scopeTyVars.beginScope();
     
    15281535                        if ( ObjectDecl *objectDecl = dynamic_cast< ObjectDecl *>( declStmt->get_decl() ) ) {
    15291536                                if ( findGeneric( objectDecl->get_type() ) ) {
    1530                                         // change initialization of a polymorphic value object
    1531                                         // to allocate storage with alloca
     1537                                        // change initialization of a polymorphic value object to allocate via a VLA
     1538                                        // (alloca was previously used, but can't be safely used in loops)
    15321539                                        Type *declType = objectDecl->get_type();
    1533                                         UntypedExpr *alloc = new UntypedExpr( new NameExpr( "__builtin_alloca" ) );
    1534                                         alloc->get_args().push_back( new NameExpr( sizeofName( mangleType( declType ) ) ) );
     1540                                        std::string bufName = bufNamer.newName();
     1541                                        ObjectDecl *newBuf = new ObjectDecl( bufName, Type::StorageClasses(), LinkageSpec::C, 0,
     1542                                                new ArrayType( Type::Qualifiers(), new BasicType( Type::Qualifiers(), BasicType::Kind::Char), new NameExpr( sizeofName( mangleType(declType) ) ),
     1543                                                true, false, std::list<Attribute*>{ new Attribute( std::string{"aligned"}, std::list<Expression*>{ new ConstantExpr( Constant::from_int(8) ) } ) } ), 0 );
     1544                                        stmtsToAdd.push_back( new DeclStmt( noLabels, newBuf ) );
    15351545
    15361546                                        delete objectDecl->get_init();
    15371547
    1538                                         std::list<Expression*> designators;
    1539                                         objectDecl->set_init( new SingleInit( alloc, designators, false ) ); // not constructed
     1548                                        objectDecl->set_init( new SingleInit( new NameExpr( bufName ) ) );
    15401549                                }
    15411550                        }
  • src/Parser/ParseNode.h

    r221c2de7 r154fdc8  
    107107  public:
    108108        ExpressionNode( Expression * expr = nullptr ) : expr( expr ) {}
    109         ExpressionNode( const ExpressionNode &other );
    110109        virtual ~ExpressionNode() {}
    111         virtual ExpressionNode * clone() const override { return expr ? new ExpressionNode( expr->clone() ) : nullptr; }
     110        virtual ExpressionNode * clone() const override { return expr ? static_cast<ExpressionNode*>((new ExpressionNode( expr->clone() ))->set_next( maybeClone( get_next() ) )) : nullptr; }
    112111
    113112        bool get_extension() const { return extension; }
  • src/ResolvExpr/AlternativeFinder.cc

    r221c2de7 r154fdc8  
    211211        }
    212212
    213         // std::unordered_map< Expression *, UniqueExpr * > ;
     213        void AlternativeFinder::addAnonConversions( const Alternative & alt ) {
     214                // adds anonymous member interpretations whenever an aggregate value type is seen.
     215                Expression * expr = alt.expr->clone();
     216                std::unique_ptr< Expression > manager( expr ); // RAII for expr
     217                alt.env.apply( expr->get_result() );
     218                if ( StructInstType *structInst = dynamic_cast< StructInstType* >( expr->get_result() ) ) {
     219                        NameExpr nameExpr( "" );
     220                        addAggMembers( structInst, expr, alt.cost+Cost( 0, 0, 1 ), alt.env, &nameExpr );
     221                } else if ( UnionInstType *unionInst = dynamic_cast< UnionInstType* >( expr->get_result() ) ) {
     222                        NameExpr nameExpr( "" );
     223                        addAggMembers( unionInst, expr, alt.cost+Cost( 0, 0, 1 ), alt.env, &nameExpr );
     224                } // if
     225        }
    214226
    215227        template< typename StructOrUnionType >
     
    220232                std::list< Declaration* > members;
    221233                aggInst->lookup( name, members );
     234
    222235                for ( std::list< Declaration* >::const_iterator i = members.begin(); i != members.end(); ++i ) {
    223236                        if ( DeclarationWithType *dwt = dynamic_cast< DeclarationWithType* >( *i ) ) {
    224237                                alternatives.push_back( Alternative( new MemberExpr( dwt, expr->clone() ), env, newCost ) );
    225238                                renameTypes( alternatives.back().expr );
     239                                addAnonConversions( alternatives.back() ); // add anonymous member interpretations whenever an aggregate value type is seen as a member expression.
    226240                        } else {
    227241                                assert( false );
     
    730744                if ( candidates.empty() && ! errors.isEmpty() ) { throw errors; }
    731745
     746                // compute conversionsion costs
    732747                for ( AltList::iterator withFunc = candidates.begin(); withFunc != candidates.end(); ++withFunc ) {
    733748                        Cost cvtCost = computeConversionCost( *withFunc, indexer );
     
    751766                        } // if
    752767                } // for
     768                // function may return struct or union value, in which case we need to add alternatives for implicit conversions to each of the anonymous members
     769                for ( const Alternative & alt : alternatives ) {
     770                        addAnonConversions( alt );
     771                }
     772
    753773                candidates.clear();
    754774                candidates.splice( candidates.end(), alternatives );
     
    885905                        )
    886906                        renameTypes( alternatives.back().expr );
    887                         if ( StructInstType *structInst = dynamic_cast< StructInstType* >( (*i)->get_type() ) ) {
    888                                 NameExpr nameExpr( "" );
    889                                 addAggMembers( structInst, &newExpr, Cost( 0, 0, 1 ), env, &nameExpr );
    890                         } else if ( UnionInstType *unionInst = dynamic_cast< UnionInstType* >( (*i)->get_type() ) ) {
    891                                 NameExpr nameExpr( "" );
    892                                 addAggMembers( unionInst, &newExpr, Cost( 0, 0, 1 ), env, &nameExpr );
    893                         } // if
     907                        addAnonConversions( alternatives.back() ); // add anonymous member interpretations whenever an aggregate value type is seen as a name expression.
    894908                } // for
    895909        }
  • src/ResolvExpr/AlternativeFinder.h

    r221c2de7 r154fdc8  
    7878                void findSubExprs( InputIterator begin, InputIterator end, OutputIterator out );
    7979
     80                /// Adds alternatives for anonymous members
     81                void addAnonConversions( const Alternative & alt );
    8082                /// Adds alternatives for member expressions, given the aggregate, conversion cost for that aggregate, and name of the member
    8183                template< typename StructOrUnionType > void addAggMembers( StructOrUnionType *aggInst, Expression *expr, const Cost &newCost, const TypeEnvironment & env, Expression * member );
  • src/SymTab/Autogen.cc

    r221c2de7 r154fdc8  
    498498                makeUnionFieldsAssignment( srcParam, dstParam, back_inserter( funcDecl->get_statements()->get_kids() ) );
    499499                if ( returnVal ) {
    500                         if ( isDynamicLayout ) makeUnionFieldsAssignment( srcParam, returnVal, back_inserter( funcDecl->get_statements()->get_kids() ) );
    501                         else funcDecl->get_statements()->get_kids().push_back( new ReturnStmt( noLabels, new VariableExpr( srcParam ) ) );
     500                        funcDecl->get_statements()->get_kids().push_back( new ReturnStmt( noLabels, new VariableExpr( srcParam ) ) );
    502501                }
    503502        }
  • src/SymTab/Validate.cc

    r221c2de7 r154fdc8  
    208208        };
    209209
     210        class ArrayLength : public Visitor {
     211        public:
     212                /// for array types without an explicit length, compute the length and store it so that it
     213                /// is known to the rest of the phases. For example,
     214                ///   int x[] = { 1, 2, 3 };
     215                ///   int y[][2] = { { 1, 2, 3 }, { 1, 2, 3 } };
     216                /// here x and y are known at compile-time to have length 3, so change this into
     217                ///   int x[3] = { 1, 2, 3 };
     218                ///   int y[3][2] = { { 1, 2, 3 }, { 1, 2, 3 } };
     219                static void computeLength( std::list< Declaration * > & translationUnit );
     220
     221                virtual void visit( ObjectDecl * objDecl );
     222        };
     223
    210224        class CompoundLiteral final : public GenPoly::DeclMutator {
    211225                Type::StorageClasses storageClasses;
     
    235249                acceptAll( translationUnit, pass3 );
    236250                VerifyCtorDtorAssign::verify( translationUnit );
     251                ArrayLength::computeLength( translationUnit );
    237252        }
    238253
     
    869884                }
    870885        }
     886
     887        void ArrayLength::computeLength( std::list< Declaration * > & translationUnit ) {
     888                ArrayLength len;
     889                acceptAll( translationUnit, len );
     890        }
     891
     892        void ArrayLength::visit( ObjectDecl * objDecl ) {
     893                if ( ArrayType * at = dynamic_cast< ArrayType * >( objDecl->get_type() ) ) {
     894                        if ( at->get_dimension() != nullptr ) return;
     895                        if ( ListInit * init = dynamic_cast< ListInit * >( objDecl->get_init() ) ) {
     896                                at->set_dimension( new ConstantExpr( Constant::from_ulong( init->get_initializers().size() ) ) );
     897                        }
     898                }
     899        }
    871900} // namespace SymTab
    872901
  • src/SynTree/Expression.cc

    r221c2de7 r154fdc8  
    339339                        return TypeSubstitution( aggInst->get_baseParameters()->begin(), aggInst->get_baseParameters()->end(), aggInst->get_parameters().begin() );
    340340                } else {
    341                         assertf( false, "makeSub expects struct or union type for aggregate" );
     341                        assertf( false, "makeSub expects struct or union type for aggregate, but got: %s", toString( t ).c_str() );
    342342                }
    343343        }
  • src/libcfa/Makefile.am

    r221c2de7 r154fdc8  
    4141CC = ${abs_top_srcdir}/src/driver/cfa
    4242
    43 headers = limits stdlib math iostream fstream iterator rational assert containers/vector
     43headers = limits stdlib math iostream fstream iterator rational assert containers/pair containers/vector
    4444
    4545# not all platforms support concurrency, add option do disable it
  • src/libcfa/Makefile.in

    r221c2de7 r154fdc8  
    9999am__libcfa_d_a_SOURCES_DIST = libcfa-prelude.c interpose.c \
    100100        libhdr/libdebug.c limits.c stdlib.c math.c iostream.c \
    101         fstream.c iterator.c rational.c assert.c containers/vector.c \
    102         concurrency/coroutine.c concurrency/thread.c \
    103         concurrency/kernel.c concurrency/monitor.c \
    104         concurrency/CtxSwitch-@MACHINE_TYPE@.S concurrency/invoke.c
     101        fstream.c iterator.c rational.c assert.c containers/pair.c \
     102        containers/vector.c concurrency/coroutine.c \
     103        concurrency/thread.c concurrency/kernel.c \
     104        concurrency/monitor.c concurrency/CtxSwitch-@MACHINE_TYPE@.S \
     105        concurrency/invoke.c
    105106am__dirstamp = $(am__leading_dot)dirstamp
    106107@BUILD_CONCURRENCY_TRUE@am__objects_1 = concurrency/libcfa_d_a-coroutine.$(OBJEXT) \
     
    113114        libcfa_d_a-iterator.$(OBJEXT) libcfa_d_a-rational.$(OBJEXT) \
    114115        libcfa_d_a-assert.$(OBJEXT) \
     116        containers/libcfa_d_a-pair.$(OBJEXT) \
    115117        containers/libcfa_d_a-vector.$(OBJEXT) $(am__objects_1)
    116118@BUILD_CONCURRENCY_TRUE@am__objects_3 = concurrency/CtxSwitch-@MACHINE_TYPE@.$(OBJEXT) \
     
    126128am__libcfa_a_SOURCES_DIST = libcfa-prelude.c interpose.c \
    127129        libhdr/libdebug.c limits.c stdlib.c math.c iostream.c \
    128         fstream.c iterator.c rational.c assert.c containers/vector.c \
    129         concurrency/coroutine.c concurrency/thread.c \
    130         concurrency/kernel.c concurrency/monitor.c \
    131         concurrency/CtxSwitch-@MACHINE_TYPE@.S concurrency/invoke.c
     130        fstream.c iterator.c rational.c assert.c containers/pair.c \
     131        containers/vector.c concurrency/coroutine.c \
     132        concurrency/thread.c concurrency/kernel.c \
     133        concurrency/monitor.c concurrency/CtxSwitch-@MACHINE_TYPE@.S \
     134        concurrency/invoke.c
    132135@BUILD_CONCURRENCY_TRUE@am__objects_5 = concurrency/libcfa_a-coroutine.$(OBJEXT) \
    133136@BUILD_CONCURRENCY_TRUE@        concurrency/libcfa_a-thread.$(OBJEXT) \
     
    138141        libcfa_a-fstream.$(OBJEXT) libcfa_a-iterator.$(OBJEXT) \
    139142        libcfa_a-rational.$(OBJEXT) libcfa_a-assert.$(OBJEXT) \
     143        containers/libcfa_a-pair.$(OBJEXT) \
    140144        containers/libcfa_a-vector.$(OBJEXT) $(am__objects_5)
    141145@BUILD_CONCURRENCY_TRUE@am__objects_7 = concurrency/CtxSwitch-@MACHINE_TYPE@.$(OBJEXT) \
     
    176180        $(am__libcfa_a_SOURCES_DIST)
    177181am__nobase_cfa_include_HEADERS_DIST = limits stdlib math iostream \
    178         fstream iterator rational assert containers/vector \
    179         concurrency/coroutine concurrency/thread concurrency/kernel \
    180         concurrency/monitor ${shell echo stdhdr/*} \
     182        fstream iterator rational assert containers/pair \
     183        containers/vector concurrency/coroutine concurrency/thread \
     184        concurrency/kernel concurrency/monitor ${shell echo stdhdr/*} \
    181185        concurrency/invoke.h
    182186HEADERS = $(nobase_cfa_include_HEADERS)
     
    310314AM_CCASFLAGS = @CFA_FLAGS@
    311315headers = limits stdlib math iostream fstream iterator rational assert \
    312         containers/vector $(am__append_3)
     316        containers/pair containers/vector $(am__append_3)
    313317libobjs = ${headers:=.o}
    314318libsrc = libcfa-prelude.c interpose.c libhdr/libdebug.c ${headers:=.c} \
     
    400404        @$(MKDIR_P) containers/$(DEPDIR)
    401405        @: > containers/$(DEPDIR)/$(am__dirstamp)
     406containers/libcfa_d_a-pair.$(OBJEXT): containers/$(am__dirstamp) \
     407        containers/$(DEPDIR)/$(am__dirstamp)
    402408containers/libcfa_d_a-vector.$(OBJEXT): containers/$(am__dirstamp) \
    403409        containers/$(DEPDIR)/$(am__dirstamp)
     
    428434libhdr/libcfa_a-libdebug.$(OBJEXT): libhdr/$(am__dirstamp) \
    429435        libhdr/$(DEPDIR)/$(am__dirstamp)
     436containers/libcfa_a-pair.$(OBJEXT): containers/$(am__dirstamp) \
     437        containers/$(DEPDIR)/$(am__dirstamp)
    430438containers/libcfa_a-vector.$(OBJEXT): containers/$(am__dirstamp) \
    431439        containers/$(DEPDIR)/$(am__dirstamp)
     
    458466        -rm -f concurrency/libcfa_d_a-monitor.$(OBJEXT)
    459467        -rm -f concurrency/libcfa_d_a-thread.$(OBJEXT)
     468        -rm -f containers/libcfa_a-pair.$(OBJEXT)
    460469        -rm -f containers/libcfa_a-vector.$(OBJEXT)
     470        -rm -f containers/libcfa_d_a-pair.$(OBJEXT)
    461471        -rm -f containers/libcfa_d_a-vector.$(OBJEXT)
    462472        -rm -f libhdr/libcfa_a-libdebug.$(OBJEXT)
     
    497507@AMDEP_TRUE@@am__include@ @am__quote@concurrency/$(DEPDIR)/libcfa_d_a-monitor.Po@am__quote@
    498508@AMDEP_TRUE@@am__include@ @am__quote@concurrency/$(DEPDIR)/libcfa_d_a-thread.Po@am__quote@
     509@AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_a-pair.Po@am__quote@
    499510@AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_a-vector.Po@am__quote@
     511@AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_d_a-pair.Po@am__quote@
    500512@AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_d_a-vector.Po@am__quote@
    501513@AMDEP_TRUE@@am__include@ @am__quote@libhdr/$(DEPDIR)/libcfa_a-libdebug.Po@am__quote@
     
    681693@am__fastdepCC_FALSE@   $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -c -o libcfa_d_a-assert.obj `if test -f 'assert.c'; then $(CYGPATH_W) 'assert.c'; else $(CYGPATH_W) '$(srcdir)/assert.c'; fi`
    682694
     695containers/libcfa_d_a-pair.o: containers/pair.c
     696@am__fastdepCC_TRUE@    $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_d_a-pair.o -MD -MP -MF containers/$(DEPDIR)/libcfa_d_a-pair.Tpo -c -o containers/libcfa_d_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c
     697@am__fastdepCC_TRUE@    $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_d_a-pair.Tpo containers/$(DEPDIR)/libcfa_d_a-pair.Po
     698@AMDEP_TRUE@@am__fastdepCC_FALSE@       $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_d_a-pair.o' libtool=no @AMDEPBACKSLASH@
     699@AMDEP_TRUE@@am__fastdepCC_FALSE@       DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
     700@am__fastdepCC_FALSE@   $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_d_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c
     701
     702containers/libcfa_d_a-pair.obj: containers/pair.c
     703@am__fastdepCC_TRUE@    $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_d_a-pair.obj -MD -MP -MF containers/$(DEPDIR)/libcfa_d_a-pair.Tpo -c -o containers/libcfa_d_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi`
     704@am__fastdepCC_TRUE@    $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_d_a-pair.Tpo containers/$(DEPDIR)/libcfa_d_a-pair.Po
     705@AMDEP_TRUE@@am__fastdepCC_FALSE@       $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_d_a-pair.obj' libtool=no @AMDEPBACKSLASH@
     706@AMDEP_TRUE@@am__fastdepCC_FALSE@       DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
     707@am__fastdepCC_FALSE@   $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_d_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi`
     708
    683709containers/libcfa_d_a-vector.o: containers/vector.c
    684710@am__fastdepCC_TRUE@    $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_d_a-vector.o -MD -MP -MF containers/$(DEPDIR)/libcfa_d_a-vector.Tpo -c -o containers/libcfa_d_a-vector.o `test -f 'containers/vector.c' || echo '$(srcdir)/'`containers/vector.c
     
    904930@AMDEP_TRUE@@am__fastdepCC_FALSE@       DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
    905931@am__fastdepCC_FALSE@   $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -c -o libcfa_a-assert.obj `if test -f 'assert.c'; then $(CYGPATH_W) 'assert.c'; else $(CYGPATH_W) '$(srcdir)/assert.c'; fi`
     932
     933containers/libcfa_a-pair.o: containers/pair.c
     934@am__fastdepCC_TRUE@    $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_a-pair.o -MD -MP -MF containers/$(DEPDIR)/libcfa_a-pair.Tpo -c -o containers/libcfa_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c
     935@am__fastdepCC_TRUE@    $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_a-pair.Tpo containers/$(DEPDIR)/libcfa_a-pair.Po
     936@AMDEP_TRUE@@am__fastdepCC_FALSE@       $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_a-pair.o' libtool=no @AMDEPBACKSLASH@
     937@AMDEP_TRUE@@am__fastdepCC_FALSE@       DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
     938@am__fastdepCC_FALSE@   $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c
     939
     940containers/libcfa_a-pair.obj: containers/pair.c
     941@am__fastdepCC_TRUE@    $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_a-pair.obj -MD -MP -MF containers/$(DEPDIR)/libcfa_a-pair.Tpo -c -o containers/libcfa_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi`
     942@am__fastdepCC_TRUE@    $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_a-pair.Tpo containers/$(DEPDIR)/libcfa_a-pair.Po
     943@AMDEP_TRUE@@am__fastdepCC_FALSE@       $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_a-pair.obj' libtool=no @AMDEPBACKSLASH@
     944@AMDEP_TRUE@@am__fastdepCC_FALSE@       DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
     945@am__fastdepCC_FALSE@   $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi`
    906946
    907947containers/libcfa_a-vector.o: containers/vector.c
  • src/libcfa/stdlib.c

    r221c2de7 r154fdc8  
    1010// Created On       : Thu Jan 28 17:10:29 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Apr  1 18:31:26 2017
    13 // Update Count     : 181
     12// Last Modified On : Sun Apr 16 10:41:05 2017
     13// Update Count     : 189
    1414//
    1515
     
    7878} // posix_memalign
    7979
    80 forall( dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); } )
     80forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params ); } )
    8181T * new( Params p ) {
    82         return ((T*)malloc()){ p };
     82        return ((T *)malloc()){ p };
    8383}
    8484
     
    229229forall( otype T | { int ?<?( T, T ); } )
    230230unsigned int bsearch( T key, const T * arr, size_t dimension ) {
    231         int comp( const void * t1, const void * t2 ) { return *(T *)t1 < *(T *)t2 ? -1 : *(T *)t2 < *(T *)t1 ? 1 : 0; }
    232         T *result = (T *)bsearch( &key, arr, dimension, sizeof(T), comp );
     231        T *result = bsearch( key, arr, dimension );
    233232        return result ? result - arr : dimension;                       // pointer subtraction includes sizeof(T)
    234233} // bsearch
  • src/prelude/prelude.cf

    r221c2de7 r154fdc8  
    217217signed int ?<?( _Bool, _Bool ),                                         ?<=?( _Bool, _Bool ),
    218218           ?>?( _Bool, _Bool ),                                         ?>=?( _Bool, _Bool );
     219signed int ?<?( char, char ),                           ?<=?( char, char ),
     220           ?>?( char, char ),                           ?>=?( char, char );
     221signed int ?<?( signed char, signed char ),                             ?<=?( signed char, signed char ),
     222           ?>?( signed char, signed char ),                             ?>=?( signed char, signed char );
    219223signed int ?<?( unsigned char, unsigned char ),                         ?<=?( unsigned char, unsigned char ),
    220224           ?>?( unsigned char, unsigned char ),                         ?>=?( unsigned char, unsigned char );
     225signed int ?<?( signed short, signed short ),                           ?<=?( signed short, signed short ),
     226           ?>?( signed short, signed short ),                           ?>=?( signed short, signed short );
     227signed int ?<?( unsigned short, unsigned short ),                       ?<=?( unsigned short, unsigned short ),
     228           ?>?( unsigned short, unsigned short ),                               ?>=?( unsigned short, unsigned short );
    221229signed int ?<?( signed int, signed int ),                               ?<=?( signed int, signed int ),
    222230           ?>?( signed int, signed int ),                               ?>=?( signed int, signed int );
     
    265273
    266274signed int ?==?( _Bool, _Bool ),                                                        ?!=?( _Bool, _Bool );
     275signed int ?==?( char, char ),                                                          ?!=?( char, char );
     276signed int ?==?( signed char, signed char ),                            ?!=?( signed char, signed char );
     277signed int ?==?( unsigned char, unsigned char ),                        ?!=?( unsigned char, unsigned char );
     278signed int ?==?( signed short, signed short ),                          ?!=?( signed short, signed short );
     279signed int ?==?( unsigned short, unsigned short ),                      ?!=?( unsigned short, unsigned short );
    267280signed int ?==?( signed int, signed int ),                                      ?!=?( signed int, signed int );
    268281signed int ?==?( unsigned int, unsigned int ),                                  ?!=?( unsigned int, unsigned int );
Note: See TracChangeset for help on using the changeset viewer.