Changeset 154fdc8
- Timestamp:
- Apr 19, 2017, 10:15:45 AM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- cd348e7
- Parents:
- 221c2de7 (diff), de4ce0e (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 34 added
- 1 deleted
- 42 edited
Legend:
- Unmodified
- Added
- Removed
-
doc/LaTeXmacros/common.tex
r221c2de7 r154fdc8 1 1 2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -*- Mode: Latex -*- %%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 3 %% … … 11 12 %% Created On : Sat Apr 9 10:06:17 2016 12 13 %% Last Modified By : Peter A. Buhr 13 %% Last Modified On : Wed Apr 5 23:19:42201714 %% Update Count : 25 514 %% Last Modified On : Wed Apr 12 11:32:26 2017 15 %% Update Count : 257 15 16 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 17 … … 44 45 \newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20} % C++20 symbolic name 45 46 \newcommand{\Celeven}{C11\xspace} % C11 symbolic name 46 \newcommand{\Csharp}{C\raisebox{ 0.4ex}{\#}\xspace} % C# symbolic name47 \newcommand{\Csharp}{C\raisebox{-0.65ex}{\large$^\sharp$}\xspace} % C# symbolic name 47 48 48 49 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -
doc/bibliography/cfa.bib
r221c2de7 r154fdc8 9 9 % Predefined journal names: 10 10 % acmcs: Computing Surveys acta: Acta Infomatica 11 @string{acta="Acta Infomatica"} 11 12 % cacm: Communications of the ACM 12 13 % ibmjrd: IBM J. Research & Development ibmsj: IBM Systems Journal … … 433 434 keywords = {Parametric polymorphism, alphard, iterators, nested types}, 434 435 contributer = {gjditchfield@plg}, 436 key = {Alphard}, 435 437 editor = {Mary Shaw}, 436 438 title = {{ALPHARD}: Form and Content}, … … 861 863 862 864 @techreport{C11, 863 type 865 type = {International Standard}, 864 866 keywords = {ISO/IEC C 11}, 865 867 contributer = {pabuhr@plg}, … … 872 874 873 875 @techreport{C++Concepts, 874 type 876 type = {International Standard}, 875 877 keywords = {ISO/IEC TS 19217:2015}, 876 878 contributer = {a3moss@uwaterloo.ca}, 877 879 key = {{ISO/IEC} {TS} 19217}, 880 author = {Concepts}, 878 881 title = {Information technology -- Programming languages -- {C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Extensions for concepts}, 879 882 institution = {International Standard Organization}, … … 2722 2725 2723 2726 @online{GCCExtensions, 2724 contributer = {a3moss@uwaterloo.ca}, 2725 key = {{GNU}}, 2726 title = {Extensions to the {C} Language Family}, 2727 year = 2014, 2728 url = {https://gcc.gnu.org/onlinedocs/gcc-4.7.2/gcc/C-Extensions.html}, 2729 urldate = {2017-04-02} 2727 contributer = {a3moss@uwaterloo.ca}, 2728 key = {{GNU}}, 2729 author = {{C Extensions}}, 2730 title = {Extensions to the {C} Language Family}, 2731 year = 2014, 2732 note = {\href{https://gcc.gnu.org/onlinedocs/gcc-4.7.2/gcc/C-Extensions.html}{https://\-gcc.gnu.org/\-onlinedocs/\-gcc-4.7.2/\-gcc/\-C\-Extensions.html}}, 2733 urldate = {2017-04-02} 2730 2734 } 2731 2735 … … 2823 2827 key = {Fortran95}, 2824 2828 title = {Fortran 95 Standard, ISO/IEC 1539}, 2825 organization 2829 organization= {Unicomp, Inc.}, 2826 2830 address = {7660 E. 
Broadway, Tucson, Arizona, U.S.A, 85710}, 2827 2831 month = jan, … … 3060 3064 3061 3065 @online{GObject, 3062 keywords = {GObject}, 3063 contributor = {a3moss@uwaterloo.ca}, 3064 author = {{The GNOME Project}}, 3065 title = {{GObject} Reference Manual}, 3066 year = 2014, 3067 url = {https://developer.gnome.org/gobject/stable/}, 3068 urldate = {2017-04-04} 3066 keywords = {GObject}, 3067 contributor = {a3moss@uwaterloo.ca}, 3068 author = {{GObject}}, 3069 organization= {The GNOME Project}, 3070 title = {{GObject} Reference Manual}, 3071 year = 2014, 3072 url = {https://developer.gnome.org/gobject/stable/}, 3073 urldate = {2017-04-04} 3069 3074 } 3070 3075 … … 3645 3650 contributer = {pabuhr@plg}, 3646 3651 author = {James Gosling and Bill Joy and Guy Steele and Gilad Bracha and Alex Buckley}, 3647 title = {{Java} Language Specification}, 3652 title = {{Java} Language Spec.}, 3653 organization= {Oracle}, 3648 3654 publisher = {Oracle}, 3649 3655 year = 2015, … … 4564 4570 4565 4571 @manual{obj-c-book, 4566 keywords 4567 contributor 4568 author = {{Apple Computer Inc.}},4569 title 4570 publisher= {Apple Computer Inc.},4571 address 4572 year 4572 keywords = {objective-c}, 4573 contributor = {a3moss@uwaterloo.ca}, 4574 author = {{Objective-C}}, 4575 title = {The {Objective-C} Programming Language}, 4576 organization= {Apple Computer Inc.}, 4577 address = {Cupertino, CA}, 4578 year = 2003 4573 4579 } 4574 4580 … … 4576 4582 keywords = {objective-c}, 4577 4583 contributor = {a3moss@uwaterloo.ca}, 4578 author = {{ Apple Computer Inc.}},4584 author = {{Xcode}}, 4579 4585 title = {{Xcode} 7 Release Notes}, 4580 4586 year = 2015, … … 4894 4900 year = 1980, 4895 4901 month = nov, volume = 15, number = 11, pages = {47-56}, 4896 note = {Proceedings of the ACM-SIGPLAN Symposium on the {Ada} Programming 4897 Language}, 4902 note = {Proceedings of the ACM-SIGPLAN Symposium on the {Ada} Programming Language}, 4898 4903 comment = { 4899 4904 The two-pass (bottom-up, then top-down) 
algorithm, with a proof … … 5318 5323 title = {Programming with Sets: An Introduction to {SETL}}, 5319 5324 publisher = {Springer}, 5325 address = {New York, NY, USA}, 5320 5326 year = 1986, 5321 5327 } … … 5463 5469 contributer = {pabuhr@plg}, 5464 5470 title = {The Programming Language {Ada}: Reference Manual}, 5471 author = {Ada}, 5465 5472 organization= {United States Department of Defense}, 5466 5473 edition = {{ANSI/MIL-STD-1815A-1983}}, … … 5880 5887 keywords = {Rust programming language}, 5881 5888 contributer = {pabuhr@plg}, 5889 author = {{Rust}}, 5882 5890 title = {The {Rust} Programming Language}, 5883 5891 organization= {The Rust Project Developers}, … … 5891 5899 keywords = {Scala programming language}, 5892 5900 contributer = {pabuhr@plg}, 5901 author = {{Scala}}, 5893 5902 title = {{Scala} Language Specification, Version 2.11}, 5894 5903 organization= {\'{E}cole Polytechnique F\'{e}d\'{e}rale de Lausanne}, … … 6212 6221 } 6213 6222 6223 @article{Smith98, 6224 keywords = {Polymorphic C}, 6225 contributor = {a3moss@uwaterloo.ca}, 6226 title={A sound polymorphic type system for a dialect of C}, 6227 author={Smith, Geoffrey and Volpano, Dennis}, 6228 journal={Science of computer programming}, 6229 volume={32}, 6230 number={1-3}, 6231 pages={49--72}, 6232 year={1998}, 6233 publisher={Elsevier} 6234 } 6235 6214 6236 @book{Campbell74, 6215 6237 keywords = {path expressions}, … … 6308 6330 number = 5, 6309 6331 pages = {341-346} 6332 } 6333 6334 @online{Sutter15, 6335 contributer = {pabuhr@plg}, 6336 author = {Herb Sutter and Bjarne Stroustrup and Gabriel Dos Reis}, 6337 title = {Structured bindings}, 6338 issue_date = {2015-10-14}, 6339 month = oct, 6340 year = 2015, 6341 pages = {1--6}, 6342 numpages = {6}, 6343 note = {\href{http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/p0144r0.pdf}{http://\-www.open-std.org/\-jtc1/\-sc22/\-wg21/\-docs/\-papers/\-2015/\-p0144r0.pdf}}, 6310 6344 } 6311 6345 … … 6750 6784 number = 6, 6751 6785 month = jun, 6786 
publisher = {ACM}, 6787 address = {New York, NY, USA}, 6752 6788 year = 1990, 6753 6789 pages = {127-136}, … … 6902 6938 6903 6939 @online{Vala, 6904 keywords = {GObject, Vala}, 6905 contributor = {a3moss@uwaterloo.ca}, 6906 author = {{The GNOME Project}}, 6907 title = {Vala Reference Manual}, 6908 year = 2017, 6909 url = {https://wiki.gnome.org/Projects/Vala/Manual}, 6910 urldate = {2017-04-04} 6940 keywords = {GObject, Vala}, 6941 contributor = {a3moss@uwaterloo.ca}, 6942 author = {{Vala}}, 6943 organization= {The GNOME Project}, 6944 title = {Vala Reference Manual}, 6945 year = 2017, 6946 url = {https://wiki.gnome.org/Projects/Vala/Manual}, 6947 urldate = {2017-04-04} 6911 6948 } 6912 6949 -
doc/generic_types/.gitignore
r221c2de7 r154fdc8 17 17 *.synctex.gz 18 18 comment.cut 19 timing.tex -
doc/generic_types/Makefile
r221c2de7 r154fdc8 21 21 22 22 GRAPHS = ${addsuffix .tex, \ 23 timing \ 23 24 } 24 25 … … 45 46 #${DOCUMENT} : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex \ 46 47 47 ${basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex \ 48 ../LaTeXmacros/common.tex ../LaTeXmacros/indexstyle ../bibliography/cfa.bib 48 ${basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex ../bibliography/cfa.bib 49 49 # Conditionally create an empty *.idx (index) file for inclusion until makeindex is run. 50 50 if [ ! -r ${basename $@}.idx ] ; then touch ${basename $@}.idx ; fi … … 66 66 ## Define the default recipes. 67 67 68 ${GRAPHS} : evaluation/timing.gp evaluation/timing.dat 69 gnuplot evaluation/timing.gp 70 68 71 %.tex : %.fig 69 72 fig2dev -L eepic $< > $@ -
doc/generic_types/acmart.cls
r221c2de7 r154fdc8 401 401 \immediate\write\@auxout{\string\bibstyle{#1}}% 402 402 \fi}} 403 \RequirePackage{graphicx, xcolor}404 \definecolor[named]{ACMBlue}{cmyk}{1,0.1,0,0.1}405 \definecolor[named]{ACMYellow}{cmyk}{0,0.16,1,0}406 \definecolor[named]{ACMOrange}{cmyk}{0,0.42,1,0.01}407 \definecolor[named]{ACMRed}{cmyk}{0,0.90,0.86,0}408 \definecolor[named]{ACMLightBlue}{cmyk}{0.49,0.01,0,0}409 \definecolor[named]{ACMGreen}{cmyk}{0.20,0,1,0.19}410 \definecolor[named]{ACMPurple}{cmyk}{0.55,1,0,0.15}411 \definecolor[named]{ACMDarkBlue}{cmyk}{1,0.58,0,0.21}403 %\RequirePackage{graphicx, xcolor} 404 %\definecolor[named]{ACMBlue}{cmyk}{1,0.1,0,0.1} 405 %\definecolor[named]{ACMYellow}{cmyk}{0,0.16,1,0} 406 %\definecolor[named]{ACMOrange}{cmyk}{0,0.42,1,0.01} 407 %\definecolor[named]{ACMRed}{cmyk}{0,0.90,0.86,0} 408 %\definecolor[named]{ACMLightBlue}{cmyk}{0.49,0.01,0,0} 409 %\definecolor[named]{ACMGreen}{cmyk}{0.20,0,1,0.19} 410 %\definecolor[named]{ACMPurple}{cmyk}{0.55,1,0,0.15} 411 %\definecolor[named]{ACMDarkBlue}{cmyk}{1,0.58,0,0.21} 412 412 \RequirePackage{geometry} 413 413 \ifcase\ACM@format@nr -
doc/generic_types/evaluation/.gitignore
r221c2de7 r154fdc8 1 1 c-bench 2 2 cpp-bench 3 cpp-vbench 3 4 cfa-bench 4 c2-bench5 cpp2-bench6 cfa2-bench7 5 *.o 8 6 *.d -
doc/generic_types/evaluation/Makefile
r221c2de7 r154fdc8 1 CFA = my-cfa1 CFA = cfa 2 2 DEPFLAGS = -MMD -MP 3 CFLAGS = -O2 4 ifdef N 5 CFLAGS += -DN=$(N) 6 endif 7 CXXFLAGS = $(CFLAGS) --std=c++14 3 8 4 .PHONY: all clean distclean bench9 .PHONY: all clean distclean run-c run-cpp run-cfa run 5 10 6 all: c-bench cpp-bench cfa-bench c 2-bench cpp2-bench cfa2-bench11 all: c-bench cpp-bench cfa-bench cpp-vbench 7 12 8 13 # rewrite object generation to auto-determine deps … … 13 18 c-%.o : c-%.c 14 19 c-%.o : c-%.c c-%.d 15 $(COMPILE.c) -O0$(OUTPUT_OPTION) -c $<20 $(COMPILE.c) $(OUTPUT_OPTION) -c $< 16 21 17 22 cpp-%.o : cpp-%.cpp 18 23 cpp-%.o : cpp-%.cpp cpp-%.d 19 $(COMPILE.cpp) -O0$(OUTPUT_OPTION) -c $<24 $(COMPILE.cpp) $(OUTPUT_OPTION) -c $< 20 25 21 26 cfa-%.o : cfa-%.c 22 27 cfa-%.o : cfa-%.c cfa-%.d 23 $(COMPILE.cfa) -O0$(OUTPUT_OPTION) -c $<28 $(COMPILE.cfa) $(OUTPUT_OPTION) -c $< 24 29 25 c2-%.o : c-%.c 26 c2-%.o : c-%.c c-%.d 27 $(COMPILE.c) -O2 $(OUTPUT_OPTION) -c $< 30 COBJS = c-stack.o c-pair.o c-print.o 31 CPPOBJS = 32 CPPVOBJS = cpp-vstack.o 33 CFAOBJS = cfa-stack.o cfa-pair.o cfa-print.o 28 34 29 cpp2-%.o : cpp-%.cpp 30 cpp2-%.o : cpp-%.cpp cpp-%.d 31 $(COMPILE.cpp) -O2 $(OUTPUT_OPTION) -c $< 32 33 cfa2-%.o : cfa-%.c 34 cfa2-%.o : cfa-%.c cfa-%.d 35 $(COMPILE.cfa) -O2 $(OUTPUT_OPTION) -c $< 36 37 COBJS = c-stack.o 38 CPPOBJS = 39 CFAOBJS = cfa-stack.o 40 C2OBJS = $(patsubst c-%,c2-%, $(COBJS)) 41 CPP2OBJS = $(patsubst cpp-%,cpp2-%, $(CPPOBJS)) 42 CFA2OBJS = $(patsubst cfa-%,cfa2-%, $(CFAOBJS)) 35 CFILES = c-bench.c bench.h $(COBJS:.o=.h) $(COBJS:.o=.c) 36 CPPFILES = cpp-bench.cpp bench.hpp cpp-stack.hpp cpp-pair.hpp cpp-print.hpp 37 CPPVFILES = cpp-vbench.cpp bench.hpp object.hpp $(CPPVOBJS:.o=.hpp) $(CPPVOBJS:.o=.cpp) cpp-vprint.hpp 38 CFAFILES = cfa-bench.c bench.h $(CFAOBJS:.o=.h) $(CFAOBJS:.o=.c) 43 39 44 40 c-bench: c-bench.c c-bench.d $(COBJS) 45 $(COMPILE.c) - O0 -o $@ $< $(COBJS) $(LDFLAGS)41 $(COMPILE.c) -o $@ $< $(COBJS) $(LDFLAGS) 46 42 47 43 cpp-bench: cpp-bench.cpp cpp-bench.d 
$(CPPOBJS) 48 $(COMPILE.cpp) -O0 -o $@ $< $(CPPOBJS) $(LDFLAGS) 44 $(COMPILE.cpp) -o $@ $< $(CPPOBJS) $(LDFLAGS) 45 46 cpp-vbench: cpp-vbench.cpp cpp-vbench.d $(CPPVOBJS) 47 $(COMPILE.cpp) -o $@ $< $(CPPVOBJS) $(LDFLAGS) 49 48 50 49 cfa-bench: cfa-bench.c cfa-bench.d $(CFAOBJS) 51 $(COMPILE.cfa) -O0 -o $@ $< $(CFAOBJS) $(LDFLAGS) 52 53 c2-bench: c-bench.c c-bench.d $(C2OBJS) 54 $(COMPILE.c) -O2 -o $@ $< $(C2OBJS) $(LDFLAGS) 55 56 cpp2-bench: cpp-bench.cpp cpp-bench.d $(CPP2OBJS) 57 $(COMPILE.cpp) -O2 -o $@ $< $(CPP2OBJS) $(LDFLAGS) 58 59 cfa2-bench: cfa-bench.c cfa-bench.d $(CFA2OBJS) 60 $(COMPILE.cfa) -O2 -o $@ $< $(CFA2OBJS) $(LDFLAGS) 50 $(COMPILE.cfa) -o $@ $< $(CFAOBJS) $(LDFLAGS) 61 51 62 52 clean: 63 53 -rm $(COBJS) c-bench 64 54 -rm $(CPPOBJS) cpp-bench 55 -rm $(CPPVOBJS) cpp-vbench 65 56 -rm $(CFAOBJS) cfa-bench 66 -rm $(C2OBJS) c2-bench67 -rm $(CPP2OBJS) cpp2-bench68 -rm $(CFA2OBJS) cfa2-bench69 57 70 58 distclean: clean 71 59 -rm $(COBJS:.o=.d) c-bench.d 72 60 -rm $(CPPOBJS:.o=.d) cpp-bench.d 61 -rm $(CPPVOBJS:.o=.d) cpp-vbench.d 73 62 -rm $(CFAOBJS:.o=.d) cfa-bench.d 74 63 75 bench: c-bench cpp-bench cfa-bench c2-bench cpp2-bench cfa2-bench 64 run-c: c-bench 65 @echo 76 66 @echo '## C ##' 77 @./c-bench 67 @/usr/bin/time -f 'max_memory:\t%M kilobytes' ./c-bench 68 @printf 'source_size:\t%8d lines\n' `cat $(CFILES) | wc -l` 69 @printf 'redundant_type_annotations:%8d lines\n' `cat $(CFILES) | fgrep '/***/' -c` 70 @printf 'binary_size:\t%8d bytes\n' `stat -c %s c-bench` 71 72 run-cfa: cfa-bench 73 @echo 74 @echo '## Cforall ##' 75 @/usr/bin/time -f 'max_memory:\t %M kilobytes' ./cfa-bench 76 @printf 'source_size:\t%8d lines\n' `cat $(CFAFILES) | wc -l` 77 @printf 'redundant_type_annotations:%8d lines\n' `cat $(CFAFILES) | fgrep '/***/' -c` 78 @printf 'binary_size:\t%8d bytes\n' `stat -c %s cfa-bench` 79 80 run-cpp: cpp-bench 81 @echo 78 82 @echo '## C++ ##' 79 @./cpp-bench 80 @echo '## Cforall ##' 81 @./cfa-bench 82 @echo '## C -O2 ##' 83 @./c2-bench 84 
@echo '## C++ -O2 ##' 85 @./cpp2-bench 86 @echo '## Cforall -O2 ##' 87 @./cfa2-bench 83 @/usr/bin/time -f 'max_memory:\t %M kilobytes' ./cpp-bench 84 @printf 'source_size:\t%8d lines\n' `cat $(CPPFILES) | wc -l` 85 @printf 'redundant_type_annotations:%8d lines\n' `cat $(CPPFILES) | fgrep '/***/' -c` 86 @printf 'binary_size:\t%8d bytes\n' `stat -c %s cpp-bench` 87 88 run-cppv: cpp-vbench 89 @echo 90 @echo '## C++obj ##' 91 @/usr/bin/time -f 'max_memory:\t%M kilobytes' ./cpp-vbench 92 @printf 'source_size:\t%8d lines\n' `cat $(CPPVFILES) | wc -l` 93 @printf 'redundant_type_annotations:%8d lines\n' `cat $(CPPVFILES) | fgrep '/***/' -c` 94 @printf 'binary_size:\t%8d bytes\n' `stat -c %s cpp-vbench` 95 96 run: run-c run-cfa run-cpp run-cppv 88 97 89 98 # so make doesn't fail without dependency files -
doc/generic_types/evaluation/bench.h
r221c2de7 r154fdc8 1 #pragma once 1 2 #include <stdio.h> 2 3 #include <time.h> 3 4 4 #define N 200000000 5 long ms_between(clock_t start, clock_t end) { return (end - start) / (CLOCKS_PER_SEC / 1000); } 5 6 6 long ms_between(clock_t start, clock_t end) { 7 return (end - start) / (CLOCKS_PER_SEC / 1000); 7 #define N 40000000 8 #define TIMED(name, code) { \ 9 volatile clock_t _start, _end; \ 10 _start = clock(); \ 11 code \ 12 _end = clock(); \ 13 printf("%s:\t%8ld ms\n", name, ms_between(_start, _end)); \ 8 14 } 9 10 #define TIMED(name, code) { \ 11 clock_t start, end; \ 12 start = clock(); \ 13 code \ 14 end = clock(); \ 15 printf("%s:\t%7ld ms\n", name, ms_between(start, end)); \ 16 } 17 18 #define REPEAT_TIMED(name, code) TIMED( name, for (int i = 0; i < N; ++i) { code } ) 15 #define REPEAT_TIMED(name, n, code) TIMED( name, for (int _i = 0; _i < n; ++_i) { code } ) -
doc/generic_types/evaluation/c-bench.c
r221c2de7 r154fdc8 1 #include <stdio.h> 1 2 #include <stdlib.h> 2 3 #include "bench.h" 4 #include "c-pair.h" 3 5 #include "c-stack.h" 6 #include "c-print.h" 7 8 _Bool* new_bool( _Bool b ) { 9 _Bool* q = malloc(sizeof(_Bool)); /***/ 10 *q = b; 11 return q; 12 } 13 14 char* new_char( char c ) { 15 char* q = malloc(sizeof(char)); /***/ 16 *q = c; 17 return q; 18 } 19 20 int* new_int( int i ) { 21 int* q = malloc(sizeof(int)); /***/ 22 *q = i; 23 return q; 24 } 25 26 void* copy_bool( const void* p ) { return new_bool( *(const _Bool*)p ); } /***/ 27 void* copy_char( const void* p ) { return new_char( *(const char*)p ); } /***/ 28 void* copy_int( const void* p ) { return new_int( *(const int*)p ); } /***/ 29 void* copy_pair_bool_char( const void* p ) { return copy_pair( p, copy_bool, copy_char ); } /***/ 30 void free_pair_bool_char( void* p ) { free_pair( p, free, free ); } /***/ 31 32 int cmp_bool( const void* a, const void* b ) { /***/ 33 return *(const _Bool*)a == *(const _Bool*)b ? 0 : *(const _Bool*)a < *(const _Bool*)b ? -1 : 1; 34 } 35 36 int cmp_char( const void* a, const void* b ) { /***/ 37 return *(const char*)a == *(const char*)b ? 0 : *(const char*)a < *(const char*)b ? 
-1 : 1; 38 } 4 39 5 40 int main(int argc, char** argv) { 6 srand(20171025); 41 FILE * out = fopen("c-out.txt", "w"); 42 int maxi = 0, vali = 42; 43 struct stack si = new_stack(), ti; 7 44 8 struct stack s = new_stack(); 45 REPEAT_TIMED( "push_int", N, push_stack( &si, new_int( vali ) ); ) 46 TIMED( "copy_int", copy_stack( &ti, &si, copy_int ); /***/ ) 47 TIMED( "clear_int", clear_stack( &si, free ); /***/ ) 48 REPEAT_TIMED( "pop_int", N, 49 int* xi = pop_stack( &ti ); 50 if ( *xi > maxi ) { maxi = *xi; } 51 free(xi); ) 52 REPEAT_TIMED( "print_int", N/2, print( out, "dsds", vali, ":", vali, "\n" ); /***/ ) 9 53 10 REPEAT_TIMED( "push_int", 11 int* x = malloc(sizeof(int)); 12 *x = rand(); 13 push_stack(&s, x); 14 ) 54 struct pair * maxp = new_pair( new_bool(0), new_char('\0') ), 55 * valp = new_pair( new_bool(1), new_char('a') ); 56 struct stack sp = new_stack(), tp; 15 57 16 clear_stack(&s); 58 REPEAT_TIMED( "push_pair", N, push_stack( &sp, copy_pair_bool_char( valp ) ); ) 59 TIMED( "copy_pair", copy_stack( &tp, &sp, copy_pair_bool_char ); /***/ ) 60 TIMED( "clear_pair", clear_stack( &sp, free_pair_bool_char ); /***/ ) 61 REPEAT_TIMED( "pop_pair", N, 62 struct pair * xp = pop_stack( &tp ); 63 if ( cmp_pair( xp, maxp, cmp_bool, cmp_char /***/ ) > 0 ) { 64 free_pair_bool_char( maxp ); /***/ 65 maxp = xp; 66 } else { 67 free_pair_bool_char( xp ); /***/ 68 } ) 69 REPEAT_TIMED( "print_pair", N/2, print( out, "pbcspbcs", *valp, ":", *valp, "\n" ); /***/ ) 70 free_pair_bool_char( maxp ); /***/ 71 free_pair_bool_char( valp ); /***/ 72 fclose(out); 17 73 } -
doc/generic_types/evaluation/c-stack.c
r221c2de7 r154fdc8 7 7 }; 8 8 9 struct stack new_stack() { 10 return (struct stack){ NULL }; 9 struct stack new_stack() { return (struct stack){ NULL }; /***/ } 10 11 void copy_stack(struct stack* s, const struct stack* t, void* (*copy)(const void*)) { 12 struct stack_node** crnt = &s->head; 13 struct stack_node* next = t->head; 14 while ( next ) { 15 *crnt = malloc(sizeof(struct stack_node)); /***/ 16 **crnt = (struct stack_node){ copy(next->value) }; /***/ 17 crnt = &(*crnt)->next; 18 next = next->next; 19 } 20 *crnt = 0; 11 21 } 12 22 13 void clear_stack(struct stack* s ) {23 void clear_stack(struct stack* s, void (*free_el)(void*)) { 14 24 struct stack_node* next = s->head; 15 25 while ( next ) { 16 26 struct stack_node* crnt = next; 17 27 next = crnt->next; 18 free (crnt->value);28 free_el(crnt->value); 19 29 free(crnt); 20 30 } 31 s->head = NULL; 21 32 } 22 33 23 _Bool stack_empty(const struct stack* s) { 24 return s->head == NULL; 25 } 34 _Bool stack_empty(const struct stack* s) { return s->head == NULL; } 26 35 27 36 void push_stack(struct stack* s, void* value) { 28 struct stack_node* n = malloc(sizeof(struct stack_node)); 29 *n = (struct stack_node){ value, s->head }; 37 struct stack_node* n = malloc(sizeof(struct stack_node)); /***/ 38 *n = (struct stack_node){ value, s->head }; /***/ 30 39 s->head = n; 31 40 } -
doc/generic_types/evaluation/c-stack.h
r221c2de7 r154fdc8 1 #pragma once 2 1 3 struct stack_node; 2 3 4 struct stack { 4 5 struct stack_node* head; … … 6 7 7 8 struct stack new_stack(); 8 9 void clear_stack(struct stack* s );9 void copy_stack(struct stack* dst, const struct stack* src, void* (*copy)(const void*)); 10 void clear_stack(struct stack* s, void (*free_el)(void*)); 10 11 11 12 _Bool stack_empty(const struct stack* s); 12 13 13 void push_stack(struct stack* s, void* value); 14 15 14 void* pop_stack(struct stack* s); -
doc/generic_types/evaluation/cfa-bench.c
r221c2de7 r154fdc8 1 #include <std lib.h>1 #include <stdio.h> 2 2 #include "bench.h" 3 3 #include "cfa-stack.h" 4 #include "cfa-pair.h" 5 #include "cfa-print.h" 4 6 5 int main(int argc, char** argv) { 6 srand(20171025); 7 int main( int argc, char *argv[] ) { 8 FILE * out = fopen( "cfa-out.txt", "w" ); 9 int maxi = 0, vali = 42; 10 stack(int) si, ti; 7 11 8 stack(int) s; 12 REPEAT_TIMED( "push_int", N, push( &si, vali ); ) 13 TIMED( "copy_int", ti = si; ) 14 TIMED( "clear_int", clear( &si ); ) 15 REPEAT_TIMED( "pop_int", N, 16 int xi = pop( &ti ); 17 if ( xi > maxi ) { maxi = xi; } ) 18 REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); ) 9 19 10 REPEAT_TIMED( "push_int", 11 push( &s, rand() ); 12 ) 20 pair(_Bool, char) maxp = { (_Bool)0, '\0' }, valp = { (_Bool)1, 'a' }; 21 stack(pair(_Bool, char)) sp, tp; 22 23 REPEAT_TIMED( "push_pair", N, push( &sp, valp ); ) 24 TIMED( "copy_pair", tp = sp; ) 25 TIMED( "clear_pair", clear( &sp ); ) 26 REPEAT_TIMED( "pop_pair", N, 27 pair(_Bool, char) xp = pop( &tp ); 28 if ( xp > maxp ) { maxp = xp; } ) 29 REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); ) 30 fclose(out); 13 31 } -
doc/generic_types/evaluation/cfa-stack.c
r221c2de7 r154fdc8 7 7 }; 8 8 9 forall(otype T) void ?{}(stack(T)* s) { 10 ?{}( &s->head, 0 ); 9 forall(otype T) void ?{}(stack(T)* s) { (&s->head){ 0 }; } 10 11 forall(otype T) void ?{}(stack(T)* s, stack(T) t) { 12 stack_node(T)** crnt = &s->head; 13 stack_node(T)* next = t.head; 14 while ( next ) { 15 *crnt = ((stack_node(T)*)malloc()){ next->value }; /***/ 16 stack_node(T)* acrnt = *crnt; 17 crnt = &acrnt->next; 18 next = next->next; 19 } 20 *crnt = 0; 11 21 } 12 22 13 forall(otype T) void ^?{}(stack(T)* s) { 23 forall(otype T) stack(T) ?=?(stack(T)* s, stack(T) t) { 24 if ( s->head == t.head ) return *s; 25 clear(s); 26 s{ t }; 27 return *s; 28 } 29 30 forall(otype T) void ^?{}(stack(T)* s) { clear(s); } 31 32 forall(otype T) _Bool empty(const stack(T)* s) { return s->head == 0; } 33 34 forall(otype T) void push(stack(T)* s, T value) { 35 s->head = ((stack_node(T)*)malloc()){ value, s->head }; /***/ 36 } 37 38 forall(otype T) T pop(stack(T)* s) { 39 stack_node(T)* n = s->head; 40 s->head = n->next; 41 T x = n->value; 42 ^n{}; 43 free(n); 44 return x; 45 } 46 47 forall(otype T) void clear(stack(T)* s) { 14 48 stack_node(T)* next = s->head; 15 49 while ( next ) { … … 18 52 delete(crnt); 19 53 } 54 s->head = 0; 20 55 } 21 22 forall(otype T) _Bool empty(const stack(T)* s) {23 return s->head == 0;24 }25 26 forall(otype T) void push(stack(T)* s, T value) {27 s->head = ((stack_node(T)*)malloc()){ value, s->head };28 }29 30 forall(otype T) T pop(stack(T)* s) {31 stack_node(T)* n = s->head;32 s->head = n->next;33 T x = n->value;34 delete(n);35 return x;36 } -
doc/generic_types/evaluation/cfa-stack.h
r221c2de7 r154fdc8 1 #pragma once 2 1 3 forall(otype T) struct stack_node; 2 3 4 forall(otype T) struct stack { 4 5 stack_node(T)* head; … … 6 7 7 8 forall(otype T) void ?{}(stack(T)* s); 8 9 forall(otype T) void ?{}(stack(T)* s, stack(T) t); 10 forall(otype T) stack(T) ?=?(stack(T)* s, stack(T) t); 9 11 forall(otype T) void ^?{}(stack(T)* s); 10 12 11 13 forall(otype T) _Bool empty(const stack(T)* s); 12 13 14 forall(otype T) void push(stack(T)* s, T value); 14 15 15 forall(otype T) T pop(stack(T)* s); 16 forall(otype T) void clear(stack(T)* s); -
doc/generic_types/evaluation/cpp-bench.cpp
r221c2de7 r154fdc8 1 #include <stdlib.h> 2 #include "bench.h" 3 #include "cpp-stack.h" 1 #include <algorithm> 2 #include <fstream> 3 #include "bench.hpp" 4 #include "cpp-stack.hpp" 5 #include "cpp-pair.hpp" 6 #include "cpp-print.hpp" 4 7 5 8 int main(int argc, char** argv) { 6 srand(20171025); 9 std::ofstream out{"cpp-out.txt"}; 10 int maxi = 0, vali = 42; 11 stack<int> si, ti; 12 13 REPEAT_TIMED( "push_int", N, si.push( vali ); ) 14 TIMED( "copy_int", ti = si; ) 15 TIMED( "clear_int", si.clear(); ) 16 REPEAT_TIMED( "pop_int", N, maxi = std::max( maxi, ti.pop() ); ) 17 REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); ) 7 18 8 stack<int> s; 9 10 REPEAT_TIMED( "push_int", 11 s.push( rand() ); 12 ) 19 pair<bool, char> maxp = { false, '\0' }, valp = { true, 'a' }; 20 stack<pair<bool, char>> sp, tp; 21 22 REPEAT_TIMED( "push_pair", N, sp.push( valp ); ) 23 TIMED( "copy_pair", tp = sp; ) 24 TIMED( "clear_pair", sp.clear(); ) 25 REPEAT_TIMED( "pop_pair", N, maxp = std::max( maxp, tp.pop() ); ) 26 REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); ) 13 27 } -
doc/generic_types/generic_types.tex
r221c2de7 r154fdc8 1 1 % take off review (for line numbers) and anonymous (for anonymization) on submission 2 % \documentclass[format=acmlarge, anonymous,review]{acmart}3 \documentclass[format=acmlarge,review]{acmart}2 \documentclass[format=acmlarge,anonymous,review]{acmart} 3 % \documentclass[format=acmlarge,review]{acmart} 4 4 5 5 \usepackage{xspace,calc,comment} 6 6 \usepackage{upquote} % switch curled `'" to straight 7 7 \usepackage{listings} % format program code 8 \usepackage[usenames]{color} 8 9 9 10 \makeatletter 11 % Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore 12 % removes it as a variable-name character so keyworks in variables are highlighted 13 \DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}} 14 10 15 % parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for 11 16 % use rather than use \parident directly. … … 13 18 \setlength{\parindentlnth}{\parindent} 14 19 15 \newlength{\gcolumnposn} % temporary hack because lstlisting does handle tabs correctly20 \newlength{\gcolumnposn} % temporary hack because lstlisting does not handle tabs correctly 16 21 \newlength{\columnposn} 17 22 \setlength{\gcolumnposn}{2.75in} … … 19 24 \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@commentstyle{#2}}} 20 25 \newcommand{\CRT}{\global\columnposn=\gcolumnposn} 26 27 % Latin abbreviation 28 \newcommand{\abbrevFont}{\textit} % set empty for no italics 29 \newcommand*{\eg}{% 30 \@ifnextchar{,}{\abbrevFont{e}.\abbrevFont{g}.}% 31 {\@ifnextchar{:}{\abbrevFont{e}.\abbrevFont{g}.}% 32 {\abbrevFont{e}.\abbrevFont{g}.,\xspace}}% 33 }% 34 \newcommand*{\ie}{% 35 \@ifnextchar{,}{\abbrevFont{i}.\abbrevFont{e}.}% 36 {\@ifnextchar{:}{\abbrevFont{i}.\abbrevFont{e}.}% 37 {\abbrevFont{i}.\abbrevFont{e}.,\xspace}}% 38 }% 39 \newcommand*{\etc}{% 
40 \@ifnextchar{.}{\abbrevFont{etc}}% 41 {\abbrevFont{etc}.\xspace}% 42 }% 43 \newcommand{\etal}{% 44 \@ifnextchar{.}{\abbrevFont{et~al}}% 45 {\abbrevFont{et al}.\xspace}% 46 }% 21 47 \makeatother 22 48 … … 28 54 \newcommand{\CCseventeen}{\rm C\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name 29 55 \newcommand{\CCtwenty}{\rm C\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name 30 \newcommand{\CS}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} 56 \newcommand{\CCV}{\rm C\kern-.1em\hbox{+\kern-.25em+}obj\xspace} % C++ virtual symbolic name 57 \newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} % C# symbolic name 31 58 \newcommand{\Textbf}[1]{{\color{red}\textbf{#1}}} 32 33 59 \newcommand{\TODO}[1]{\textbf{TODO}: {\itshape #1}} % TODO included 34 60 %\newcommand{\TODO}[1]{} % TODO elided 35 \newcommand{\eg}{\textit{e}.\textit{g}.,\xspace}36 \newcommand{\ie}{\textit{i}.\textit{e}.,\xspace}37 \newcommand{\etc}{\textit{etc}.,\xspace}38 61 39 62 % CFA programming language, based on ANSI C (with some gcc additions) … … 60 83 belowskip=3pt, 61 84 % replace/adjust listing characters that look bad in sanserif 62 literate={-}{\ raisebox{-0.15ex}{\texttt{-}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}163 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 {_}{\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}}1% {`}{\ttfamily\upshape\hspace*{-0.1ex}`}164 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 ,85 literate={-}{\makebox[1.4ex][c]{\raisebox{0.5ex}{\rule{1.2ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptscriptstyle\land\,$}}1 86 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 87 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1.4ex][c]{\raisebox{0.5ex}{\rule{1.2ex}{0.1ex}}}\kern-0.3ex\textgreater}2, 65 88 moredelim=**[is][\color{red}]{`}{`}, 66 89 }% lstset 67 90 68 91 % inline code @...@ 69 \lstMakeShortInline@ 92 \lstMakeShortInline@% 70 93 71 94 % ACM Information … … 120 143 121 144 \begin{abstract} 122 The C 
programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers. Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers. Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers. This paper describes two \CFA extensions, generic and tuple types, details how their design avoids shortcomings of similar features in C and other C-like languages, and presents experimental results validating the design. 145 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. 146 This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. 147 Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. 
148 The goal of the \CFA project is to create an extension of C that provides modern safety and productivity features while still ensuring strong backwards compatibility with C and its programmers. 149 Prior projects have attempted similar goals but failed to honour C programming-style; for instance, adding object-oriented or functional programming with garbage collection is a non-starter for many C developers. 150 Specifically, \CFA is designed to have an orthogonal feature-set based closely on the C programming paradigm, so that \CFA features can be added \emph{incrementally} to existing C code-bases, and C programmers can learn \CFA extensions on an as-needed basis, preserving investment in existing code and engineers. 151 This paper describes two \CFA extensions, generic and tuple types, details how their design avoids shortcomings of similar features in C and other C-like languages, and presents experimental results validating the design. 123 152 \end{abstract} 124 153 … … 129 158 \section{Introduction and Background} 130 159 131 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. 132 The \citet{TIOBE} ranks the top 5 most popular programming languages as: Java 16\%, \Textbf{C 7\%}, \Textbf{\CC 5\%}, \CS 4\%, Python 4\% = 36\%, where the next 50 languages are less than 3\% each with a long tail. The top 3 rankings over the past 30 years are: 133 \lstDeleteShortInline@ 160 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects. 
161 This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more. 162 The \citet{TIOBE} ranks the top 5 most popular programming languages as: Java 16\%, \Textbf{C 7\%}, \Textbf{\CC 5\%}, \Csharp 4\%, Python 4\% = 36\%, where the next 50 languages are less than 3\% each with a long tail. 163 The top 3 rankings over the past 30 years are: 164 \lstDeleteShortInline@% 134 165 \begin{center} 135 166 \setlength{\tabcolsep}{10pt} 136 \begin{tabular}{@{}r|c|c|c|c|c|c|c@{}} 137 & 2017 & 2012 & 2007 & 2002 & 1997 & 1992 & 1987 \\ 138 \hline 139 Java & 1 & 1 & 1 & 3 & 13 & - & - \\ 140 \hline 141 \Textbf{C} & \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{1}& \Textbf{1}& \Textbf{1}& \Textbf{1} \\ 142 \hline 167 \begin{tabular}{@{}rccccccc@{}} 168 & 2017 & 2012 & 2007 & 2002 & 1997 & 1992 & 1987 \\ \hline 169 Java & 1 & 1 & 1 & 1 & 12 & - & - \\ 170 \Textbf{C} & \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{2}& \Textbf{1}& \Textbf{1}& \Textbf{1} \\ 143 171 \CC & 3 & 3 & 3 & 3 & 2 & 2 & 4 \\ 144 172 \end{tabular} 145 173 \end{center} 146 \lstMakeShortInline@ 147 Love it or hate it, C is extremely popular, highly used, and one of the few system 's languages.174 \lstMakeShortInline@% 175 Love it or hate it, C is extremely popular, highly used, and one of the few systems languages. 148 176 In many cases, \CC is often used solely as a better C. 149 177 Nonetheless, C, first standardized over thirty years ago, lacks many features that make programming in more modern languages safer and more productive. 150 178 151 \CFA (pronounced ``C-for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers. 
The four key design goals for \CFA~\citep{Bilson03} are: 179 \CFA (pronounced ``C-for-all'', and written \CFA or Cforall) is an evolutionary extension of the C programming language that aims to add modern language features to C while maintaining both source compatibility with C and a familiar programming model for programmers. 180 The four key design goals for \CFA~\citep{Bilson03} are: 152 181 (1) The behaviour of standard C code must remain the same when translated by a \CFA compiler as when translated by a C compiler; 153 182 (2) Standard C code must be as fast and as small when translated by a \CFA compiler as when translated by a C compiler; 154 183 (3) \CFA code must be at least as portable as standard C code; 155 184 (4) Extensions introduced by \CFA must be translated in the most efficient way possible. 156 These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used. In its current implementation, \CFA is compiled by translating it to the GCC-dialect of C~\citep{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)-(3). Ultimately, a compiler is necessary for advanced features and optimal performance. 157 158 This paper identifies shortcomings in existing approaches to generic and variadic data types in C-like languages and presents a design for generic and variadic types avoiding those shortcomings. Specifically, the solution is both reusable and type-checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions. The new constructs are empirically compared with both standard C and \CC; the results show the new design is comparable in performance. 
185 These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used. 186 \CC is used similarly, but has the disadvantages of multiple legacy design-choices that cannot be updated and active divergence of the language model from C, requiring significant effort and training to incrementally add \CC to a C-based project. 187 188 \CFA is currently implemented as a source-to-source translator from \CFA to the GCC-dialect of C~\citep{GCCExtensions}, allowing it to leverage the portability and code optimizations provided by GCC, meeting goals (1)--(3). 189 Ultimately, a compiler is necessary for advanced features and optimal performance. 190 191 This paper identifies shortcomings in existing approaches to generic and variadic data types in C-like languages and presents a design for generic and variadic types avoiding those shortcomings. 192 Specifically, the solution is both reusable and type-checked, as well as conforming to the design goals of \CFA with ergonomic use of existing C abstractions. 193 The new constructs are empirically compared with both standard C and \CC; the results show the new design is comparable in performance. 159 194 160 195 … … 162 197 \label{sec:poly-fns} 163 198 164 \CFA's polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}. The signature feature of \CFA is parametric-polymorphic functions where functions are generalized using a @forall@ clause (giving the language its name): 199 \CFA's polymorphism was originally formalized by \citet{Ditchfield92}, and first implemented by \citet{Bilson03}. 
200 The signature feature of \CFA is parametric-polymorphic functions~\citep{forceone:impl,Cormack90,Duggan96} with functions generalized using a @forall@ clause (giving the language its name): 165 201 \begin{lstlisting} 166 202 `forall( otype T )` T identity( T val ) { return val; } 167 203 int forty_two = identity( 42 ); $\C{// T is bound to int, forty\_two == 42}$ 168 204 \end{lstlisting} 169 The @identity@ function above can be applied to any complete \emph{object type} (or @otype@). The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type. The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor. If this extra information is not needed, \eg for a pointer, the type parameter can be declared as a \emph{data type} (or @dtype@). 170 171 Here, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; preliminary experiments have shown this overhead is similar to \CC virtual function calls. An advantage of this design is that, unlike \CC template functions, \CFA polymorphic functions are compatible with C \emph{separate} compilation, preventing code bloat. 172 173 Since bare polymorphic-types provide only a narrow set of available operations, \CFA provides a \emph{type assertion} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable. For example, the function @twice@ can be defined using the \CFA syntax for operator overloading: 205 The @identity@ function above can be applied to any complete \emph{object type} (or @otype@). 
206 The type variable @T@ is transformed into a set of additional implicit parameters encoding sufficient information about @T@ to create and return a variable of that type. 207 The \CFA implementation passes the size and alignment of the type represented by an @otype@ parameter, as well as an assignment operator, constructor, copy constructor and destructor. 208 If this extra information is not needed, \eg for a pointer, the type parameter can be declared as a \emph{data type} (or @dtype@). 209 210 In \CFA, the polymorphism runtime-cost is spread over each polymorphic call, due to passing more arguments to polymorphic functions; 211 the experiments in Section~\ref{sec:eval} show this overhead is similar to \CC virtual-function calls. 212 A design advantage is that, unlike \CC template-functions, \CFA polymorphic-functions are compatible with C \emph{separate compilation}, preventing compilation and code bloat. 213 214 Since bare polymorphic-types provide a restricted set of available operations, \CFA provides a \emph{type assertion}~\cite[pp.~37-44]{Alphard} mechanism to provide further type information, where type assertions may be variable or function declarations that depend on a polymorphic type-variable. 215 For example, the function @twice@ can be defined using the \CFA syntax for operator overloading: 174 216 \begin{lstlisting} 175 217 forall( otype T `| { T ?+?(T, T); }` ) T twice( T x ) { return x + x; } $\C{// ? denotes operands}$ 176 218 int val = twice( twice( 3.7 ) ); 177 219 \end{lstlisting} 178 which works for any type @T@ with a matching addition operator. The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@. There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Ada} in its type analysis. 
The first approach has a late conversion from @int@ to @double@ on the final assignment, while the second has an eager conversion to @int@. \CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition. 220 which works for any type @T@ with a matching addition operator. 221 The polymorphism is achieved by creating a wrapper function for calling @+@ with @T@ bound to @double@, then passing this function to the first call of @twice@. 222 There is now the option of using the same @twice@ and converting the result to @int@ on assignment, or creating another @twice@ with type parameter @T@ bound to @int@ because \CFA uses the return type~\cite{Cormack81,Baker82,Ada} in its type analysis. 223 The first approach has a late conversion from @double@ to @int@ on the final assignment, while the second has an eager conversion to @int@. 224 \CFA minimizes the number of conversions and their potential to lose information, so it selects the first approach, which corresponds with C-programmer intuition. 179 225 180 226 Crucial to the design of a new programming language are the libraries to access thousands of external software features. 181 \CFA inherits a massive compatible library-base, where other programming languages must rewrite or provide fragile inter-language communication with C.227 Like \CC, \CFA inherits a massive compatible library-base, where other programming languages must rewrite or provide fragile inter-language communication with C. 
182 228 A simple example is leveraging the existing type-unsafe (@void *@) C @bsearch@ to binary search a sorted floating-point array: 183 229 \begin{lstlisting} 184 230 void * bsearch( const void * key, const void * base, size_t nmemb, size_t size, 185 int (* compar)( const void *, const void *));231 int (* compar)( const void *, const void * )); 186 232 int comp( const void * t1, const void * t2 ) { return *(double *)t1 < *(double *)t2 ? -1 : 187 233 *(double *)t2 < *(double *)t1 ? 1 : 0; } 188 double vals[10] = { /* 10 floating-point values */ }; 189 double key = 5.0; 234 double key = 5.0, vals[10] = { /* 10 floating-point values */ }; 190 235 double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp ); $\C{// search sorted array}$ 191 236 \end{lstlisting} … … 196 241 return (T *)bsearch( &key, arr, size, sizeof(T), comp ); } 197 242 forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) { 198 T * result = bsearch( key, arr, size ); $\C{// call first version}$243 T * result = bsearch( key, arr, size ); $\C{// call first version}$ 199 244 return result ? result - arr : size; } $\C{// pointer subtraction includes sizeof(T)}$ 200 245 double * val = bsearch( 5.0, vals, 10 ); $\C{// selection based on return type}$ 201 246 int posn = bsearch( 5.0, vals, 10 ); 202 247 \end{lstlisting} 203 The nested routine @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result. 248 The nested function @comp@ provides the hidden interface from typed \CFA to untyped (@void *@) C, plus the cast of the result. 249 Providing a hidden @comp@ function in \CC is awkward as lambdas do not use C calling-conventions and template declarations cannot appear at block scope. 204 250 As well, an alternate kind of return is made available: position versus pointer to found element. 
205 251 \CC's type-system cannot disambiguate between the two versions of @bsearch@ because it does not use the return type in overload resolution, nor can \CC separately compile a templated @bsearch@. … … 208 254 For example, it is possible to write a type-safe \CFA wrapper @malloc@ based on the C @malloc@: 209 255 \begin{lstlisting} 210 forall( dtype T | sized(T) ) T * malloc( void ) { return (T *) (void *)malloc( (size_t)sizeof(T) ); }256 forall( dtype T | sized(T) ) T * malloc( void ) { return (T *)malloc( sizeof(T) ); } 211 257 int * ip = malloc(); $\C{// select type and size from left-hand side}$ 212 258 double * dp = malloc(); … … 215 261 where the return type supplies the type/size of the allocation, which is impossible in most type systems. 216 262 217 Call-site inferencing and nested functions provide a localized form of inheritance. For example, the \CFA @qsort@ only sorts in ascending order using @<@. However, it is trivial to locally change this behaviour: 263 Call-site inferencing and nested functions provide a localized form of inheritance. 264 For example, the \CFA @qsort@ only sorts in ascending order using @<@. 265 However, it is trivial to locally change this behaviour: 218 266 \begin{lstlisting} 219 267 forall( otype T | { int ?<?( T, T ); } ) void qsort( const T * arr, size_t size ) { /* use C qsort */ } … … 223 271 \end{lstlisting} 224 272 Within the block, the nested version of @<@ performs @>@ and this local version overrides the built-in @<@ so it is passed to @qsort@. 225 Hence, programmers can easily form alocal environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types.273 Hence, programmers can easily form local environments, adding and modifying appropriate functions, to maximize reuse of other existing functions and types. 
226 274 227 275 Finally, \CFA allows variable overloading: 228 \lstDeleteShortInline@ 229 \par\smallskip 230 \begin{tabular}{@{}l@{\hspace{\parindent}}|@{\hspace{\parindent}}l@{}} 231 \begin{lstlisting} 232 short int MAX = ...; 233 int MAX = ...; 234 double MAX = ...; 235 \end{lstlisting} 236 & 237 \begin{lstlisting} 238 short int s = MAX; // select correct MAX 239 int i = MAX; 240 double d = MAX; 241 \end{lstlisting} 242 \end{tabular} 243 \lstMakeShortInline@ 244 \smallskip\par\noindent 245 Hence, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@. 276 \begin{lstlisting} 277 short int MAX = ...; int MAX = ...; double MAX = ...; 278 short int s = MAX; int i = MAX; double d = MAX; $\C{// select correct MAX}$ 279 \end{lstlisting} 280 Here, the single name @MAX@ replaces all the C type-specific names: @SHRT_MAX@, @INT_MAX@, @DBL_MAX@. 246 281 As well, restricted constant overloading is allowed for the values @0@ and @1@, which have special status in C, \eg the value @0@ is both an integer and a pointer literal, so its meaning depends on context. 247 In addition, several operations are defined in terms of values @0@ and @1@. 248 For example, 282 In addition, several operations are defined in terms of values @0@ and @1@, \eg: 249 \begin{lstlisting} 250 284 int x; 251 if (x) // if (x != 0) 252 x++; // x += 1; 253 \end{lstlisting} 254 Every if statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result. 285 if (x) x++ $\C{// if (x != 0) x += 1;}$ 286 \end{lstlisting} 287 Every if and iteration statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result. 
255 288 Due to these rewrite rules, the values @0@ and @1@ have the types @zero_t@ and @one_t@ in \CFA, which allows overloading various operations for new types that seamlessly connect to all special @0@ and @1@ contexts. 256 289 The types @zero_t@ and @one_t@ have special built in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal. … … 269 302 forall( otype T `| summable( T )` ) T sum( T a[$\,$], size_t size ) { // use trait 270 303 `T` total = { `0` }; $\C{// instantiate T from 0 by calling its constructor}$ 271 for ( unsigned int i = 0; i < size; i += 1 ) 272 total `+=` a[i]; $\C{// select appropriate +}$ 304 for ( unsigned int i = 0; i < size; i += 1 ) total `+=` a[i]; $\C{// select appropriate +}$ 273 305 return total; } 274 306 \end{lstlisting} 275 307 276 In fact, the set of operators is incomplete, \eg no assignment, but @otype@ is syntactic sugar for the following implicit trait:308 In fact, the set of @summable@ trait operators is incomplete, as it is missing assignment for type @T@, but @otype@ is syntactic sugar for the following implicit trait: 277 309 \begin{lstlisting} 278 310 trait otype( dtype T | sized(T) ) { // sized is a pseudo-trait for types with known size and alignment … … 283 315 \end{lstlisting} 284 316 Given the information provided for an @otype@, variables of polymorphic type can be treated as if they were a complete type: stack-allocatable, default or copy-initialized, assigned, and deleted. 285 % As an example, the @sum@ function produces generated code something like the following (simplified for clarity and brevity)\TODO{fix example, maybe elide, it's likely too long with the more complicated function}: 317 318 In summation, the \CFA type-system uses \emph{nominal typing} for concrete types, matching with the C type-system, and \emph{structural typing} for polymorphic types. 
319 Hence, trait names play no part in type equivalence; 320 the names are simply macros for a list of polymorphic assertions, which are expanded at usage sites. 321 Nevertheless, trait names form a logical subtype-hierarchy with @dtype@ at the top, where traits often contain overlapping assertions, \eg operator @+@. 322 Traits are used like interfaces in Java or abstract base-classes in \CC, but without the nominal inheritance-relationships. 323 Instead, each polymorphic function (or generic type) defines the structural type needed for its execution (polymorphic type-key), and this key is fulfilled at each call site from the lexical environment, which is similar to Go~\citep{Go} interfaces. 324 Hence, new lexical scopes and nested functions are used extensively to create local subtypes, as in the @qsort@ example, without having to manage a nominal-inheritance hierarchy. 325 (Nominal inheritance can be approximated with traits using marker variables or functions, as is done in Go.) 326 327 % Nominal inheritance can be simulated with traits using marker variables or functions: 286 328 % \begin{lstlisting} 287 % void abs( size_t _sizeof_M, size_t _alignof_M, 288 % void (*_ctor_M)(void*), void (*_copy_M)(void*, void*), 289 % void (*_assign_M)(void*, void*), void (*_dtor_M)(void*), 290 % _Bool (*_lt_M)(void*, void*), void (*_neg_M)(void*, void*), 291 % void (*_ctor_M_zero)(void*, int), 292 % void* m, void* _rtn ) { $\C{// polymorphic parameter and return passed as void*}$ 293 % $\C{// M zero = { 0 };}$ 294 % void* zero = alloca(_sizeof_M); $\C{// stack allocate zero temporary}$ 295 % _ctor_M_zero(zero, 0); $\C{// initialize using zero\_t constructor}$ 296 % $\C{// return m < zero ? -m : m;}$ 297 % void *_tmp = alloca(_sizeof_M); 298 % _copy_M( _rtn, $\C{// copy-initialize return value}$ 299 % _lt_M( m, zero ) ? 
$\C{// check condition}$ 300 % (_neg_M(m, _tmp), _tmp) : $\C{// negate m}$ 301 % m); 302 % _dtor_M(_tmp); _dtor_M(zero); $\C{// destroy temporaries}$ 329 % trait nominal(otype T) { 330 % T is_nominal; 331 % }; 332 % int is_nominal; $\C{// int now satisfies the nominal trait}$ 333 % \end{lstlisting} 334 % 335 % Traits, however, are significantly more powerful than nominal-inheritance interfaces; most notably, traits may be used to declare a relationship \emph{among} multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems: 336 % \begin{lstlisting} 337 % trait pointer_like(otype Ptr, otype El) { 338 % lvalue El *?(Ptr); $\C{// Ptr can be dereferenced into a modifiable value of type El}$ 303 339 % } 340 % struct list { 341 % int value; 342 % list * next; $\C{// may omit "struct" on type names as in \CC}$ 343 % }; 344 % typedef list * list_iterator; 345 % 346 % lvalue int *?( list_iterator it ) { return it->value; } 304 347 % \end{lstlisting} 305 306 Traits may be used for many of the same purposes as interfaces in Java or abstract base classes in \CC. Unlike Java interfaces or \CC base classes, \CFA types do not explicitly state any inheritance relationship to traits they satisfy, which is a form of structural inheritance, similar to the implementation of an interface in Go~\citep{Go}, as opposed to the nominal inheritance model of Java and \CC. 307 308 Nominal inheritance can be simulated with traits using marker variables or functions: 309 \begin{lstlisting} 310 trait nominal(otype T) { 311 T is_nominal; 348 % In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. 
Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@). 349 % While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously. 350 351 352 \section{Generic Types} 353 354 One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms. 355 Broadly speaking, there are three approaches to implement abstract data-structures in C. 356 One approach is to write bespoke data structures for each context in which they are needed. 357 While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures. 358 A second approach is to use @void *@--based polymorphism, \eg the C standard-library functions @bsearch@ and @qsort@; an approach which does allow reuse of code for common functionality. 359 However, basing all polymorphism on @void *@ eliminates the type-checker's ability to ensure that argument types are properly matched, often requiring a number of extra function parameters, pointer indirection, and dynamic allocation that would not otherwise be needed. 360 A third approach to generic code is to use preprocessor macros, which does allow the generated code to be both generic and type-checked, but errors may be difficult to interpret. 361 Furthermore, writing and using preprocessor macros can be unnatural and inflexible. 362 363 \CC, Java, and other languages use \emph{generic types} to produce type-safe abstract data-types. 
364 \CFA also implements generic types that integrate efficiently and naturally with the existing polymorphic functions, while retaining backwards compatibility with C and providing separate compilation. 365 However, for known concrete parameters, the generic-type definition can be inlined, like \CC templates. 366 367 A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name: 368 \begin{lstlisting} 369 forall( otype R, otype S ) struct pair { 370 R first; 371 S second; 312 372 }; 313 int is_nominal; $\C{// int now satisfies the nominal trait}$ 314 \end{lstlisting} 315 316 Traits, however, are significantly more powerful than nominal-inheritance interfaces; most notably, traits may be used to declare a relationship \emph{among} multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems: 317 \begin{lstlisting} 318 trait pointer_like(otype Ptr, otype El) { 319 lvalue El *?(Ptr); $\C{// Ptr can be dereferenced into a modifiable value of type El}$ 320 } 321 struct list { 322 int value; 323 list *next; $\C{// may omit "struct" on type names as in \CC}$ 324 }; 325 typedef list *list_iterator; 326 327 lvalue int *?( list_iterator it ) { return it->value; } 328 \end{lstlisting} 329 330 In the example above, @(list_iterator, int)@ satisfies @pointer_like@ by the user-defined dereference function, and @(list_iterator, list)@ also satisfies @pointer_like@ by the built-in dereference operator for pointers. Given a declaration @list_iterator it@, @*it@ can be either an @int@ or a @list@, with the meaning disambiguated by context (\eg @int x = *it;@ interprets @*it@ as an @int@, while @(*it).value = 42;@ interprets @*it@ as a @list@). 
331 While a nominal-inheritance system with associated types could model one of those two relationships by making @El@ an associated type of @Ptr@ in the @pointer_like@ implementation, few such systems could model both relationships simultaneously. 332 333 \section{Generic Types} 334 335 One of the known shortcomings of standard C is that it does not provide reusable type-safe abstractions for generic data structures and algorithms. Broadly speaking, there are three approaches to create data structures in C. One approach is to write bespoke data structures for each context in which they are needed. While this approach is flexible and supports integration with the C type-checker and tooling, it is also tedious and error-prone, especially for more complex data structures. A second approach is to use @void*@-based polymorphism. This approach is taken by the C standard library functions @qsort@ and @bsearch@, and does allow the use of common code for common functionality. However, basing all polymorphism on @void*@ eliminates the type-checker's ability to ensure that argument types are properly matched, often requires a number of extra function parameters, and also adds pointer indirection and dynamic allocation to algorithms and data structures that would not otherwise require them. A third approach to generic code is to use pre-processor macros to generate it -- this approach does allow the generated code to be both generic and type-checked, though any errors produced may be difficult to interpret. Furthermore, writing and invoking C code as preprocessor macros is unnatural and somewhat inflexible. 336 337 Other C-like languages such as \CC and Java use \emph{generic types} to produce type-safe abstract data types. 
\CFA implements generic types with some care taken that the generic types design for \CFA integrates efficiently and naturally with the existing polymorphic functions in \CFA while retaining backwards compatibility with C; maintaining separate compilation is a particularly important constraint on the design. However, where the concrete parameters of the generic type are known, there is no extra overhead for the use of a generic type, as for \CC templates. 338 339 A generic type can be declared by placing a @forall@ specifier on a @struct@ or @union@ declaration, and instantiated using a parenthesized list of types after the type name: 340 \begin{lstlisting} 341 forall(otype R, otype S) struct pair { 342 R first; 343 S second; 344 }; 345 346 forall(otype T) 347 T value( pair(const char*, T) p ) { return p.second; } 348 349 forall(dtype F, otype T) 350 T value_p( pair(F*, T*) p ) { return *p.second; } 351 352 pair(const char*, int) p = { "magic", 42 }; 373 forall( otype T ) T value( pair( const char *, T ) p ) { return p.second; } 374 forall( dtype F, otype T ) T value_p( pair( F *, T * ) p ) { return * p.second; } 375 pair( const char *, int ) p = { "magic", 42 }; 353 376 int magic = value( p ); 354 355 pair(void*, int*) q = { 0, &p.second }; 377 pair( void *, int * ) q = { 0, &p.second }; 356 378 magic = value_p( q ); 357 379 double d = 1.0; 358 pair( double*, double*) r = { &d, &d };380 pair( double *, double * ) r = { &d, &d }; 359 381 d = value_p( r ); 360 382 \end{lstlisting} 361 383 362 \CFA classifies generic types as either \emph{concrete} or \emph{dynamic}. Concrete generic types have a fixed memory layout regardless of type parameters, while dynamic generic types vary in their in-memory layout depending on their type parameters. A type may have polymorphic parameters but still be concrete; in \CFA such types are called \emph{dtype-static}. 
Polymorphic pointers are an example of dtype-static types -- @forall(dtype T) T*@ is a polymorphic type, but for any @T@ chosen, @T*@ has exactly the same in-memory representation as a @void*@, and can therefore be represented by a @void*@ in code generation. 363 364 \CFA generic types may also specify constraints on their argument type to be checked by the compiler. For example, consider the following declaration of a sorted set-type, which ensures that the set key supports equality and relational comparison: 365 \begin{lstlisting} 366 forall(otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); }) 367 struct sorted_set; 368 \end{lstlisting} 369 370 \subsection{Concrete Generic Types} 371 372 The \CFA translator instantiates concrete generic types by template-expanding them to fresh struct types; concrete generic types can therefore be used with zero runtime overhead. To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated struct declarations where appropriate. For example, a function declaration that accepts or returns a concrete generic type produces a declaration for the instantiated struct in the same scope, which all callers that can see that declaration may reuse. As an example of the expansion, the concrete instantiation for @pair(const char*, int)@ looks like this: 384 \CFA classifies generic types as either \emph{concrete} or \emph{dynamic}. 385 Concrete types have a fixed memory layout regardless of type parameters, while dynamic types vary in memory layout depending on their type parameters. 386 A type may have polymorphic parameters but still be concrete, called \emph{dtype-static}. 387 Polymorphic pointers are an example of dtype-static types, \eg @forall(dtype T) T *@ is a polymorphic type, but for any @T@, @T *@ is a fixed-sized pointer, and therefore, can be represented by a @void *@ in code generation. 
388 389 \CFA generic types also allow checked argument-constraints. 390 For example, the following declaration of a sorted set-type ensures the set key supports equality and relational comparison: 391 \begin{lstlisting} 392 forall( otype Key | { _Bool ?==?(Key, Key); _Bool ?<?(Key, Key); } ) struct sorted_set; 393 \end{lstlisting} 394 395 396 \subsection{Concrete Generic-Types} 397 398 The \CFA translator template-expands concrete generic-types into new structure types, affording maximal inlining. 399 To enable inter-operation among equivalent instantiations of a generic type, the translator saves the set of instantiations currently in scope and reuses the generated structure declarations where appropriate. 400 For example, a function declaration that accepts or returns a concrete generic-type produces a declaration for the instantiated struct in the same scope, which all callers may reuse. 401 For example, the concrete instantiation for @pair( const char *, int )@ is: 373 402 \begin{lstlisting} 374 403 struct _pair_conc1 { 375 const char * first;404 const char * first; 376 405 int second; 377 406 }; 378 407 \end{lstlisting} 379 408 380 A concrete generic type with dtype-static parameters is also expanded to a struct type, but this struct type is used for all matching instantiations. In the example above, the @pair(F*, T*)@ parameter to @value_p@ is such a type; its expansion looks something like this, and is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate: 409 A concrete generic-type with dtype-static parameters is also expanded to a structure type, but this type is used for all matching instantiations. 
410 In the above example, the @pair( F *, T * )@ parameter to @value_p@ is such a type; its expansion is below and it is used as the type of the variables @q@ and @r@ as well, with casts for member access where appropriate: 381 411 \begin{lstlisting} 382 412 struct _pair_conc0 { 383 void * first;384 void * second;413 void * first; 414 void * second; 385 415 }; 386 416 \end{lstlisting} 387 417 388 418 389 \subsection{Dynamic Generic Types} 390 391 Though \CFA implements concrete generic types efficiently, it also has a fully general system for computing with dynamic generic types. As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. Dynamic generic structs also have implicit size and alignment parameters, and also an \emph{offset array} which contains the offsets of each member of the struct\footnote{Dynamic generic unions need no such offset array, as all members are at offset 0; the size and alignment parameters are still provided for dynamic unions, however.}. Access to members\footnote{The \lstinline@offsetof@ macro is implemented similarly.} of a dynamic generic struct is provided by adding the corresponding member of the offset array to the struct pointer at runtime, essentially moving a compile-time offset calculation to runtime where necessary. 392 393 These offset arrays are statically generated where possible. If a dynamic generic type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume that the generic type is complete (that is, has a known layout) at any call-site, and the offset array is passed from the caller; if the generic type is concrete at the call site the elements of this offset array can even be statically generated using the C @offsetof@ macro. 
As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void*@, and @_offsetof_pair@ is the offset array passed in to @value@ for @pair(const char*, T)@. The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) };@. 394 395 In some cases the offset arrays cannot be statically generated. For instance, modularity is generally provided in C by including an opaque forward-declaration of a struct and associated accessor and mutator routines in a header file, with the actual implementations in a separately-compiled \texttt{.c} file. \CFA supports this pattern for generic types, and in this instance the caller does not know the actual layout or size of the dynamic generic type, and only holds it by pointer. The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed in to a function from that function's caller. These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic struct (un-@sized@ parameters are forbidden from the language from being used in a context that affects layout). Results of these layout functions are cached so that they are only computed once per type per function.%, as in the example below for @pair@. 
396 % \begin{lstlisting} 397 % static inline void _layoutof_pair(size_t* _szeof_pair, size_t* _alignof_pair, size_t* _offsetof_pair, 398 % size_t _szeof_R, size_t _alignof_R, size_t _szeof_S, size_t _alignof_S) { 399 % *_szeof_pair = 0; // default values 400 % *_alignof_pair = 1; 401 402 % // add offset, size, and alignment of first field 403 % _offsetof_pair[0] = *_szeof_pair; 404 % *_szeof_pair += _szeof_R; 405 % if ( *_alignof_pair < _alignof_R ) *_alignof_pair = _alignof_R; 406 407 % // padding, offset, size, and alignment of second field 408 % if ( *_szeof_pair & (_alignof_S - 1) ) 409 % *_szeof_pair += (_alignof_S - ( *_szeof_pair & (_alignof_S - 1) ) ); 410 % _offsetof_pair[1] = *_szeof_pair; 411 % *_szeof_pair += _szeof_S; 412 % if ( *_alignof_pair < _alignof_S ) *_alignof_pair = _alignof_S; 413 414 % // pad to struct alignment 415 % if ( *_szeof_pair & (*_alignof_pair - 1) ) 416 % *_szeof_pair += ( *_alignof_pair - ( *_szeof_pair & (*_alignof_pair - 1) ) ); 417 % } 418 % \end{lstlisting} 419 420 Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature. For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values. This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function. 421 422 Whether a type is concrete, dtype-static, or dynamic is decided based solely on the type parameters and @forall@ clause on the struct declaration. This design allows opaque forward declarations of generic types like @forall(otype T) struct Box;@ -- like in C, all uses of @Box(T)@ can be in a separately compiled translation unit, and callers from other translation units know the proper calling conventions to use. 
If the definition of a struct type was included in the decision of whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T) struct unique_ptr { T* p };@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}.), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off. 419 \subsection{Dynamic Generic-Types} 420 421 Though \CFA implements concrete generic-types efficiently, it also has a fully general system for dynamic generic types. 422 As mentioned in Section~\ref{sec:poly-fns}, @otype@ function parameters (in fact all @sized@ polymorphic parameters) come with implicit size and alignment parameters provided by the caller. 423 Dynamic generic-types also have an \emph{offset array} containing structure-member offsets. 424 A dynamic generic-union needs no such offset array, as all members are at offset 0, but size and alignment are still necessary. 425 Access to members of a dynamic structure is provided at runtime via base-displacement addressing with the structure pointer and the member offset (similar to the @offsetof@ macro), moving a compile-time offset calculation to runtime. 426 427 The offset arrays are statically generated where possible. 428 If a dynamic generic-type is declared to be passed or returned by value from a polymorphic function, the translator can safely assume the generic type is complete (\ie has a known layout) at any call-site, and the offset array is passed from the caller; 429 if the generic type is concrete at the call site, the elements of this offset array can even be statically generated using the C @offsetof@ macro. 430 As an example, @p.second@ in the @value@ function above is implemented as @*(p + _offsetof_pair[1])@, where @p@ is a @void *@, and @_offsetof_pair@ is the offset array passed into @value@ for @pair( const char *, T )@. 
431 The offset array @_offsetof_pair@ is generated at the call site as @size_t _offsetof_pair[] = { offsetof(_pair_conc1, first), offsetof(_pair_conc1, second) }@. 432 433 In some cases the offset arrays cannot be statically generated. 434 For instance, modularity is generally provided in C by including an opaque forward-declaration of a structure and associated accessor and mutator functions in a header file, with the actual implementations in a separately-compiled @.c@ file. 435 \CFA supports this pattern for generic types, but the caller does not know the actual layout or size of the dynamic generic-type, and only holds it by a pointer. 436 The \CFA translator automatically generates \emph{layout functions} for cases where the size, alignment, and offset array of a generic struct cannot be passed into a function from that function's caller. 437 These layout functions take as arguments pointers to size and alignment variables and a caller-allocated array of member offsets, as well as the size and alignment of all @sized@ parameters to the generic structure (un@sized@ parameters are forbidden from being used in a context that affects layout). 438 Results of these layout functions are cached so that they are only computed once per type per function. %, as in the example below for @pair@. 439 Layout functions also allow generic types to be used in a function definition without reflecting them in the function signature. 440 For instance, a function that strips duplicate values from an unsorted @vector(T)@ would likely have a pointer to the vector as its only explicit parameter, but use some sort of @set(T)@ internally to test for duplicate values. 441 This function could acquire the layout for @set(T)@ by calling its layout function with the layout of @T@ implicitly passed into the function. 442 443 Whether a type is concrete, dtype-static, or dynamic is decided solely on the type parameters and @forall@ clause on a declaration. 
444 This design allows opaque forward declarations of generic types, \eg @forall(otype T) struct Box@ -- like in C, all uses of @Box(T)@ can be separately compiled, and callers from other translation units know the proper calling conventions to use. 445 If the definition of a structure type is included in deciding whether a generic type is dynamic or concrete, some further types may be recognized as dtype-static (\eg @forall(otype T) struct unique_ptr { T * p }@ does not depend on @T@ for its layout, but the existence of an @otype@ parameter means that it \emph{could}.), but preserving separate compilation (and the associated C compatibility) in the existing design is judged to be an appropriate trade-off. 446 423 447 424 448 \subsection{Applications} 425 449 \label{sec:generic-apps} 426 450 427 The reuse of dtype-static struct instantiations enables some useful programming patterns at zero runtime cost. The most important such pattern is using @forall(dtype T) T*@ as a type-checked replacement for @void*@, as in this example, which takes a @qsort@ or @bsearch@-compatible comparison routine and creates a similar lexicographic comparison for pairs of pointers: 428 \begin{lstlisting} 429 forall(dtype T) 430 int lexcmp( pair(T*, T*)* a, pair(T*, T*)* b, int (*cmp)(T*, T*) ) { 431 int c = cmp(a->first, b->first); 432 if ( c == 0 ) c = cmp(a->second, b->second); 433 return c; 434 } 435 \end{lstlisting} 436 Since @pair(T*, T*)@ is a concrete type, there are no added implicit parameters to @lexcmp@, so the code generated by \CFA is effectively identical to a version of this function written in standard C using @void*@, yet the \CFA version is type-checked to ensure that the fields of both pairs and the arguments to the comparison function match in type. 437 438 Another useful pattern enabled by reused dtype-static type instantiations is zero-cost ``tag'' structs. Sometimes a particular bit of information is only useful for type-checking, and can be omitted at runtime. 
Tag structs can be used to provide this information to the compiler without further runtime overhead, as in the following example: 451 The reuse of dtype-static structure instantiations enables useful programming patterns at zero runtime cost. 452 The most important such pattern is using @forall(dtype T) T *@ as a type-checked replacement for @void *@, \eg creating a lexicographic comparison for pairs of pointers used by @bsearch@ or @qsort@: 453 \begin{lstlisting} 454 forall(dtype T) int lexcmp( pair( T *, T * ) * a, pair( T *, T * ) * b, int (* cmp)( T *, T * ) ) { 455 return cmp( a->first, b->first ) ? : cmp( a->second, b->second ); 456 } 457 \end{lstlisting} 458 Since @pair(T *, T * )@ is a concrete type, there are no implicit parameters passed to @lexcmp@, so the generated code is identical to a function written in standard C using @void *@, yet the \CFA version is type-checked to ensure the fields of both pairs and the arguments to the comparison function match in type. 459 460 Another useful pattern enabled by reused dtype-static type instantiations is zero-cost \emph{tag-structures}. 
461 Sometimes information is only used for type-checking and can be omitted at runtime, \eg: 439 462 \begin{lstlisting} 440 463 forall(dtype Unit) struct scalar { unsigned long value; }; 441 442 464 struct metres {}; 443 465 struct litres {}; 444 466 445 forall(dtype U) 446 scalar(U) ?+?(scalar(U) a, scalar(U) b) { 467 forall(dtype U) scalar(U) ?+?( scalar(U) a, scalar(U) b ) { 447 468 return (scalar(U)){ a.value + b.value }; 448 469 } 449 450 470 scalar(metres) half_marathon = { 21093 }; 451 471 scalar(litres) swimming_pool = { 2500000 }; 452 453 472 scalar(metres) marathon = half_marathon + half_marathon; 454 473 scalar(litres) two_pools = swimming_pool + swimming_pool; 455 marathon + swimming_pool; // ERROR -- caught by compiler 456 \end{lstlisting} 457 @scalar@ is a dtype-static type, so all uses of it use a single struct definition, containing only a single @unsigned long@, and can share the same implementations of common routines like @?+?@ -- these implementations may even be separately compiled, unlike \CC template functions. However, the \CFA type-checker ensures that matching types are used by all calls to @?+?@, preventing nonsensical computations like adding the length of a marathon to the volume of an olympic pool. 474 marathon + swimming_pool; $\C{// compilation ERROR}$ 475 \end{lstlisting} 476 @scalar@ is a dtype-static type, so all uses have a single structure definition, containing @unsigned long@, and can share the same implementations of common functions like @?+?@. 477 These implementations may even be separately compiled, unlike \CC template functions. 478 However, the \CFA type-checker ensures matching types are used by all calls to @?+?@, preventing nonsensical computations like adding a length to a volume. 479 458 480 459 481 \section{Tuples} 460 482 \label{sec:tuples} 461 483 462 The @pair(R, S)@ generic type used as an example in the previous section can be considered a special case of a more general \emph{tuple} data structure. 
The authors have implemented tuples in \CFA, with a design particularly motivated by two use cases: \emph{multiple-return-value functions} and \emph{variadic functions}. 463 464 In standard C, functions can return at most one value. This restriction results in code that emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}. In the former situation, the function designer creates a record type that combines all of the return values into a single type. Unfortunately, the designer must come up with a name for the return type and for each of its fields. Unnecessary naming is a common programming language issue, introducing verbosity and a complication of the user's mental model. As such, this technique is effective when used sparingly, but can quickly get out of hand if many functions need to return different combinations of types. In the latter approach, the designer simulates multiple return values by passing the additional return values as pointer parameters. The pointer parameters are assigned inside of the routine body to emulate a return. Using this approach, the caller is directly responsible for allocating storage for the additional temporary return values. This responsibility complicates the call site with a sequence of variable declarations leading up to the call. Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from only the routine signature whether the callee expects such a parameter to be initialized before the call. Furthermore, while many C routines that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, there are many C routines that are not null-safe. On a related note, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler. 
465 466 C does provide a mechanism for variadic functions through manipulation of @va_list@ objects, but it is notoriously type-unsafe. A variadic function is one that contains at least one parameter, followed by @...@ as the last token in the parameter list. In particular, some form of \emph{argument descriptor} is needed to inform the function of the number of arguments and their types, commonly a format string or counter parameter. It is important to note that both of these mechanisms are inherently redundant, because they require the user to specify information that the compiler knows explicitly. This required repetition is error prone, because it is easy for the user to add or remove arguments without updating the argument descriptor. In addition, C requires the programmer to hard code all of the possible expected types. As a result, it is cumbersome to write a variadic function that is open to extension. For example, consider a simple function that sums $N$ @int@s: 467 \begin{lstlisting} 468 int sum(int N, ...) { 469 va_list args; 470 va_start(args, N); // must manually specify last non-variadic argument 471 int ret = 0; 472 while(N) { 473 ret += va_arg(args, int); // must specify type 474 N--; 475 } 476 va_end(args); 477 return ret; 478 } 479 480 sum(3, 10, 20, 30); // must keep initial counter argument in sync 481 \end{lstlisting} 482 483 The @va_list@ type is a special C data type that abstracts variadic argument manipulation. The @va_start@ macro initializes a @va_list@, given the last named parameter. Each use of the @va_arg@ macro allows access to the next variadic argument, given a type. Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call. As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures. 
In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined~\citep{C11}. Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body. Since they rely on programmer convention rather than compile-time checks, variadic functions are inherently unsafe. 484 485 In practice, compilers can provide warnings to help mitigate some of the problems. For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers. Unfortunately, this attribute does not permit extensions to the format string syntax, so a programmer cannot extend it to warn for mismatches with custom types. 484 In many languages, functions can return at most one value; 485 however, many operations have multiple outcomes, some exceptional. 486 Consider C's @div@ and @remquo@ functions, which return the quotient and remainder for a division of integer and floating-point values, respectively. 487 \begin{lstlisting} 488 typedef struct { int quo, rem; } div_t; $\C{// from include stdlib.h}$ 489 div_t div( int num, int den ); 490 double remquo( double num, double den, int * quo ); 491 div_t qr = div( 13, 5 ); $\C{// return quotient/remainder aggregate}$ 492 int q; 493 double r = remquo( 13.5, 5.2, &q ); $\C{// return remainder, alias quotient}$ 494 \end{lstlisting} 495 @div@ aggregates the quotient/remainder in a structure, while @remquo@ aliases a parameter to an argument. 496 Both approaches are awkward. 
497 Alternatively, a programming language can directly support returning multiple values, \eg in \CFA: 498 \begin{lstlisting} 499 [ int, int ] div( int num, int den ); $\C{// return two integers}$ 500 [ double, double ] div( double num, double den ); $\C{// return two doubles}$ 501 int q, r; $\C{// overloaded variable names}$ 502 double q, r; 503 [ q, r ] = div( 13, 5 ); $\C{// select appropriate div and q, r}$ 504 [ q, r ] = div( 13.5, 5.2 ); $\C{// assign into tuple}$ 505 \end{lstlisting} 506 Clearly, this approach is straightforward to understand and use; 507 therefore, why do so few programming languages support this obvious feature, or provide it only awkwardly? 508 The answer is that there are complex consequences that cascade through multiple aspects of the language, especially the type-system. 509 This section shows these consequences and how \CFA handles them. 510 486 511 487 512 \subsection{Tuple Expressions} 488 513 489 The tuple extensions in \CFA can express multiple return values and variadic function parameters in an efficient and type-safe manner. \CFA introduces \emph{tuple expressions} and \emph{tuple types}. A tuple expression is an expression producing a fixed-size, ordered list of values of heterogeneous types. The type of a tuple expression is the tuple of the subexpression types, or a \emph{tuple type}. In \CFA, a tuple expression is denoted by a comma-separated list of expressions enclosed in square brackets. For example, the expression @[5, 'x', 10.5]@ has type @[int, char, double]@. The previous expression has three \emph{components}. Each component in a tuple expression can be any \CFA expression, including another tuple expression. The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. 
Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}. 490 491 \CFA allows declaration of \emph{tuple variables}, variables of tuple type. For example: 492 \begin{lstlisting} 493 [int, char] most_frequent(const char*); 494 495 const char* str = "hello, world!"; 496 [int, char] freq = most_frequent(str); 497 printf("%s -- %d %c\n", str, freq); 498 \end{lstlisting} 499 In this example, the type of the @freq@ and the return type of @most_frequent@ are both tuple types. Also of note is how the tuple expression @freq@ is implicitly flattened into separate @int@ and @char@ arguments to @printf@; this code snippet could have been shortened by replacing the last two lines with @printf("%s -- %d %c\n", str, most_frequent(str));@ using exactly the same mechanism. 500 501 In addition to variables of tuple type, it is also possible to have pointers to tuples, and arrays of tuples. Tuple types can be composed of any types, except for array types, since arrays are not of fixed size, which makes tuple assignment difficult when a tuple contains an array. 502 \begin{lstlisting} 503 [double, int] di; 504 [double, int] * pdi 505 [double, int] adi[10]; 506 \end{lstlisting} 507 This example declares a variable of type @[double, int]@, a variable of type pointer to @[double, int]@, and an array of ten @[double, int]@. 514 The addition of multiple-return-value functions (MRVF) is useless without a syntax for accepting multiple values at the call-site. 515 The simplest mechanism for capturing the return values is variable assignment, allowing the values to be retrieved directly. 516 As such, \CFA allows assigning multiple values from a function into multiple variables, using a square-bracketed list of lvalue expressions (as above), called a \emph{tuple}. 
517 518 However, functions also use \emph{composition} (nested calls), with the direct consequence that MRVFs must also support composition to be orthogonal with single-returning-value functions (SRVF), \eg: 519 \begin{lstlisting} 520 printf( "%d %d\n", div( 13, 5 ) ); $\C{// return values separated into arguments}$ 521 \end{lstlisting} 522 Here, the values returned by @div@ are composed with the call to @printf@ by flattening the tuple into separate arguments. 523 However, the \CFA type-system must support significantly more complex composition: 524 \begin{lstlisting} 525 [ int, int ] foo$\(_1\)$( int ); 526 [ double ] foo$\(_2\)$( int ); 527 void bar( int, double, double ); 528 bar( foo( 3 ), foo( 3 ) ); 529 \end{lstlisting} 530 The type-resolver only has the tuple return-types to resolve the call to @bar@ as the @foo@ parameters are identical, which involves unifying the possible @foo@ functions with @bar@'s parameter list. 531 No combination of @foo@s is an exact match with @bar@'s parameters, so the resolver applies C conversions. 532 The minimal cost is @bar( foo@$_1$@( 3 ), foo@$_2$@( 3 ) )@, giving (@int@, {\color{ForestGreen}@int@}, @double@) to (@int@, {\color{ForestGreen}@double@}, @double@) with one {\color{ForestGreen}safe} (widening) conversion from @int@ to @double@ versus ({\color{red}@double@}, {\color{ForestGreen}@int@}, {\color{ForestGreen}@int@}) to ({\color{red}@int@}, {\color{ForestGreen}@double@}, {\color{ForestGreen}@double@}) with one {\color{red}unsafe} (narrowing) conversion from @double@ to @int@ and two safe conversions. 533 534 535 \subsection{Tuple Variables} 536 537 An important observation from function composition is that new variable names are not required to initialize parameters from an MRVF. 
538 \CFA also allows declaration of tuple variables that can be initialized from an MRVF, since it can be awkward to declare multiple variables of different types, \eg: 539 \begin{lstlisting} 540 [ int, int ] qr = div( 13, 5 ); $\C{// tuple-variable declaration and initialization}$ 541 [ double, double ] qr = div( 13.5, 5.2 ); 542 \end{lstlisting} 543 where the tuple variable-name serves the same purpose as the parameter name(s). 544 Tuple variables can be composed of any types, except for array types, since array sizes are generally unknown. 545 546 One way to access the tuple-variable components is with assignment or composition: 547 \begin{lstlisting} 548 [ q, r ] = qr; $\C{// access tuple-variable components}$ 549 printf( "%d %d\n", qr ); 550 \end{lstlisting} 551 \CFA also supports \emph{tuple indexing} to access single components of a tuple expression: 552 \begin{lstlisting} 553 [int, int] * p = &qr; $\C{// tuple pointer}$ 554 int rem = qr.1; $\C{// access remainder}$ 555 int quo = div( 13, 5 ).0; $\C{// access quotient}$ 556 p->0 = 5; $\C{// change quotient}$ 557 bar( qr.1, qr ); $\C{// pass remainder and quotient/remainder}$ 558 rem = [42, div( 13, 5 )].0.1; $\C{// access 2nd component of 1st component of tuple expression}$ 559 \end{lstlisting} 560 508 561 509 562 \subsection{Flattening and Restructuring} 510 563 511 In function call contexts, tuples support implicit flattening and restructuring conversions. Tuple flattening recursively expands a tuple into the list of its basic components. Tuple structuring packages a list of expressions into a value of tuple type. 512 \begin{lstlisting} 513 int f(int, int); 514 int g([int, int]); 515 int h(int, [int, int]); 564 In function call contexts, tuples support implicit flattening and restructuring conversions. 565 Tuple flattening recursively expands a tuple into the list of its basic components. 
566 Tuple structuring packages a list of expressions into a value of tuple type, \eg: 567 %\lstDeleteShortInline@% 568 %\par\smallskip 569 %\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}} 570 \begin{lstlisting} 571 int f( int, int ); 572 int g( [int, int] ); 573 int h( int, [int, int] ); 516 574 [int, int] x; 517 575 int y; 518 519 f(x); // flatten 520 g(y, 10); // structure 521 h(x, y); // flatten & structure 522 \end{lstlisting} 523 In \CFA, each of these calls is valid. In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. Finally, in the call to @h@, @y@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure. 524 525 % In {K-W C} \citep{Buhr94a,Till89}, a precursor to \CFA, there were 4 tuple coercions: opening, closing, flattening, and structuring. Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. Flattening coerces a nested tuple into a flat tuple, \ie it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. Structuring moves in the opposite direction, \ie it takes a flat tuple value and provides structure by introducing nested tuple components. 526 527 In \CFA, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. 
Specifically, the expression resolution algorithm examines all of the possible alternatives for an expression to determine the best match. In resolving a function call expression, each combination of function value and list of argument alternatives is examined. Given a particular argument list and function value, the list of argument alternatives is flattened to produce a list of non-tuple valued expressions. Then the flattened list of expressions is compared with each value in the function's parameter list. If the parameter's type is not a tuple type, then the current argument value is unified with the parameter type, and on success the next argument and parameter are examined. If the parameter's type is a tuple type, then the structuring conversion takes effect, recursively applying the parameter matching algorithm using the tuple's component types as the parameter list types. Assuming a successful unification, eventually the algorithm gets to the end of the tuple type, which causes all of the matching expressions to be consumed and structured into a tuple expression. For example, in 528 \begin{lstlisting} 529 int f(int, [double, int]); 530 f([5, 10.2], 4); 531 \end{lstlisting} 532 There is only a single definition of @f@, and 3 arguments with only single interpretations. First, the argument alternative list @[5, 10.2], 4@ is flattened to produce the argument list @5, 10.2, 4@. Next, the parameter matching algorithm begins, with $P =~$@int@ and $A =~$@int@, which unifies exactly. Moving to the next parameter and argument, $P =~$@[double, int]@ and $A =~$@double@. This time, the parameter is a tuple type, so the algorithm applies recursively with $P' =~$@double@ and $A =~$@double@, which unifies exactly. Then $P' =~$@int@ and $A =~$@double@, which again unifies exactly. At this point, the end of $P'$ has been reached, so the arguments @10.2, 4@ are structured into the tuple expression @[10.2, 4]@. 
Finally, the end of the parameter list $P$ has also been reached, so the final expression is @f(5, [10.2, 4])@. 576 f( x ); $\C{// flatten}$ 577 g( y, 10 ); $\C{// structure}$ 578 h( x, y ); $\C{// flatten and structure}$ 579 \end{lstlisting} 580 %\end{lstlisting} 581 %& 582 %\begin{lstlisting} 583 %\end{tabular} 584 %\smallskip\par\noindent 585 %\lstMakeShortInline@% 586 In the call to @f@, @x@ is implicitly flattened so the components of @x@ are passed as the two arguments. 587 In the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the parameter type of @g@. 588 Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. 589 The flexible structure of tuples permits a simple and expressive function call syntax to work seamlessly with both SRVF and MRVF, and with any number of arguments of arbitrarily complex structure. 590 591 592 \subsection{Tuple Assignment} 593 594 An assignment where the left side is a tuple type is called \emph{tuple assignment}. 595 There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple} and \emph{mass assignment}, respectively. 
596 %\lstDeleteShortInline@% 597 %\par\smallskip 598 %\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}} 599 \begin{lstlisting} 600 int x = 10; 601 double y = 3.5; 602 [int, double] z; 603 z = [x, y]; $\C{// multiple assignment}$ 604 [x, y] = z; $\C{// multiple assignment}$ 605 z = 10; $\C{// mass assignment}$ 606 [y, x] = 3.14; $\C{// mass assignment}$ 607 \end{lstlisting} 608 %\end{lstlisting} 609 %& 610 %\begin{lstlisting} 611 %\end{tabular} 612 %\smallskip\par\noindent 613 %\lstMakeShortInline@% 614 Both kinds of tuple assignment have parallel semantics, so that each value on the left and right side is evaluated before any assignments occur. 615 As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function, \eg, @[x, y] = [y, x]@. 616 This semantics means mass assignment differs from C cascading assignment (\eg @a = b = c@) in that conversions are applied in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. 617 For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, yielding @y == 3.14@ and @x == 3@; 618 whereas C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, yielding @3@ in @y@ and @x@. 619 Finally, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C. 620 This example shows mass, multiple, and cascading assignment used in one expression: 621 \begin{lstlisting} 622 void f( [int, int] ); 623 f( [x, y] = z = 1.5 ); $\C{// assignments in parameter list}$ 624 \end{lstlisting} 625 533 626 534 627 \subsection{Member Access} 535 628 536 At times, it is desirable to access a single component of a tuple-valued expression without creating unnecessary temporary variables to assign to. 
Given a tuple-valued expression @e@ and a compile-time constant integer $i$ where $0 \leq i < n$, where $n$ is the number of components in @e@, @e.i@ accesses the $i$\textsuperscript{th} component of @e@. For example, 537 \begin{lstlisting} 538 [int, double] x; 539 [char *, int] f(); 540 void g(double, int); 541 [int, double] * p; 542 543 int y = x.0; // access int component of x 544 y = f().1; // access int component of f 545 p->0 = 5; // access int component of tuple pointed-to by p 546 g(x.1, x.0); // rearrange x to pass to g 547 double z = [x, f()].0.1; // access second component of first component of tuple expression 548 \end{lstlisting} 549 As seen above, tuple-index expressions can occur on any tuple-typed expression, including tuple-returning functions, square-bracketed tuple expressions, and other tuple-index expressions, provided the retrieved component is also a tuple. This feature was proposed for {K-W C}, but never implemented~\citep[p.~45]{Till89}. 550 551 It is possible to access multiple fields from a single expression using a \emph{member-access tuple expression}. The result is a single tuple expression whose type is the tuple of the types of the members. For example, 629 It is also possible to access multiple fields from a single expression using a \emph{member-access}. 630 The result is a single tuple-valued expression whose type is the tuple of the types of the members, \eg: 552 631 \begin{lstlisting} 553 632 struct S { int x; double y; char * z; } s; 554 s.[x, y, z]; 555 \end{lstlisting} 556 Here, the type of @s.[x, y, z]@ is @[int, double, char *]@. A member tuple expression has the form @a.[x, y, z];@ where @a@ is an expression with type @T@, where @T@ supports member access expressions, and @x, y, z@ are all members of @T@ with types @T$_x$@, @T$_y$@, and @T$_z$@ respectively. Then the type of @a.[x, y, z]@ is @[T$_x$, T$_y$, T$_z$]@. 
557 558 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange components, drop components, duplicate components, etc.): 633 s.[x, y, z] = 0; 634 \end{lstlisting} 635 Here, the mass assignment sets all members of @s@ to zero. 636 Since tuple-index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg rearrange, drop, and duplicate components). 637 %\lstDeleteShortInline@% 638 %\par\smallskip 639 %\begin{tabular}{@{}l@{\hspace{1.5\parindent}}||@{\hspace{1.5\parindent}}l@{}} 640 \begin{lstlisting} 560 641 [int, int, long, double] x; 561 void f(double, long); 562 563 f(x.[0, 3]); // f(x.0, x.3) 564 x.[0, 1] = x.[1, 0]; // [x.0, x.1] = [x.1, x.0] 565 [long, int, long] y = x.[2, 0, 2]; 566 \end{lstlisting} 567 568 It is possible for a member tuple expression to contain other member access expressions: 642 void f( double, long ); 643 x.[0, 1] = x.[1, 0]; $\C{// rearrange: [x.0, x.1] = [x.1, x.0]}$ 644 f( x.[0, 3] ); $\C{// drop: f(x.0, x.3)}$ 645 [int, int, int] y = x.[2, 0, 2]; $\C{// duplicate: [y.0, y.1, y.2] = [x.2, x.0, x.2]}$ 646 \end{lstlisting} 647 %\end{lstlisting} 648 %& 649 %\begin{lstlisting} 650 %\end{tabular} 651 %\smallskip\par\noindent 652 %\lstMakeShortInline@% 653 It is also possible for a member access to contain other member accesses, \eg: 569 654 \begin{lstlisting} 570 655 struct A { double i; int j; }; 571 656 struct B { int * k; short l; }; 572 657 struct C { int x; A y; B z; } v; 573 v.[x, y.[i, j], z.k]; 574 \end{lstlisting} 575 This expression is equivalent to @[v.x, [v.y.i, v.y.j], v.z.k]@. That is, the aggregate expression is effectively distributed across the tuple, which allows simple and easy access to multiple components in an aggregate, without repetition.
It is guaranteed that the aggregate expression to the left of the @.@ in a member tuple expression is evaluated exactly once. As such, it is safe to use member tuple expressions on the result of a side-effecting function. 576 577 \subsection{Tuple Assignment} 578 579 In addition to tuple-index expressions, individual components of tuples can be accessed by a \emph{destructuring assignment} which has a tuple expression with lvalue components on its left-hand side. More generally, an assignment where the left-hand side of the assignment operator has a tuple type is called \emph{tuple assignment}. There are two kinds of tuple assignment depending on whether the right-hand side of the assignment operator has a tuple type or a non-tuple type, called \emph{multiple assignment} and \emph{mass assignment}, respectively. 580 \begin{lstlisting} 581 int x; 582 double y; 583 [int, double] z; 584 [y, x] = 3.14; // mass assignment 585 [x, y] = z; // multiple assignment 586 z = 10; // mass assignment 587 z = [x, y]; // multiple assignment 588 \end{lstlisting} 589 Let $L_i$ for $i$ in $[0, n)$ represent each component of the flattened left-hand side, $R_i$ represent each component of the flattened right-hand side of a multiple assignment, and $R$ represent the right-hand side of a mass assignment. 590 591 For a multiple assignment to be valid, both tuples must have the same number of elements when flattened. Multiple assignment assigns $R_i$ to $L_i$ for each $i$. 592 That is, @?=?(&$L_i$, $R_i$)@ must be a well-typed expression. In the previous example, @[x, y] = z@, @z@ is flattened into @z.0, z.1@, and the assignments @x = z.0@ and @y = z.1@ are executed. 593 594 A mass assignment assigns the value $R$ to each $L_i$. For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. 
This rule differs from C cascading assignment (\eg @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well. 595 596 Both kinds of tuple assignment have parallel semantics, such that each value on the left side and right side is evaluated \emph{before} any assignments occur. As a result, it is possible to swap the values in two variables without explicitly creating any temporary variables or calling a function: 597 \begin{lstlisting} 598 int x = 10, y = 20; 599 [x, y] = [y, x]; 600 \end{lstlisting} 601 After executing this code, @x@ has the value @20@ and @y@ has the value @10@. 602 603 Tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, just like all other assignment expressions in C. This definition allows cascading tuple assignment and use of tuple assignment in other expression contexts, an occasionally useful idiom to keep code succinct and reduce repetition. 604 % In \CFA, tuple assignment is an expression where the result type is the type of the left-hand side of the assignment, as in normal assignment. That is, a tuple assignment produces the value of the left-hand side after assignment. These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. These semantics are a change from the original tuple design in {K-W C}~\citep{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. 
This decision was made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position. While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA without affecting backwards compatibility with C. Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. In another language, tuple assignment as a statement could be reasonable, but it would be inconsistent for tuple assignment to be the only kind of assignment in \CFA that is not an expression. 605 658 v.[x, y.[i, j], z.k]; $\C{// [v.x, [v.y.i, v.y.j], v.z.k]}$ 659 \end{lstlisting} 660 661 662 \begin{comment} 606 663 \subsection{Casting} 607 664 608 In C, the cast operator is used to explicitly convert between types. In \CFA, the cast operator has a secondary use as type ascription. That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function: 665 In C, the cast operator is used to explicitly convert between types. 666 In \CFA, the cast operator has a secondary use as type ascription. 667 That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function: 609 668 \begin{lstlisting} 610 669 int f(); // (1) … … 615 674 \end{lstlisting} 616 675 617 Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples. Taking a look at standard C provides some guidance with respect to the way casts should work with tuples: 676 Since casting is a fundamental operation in \CFA, casts should be given a meaningful interpretation in the context of tuples. 
677 Taking a look at standard C provides some guidance with respect to the way casts should work with tuples: 618 678 \begin{lstlisting} 619 679 int f(); … … 623 683 (int)g(); // (2) 624 684 \end{lstlisting} 625 In C, (1) is a valid cast, which calls @f@ and discards its result. On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical. Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid. 626 627 Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$. Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression. This approach follows naturally from the way that a cast to @void@ works in C. 685 In C, (1) is a valid cast, which calls @f@ and discards its result. 686 On the other hand, (2) is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical. 687 Generalizing these principles, any cast wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid. 688 689 Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$. 690 Excess elements ($S_j$ for all $j$ in $[n, m)$) are evaluated, but their values are discarded so that they are not included in the result expression. 691 This approach follows naturally from the way that a cast to @void@ works in C. 
628 692 629 693 For example, in 630 694 \begin{lstlisting} 631 [int, int, int] f(); 632 [int, [int, int], int] g(); 633 634 ([int, double])f(); $\C{// (1)}$ 635 ([int, int, int])g(); $\C{// (2)}$ 636 ([void, [int, int]])g(); $\C{// (3)}$ 637 ([int, int, int, int])g(); $\C{// (4)}$ 638 ([int, [int, int, int]])g(); $\C{// (5)}$ 639 \end{lstlisting} 640 641 (1) discards the last element of the return value and converts the second element to @double@. Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@. If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@. 695 [int, int, int] f(); 696 [int, [int, int], int] g(); 697 698 ([int, double])f(); $\C{// (1)}$ 699 ([int, int, int])g(); $\C{// (2)}$ 700 ([void, [int, int]])g(); $\C{// (3)}$ 701 ([int, int, int, int])g(); $\C{// (4)}$ 702 ([int, [int, int, int]])g(); $\C{// (5)}$ 703 \end{lstlisting} 704 705 (1) discards the last element of the return value and converts the second element to @double@. 706 Since @int@ is effectively a 1-element tuple, (2) discards the second component of the second element of the return value of @g@. 707 If @g@ is free of side effects, this expression is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@. 642 708 Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@). 643 709 644 Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. 
As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. That is, it is invalid to cast @[int, int]@ to @[int, int, int]@. 710 Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions\footnote{User-defined conversions have been considered, but for compatibility with C and the existing use of casts as type ascription, any future design for such conversions would require more precise matching of types than allowed for function arguments and parameters.}. 711 As such, (4) is invalid because the cast target type contains 4 components, while the source type contains only 3. 712 Similarly, (5) is invalid because the cast @([int, int, int])(g().1)@ is invalid. 713 That is, it is invalid to cast @[int, int]@ to @[int, int, int]@. 714 \end{comment} 715 645 716 646 717 \subsection{Polymorphism} 647 718 648 Tuples also integrate with \CFA polymorphism as a special sort of generic type. Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types. 649 \begin{lstlisting} 650 forall(otype T, dtype U) 651 void f(T x, U * y); 652 653 f([5, "hello"]); 654 \end{lstlisting} 655 In this example, @[5, "hello"]@ is flattened, so that the argument list appears as @5, "hello"@. The argument matching algorithm binds @T@ to @int@ and @U@ to @const char*@, and calls the function as normal. 656 657 Tuples, however, may contain polymorphic components. For example, a plus operator can be written to add two triples of a type together. 658 \begin{lstlisting} 659 forall(otype T | { T ?+?(T, T); }) 660 [T, T, T] ?+?([T, T, T] x, [T, T, T] y) { 661 return [x.0+y.0, x.1+y.1, x.2+y.2]; 719 Tuples also integrate with \CFA polymorphism as a kind of generic type. 
720 Due to the implicit flattening and structuring conversions involved in argument passing, @otype@ and @dtype@ parameters are restricted to matching only with non-tuple types, \eg: 721 \begin{lstlisting} 722 forall(otype T, dtype U) void f( T x, U * y ); 723 f( [5, "hello"] ); 724 \end{lstlisting} 725 where @[5, "hello"]@ is flattened, giving argument list @5, "hello"@, and @T@ binds to @int@ and @U@ binds to @const char@. 726 Tuples, however, may contain polymorphic components. 727 For example, a plus operator can be written to add two triples together. 728 \begin{lstlisting} 729 forall(otype T | { T ?+?( T, T ); }) [T, T, T] ?+?( [T, T, T] x, [T, T, T] y ) { 730 return [x.0 + y.0, x.1 + y.1, x.2 + y.2]; 662 731 } 663 732 [int, int, int] x; … … 666 735 \end{lstlisting} 667 736 668 Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. Previously in \CFA, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie no implicit conversions are applied to assertion arguments). In the example below: 669 \begin{lstlisting} 670 int f([int, double], double); 671 forall(otype T, otype U | { T f(T, U, U); }) 672 void g(T, U); 673 g(5, 10.21); 674 \end{lstlisting} 675 If assertion arguments must match exactly, then the call to @g@ cannot be resolved, since the expected type of @f@ is flat, while the only @f@ in scope requires a tuple type. Since tuples are fluid, this requirement reduces the usability of tuples in polymorphic code. To ease this pain point, function parameter and return lists are flattened for the purposes of type unification, which allows the previous example to pass expression resolution. 676 677 This relaxation is made possible by extending the existing thunk generation scheme, as described by \citet{Bilson03}. 
Now, whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function: 678 \begin{lstlisting} 679 int _thunk(int _p0, double _p1, double _p2) { 680 return f([_p0, _p1], _p2); 681 } 682 \end{lstlisting} 683 Essentially, this thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism. These thunks take advantage of GCC C nested functions to produce closures that have the usual function pointer signature. 737 Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. 738 \begin{lstlisting} 739 int f( [int, double], double ); 740 forall(otype T, otype U | { T f( T, U, U ); }) void g( T, U ); 741 g( 5, 10.21 ); 742 \end{lstlisting} 743 Hence, function parameter and return lists are flattened for the purposes of type unification allowing the example to pass expression resolution. 744 This relaxation is possible by extending the thunk scheme described by \citet{Bilson03}. 745 Whenever a candidate's parameter structure does not exactly match the formal parameter's structure, a thunk is generated to specialize calls to the actual function: 746 \begin{lstlisting} 747 int _thunk( int _p0, double _p1, double _p2 ) { return f( [_p0, _p1], _p2 ); } 748 \end{lstlisting} 749 so the thunk provides flattening and structuring conversions to inferred functions, improving the compatibility of tuples and polymorphism. 750 These thunks take advantage of GCC C nested-functions to produce closures that have the usual function pointer signature. 751 684 752 685 753 \subsection{Variadic Tuples} 686 687 To define variadic functions, \CFA adds a new kind of type parameter, @ttype@. Matching against a @ttype@ (``tuple type'') parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types. 
In a given parameter list, there should be at most one @ttype@ parameter that must occur last, otherwise the call can never resolve, given the previous rule. This idea essentially matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates. As such, @ttype@ variables are also referred to as \emph{argument} or \emph{parameter packs} in this paper. 688 689 Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is through recursion. Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled. 690 691 For example, the C @sum@ function at the beginning of Section~\ref{sec:tuples} could be written using @ttype@ as: 692 \begin{lstlisting} 693 int sum(){ return 0; } // (0) 694 forall(ttype Params | { int sum(Params); }) 695 int sum(int x, Params rest) { // (1) 696 return x+sum(rest); 697 } 698 sum(10, 20, 30); 699 \end{lstlisting} 700 Since (0) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@. 701 In order to call (1), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@. 702 In order to finish the resolution of @sum@, an assertion parameter that matches @int sum(int, int)@ is required. 703 Like in the previous iteration, (0) is not a valid candidate, so (1) is examined with @Params@ bound to @[int]@, requiring the assertion @int sum(int)@. 704 Next, (0) fails, and to satisfy (1) @Params@ is bound to @[]@, requiring an assertion @int sum()@. 705 Finally, (0) matches and (1) fails, which terminates the recursion. 
706 Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10+sum(20, 30)@ $\rightarrow$ @10+(20+sum(30))@ $\rightarrow$ @10+(20+(30+sum()))@ $\rightarrow$ @10+(20+(30+0))@. 707 708 As a point of note, this version does not require any form of argument descriptor, since the \CFA type system keeps track of all of these details. It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments: 709 \begin{lstlisting} 710 int sum(int x, int y){ 711 return x+y; 712 } 713 forall(ttype Params | { int sum(int, Params); }) 714 int sum(int x, int y, Params rest) { 715 return sum(x+y, rest); 716 } 717 \end{lstlisting} 718 719 One more iteration permits the summation of any summable type, as long as all arguments are the same type: 754 \label{sec:variadic-tuples} 755 756 To define variadic functions, \CFA adds a new kind of type parameter, @ttype@ (tuple type). 757 Matching against a @ttype@ parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types. 758 In a given parameter list, there must be at most one @ttype@ parameter that occurs last, which matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates. 759 As such, @ttype@ variables are also called \emph{argument packs}. 760 761 Like variadic templates, the main way to manipulate @ttype@ polymorphic functions is via recursion. 762 Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful. 763 Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled. 
764 For example, a generalized @sum@ function written using @ttype@: 765 \begin{lstlisting} 766 int sum$\(_0\)$() { return 0; } 767 forall(ttype Params | { int sum( Params ); } ) int sum$\(_1\)$( int x, Params rest ) { 768 return x + sum( rest ); 769 } 770 sum( 10, 20, 30 ); 771 \end{lstlisting} 772 Since @sum@\(_0\) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@. 773 In order to call @sum@\(_1\), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@. 774 The process continues, @Params@ is bound to @[]@, requiring an assertion @int sum()@, which matches @sum@\(_0\) and terminates the recursion. 775 Effectively, this algorithm traces as @sum(10, 20, 30)@ $\rightarrow$ @10 + sum(20, 30)@ $\rightarrow$ @10 + (20 + sum(30))@ $\rightarrow$ @10 + (20 + (30 + sum()))@ $\rightarrow$ @10 + (20 + (30 + 0))@. 776 777 It is reasonable to take the @sum@ function a step further to enforce a minimum number of arguments: 778 \begin{lstlisting} 779 int sum( int x, int y ) { return x + y; } 780 forall(ttype Params | { int sum( int, Params ); } ) int sum( int x, int y, Params rest ) { 781 return sum( x + y, rest ); 782 } 783 \end{lstlisting} 784 One more step permits the summation of any summable type with all arguments of the same type: 720 785 \begin{lstlisting} 721 786 trait summable(otype T) { 722 T ?+?(T, T);787 T ?+?( T, T ); 723 788 }; 724 forall(otype R | summable(R)) 725 R sum(R x, R y){ 726 return x+y; 727 } 728 forall(otype R, ttype Params 729 | summable(R) 730 | { R sum(R, Params); }) 731 R sum(R x, R y, Params rest) { 732 return sum(x+y, rest); 733 } 734 \end{lstlisting} 735 Unlike C, it is not necessary to hard code the expected type. This code is naturally open to extension, in that any user-defined type with a @?+?@ operator is automatically able to be used with the @sum@ function. 
That is to say, the programmer who writes @sum@ does not need full program knowledge of every possible data type, unlike what is necessary to write an equivalent function using the standard C mechanisms. Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators. 736 737 It is also possible to write a type-safe variadic print routine which can replace @printf@: 789 forall(otype R | summable( R ) ) R sum( R x, R y ) { 790 return x + y; 791 } 792 forall(otype R, ttype Params | summable(R) | { R sum(R, Params); } ) R sum(R x, R y, Params rest) { 793 return sum( x + y, rest ); 794 } 795 \end{lstlisting} 796 Unlike C variadic functions, it is unnecessary to hard code the number and expected types. 797 Furthermore, this code is extendable so that any user-defined type with a @?+?@ operator can be used. 798 Summing arbitrary heterogeneous lists is possible with similar code by adding the appropriate type variables and addition operators. 799 800 It is also possible to write a type-safe variadic print function to replace @printf@: 738 801 \begin{lstlisting} 739 802 struct S { int x, y; }; 740 forall(otype T, ttype Params | 741 { void print(T); void print(Params); }) 742 void print(T arg, Params rest) { 743 print(arg); 744 print(rest); 745 } 746 void print(char * x) { printf("%s", x); } 747 void print(int x) { printf("%d", x); } 748 void print(S s) { print("{ ", s.x, ",", s.y, " }"); } 749 750 print("s = ", (S){ 1, 2 }, "\n"); 751 \end{lstlisting} 752 This example routine showcases a variadic-template-like decomposition of the provided argument list. The individual @print@ routines allow printing a single element of a type. The polymorphic @print@ allows printing any list of types, as long as each individual type has a @print@ function. The individual print functions can be used to build up more complicated @print@ routines, such as for @S@, which is something that cannot be done with @printf@ in C.
753 754 It is also possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions. For example, it is possible to write @new@ as a library function: 755 \begin{lstlisting} 756 struct Pair(otype R, otype S); 757 forall(otype R, otype S) 758 void ?{}(Pair(R, S) *, R, S); // (1) 759 760 forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); }) 761 T * new(Params p) { 762 return ((T*)malloc( sizeof(T) )){ p }; // construct into result of malloc 763 } 764 765 Pair(int, char) * x = new(42, '!'); 766 \end{lstlisting} 767 The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects. This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference. 768 769 In the call to @new@, @Pair(int, char)@ is selected to match @T@, and @Params@ is expanded to match @[int, char]@. The constructor (1) may be specialized to satisfy the assertion for a constructor with an interface compatible with @void ?{}(Pair(int, char) *, int, char)@. 770 771 \TODO{Check if we actually can use ttype parameters on generic types (if they set the complete flag, it should work, or nearly so).} 803 forall(otype T, ttype Params | { void print(T); void print(Params); }) void print(T arg, Params rest) { 804 print(arg); print(rest); 805 } 806 void print( char * x ) { printf( "%s", x ); } 807 void print( int x ) { printf( "%d", x ); } 808 void print( S s ) { print( "{ ", s.x, ",", s.y, " }" ); } 809 print( "s = ", (S){ 1, 2 }, "\n" ); 810 \end{lstlisting} 811 This example showcases a variadic-template-like decomposition of the provided argument list. 812 The individual @print@ functions allow printing a single element of a type. 813 The polymorphic @print@ allows printing any list of types, as long as each individual type has a @print@ function.
814 The individual print functions can be used to build up more complicated @print@ functions, such as @S@, which cannot be done with @printf@ in C. 815 816 Finally, it is possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions. 817 For example, it is possible to write @new@ as a library function: 818 \begin{lstlisting} 819 forall( otype R, otype S ) void ?{}( pair(R, S) *, R, S ); 820 forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params ); } ) T * new( Params p ) { 821 return ((T *)malloc()){ p }; $\C{// construct into result of malloc}$ 822 } 823 pair( int, char ) * x = new( 42, '!' ); 824 \end{lstlisting} 825 The @new@ function provides the combination of type-safe @malloc@ with a \CFA constructor call, making it impossible to forget constructing dynamically allocated objects. 826 This function provides the type-safety of @new@ in \CC, without the need to specify the allocated type again, thanks to return-type inference. 827 772 828 773 829 \subsection{Implementation} 774 830 775 Tuples are implemented in the \CFA translator via a transformation into generic types. For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated. For example: 831 Tuples are implemented in the \CFA translator via a transformation into generic types. 
832 For each $N$, the first time an $N$-tuple is seen in a scope a generic type with $N$ type parameters is generated, \eg: 776 833 \begin{lstlisting} 777 834 [int, int] f() { 778 [double, double] x; 779 [int, double, int] y; 780 } 781 \end{lstlisting} 782 Is transformed into: 783 \begin{lstlisting} 784 forall(dtype T0, dtype T1 | sized(T0) | sized(T1)) 785 struct _tuple2 { // generated before the first 2-tuple 786 T0 field_0; 787 T1 field_1; 835 [double, double] x; 836 [int, double, int] y; 837 } 838 \end{lstlisting} 839 is transformed into: 840 \begin{lstlisting} 841 forall(dtype T0, dtype T1 | sized(T0) | sized(T1)) struct _tuple2 { 842 T0 field_0; $\C{// generated before the first 2-tuple}$ 843 T1 field_1; 788 844 }; 789 845 _tuple2(int, int) f() { 790 _tuple2(double, double) x; 791 forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2)) 792 struct _tuple3 { // generated before the first 3-tuple 793 T0 field_0; 794 T1 field_1; 795 T2 field_2; 796 }; 797 _tuple3_(int, double, int) y; 798 } 799 \end{lstlisting} 800 801 Tuple expressions are then simply converted directly into compound literals: 802 \begin{lstlisting} 803 [5, 'x', 1.24]; 804 \end{lstlisting} 805 Becomes: 806 \begin{lstlisting} 807 (_tuple3(int, char, double)){ 5, 'x', 1.24 }; 808 \end{lstlisting} 809 846 _tuple2(double, double) x; 847 forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2)) struct _tuple3 { 848 T0 field_0; $\C{// generated before the first 3-tuple}$ 849 T1 field_1; 850 T2 field_2; 851 }; 852 _tuple3(int, double, int) y; 853 } 854 \end{lstlisting} 855 Tuple expressions are then simply converted directly into compound literals, \eg @[5, 'x', 1.24]@ becomes @(_tuple3(int, char, double)){ 5, 'x', 1.24 }@. 
856 857 \begin{comment} 810 858 Since tuples are essentially structures, tuple indexing expressions are just field accesses: 811 859 \begin{lstlisting} … … 826 874 f(x.field_0, (_tuple2){ x.field_1, 'z' }); 827 875 \end{lstlisting} 828 Note that due to flattening, @x@ used in the argument position is converted into the list of its fields. In the call to @f@, the second and third argument components are structured into a tuple argument. Similarly, tuple member expressions are recursively expanded into a list of member access expressions. 829 830 Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion. Each unique expression is assigned an identifier and is guaranteed to be executed exactly once: 876 Note that due to flattening, @x@ used in the argument position is converted into the list of its fields. 877 In the call to @f@, the second and third argument components are structured into a tuple argument. 878 Similarly, tuple member expressions are recursively expanded into a list of member access expressions. 879 880 Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion. 881 Each unique expression is assigned an identifier and is guaranteed to be executed exactly once: 831 882 \begin{lstlisting} 832 883 void g(int, double); … … 842 893 [int, double] _unq0; 843 894 g( 844 845 895 (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0, 896 (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1, 846 897 ); 847 898 \end{lstlisting} 848 Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order. The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true. 
Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. Tuple member expressions also take advantage of unique expressions in the case of possible impurity. 849 850 Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions. 851 852 The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, \eg in a unique expression. The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new. 899 Since argument evaluation order is not specified by the C programming language, this scheme is built to work regardless of evaluation order. 900 The first time a unique expression is executed, the actual expression is evaluated and the accompanying boolean is set to true. 901 Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. 902 Tuple member expressions also take advantage of unique expressions in the case of possible impurity. 903 904 Currently, the \CFA translator has a very broad, imprecise definition of impurity, where any function call is assumed to be impure. 
905 This notion could be made more precise for certain intrinsic, auto-generated, and builtin functions, and could analyze function bodies when they are available to recursively detect impurity, to eliminate some unique expressions. 906 907 The various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. 908 A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, \eg in a unique expression. 909 The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. 910 However, there are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this restriction is not new. 911 \end{comment} 912 853 913 854 914 \section{Evaluation} 855 856 \TODO{Magnus suggests we need some graphs, it's kind of a done thing that the reviewers will be looking for. Also, we've made some unsubstantiated claims about the runtime performance of \CFA, which some micro-benchmarks could help with. I'm thinking a simple stack push and pop, with an idiomatic \lstinline@void*@, \CFA, \CC template and \CC virtual inheritance versions (the void* and virtual inheritance versions likely need to be linked lists, or clumsy in their API -- possibly both versions) to test generics, and variadic print to test tuples. We measure SLOC, runtime performance, executable size (making sure to include benchmarks for multiple types in the executable), and possibly manually count the number of places where the programmer must provide un-type-checked type information. 
Appendices don't count against our page limit, so we might want to include the source code for the benchmarks (or at least the relevant implementation details) in one.} 915 \label{sec:eval} 916 917 Though \CFA provides significant added functionality over C, these features have a low runtime penalty. 918 In fact, \CFA's features for generic programming can enable faster runtime execution than idiomatic @void *@-based C code. 919 This claim is demonstrated through a set of generic-code-based micro-benchmarks in C, \CFA, and \CC (see stack implementations in Appendix~\ref{sec:BenchmarkStackImplementation}). 920 Since all these languages share a subset essentially comprising standard C, maximal-performance benchmarks would show little runtime variance, other than in length and clarity of source code. 921 A more illustrative benchmark measures the costs of idiomatic usage of each language's features. 922 Figure~\ref{fig:BenchmarkTest} shows the \CFA benchmark tests for a generic stack based on a singly linked-list, a generic pair-data-structure, and a variadic @print@ routine similar to that in Section~\ref{sec:variadic-tuples}. 923 The benchmark test is similar for C and \CC. 924 The experiment uses element types @int@ and @pair(_Bool, char)@, and pushes $N=40M$ elements on a generic stack, copies the stack, clears one of the stacks, finds the maximum value in the other stack, and prints $N/2$ (to reduce graph height) constants. 
925 926 \begin{figure} 927 \begin{lstlisting}[xleftmargin=3\parindentlnth,aboveskip=0pt,belowskip=0pt] 928 int main( int argc, char * argv[] ) { 929 FILE * out = fopen( "cfa-out.txt", "w" ); 930 int maxi = 0, vali = 42; 931 stack(int) si, ti; 932 933 REPEAT_TIMED( "push_int", N, push( &si, vali ); ) 934 TIMED( "copy_int", ti = si; ) 935 TIMED( "clear_int", clear( &si ); ) 936 REPEAT_TIMED( "pop_int", N, 937 int xi = pop( &ti ); if ( xi > maxi ) { maxi = xi; } ) 938 REPEAT_TIMED( "print_int", N/2, print( out, vali, ":", vali, "\n" ); ) 939 940 pair(_Bool, char) maxp = { (_Bool)0, '\0' }, valp = { (_Bool)1, 'a' }; 941 stack(pair(_Bool, char)) sp, tp; 942 943 REPEAT_TIMED( "push_pair", N, push( &sp, valp ); ) 944 TIMED( "copy_pair", tp = sp; ) 945 TIMED( "clear_pair", clear( &sp ); ) 946 REPEAT_TIMED( "pop_pair", N, 947 pair(_Bool, char) xp = pop( &tp ); if ( xp > maxp ) { maxp = xp; } ) 948 REPEAT_TIMED( "print_pair", N/2, print( out, valp, ":", valp, "\n" ); ) 949 fclose(out); 950 } 951 \end{lstlisting} 952 \caption{\CFA Benchmark Test} 953 \label{fig:BenchmarkTest} 954 \end{figure} 955 956 The structure of each benchmark implemented is: C with @void *@-based polymorphism, \CFA with the presented features, \CC with templates, and \CC using only class inheritance for polymorphism, called \CCV. 957 The \CCV variant illustrates an alternative object-oriented idiom where all objects inherit from a base @object@ class, mimicking a Java-like interface; 958 hence runtime checks are necessary to safely down-cast objects. 959 The most notable difference among the implementations is in memory layout of generic types: \CFA and \CC inline the stack and pair elements into corresponding list and pair nodes, while C and \CCV lack such a capability and instead must store generic objects via pointers to separately-allocated objects. 
960 For the print benchmark, idiomatic printing is used: the C and \CFA variants used @stdio.h@, while the \CC and \CCV variants used @iostream@; preliminary tests show this distinction has negligible runtime impact. 961 Note, the C benchmark uses unchecked casts as there is no runtime mechanism to perform such checks, while \CFA and \CC provide type-safety statically. 962 963 Figure~\ref{fig:eval} and Table~\ref{tab:eval} show the results of running the benchmark in Figure~\ref{fig:BenchmarkTest} and its C, \CC, and \CCV equivalents. 964 The graph plots the median of 5 consecutive runs of each program, with an initial warm-up run omitted. 965 All code is compiled at \texttt{-O2} by GCC or G++ 6.2.0, with all \CC code compiled as \CCfourteen. 966 The benchmarks are run on an Ubuntu 16.04 workstation with 16 GB of RAM and a 6-core AMD FX-6300 CPU with 3.5 GHz maximum clock frequency. 967 968 \begin{figure} 969 \centering 970 \input{timing} 971 \caption{Benchmark Timing Results (smaller is better)} 972 \label{fig:eval} 973 \end{figure} 974 975 \begin{table} 976 \caption{Properties of benchmark code} 977 \label{tab:eval} 978 \newcommand{\CT}[1]{\multicolumn{1}{c}{#1}} 979 \begin{tabular}{rrrrr} 980 & \CT{C} & \CT{\CFA} & \CT{\CC} & \CT{\CCV} \\ \hline 981 maximum memory usage (MB) & 10001 & 2502 & 2503 & 11253 \\ 982 source code size (lines) & 247 & 222 & 165 & 339 \\ 983 redundant type annotations (lines) & 39 & 2 & 2 & 15 \\ 984 binary size (KB) & 14 & 229 & 18 & 38 \\ 985 \end{tabular} 986 \end{table} 987 988 The C and \CCV variants are generally the slowest with the largest memory footprint, because of their less-efficient memory layout and the pointer-indirection necessary to implement generic types; 989 this inefficiency is exacerbated by the second level of generic types in the pair-based benchmarks. 
990 By contrast, the \CFA and \CC variants run in roughly equivalent time for both the integer and pair of @_Bool@ and @char@ because the storage layout is equivalent, with the inlined libraries (\ie no separate compilation) and greater maturity of the \CC compiler contributing to its lead.
991 \CCV is slower than C largely due to the cost of runtime type-checking of down-casts (implemented with @dynamic_cast@).
992 There are two outliers in the graph for \CFA: all prints and pop of @pair@.
993 Both of these cases result from the complexity of the C-generated polymorphic code, so that the GCC compiler is unable to optimize some dead code and condense nested calls.
994 A compiler designed for \CFA could easily perform these optimizations.
995 Finally, the binary size for \CFA is larger because of static linking with the \CFA libraries.
996 
997 \CFA is also competitive in terms of source code size, measured as a proxy for programmer effort. The line counts in Table~\ref{tab:eval} include implementations of @pair@ and @stack@ types for all four languages for purposes of direct comparison, though it should be noted that \CFA and \CC have pre-written data structures in their standard libraries that programmers would generally use instead. Use of these standard library types has minimal impact on the performance benchmarks, but shrinks the \CFA and \CC benchmarks to 73 and 54 lines, respectively.
998 On the other hand, C does not have a generic collections-library in its standard distribution, resulting in frequent reimplementation of such collection types by C programmers.
999 \CCV does not use the \CC standard template library by construction, and in fact includes the definition of @object@ and wrapper classes for @bool@, @char@, @int@, and @const char *@ in its line count, which inflates this count somewhat, as an actual object-oriented language would include these in the standard library;
1000 with their omission the \CCV line count is similar to C.
1001 We justify the given line count by noting that many object-oriented languages do not allow implementing new interfaces on library types without subclassing or wrapper types, which may be similarly verbose.
1002 
1003 Raw line-count, however, is a fairly rough measure of code complexity;
1004 another important factor is how much type information the programmer must manually specify, especially where that information is not checked by the compiler.
1005 Such unchecked type information produces a heavier documentation burden and increased potential for runtime bugs, and is much less common in \CFA than C, with its manually specified function-pointer arguments and format codes, or \CCV, with its extensive use of un-type-checked downcasts (\eg @object@ to @integer@ when popping a stack, or @object@ to @printable@ when printing the elements of a @pair@).
1006 To quantify this, the ``redundant type annotations'' line in Table~\ref{tab:eval} counts the number of lines on which the type of a known variable is re-specified, either as a format specifier, explicit downcast, type-specific function, or by name in a @sizeof@, struct literal, or @new@ expression.
1007 The \CC benchmark uses two redundant type annotations to create new stack nodes, while the C and \CCV benchmarks have several such annotations spread throughout their code.
1008 The two instances in which the \CFA benchmark still uses redundant type specifiers are to cast the result of a polymorphic @malloc@ call (the @sizeof@ argument is inferred by the compiler).
1009 These uses are similar to the @new@ expressions in \CC, though the \CFA compiler's type resolver should shortly render even these type casts superfluous.
1010 
857 1011 
858 1012 \section{Related Work}
859 1013 
860 \CC is the existing language it is most natural to compare \CFA to, as they are both more modern extensions to C with backwards source compatibility.
The most fundamental difference in approach between \CC and \CFA is their approach to this C compatibility. \CC does provide fairly strong source backwards compatibility with C, but is a dramatically more complex language than C, and imposes a steep learning curve to use many of its extension features. For instance, in a break from general C practice, template code is typically written in header files, with a variety of subtle restrictions implied on its use by this choice, while the other polymorphism mechanism made available by \CC, class inheritance, requires programmers to learn an entirely new object-oriented programming paradigm; the interaction between templates and inheritance is also quite complex. \CFA, by contrast, has a single facility for polymorphic code, one which supports separate compilation and the existing procedural paradigm of C code. A major difference between the approaches of \CC and \CFA to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA. One of the major limiting factors of \CC's approach is that templates cannot be separately compiled, and, until concepts~\citep{C++Concepts} are standardized (currently anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors for template expansion failures. By contrast, the explicit nature of assertions in \CFA allows polymorphic functions to be separately compiled, and for their requirements to be checked by the compiler; similarly, \CFA generic types may be opaque, unlike \CC template classes. 861 862 Cyclone also provides capabilities for polymorphic functions and existential types~\citep{Grossman06}, similar in concept to \CFA's @forall@ functions and generic types. 
Cyclone existential types can include function pointers in a construct similar to a virtual function table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process. Furthermore, Cyclone's polymorphic functions and types are restricted in that they may only abstract over types with the same layout and calling convention as @void*@, in practice only pointer types and @int@ - in \CFA terms, all Cyclone polymorphism must be dtype-static. This design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, but is more restrictive than \CFA's more general model. 863 864 Apple's Objective-C \citep{obj-c-book} is another industrially successful set of extensions to C. The Objective-C language model is a fairly radical departure from C, adding object-orientation and message-passing. Objective-C implements variadic functions using the C @va_arg@ mechanism, and did not support type-checked generics until recently \citep{xcode7}, historically using less-efficient and more error-prone runtime checking of object types instead. The GObject framework \citep{GObject} also adds object-orientation with runtime type-checking and reference-counting garbage-collection to C; these are much more intrusive feature additions than those provided by \CFA, in addition to the runtime overhead of reference-counting. The Vala programming language \citep{Vala} compiles to GObject-based C, and so adds the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code-bases. Java \citep{Java8} has had generic types and variadic functions since Java~5; Java's generic types are type-checked at compilation and type-erased at runtime, similar to \CFA's, though in Java each object carries its own table of method pointers, while \CFA passes the method pointers separately so as to maintain a C-compatible struct layout. 
Java variadic functions are simply syntactic sugar for an array of a single type, and therefore less useful than \CFA's heterogeneously-typed variadic functions. Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens. 865 866 D \citep{D}, Go \citep{Go}, and Rust \citep{Rust} are modern, compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Go and \emph{traits} in Rust. However, each language represents dramatic departures from C in terms of language model, and none has the same level of compatibility with C as \CFA. D and Go are garbage-collected languages, imposing the associated runtime overhead. The necessity of accounting for data transfer between the managed Go runtime and the unmanaged C runtime complicates foreign-function interface between Go and C. Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. D restricts garbage collection to its own heap by default, while Rust is not garbage-collected, and thus has a lighter-weight runtime that is more easily interoperable with C. Rust also possesses much more powerful abstraction capabilities for writing generic code than Go. On the other hand, Rust's borrow-checker, while it does provide strong safety guarantees, is complex and difficult to learn, and imposes a distinctly idiomatic programming style on Rust. \CFA, with its more modest safety features, is significantly easier to port C code to, while maintaining the idiomatic style of the original source. 867 868 \section{Conclusion \& Future Work} 869 870 There is ongoing work on a wide range of \CFA feature extensions, including reference types, exceptions, and concurrent programming primitives. In addition to this work, there are some interesting future directions the polymorphism design could take. 
Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. \CFA polymorphic functions, by contrast, use an approach that is essentially dynamic virtual dispatch. The runtime overhead of this approach is low, but not as low as \CC template functions, and it may be beneficial to provide a mechanism for particularly performance-sensitive code to close this gap. Further research is needed, but two promising approaches are to allow an annotation on polymorphic function call sites that tells the translator to create a template-specialization of the function (provided the code is visible in the current translation unit) or placing an annotation on polymorphic function definitions that instantiates a version of the polymorphic function specialized to some set of types. These approaches are not mutually exclusive, and would allow these performance optimizations to be applied only where most useful to increase performance, without suffering the code bloat or loss of generality of a template expansion approach where it is unnecessary. 871 872 In conclusion, the authors' design for generic types and tuples, unlike those available in existing work, is both reusable and type-checked, while still supporting a full range of C features, including separately-compiled modules. We have experimentally validated the performance of our design against both \CC and standard C, showing it is \TODO{shiny, cap'n}. 1014 1015 \subsection{Polymorphism} 1016 1017 \CC is the most similar language to \CFA; 1018 both are extensions to C with source and runtime backwards compatibility. 1019 The fundamental difference is in their engineering approach to C compatibility and programmer expectation. 1020 While \CC provides good backwards compatibility with C, it has a steep learning curve for many of its extensions. 
1021 For example, polymorphism is provided via three disjoint mechanisms: overloading, inheritance, and templates.
1022 The overloading is restricted because resolution does not use the return type, inheritance requires learning object-oriented programming and coping with a restricted nominal-inheritance hierarchy, templates cannot be separately compiled resulting in compilation/code bloat and poor error messages, and determining how these mechanisms interact and which to use is confusing.
1023 In contrast, \CFA has a single facility for polymorphic code supporting type-safe separate-compilation of polymorphic functions and generic (opaque) types, which uniformly leverage the C procedural paradigm.
1024 The key mechanism to support separate compilation is \CFA's \emph{explicit} use of assumed properties for a type.
1025 Until \CC~\citet{C++Concepts} are standardized (anticipated for \CCtwenty), \CC provides no way to specify the requirements of a generic function in code beyond compilation errors during template expansion;
1026 furthermore, \CC concepts are restricted to template polymorphism.
1027 
1028 Cyclone~\citep{Grossman06} also provides capabilities for polymorphic functions and existential types, similar to \CFA's @forall@ functions and generic types.
1029 Cyclone existential types can include function pointers in a construct similar to a virtual function-table, but these pointers must be explicitly initialized at some point in the code, a tedious and potentially error-prone process.
1030 Furthermore, Cyclone's polymorphic functions and types are restricted to abstraction over types with the same layout and calling convention as @void *@, \ie only pointer types and @int@.
1031 In \CFA terms, all Cyclone polymorphism must be dtype-static.
1032 While the Cyclone design provides the efficiency benefits discussed in Section~\ref{sec:generic-apps} for dtype-static polymorphism, it is more restrictive than \CFA's general model.
1033 \citet{Smith98} present Polymorphic C, an ML dialect with polymorphic functions and C-like syntax and pointer types; it lacks many of C's features, however, most notably structure types, and so is not a practical C replacement. 1034 1035 \citet{obj-c-book} is an industrially successful extension to C. 1036 However, Objective-C is a radical departure from C, using an object-oriented model with message-passing. 1037 Objective-C did not support type-checked generics until recently \citet{xcode7}, historically using less-efficient runtime checking of object types. 1038 The~\citet{GObject} framework also adds object-oriented programming with runtime type-checking and reference-counting garbage-collection to C; 1039 these features are more intrusive additions than those provided by \CFA, in addition to the runtime overhead of reference-counting. 1040 \citet{Vala} compiles to GObject-based C, adding the burden of learning a separate language syntax to the aforementioned demerits of GObject as a modernization path for existing C code-bases. 1041 Java~\citep{Java8} included generic types in Java~5, which are type-checked at compilation and type-erased at runtime, similar to \CFA's. 1042 However, in Java, each object carries its own table of method pointers, while \CFA passes the method pointers separately to maintain a C-compatible layout. 1043 Java is also a garbage-collected, object-oriented language, with the associated resource usage and C-interoperability burdens. 1044 1045 D~\citep{D}, Go, and~\citet{Rust} are modern, compiled languages with abstraction features similar to \CFA traits, \emph{interfaces} in D and Go and \emph{traits} in Rust. 1046 However, each language represents a significant departure from C in terms of language model, and none has the same level of compatibility with C as \CFA. 1047 D and Go are garbage-collected languages, imposing the associated runtime overhead. 
1048 The necessity of accounting for data transfer between managed runtimes and the unmanaged C runtime complicates foreign-function interfaces to C. 1049 Furthermore, while generic types and functions are available in Go, they are limited to a small fixed set provided by the compiler, with no language facility to define more. 1050 D restricts garbage collection to its own heap by default, while Rust is not garbage-collected, and thus has a lighter-weight runtime more interoperable with C. 1051 Rust also possesses much more powerful abstraction capabilities for writing generic code than Go. 1052 On the other hand, Rust's borrow-checker provides strong safety guarantees but is complex and difficult to learn and imposes a distinctly idiomatic programming style. 1053 \CFA, with its more modest safety features, allows direct ports of C code while maintaining the idiomatic style of the original source. 1054 1055 1056 \subsection{Tuples/Variadics} 1057 1058 Many programming languages have some form of tuple construct and/or variadic functions, \eg SETL, C, KW-C, \CC, D, Go, Java, ML, and Scala. 1059 SETL~\cite{SETL} is a high-level mathematical programming language, with tuples being one of the primary data types. 1060 Tuples in SETL allow subscripting, dynamic expansion, and multiple assignment. 1061 C provides variadic functions through @va_list@ objects, but the programmer is responsible for managing the number of arguments and their types, so the mechanism is type unsafe. 1062 KW-C~\cite{Buhr94a}, a predecessor of \CFA, introduced tuples to C as an extension of the C syntax, taking much of its inspiration from SETL. 1063 The main contributions of that work were adding MRVF, tuple mass and multiple assignment, and record-field access. 1064 \CCeleven introduced @std::tuple@ as a library variadic template structure. 1065 Tuples are a generalization of @std::pair@, in that they allow for arbitrary length, fixed-size aggregation of heterogeneous values. 
1066 Operations include @std::get<N>@ to extract values, @std::tie@ to create a tuple of references used for assignment, and lexicographic comparisons.
1067 \CCseventeen proposes \emph{structured bindings}~\cite{Sutter15} to eliminate pre-declaring variables and use of @std::tie@ for binding the results.
1068 This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must be documented with some other mechanism.
1069 Furthermore, structured bindings are not a full replacement for @std::tie@, as it always declares new variables.
1070 Like \CC, D provides tuples through a library variadic-template structure.
1071 Go does not have tuples but supports MRVF.
1072 Java's variadic functions appear similar to C's but are type-safe using homogeneous arrays, which are less useful than \CFA's heterogeneously-typed variadic functions.
1073 Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML~\cite{sml} and Scala~\cite{Scala}, which decompose tuples using pattern matching.
1074 
1075 
1076 \section{Conclusion and Future Work}
1077 
1078 The goal of \CFA is to provide an evolutionary pathway for large C development-environments to be more productive and safer, while respecting the talent and skill of C programmers.
1079 While other programming languages purport to be a better C, they are in fact new and interesting languages in their own right, but not C extensions.
1080 The purpose of this paper is to introduce \CFA, and showcase two language features that illustrate the \CFA type-system and approaches taken to achieve the goal of evolutionary C extension.
1081 The contributions are a powerful type-system using parametric polymorphism and overloading, generic types, and tuples, which all have complex interactions.
1082 The work is a challenging design, engineering, and implementation exercise.
1083 On the surface, the project may appear as a rehash of similar mechanisms in \CC. 1084 However, every \CFA feature is different than its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supporting separate compilation. 1085 All of these new features are being used by the \CFA development-team to build the \CFA runtime-system. 1086 Finally, we demonstrate that \CFA performance for some idiomatic cases is better than C and close to \CC, showing the design is practically applicable. 1087 1088 There is ongoing work on a wide range of \CFA feature extensions, including reference types, exceptions, concurrent primitives and modules. 1089 (While all examples in the paper compile and run, a public beta-release of \CFA will take another 8--12 months to finalize these additional extensions.) 1090 In addition, there are interesting future directions for the polymorphism design. 1091 Notably, \CC template functions trade compile time and code bloat for optimal runtime of individual instantiations of polymorphic functions. 1092 \CFA polymorphic functions use dynamic virtual-dispatch; 1093 the runtime overhead of this approach is low, but not as low as inlining, and it may be beneficial to provide a mechanism for performance-sensitive code. 1094 Two promising approaches are an @inline@ annotation at polymorphic function call sites to create a template-specialization of the function (provided the code is visible) or placing an @inline@ annotation on polymorphic function-definitions to instantiate a specialized version for some set of types. 1095 These approaches are not mutually exclusive and allow performance optimizations to be applied only when necessary, without suffering global code-bloat. 1096 In general, we believe separate compilation, producing smaller code, works well with loaded hardware-caches, which may offset the benefit of larger inlined-code. 
1097 873 1098 874 1099 \begin{acks} 875 The authors would like to thank Magnus Madsen for valuable editorial feedback. 876 877 This work is supported in part by a corporate partnership with \grantsponsor{Huawei}{Huawei Ltd.}{http://www.huawei.com}\ and the first author's \grantsponsor{NSERC-PGS}{NSERC PGS D}{http://www.nserc-crsng.gc.ca/Students-Etudiants/PG-CS/BellandPostgrad-BelletSuperieures_eng.asp} scholarship. 1100 The authors would like to recognize the design assistance of Glen Ditchfield, Richard Bilson, and Thierry Delisle on the features described in this paper, and thank Magnus Madsen and the three anonymous reviewers for valuable feedback. 1101 This work is supported in part by a corporate partnership with \grantsponsor{Huawei}{Huawei Ltd.}{http://www.huawei.com}, and Aaron Moss and Peter Buhr are funded by the \grantsponsor{Natural Sciences and Engineering Research Council} of Canada. 1102 % the first author's \grantsponsor{NSERC-PGS}{NSERC PGS D}{http://www.nserc-crsng.gc.ca/Students-Etudiants/PG-CS/BellandPostgrad-BelletSuperieures_eng.asp} scholarship. 878 1103 \end{acks} 1104 879 1105 880 1106 \bibliographystyle{ACM-Reference-Format} 881 1107 \bibliography{cfa} 1108 1109 1110 \appendix 1111 1112 \section{Benchmark Stack Implementation} 1113 \label{sec:BenchmarkStackImplementation} 1114 1115 \lstset{basicstyle=\linespread{0.9}\sf\small} 1116 1117 Throughout, @/***/@ designates a counted redundant type annotation. 
1118 1119 \medskip\noindent 1120 \CFA 1121 \begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt] 1122 forall(otype T) struct stack_node { 1123 T value; 1124 stack_node(T) * next; 1125 }; 1126 forall(otype T) void ?{}(stack(T) * s) { (&s->head){ 0 }; } 1127 forall(otype T) void ?{}(stack(T) * s, stack(T) t) { 1128 stack_node(T) ** crnt = &s->head; 1129 for ( stack_node(T) * next = t.head; next; next = next->next ) { 1130 *crnt = ((stack_node(T) *)malloc()){ next->value }; /***/ 1131 stack_node(T) * acrnt = *crnt; 1132 crnt = &acrnt->next; 1133 } 1134 *crnt = 0; 1135 } 1136 forall(otype T) stack(T) ?=?(stack(T) * s, stack(T) t) { 1137 if ( s->head == t.head ) return *s; 1138 clear(s); 1139 s{ t }; 1140 return *s; 1141 } 1142 forall(otype T) void ^?{}(stack(T) * s) { clear(s); } 1143 forall(otype T) _Bool empty(const stack(T) * s) { return s->head == 0; } 1144 forall(otype T) void push(stack(T) * s, T value) { 1145 s->head = ((stack_node(T) *)malloc()){ value, s->head }; /***/ 1146 } 1147 forall(otype T) T pop(stack(T) * s) { 1148 stack_node(T) * n = s->head; 1149 s->head = n->next; 1150 T x = n->value; 1151 ^n{}; 1152 free(n); 1153 return x; 1154 } 1155 forall(otype T) void clear(stack(T) * s) { 1156 for ( stack_node(T) * next = s->head; next; ) { 1157 stack_node(T) * crnt = next; 1158 next = crnt->next; 1159 delete(crnt); 1160 } 1161 s->head = 0; 1162 } 1163 \end{lstlisting} 1164 1165 \medskip\noindent 1166 \CC 1167 \begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt] 1168 template<typename T> class stack { 1169 struct node { 1170 T value; 1171 node * next; 1172 node( const T & v, node * n = nullptr ) : value(v), next(n) {} 1173 }; 1174 node * head; 1175 void copy(const stack<T>& o) { 1176 node ** crnt = &head; 1177 for ( node * next = o.head;; next; next = next->next ) { 1178 *crnt = new node{ next->value }; /***/ 1179 crnt = &(*crnt)->next; 1180 } 1181 *crnt = nullptr; 1182 } 1183 public: 1184 stack() : head(nullptr) {} 
1185 stack(const stack<T>& o) { copy(o); } 1186 stack(stack<T> && o) : head(o.head) { o.head = nullptr; } 1187 ~stack() { clear(); } 1188 stack & operator= (const stack<T>& o) { 1189 if ( this == &o ) return *this; 1190 clear(); 1191 copy(o); 1192 return *this; 1193 } 1194 stack & operator= (stack<T> && o) { 1195 if ( this == &o ) return *this; 1196 head = o.head; 1197 o.head = nullptr; 1198 return *this; 1199 } 1200 bool empty() const { return head == nullptr; } 1201 void push(const T & value) { head = new node{ value, head }; /***/ } 1202 T pop() { 1203 node * n = head; 1204 head = n->next; 1205 T x = std::move(n->value); 1206 delete n; 1207 return x; 1208 } 1209 void clear() { 1210 for ( node * next = head; next; ) { 1211 node * crnt = next; 1212 next = crnt->next; 1213 delete crnt; 1214 } 1215 head = nullptr; 1216 } 1217 }; 1218 \end{lstlisting} 1219 1220 \medskip\noindent 1221 C 1222 \begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt] 1223 struct stack_node { 1224 void * value; 1225 struct stack_node * next; 1226 }; 1227 struct stack new_stack() { return (struct stack){ NULL }; /***/ } 1228 void copy_stack(struct stack * s, const struct stack * t, void * (*copy)(const void *)) { 1229 struct stack_node ** crnt = &s->head; 1230 for ( struct stack_node * next = t->head; next; next = next->next ) { 1231 *crnt = malloc(sizeof(struct stack_node)); /***/ 1232 **crnt = (struct stack_node){ copy(next->value) }; /***/ 1233 crnt = &(*crnt)->next; 1234 } 1235 *crnt = 0; 1236 } 1237 _Bool stack_empty(const struct stack * s) { return s->head == NULL; } 1238 void push_stack(struct stack * s, void * value) { 1239 struct stack_node * n = malloc(sizeof(struct stack_node)); /***/ 1240 *n = (struct stack_node){ value, s->head }; /***/ 1241 s->head = n; 1242 } 1243 void * pop_stack(struct stack * s) { 1244 struct stack_node * n = s->head; 1245 s->head = n->next; 1246 void * x = n->value; 1247 free(n); 1248 return x; 1249 } 1250 void clear_stack(struct stack 
* s, void (*free_el)(void *)) { 1251 for ( struct stack_node * next = s->head; next; ) { 1252 struct stack_node * crnt = next; 1253 next = crnt->next; 1254 free_el(crnt->value); 1255 free(crnt); 1256 } 1257 s->head = NULL; 1258 } 1259 \end{lstlisting} 1260 1261 \medskip\noindent 1262 \CCV 1263 \begin{lstlisting}[xleftmargin=2\parindentlnth,aboveskip=0pt,belowskip=0pt] 1264 stack::node::node( const object & v, node * n ) : value( v.new_copy() ), next( n ) {} 1265 void stack::copy(const stack & o) { 1266 node ** crnt = &head; 1267 for ( node * next = o.head; next; next = next->next ) { 1268 *crnt = new node{ *next->value }; 1269 crnt = &(*crnt)->next; 1270 } 1271 *crnt = nullptr; 1272 } 1273 stack::stack() : head(nullptr) {} 1274 stack::stack(const stack & o) { copy(o); } 1275 stack::stack(stack && o) : head(o.head) { o.head = nullptr; } 1276 stack::~stack() { clear(); } 1277 stack & stack::operator= (const stack & o) { 1278 if ( this == &o ) return *this; 1279 clear(); 1280 copy(o); 1281 return *this; 1282 } 1283 stack & stack::operator= (stack && o) { 1284 if ( this == &o ) return *this; 1285 head = o.head; 1286 o.head = nullptr; 1287 return *this; 1288 } 1289 bool stack::empty() const { return head == nullptr; } 1290 void stack::push(const object & value) { head = new node{ value, head }; /***/ } 1291 ptr<object> stack::pop() { 1292 node * n = head; 1293 head = n->next; 1294 ptr<object> x = std::move(n->value); 1295 delete n; 1296 return x; 1297 } 1298 void stack::clear() { 1299 for ( node * next = head; next; ) { 1300 node * crnt = next; 1301 next = crnt->next; 1302 delete crnt; 1303 } 1304 head = nullptr; 1305 } 1306 \end{lstlisting} 1307 1308 1309 \begin{comment} 1310 1311 \subsubsection{bench.h} 1312 (\texttt{bench.hpp} is similar.) 
1313 1314 \lstinputlisting{evaluation/bench.h} 1315 1316 \subsection{C} 1317 1318 \subsubsection{c-stack.h} ~ 1319 1320 \lstinputlisting{evaluation/c-stack.h} 1321 1322 \subsubsection{c-stack.c} ~ 1323 1324 \lstinputlisting{evaluation/c-stack.c} 1325 1326 \subsubsection{c-pair.h} ~ 1327 1328 \lstinputlisting{evaluation/c-pair.h} 1329 1330 \subsubsection{c-pair.c} ~ 1331 1332 \lstinputlisting{evaluation/c-pair.c} 1333 1334 \subsubsection{c-print.h} ~ 1335 1336 \lstinputlisting{evaluation/c-print.h} 1337 1338 \subsubsection{c-print.c} ~ 1339 1340 \lstinputlisting{evaluation/c-print.c} 1341 1342 \subsubsection{c-bench.c} ~ 1343 1344 \lstinputlisting{evaluation/c-bench.c} 1345 1346 \subsection{\CFA} 1347 1348 \subsubsection{cfa-stack.h} ~ 1349 1350 \lstinputlisting{evaluation/cfa-stack.h} 1351 1352 \subsubsection{cfa-stack.c} ~ 1353 1354 \lstinputlisting{evaluation/cfa-stack.c} 1355 1356 \subsubsection{cfa-print.h} ~ 1357 1358 \lstinputlisting{evaluation/cfa-print.h} 1359 1360 \subsubsection{cfa-print.c} ~ 1361 1362 \lstinputlisting{evaluation/cfa-print.c} 1363 1364 \subsubsection{cfa-bench.c} ~ 1365 1366 \lstinputlisting{evaluation/cfa-bench.c} 1367 1368 \subsection{\CC} 1369 1370 \subsubsection{cpp-stack.hpp} ~ 1371 1372 \lstinputlisting[language=c++]{evaluation/cpp-stack.hpp} 1373 1374 \subsubsection{cpp-print.hpp} ~ 1375 1376 \lstinputlisting[language=c++]{evaluation/cpp-print.hpp} 1377 1378 \subsubsection{cpp-bench.cpp} ~ 1379 1380 \lstinputlisting[language=c++]{evaluation/cpp-bench.cpp} 1381 1382 \subsection{\CCV} 1383 1384 \subsubsection{object.hpp} ~ 1385 1386 \lstinputlisting[language=c++]{evaluation/object.hpp} 1387 1388 \subsubsection{cpp-vstack.hpp} ~ 1389 1390 \lstinputlisting[language=c++]{evaluation/cpp-vstack.hpp} 1391 1392 \subsubsection{cpp-vstack.cpp} ~ 1393 1394 \lstinputlisting[language=c++]{evaluation/cpp-vstack.cpp} 1395 1396 \subsubsection{cpp-vprint.hpp} ~ 1397 1398 \lstinputlisting[language=c++]{evaluation/cpp-vprint.hpp} 1399 1400 
\subsubsection{cpp-vbench.cpp} ~ 1401 1402 \lstinputlisting[language=c++]{evaluation/cpp-vbench.cpp} 1403 \end{comment} 882 1404 883 1405 \end{document} -
doc/proposals/concurrency/concurrency.tex
r221c2de7 r154fdc8 61 61 \newcommand{\uC}{$\mu$\CC} 62 62 \newcommand{\cit}{\textsuperscript{[Citation Needed]}\xspace} 63 \newcommand{\code}[1]{\lstinline {#1}}63 \newcommand{\code}[1]{\lstinline[language=CFA]{#1}} 64 64 \newcommand{\pseudo}[1]{\lstinline[language=Pseudo]{#1}} 65 65 … … 160 160 Here, the constructor(\code{?\{\}}) uses the \code{nomutex} keyword to signify that it does not acquire the monitor mutual exclusion when constructing. This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion. The prefix increment operator uses \code{mutex} to protect the incrementing process from race conditions. Finally, there is a conversion operator from \code{counter_t} to \code{size_t}. This conversion may or may not require the \code{mutex} key word depending on whether or not reading an \code{size_t} is an atomic operation or not. 161 161 162 Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without wualifiers \code{void foo(counter_t & this)} then one could argue that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. It can be argued that \code{nomutex} is the more "normal" behaviour, the \code{nomutex} keyword effectively stating explicitly that "this routine has nothing special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routine \code{void foo(counter_t & this)}. Mandatory keywords would also have the added benefice of being self-documented but at the cost of extra typing. In the end, which solution should be picked is still up for debate. 
For the remainder of this proposal, the explicit approach is used for clarity.162 Having both \code{mutex} and \code{nomutex} keywords could be argued to be redundant based on the meaning of a routine having neither of these keywords. For example, given a routine without qualifiers \code{void foo(counter_t & this)} then one could argue that it should default to the safest option \code{mutex}. On the other hand, the option of having routine \code{void foo(counter_t & this)} mean \code{nomutex} is unsafe by default and may easily cause subtle errors. It can be argued that \code{nomutex} is the more "normal" behaviour, the \code{nomutex} keyword effectively stating explicitly that "this routine has nothing special". Another alternative is to make having exactly one of these keywords mandatory, which would provide the same semantics but without the ambiguity of supporting routine \code{void foo(counter_t & this)}. Mandatory keywords would also have the added benefit of being self-documented but at the cost of extra typing. In the end, which solution should be picked is still up for debate. For the remainder of this proposal, the explicit approach is used for clarity. 163 163 164 164 The next semantic decision is to establish when mutex/nomutex may be used as a type qualifier. Consider the following declarations: … 350 350 351 351 \subsection{Internal scheduling} \label{insched} 352 Monitors also need to schedule waiting threads internally as a mean of synchronization. Internal scheduling is one of the simple examples of such a feature. It allows users to declare condition variables and have threads wait and signaled from them. Here is a simple example of such a technique : 353 354 \begin{lstlisting} 355 mutex struct A { 356 condition e; 357 } 358 359 void foo(A & mutex a) { 360 //... 361 wait(a.e); 362 //... 
363 } 364 365 void bar(A & mutex a) { 366 signal(a.e); 367 } 368 \end{lstlisting} 369 370 Note that in \CFA, \code{condition} have no particular need to be stored inside a monitor, beyond any software engineering reasons. Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering. This semantic can easily be extended to multi-monitor calls by offering the same guarantee. 352 371 353 \begin{center} 372 354 \begin{tabular}{ c @{\hskip 0.65in} c } 373 Thread 1 & Thread 2 \\ 374 \begin{lstlisting} 375 void foo(monitor & mutex a, 376 monitor & mutex b) { 377 //... 378 wait(a.e); 379 //... 380 } 381 382 foo(a, b); 383 \end{lstlisting} &\begin{lstlisting} 384 void bar(monitor & mutex a, 385 monitor & mutex b) { 386 signal(a.e); 387 } 388 389 390 391 bar(a, b); 355 \begin{lstlisting}[language=Pseudo] 356 acquire A 357 wait A 358 release A 359 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 360 acquire A 361 signal A 362 release A 392 363 \end{lstlisting} 393 364 \end{tabular} 394 365 \end{center} 395 A direct extension of the single monitor semantics is to release all locks when waiting and transferring ownership of all locks when signalling. However, for the purpose of synchronization it may be usefull to only release some of the locks but keep others. It is possible to support internal scheduling and \gls{group-acquire} without any extra syntax by relying on order of acquisition. Here is an example of the different contexts in which internal scheduling can be used. 
(Note that here the use of helper routines is irrelevant, only routines acquire mutual exclusion have an impact on internal scheduling): 366 367 Easy : like uC++ 396 368 397 369 \begin{center} 398 \begin{tabular}{|c|c|c|} 399 Context 1 & Context 2 & Context 3 \\ 400 \hline 401 \begin{lstlisting} 402 condition e; 403 404 //acquire a & b 405 void foo(monitor & mutex a, 406 monitor & mutex b) { 407 408 wait(e); //release a & b 409 } 410 411 412 413 414 415 416 foo(a,b); 417 \end{lstlisting} &\begin{lstlisting} 418 condition e; 419 420 //acquire a 421 void bar(monitor & mutex a, 422 monitor & nomutex b) { 423 foo(a,b); 424 } 425 426 //acquire a & b 427 void foo(monitor & mutex a, 428 monitor & mutex b) { 429 wait(e); //release a & b 430 } 431 432 bar(a, b); 433 \end{lstlisting} &\begin{lstlisting} 434 condition e; 435 436 //acquire a 437 void bar(monitor & mutex a, 438 monitor & nomutex b) { 439 baz(a,b); 440 } 441 442 //acquire b 443 void baz(monitor & nomutex a, 444 monitor & mutex b) { 445 wait(e); //release b 446 } 447 448 bar(a, b); 370 \begin{tabular}{ c @{\hskip 0.65in} c } 371 \begin{lstlisting}[language=Pseudo] 372 acquire A 373 acquire B 374 wait B 375 release B 376 release A 377 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 378 acquire A 379 acquire B 380 signal B 381 release B 382 release A 449 383 \end{lstlisting} 450 384 \end{tabular} 451 385 \end{center} 452 386 453 Context 1 is the simplest way of acquiring more than one monitor (\gls{group-acquire}), using a routine with multiple parameters having the \code{mutex} keyword. Context 2 also uses \gls{group-acquire} as well in routine \code{foo}. However, the routine is called by routine \code{bar}, which only acquires monitor \code{a}. Since monitors can be acquired multiple times this does not cause a deadlock by itself but it does force the acquiring order to \code{a} then \code{b}. Context 3 also forces the acquiring order to be \code{a} then \code{b} but does not use \gls{group-acquire}. 
The previous example tries to illustrate the semantics that must be established to support releasing monitors in a \code{wait} statement. In all cases, the behavior of the wait statment is to release all the locks that were acquired my the inner-most monitor call. That is \code{a & b} in context 1 and 2 and \code{b} only in context 3. Here are a few other examples of this behavior. 454 387 Also easy : like uC++ 455 388 456 389 \begin{center} 457 \begin{tabular}{|c|c|c|} 458 \begin{lstlisting} 459 condition e; 460 461 //acquire b 462 void foo(monitor & nomutex a, 463 monitor & mutex b) { 464 bar(a,b); 465 } 466 467 //acquire a 468 void bar(monitor & mutex a, 469 monitor & nomutex b) { 470 471 wait(e); //release a 472 //keep b 473 } 474 475 foo(a, b); 476 \end{lstlisting} &\begin{lstlisting} 477 condition e; 478 479 //acquire a & b 480 void foo(monitor & mutex a, 481 monitor & mutex b) { 482 bar(a,b); 483 } 484 485 //acquire b 486 void bar(monitor & mutex a, 487 monitor & nomutex b) { 488 489 wait(e); //release b 490 //keep a 491 } 492 493 foo(a, b); 494 \end{lstlisting} &\begin{lstlisting} 495 condition e; 496 497 //acquire a & b 498 void foo(monitor & mutex a, 499 monitor & mutex b) { 500 bar(a,b); 501 } 502 503 //acquire none 504 void bar(monitor & nomutex a, 505 monitor & nomutex b) { 506 507 wait(e); //release a & b 508 //keep none 509 } 510 511 foo(a, b); 390 \begin{tabular}{ c @{\hskip 0.65in} c } 391 \begin{lstlisting}[language=Pseudo] 392 acquire A & B 393 wait A & B 394 release A & B 395 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 396 acquire A & B 397 signal A & B 398 release A & B 512 399 \end{lstlisting} 513 400 \end{tabular} 514 401 \end{center} 515 Note the right-most example is actually a trick pulled on the reader. Monitor state information is stored in thread local storage rather then in the routine context, which means that helper routines and other \code{nomutex} routines are invisible to the runtime system in regards to concurrency. 
This means that in the right-most example, the routine parameters are completly unnecessary. However, calling this routine from outside a valid monitor context is undefined. 516 517 These semantics imply that in order to release of subset of the monitors currently held, users must write (and name) a routine that only acquires the desired subset and simply calls wait. While users can use this method, \CFA offers the \code{wait_release}\footnote{Not sure if an overload of \code{wait} would work...} which will release only the specified monitors. In the center previous examples, the code in the center uses the \code{bar} routine to only release monitor \code{b}. Using the \code{wait_release} helper, this can be rewritten without having the name two routines : 402 403 Simplest extension : can be made like uC++ by tying B to A 404 518 405 \begin{center} 519 \begin{tabular}{ c c c } 520 \begin{lstlisting} 521 condition e; 522 523 //acquire a & b 524 void foo(monitor & mutex a, 525 monitor & mutex b) { 526 bar(a,b); 527 } 528 529 //acquire b 530 void bar(monitor & mutex a, 531 monitor & nomutex b) { 532 533 wait(e); //release b 534 //keep a 535 } 536 537 foo(a, b); 538 \end{lstlisting} &\begin{lstlisting} 539 => 540 \end{lstlisting} &\begin{lstlisting} 541 condition e; 542 543 //acquire a & b 544 void foo(monitor & mutex a, 545 monitor & mutex b) { 546 wait_release(e,b); //release b 547 //keep a 548 } 549 550 foo(a, b); 406 \begin{tabular}{ c @{\hskip 0.65in} c } 407 \begin{lstlisting}[language=Pseudo] 408 acquire A 409 // Code Section 1 410 acquire B 411 // Code Section 2 412 wait A & B 413 // Code Section 3 414 release B 415 // Code Section 4 416 release A 417 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 418 acquire A 419 // Code Section 5 420 acquire B 421 // Code Section 6 422 signal A & B 423 // Code Section 7 424 release B 425 // Code Section 8 426 release A 551 427 \end{lstlisting} 552 428 \end{tabular} 553 429 \end{center} 554 430 555 Regardless of the 
context in which the \code{wait} statement is used, \code{signal} must be called holding the same set of monitors. In all cases, signal only needs a single parameter, the condition variable that needs to be signalled. But \code{signal} needs to be called from the same monitor(s) that call to \code{wait}. Otherwise, mutual exclusion cannot be properly transferred back to the waiting monitor. 556 557 Finally, an additional semantic which can be very usefull is the \code{signal_block} routine. This routine behaves like signal for all of the semantics discussed above, but with the subtelty that mutual exclusion is transferred to the waiting task immediately rather than wating for the end of the critical section. 558 \\ 431 Hard extension : 432 433 Incorrect options for the signal : 434 435 \begin{description} 436 \item[-] Release B and baton pass after Code Section 8 : Passing b without having it 437 \item[-] Keep B during Code Section 8 : Can lead to deadlocks since we secretly keep a lock longer than specified by the user 438 \item[-] Instead of release B transfer A and B to waiter then try to reacquire A before running Code Section 8 : This allows barging 439 \end{description} 440 441 Since we don't want barging we need to pass A \& B and somehow block and get A back. 
442 443 \begin{center} 444 \begin{tabular}{ c @{\hskip 0.65in} c } 445 \begin{lstlisting}[language=Pseudo] 446 acquire A 447 acquire B 448 acquire C 449 wait A & B & C 450 1: release C 451 2: release B 452 3: release A 453 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 454 acquire A 455 acquire B 456 acquire C 457 signal A & B & C 458 4: release C 459 5: release B 460 6: release A 461 \end{lstlisting} 462 \end{tabular} 463 \end{center} 464 465 To prevent barging : 466 467 \begin{description} 468 \item[-] When the signaller hits 4 : pass A, B, C to waiter 469 \item[-] When the waiter hits 2 : pass A, B to signaller 470 \item[-] When the signaller hits 5 : pass A to waiter 471 \end{description} 472 473 474 \begin{center} 475 \begin{tabular}{ c @{\hskip 0.65in} c } 476 \begin{lstlisting}[language=Pseudo] 477 acquire A 478 acquire C 479 acquire B 480 wait A & B & C 481 1: release B 482 2: release C 483 3: release A 484 \end{lstlisting}&\begin{lstlisting}[language=Pseudo] 485 acquire B 486 acquire A 487 acquire C 488 signal A & B & C 489 4: release C 490 5: release A 491 6: release B 492 \end{lstlisting} 493 \end{tabular} 494 \end{center} 495 496 To prevent barging : When the signaller hits 4 : pass A, B, C to waiter. When the waiter hits 1 it must release B, 497 498 \begin{description} 499 \item[-] 500 \item[-] When the waiter hits 1 : pass A, B to signaller 501 \item[-] When the signaller hits 5 : pass A, B to waiter 502 \item[-] When the waiter hits 2 : pass A to signaller 503 \end{description} 504 505 % Monitors also need to schedule waiting threads internally as a mean of synchronization. Internal scheduling is one of the simple examples of such a feature. It allows users to declare condition variables and have threads wait and signaled from them. Here is a simple example of such a technique : 506 507 % \begin{lstlisting} 508 % mutex struct A { 509 % condition e; 510 % } 511 512 % void foo(A & mutex a) { 513 % //... 514 % wait(a.e); 515 % //... 
516 % } 517 518 % void bar(A & mutex a) { 519 % signal(a.e); 520 % } 521 % \end{lstlisting} 522 523 % Note that in \CFA, \code{condition} have no particular need to be stored inside a monitor, beyond any software engineering reasons. Here routine \code{foo} waits for the \code{signal} from \code{bar} before making further progress, effectively ensuring a basic ordering. 524 525 % As for simple mutual exclusion, these semantics must also be extended to include \gls{group-acquire} : 526 % \begin{center} 527 % \begin{tabular}{ c @{\hskip 0.65in} c } 528 % Thread 1 & Thread 2 \\ 529 % \begin{lstlisting} 530 % void foo(A & mutex a, 531 % A & mutex b) { 532 % //... 533 % wait(a.e); 534 % //... 535 % } 536 537 % foo(a, b); 538 % \end{lstlisting} &\begin{lstlisting} 539 % void bar(A & mutex a, 540 % A & mutex b) { 541 % signal(a.e); 542 % } 543 544 545 546 % bar(a, b); 547 % \end{lstlisting} 548 % \end{tabular} 549 % \end{center} 550 551 % To define the semantics of internal scheduling, it is important to look at nesting and \gls{group-acquire}. Indeed, beyond concerns about lock ordering, without scheduling the two following pseudo codes are mostly equivalent. In fact, if we assume monitors are ordered alphabetically, these two pseudo codes would probably lead to exactly the same implementation : 552 553 % \begin{table}[h!] 554 % \centering 555 % \begin{tabular}{c c} 556 % \begin{lstlisting}[language=pseudo] 557 % monitor A, B, C 558 559 % acquire A 560 % acquire B & C 561 562 % //Do stuff 563 564 % release B & C 565 % release A 566 % \end{lstlisting} &\begin{lstlisting}[language=pseudo] 567 % monitor A, B, C 568 569 % acquire A 570 % acquire B 571 % acquire C 572 % //Do stuff 573 % release C 574 % release B 575 % release A 576 % \end{lstlisting} 577 % \end{tabular} 578 % \end{table} 579 580 % Once internal scheduling is introduce however, semantics of \gls{group-acquire} become relevant. 
For example, let us look into the semantics of the following pseudo-code : 581 582 % \begin{lstlisting}[language=Pseudo] 583 % 1: monitor A, B, C 584 % 2: condition c1 585 % 3: 586 % 4: acquire A 587 % 5: acquire A & B & C 588 % 6: signal c1 589 % 7: release A & B & C 590 % 8: release A 591 % \end{lstlisting} 592 593 % Without \gls{group-acquire} signal simply baton passes the monitor lock on the next release. In the case above, we therefore need to indentify the next release. If line 8 is picked at the release point, then the signal will attempt to pass A \& B \& C, without having ownership of B \& C. Since this violates mutual exclusion, we conclude that line 7 is the only valid location where signalling can occur. The traditionnal meaning of signalling is to transfer ownership of the monitor(s) and immediately schedule the longest waiting task. However, in the discussed case, the signalling thread expects to maintain ownership of monitor A. This can be expressed in two differents ways : 1) the thread transfers ownership of all locks and reacquires A when it gets schedulled again or 2) it transfers ownership of all three monitors and then expects the ownership of A to be transferred back. 594 595 % However, the question is does these behavior motivate supporting acquireing non-disjoint set of monitors. Indeed, if the previous example was modified to only acquire B \& C at line 5 (an release the accordingly) then in respects to scheduling, we could add the simplifying constraint that all monitors in a bulk will behave the same way, simplifying the problem back to a single monitor problem which has already been solved. For this constraint to be acceptble however, we need to demonstrate that in does not prevent any meaningful possibilities. And, indeed, we can look at the two previous interpretation of the above pseudo-code and conclude that supporting the acquiring of non-disjoint set of monitors does not add any expressiveness to the language. 
596 597 % Option 1 reacquires the lock after the signal statement, this can be rewritten as follows without the need for non-disjoint sets : 598 % \begin{lstlisting}[language=Pseudo] 599 % monitor A, B, C 600 % condition c1 601 602 % acquire A & B & C 603 % signal c1 604 % release A & B & C 605 % acquire A 606 607 % release A 608 % \end{lstlisting} 609 610 % This pseudo code has almost exaclty the same semantics as the code acquiring intersecting sets of monitors. 611 612 % Option 2 uses two-way lock ownership transferring instead of reacquiring monitor A. Two-way monitor ownership transfer is normally done using signalBlock semantics, which immedietely transfers ownership of a monitor before getting the ownership back when the other thread no longer needs the monitor. While the example pseudo-code for Option 2 seems toe transfer ownership of A, B and C and only getting A back, this is not a requirement. Getting back all 3 monitors and releasing B and C differs only in performance. For this reason, the second option could arguably be rewritten as : 613 614 % \begin{lstlisting}[language=Pseudo] 615 % monitor A, B, C 616 % condition c1 617 618 % acquire A 619 % acquire B & C 620 % signalBlock c1 621 % release B & C 622 % release A 623 % \end{lstlisting} 624 625 % Obviously, the difference between these two snippets of pseudo code is that the first one transfers ownership of A, B and C while the second one only transfers ownership of B and C. However, this limitation can be removed by allowing user to release extra monitors when using internal scheduling, referred to as extended internal scheduling (pattent pending) from this point on. Extended internal scheduling means the two following pseudo-codes are functionnaly equivalent : 626 % \begin{table}[h!] 
627 % \centering 628 % \begin{tabular}{c @{\hskip 0.65in} c} 629 % \begin{lstlisting}[language=pseudo] 630 % monitor A, B, C 631 % condition c1 632 633 % acquire A 634 % acquire B & C 635 % signalBlock c1 with A 636 % release B & C 637 % release A 638 % \end{lstlisting} &\begin{lstlisting}[language=pseudo] 639 % monitor A, B, C 640 % condition c1 641 642 % acquire A 643 % acquire A & B & C 644 % signal c1 645 % release A & B & C 646 % release A 647 % \end{lstlisting} 648 % \end{tabular} 649 % \end{table} 650 651 % It must be stated that the extended internal scheduling only makes sense when using wait and signalBlock, since they need to prevent barging, which cannot be done in the context of signal since the ownership transfer is strictly one-directionnal. 652 653 % One critic that could arise is that extended internal schedulling is not composable since signalBlock must be explicitly aware of which context it is in. However, this argument is not relevant since acquire A, B and C in a context where a subset of them is already acquired cannot be achieved without spurriously releasing some locks or having an oracle aware of all monitors. Therefore, composability of internal scheduling is no more an issue than composability of monitors in general. 654 655 % The main benefit of using extended internal scheduling is that it offers the same expressiveness as intersecting monitor set acquiring but greatly simplifies the selection of a leader (or representative) for a group of monitor. Indeed, when using intersecting sets, it is not obvious which set intersects with other sets which means finding a leader representing only the smallest scope is a hard problem. Where as when using disjoint sets, any monitor that would be intersecting must be specified in the extended set, the leader can be chosen as any monitor in the primary set. 656 657 % We need to make sure the semantics for internally scheduling N monitors are a natural extension of the single monitor semantics. 
For this reason, we introduce the concept of \gls{mon-ctx}. In terms of context internal scheduling means "releasing a \gls{mon-ctx} and waiting for an other thread to acquire the same \gls{mon-ctx} and baton-pass it back to the initial thread". This definitions requires looking into what a \gls{mon-ctx} is and what the semantics of waiting and baton-passing are. 658 659 % \subsubsection{Internal scheduling: Context} \label{insched-context} 660 % Monitor scheduling operations are defined in terms of the context they are in. In languages that only supports operations on a single monitor at once, the context is completly defined by which most recently acquired monitors. Indeed, acquiring several monitors will form a stack of monitors which will be released in FILO order. In \CFA, a \gls{mon-ctx} cannot be simply defined by the last monitor that was acquired since \gls{group-acquire} means multiple monitors can be "the last monitor acquired". The \gls{mon-ctx} is therefore defined as the last set of monitors to have been acquired. This means taht when any new monitor is acquired, the group it belongs to is the new \gls{mon-ctx}. Correspondingly, if any monitor is released, the \gls{mon-ctx} reverts back to the context that was used prior to the monitor being acquired. In the most common case, \gls{group-acquire} means every monitor of a group will be acquired in released at the same time. However, since every monitor has its own recursion level, \gls{group-acquire} does not prevent users from reacquiring certain monitors while acquireing new monitors in the same operation. 
For example: 661 662
The monitors are not released and when the signalled thread runs, it assumes it regained ownership of all the monitors it had in its context. 714 715 % \subsubsection{Internal scheduling: Implementation} \label{insched-impl} 716 % To implement internal scheduling, three things are needed: a data structure for waiting tasks, a data structure for signalled tasks and a leaving procedure to run the signalled task. In the case of both data structures, it is desirable to use intrusive data structures in order to prevent the need for any dynamic allocation. However, in both cases being able to queue several items in the same position in a queue is non-trivial, even more so in the presence of concurrency. However, within a given \gls{mon-ctx}, all monitors have exactly the same behavior in regards to scheduling. Therefore, the problem of queuing multiple monitors at once can be ignored by choosing one monitor to represent every monitor in a context. While this could prove difficult in other situations, \gls{group-acquire} requires that the monitors be sorted according to some stable predicate. Since monitors are sorted in all contexts, the representative can simply be the first in the list. Choosing a representative means a simple intrusive queue inside the condition is sufficient to implement the data structure for both waiting and signalled monitors. 717 718 % Since \CFA monitors don't have a complete image of the \gls{mon-ctx}, choosing the representative and maintaining the current context information cannot easily be done by any single monitor. However, as discussed in section [Missing section here], monitor mutual exclusion is implemented using an RAII object which is already in charge of sorting monitors. This object has a complete picture of the \gls{mon-ctx} which means it is well suited to choose the representative and detect context changes.
719 720 % \newpage 721 % \begin{lstlisting} 722 % void ctor( monitor ** _monitors, int _count ) { 723 % bool ctx_changed = false; 724 % for( mon in _monitors ) { 725 % ctx_changed = acquire( mon ) || ctx_changed; 726 % } 727 728 % if( ctx_changed ) { 729 % set_representative(); 730 % set_context(); 731 % } 732 % } 733 734 % void dtor( monitor ** _monitors, int _count ) { 735 % if( context_will_exit( _monitors, count ) ) { 736 % baton_pass(); 737 % return; 738 % } 739 740 % for( mon in _monitors ) { 741 % release( mon ); 742 % } 743 % } 744 745 % \end{lstlisting} 746 747 748 749 % A direct extension of the single monitor semantics is to release all locks when waiting and transferring ownership of all locks when signalling. However, for the purpose of synchronization it may be usefull to only release some of the locks but keep others. It is possible to support internal scheduling and \gls{group-acquire} without any extra syntax by relying on order of acquisition. Here is an example of the different contexts in which internal scheduling can be used. (Note that here the use of helper routines is irrelevant, only routines acquire mutual exclusion have an impact on internal scheduling): 750 751 % \begin{table}[h!] 
752 % \centering 753 % \begin{tabular}{|c|c|c|} 754 % Context 1 & Context 2 & Context 3 \\ 755 % \hline 756 % \begin{lstlisting} 757 % condition e; 758 759 % //acquire a & b 760 % void foo(monitor & mutex a, 761 % monitor & mutex b) { 762 763 % wait(e); //release a & b 764 % } 765 766 767 768 769 770 771 % foo(a,b); 772 % \end{lstlisting} &\begin{lstlisting} 773 % condition e; 774 775 % //acquire a 776 % void bar(monitor & mutex a, 777 % monitor & nomutex b) { 778 % foo(a,b); 779 % } 780 781 % //acquire a & b 782 % void foo(monitor & mutex a, 783 % monitor & mutex b) { 784 % wait(e); //release a & b 785 % } 786 787 % bar(a, b); 788 % \end{lstlisting} &\begin{lstlisting} 789 % condition e; 790 791 % //acquire a 792 % void bar(monitor & mutex a, 793 % monitor & nomutex b) { 794 % baz(a,b); 795 % } 796 797 % //acquire b 798 % void baz(monitor & nomutex a, 799 % monitor & mutex b) { 800 % wait(e); //release b 801 % } 802 803 % bar(a, b); 804 % \end{lstlisting} 805 % \end{tabular} 806 % \end{table} 807 808 % Context 1 is the simplest way of acquiring more than one monitor (\gls{group-acquire}), using a routine with multiple parameters having the \code{mutex} keyword. Context 2 also uses \gls{group-acquire} as well in routine \code{foo}. However, the routine is called by routine \code{bar}, which only acquires monitor \code{a}. Since monitors can be acquired multiple times this does not cause a deadlock by itself but it does force the acquiring order to \code{a} then \code{b}. Context 3 also forces the acquiring order to be \code{a} then \code{b} but does not use \gls{group-acquire}. The previous example tries to illustrate the semantics that must be established to support releasing monitors in a \code{wait} statement. In all cases, the behavior of the wait statment is to release all the locks that were acquired my the inner-most monitor call. That is \code{a & b} in context 1 and 2 and \code{b} only in context 3. Here are a few other examples of this behavior. 
809 810 811 % \begin{center} 812 % \begin{tabular}{|c|c|c|} 813 % \begin{lstlisting} 814 % condition e; 815 816 % //acquire b 817 % void foo(monitor & nomutex a, 818 % monitor & mutex b) { 819 % bar(a,b); 820 % } 821 822 % //acquire a 823 % void bar(monitor & mutex a, 824 % monitor & nomutex b) { 825 826 % wait(e); //release a 827 % //keep b 828 % } 829 830 % foo(a, b); 831 % \end{lstlisting} &\begin{lstlisting} 832 % condition e; 833 834 % //acquire a & b 835 % void foo(monitor & mutex a, 836 % monitor & mutex b) { 837 % bar(a,b); 838 % } 839 840 % //acquire b 841 % void bar(monitor & mutex a, 842 % monitor & nomutex b) { 843 844 % wait(e); //release b 845 % //keep a 846 % } 847 848 % foo(a, b); 849 % \end{lstlisting} &\begin{lstlisting} 850 % condition e; 851 852 % //acquire a & b 853 % void foo(monitor & mutex a, 854 % monitor & mutex b) { 855 % bar(a,b); 856 % } 857 858 % //acquire none 859 % void bar(monitor & nomutex a, 860 % monitor & nomutex b) { 861 862 % wait(e); //release a & b 863 % //keep none 864 % } 865 866 % foo(a, b); 867 % \end{lstlisting} 868 % \end{tabular} 869 % \end{center} 870 % Note the right-most example is actually a trick pulled on the reader. Monitor state information is stored in thread local storage rather then in the routine context, which means that helper routines and other \code{nomutex} routines are invisible to the runtime system in regards to concurrency. This means that in the right-most example, the routine parameters are completly unnecessary. However, calling this routine from outside a valid monitor context is undefined. 871 872 % These semantics imply that in order to release of subset of the monitors currently held, users must write (and name) a routine that only acquires the desired subset and simply calls wait. While users can use this method, \CFA offers the \code{wait_release}\footnote{Not sure if an overload of \code{wait} would work...} which will release only the specified monitors. 
In the center previous examples, the code in the center uses the \code{bar} routine to only release monitor \code{b}. Using the \code{wait_release} helper, this can be rewritten without having the name two routines : 873 % \begin{center} 874 % \begin{tabular}{ c c c } 875 % \begin{lstlisting} 876 % condition e; 877 878 % //acquire a & b 879 % void foo(monitor & mutex a, 880 % monitor & mutex b) { 881 % bar(a,b); 882 % } 883 884 % //acquire b 885 % void bar(monitor & mutex a, 886 % monitor & nomutex b) { 887 888 % wait(e); //release b 889 % //keep a 890 % } 891 892 % foo(a, b); 893 % \end{lstlisting} &\begin{lstlisting} 894 % => 895 % \end{lstlisting} &\begin{lstlisting} 896 % condition e; 897 898 % //acquire a & b 899 % void foo(monitor & mutex a, 900 % monitor & mutex b) { 901 % wait_release(e,b); //release b 902 % //keep a 903 % } 904 905 % foo(a, b); 906 % \end{lstlisting} 907 % \end{tabular} 908 % \end{center} 909 910 % Regardless of the context in which the \code{wait} statement is used, \code{signal} must be called holding the same set of monitors. In all cases, signal only needs a single parameter, the condition variable that needs to be signalled. But \code{signal} needs to be called from the same monitor(s) that call to \code{wait}. Otherwise, mutual exclusion cannot be properly transferred back to the waiting monitor. 911 912 % Finally, an additional semantic which can be very usefull is the \code{signal_block} routine. This routine behaves like signal for all of the semantics discussed above, but with the subtelty that mutual exclusion is transferred to the waiting task immediately rather than wating for the end of the critical section. 913 % \\ 559 914 560 915 % ####### # # ####### ##### ##### # # ####### ###### -
doc/proposals/concurrency/glossary.tex
r221c2de7 r154fdc8 14 14 15 15 \longnewglossaryentry{group-acquire} 16 {name={bulk edacquiring}}16 {name={bulk acquiring}} 17 17 { 18 18 Implicitly acquiring several monitors when entering a monitor. 19 } 20 21 \longnewglossaryentry{mon-ctx} 22 {name={monitor context}} 23 { 24 The state of the current thread regarding which monitors are owned. 19 25 } 20 26 -
doc/proposals/concurrency/style.tex
r221c2de7 r154fdc8 1 1 \input{common} % bespoke macros used in the document 2 3 % \CFADefaultStyle 2 4 3 5 \lstset{ -
doc/proposals/concurrency/version
r221c2de7 r154fdc8 1 0.7. 611 0.7.141 -
doc/rob_thesis/cfa-format.tex
r221c2de7 r154fdc8 72 72 morecomment=[n]{/+}{+/}, 73 73 morecomment=[n][\color{blue}]{/++}{+/}, 74 % Options 75 sensitive=true 76 } 77 78 \lstdefinelanguage{rust}{ 79 % Keywords 80 morekeywords=[1]{ 81 abstract, alignof, as, become, box, 82 break, const, continue, crate, do, 83 else, enum, extern, false, final, 84 fn, for, if, impl, in, 85 let, loop, macro, match, mod, 86 move, mut, offsetof, override, priv, 87 proc, pub, pure, ref, return, 88 Self, self, sizeof, static, struct, 89 super, trait, true, type, typeof, 90 unsafe, unsized, use, virtual, where, 91 while, yield 92 }, 93 % Strings 94 morestring=[b]{"}, 95 % Comments 96 comment=[l]{//}, 97 morecomment=[s]{/*}{*/}, 74 98 % Options 75 99 sensitive=true … … 107 131 style=defaultStyle 108 132 } 109 \lstMakeShortInline[basewidth=0.5em,breaklines=true ]@ % single-character for \lstinline133 \lstMakeShortInline[basewidth=0.5em,breaklines=true,basicstyle=\normalsize\ttfamily\color{basicCol}]@ % single-character for \lstinline 110 134 111 135 \lstnewenvironment{cfacode}[1][]{ … … 155 179 \lstset{ 156 180 language = D, 181 style=defaultStyle, 182 #1 183 } 184 }{} 185 186 \lstnewenvironment{rustcode}[1][]{ 187 \lstset{ 188 language = rust, 157 189 style=defaultStyle, 158 190 #1 -
doc/rob_thesis/conclusions.tex
r221c2de7 r154fdc8 3 3 %====================================================================== 4 4 5 Conclusion paragraphs. 5 Adding resource management and tuples to \CFA has been a challenging design, engineering, and implementation exercise. 6 On the surface, the work may appear as a rehash of similar mechanisms in \CC. 7 However, every added feature is different than its \CC counterpart, often with extended functionality, better integration with C and its programmers, and always supports separate compilation. 8 All of these new features are being used by the \CFA development-team to build the \CFA runtime system. 9 10 \section{Constructors and Destructors} 11 \CFA supports the RAII idiom using constructors and destructors. 12 There are many engineering challenges in introducing constructors and destructors, partially since \CFA is not an object-oriented language. 13 By making use of managed types, \CFA programmers are afforded an extra layer of safety and ease of use in comparison to C programmers. 14 While constructors and destructors provide a sensible default behaviour, \CFA allows experienced programmers to declare unmanaged objects to take control of object management for performance reasons. 15 Constructors and destructors as named functions fit the \CFA polymorphism model perfectly, allowing polymorphic code to use managed types seamlessly. 16 17 \section{Tuples} 18 \CFA can express functions with multiple return values in a way that is simple, concise, and safe. 19 The addition of multiple-return-value functions naturally requires a way to use multiple return values, which begets tuple types. 20 Tuples provide two useful notions of assignment: multiple assignment, allowing simple, yet expressive assignment between multiple variables, and mass assignment, allowing a lossless assignment of a single value across multiple variables. 
21 Tuples have a flexible structure that allows the \CFA type-system to decide how to restructure tuples, making it syntactically simple to pass tuples between functions. 22 Tuple types can be combined with polymorphism and tuple conversions can apply during assertion inference to produce a cohesive feel. 23 24 \section{Variadic Functions} 25 Type-safe variadic functions, with a similar feel to variadic templates, are added to \CFA. 26 The new variadic functions can express complicated recursive algorithms. 27 Unlike variadic templates, it is possible to write @new@ as a library routine and to separately compile @ttype@ polymorphic functions. 28 Variadic functions are statically type checked and provide a user experience that is consistent with that of tuples and polymorphic functions. 29 30 \section{Future Work} 31 \subsection{Constructors and Destructors} 32 Both \CC and Rust support move semantics, which expand the user's control of memory management by providing the ability to transfer ownership of large data, rather than forcing potentially expensive copy semantics. 33 \CFA currently does not support move semantics, partially due to the complexity of the model. 34 The design space is currently being explored with the goal of finding an alternative to move semantics that provides necessary performance benefits, while reducing the amount of repetition required to create a new type, along with the cognitive burden placed on the user. 35 36 % One technique being evaluated is whether named return-values can be used to eliminate unnecessary temporaries \cite{Buhr94a}. 37 % For example, 38 % \begin{cfacode} 39 % struct A { ... }; 40 % [A x] f(A x); 41 % [A y] g(A y); 42 % [A z] h(A z); 43 44 % struct A a1, a2; 45 % a2 = h(g(f(a1))); 46 % \end{cfacode} 47 % Here, since both @f@'s argument and return value have the same name and type, the compiler can infer that @f@ returns its argument. 
48 % With this knowledge, the compiler can reuse the storage for the argument to @f@ as the argument to @g@. % TODO: cite Till thesis? 49 50 Exception handling is among the features expected to be added to \CFA in the near future. 51 For exception handling to properly interact with the rest of the language, it must ensure all RAII guarantees continue to be met. 52 That is, when an exception is raised, it must properly unwind the stack by calling the destructors for any objects that live between the raise and the handler. 53 This can be accomplished either by augmenting the translator to properly emit code that executes the destructors, or by switching destructors to hook into the GCC @cleanup@ attribute \cite[6.32.1]{GCCExtensions}. 54 55 The @cleanup@ attribute, which is attached to a variable declaration, takes a function name as an argument and schedules that routine to be executed when the variable goes out of scope. 56 \begin{cfacode} 57 struct S { int x; }; 58 void __dtor_S(struct S *); 59 { 60 __attribute__((cleanup(__dtor_S))) struct S s; 61 } // calls __dtor_S(&s) 62 \end{cfacode} 63 This mechanism is known and understood by GCC, so that the destructor is properly called in any situation where a variable goes out of scope, including function returns, branches, and built-in GCC exception handling mechanisms using libunwind. 64 65 A caveat of this approach is that the @cleanup@ attribute only permits a function that consumes a single argument of type @T *@ for a variable of type @T@. 66 This restriction means that any destructor that consumes multiple arguments (\eg, because it is polymorphic) or any destructor that is a function pointer (\eg, because it is an assertion parameter) must be called through a local thunk. 
67 For example, 68 \begin{cfacode} 69 forall(otype T) 70 struct Box { 71 T x; 72 }; 73 forall(otype T) void ^?{}(Box(T) * x); // has implicit parameters 74 75 forall(otype T) 76 void f(T x) { 77 T y = x; // destructor is a function-pointer parameter 78 Box(T) z = { x }; // destructor has multiple parameters 79 } 80 \end{cfacode} 81 currently generates the following 82 \begin{cfacode} 83 void _dtor_BoxT( // consumes more than 1 parameter due to assertions 84 void (*_adapter_PTT)(void (*)(), void *, void *), 85 void (*_adapter_T_PTT)(void (*)(), void *, void *, void *), 86 long unsigned int _sizeof_T, 87 long unsigned int _alignof_T, 88 void *(*_assign_T_PTT)(void *, void *), 89 void (*_ctor_PT)(void *), 90 void (*_ctor_PTT)(void *, void *), 91 void (*_dtor_PT)(void *), 92 void *x 93 ); 94 95 void f( 96 void (*_adapter_PTT)(void (*)(), void *, void *), 97 void (*_adapter_T_PTT)(void (*)(), void *, void *, void *), 98 long unsigned int _sizeof_T, 99 long unsigned int _alignof_T, 100 void *(*_assign_TT)(void *, void *), 101 void (*_ctor_T)(void *), 102 void (*_ctor_TT)(void *, void *), 103 void (*_dtor_T)(void *), 104 void *x 105 ){ 106 void *y = __builtin_alloca(_sizeof_T); 107 // constructor call elided 108 109 // generic layout computation elided 110 long unsigned int _sizeof_BoxT = ...; 111 void *z = __builtin_alloca(_sizeof_BoxT); 112 // constructor call elided 113 114 _dtor_BoxT( // ^?{}(&z); -- _dtor_BoxT has > 1 arguments 115 _adapter_PTT, 116 _adapter_T_PTT, 117 _sizeof_T, 118 _alignof_T, 119 _assign_TT, 120 _ctor_T, 121 _ctor_TT, 122 _dtor_T, 123 z 124 ); 125 _dtor_T(y); // ^?{}(&y); -- _dtor_T is a function pointer 126 } 127 \end{cfacode} 128 Further to this point, every distinct array type will require a thunk for its destructor, where array destructor code is currently inlined, since array destructors hard code the length of the array. 129 130 For function call temporaries, new scopes have to be added for destructor ordering to remain consistent. 
131 In particular, the translator currently destroys argument and return value temporary objects as soon as the statement they were created for ends. 132 In order for this behaviour to be maintained, new scopes have to be added around every statement that contains a function call. 133 Since a nested expression can raise an exception, care must be taken when destroying temporary objects. 134 One way to achieve this is to split statements at every function call, to provide the correct scoping to destroy objects as necessary. 135 For example, 136 \begin{cfacode} 137 struct S { ... }; 138 void ?{}(S *, S); 139 void ^?{}(S *); 140 141 S f(); 142 S g(S); 143 144 g(f()); 145 \end{cfacode} 146 would generate 147 \begin{cfacode} 148 struct S { ... }; 149 void _ctor_S(struct S *, struct S); 150 void _dtor_S(struct S *); 151 152 { 153 __attribute__((cleanup(_dtor_S))) struct S _tmp1 = f(); 154 __attribute__((cleanup(_dtor_S))) struct S _tmp2 = 155 (_ctor_S(&_tmp2, _tmp1), _tmp2); 156 __attribute__((cleanup(_dtor_S))) struct S _tmp3 = g(_tmp2); 157 } // destroy _tmp3, _tmp2, _tmp1 158 \end{cfacode} 159 Note that destructors must be registered after the temporary is fully initialized, since it is possible for initialization expressions to raise exceptions, and a destructor should never be called on an uninitialized object. 160 This requires a slightly strange looking initializer for constructor calls, where a comma expression is used to produce the value of the object being initialized, after the constructor call, conceptually bitwise copying the initialized data into itself. 161 Since this copy is wholly unnecessary, it is easily optimized away. 162 163 A second approach is to attach an accompanying boolean to every temporary that records whether the object contains valid data, and thus whether the value should be destructed. 164 \begin{cfacode} 165 struct S { ... 
}; 166 void _ctor_S(struct S *, struct S); 167 void _dtor_S(struct S *); 168 169 struct _tmp_bundle_S { 170 bool valid; 171 struct S value; 172 }; 173 174 void _dtor_tmpS(struct _tmp_bundle_S * ret) { 175 if (ret->valid) { 176 _dtor_S(&ret->value); 177 } 178 } 179 180 { 181 __attribute__((cleanup(_dtor_tmpS))) struct _tmp_bundle_S _tmp1 = { 0 }; 182 __attribute__((cleanup(_dtor_tmpS))) struct _tmp_bundle_S _tmp2 = { 0 }; 183 __attribute__((cleanup(_dtor_tmpS))) struct _tmp_bundle_S _tmp3 = { 0 }; 184 _tmp2.value = g( 185 (_ctor_S( 186 &_tmp2.value, 187 (_tmp1.value = f(), _tmp1.valid = 1, _tmp1.value) 188 ), _tmp2.valid = 1, _tmp2.value) 189 ), _tmp3.valid = 1, _tmp3.value; 190 } // destroy _tmp3, _tmp2, _tmp1 191 \end{cfacode} 192 In particular, the boolean is set immediately after argument construction and immediately after return value copy. 193 The boolean is checked as a part of the @cleanup@ routine, forwarding to the object's destructor if the object is valid. 194 One such type and @cleanup@ routine needs to be generated for every type used in a function parameter or return value. 195 196 The former approach generates much simpler code, however splitting expressions requires care to ensure that expression evaluation order does not change. 197 Expression ordering has to be performed by a full compiler, so it is possible that the latter approach would be more suited to the \CFA prototype, whereas the former approach is clearly the better option in a full compiler. 198 More investigation is needed to determine whether the translator's current design can easily handle proper expression ordering. 199 200 As discussed in Section \ref{s:implicit_copy_construction}, return values are destructed with a different @this@ pointer than they are constructed with. 201 This problem can be easily fixed once a full \CFA compiler is built, since it would have full control over the call/return mechanism. 
202 In particular, since the callee is aware of where it needs to place the return value, it can construct the return value directly, rather than bitwise copy the internal data. 203 204 Currently, the special functions are always auto-generated, except for generic types where the type parameter does not have assertions for the corresponding operation. 205 For example, 206 \begin{cfacode} 207 forall(dtype T | sized(T) | { void ?{}(T *); }) 208 struct S { T x; }; 209 \end{cfacode} 210 only auto-generates the default constructor for @S@, since the member @x@ is missing the other 3 special functions. 211 Once deleted functions have been added, function generation can make use of this information to disable generation of special functions when a member has a deleted function. 212 For example, 213 \begin{cfacode} 214 struct A {}; 215 void ?{}(A *) = delete; 216 struct S { A x; }; // does not generate void ?{}(S *); 217 \end{cfacode} 218 219 Unmanaged objects and their interactions with the managed \CFA environment are an open problem that deserves greater attention. 220 In particular, the interactions between unmanaged objects and copy semantics are subtle and can easily lead to errors. 221 It is possible that the compiler should mark some of these situations as errors by default, and possibly conditionally emit warnings for some situations. 222 Another possibility is to construct, destruct, and assign unmanaged objects using the intrinsic and auto-generated functions. 223 A more thorough examination of the design space for this problem is required. 224 225 Currently, the \CFA translator does not support any warnings. 226 Ideally, the translator should support optional warnings in the case where it can detect that an object has been constructed twice. 227 For example, forwarding constructor calls are guaranteed to initialize the entire object, so redundant constructor calls can cause problems such as memory leaks, while looking innocuous to a novice user. 
228 \begin{cfacode} 229 struct B { ... }; 230 struct A { 231 B x, y, z; 232 }; 233 void ?{}(A * a, B x) { 234 // y, z implicitly default constructed 235 (&a->x){ ... }; // explicitly construct x 236 } // constructs an entire A 237 void ?{}(A * a) { 238 (&a->y){}; // initialize y 239 a{ (B){ ... } }; // forwarding constructor call 240 // initializes entire object, including y 241 } 242 \end{cfacode} 243 244 Finally, while constructors provide a mechanism for establishing invariants, there is currently no mechanism for maintaining invariants without resorting to opaque types. 245 That is, structure fields can be accessed and modified by any block of code without restriction, so while it is possible to ensure that an object is initially set to a valid state, it is not possible to ensure that it remains in a consistent state throughout its lifetime. 246 A popular technique for ensuring consistency in object-oriented programming languages is to provide access modifiers such as @private@, which provides compile-time checks that only privileged code accesses private data. 247 This approach could be added to \CFA, but it requires an idiomatic way of specifying what code is privileged. 248 One possibility is to tie access control into an eventual module system. 249 250 \subsection{Tuples} 251 Named result values are planned, but not yet implemented. 252 This feature ties nicely into named tuples, as seen in D and Swift. 253 254 Currently, tuple flattening and structuring conversions are 0-cost. 255 This makes tuples conceptually very simple to work with, but easily causes unnecessary ambiguity in situations where the type system should be able to differentiate between alternatives. 256 Adding an appropriate cost function to tuple conversions will allow tuples to interact with the rest of the programming language more cohesively. 257 258 \subsection{Variadic Functions} 259 Use of @ttype@ functions currently relies heavily on recursion. 
260 \CC has opened variadic templates up so that recursion is not strictly necessary in some cases, and it would be interesting to see if any such cases can be applied to \CFA. 261 262 \CC supports variadic templated data-types, making it possible to express arbitrary length tuples, arbitrary parameter function objects, and more with generic types. 263 Currently, \CFA does not support @ttype@-parameter generic-types, though there does not appear to be a technical reason that it cannot. 264 Notably, opening up support for this makes it possible to implement the exit form of scope guard (see section \ref{s:ResMgmt}), making it possible to call arbitrary functions at scope exit in idiomatic \CFA. -
doc/rob_thesis/ctordtor.tex
r221c2de7 r154fdc8 3 3 %====================================================================== 4 4 5 % TODO: discuss move semantics; they haven't been implemented, but could be. Currently looking at alternative models. (future work) 6 7 % TODO: as an experiment, implement Andrei Alexandrescu's ScopeGuard http://www.drdobbs.com/cpp/generic-change-the-way-you-write-excepti/184403758?pgno=2 5 % TODO now: as an experiment, implement Andrei Alexandrescu's ScopeGuard http://www.drdobbs.com/cpp/generic-change-the-way-you-write-excepti/184403758?pgno=2 8 6 % doesn't seem possible to do this without allowing ttype on generic structs? 9 7 10 % If a Cforall constructor is in scope, C style initialization is11 % disabled by default.12 % * initialization rule: if any constructor is in scope for type T, try13 % to find a matching constructor for the call. If there are no14 % constructors in scope for type T, then attempt to fall back on15 % C-style initialization.16 % + if this rule was not in place, it would be easy to accidentally17 % use C-style initialization in certain cases, which could lead to18 % subtle errors [2]19 % - this means we need special syntax if we want to allow users to force20 % a C-style initialization (to give users more control)21 % - two different declarations in the same scope can be implicitly22 % initialized differently. That is, there may be two objects of type23 % T that are initialized differently because there is a constructor24 % definition between them. This is not technically specific to25 % constructors.26 27 % C-style initializers can be accessed with @= syntax28 % + provides a way to get around the requirement of using a constructor29 % (for advanced programmers only)30 % - can break invariants in the type => unsafe31 % * provides a way of asserting that a variable is an instance of a32 % C struct (i.e. a POD struct), and so will not be implicitly33 % destructed (this can be useful at times, maybe mitigates the need34 % for move semantics?) 
[3]35 % + can modernize a code base one step at a time36 37 % Cforall constructors can be used in expressions to initialize any38 % piece of memory.39 % + malloc() { ... } calls the appropriate constructor on the newly40 % allocated space; the argument is moved into the constructor call41 % without taking its address [4]42 % - with the above form, there is still no way to ensure that43 % dynamically allocated objects are constructed. To resolve this,44 % we might want a stronger "new" function which always calls the45 % constructor, although how we accomplish that is currently still46 % unresolved (compiler magic vs. better variadic functions?)47 % + This can be used as a placement syntax [5]48 % - can call the constructor on an object more than once, which could49 % cause resource leaks and reinitialize const fields (can try to50 % detect and prevent this in some cases)51 % * compiler always tries to implicitly insert a ctor/dtor pair for52 % non-@= objects.53 % * For POD objects, this will resolve to an autogenerated or54 % intrinsic function.55 % * Intrinsic functions are not automatically called. Autogenerated56 % are, because they may call a non-autogenerated function.57 % * destructors are automatically inserted at appropriate branches58 % (e.g. return, break, continue, goto) and at the end of the block59 % in which they are declared.60 % * For @= objects, the compiler never tries to interfere and insert61 % constructor and destructor calls for that object. 
Copy constructor62 % calls do not count, because the object is not the target of the copy63 % constructor.64 65 % A constructor is declared with the name ?{}66 % + combines the look of C initializers with the precedent of ?() being67 % the name for the function call operator68 % + it is possible to easily search for all constructors in a project69 % and immediately know that a function is a constructor by seeing the70 % name "?{}"71 72 % A destructor is declared with the name ^?{}73 % + name mirrors a constructor's name, with an extra symbol to74 % distinguish it75 % - the symbol '~' cannot be used due to parsing conflicts with the76 % unary '~' (bitwise negation) operator - this conflict exists because77 % we want to allow users to write ^x{}; to destruct x, rather than78 % ^?{}(&x);79 80 % The first argument of a constructor must be a pointer. The constructed81 % type is the base type of the pointer. E.g. void ?{}(T *) is a default82 % constructor for a T.83 % + can name the argument whatever you like, so not constrained by84 % language keyword "this" or "self", etc.85 % - have to explicitly qualify all object members to initialize them86 % (e.g. this->x = 0, rather than just x = 0)87 88 % Destructors can take arguments other than just the destructed pointer89 % * open research problem: not sure how useful this is90 91 % Pointer constructors92 % + can construct separately compiled objects (opaque types) [6]93 % + orthogonal design, follows directly from the definition of the first94 % argument of a constructor95 % - may require copy constructor or move constructor (or equivalent)96 % for correct implementation, which may not be obvious to everyone97 % + required feature for the prelude to specify as much behavior as possible98 % (similar to pointer assignment operators in this respect)99 100 % Designations can only be used for C-style initialization101 % * designation for constructors is equivalent to designation for any102 % general function call. 
Since a function prototype can be redeclared103 % many times, with arguments named differently each time (or not at104 % all!), this is considered to be an undesirable feature. We could105 % construct some set of rules to allow this behaviour, but it is106 % probably more trouble than it's worth, and no matter what we choose,107 % it is not likely to be obvious to most people.108 109 % Constructing an anonymous member [7]110 % + same as with calling any other function on an anonymous member111 % (implicit conversion by the compiler)112 % - may be some cases where this is ambiguous => clarify with a cast113 % (try to design APIs to avoid sharing function signatures between114 % composed types to avoid this)115 116 % Default Constructors and Destructors are called implicitly117 % + cannot forget to construct or destruct an object118 % - requires special syntax to specify that an object is not to be119 % constructed (@=)120 % * an object will not be implicitly constructed OR destructed if121 % explicitly initialized like a C object (@= syntax)122 % * an object will be destructed if there are no constructors in scope123 % (even though it is initialized like a C object) [8]124 125 % An object which changes from POD type to non POD type will not change126 % the semantics of a type containing it by composition127 % * That is, constructors will not be regenerated at the point where128 % an object changes from POD type to non POD type, because this could129 % cause a cascade of constructors being regenerated for many other130 % types. 
Further, there is precedence for this behaviour in other131 % facets of Cforall's design, such as how nested functions interact.132 % * This behaviour can be simplified in a language without declaration133 % before use, because a type can be classified as POD or non POD134 % (rather than potentially changing between the two at some point) at135 % at the global scope (which is likely the most common case)136 % * [9]137 138 % Changes to polymorphic type classes139 % * dtype and ftype remain the same140 % * forall(otype T) is currently essentially the same as141 % forall(dtype T | { @size(T); void ?=?(T *, T); }).142 % The big addition is that you can declare an object of type T, rather143 % than just a pointer to an object of type T since you know the size,144 % and you can assign into a T.145 % * this definition is changed to add default constructor and146 % destructor declarations, to remain consistent with what type meant147 % before the introduction of constructors and destructors.148 % * that is, forall(type T) is now essentially the same as149 % forall(dtype T | { @size(T); void ?=?(T *, T);150 % void ?{}(T *); void ^?{}(T *); })151 % + this is required to make generic types work correctly in152 % polymorphic functions153 % ? since declaring a constructor invalidates the autogenerated154 % routines, it is possible for a type to have constructors, but155 % not default constructors. That is, it might be the case that156 % you want to write a polymorphic function for a type which has157 % a size, but non-default constructors? 
Some options:158 % * declaring a constructor as a part of the assertions list for159 % a type declaration invalidates the default, so160 % forall(otype T | { void ?{}(T *, int); })161 % really means162 % forall(dtype T | { @size(T); void ?=?(T *, T);163 % void ?{}(T *, int); void ^?{}(T *); })164 % * force users to fully declare the assertions list like the165 % above in this case (this seems very undesirable)166 % * add another type class with the current desugaring of type167 % (just size and assignment)168 % * provide some way of subtracting from an existing assertions169 % list (this might be useful to have in general)170 171 % Implementation issues:172 % Changes to prelude/autogen or built in defaults?173 % * pointer ctors/dtors [prelude]174 % * other pointer type routines are declared in the prelude, and this175 % doesn't seem like it should be any different176 % * basic type ctors/dtors [prelude]177 % * other basic type routines are declared in the prelude, and this178 % doesn't seem like it should be any different179 % ? aggregate types [undecided, but leaning towards autogenerate]180 % * prelude181 % * routines specific to aggregate types cannot be predeclared in182 % the prelude because we don't know the name of every183 % aggregate type in the entire program184 % * autogenerate185 % + default assignment operator is already autogenerated for186 % aggregate types187 % * this seems to lead us in the direction of autogenerating,188 % because we may have a struct which contains other objects189 % that require construction [10]. 
If we choose not to190 % autogenerate in this case, then objects which are part of191 % other objects by composition will not be constructed unless192 % a constructor for the outer type is explicitly defined193 % * in this case, we would always autogenerate the appropriate194 % constructor(s) for an aggregate type, but just like with195 % basic types, pointer types, and enum types, the constructor196 % call can be elided when when it is not necessary.197 % + constructors will have to be explicitly autogenerated198 % in the case where they are required for a polymorphic function,199 % when no user defined constructor is in scope, which may make it200 % easiest to always autogenerate all appropriate constructors201 % - n+2 constructors would have to be generated for a POD type202 % * one constructor for each number of valid arguments [0, n],203 % plus the copy constructor204 % * this is taking a simplified approach: in C, it is possible205 % to omit the enclosing braces in a declaration, which would206 % lead to a combinatorial explosion of generated constructors.207 % In the interest of keeping things tractable, Cforall may be208 % incompatible with C in this case. [11]209 % * for non-POD types, only autogenerate the default and copy210 % constructors211 % * alternative: generate only the default constructor and212 % special case initialization for any other constructor when213 % only the autogenerated one exists214 % - this is not very sensible, as by the previous point, these215 % constructors may be needed for polymorphic functions216 % anyway.217 % - must somehow distinguish in resolver between autogenerated and218 % user defined constructors (autogenerated should never be chosen219 % when a user defined option exists [check first parameter], even220 % if full signature differs) (this may also have applications221 % to other autogenerated routines?)222 % - this scheme does not naturally support designation (i.e. 
general223 % functions calls do not support designation), thus these cases224 % will have to be treated specially in either case225 % * defaults226 % * i.e. hardcode a new set of rules for some "appropriate" default227 % behaviour for228 % + when resolving an initialization expression, explicitly check to229 % see if any constructors are in scope. If yes, attempt to resolve230 % to a constructor, and produce an error message if a match is not231 % found. If there are no constructors in scope, resolve to232 % initializing each field individually (C-style)233 % + does not attempt to autogenerate constructors for POD types,234 % which can be seen as a space optimization for the program235 % binary236 % - as stated previously, a polymorphic routine may require these237 % autogenerated constructors, so this doesn't seem like a big win,238 % because this leads to more complicated logic and tracking of239 % which constructors have already been generated240 % - even though a constructor is not explicitly declared or used241 % polymorphically, we might still need one for all uses of a242 % struct (e.g. in the case of composition).243 % * the biggest tradeoff in autogenerating vs. defaulting appears to244 % be in where and how the special code to check if constructors are245 % present is handled. 
It appears that there are more reasons to246 % autogenerate than not.247 248 % --- examples249 % [1] As an example of using constructors polymorphically, consider a250 % slight modification on the foldl example I put on the mailing list a251 % few months ago:252 253 % context iterable(type collection, type element, type iterator) {254 % void ?{}(iterator *, collection); // used to be makeIterator, but can255 % // idiomatically use constructor256 % int hasNext(iterator);257 % iterator ++?(iterator *);258 % lvalue element *?(iterator);259 % };260 261 262 % forall(type collection, type element, type result, type iterator263 % | iterable(collection, element, iterator))264 % result foldl(collection c, result acc,265 % result (*reduce)(result, element)) {266 % iterator it = { c };267 % while (hasNext(it)) {268 % acc = reduce(acc, *it);269 % ++it;270 % }271 % return acc;272 % }273 274 % Now foldl makes use of the knowledge that the iterator type has a275 % single argument constructor which takes the collection to iterate276 % over. This pattern allows polymorphic code to look more natural277 % (constructors are generally preferred to named initializer/creation278 % routines, e.g. "makeIterator")279 280 % [2] An example of some potentially dangerous code that we don't want281 % to let easily slip through the cracks - if this is really what you282 % want, then use @= syntax for the second declaration to quiet the283 % compiler.284 285 % struct A { int x, y, z; }286 % ?{}(A *, int);287 % ?{}(A *, int, int, int);288 289 % A a1 = { 1 }; // uses ?{}(A *, int);290 % A a2 = { 2, 3 }; // C-style initialization -> no invariants!291 % A a3 = { 4, 5, 6 }; // uses ?{}(A *, int, int, int);292 293 % [3] Since @= syntax creates a C object (essentially a POD, as far as294 % the compiler is concerned), the object will not be destructed295 % implicitly when it leaves scope, nor will it be copy constructed when296 % it is returned. 
In this case, a memcpy should be equivalent to a move.297 298 % // Box.h299 % struct Box;300 % void ?{}(Box **, int};301 % void ^?{}(Box **);302 % Box * make_fortytwo();303 304 % // Box.cfa305 % Box * make_fortytwo() {306 % Box *b @= {};307 % (&b){ 42 }; // construct explicitly308 % return b; // no destruction, essentially a move?309 % }310 311 % [4] Cforall's typesafe malloc can be composed with constructor312 % expressions. It is possible for a user to define their own functions313 % similar to malloc and achieve the same effects (e.g. Aaron's example314 % of an arena allocator)315 316 % // CFA malloc317 % forall(type T)318 % T * malloc() { return (T *)malloc(sizeof(T)); }319 320 % struct A { int x, y, z; };321 % void ?{}(A *, int);322 323 % int foo(){324 % ...325 % // desugars to:326 % // A * a = ?{}(malloc(), 123);327 % A * a = malloc() { 123 };328 % ...329 % }330 331 % [5] Aaron's example of combining function calls with constructor332 % syntax to perform an operation similar to C++'s std::vector::emplace333 % (i.e. to construct a new element in place, without the need to334 % copy)335 336 % forall(type T)337 % struct vector {338 % T * elem;339 % int len;340 % ...341 % };342 343 % ...344 % forall(type T)345 % T * vector_new(vector(T) * v) {346 % // reallocate if needed347 % return &v->elem[len++];348 % }349 350 % int main() {351 % vector(int) * v = ...352 % vector_new(v){ 42 }; // add element to the end of vector353 % }354 355 % [6] Pointer Constructors. It could be useful to use the existing356 % constructor syntax even more uniformly for ADTs. 
With this, ADTs can357 % be initialized in the same manor as any other object in a polymorphic358 % function.359 360 % // vector.h361 % forall(type T) struct vector;362 % forall(type T) void ?{}(vector(T) **);363 % // adds an element to the end364 % forall(type T) vector(T) * ?+?(vector(T) *, T);365 366 % // vector.cfa367 % // don't want to expose the implementation to the user and/or don't368 % // want to recompile the entire program if the struct definition369 % // changes370 371 % forall(type T) struct vector {372 % T * elem;373 % int len;374 % int capacity;375 % };376 377 % forall(type T) void resize(vector(T) ** v) { ... }378 379 % forall(type T) void ?{}(vector(T) ** v) {380 % vector(T) * vect = *v = malloc();381 % vect->capacity = 10;382 % vect->len = 0;383 % vect->elem = malloc(vect->capacity);384 % }385 386 % forall(type T) vector(T) * ?+?(vector(T) *v, T elem) {387 % if (v->len == v->capacity) resize(&v);388 % v->elem[v->len++] = elem;389 % }390 391 % // main.cfa392 % #include "adt.h"393 % forall(type T | { T ?+?(T, int); }394 % T sumRange(int lower, int upper) {395 % T x; // default construct396 % for (int i = lower; i <= upper; i++) {397 % x = x + i;398 % }399 % return x;400 % }401 402 % int main() {403 % vector(int) * numbers = sumRange(1, 10);404 % // numbers is now a vector containing [1..10]405 406 % int sum = sumRange(1, 10);407 % // sum is now an int containing the value 55408 % }409 410 % [7] The current proposal is to use the plan 9 model of inheritance.411 % Under this model, all of the members of an unnamed struct instance412 % become members of the containing struct. 
In addition, an object413 % can be passed as an argument to a function expecting one of its414 % base structs.415 416 % struct Point {417 % double x;418 % double y;419 % };420 421 % struct ColoredPoint {422 % Point; // anonymous member (no identifier)423 % // => a ColoredPoint has an x and y of type double424 % int color;425 % };426 427 % ColoredPoint cp = ...;428 % cp.x = 10.3; // x from Point is accessed directly429 % cp.color = 0x33aaff; // color is accessed normally430 % foo(cp); // cp can be used directly as a Point431 432 % void ?{}(Point *p, double x, double y) {433 % p->x = x;434 % p->y = y;435 % }436 437 % void ?{}(ColoredPoint *cp, double x, double y, int color) {438 % (&cp){ x, y }; // unambiguous, no ?{}(ColoredPoint*,double,double)439 % cp->color = color;440 % }441 442 % struct Size {443 % double width;444 % double height;445 % };446 447 % void ?{}(Size *s, double w, double h) {448 % p->width = w;449 % p->height = h;450 % }451 452 % struct Foo {453 % Point;454 % Size;455 % }456 457 % ?{}(Foo &f, double x, double y, double w, double h) {458 % // (&F,x,y) is ambiguous => is it ?{}(Point*,double,double) or459 % // ?{}(Size*,double,double)? Solve with a cast:460 % ((Point*)&F){ x, y };461 % ((Size*)&F){ w, h };462 % }463 464 % [8] Destructors will be called on objects that were not constructed.465 466 % struct A { ... };467 % ^?{}(A *);468 % {469 % A x;470 % A y @= {};471 % } // x is destructed, even though it wasn't constructed472 % // y is not destructed, because it is explicitly a C object473 474 475 % [9] A type's constructor is generated at declaration time using476 % current information about an object's members. This is analogous to477 % the treatment of other operators. 
For example, an object's assignment478 % operator will not change to call the override of a member's assignment479 % operator unless the object's assignment is also explicitly overridden.480 % This problem can potentially be treated differently in Do, since each481 % compilation unit is passed over at least twice (once to gather482 % symbol information, once to generate code - this is necessary to483 % achieve the "No declarations" goal)484 485 % struct A { ... };486 % struct B { A x; };487 % ...488 % void ?{}(A *); // from this point on, A objects will be constructed489 % B b1; // b1 and b1.x are both NOT constructed, because B490 % // objects are not constructed491 % void ?{}(B *); // from this point on, B objects will be constructed492 % B b2; // b2 and b2.x are both constructed493 494 % struct C { A x; };495 % // implicit definition of ?{}(C*), because C is not a POD type since496 % // it contains a non-POD type by composition497 % C c; // c and c.x are both constructed498 499 % [10] Requiring construction by composition500 501 % struct A {502 % ...503 % };504 505 % // declared ctor disables default c-style initialization of506 % // A objects; A is no longer a POD type507 % void ?{}(A *);508 509 % struct B {510 % A x;511 % };512 513 % // B objects can not be C-style initialized, because A objects514 % // must be constructed => B objects are transitively not POD types515 % B b; // b.x must be constructed, but B is not constructible516 % // => must autogenerate ?{}(B *) after struct B definition,517 % // which calls ?{}(&b.x)518 519 % [11] Explosion in the number of generated constructors, due to strange520 % C semantics.521 522 % struct A { int x, y; };523 % struct B { A u, v, w; };524 525 % A a = { 0, 0 };526 527 % // in C, you are allowed to do this528 % B b1 = { 1, 2, 3, 4, 5, 6 };529 % B b2 = { 1, 2, 3 };530 % B b3 = { a, a, a };531 % B b4 = { a, 5, 4, a };532 % B b5 = { 1, 2, a, 3 };533 534 % // we want to disallow b1, b2, b4, and b5 in Cforall.535 % // In 
particular, we will autogenerate these constructors:536 % void ?{}(A *); // default/0 parameters537 % void ?{}(A *, int); // 1 parameter538 % void ?{}(A *, int, int); // 2 parameters539 % void ?{}(A *, const A *); // copy constructor540 541 % void ?{}(B *); // default/0 parameters542 % void ?{}(B *, A); // 1 parameter543 % void ?{}(B *, A, A); // 2 parameters544 % void ?{}(B *, A, A, A); // 3 parameters545 % void ?{}(B *, const B *); // copy constructor546 547 % // we will not generate constructors for every valid combination548 % // of members in C. For example, we will not generate549 % void ?{}(B *, int, int, int, int, int, int); // b1 would need this550 % void ?{}(B *, int, int, int); // b2 would need this551 % void ?{}(B *, A, int, int, A); // b4 would need this552 % void ?{}(B *, int, int, A, int); // b5 would need this553 % // and so on554 555 556 557 % TODO: talk somewhere about compound literals?558 559 8 Since \CFA is a true systems language, it does not provide a garbage collector. 560 As well, \CFA is not an object-oriented programming language, i.e.structures cannot have routine members.9 As well, \CFA is not an object-oriented programming language, \ie, structures cannot have routine members. 561 10 Nevertheless, one important goal is to reduce programming complexity and increase safety. 562 11 To that end, \CFA provides support for implicit pre/post-execution of routines for objects, via constructors and destructors. 563 12 564 % TODO: this is old. 
remove or refactor565 % Manual resource management is difficult.566 % Part of the difficulty results from not having any guarantees about the current state of an object.567 % Objects can be internally composed of pointers that may reference resources which may or may not need to be manually released, and keeping track of that state for each object can be difficult for the end user.568 569 % Constructors and destructors provide a mechanism to bookend the lifetime of an object, allowing the designer of a type to establish invariants for objects of that type.570 % Constructors guarantee that object initialization code is run before the object can be used, while destructors provide a mechanism that is guaranteed to be run immediately before an object's lifetime ends.571 % Constructors and destructors can help to simplify resource management when used in a disciplined way.572 % In particular, when all resources are acquired in a constructor, and all resources are released in a destructor, no resource leaks are possible.573 % This pattern is a popular idiom in several languages, such as \CC, known as RAII (Resource Acquisition Is Initialization).574 575 13 This chapter details the design of constructors and destructors in \CFA, along with their current implementation in the translator. 576 Generated code samples have been edited to provide comments for clarity and to save on space.14 Generated code samples have been edited for clarity and brevity. 577 15 578 16 \section{Design Criteria} … … 592 30 Next, @x@ is assigned the value of @y@. 593 31 In the last line, @z@ is implicitly initialized to 0 since it is marked @static@. 594 The key difference between assignment and initialization being that assignment occurs on a live object ( i.e.an object that contains data).32 The key difference between assignment and initialization being that assignment occurs on a live object (\ie, an object that contains data). 
595 33 It is important to note that this means @x@ could have been used uninitialized prior to being assigned, while @y@ could not be used uninitialized. 596 Use of uninitialized variables yields undefined behaviour, which is a common source of errors in C programs. % TODO: *citation* 597 598 Declaration initialization is insufficient, because it permits uninitialized variables to exist and because it does not allow for the insertion of arbitrary code before the variable is live. 599 Many C compilers give good warnings most of the time, but they cannot in all cases. 600 \begin{cfacode} 601 int f(int *); // never reads the parameter, only writes 602 int g(int *); // reads the parameter - expects an initialized variable 34 Use of uninitialized variables yields undefined behaviour, which is a common source of errors in C programs. 35 36 Initialization of a declaration is strictly optional, permitting uninitialized variables to exist. 37 Furthermore, declaration initialization is limited to expressions, so there is no way to insert arbitrary code before a variable is live, without delaying the declaration. 38 Many C compilers give good warnings for uninitialized variables most of the time, but they cannot in all cases. 
39 \begin{cfacode} 40 int f(int *); // output parameter: never reads, only writes 41 int g(int *); // input parameter: never writes, only reads, 42 // so requires initialized variable 603 43 604 44 int x, y; 605 45 f(&x); // okay - only writes to x 606 g(&y); // will usey uninitialized607 \end{cfacode} 608 Other languages are able to give errors in the case of uninitialized variable use, but due to backwards compatibility concerns, this cannot bethe case in \CFA.609 610 In C, constructors and destructors are often mimicked by providing routines that create and tear down objects, where the teardown function is typically only necessary if the type modifies the execution environment.46 g(&y); // uses y uninitialized 47 \end{cfacode} 48 Other languages are able to give errors in the case of uninitialized variable use, but due to backwards compatibility concerns, this is not the case in \CFA. 49 50 In C, constructors and destructors are often mimicked by providing routines that create and tear down objects, where the tear down function is typically only necessary if the type modifies the execution environment. 611 51 \begin{cfacode} 612 52 struct array_int { … … 614 54 }; 615 55 struct array_int create_array(int sz) { 616 return (struct array_int) { malloc(sizeof(int)*sz) };56 return (struct array_int) { calloc(sizeof(int)*sz) }; 617 57 } 618 58 void destroy_rh(struct resource_holder * rh) { … … 634 74 Furthermore, even with this idiom it is easy to make mistakes, such as forgetting to destroy an object or destroying it multiple times. 635 75 636 A constructor provides a way of ensuring that the necessary aspects of object initialization is performed, from setting up invariants to providing compile- time checks for appropriate initialization parameters.76 A constructor provides a way of ensuring that the necessary aspects of object initialization is performed, from setting up invariants to providing compile- and run-time checks for appropriate initialization parameters. 
637 77 This goal is achieved through a guarantee that a constructor is called implicitly after every object is allocated from a type with associated constructors, as part of an object's definition. 638 78 Since a constructor is called on every object of a managed type, it is impossible to forget to initialize such objects, as long as all constructors perform some sensible form of initialization. 639 79 640 80 In \CFA, a constructor is a function with the name @?{}@. 81 Like other operators in \CFA, the name represents the syntax used to call the constructor, \eg, @struct S = { ... };@. 641 82 Every constructor must have a return type of @void@ and at least one parameter, the first of which is colloquially referred to as the \emph{this} parameter, as in many object-oriented programming-languages (however, a programmer can give it an arbitrary name). 642 83 The @this@ parameter must have a pointer type, whose base type is the type of object that the function constructs. … … 655 96 656 97 In C, if the user creates an @Array@ object, the fields @data@ and @len@ are uninitialized, unless an explicit initializer list is present. 657 It is the user's responsibility to remember to initialize both of the fields to sensible values .98 It is the user's responsibility to remember to initialize both of the fields to sensible values, since there are no implicit checks for invalid values or reasonable defaults. 658 99 In \CFA, the user can define a constructor to handle initialization of @Array@ objects. 659 100 … … 671 112 This constructor initializes @x@ so that its @length@ field has the value 10, and its @data@ field holds a pointer to a block of memory large enough to hold 10 @int@s, and sets the value of each element of the array to 0. 672 113 This particular form of constructor is called the \emph{default constructor}, because it is called on an object defined without an initializer. 
673 In other words, a default constructor is a constructor that takes a single argument ,the @this@ parameter.674 675 In \CFA, a destructor is a function much like a constructor, except that its name is \lstinline!^?{}! .676 A destructor for the @Array@ type can be defined as such.114 In other words, a default constructor is a constructor that takes a single argument: the @this@ parameter. 115 116 In \CFA, a destructor is a function much like a constructor, except that its name is \lstinline!^?{}! and it takes only one argument. 117 A destructor for the @Array@ type can be defined as: 677 118 \begin{cfacode} 678 119 void ^?{}(Array * arr) { … … 680 121 } 681 122 \end{cfacode} 682 Since the destructor is automatically called at deallocation for all objects of type @Array@, the memory associated with an @Array@ is automatically freed when the object's lifetime ends. 123 The destructor is automatically called at deallocation for all objects of type @Array@. 124 Hence, the memory associated with an @Array@ is automatically freed when the object's lifetime ends. 683 125 The exact guarantees made by \CFA with respect to the calling of destructors are discussed in section \ref{sub:implicit_dtor}. 684 126 … … 691 133 \end{cfacode} 692 134 By the previous definition of the default constructor for @Array@, @x@ and @y@ are initialized to valid arrays of length 10 after their respective definitions. 693 On line 3, @z@ is initialized with the value of @x@, while on line @4@, @y@ is assigned the value of @x@.135 On line 2, @z@ is initialized with the value of @x@, while on line 3, @y@ is assigned the value of @x@. 694 136 The key distinction between initialization and assignment is that a value to be initialized does not hold any meaningful values, whereas an object to be assigned might. 695 137 In particular, these cases cannot be handled the same way because in the former case @z@ does not currently own an array, while @y@ does. 
… … 712 154 The first function is called a \emph{copy constructor}, because it constructs its argument by copying the values from another object of the same type. 713 155 The second function is the standard copy-assignment operator. 714 The se four functions are special in that they control the state of most objects.156 The four functions (default constructor, destructor, copy constructor, and assignment operator) are special in that they safely control the state of most objects. 715 157 716 158 It is possible to define a constructor that takes any combination of parameters to provide additional initialization options. 717 For example, a reasonable extension to the array type would be a constructor that allocates the array to a given initial capacity and initializes the array to a given @fill@ value.159 For example, a reasonable extension to the array type would be a constructor that allocates the array to a given initial capacity and initializes the elements of the array to a given @fill@ value. 718 160 \begin{cfacode} 719 161 void ?{}(Array * arr, int capacity, int fill) { … … 725 167 } 726 168 \end{cfacode} 169 727 170 In \CFA, constructors are called implicitly in initialization contexts. 728 171 \begin{cfacode} 729 172 Array x, y = { 20, 0xdeadbeef }, z = y; 730 173 \end{cfacode} 731 In \CFA, constructor calls look just like C initializers, which allows them to be inserted into legacy C code with minimal code changes, and also provides a very simple syntax that veteran C programmers are familiar with.732 One downside of reusing C initialization syntax is that it is n't possible to determine whether an object is constructed just by looking at its declaration, since that requires knowledge of whether the type is managed at that point.174 Constructor calls look just like C initializers, which allows them to be inserted into legacy C code with minimal code changes, and also provides a very simple syntax that veteran C programmers are familiar with. 
175 One downside of reusing C initialization syntax is that it is not possible to determine whether an object is constructed just by looking at its declaration, since that requires knowledge of whether the type is managed at that point in the program. 733 176 734 177 This example generates the following code … … 748 191 Destructors are implicitly called in reverse declaration-order so that objects with dependencies are destructed before the objects they are dependent on. 749 192 750 \subsection{ Syntax}751 \label{sub:syntax} % TODO: finish this section193 \subsection{Calling Syntax} 194 \label{sub:syntax} 752 195 There are several ways to construct an object in \CFA. 753 196 As previously introduced, every variable is automatically constructed at its definition, which is the most natural way to construct an object. … … 773 216 A * y = malloc(); // copy construct: ?{}(&y, malloc()) 774 217 775 ?{}(&x); // explicit construct x 776 ?{}(y, x); // explit construct y from x 777 ^?{}(&x); // explicit destroy x 218 ?{}(&x); // explicit construct x, second construction 219 ?{}(y, x); // explit construct y from x, second construction 220 ^?{}(&x); // explicit destroy x, in different order 778 221 ^?{}(y); // explicit destroy y 779 222 … … 781 224 // implicit ^?{}(&x); 782 225 \end{cfacode} 783 Calling a constructor or destructor directly is a flexible feature that allows complete control over the management of a piece ofstorage.226 Calling a constructor or destructor directly is a flexible feature that allows complete control over the management of storage. 784 227 In particular, constructors double as a placement syntax. 785 228 \begin{cfacode} … … 803 246 \end{cfacode} 804 247 Finally, constructors and destructors support \emph{operator syntax}. 805 Like other operators in \CFA, the function name mirrors the use-case, in that the first $N$ arguments fill in the place of the question mark. 
248 Like other operators in \CFA, the function name mirrors the use-case, in that the question marks are placeholders for the first $N$ arguments. 249 This syntactic form is similar to the new initialization syntax in \CCeleven, except that it is used in expression contexts, rather than declaration contexts. 806 250 \begin{cfacode} 807 251 struct A { ... }; … … 822 266 Destructor operator syntax is actually a statement, and requires parentheses for symmetry with constructor syntax. 823 267 268 One of these three syntactic forms should appeal to either C or \CC programmers using \CFA. 269 270 \subsection{Constructor Expressions} 271 In \CFA, it is possible to use a constructor as an expression. 272 Like other operators, the function name @?{}@ matches its operator syntax. 273 For example, @(&x){}@ calls the default constructor on the variable @x@, and produces @&x@ as a result. 274 A key example for this capability is the use of constructor expressions to initialize the result of a call to @malloc@. 275 \begin{cfacode} 276 struct X { ... }; 277 void ?{}(X *, double); 278 X * x = malloc(){ 1.5 }; 279 \end{cfacode} 280 In this example, @malloc@ dynamically allocates storage and initializes it using a constructor, all before assigning it into the variable @x@. 281 If this extension is not present, constructing dynamically allocated objects is much more cumbersome, requiring separate initialization of the pointer and initialization of the pointed-to memory. 282 \begin{cfacode} 283 X * x = malloc(); 284 x{ 1.5 }; 285 \end{cfacode} 286 Not only is this verbose, but it is also more error prone, since this form allows maintenance code to easily sneak in between the initialization of @x@ and the initialization of the memory that @x@ points to. 287 This feature is implemented via a transformation producing the value of the first argument of the constructor, since constructors do not themselves have a return value. 
288 Since this transformation results in two instances of the subexpression, care is taken to allocate a temporary variable to hold the result of the subexpression in the case where the subexpression may contain side effects. 289 The previous example generates the following code. 290 \begin{cfacode} 291 struct X *_tmp_ctor; 292 struct X *x = ?{}( // construct result of malloc 293 _tmp_ctor=malloc_T( // store result of malloc 294 sizeof(struct X), 295 _Alignof(struct X) 296 ), 297 1.5 298 ), _tmp_ctor; // produce constructed result of malloc 299 \end{cfacode} 300 It should be noted that this technique is not exclusive to @malloc@, and allows a user to write a custom allocator that can be idiomatically used in much the same way as a constructed @malloc@ call. 301 302 It should be noted that while it is possible to use operator syntax with destructors, destructors invalidate their argument, thus operator syntax with destructors is a statement and does not produce a value. 303 824 304 \subsection{Function Generation} 825 In \CFA, every type is defined to have the core set of four functions described previously.305 In \CFA, every type is defined to have the core set of four special functions described previously. 826 306 Having these functions exist for every type greatly simplifies the semantics of the language, since most operations can simply be defined directly in terms of function calls. 827 307 In addition to simplifying the definition of the language, it also simplifies the analysis that the translator must perform. … … 833 313 There are several options for user-defined types: structures, unions, and enumerations. 834 314 To aid in ease of use, the standard set of four functions is automatically generated for a user-defined type after its definition is completed. 
835 By auto-generating these functions, it is ensured that legacy C code will continueto work correctly in every context where \CFA expects these functions to exist, since they are generated for every complete type.315 By auto-generating these functions, it is ensured that legacy C code continues to work correctly in every context where \CFA expects these functions to exist, since they are generated for every complete type. 836 316 837 317 The generated functions for enumerations are the simplest. 838 Since enumerations in C are essentially just another integral type, the generated functions behave in the same way that the builtin functions for the basic types work. 839 % TODO: examples for enums 318 Since enumerations in C are essentially just another integral type, the generated functions behave in the same way that the built-in functions for the basic types work. 840 319 For example, given the enumeration 841 320 \begin{cfacode} … … 850 329 } 851 330 void ?{}(enum Colour *_dst, enum Colour _src){ 852 (*_dst)=_src; // bitwise copy331 *_dst=_src; // bitwise copy 853 332 } 854 333 void ^?{}(enum Colour *_dst){ … … 856 335 } 857 336 enum Colour ?=?(enum Colour *_dst, enum Colour _src){ 858 return (*_dst)=_src; // bitwise copy337 return *_dst=_src; // bitwise copy 859 338 } 860 339 \end{cfacode} 861 340 In the future, \CFA will introduce strongly-typed enumerations, like those in \CC. 862 The existing generated routines will be sufficient to express this restriction, since they are currently set up to take in values of that enumeration type.341 The existing generated routines are sufficient to express this restriction, since they are currently set up to take in values of that enumeration type. 
863 342 Changes related to this feature only need to affect the expression resolution phase, where more strict rules will be applied to prevent implicit conversions from integral types to enumeration types, but should continue to permit conversions from enumeration types to @int@. 864 In this way, it will still be possible to add an @int@ to an enumeration, but the resulting value will be an @int@, meaning that it won't be possible to reassign the value into an enumeration without a cast.343 In this way, it is still possible to add an @int@ to an enumeration, but the resulting value is an @int@, meaning it cannot be reassigned to an enumeration without a cast. 865 344 866 345 For structures, the situation is more complicated. 867 Fora structure @S@ with members @M$_0$@, @M$_1$@, ... @M$_{N-1}$@, each function @f@ in the standard set calls \lstinline{f(s->M$_i$, ...)} for each @$i$@.868 That is, a default constructor for @S@ default constructs the members of @S@, the copy constructor with copy constructthem, and so on.869 For example given the structdefinition346 Given a structure @S@ with members @M$_0$@, @M$_1$@, ... @M$_{N-1}$@, each function @f@ in the standard set calls \lstinline{f(s->M$_i$, ...)} for each @$i$@. 347 That is, a default constructor for @S@ default constructs the members of @S@, the copy constructor copy constructs them, and so on. 348 For example, given the structure definition 870 349 \begin{cfacode} 871 350 struct A { … … 893 372 } 894 373 \end{cfacode} 895 It is important to note that the destructors are called in reverse declaration order to resolveconflicts in the event there are dependencies among members.374 It is important to note that the destructors are called in reverse declaration order to prevent conflicts in the event there are dependencies among members. 896 375 897 376 In addition to the standard set, a set of \emph{field constructors} is also generated for structures. 
898 The field constructors are constructors that consume a prefix of the struct 's memberlist.377 The field constructors are constructors that consume a prefix of the structure's member-list. 899 378 That is, $N$ constructors are built of the form @void ?{}(S *, T$_{\text{M}_0}$)@, @void ?{}(S *, T$_{\text{M}_0}$, T$_{\text{M}_1}$)@, ..., @void ?{}(S *, T$_{\text{M}_0}$, T$_{\text{M}_1}$, ..., T$_{\text{M}_{N-1}}$)@, where members are copy constructed if they have a corresponding positional argument and are default constructed otherwise. 900 The addition of field constructors allows struct s in \CFA to be used naturally in the same ways that they could be used in C (i.e. to initialize any prefix of the struct), e.g., @A a0 = { b }, a1 = { b, c }@.379 The addition of field constructors allows structures in \CFA to be used naturally in the same ways as used in C (\ie, to initialize any prefix of the structure), \eg, @A a0 = { b }, a1 = { b, c }@. 901 380 Extending the previous example, the following constructors are implicitly generated for @A@. 902 381 \begin{cfacode} … … 911 390 \end{cfacode} 912 391 913 For unions, the default constructor and destructor do nothing, as it is not obvious which member if anyshould be constructed.392 For unions, the default constructor and destructor do nothing, as it is not obvious which member, if any, should be constructed. 914 393 For copy constructor and assignment operations, a bitwise @memcpy@ is applied. 915 394 In standard C, a union can also be initialized using a value of the same type as its first member, and so a corresponding field constructor is generated to perform a bitwise @memcpy@ of the object. 
916 An alter antive to this design is to always construct and destruct the first member of a union, to match with the C semantics of initializing the first member of the union.395 An alternative to this design is to always construct and destruct the first member of a union, to match with the C semantics of initializing the first member of the union. 917 396 This approach ultimately feels subtle and unsafe. 918 397 Another option is to, like \CC, disallow unions from containing members that are themselves managed types. … … 947 426 948 427 % This feature works in the \CFA model, since constructors are simply special functions and can be called explicitly, unlike in \CC. % this sentence isn't really true => placement new 949 In \CCeleven, this restriction has been loosened to allow unions with managed members, with the caveat that anyif there are any members with a user-defined operation, then that operation is not implicitly defined, forcing the user to define the operation if necessary.428 In \CCeleven, unions may have managed members, with the caveat that if there are any members with a user-defined operation, then that operation is not implicitly defined, forcing the user to define the operation if necessary. 950 429 This restriction could easily be added into \CFA once \emph{deleted} functions are added. 951 430 952 431 \subsection{Using Constructors and Destructors} 953 Implicitly generated constructor and destructor calls ignore the outermost type qualifiers, e.g.@const@ and @volatile@, on a type by way of a cast on the first argument to the function.432 Implicitly generated constructor and destructor calls ignore the outermost type qualifiers, \eg @const@ and @volatile@, on a type by way of a cast on the first argument to the function. 954 433 For example, 955 434 \begin{cfacode} … … 970 449 Here, @&s@ and @&s2@ are cast to unqualified pointer types. 
971 450 This mechanism allows the same constructors and destructors to be used for qualified objects as for unqualified objects. 972 Since this applies only to implicitly generated constructor calls, the language does not allow qualified objects to be re-initialized with a constructor without an explicit cast. 451 This rule applies only to implicitly generated constructor calls. 452 Hence, explicitly re-initializing qualified objects with a constructor requires an explicit cast. 453 454 As discussed in Section \ref{sub:c_background}, compound literals create unnamed objects. 455 This mechanism can continue to be used seamlessly in \CFA with managed types to create temporary objects. 456 The object created by a compound literal is constructed using the provided brace-enclosed initializer-list, and is destructed at the end of the scope it is used in. 457 For example, 458 \begin{cfacode} 459 struct A { int x, y; }; 460 void ?{}(A *, int, int); 461 { 462 int x = (A){ 10, 20 }.x; 463 } 464 \end{cfacode} 465 is equivalent to 466 \begin{cfacode} 467 struct A { int x, y; }; 468 void ?{}(A *, int, int); 469 { 470 A _tmp; 471 ?{}(&_tmp, 10, 20); 472 int x = _tmp.x; 473 ^?{}(&_tmp); 474 } 475 \end{cfacode} 973 476 974 477 Unlike \CC, \CFA provides an escape hatch that allows a user to decide at an object's definition whether it should be managed or not. … … 984 487 A a2 @= { 0 }; // unmanaged 985 488 \end{cfacode} 986 In this example, @a1@ is a managed object, and thus is default constructed and destructed at the end of @a1@'s lifetime, while @a2@ is an unmanaged object and is not implicitly constructed or destructed. 987 Instead, @a2->x@ is initialized to @0@ as if it were a C object, due to the explicit initializer. 988 Existing constructors are ignored when \ateq is used, so that any valid C initializer is able to initialize the object. 
989 990 In addition to freedom, \ateq provides a simple path to migrating legacy C code to Cforall, in that objects can be moved from C-style initialization to \CFA gradually and individually. 489 In this example, @a1@ is a managed object, and thus is default constructed and destructed at the start/end of @a1@'s lifetime, while @a2@ is an unmanaged object and is not implicitly constructed or destructed. 490 Instead, @a2->x@ is initialized to @0@ as if it were a C object, because of the explicit initializer. 491 492 In addition to freedom, \ateq provides a simple path for migrating legacy C code to \CFA, in that objects can be moved from C-style initialization to \CFA gradually and individually. 991 493 It is worth noting that the use of unmanaged objects can be tricky to get right, since there is no guarantee that the proper invariants are established on an unmanaged object. 992 494 It is recommended that most objects be managed by sensible constructors and destructors, except where absolutely necessary. 993 495 994 When the user declares any constructor or destructor, the corresponding intrinsic/generated function and all field constructors for that type are hidden, so that they will not be found during expression resolution unlessthe user-defined function goes out of scope.995 Furthermore, if the user declares any constructor, then the intrinsic/generated default constructor is also hidden, making it so that objects of a type may not be default constructable.996 Th is closely mirrors the rule for implicit declaration of constructors in \CC, wherein the default constructor is implicitly declared if there is no user-declared constructor. % TODO: cite C++98 page 186??496 When a user declares any constructor or destructor, the corresponding intrinsic/generated function and all field constructors for that type are hidden, so that they are not found during expression resolution until the user-defined function goes out of scope. 
497 Furthermore, if the user declares any constructor, then the intrinsic/generated default constructor is also hidden, precluding default construction. 498 These semantics closely mirror the rule for implicit declaration of constructors in \CC, wherein the default constructor is implicitly declared if there is no user-declared constructor \cite[p.~186]{ANSI98:C++}. 997 499 \begin{cfacode} 998 500 struct S { int x, y; }; … … 1001 503 S s0, s1 = { 0 }, s2 = { 0, 2 }, s3 = s2; // okay 1002 504 { 1003 void ?{}(S * s, int i) { s->x = i*2; } 1004 S s4; // error 1005 S s5 = { 3 }; // okay 1006 S s6 = { 4, 5 }; // error 505 void ?{}(S * s, int i) { s->x = i*2; } // locally hide autogen ctors 506 S s4; // error, no default constructor 507 S s5 = { 3 }; // okay, local constructor 508 S s6 = { 4, 5 }; // error, no field constructor 1007 509 S s7 = s5; // okay 1008 510 } … … 1012 514 In this example, the inner scope declares a constructor from @int@ to @S@, which hides the default constructor and field constructors until the end of the scope. 1013 515 1014 When defining a constructor or destructor for a struct @S@, any members that are not explicitly constructed or destructed are implicitly constructed or destructed automatically.516 When defining a constructor or destructor for a structure @S@, any members that are not explicitly constructed or destructed are implicitly constructed or destructed automatically. 1015 517 If an explicit call is present, then that call is taken in preference to any implicitly generated call. 
1016 A consequence of this rule is that it is possible, unlike \CC, to precisely control the order of construction and destruction of sub objects on a per-constructor basis, whereas in \CC subobject initialization and destruction is always performed based on the declaration order.518 A consequence of this rule is that it is possible, unlike \CC, to precisely control the order of construction and destruction of sub-objects on a per-constructor basis, whereas in \CC sub-object initialization and destruction is always performed based on the declaration order. 1017 519 \begin{cfacode} 1018 520 struct A { … … 1033 535 } 1034 536 \end{cfacode} 1035 Finally, it is illegal for a sub object to be explicitly constructed for the first time after it is used for the first time.537 Finally, it is illegal for a sub-object to be explicitly constructed for the first time after it is used for the first time. 1036 538 If the translator cannot be reasonably sure that an object is constructed prior to its first use, but is constructed afterward, an error is emitted. 1037 More specifically, the translator searches the body of a constructor to ensure that every sub object is initialized.539 More specifically, the translator searches the body of a constructor to ensure that every sub-object is initialized. 
1038 540 \begin{cfacode} 1039 541 void ?{}(A * a, double x) { … … 1042 544 } 1043 545 \end{cfacode} 1044 However, if the translator sees a sub object used within the body of a constructor, but does not see a constructor call that uses the subobject as the target of a constructor, then the translator assumes the object is to be implicitly constructed (copy constructed in a copy constructor and default constructed in any other constructor).546 However, if the translator sees a sub-object used within the body of a constructor, but does not see a constructor call that uses the sub-object as the target of a constructor, then the translator assumes the object is to be implicitly constructed (copy constructed in a copy constructor and default constructed in any other constructor). 1045 547 \begin{cfacode} 1046 548 void ?{}(A * a) { … … 1058 560 } // z, y, w implicitly destructed, in this order 1059 561 \end{cfacode} 1060 If at any point, the @this@ parameter is passed directly as the target of another constructor, then it is assumed that constructor handles the initialization of all of the object's members and no implicit constructor calls are added. % TODO: confirm that this is correct. It might be possible to get subtle errors if you initialize some members then call another constructor... -- in fact, this is basically always wrong. if anything, I should check that such a constructor does not initialize any members, otherwise it'll always initialize the member twice (once locally, once by the called constructor).562 If at any point, the @this@ parameter is passed directly as the target of another constructor, then it is assumed that constructor handles the initialization of all of the object's members and no implicit constructor calls are added. 1061 563 To override this rule, \ateq can be used to force the translator to trust the programmer's discretion. 1062 564 This form of \ateq is not yet implemented. 
… … 1064 566 Despite great effort, some forms of C syntax do not work well with constructors in \CFA. 1065 567 In particular, constructor calls cannot contain designations (see \ref{sub:c_background}), since this is equivalent to allowing designations on the arguments to arbitrary function calls. 1066 In C, function prototypes are permitted to have arbitrary parameter names, including no names at all, which may have no connection to the actual names used at function definition.1067 Furthermore, a function prototype can be repeated an arbitrary number of times, each time using different names.1068 568 \begin{cfacode} 1069 569 // all legal forward declarations in C … … 1076 576 f(b:10, a:20, c:30); // which parameter is which? 1077 577 \end{cfacode} 578 In C, function prototypes are permitted to have arbitrary parameter names, including no names at all, which may have no connection to the actual names used at function definition. 579 Furthermore, a function prototype can be repeated an arbitrary number of times, each time using different names. 1078 580 As a result, it was decided that any attempt to resolve designated function calls with C's function prototype rules would be brittle, and thus it is not sensible to allow designations in constructor calls. 1079 % Many other languages do allow named arguments, such as Python and Scala, but they do not allow multiple arbitrarily named forward declarations of a function. 1080 1081 In addition, constructor calls cannot have a nesting depth greater than the number of array components in the type of the initialized object, plus one. 581 582 \begin{sloppypar} 583 In addition, constructor calls do not support unnamed nesting. 584 \begin{cfacode} 585 struct B { int x; }; 586 struct C { int y; }; 587 struct A { B b; C c; }; 588 void ?{}(A *, B); 589 void ?{}(A *, C); 590 591 A a = { 592 { 10 }, // construct B? 
- invalid 593 }; 594 \end{cfacode} 595 In C, nesting initializers means that the programmer intends to initialize sub-objects with the nested initializers. 596 The reason for this omission is to both simplify the mental model for using constructors, and to make initialization simpler for the expression resolver. 597 If this were allowed, it would be necessary for the expression resolver to decide whether each argument to the constructor call could initialize to some argument in one of the available constructors, making the problem highly recursive and potentially much more expensive. 598 That is, in the previous example the line marked as an error could mean construct using @?{}(A *, B)@ or with @?{}(A *, C)@, since the inner initializer @{ 10 }@ could be taken as an intermediate object of type @B@ or @C@. 599 In practice, however, there could be many objects that can be constructed from a given @int@ (or, indeed, any arbitrary parameter list), and thus a complete solution to this problem would require fully exploring all possibilities. 600 \end{sloppypar} 601 602 More precisely, constructor calls cannot have a nesting depth greater than the number of array dimensions in the type of the initialized object, plus one. 1082 603 For example, 1083 604 \begin{cfacode} … … 1091 612 { {14 }, { 15 } } // a2[1] 1092 613 }; 1093 A a3[4] = { 1094 { { 11 }, { 12 } }, // error 614 A a3[4] = { // 1 dimension => max depth 2 615 { { 11 }, { 12 } }, // error, three levels deep 1095 616 { 80 }, { 90 }, { 100 } 1096 617 } 1097 618 \end{cfacode} 1098 % TODO: in CFA if the array dimension is empty, no object constructors are added -- need to fix this.1099 619 The body of @A@ has been omitted, since only the constructor interfaces are important. 1100 In C, having a greater nesting depth means that the programmer intends to initialize subobjects with the nested initializer. 
1101 The reason for this omission is to both simplify the mental model for using constructors, and to make initialization simpler for the expression resolver. 1102 If this were allowed, it would be necessary for the expression resolver to decide whether each argument to the constructor call could initialize to some argument in one of the available constructors, making the problem highly recursive and potentially much more expensive. 1103 That is, in the previous example the line marked as an error could mean construct using @?{}(A *, A, A)@, since the inner initializer @{ 11 }@ could be taken as an intermediate object of type @A@ constructed with @?{}(A *, int)@. 1104 In practice, however, there could be many objects that can be constructed from a given @int@ (or, indeed, any arbitrary parameter list), and thus a complete solution to this problem would require fully exploring all possibilities. 620 1105 621 It should be noted that unmanaged objects can still make use of designations and nested initializers in \CFA. 622 It is simple to overcome this limitation for managed objects by making use of compound literals, so that the arguments to the constructor call are explicitly typed. 1106 623 1107 624 \subsection{Implicit Destructors} 1108 625 \label{sub:implicit_dtor} 1109 626 Destructors are automatically called at the end of the block in which the object is declared. 1110 In addition to this, destructors are automatically called when statements manipulate control flow to leave a block in which the object is declared, e.g., with return, break, continue, and goto statements.627 In addition to this, destructors are automatically called when statements manipulate control flow to leave a block in which the object is declared, \eg, with return, break, continue, and goto statements. 1111 628 The example below demonstrates a simple routine with multiple return statements. 
1112 629 \begin{cfacode} … … 1127 644 if (i == 2) return; // destruct x, y 1128 645 } // destruct y 1129 } 1130 \end{cfacode} 1131 1132 %% having this feels excessive, but it's here if necessary 1133 % This procedure generates the following code. 1134 % \begin{cfacode} 1135 % void f(int i){ 1136 % struct A x; 1137 % ?{}(&x); 1138 % { 1139 % struct A y; 1140 % ?{}(&y); 1141 % { 1142 % struct A z; 1143 % ?{}(&z); 1144 % { 1145 % if ((i==0)!=0) { 1146 % ^?{}(&z); 1147 % ^?{}(&y); 1148 % ^?{}(&x); 1149 % return; 1150 % } 1151 % } 1152 % if (((i==1)!=0) { 1153 % ^?{}(&z); 1154 % ^?{}(&y); 1155 % ^?{}(&x); 1156 % return ; 1157 % } 1158 % ^?{}(&z); 1159 % } 1160 1161 % if ((i==2)!=0) { 1162 % ^?{}(&y); 1163 % ^?{}(&x); 1164 % return; 1165 % } 1166 % ^?{}(&y); 1167 % } 1168 1169 % ^?{}(&x); 1170 % } 1171 % \end{cfacode} 646 } // destruct x 647 \end{cfacode} 1172 648 1173 649 The next example illustrates the use of simple continue and break statements and the manner that they interact with implicit destructors. … … 1183 659 \end{cfacode} 1184 660 Since a destructor call is automatically inserted at the end of the block, nothing special needs to happen to destruct @x@ in the case where control reaches the end of the loop. 1185 In the case where @i@ is @2@, the continue statement runs the loop update expression and attemp s to begin the next iteration of the loop.1186 Since continue is a C statement, which does not understand destructors, a destructor call is added just before the continue statementto ensure that @x@ is destructed.661 In the case where @i@ is @2@, the continue statement runs the loop update expression and attempts to begin the next iteration of the loop. 662 Since continue is a C statement, which does not understand destructors, it is transformed into a @goto@ statement that branches to the end of the loop, just before the block's destructors, to ensure that @x@ is destructed. 
1187 663 When @i@ is @3@, the break statement moves control to just past the end of the loop. 1188 Like the previous case,a destructor call for @x@ is inserted just before the break statement.1189 1190 \CFA also supports label led break and continue statements, which allow more precise manipulation of control flow.1191 Label led break and continue allow the programmer to specify which control structure to target by using a label attached to a control structure.664 Unlike the previous case, the destructor for @x@ cannot be reused, so a destructor call for @x@ is inserted just before the break statement. 665 666 \CFA also supports labeled break and continue statements, which allow more precise manipulation of control flow. 667 Labeled break and continue allow the programmer to specify which control structure to target by using a label attached to a control structure. 1192 668 \begin{cfacode}[emph={L1,L2}, emphstyle=\color{red}] 1193 669 L1: for (int i = 0; i < 10; i++) { 1194 670 A x; 1195 L2:for (int j = 0; j < 10; j++) {671 for (int j = 0; j < 10; j++) { 1196 672 A y; 1197 if (j == 0) { 1198 continue; // destruct y 1199 } else if (j == 1) { 1200 break; // destruct y 1201 } else if (i == 1) { 673 if (i == 1) { 1202 674 continue L1; // destruct y 1203 675 } else if (i == 2) { … … 1208 680 \end{cfacode} 1209 681 The statement @continue L1@ begins the next iteration of the outer for-loop. 
1210 Since the semantics of continue require the loop update expression to execute, control branches to the \emph{end}of the outer for loop, meaning that the block destructor for @x@ can be reused, and it is only necessary to generate the destructor for @y@.1211 Break, on the other hand, requires jumping out of the loop, so the destructors for both @x@ and @y@ are generated and inserted before the @break L1@ statement.682 Since the semantics of continue require the loop update expression to execute, control branches to the end of the outer for loop, meaning that the block destructor for @x@ can be reused, and it is only necessary to generate the destructor for @y@. 683 Break, on the other hand, requires jumping out of both loops, so the destructors for both @x@ and @y@ are generated and inserted before the @break L1@ statement. 1212 684 1213 685 Finally, an example which demonstrates goto. … … 1256 728 } 1257 729 \end{cfacode} 1258 Labelled break and continue are implemented in \CFA in terms of goto statements, so the more constrained forms are precisely goverened by these rules.730 All break and continue statements are implemented in \CFA in terms of goto statements, so the more constrained forms are precisely governed by these rules. 1259 731 1260 732 The next example demonstrates the error case. … … 1273 745 1274 746 \subsection{Implicit Copy Construction} 747 \label{s:implicit_copy_construction} 1275 748 When a function is called, the arguments supplied to the call are subject to implicit copy construction (and destruction of the generated temporary), and the return value is subject to destruction. 1276 749 When a value is returned from a function, the copy constructor is called to pass the value back to the call site. 1277 Exempt from these rules are intrinsic and built in functions.750 Exempt from these rules are intrinsic and built-in functions. 
1278 751 It should be noted that unmanaged objects are subject to copy constructor calls when passed as arguments to a function or when returned from a function, since they are not the \emph{target} of the copy constructor call. 1279 This is an important detail to bear in mind when using unmanaged objects, and could produce unexpected results when mixed with objects that are explicitly constructed. 752 That is, since the parameter is not marked as an unmanaged object using \ateq, it is copy constructed if it is returned by value or passed as an argument to another function, so to guarantee consistent behaviour, unmanaged objects must be copy constructed when passed as arguments. 753 These semantics are important to bear in mind when using unmanaged objects, and could produce unexpected results when mixed with objects that are explicitly constructed. 1280 754 \begin{cfacode} 1281 755 struct A; … … 1284 758 void ^?{}(A *); 1285 759 1286 A f(A x) {1287 return x; 760 A identity(A x) { // pass by value => need local copy 761 return x; // return by value => make call-site copy 1288 762 } 1289 763 1290 764 A y, z @= {}; 1291 identity(y); 1292 identity(z); 1293 \end{cfacode} 1294 Note that @z@ is copy constructedinto a temporary variable to be passed as an argument, which is also destructed after the call.1295 A special syntactic form, such as a variant of \ateq, could be implemented to specify at the call site that an argument should not be copy constructed, to regain some control for the C programmer. 765 identity(y); // copy construct y into x 766 identity(z); // copy construct z into x 767 \end{cfacode} 768 Note that unmanaged argument @z@ is logically copy constructed into managed parameter @x@; however, the translator must copy construct into a temporary variable to be passed as an argument, which is also destructed after the call. 
769 A compiler could by-pass the argument temporaries since it is in control of the calling conventions and knows exactly where the called-function's parameters live. 1296 770 1297 771 This generates the following 1298 772 \begin{cfacode} 1299 773 struct A f(struct A x){ 1300 struct A _retval_f; 1301 ?{}((&_retval_f), x); 774 struct A _retval_f; // return value 775 ?{}((&_retval_f), x); // copy construct return value 1302 776 return _retval_f; 1303 777 } 1304 778 1305 779 struct A y; 1306 ?{}(&y); 1307 struct A z = { 0 }; 1308 1309 struct A _tmp_cp1; // argument 1 1310 struct A _tmp_cp_ret0; // return value 1311 _tmp_cp_ret0=f((?{}(&_tmp_cp1, y) , _tmp_cp1)), _tmp_cp_ret0; 1312 ^?{}(&_tmp_cp_ret0); // return value 1313 ^?{}(&_tmp_cp1); // argument 1 1314 1315 struct A _tmp_cp2; // argument 1 1316 struct A _tmp_cp_ret1; // return value 1317 _tmp_cp_ret1=f((?{}(&_tmp_cp2, z), _tmp_cp2)), _tmp_cp_ret1; 1318 ^?{}(&_tmp_cp_ret1); // return value 1319 ^?{}(&_tmp_cp2); // argument 1 780 ?{}(&y); // default construct 781 struct A z = { 0 }; // C default 782 783 struct A _tmp_cp1; // argument 1 784 struct A _tmp_cp_ret0; // return value 785 _tmp_cp_ret0=f( 786 (?{}(&_tmp_cp1, y) , _tmp_cp1) // argument is a comma expression 787 ), _tmp_cp_ret0; // return value for cascading 788 ^?{}(&_tmp_cp_ret0); // destruct return value 789 ^?{}(&_tmp_cp1); // destruct argument 1 790 791 struct A _tmp_cp2; // argument 1 792 struct A _tmp_cp_ret1; // return value 793 _tmp_cp_ret1=f( 794 (?{}(&_tmp_cp2, z), _tmp_cp2) // argument is a common expression 795 ), _tmp_cp_ret1; // return value for cascading 796 ^?{}(&_tmp_cp_ret1); // destruct return value 797 ^?{}(&_tmp_cp2); // destruct argument 1 1320 798 ^?{}(&y); 1321 799 \end{cfacode} 1322 800 1323 A known issue with this implementation is that the return value of a function is not guaranteed to have the same address for its entire lifetime. 
1324 Specifically, since @_retval_f@ is allocated and constructed in @f@ then returned by value, the internal data is bitwise copied into the caller's stack frame. 801 A special syntactic form, such as a variant of \ateq, can be implemented to specify at the call site that an argument should not be copy constructed, to regain some control for the C programmer. 802 \begin{cfacode} 803 identity(z@); // do not copy construct argument 804 // - will copy construct/destruct return value 805 A@ identity_nocopy(A @ x) { // argument not copy constructed or destructed 806 return x; // not copy constructed 807 // return type marked @ => not destructed 808 } 809 \end{cfacode} 810 It should be noted that reference types will allow specifying that a value does not need to be copied, however reference types do not provide a means of preventing implicit copy construction from uses of the reference, so the problem is still present when passing or returning the reference by value. 811 812 A known issue with this implementation is that the argument and return value temporaries are not guaranteed to have the same address for their entire lifetimes. 813 In the previous example, since @_retval_f@ is allocated and constructed in @f@, then returned by value, the internal data is bitwise copied into the caller's stack frame. 1325 814 This approach works out most of the time, because typically destructors need to only access the fields of the object and recursively destroy. 1326 It is currently the case that constructors and destructors which use the @this@ pointer as a unique identifier to store data externally willnot work correctly for return value objects.1327 Thus is itnot safe to rely on an object's @this@ pointer to remain constant throughout execution of the program.815 It is currently the case that constructors and destructors that use the @this@ pointer as a unique identifier to store data externally do not work correctly for return value objects. 
816 Thus, it is currently not safe to rely on an object's @this@ pointer to remain constant throughout execution of the program. 1328 817 \begin{cfacode} 1329 818 A * external_data[32]; … … 1341 830 } 1342 831 } 832 833 A makeA() { 834 A x; // stores &x in external_data 835 return x; 836 } 837 makeA(); // return temporary has a different address than x 838 // equivalent to: 839 // A _tmp; 840 // _tmp = makeA(), _tmp; 841 // ^?{}(&_tmp); 1343 842 \end{cfacode} 1344 843 In the above example, a global array of pointers is used to keep track of all of the allocated @A@ objects. 1345 Due to copying on return, the current object being destructed will not exist in the array if an @A@ object is ever returned by value from a function.1346 1347 This problem could be solved in the translator by mutating the function signatures so that the return value is moved into the parameter list.844 Due to copying on return, the current object being destructed does not exist in the array if an @A@ object is ever returned by value from a function, such as in @makeA@. 845 846 This problem could be solved in the translator by changing the function signatures so that the return value is moved into the parameter list. 1348 847 For example, the translator could restructure the code like so 1349 848 \begin{cfacode} … … 1363 862 \end{cfacode} 1364 863 This transformation provides @f@ with the address of the return variable so that it can be constructed into directly. 1365 It is worth pointing out that this kind of signature rewriting already occurs in polymorphic functions whichreturn by value, as discussed in \cite{Bilson03}.1366 A key difference in this case is that every function would need to be rewritten like this, since types can switch between managed and unmanaged at different scope levels, e.g.864 It is worth pointing out that this kind of signature rewriting already occurs in polymorphic functions that return by value, as discussed in \cite{Bilson03}. 
865 A key difference in this case is that every function would need to be rewritten like this, since types can switch between managed and unmanaged at different scope levels, \eg 1367 866 \begin{cfacode} 1368 867 struct A { int v; }; 1369 A x; // unmanaged 868 A x; // unmanaged, since only trivial constructors are available 1370 869 { 1371 870 void ?{}(A * a) { ... } … … 1375 874 A z; // unmanaged 1376 875 \end{cfacode} 1377 Hence there is not enough information to determine at function declaration to determinewhether a type is managed or not, and thus it is the case that all signatures have to be rewritten to account for possible copy constructor and destructor calls.876 Hence there is not enough information to determine at function declaration whether a type is managed or not, and thus it is the case that all signatures have to be rewritten to account for possible copy constructor and destructor calls. 1378 877 Even with this change, it would still be possible to declare backwards compatible function prototypes with an @extern "C"@ block, which allows for the definition of C-compatible functions within \CFA code, however this would require actual changes to the way code inside of an @extern "C"@ function is generated as compared with normal code generation. 1379 Furthermore, it is n't possible to overload C functions, so using @extern "C"@ to declare functions is of limited use.1380 1381 It would be possible to regain some control by adding an attribute to struct s whichspecifies whether they can be managed or not (perhaps \emph{manageable} or \emph{unmanageable}), and to emit an error in the case that a constructor or destructor is declared for an unmanageable type.1382 Ideally, struct s should be manageable by default, since otherwise the default case becomes more verbose.878 Furthermore, it is not possible to overload C functions, so using @extern "C"@ to declare functions is of limited use. 
879 880 It would be possible to regain some control by adding an attribute to structures that specifies whether they can be managed or not (perhaps \emph{manageable} or \emph{unmanageable}), and to emit an error in the case that a constructor or destructor is declared for an unmanageable type. 881 Ideally, structures should be manageable by default, since otherwise the default case becomes more verbose. 1383 882 This means that in general, function signatures would have to be rewritten, and in a select few cases the signatures would not be rewritten. 1384 883 \begin{cfacode} 1385 __attribute__((manageable)) struct A { ... }; // can declare c onstructors1386 __attribute__((unmanageable)) struct B { ... }; // cannot declare c onstructors1387 struct C { ... }; // can declare c onstructors884 __attribute__((manageable)) struct A { ... }; // can declare ctors 885 __attribute__((unmanageable)) struct B { ... }; // cannot declare ctors 886 struct C { ... }; // can declare ctors 1388 887 1389 888 A f(); // rewritten void f(A *); … … 1391 890 C h(); // rewritten void h(C *); 1392 891 \end{cfacode} 1393 An alternative is to insteadmake the attribute \emph{identifiable}, which states that objects of this type use the @this@ parameter as an identity.1394 This strikes more closely to the visib ile problem, in that only types marked as identifiable would need to have the return value moved into the parameter list, and every other type could remain the same.892 An alternative is to make the attribute \emph{identifiable}, which states that objects of this type use the @this@ parameter as an identity. 893 This strikes more closely to the visible problem, in that only types marked as identifiable would need to have the return value moved into the parameter list, and every other type could remain the same. 1395 894 Furthermore, no restrictions would need to be placed on whether objects can be constructed. 1396 895 \begin{cfacode} 1397 __attribute__((identifiable)) struct A { ... 
}; // can declare c onstructors1398 struct B { ... }; // can declare c onstructors896 __attribute__((identifiable)) struct A { ... }; // can declare ctors 897 struct B { ... }; // can declare ctors 1399 898 1400 899 A f(); // rewritten void f(A *); … … 1402 901 \end{cfacode} 1403 902 1404 Ultimately, this is the type of transformation that a real compiler would make when generating assembly code.1405 Since a compiler has full control over its calling conventions, it can seamlessly allow passing the return parameter without outwardly changing the signature of a routine.1406 As such, it has been decided that this issue is not currently a priority .903 Ultimately, both of these are patchwork solutions. 904 Since a real compiler has full control over its calling conventions, it can seamlessly allow passing the return parameter without outwardly changing the signature of a routine. 905 As such, it has been decided that this issue is not currently a priority and will be fixed when a full \CFA compiler is implemented. 1407 906 1408 907 \section{Implementation} 1409 908 \subsection{Array Initialization} 1410 Arrays are a special case in the C type 909 Arrays are a special case in the C type-system. 1411 910 C arrays do not carry around their size, making it impossible to write a standalone \CFA function that constructs or destructs an array while maintaining the standard interface for constructors and destructors. 1412 911 Instead, \CFA defines the initialization and destruction of an array recursively. … … 1520 1019 1521 1020 \subsection{Global Initialization} 1522 In standard C, global variables can only be initialized to compile-time constant expressions. 1523 This places strict limitations on the programmer's ability to control the default values of objects. 1021 In standard C, global variables can only be initialized to compile-time constant expressions, which places strict limitations on the programmer's ability to control the default values of objects. 
1524 1022 In \CFA, constructors and destructors are guaranteed to be run on global objects, allowing arbitrary code to be run before and after the execution of the main routine. 1525 1023 By default, objects within a translation unit are constructed in declaration order, and destructed in the reverse order. 1526 1024 The default order of construction of objects amongst translation units is unspecified. 1527 % TODO: not yet implemented, but g++ provides attribute init_priority, which allows specifying the order of global construction on a per object basis 1528 % https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#C_002b_002b-Attributes 1529 % suggestion: implement this in CFA by picking objects with a specified priority and pulling them into their own init functions (could even group them by priority level -> map<int, list<ObjectDecl*>>) and pull init_priority forward into constructor and destructor attributes with the same priority level 1530 It is, however, guaranteed that any global objects in the standard library are initialized prior to the initialization of any object in the user program. 1531 1532 This feature is implemented in the \CFA translator by grouping every global constructor call into a function with the GCC attribute \emph{constructor}, which performs most of the heavy lifting. % CITE: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes 1025 It is, however, guaranteed that any global objects in the standard library are initialized prior to the initialization of any object in a user program. 1026 1027 This feature is implemented in the \CFA translator by grouping every global constructor call into a function with the GCC attribute \emph{constructor}, which performs most of the heavy lifting \cite[6.31.1]{GCCExtensions}. 1533 1028 A similar function is generated with the \emph{destructor} attribute, which handles all global destructor calls. 
1534 1029 At the time of writing, initialization routines in the library are specified with priority \emph{101}, which is the highest priority level that GCC allows, whereas initialization routines in the user's code are implicitly given the default priority level, which ensures they have a lower priority than any code with a specified priority level. 1535 This mechanism allows arbitrarily complicated initialization to occur before any user code runs, making it possible for library designers to initialize their modules without requiring the user to call specific startup or tear down routines.1030 This mechanism allows arbitrarily complicated initialization to occur before any user code runs, making it possible for library designers to initialize their modules without requiring the user to call specific startup or tear-down routines. 1536 1031 1537 1032 For example, given the following global declarations. … … 1559 1054 \end{cfacode} 1560 1055 1056 % https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#C_002b_002b-Attributes 1057 % suggestion: implement this in CFA by picking objects with a specified priority and pulling them into their own init functions (could even group them by priority level -> map<int, list<ObjectDecl*>>) and pull init_priority forward into constructor and destructor attributes with the same priority level 1058 GCC provides an attribute @init_priority@ in \CC, which allows specifying the relative priority for initialization of global objects on a per-object basis. 1059 A similar attribute can be implemented in \CFA by pulling marked objects into global constructor/destructor-attribute functions with the specified priority. 1060 For example, 1061 \begin{cfacode} 1062 struct A { ... 
}; 1063 void ?{}(A *, int); 1064 void ^?{}(A *); 1065 __attribute__((init_priority(200))) A x = { 123 }; 1066 \end{cfacode} 1067 would generate 1068 \begin{cfacode} 1069 A x; 1070 __attribute__((constructor(200))) __init_x() { 1071 ?{}(&x, 123); // construct x with priority 200 1072 } 1073 __attribute__((destructor(200))) __destroy_x() { 1074 ^?{}(&x); // destruct x with priority 200 1075 } 1076 \end{cfacode} 1077 1078 \subsection{Static Local Variables} 1562 1079 In standard C, it is possible to mark variables that are local to a function with the @static@ storage class. 1563 Unlike normal local variables, a @static@ local variable is defined to live for the entire duration of the program, so that each call to the function has access to the same variable with the same address and value as it had in the previous call to the function. % TODO: mention dynamic loading caveat??1564 Much like global variables, in C @static@ variables must be initialized to a \emph{compile-time constant value} so that a compiler is able to create storage for the variable and initialize it before the program begins running.1080 Unlike normal local variables, a @static@ local variable is defined to live for the entire duration of the program, so that each call to the function has access to the same variable with the same address and value as it had in the previous call to the function. 1081 Much like global variables, @static@ variables can only be initialized to a \emph{compile-time constant value} so that a compiler is able to create storage for the variable and initialize it at compile-time. 1565 1082 1566 1083 Yet again, this rule is too restrictive for a language with constructors and destructors. 
1567 Instead, \CFA modifies the definition of a @static@ local variable so that objects are guaranteed to be live from the time control flow reaches their declaration, until the end of the program, since the initializer expression is not necessarily a compile-time constant, but can depend on the current execution state of the function.1568 Since standard C does not allow access to a @static@ local variable before the first time control flow reaches the declaration, this restrictiondoes not preclude any valid C code.1084 Since the initializer expression is not necessarily a compile-time constant and can depend on the current execution state of the function, \CFA modifies the definition of a @static@ local variable so that objects are guaranteed to be live from the time control flow reaches their declaration, until the end of the program. 1085 Since standard C does not allow access to a @static@ local variable before the first time control flow reaches the declaration, this change does not preclude any valid C code. 1569 1086 Local objects with @static@ storage class are only implicitly constructed and destructed once for the duration of the program. 1570 1087 The object is constructed when its declaration is reached for the first time. … … 1573 1090 Construction of @static@ local objects is implemented via an accompanying @static bool@ variable, which records whether the variable has already been constructed. 1574 1091 A conditional branch checks the value of the companion @bool@, and if the variable has not yet been constructed then the object is constructed. 
1575 The object's destructor is scheduled to be run when the program terminates using @atexit@ , and the companion @bool@'s value is set so that subsequent invocations of the function willnot reconstruct the object.1092 The object's destructor is scheduled to be run when the program terminates using @atexit@ \footnote{When using the dynamic linker, it is possible to dynamically load and unload a shared library. Since glibc 2.2.3 \cite{atexit}, functions registered with @atexit@ within the shared library are called when unloading the shared library. As such, static local objects can be destructed using this mechanism even in shared libraries on Linux systems.}, and the companion @bool@'s value is set so that subsequent invocations of the function do not reconstruct the object. 1576 1093 Since the parameter to @atexit@ is a parameter-less function, some additional tweaking is required. 1577 1094 First, the @static@ variable must be hoisted up to global scope and uniquely renamed to prevent name clashes with other global objects. 1578 Second, a function is built which calls the destructor for the newly hoisted variable. 1095 If necessary, a local structure may need to be hoisted, as well. 1096 Second, a function is built that calls the destructor for the newly hoisted variable. 1579 1097 Finally, the newly generated function is registered with @atexit@, instead of registering the destructor directly. 1580 1098 Since @atexit@ calls functions in the reverse order in which they are registered, @static@ local variables are guaranteed to be destructed in the reverse order that they are constructed, which may differ between multiple executions of the same program. 1581 1582 1099 Extending the previous example 1583 1100 \begin{cfacode} … … 1630 1147 \end{cfacode} 1631 1148 1632 \subsection{Constructor Expressions} 1633 In \CFA, it is possible to use a constructor as an expression. 1634 Like other operators, the function name @?{}@ matches its operator syntax. 
1635 For example, @(&x){}@ calls the default constructor on the variable @x@, and produces @&x@ as a result. 1636 The significance of constructors as expressions rather than as statements is that the result of a constructor expression can be used as part of a larger expression. 1637 A key example is the use of constructor expressions to initialize the result of a call to standard C routine @malloc@. 1638 \begin{cfacode} 1639 struct X { ... }; 1640 void ?{}(X *, double); 1641 X * x = malloc(sizeof(X)){ 1.5 }; 1642 \end{cfacode} 1643 In this example, @malloc@ dynamically allocates storage and initializes it using a constructor, all before assigning it into the variable @x@. 1644 If this extension is not present, constructing dynamically allocated objects is much more cumbersome, requiring separate initialization of the pointer and initialization of the pointed-to memory. 1645 \begin{cfacode} 1646 X * x = malloc(sizeof(X)); 1647 x{ 1.5 }; 1648 \end{cfacode} 1649 Not only is this verbose, but it is also more error prone, since this form allows maintenance code to easily sneak in between the initialization of @x@ and the initialization of the memory that @x@ points to. 1650 This feature is implemented via a transformation producing the value of the first argument of the constructor, since constructors do not themselves have a return value. 1651 Since this transformation results in two instances of the subexpression, care is taken to allocate a temporary variable to hold the result of the subexpression in the case where the subexpression may contain side effects. 1652 The previous example generates the following code. 
1653 \begin{cfacode} 1654 struct X *_tmp_ctor; 1655 struct X *x = ?{}((_tmp_ctor=((_tmp_cp_ret0= 1656 malloc(sizeof(struct X))), _tmp_cp_ret0))), 1.5), _tmp_ctor); 1657 \end{cfacode} 1658 It should be noted that this technique is not exclusive to @malloc@, and allows a user to write a custom allocator that can be idiomatically used in much the same way as a constructed @malloc@ call. 1659 1660 It is also possible to use operator syntax with destructors. 1661 Unlike constructors, operator syntax with destructors is a statement and thus does not produce a value, since the destructed object is invalidated by the use of a destructor. 1662 For example, \lstinline!^(&x){}! calls the destructor on the variable @x@. 1149 \subsection{Polymorphism} 1150 As mentioned in section \ref{sub:polymorphism}, \CFA currently has 3 type-classes that are used to designate polymorphic data types: @otype@, @dtype@, and @ftype@. 1151 In previous versions of \CFA, @otype@ was syntactic sugar for @dtype@ with known size/alignment information and an assignment function. 1152 That is, 1153 \begin{cfacode} 1154 forall(otype T) 1155 void f(T); 1156 \end{cfacode} 1157 was equivalent to 1158 \begin{cfacode} 1159 forall(dtype T | sized(T) | { T ?=?(T *, T); }) 1160 void f(T); 1161 \end{cfacode} 1162 This allows easily specifying constraints that are common to all complete object-types very simply. 1163 1164 Now that \CFA has constructors and destructors, more of a complete object's behaviour can be specified than was previously possible. 1165 As such, @otype@ has been augmented to include assertions for a default constructor, copy constructor, and destructor. 
1166 That is, the previous example is now equivalent to 1167 \begin{cfacode} 1168 forall(dtype T | sized(T) | 1169 { T ?=?(T *, T); void ?{}(T *); void ?{}(T *, T); void ^?{}(T *); }) 1170 void f(T); 1171 \end{cfacode} 1172 These additions allow @f@'s body to create and destroy objects of type @T@, and pass objects of type @T@ as arguments to other functions, following the normal \CFA rules. 1173 A point of note here is that objects can be missing default constructors (and eventually other functions through deleted functions), so it is important for \CFA programmers to think carefully about the operations needed by their function, as to not over-constrain the acceptable parameter types and prevent potential reuse. -
doc/rob_thesis/intro.tex
r221c2de7 r154fdc8 5 5 \section{\CFA Background} 6 6 \label{s:background} 7 \CFA is a modernextension to the C programming language.7 \CFA \footnote{Pronounced ``C-for-all'', and written \CFA or Cforall.} is a modern non-object-oriented extension to the C programming language. 8 8 As it is an extension of C, there is already a wealth of existing C code and principles that govern the design of the language. 9 9 Among the goals set out in the original design of \CFA, four points stand out \cite{Bilson03}. … … 16 16 Therefore, these design principles must be kept in mind throughout the design and development of new language features. 17 17 In order to appeal to existing C programmers, great care must be taken to ensure that new features naturally feel like C. 18 The remainder of this section describes some of the important new features that currently exist in \CFA, to give the reader the necessary context in which the new features presented in this thesis must dovetail. % TODO: harmonize with? 18 These goals ensure existing C code-bases can be converted to \CFA incrementally with minimal effort, and C programmers can productively generate \CFA code without training beyond the features being used. 19 Unfortunately, \CC is actively diverging from C, so incremental additions require significant effort and training, coupled with multiple legacy design-choices that cannot be updated. 20 21 The remainder of this section describes some of the important new features that currently exist in \CFA, to give the reader the necessary context in which the new features presented in this thesis must dovetail. 19 22 20 23 \subsection{C Background} … … 29 32 A a1 = { 1, .y:7, 6 }; 30 33 A a2[4] = { [2]:a0, [0]:a1, { .z:3 } }; 31 // equ vialent to34 // equivalent to 32 35 // A a0 = { 0, 8, 0, 1 }; 33 36 // A a1 = { 1, 0, 7, 6 }; … … 36 39 Designations allow specifying the field to initialize by name, rather than by position. 
37 40 Any field not explicitly initialized is initialized as if it had static storage duration \cite[p.~141]{C11}. 38 A designator specifies the current object for initialization, and as such any undesignated sub objects pick up where the last initialization left off.39 For example, in the initialization of @a1@, the initializer of @y@ is @7@, and the unnamed initializer @6@ initializes the next sub object, @z@.40 Later initializers override earlier initializers, so a sub object for which there is more than one initializer is only initailized by its last initializer.41 Th is can be seen in the initialization of @a0@, where @x@ is designated twice, and thus initialized to @8@.42 Note that in \CFA, designations use a colon separator, rather than an equals sign as in C .41 A designator specifies the current object for initialization, and as such any undesignated sub-objects pick up where the last initialization left off. 42 For example, in the initialization of @a1@, the initializer of @y@ is @7@, and the unnamed initializer @6@ initializes the next sub-object, @z@. 43 Later initializers override earlier initializers, so a sub-object for which there is more than one initializer is only initialized by its last initializer. 44 These semantics can be seen in the initialization of @a0@, where @x@ is designated twice, and thus initialized to @8@. 45 Note that in \CFA, designations use a colon separator, rather than an equals sign as in C, because this syntax is one of the few places that conflicts with the new language features. 43 46 44 47 C also provides \emph{compound literal} expressions, which provide a first-class mechanism for creating unnamed objects. … … 53 56 \end{cfacode} 54 57 Compound literals create an unnamed object, and result in an lvalue, so it is legal to assign a value into a compound literal or to take its address \cite[p.~86]{C11}. 
55 Syntactically, compound literals look like a cast operator followed by a brace-enclosed initializer, but semantically are different from a C cast, which only applies basic conversions and is never an lvalue.58 Syntactically, compound literals look like a cast operator followed by a brace-enclosed initializer, but semantically are different from a C cast, which only applies basic conversions and coercions and is never an lvalue. 56 59 57 60 \subsection{Overloading} … … 59 62 Overloading is the ability to specify multiple entities with the same name. 60 63 The most common form of overloading is function overloading, wherein multiple functions can be defined with the same name, but with different signatures. 61 Like in \CC, \CFA allows overloading based both on the number of parameters and on the types of parameters. 64 C provides a small amount of built-in overloading, \eg + is overloaded for the basic types. 65 Like in \CC, \CFA allows user-defined overloading based both on the number of parameters and on the types of parameters. 62 66 \begin{cfacode} 63 67 void f(void); // (1) … … 91 95 92 96 There are times when a function should logically return multiple values. 93 Since a function in standard C can only return a single value, a programmer must either take in additional return values by address, or the function's designer must create a wrapper structure t0 package multiple return-values. 97 Since a function in standard C can only return a single value, a programmer must either take in additional return values by address, or the function's designer must create a wrapper structure to package multiple return-values. 
98 For example, the first approach: 94 99 \begin{cfacode} 95 100 int f(int * ret) { // returns a value through parameter ret … … 101 106 int res1 = g(&res2); // explicitly pass storage 102 107 \end{cfacode} 103 The former solution is awkward because it requires the caller to explicitly allocate memory for $n$ result variables, even if they are only temporary values used as a subexpression, or even not used at all. 108 is awkward because it requires the caller to explicitly allocate memory for $n$ result variables, even if they are only temporary values used as a subexpression, or even not used at all. 109 The second approach: 104 110 \begin{cfacode} 105 111 struct A { … … 112 118 ... res3.x ... res3.y ... // use result values 113 119 \end{cfacode} 114 The latter approach requires the callerto either learn the field names of the structure or learn the names of helper routines to access the individual return values.115 Both solutions are syntactically unnatural.116 117 In \CFA, it is possible to directly declare a function returning mu tliple values.118 This provides important semantic information to the caller, since return values are only for output.119 \begin{cfacode} 120 [int, int] f() { // don't need to create anew type120 is awkward because the caller has to either learn the field names of the structure or learn the names of helper routines to access the individual return values. 121 Both approaches are syntactically unnatural. 122 123 In \CFA, it is possible to directly declare a function returning multiple values. 124 This extension provides important semantic information to the caller, since return values are only for output. 125 \begin{cfacode} 126 [int, int] f() { // no new type 121 127 return [123, 37]; 122 128 } 123 129 \end{cfacode} 124 However, the ability to return multiple values requires a syntax for accepting the results from a function. 
130 However, the ability to return multiple values is useless without a syntax for accepting the results from the function. 131 
125 132 In standard C, return values are most commonly assigned directly into local variables, or are used as the arguments to another function call. 126 133 \CFA allows both of these contexts to accept multiple return values. … … 148 155 g(f()); // selects (2) 149 156 \end{cfacode} 150 In this example, the only possible call to @f@ that can produce the two @int@s required by @g@ is the second option.151 A similar reasoning holds for assigning into multiple variables.157 In this example, the only possible call to @f@ that can produce the two @int@s required for assigning into the variables @x@ and @y@ is the second option. 158 A similar reasoning holds for calling the function @g@. 152 159 
153 160 In \CFA, overloading also applies to operator names, known as \emph{operator overloading}. … … 163 170 \begin{cfacode} 164 171 struct A { int i; }; 165 int ?+?(A x, A y); 172 int ?+?(A x, A y); // '?'s represent operands 166 173 bool ?<?(A x, A y); 167 174 \end{cfacode} 168 Notably, the only difference i n this example is syntax.175 Notably, the only difference is syntax. 169 176 Most of the operators supported by \CC for operator overloading are also supported in \CFA. 170 Of notable exception are the logical operators ( e.g. @||@), the sequence operator (i.e. @,@), and the member-access operators (e.g.@.@ and \lstinline{->}).177 Of notable exception are the logical operators (\eg @||@), the sequence operator (\ie @,@), and the member-access operators (\eg @.@ and \lstinline{->}). 171 178 
172 179 Finally, \CFA also permits overloading variable identifiers. 173 180 This feature is not available in \CC. 174 \begin{cfacode} % TODO: pick something better than x? max, zero, one?181 \begin{cfacode} 175 182 struct Rational { int numer, denom; }; 176 183 int x = 3; // (1) … … 186 193 In this example, there are three definitions of the variable @x@. 
187 194 Based on the context, \CFA attempts to choose the variable whose type best matches the expression context. 195 When used judiciously, this feature allows names like @MAX@, @MIN@, and @PI@ to apply across many types. 188 196 189 197 Finally, the values @0@ and @1@ have special status in standard C. … … 197 205 } 198 206 \end{cfacode} 199 Every if 207 Every if- and iteration-statement in C compares the condition with @0@, and every increment and decrement operator is semantically equivalent to adding or subtracting the value @1@ and storing the result. 200 208 Due to these rewrite rules, the values @0@ and @1@ have the types \zero and \one in \CFA, which allow for overloading various operations that connect to @0@ and @1@ \footnote{In the original design of \CFA, @0@ and @1@ were overloadable names \cite[p.~7]{cforall}.}. 201 The types \zero and \one have special built 209 The types \zero and \one have special built-in implicit conversions to the various integral types, and a conversion to pointer types for @0@, which allows standard C code involving @0@ and @1@ to work as normal. 202 210 \begin{cfacode} 203 211 // lvalue is similar to returning a reference in C++ … … 240 248 template<typename T> 241 249 T sum(T *arr, int n) { 242 T t; 250 T t; // default construct => 0 243 251 for (; n > 0; n--) t += arr[n-1]; 244 252 return t; … … 258 266 \end{cfacode} 259 267 The first thing to note here is that immediately following the declaration of @otype T@ is a list of \emph{type assertions} that specify restrictions on acceptable choices of @T@. 260 In particular, the assertions above specify that there must be a an assignment from \zero to @T@ and an addition assignment operator from @T@ to @T@.268 In particular, the assertions above specify that there must be an assignment from \zero to @T@ and an addition assignment operator from @T@ to @T@. 
261 269 The existence of an assignment operator from @T@ to @T@ and the ability to create an object of type @T@ are assumed implicitly by declaring @T@ with the @otype@ type-class. 262 270 In addition to @otype@, there are currently two other type-classes. … … 278 286 A major difference between the approaches of \CC and \CFA to polymorphism is that the set of assumed properties for a type is \emph{explicit} in \CFA. 279 287 One of the major limiting factors of \CC's approach is that templates cannot be separately compiled. 280 In contrast, the explicit nature of assertions allows \CFA's polymorphic functions to be separately compiled. 288 In contrast, the explicit nature of assertions allows \CFA's polymorphic functions to be separately compiled, as the function prototype states all necessary requirements separate from the implementation. 289 For example, the prototype for the previous sum function is 290 \begin{cfacode} 291 forall(otype T | **R**{ T ?=?(T *, zero_t); T ?+=?(T *, T); }**R**) 292 T sum(T *arr, int n); 293 \end{cfacode} 294 With this prototype, a caller in another translation unit knows all of the constraints on @T@, and thus knows all of the operations that need to be made available to @sum@. 281 295 282 296 In \CFA, a set of assertions can be factored into a \emph{trait}. … … 293 307 This capability allows specifying the same set of assertions in multiple locations, without the repetition and likelihood of mistakes that come with manually writing them out for each function declaration. 294 308 309 An interesting application of return-type resolution and polymorphism is a type-safe version of @malloc@. 310 \begin{cfacode} 311 forall(dtype T | sized(T)) 312 T * malloc() { 313 return (T*)malloc(sizeof(T)); // call C malloc 314 } 315 int * x = malloc(); // malloc(sizeof(int)) 316 double * y = malloc(); // malloc(sizeof(double)) 317 318 struct S { ... 
}; 319 S * s = malloc(); // malloc(sizeof(S)) 320 \end{cfacode} 321 The built-in trait @sized@ ensures that size and alignment information for @T@ is available in the body of @malloc@ through @sizeof@ and @_Alignof@ expressions respectively. 322 In calls to @malloc@, the type @T@ is bound based on call-site information, allowing \CFA code to allocate memory without the potential for errors introduced by manually specifying the size of the allocated block. 323 295 324 \section{Invariants} 296 % TODO: discuss software engineering benefits of ctor/dtors: {pre/post} conditions, invariants 297 % an important invariant is the state of the environment (memory, resources) 298 % some objects pass their contract to the object user 299 An \emph{invariant} is a logical assertion that true for some duration of a program's execution. 325 An \emph{invariant} is a logical assertion that is true for some duration of a program's execution. 300 326 Invariants help a programmer to reason about code correctness and prove properties of programs. 301 327 328 \begin{sloppypar} 302 329 In object-oriented programming languages, type invariants are typically established in a constructor and maintained throughout the object's lifetime. 303 Th is is typically achieved through a combination of accesscontrol modifiers and a restricted interface.330 These assertions are typically achieved through a combination of access-control modifiers and a restricted interface. 304 331 Typically, data which requires the maintenance of an invariant is hidden from external sources using the \emph{private} modifier, which restricts reads and writes to a select set of trusted routines, including member functions. 305 332 It is these trusted routines that perform all modifications to internal data in a way that is consistent with the invariant, by ensuring that the invariant holds true at the end of the routine call. 
333 \end{sloppypar} 306 334 307 335 In C, the @assert@ macro is often used to ensure invariants are true. 308 336 Using @assert@, the programmer can check a condition and abort execution if the condition is not true. 309 This is a powerful tool thatforces the programmer to deal with logical inconsistencies as they occur.337 This powerful tool forces the programmer to deal with logical inconsistencies as they occur. 310 338 For production, assertions can be removed by simply defining the preprocessor macro @NDEBUG@, making it simple to ensure that assertions are 0-cost for a performance intensive application. 311 339 \begin{cfacode} … … 354 382 \end{dcode} 355 383 The D compiler is able to assume that assertions and invariants hold true and perform optimizations based on those assumptions. 356 357 An important invariant is the state of the execution environment, including the heap, the open file table, the state of global variables, etc. 358 Since resources are finite, it is important to ensure that objects clean up properly when they are finished, restoring the execution environment to a stable state so that new objects can reuse resources. 384 Note, these invariants are internal to the type's correct behaviour. 385 386 Types also have external invariants with the state of the execution environment, including the heap, the open-file table, the state of global variables, etc. 387 Since resources are finite and shared (concurrency), it is important to ensure that objects clean up properly when they are finished, restoring the execution environment to a stable state so that new objects can reuse resources. 359 388 360 389 \section{Resource Management} … … 366 395 The program stack grows and shrinks automatically with each function call, as needed for local variables. 367 396 However, whenever a program needs a variable to outlive the block it is created in, the storage must be allocated dynamically with @malloc@ and later released with @free@. 
368 This pattern is extended to more complex objects, such as files and sockets, which also outlive the block where they are created, but at their core isresource management.369 Once allocated storage escapes a block, the responsibility for deallocating the storage is not specified in a function's type, that is, that the return value is owned by the caller.397 This pattern is extended to more complex objects, such as files and sockets, which can also outlive the block where they are created, and thus require their own resource management. 398 Once allocated storage escapes\footnote{In garbage collected languages, such as Java, escape analysis \cite{Choi:1999:EAJ:320385.320386} is used to determine when dynamically allocated objects are strictly contained within a function, which allows the optimizer to allocate them on the stack.} a block, the responsibility for deallocating the storage is not specified in a function's type, that is, that the return value is owned by the caller. 370 399 This implicit convention is provided only through documentation about the expectations of functions. 371 400 372 401 In other languages, a hybrid situation exists where resources escape the allocation block, but ownership is precisely controlled by the language. 373 This pattern requires a strict interface and protocol for a data structure, where the protocol consistsof a pre-initialization and a post-termination call, and all intervening access is done via interface routines.374 This kind of encapsulation is popular in object-oriented programming languages, and like the stack, it contains a significant portion of resourcemanagement cases.402 This pattern requires a strict interface and protocol for a data structure, consisting of a pre-initialization and a post-termination call, and all intervening access is done via interface routines. 
403 This kind of encapsulation is popular in object-oriented programming languages, and like the stack, it takes care of a significant portion of resource-management cases. 375 404 376 405 For example, \CC directly supports this pattern through class types and an idiom known as RAII \footnote{Resource Acquisition is Initialization} by means of constructors and destructors. … … 380 409 On the other hand, destructors provide a simple mechanism for tearing down an object and resetting the environment in which the object lived. 381 410 RAII ensures that if all resources are acquired in a constructor and released in a destructor, there are no resource leaks, even in exceptional circumstances. 382 A type with at least one non-trivial constructor or destructor will henceforth bereferred to as a \emph{managed type}.383 In the context of \CFA, a non-trivial constructor is either a user defined constructor or an auto 384 385 For the remaining resource ownership cases, programmer must follow a brittle, explicit protocol for freeing resources or an implicit porotocol implemented viathe programming language.411 A type with at least one non-trivial constructor or destructor is henceforth referred to as a \emph{managed type}. 412 In the context of \CFA, a non-trivial constructor is either a user defined constructor or an auto-generated constructor that calls a non-trivial constructor. 413 414 For the remaining resource ownership cases, a programmer must follow a brittle, explicit protocol for freeing resources or an implicit protocol enforced by the programming language. 386 415 387 416 In garbage collected languages, such as Java, resources are largely managed by the garbage collector. 388 Still, garbage collectors aretypically focus only on memory management.417 Still, garbage collectors typically focus only on memory management. 389 418 There are many kinds of resources that the garbage collector does not understand, such as sockets, open files, and database connections. 
390 419 In particular, Java supports \emph{finalizers}, which are similar to destructors. 391 Sadly, finalizers come with far fewer guarantees, to the point where a completely conforming JVM may never call a single finalizer. % TODO: citation JVM spec; http://stackoverflow.com/a/2506514/2386739 392 Due to operating system resource limits, this is unacceptable for many long running tasks. % TODO: citation?393 Instead, the paradigm in Java requires programmers manually keep track of all resource\emph{except} memory, leading many novices and experts alike to forget to close files, etc.394 Complicating the picture, uncaught exceptions can cause control flow to change dramatically, leaking a resource which appears on first glance to be closed.420 Unfortunately, finalizers are only guaranteed to be called before an object is reclaimed by the garbage collector \cite[p.~373]{Java8}, which may not happen if memory use is not contentious. 421 Due to operating-system resource-limits, this is unacceptable for many long running programs. 422 Instead, the paradigm in Java requires programmers to manually keep track of all resources \emph{except} memory, leading many novices and experts alike to forget to close files, etc. 423 Complicating the picture, uncaught exceptions can cause control flow to change dramatically, leaking a resource that appears on first glance to be released. 395 424 \begin{javacode} 396 425 void write(String filename, String msg) throws Exception { … … 403 432 } 404 433 \end{javacode} 405 Any line in this program can throw an exception. 406 This leads to a profusion of finally blocks around many function bodies, since it isn't always clear when an exception may be thrown. 434 Any line in this program can throw an exception, which leads to a profusion of finally blocks around many function bodies, since it is not always clear when an exception may be thrown. 
407 435 \begin{javacode} 408 436 public void write(String filename, String msg) throws Exception { … … 422 450 \end{javacode} 423 451 In Java 7, a new \emph{try-with-resources} construct was added to alleviate most of the pain of working with resources, but ultimately it still places the burden squarely on the user rather than on the library designer. 424 Furthermore, for complete safety this pattern requires nested objects to be declared separately, otherwise resources which can throw an exception on close can leak nested resources. % TODO: cite oracle article http://www.oracle.com/technetwork/articles/java/trywithresources-401775.html?452 Furthermore, for complete safety this pattern requires nested objects to be declared separately, otherwise resources that can throw an exception on close can leak nested resources \cite{TryWithResources}. 425 453 \begin{javacode} 426 454 public void write(String filename, String msg) throws Exception { 427 try ( 455 try ( // try-with-resources 428 456 FileOutputStream out = new FileOutputStream(filename); 429 457 FileOutputStream log = new FileOutputStream("log.txt"); … … 434 462 } 435 463 \end{javacode} 436 On the other hand, the Java compiler generates more code if more resources are declared, meaning that users must be more familiar with each type and library designers must provide better documentation. 464 Variables declared as part of a try-with-resources statement must conform to the @AutoClosable@ interface, and the compiler implicitly calls @close@ on each of the variables at the end of the block. 465 Depending on when the exception is raised, both @out@ and @log@ are null, @log@ is null, or both are non-null, therefore, the cleanup for these variables at the end is automatically guarded and conditionally executed to prevent null-pointer exceptions. 
466 467 While Rust \cite{Rust} does not enforce the use of a garbage collector, it does provide a manual memory management environment, with a strict ownership model that automatically frees allocated memory and prevents common memory management errors. 468 In particular, a variable has ownership over its associated value, which is freed automatically when the owner goes out of scope. 469 Furthermore, values are \emph{moved} by default on assignment, rather than copied, which invalidates the previous variable binding. 470 \begin{rustcode} 471 struct S { 472 x: i32 473 } 474 let s = S { x: 123 }; 475 let z = s; // move, invalidate s 476 println!("{}", s.x); // error, s has been moved 477 \end{rustcode} 478 Types can be made copyable by implementing the @Copy@ trait. 479 480 Rust allows multiple unowned views into an object through references, also known as borrows, provided that a reference does not outlive its referent. 481 A mutable reference is allowed only if it is the only reference to its referent, preventing data race errors and iterator invalidation errors. 482 \begin{rustcode} 483 let mut x = 10; 484 { 485 let y = &x; 486 let z = &x; 487 println!("{} {}", y, z); // prints 10 10 488 } 489 { 490 let y = &mut x; 491 // let z1 = &x; // not allowed, have mutable reference 492 // let z2 = &mut x; // not allowed, have mutable reference 493 *y = 5; 494 println!("{}", y); // prints 5 495 } 496 println!("{}", x); // prints 5 497 \end{rustcode} 498 Since references are not owned, they do not release resources when they go out of scope. 499 There is no runtime cost imposed on these restrictions, since they are enforced at compile-time. 500 501 Rust provides RAII through the @Drop@ trait, allowing arbitrary code to execute when the object goes out of scope, providing automatic clean up of auxiliary resources, much like a \CC program. 
502 \begin{rustcode} 503 struct S { 504 name: &'static str 505 } 506 507 impl Drop for S { // RAII for S 508 fn drop(&mut self) { // destructor 509 println!("dropped {}", self.name); 510 } 511 } 512 513 { 514 let x = S { name: "x" }; 515 let y = S { name: "y" }; 516 } // prints "dropped y" "dropped x" 517 \end{rustcode} 437 518 438 519 % D has constructors and destructors that are worth a mention (under classes) https://dlang.org/spec/spec.html … … 442 523 The programming language, D, also manages resources with constructors and destructors \cite{D}. 443 524 In D, @struct@s are stack allocated and managed via scoping like in \CC, whereas @class@es are managed automatically by the garbage collector. 444 Like Java, using the garbage collector means that destructors may never be called, requiring the use of finally statements to ensure dynamically allocated resources that are not managed by the garbage collector, such as open files, are cleaned up.525 Like Java, using the garbage collector means that destructors are called indeterminately, requiring the use of finally statements to ensure dynamically allocated resources that are not managed by the garbage collector, such as open files, are cleaned up. 445 526 Since D supports RAII, it is possible to use the same techniques as in \CC to ensure that resources are released in a timely manner. 446 Finally, D provides a scope guard statement, which allows an arbitrary statement to be executed at normal scope exit with \emph{success}, at exceptional scope exit with \emph{failure}, or at normal and exceptional scope exit with \emph{exit}. % cite? https://dlang.org/spec/statement.html#ScopeGuardStatement 447 It has been shown that the \emph{exit} form of the scope guard statement can be implemented in a library in \CC. % cite: http://www.drdobbs.com/184403758 448 449 % TODO: discussion of lexical scope vs. dynamic 450 % see Peter's suggestions 451 % RAII works in both cases. 
Guaranteed to work in stack case, works in heap case if root is deleted (but it's dangerous to rely on this, because of exceptions) 527 Finally, D provides a scope guard statement, which allows an arbitrary statement to be executed at normal scope exit with \emph{success}, at exceptional scope exit with \emph{failure}, or at normal and exceptional scope exit with \emph{exit}. % https://dlang.org/spec/statement.html#ScopeGuardStatement 528 It has been shown that the \emph{exit} form of the scope guard statement can be implemented in a library in \CC \cite{ExceptSafe}. 529 530 To provide managed types in \CFA, new kinds of constructors and destructors are added to \CFA and discussed in Chapter 2. 452 531 453 532 \section{Tuples} 454 533 \label{s:Tuples} 455 In mathematics, tuples are finite-length sequences which, unlike sets, a llow duplicate elements.456 In programming languages, tuples are a construct thatprovide fixed-sized heterogeneous lists of elements.534 In mathematics, tuples are finite-length sequences which, unlike sets, are ordered and allow duplicate elements. 535 In programming languages, tuples provide fixed-sized heterogeneous lists of elements. 457 536 Many programming languages have tuple constructs, such as SETL, \KWC, ML, and Scala. 458 537 … … 462 541 Adding tuples to \CFA has previously been explored by Esteves \cite{Esteves04}. 463 542 464 The design of tuples in \KWC took much of its inspiration from SETL .543 The design of tuples in \KWC took much of its inspiration from SETL \cite{SETL}. 465 544 SETL is a high-level mathematical programming language, with tuples being one of the primary data types. 466 545 Tuples in SETL allow a number of operations, including subscripting, dynamic expansion, and multiple assignment. 
… … 470 549 \begin{cppcode} 471 550 tuple<int, int, int> triple(10, 20, 30); 472 get<1>(triple); // access component 1 => 30551 get<1>(triple); // access component 1 => 20 473 552 474 553 tuple<int, double> f(); … … 482 561 Tuples are simple data structures with few specific operations. 483 562 In particular, it is possible to access a component of a tuple using @std::get<N>@. 484 Another interesting feature is @std::tie@, which creates a tuple of references, which allows assigningthe results of a tuple-returning function into separate local variables, without requiring a temporary variable.563 Another interesting feature is @std::tie@, which creates a tuple of references, allowing assignment of the results of a tuple-returning function into separate local variables, without requiring a temporary variable. 485 564 Tuples also support lexicographic comparisons, making it simple to write aggregate comparators using @std::tie@. 486 565 487 There is a proposal for \CCseventeen called \emph{structured bindings} , that introduces new syntax to eliminate the need to pre-declare variables and use @std::tie@ for binding the results from a function call. % TODO: cite http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/p0144r0.pdf566 There is a proposal for \CCseventeen called \emph{structured bindings} \cite{StructuredBindings}, that introduces new syntax to eliminate the need to pre-declare variables and use @std::tie@ for binding the results from a function call. 
488 567 \begin{cppcode} 489 568 tuple<int, double> f(); … … 492 571 tuple<int, int, int> triple(10, 20, 30); 493 572 auto & [t1, t2, t3] = triple; 494 t2 = 0; // changes triple573 t2 = 0; // changes middle element of triple 495 574 496 575 struct S { int x; double y; }; … … 498 577 auto [x, y] = s; // unpack s 499 578 \end{cppcode} 500 Structured bindings allow unpacking any struct with all public non-static data members into fresh local variables.579 Structured bindings allow unpacking any structure with all public non-static data members into fresh local variables. 501 580 The use of @&@ allows declaring new variables as references, which is something that cannot be done with @std::tie@, since \CC references do not support rebinding. 502 This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must documented with some other mechanism.581 This extension requires the use of @auto@ to infer the types of the new variables, so complicated expressions with a non-obvious type must be documented with some other mechanism. 503 582 Furthermore, structured bindings are not a full replacement for @std::tie@, as it always declares new variables. 504 583 505 Like \CC, D provides tuples through a library variadic template struct.584 Like \CC, D provides tuples through a library variadic-template structure. 506 585 In D, it is possible to name the fields of a tuple type, which creates a distinct type. 
507 \begin{dcode} % TODO: cite http://dlang.org/phobos/std_typecons.html 586 % http://dlang.org/phobos/std_typecons.html 587 \begin{dcode} 508 588 Tuple!(float, "x", float, "y") point2D; 509 Tuple!(float, float) float2; // different type s589 Tuple!(float, float) float2; // different type from point2D 510 590 511 591 point2D[0]; // access first element … … 521 601 The @expand@ method produces the components of the tuple as a list of separate values, making it possible to call a function that takes $N$ arguments using a tuple with $N$ components. 522 602 523 Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML .603 Tuples are a fundamental abstraction in most functional programming languages, such as Standard ML \cite{sml}. 524 604 A function in SML always accepts exactly one argument. 525 605 There are two ways to mimic multiple argument functions: the first through currying and the second by accepting tuple arguments. … … 533 613 \end{smlcode} 534 614 Here, the function @binco@ appears to take 2 arguments, but it actually takes a single argument which is implicitly decomposed via pattern matching. 535 Tuples are a foundational tool in SML, allowing the creation of arbitrarily complex structured datatypes.536 537 Scala, like \CC, provides tuple types through the standard library .615 Tuples are a foundational tool in SML, allowing the creation of arbitrarily-complex structured data-types. 616 617 Scala, like \CC, provides tuple types through the standard library \cite{Scala}. 538 618 Scala provides tuples of size 1 through 22 inclusive through generic data structures. 539 619 Tuples support named access and subscript access, among a few other operations. 
540 620 \begin{scalacode} 541 val a = new Tuple3 [Int, String, Double](0, "Text", 2.1)// explicit creation542 val b = (6, 'a', 1.1f) // syntactic sugar forTuple3[Int, Char, Float]621 val a = new Tuple3(0, "Text", 2.1) // explicit creation 622 val b = (6, 'a', 1.1f) // syntactic sugar: Tuple3[Int, Char, Float] 543 623 val (i, _, d) = triple // extractor syntax, ignore middle element 544 624 … … 547 627 \end{scalacode} 548 628 In Scala, tuples are primarily used as simple data structures for carrying around multiple values or for returning multiple values from a function. 549 The 22-element restriction is an odd and arbitrary choice, but in practice it does n't cause problems since large tuples are uncommon.629 The 22-element restriction is an odd and arbitrary choice, but in practice it does not cause problems since large tuples are uncommon. 550 630 Subscript access is provided through the @productElement@ method, which returns a value of the top-type @Any@, since it is impossible to receive a more precise type from a general subscripting method due to type erasure. 551 631 The disparity between named access beginning at @_1@ and subscript access starting at @0@ is likewise an oddity, but subscript access is typically avoided since it discards type information. … … 553 633 554 634 555 \Csharp has similarly strange limitations, allowing tuples of size up to 7 components. % TODO: citehttps://msdn.microsoft.com/en-us/library/system.tuple(v=vs.110).aspx635 \Csharp also has tuples, but has similarly strange limitations, allowing tuples of size up to 7 components. % https://msdn.microsoft.com/en-us/library/system.tuple(v=vs.110).aspx 556 636 The officially supported workaround for this shortcoming is to nest tuples in the 8th component. 557 637 \Csharp allows accessing a component of a tuple by using the field @Item$N$@ for components 1 through 7, and @Rest@ for the nested tuple. 
558 638 559 560 % TODO: cite 5.3 https://docs.python.org/3/tutorial/datastructures.html 561 In Python, tuples are immutable sequences that provide packing and unpacking operations. 639 In Python \cite{Python}, tuples are immutable sequences that provide packing and unpacking operations. 562 640 While the tuple itself is immutable, and thus does not allow the assignment of components, there is nothing preventing a component from being internally mutable. 563 641 The components of a tuple can be accessed by unpacking into multiple variables, indexing, or via field name, like D. 564 642 Tuples support multiple assignment through a combination of packing and unpacking, in addition to the common sequence operations. 565 643 566 % TODO: cite https://developer.apple.com/library/content/documentation/Swift/Conceptual/Swift_Programming_Language/Types.html#//apple_ref/doc/uid/TP40014097-CH31-ID448 567 Swift, like D, provides named tuples, with components accessed by name, index, or via extractors. 644 Swift \cite{Swift}, like D, provides named tuples, with components accessed by name, index, or via extractors. 568 645 Tuples are primarily used for returning multiple values from a function. 569 646 In Swift, @Void@ is an alias for the empty tuple, and there are no single element tuples. 647 648 Tuples comparable to those described above are added to \CFA and discussed in Chapter 3. 570 649 571 650 \section{Variadic Functions} … … 581 660 printf("%d %g %c %s", 10, 3.5, 'X', "a string"); 582 661 \end{cfacode} 583 Through the use of a format string, @printf@ allowsC programmers to print any of the standard C data types.662 Through the use of a format string, C programmers can communicate argument type information to @printf@, allowing C programmers to print any of the standard C data types. 
584 663 Still, @printf@ is extremely limited, since the format codes are specified by the C standard, meaning users cannot define their own format codes to extend @printf@ for new data types or new formatting rules. 585 664 665 \begin{sloppypar} 586 666 C provides manipulation of variadic arguments through the @va_list@ data type, which abstracts details of the manipulation of variadic arguments. 587 667 Since the variadic arguments are untyped, it is up to the function to interpret any data that is passed in. 588 668 Additionally, the interface to manipulate @va_list@ objects is essentially limited to advancing to the next argument, without any built-in facility to determine when the last argument is read. 589 This requires the use of an \emph{argument descriptor} to pass information to the function about the structure of the argument list, including the number of arguments and their types.669 This limitation requires the use of an \emph{argument descriptor} to pass information to the function about the structure of the argument list, including the number of arguments and their types. 590 670 The format string in @printf@ is one such example of an argument descriptor. 591 671 \begin{cfacode} … … 618 698 Furthermore, if the user makes a mistake, compile-time checking is typically restricted to standard format codes and their corresponding types. 619 699 In general, this means that C's variadic functions are not type-safe, making them difficult to use properly. 700 \end{sloppypar} 620 701 621 702 % When arguments are passed to a variadic function, they undergo \emph{default argument promotions}. … … 641 722 A parameter pack matches 0 or more elements, which can be types or expressions depending on the context. 642 723 Like other templates, variadic template functions rely on an implicit set of constraints on a type, in this example a @print@ routine. 
643 That is, it is possible to use the @f@ routine anyany type provided there is a corresponding @print@ routine, making variadic templates fully open to extension, unlike variadic functions in C.724 That is, it is possible to use the @f@ routine on any type provided there is a corresponding @print@ routine, making variadic templates fully open to extension, unlike variadic functions in C. 644 725 645 726 Recent \CC standards (\CCfourteen, \CCseventeen) expand on the basic premise by allowing variadic template variables and providing convenient expansion syntax to remove the need for recursion in some cases, amongst other things. … … 672 753 Unfortunately, Java's use of nominal inheritance means that types must explicitly inherit from classes or interfaces in order to be considered a subclass. 673 754 The combination of these two issues greatly restricts the usefulness of variadic functions in Java. 755 756 Type-safe variadic functions are added to \CFA and discussed in Chapter 4. -
doc/rob_thesis/thesis-frontpgs.tex
r221c2de7 r154fdc8 24 24 25 25 \Large 26 Rob Schluntz \\26 Robert Schluntz \\ 27 27 28 28 \vspace*{3.0cm} … … 43 43 \vspace*{1.0cm} 44 44 45 \copyright\ Rob Schluntz 2017 \\45 \copyright\ Robert Schluntz 2017 \\ 46 46 \end{center} 47 47 \end{titlepage} … … 76 76 \begin{center}\textbf{Abstract}\end{center} 77 77 78 % \CFA is a modern extension to the C programming language. 79 % Some of the features of \CFA include parametric polymorphism, overloading, and . 80 TODO 78 \CFA is a modern, non-object-oriented extension of the C programming language. 79 This thesis addresses several critical deficiencies of C, notably: resource management, a limited function-return mechanism, and unsafe variadic functions. 80 To solve these problems, two fundamental language features are introduced: tuples and constructors/destructors. 81 While these features exist in prior programming languages, the contribution of this work is engineering these features into a highly complex type system. 82 C is an established language with a dedicated user-base. 83 An important goal is to add new features in a way that naturally feels like C, to appeal to this core user-base, and due to huge amounts of legacy code, maintaining backwards compatibility is crucial. 81 84 82 85 \cleardoublepage 83 86 %\newpage 84 87 85 % A C K N O W L E D G E M E N T S86 % -------------------------------88 % % A C K N O W L E D G E M E N T S 89 % % ------------------------------- 87 90 88 \begin{center}\textbf{Acknowledgements}\end{center}91 % \begin{center}\textbf{Acknowledgements}\end{center} 89 92 90 % I would like to thank all the little people who made this possible.91 TODO92 \cleardoublepage93 % \newpage93 % % I would like to thank all the little people who made this possible. 
94 % TODO 95 % \cleardoublepage 96 % %\newpage 94 97 95 % D E D I C A T I O N96 % -------------------98 % % D E D I C A T I O N 99 % % ------------------- 97 100 98 \begin{center}\textbf{Dedication}\end{center}101 % \begin{center}\textbf{Dedication}\end{center} 99 102 100 % This is dedicated to the one I love.101 TODO102 \cleardoublepage103 % \newpage103 % % This is dedicated to the one I love. 104 % TODO 105 % \cleardoublepage 106 % %\newpage 104 107 105 108 % T A B L E O F C O N T E N T S … … 119 122 %\newpage 120 123 121 % L I S T O F F I G U R E S122 % -----------------------------123 \addcontentsline{toc}{chapter}{List of Figures}124 \listoffigures125 \cleardoublepage126 \phantomsection % allows hyperref to link to the correct page127 % \newpage124 % % L I S T O F F I G U R E S 125 % % ----------------------------- 126 % \addcontentsline{toc}{chapter}{List of Figures} 127 % \listoffigures 128 % \cleardoublepage 129 % \phantomsection % allows hyperref to link to the correct page 130 % %\newpage 128 131 129 132 % L I S T O F S Y M B O L S -
doc/rob_thesis/thesis.tex
r221c2de7 r154fdc8 68 68 \documentclass[letterpaper,12pt,titlepage,oneside,final]{book} 69 69 70 % For PDF, suitable for double-sided printing, change the PrintVersion variable below 71 % to "true" and use this \documentclass line instead of the one above: 72 % \documentclass[letterpaper,12pt,titlepage,openright,twoside,final]{book} 73 70 74 \usepackage[T1]{fontenc} % allow Latin1 (extended ASCII) characters 71 75 \usepackage{textcomp} 72 76 % \usepackage[utf8]{inputenc} 73 \usepackage[latin1]{inputenc}77 % \usepackage[latin1]{inputenc} 74 78 \usepackage{fullpage,times,comment} 75 79 % \usepackage{epic,eepic} … … 93 97 \interfootnotelinepenalty=10000 94 98 95 % For PDF, suitable for double-sided printing, change the PrintVersion variable below96 % to "true" and use this \documentclass line instead of the one above:97 %\documentclass[letterpaper,12pt,titlepage,openright,twoside,final]{book}98 99 99 % Some LaTeX commands I define for my own nomenclature. 100 100 % If you have to, it's better to change nomenclature once here than in a … … 136 136 pdffitwindow=false, % window fit to page when opened 137 137 pdfstartview={FitH}, % fits the width of the page to the window 138 pdftitle={ uWaterloo\ LaTeX\ Thesis\ Template}, % title: CHANGE THIS TEXT!138 pdftitle={Resource Management and Tuples in \CFA}, % title: CHANGE THIS TEXT! 139 139 pdfauthor={Rob Schluntz}, % author: CHANGE THIS TEXT! and uncomment this line 140 140 % pdfsubject={Subject}, % subject: CHANGE THIS TEXT! and uncomment this line … … 225 225 \input{tuples} 226 226 227 \input{variadic} 228 227 229 \input{conclusions} 228 230 … … 282 284 \addcontentsline{toc}{chapter}{\textbf{References}} 283 285 284 \bibliography{cfa }286 \bibliography{cfa,thesis} 285 287 % Tip 5: You can create multiple .bib files to organize your references. 286 288 % Just list them all in the \bibliogaphy command, separated by commas (no spaces). -
doc/rob_thesis/tuples.tex
r221c2de7 r154fdc8 2 2 \chapter{Tuples} 3 3 %====================================================================== 4 5 \section{Introduction}6 % TODO: named return values are not currently implemented in CFA - tie in with named tuples? (future work)7 % TODO: no passing input parameters by assignment, instead will have reference types => this is not a very C-like model and greatly complicates syntax for likely little gain (and would cause confusion with already supported return-by-rerefence)8 % TODO: tuples are allowed in expressions, exact meaning is defined by operator overloading (e.g. can add tuples). An important caveat to note is that it is currently impossible to allow adding two triples but prevent adding a pair with a quadruple (single flattening/structuring conversions are implicit, only total number of components matters). May be able to solve this with more nuanced conversion rules (future work)9 % TODO: benefits (conclusion) by Till: reduced number of variables and statements; no specified order of execution for multiple assignment (more optimzation freedom); can store parameter lists in variable; MRV routines (natural code); more convenient assignment statements; simple and efficient access of record fields; named return values more legible and efficient in use of storage10 4 11 5 \section{Multiple-Return-Value Functions} … … 14 8 This restriction results in code which emulates functions with multiple return values by \emph{aggregation} or by \emph{aliasing}. 15 9 In the former situation, the function designer creates a record type that combines all of the return values into a single type. 16 For example, consider a function returning the most frequently occuring letter in a string, and its frequency. 17 % TODO: consider simplifying the example! 
18 % Two things I like about this example: 19 % * it uses different types to illustrate why an array is insufficient (this is not necessary, but is nice) 20 % * it's complicated enough to show the uninitialized pitfall that exists in the aliasing example. 21 % Still, it may be a touch too complicated. Is there a simpler example with these two properties? 10 For example, consider a function returning the most frequently occurring letter in a string, and its frequency. 11 This example is complex enough to illustrate that an array is insufficient, since arrays are homogeneous, and demonstrates a potential pitfall that exists with aliasing. 22 12 \begin{cfacode} 23 13 struct mf_ret { … … 73 63 const char * str = "hello world"; 74 64 char ch; // pre-allocate return value 75 int freq = most_frequent(str, &ch); // pass return value as parameter65 int freq = most_frequent(str, &ch); // pass return value as out parameter 76 66 printf("%s -- %d %c\n", str, freq, ch); 77 67 \end{cfacode} 78 Notably, using this approach, the caller is directly responsible for allocating storage for the additional temporary return values. 79 This complicates the call site with a sequence of variable declarations leading up to the call. 68 Notably, using this approach, the caller is directly responsible for allocating storage for the additional temporary return values, which complicates the call site with a sequence of variable declarations leading up to the call. 80 69 Also, while a disciplined use of @const@ can give clues about whether a pointer parameter is going to be used as an out parameter, it is not immediately obvious from only the routine signature whether the callee expects such a parameter to be initialized before the call. 81 70 Furthermore, while many C routines that accept pointers are designed so that it is safe to pass @NULL@ as a parameter, there are many C routines that are not null-safe. 
82 71 On a related note, C does not provide a standard mechanism to state that a parameter is going to be used as an additional return value, which makes the job of ensuring that a value is returned more difficult for the compiler. 83 There is a subtle bug in the previous example, in that @ret_ch@ is never assigned for a string that does not contain any letters, which can lead to undefined behaviour. 72 Interestingly, there is a subtle bug in the previous example, in that @ret_ch@ is never assigned for a string that does not contain any letters, which can lead to undefined behaviour. 73 In this particular case, it turns out that the frequency return value also doubles as an error code, where a frequency of 0 means the character return value should be ignored. 74 Still, not every routine with multiple return values should be required to return an error code, and error codes are easily ignored, so this is not a satisfying solution. 84 75 As with the previous approach, this technique can simulate multiple return values, but in practice it is verbose and error prone. 85 76 … … 90 81 The expression resolution phase of the \CFA translator ensures that the correct form is used depending on the values being returned and the return type of the current function. 91 82 A multiple-returning function with return type @T@ can return any expression that is implicitly convertible to @T@. 
92 Using the running example, the @most_frequent@ function can be written inusing multiple return values as such,83 Using the running example, the @most_frequent@ function can be written using multiple return values as such, 93 84 \begin{cfacode} 94 85 [int, char] most_frequent(const char * str) { 95 86 char freqs [26] = { 0 }; 96 87 int ret_freq = 0; 97 char ret_ch = 'a'; 88 char ret_ch = 'a'; // arbitrary default value for consistent results 98 89 for (int i = 0; str[i] != '\0'; ++i) { 99 90 if (isalpha(str[i])) { // only count letters … … 109 100 } 110 101 \end{cfacode} 111 This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type .102 This approach provides the benefits of compile-time checking for appropriate return statements as in aggregation, but without the required verbosity of declaring a new named type, which precludes the bug seen with out-parameters. 112 103 113 104 The addition of multiple-return-value functions necessitates a syntax for accepting multiple values at the call-site. … … 136 127 In this case, there is only one option for a function named @most_frequent@ that takes a string as input. 137 128 This function returns two values, one @int@ and one @char@. 138 There are four options for a function named @process@, but only two whichaccept two arguments, and of those the best match is (3), which is also an exact match.129 There are four options for a function named @process@, but only two that accept two arguments, and of those the best match is (3), which is also an exact match. 139 130 This expression first calls @most_frequent("hello world")@, which produces the values @3@ and @'l'@, which are fed directly to the first and second parameters of (3), respectively. 140 131 … … 148 139 The previous expression has 3 \emph{components}. 
149 140 Each component in a tuple expression can be any \CFA expression, including another tuple expression. 150 % TODO: Tuple expressions can appear anywhere that any other expression can appear (...?)151 141 The order of evaluation of the components in a tuple expression is unspecified, to allow a compiler the greatest flexibility for program optimization. 152 142 It is, however, guaranteed that each component of a tuple expression is evaluated for side-effects, even if the result is not used. 153 143 Multiple-return-value functions can equivalently be called \emph{tuple-returning functions}. 154 % TODO: does this statement still apply, and if so to what extent?155 % Tuples are a compile-time phenomenon and have little to no run-time presence.156 144 157 145 \subsection{Tuple Variables} … … 166 154 These variables can be used in any of the contexts where a tuple expression is allowed, such as in the @printf@ function call. 167 155 As in the @process@ example, the components of the tuple value are passed as separate parameters to @printf@, allowing very simple printing of tuple expressions. 168 If the individual components are required, they can be extractedwith a simple assignment, as in previous examples.156 One way to access the individual components is with a simple assignment, as in previous examples. 169 157 \begin{cfacode} 170 158 int freq; … … 221 209 In the call to @f@, @x@ is implicitly flattened so that the components of @x@ are passed as the two arguments to @f@. 222 210 For the call to @g@, the values @y@ and @10@ are structured into a single argument of type @[int, int]@ to match the type of the parameter of @g@. 
223 Finally, in the call to @h@, @ y@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@.224 The flexible structure of tuples permits a simple and expressive function 211 Finally, in the call to @h@, @x@ is flattened to yield an argument list of length 3, of which the first component of @x@ is passed as the first parameter of @h@, and the second component of @x@ and @y@ are structured into the second argument of type @[int, int]@. 212 The flexible structure of tuples permits a simple and expressive function-call syntax to work seamlessly with both single- and multiple-return-value functions, and with any number of arguments of arbitrarily complex structure. 225 213 226 214 In \KWC \cite{Buhr94a,Till89}, a precursor to \CFA, there were 4 tuple coercions: opening, closing, flattening, and structuring. 227 215 Opening coerces a tuple value into a tuple of values, while closing converts a tuple of values into a single tuple value. 228 Flattening coerces a nested tuple into a flat tuple, i.e.it takes a tuple with tuple components and expands it into a tuple with only non-tuple components.229 Structuring moves in the opposite direction, i.e.it takes a flat tuple value and provides structure by introducing nested tuple components.216 Flattening coerces a nested tuple into a flat tuple, \ie it takes a tuple with tuple components and expands it into a tuple with only non-tuple components. 217 Structuring moves in the opposite direction, \ie it takes a flat tuple value and provides structure by introducing nested tuple components. 230 218 231 219 In \CFA, the design has been simplified to require only the two conversions previously described, which trigger only in function call and return situations. 
… … 254 242 \label{s:TupleAssignment} 255 243 An assignment where the left side of the assignment operator has a tuple type is called tuple assignment. 256 There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called Multiple and MassAssignment, respectively.244 There are two kinds of tuple assignment depending on whether the right side of the assignment operator has a tuple type or a non-tuple type, called \emph{Multiple} and \emph{Mass} Assignment, respectively. 257 245 \begin{cfacode} 258 246 int x; … … 272 260 A mass assignment assigns the value $R$ to each $L_i$. 273 261 For a mass assignment to be valid, @?=?(&$L_i$, $R$)@ must be a well-typed expression. 274 Th is differs from C cascading assignment (e.g.@a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment.262 These semantics differ from C cascading assignment (\eg @a=b=c@) in that conversions are applied to $R$ in each individual assignment, which prevents data loss from the chain of conversions that can happen during a cascading assignment. 275 263 For example, @[y, x] = 3.14@ performs the assignments @y = 3.14@ and @x = 3.14@, which results in the value @3.14@ in @y@ and the value @3@ in @x@. 276 264 On the other hand, the C cascading assignment @y = x = 3.14@ performs the assignments @x = 3.14@ and @y = x@, which results in the value @3@ in @x@, and as a result the value @3@ in @y@ as well. … … 288 276 These semantics allow cascading tuple assignment to work out naturally in any context where a tuple is permitted. 289 277 These semantics are a change from the original tuple design in \KWC \cite{Till89}, wherein tuple assignment was a statement that allows cascading assignments as a special case. 
290 This decision wa made in an attempt to fix what was seen as a problem with assignment, wherein it can be used in many different locations, such as in function-call argument position.278 Restricting tuple assignment to statements was an attempt to fix what was seen as a problem with side-effects, wherein assignment can be used in many different locations, such as in function-call argument position. 291 279 While permitting assignment as an expression does introduce the potential for subtle complexities, it is impossible to remove assignment expressions from \CFA without affecting backwards compatibility. 292 280 Furthermore, there are situations where permitting assignment as an expression improves readability by keeping code succinct and reducing repetition, and complicating the definition of tuple assignment puts a greater cognitive burden on the user. … … 303 291 \end{cfacode} 304 292 The tuple expression begins with a mass assignment of @1.5@ into @[b, d]@, which assigns @1.5@ into @b@, which is truncated to @1@, and @1.5@ into @d@, producing the tuple @[1, 1.5]@ as a result. 305 That tuple is used as the right side of the multiple assignment ( i.e., @[c, a] = [1, 1.5]@) that assigns @1@ into @c@ and @1.5@ into @a@, which is truncated to @1@, producing the result @[1, 1]@.293 That tuple is used as the right side of the multiple assignment (\ie, @[c, a] = [1, 1.5]@) that assigns @1@ into @c@ and @1.5@ into @a@, which is truncated to @1@, producing the result @[1, 1]@. 306 294 Finally, the tuple @[1, 1]@ is used as an expression in the call to @f@. 
307 295 … … 315 303 void ?{}(S *, S); // (4) 316 304 317 [S, S] x = [3, 6.28]; // uses (2), (3) 318 [S, S] y; // uses (1), (1) 319 [S, S] z = x.0; // uses (4), (4) 320 \end{cfacode} 321 In this example, @x@ is initialized by the multiple constructor calls @?{}(&x.0, 3)@ and @?{}(&x.1, 6.28)@, while @y@ is initi laized by two default constructor calls @?{}(&y.0)@ and @?{}(&y.1)@.305 [S, S] x = [3, 6.28]; // uses (2), (3), specialized constructors 306 [S, S] y; // uses (1), (1), default constructor 307 [S, S] z = x.0; // uses (4), (4), copy constructor 308 \end{cfacode} 309 In this example, @x@ is initialized by the multiple constructor calls @?{}(&x.0, 3)@ and @?{}(&x.1, 6.28)@, while @y@ is initialized by two default constructor calls @?{}(&y.0)@ and @?{}(&y.1)@. 322 310 @z@ is initialized by mass copy constructor calls @?{}(&z.0, x.0)@ and @?{}(&z.1, x.0)@. 323 Finally, @x@, @y@, and @z@ are destructed, i.e.the calls @^?{}(&x.0)@, @^?{}(&x.1)@, @^?{}(&y.0)@, @^?{}(&y.1)@, @^?{}(&z.0)@, and @^?{}(&z.1)@.311 Finally, @x@, @y@, and @z@ are destructed, \ie the calls @^?{}(&x.0)@, @^?{}(&x.1)@, @^?{}(&y.0)@, @^?{}(&y.1)@, @^?{}(&z.0)@, and @^?{}(&z.1)@. 324 312 325 313 It is possible to define constructors and assignment functions for tuple types that provide new semantics, if the existing semantics do not fit the needs of an application. … … 339 327 S s = t; 340 328 \end{cfacode} 341 The initialization of @s@ with @t@ works by default ,because @t@ is flattened into its components, which satisfies the generated field constructor @?{}(S *, int, double)@ to initialize the first two values.329 The initialization of @s@ with @t@ works by default because @t@ is flattened into its components, which satisfies the generated field constructor @?{}(S *, int, double)@ to initialize the first two values. 342 330 343 331 \section{Member-Access Tuple Expression} … … 354 342 Then the type of @a.[x, y, z]@ is @[T_x, T_y, T_z]@. 
355 343 356 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple ( e.g.rearrange components, drop components, duplicate components, etc.).344 Since tuple index expressions are a form of member-access expression, it is possible to use tuple-index expressions in conjunction with member tuple expressions to manually restructure a tuple (\eg, rearrange components, drop components, duplicate components, etc.). 357 345 \begin{cfacode} 358 346 [int, int, long, double] x; … … 384 372 Since \CFA permits these tuple-access expressions using structures, unions, and tuples, \emph{member tuple expression} or \emph{field tuple expression} is more appropriate. 385 373 386 It could bepossible to extend member-access expressions further.374 It is possible to extend member-access expressions further. 387 375 Currently, a member-access expression whose member is a name requires that the aggregate is a structure or union, while a constant integer member requires the aggregate to be a tuple. 388 376 In the interest of orthogonal design, \CFA could apply some meaning to the remaining combinations as well. … … 398 386 z.y; // ??? 399 387 \end{cfacode} 400 One possib lity is for @s.1@ to select the second member of @s@.388 One possibility is for @s.1@ to select the second member of @s@. 401 389 Under this interpretation, it becomes possible to not only access members of a struct by name, but also by position. 402 390 Likewise, it seems natural to open this mechanism to enumerations as well, wherein the left side would be a type, rather than an expression. 403 One benefit of this interpretation is familiar , since it is extremely reminiscent of tuple-index expressions.391 One benefit of this interpretation is familiarity, since it is extremely reminiscent of tuple-index expressions. 
404 392 On the other hand, it could be argued that this interpretation is brittle in that changing the order of members or adding new members to a structure becomes a brittle operation. 405 This problem is less of a concern with tuples, since modifying a tuple affects only the code which directly uses that tuple, whereas modifying a structure has far reaching consequences withevery instance of the structure.406 407 As for @z.y@, a natural interpretation would beto extend the meaning of member tuple expressions.408 That is, currently the tuple must occur as the member, i.e.to the right of the dot.393 This problem is less of a concern with tuples, since modifying a tuple affects only the code that directly uses the tuple, whereas modifying a structure has far reaching consequences for every instance of the structure. 394 395 As for @z.y@, one interpretation is to extend the meaning of member tuple expressions. 396 That is, currently the tuple must occur as the member, \ie to the right of the dot. 409 397 Allowing tuples to the left of the dot could distribute the member across the elements of the tuple, in much the same way that member tuple expressions distribute the aggregate across the member tuple. 410 398 In this example, @z.y@ expands to @[z.0.y, z.1.y]@, allowing what is effectively a very limited compile-time field-sections map operation, where the argument must be a tuple containing only aggregates having a member named @y@. 
411 It is questionable how useful this would actually be in practice, since generally structures are not designed to have names in common with other structures, and further this could cause maintainability issues in that it encourages programmers to adopt very simple naming conventions,to maximize the amount of overlap between different types.399 It is questionable how useful this would actually be in practice, since structures often do not have names in common with other structures, and further this could cause maintainability issues in that it encourages programmers to adopt very simple naming conventions to maximize the amount of overlap between different types. 412 400 Perhaps more useful would be to allow arrays on the left side of the dot, which would likewise allow mapping a field access across the entire array, producing an array of the contained fields. 413 401 The immediate problem with this idea is that C arrays do not carry around their size, which would make it impossible to use this extension for anything other than a simple stack allocated array. 414 402 415 Supposing this feature works as described, it would be necessary to specify an ordering for the expansion of member access expressions versus membertuple expressions.403 Supposing this feature works as described, it would be necessary to specify an ordering for the expansion of member-access expressions versus member-tuple expressions. 416 404 \begin{cfacode} 417 405 struct { int x, y; }; … … 426 414 \end{cfacode} 427 415 Depending on exactly how the two tuples are combined, different results can be achieved. 428 As such, a specific ordering would need to be imposed in order for this feature to be useful. 429 Furthermore, this addition moves a member tuple expression's meaning from being clear statically to needing resolver support, since the member name needs to be distributed appropriately over each member of the tuple, which could itself be a tuple. 
430 431 Ultimately, both of these extensions introduce complexity into the model, with relatively little peceived benefit. 416 As such, a specific ordering would need to be imposed to make this feature useful. 417 Furthermore, this addition moves a member-tuple expression's meaning from being clear statically to needing resolver support, since the member name needs to be distributed appropriately over each member of the tuple, which could itself be a tuple. 418 419 A second possibility is for \CFA to have named tuples, as they exist in Swift and D. 420 \begin{cfacode} 421 typedef [int x, int y] Point2D; 422 Point2D p1, p2; 423 p1.x + p1.y + p2.x + p2.y; 424 p1.0 + p1.1 + p2.0 + p2.1; // equivalent 425 \end{cfacode} 426 In this simpler interpretation, a tuple type carries with it a list of possibly empty identifiers. 427 This approach fits naturally with the named return-value feature, and would likely go a long way towards implementing it. 428 429 Ultimately, the first two extensions introduce complexity into the model, with relatively little perceived benefit, and so were dropped from consideration. 430 Named tuples are a potentially useful addition to the language, provided they can be parsed with a reasonable syntax. 431 432 432 433 433 \section{Casting} 434 434 In C, the cast operator is used to explicitly convert between types. 435 In \CFA, the cast operator has a secondary use, which is type ascription. 435 In \CFA, the cast operator has a secondary use, which is type ascription, since it forces the expression resolution algorithm to choose the lowest cost conversion to the target type. 436 436 That is, a cast can be used to select the type of an expression when it is ambiguous, as in the call to an overloaded function.
437 437 \begin{cfacode} … … 442 442 (int)f(); // choose (2) 443 443 \end{cfacode} 444 Since casting is a fundamental operation in \CFA, casts shouldbe given a meaningful interpretation in the context of tuples.444 Since casting is a fundamental operation in \CFA, casts need to be given a meaningful interpretation in the context of tuples. 445 445 Taking a look at standard C provides some guidance with respect to the way casts should work with tuples. 446 446 \begin{cfacode}[numbers=left] … … 448 448 void g(); 449 449 450 (void)f(); 451 (int)g(); 450 (void)f(); // valid, ignore results 451 (int)g(); // invalid, void cannot be converted to int 452 452 453 453 struct A { int x; }; 454 (struct A)f(); 454 (struct A)f(); // invalid, int cannot be converted to A 455 455 \end{cfacode} 456 456 In C, line 4 is a valid cast, which calls @f@ and discards its result. 457 457 On the other hand, line 5 is invalid, because @g@ does not produce a result, so requesting an @int@ to materialize from nothing is nonsensical. 458 Finally, line 8 is also invalid, because in C casts only provide conversion between scalar types \cite {C11}.459 For consistency, this implies that any case wherein the number of components increases as a result of the cast is invalid, while casts whichhave the same or fewer number of components may be valid.458 Finally, line 8 is also invalid, because in C casts only provide conversion between scalar types \cite[p.~91]{C11}. 459 For consistency, this implies that any case wherein the number of components increases as a result of the cast is invalid, while casts that have the same or fewer number of components may be valid. 460 460 461 461 Formally, a cast to tuple type is valid when $T_n \leq S_m$, where $T_n$ is the number of components in the target type and $S_m$ is the number of components in the source type, and for each $i$ in $[0, n)$, $S_i$ can be cast to $T_i$. 
… … 468 468 [int, [int, int], int] g(); 469 469 470 ([int, double])f(); // (1) 471 ([int, int, int])g(); // (2) 472 ([void, [int, int]])g(); // (3) 473 ([int, int, int, int])g(); // (4) 474 ([int, [int, int, int]])g(); // (5) 470 ([int, double])f(); // (1) valid 471 ([int, int, int])g(); // (2) valid 472 ([void, [int, int]])g(); // (3) valid 473 ([int, int, int, int])g(); // (4) invalid 474 ([int, [int, int, int]])g(); // (5) invalid 475 475 \end{cfacode} 476 476 … … 479 479 If @g@ is free of side effects, this is equivalent to @[(int)(g().0), (int)(g().1.0), (int)(g().2)]@. 480 480 Since @void@ is effectively a 0-element tuple, (3) discards the first and third return values, which is effectively equivalent to @[(int)(g().1.0), (int)(g().1.1)]@). 481 482 481 % will this always hold true? probably, as constructors should give all of the conversion power we need. if casts become function calls, what would they look like? would need a way to specify the target type, which seems awkward. Also, C++ basically only has this because classes are closed to extension, while we don't have that problem (can have floating constructors for any type). 483 482 Note that a cast is not a function call in \CFA, so flattening and structuring conversions do not occur for cast expressions. … … 509 508 \end{cfacode} 510 509 Note that due to the implicit tuple conversions, this function is not restricted to the addition of two triples. 
511 A call to this plus operator type checks as long as a total of 6 non-tuple arguments are passed after flattening, and all of the arguments have a common type whichcan bind to @T@, with a pairwise @?+?@ over @T@.512 For example, these expressions willalso succeed and produce the same value.513 \begin{cfacode} 514 ([x.0, x.1]) + ([x.2, 10, 20, 30]); 515 x.0 + ([x.1, x.2, 10, 20, 30]); 510 A call to this plus operator type checks as long as a total of 6 non-tuple arguments are passed after flattening, and all of the arguments have a common type that can bind to @T@, with a pairwise @?+?@ over @T@. 511 For example, these expressions also succeed and produce the same value. 512 \begin{cfacode} 513 ([x.0, x.1]) + ([x.2, 10, 20, 30]); // x + ([10, 20, 30]) 514 x.0 + ([x.1, x.2, 10, 20, 30]); // x + ([10, 20, 30]) 516 515 \end{cfacode} 517 516 This presents a potential problem if structure is important, as these three expressions look like they should have different meanings. 518 Further , these calls can be made ambiguous by adding seemingly different functions.517 Furthermore, these calls can be made ambiguous by introducing seemingly different functions. 519 518 \begin{cfacode} 520 519 forall(otype T | { T ?+?(T, T); }) … … 524 523 \end{cfacode} 525 524 It is also important to note that these calls could be disambiguated if the function return types were different, as they likely would be for a reasonable implementation of @?+?@, since the return type is used in overload resolution. 
526 Still, th is isa deficiency of the current argument matching algorithm, and depending on the function, differing return values may not always be appropriate.527 It's possible that this could be rectified by applying an appropriate cost to the structuring and flattening conversions, which are currently 0-cost conversions.525 Still, these semantics are a deficiency of the current argument matching algorithm, and depending on the function, differing return values may not always be appropriate. 526 These issues could be rectified by applying an appropriate cost to the structuring and flattening conversions, which are currently 0-cost conversions. 528 527 Care would be needed in this case to ensure that exact matches do not incur such a cost. 529 528 \begin{cfacode} … … 536 535 \end{cfacode} 537 536 538 Until this point, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization ( i.e.no implicit conversions are applied to assertion arguments).537 Until this point, it has been assumed that assertion arguments must match the parameter type exactly, modulo polymorphic specialization (\ie, no implicit conversions are applied to assertion arguments). 539 538 This decision presents a conflict with the flexibility of tuples. 
540 539 \subsection{Assertion Inference} … … 568 567 } 569 568 \end{cfacode} 570 Is transformed into569 is transformed into 571 570 \begin{cfacode} 572 571 forall(dtype T0, dtype T1 | sized(T0) | sized(T1)) 573 struct _tuple2 { // generated before the first 2-tuple572 struct _tuple2_ { // generated before the first 2-tuple 574 573 T0 field_0; 575 574 T1 field_1; … … 578 577 _tuple2_(double, double) x; 579 578 forall(dtype T0, dtype T1, dtype T2 | sized(T0) | sized(T1) | sized(T2)) 580 struct _tuple3 { // generated before the first 3-tuple579 struct _tuple3_ { // generated before the first 3-tuple 581 580 T0 field_0; 582 581 T1 field_1; … … 591 590 [5, 'x', 1.24]; 592 591 \end{cfacode} 593 Becomes592 becomes 594 593 \begin{cfacode} 595 594 (_tuple3_(int, char, double)){ 5, 'x', 1.24 }; … … 605 604 f(x, 'z'); 606 605 \end{cfacode} 607 Is transformed into606 is transformed into 608 607 \begin{cfacode} 609 608 void f(int, _tuple2_(double, char)); … … 617 616 In the call to @f@, the second and third argument components are structured into a tuple argument. 618 617 619 Expressions whichmay contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion.618 Expressions that may contain side effects are made into \emph{unique expressions} before being expanded by the flattening conversion. 620 619 Each unique expression is assigned an identifier and is guaranteed to be executed exactly once. 621 620 \begin{cfacode} … … 624 623 g(h()); 625 624 \end{cfacode} 626 Inter ally, this is converted to625 Internally, this is converted to pseudo-\CFA 627 626 \begin{cfacode} 628 627 void g(int, double); 629 628 [int, double] h(); 630 let unq<0> = f() : g(unq<0>.0, unq<0>.1); // notation? 631 \end{cfacode} 629 lazy [int, double] unq0 = h(); // deferred execution 630 g(unq0.0, unq0.1); // execute h() once 631 \end{cfacode} 632 That is, the function @h@ is evaluated lazily and its result is stored for subsequent accesses. 
632 633 Ultimately, unique expressions are converted into two variables and an expression. 633 634 \begin{cfacode} … … 638 639 [int, double] _unq0; 639 640 g( 640 (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).0,641 (_unq0_finished_ ? _unq0 : (_unq0 = f(), _unq0_finished_ = 1, _unq0)).1,641 (_unq0_finished_ ? _unq0 : (_unq0 = h(), _unq0_finished_ = 1, _unq0)).0, 642 (_unq0_finished_ ? _unq0 : (_unq0 = h(), _unq0_finished_ = 1, _unq0)).1, 642 643 ); 643 644 \end{cfacode} … … 646 647 Every subsequent evaluation of the unique expression then results in an access to the stored result of the actual expression. 647 648 648 Currently, the \CFA translator has a very broad, imprecise definition of impurity , where any function call is assumed to be impure.649 This notion could be made more precise for certain intrinsic, auto generated, and builtin functions, and could analyze function bodies when they are availableto recursively detect impurity, to eliminate some unique expressions.650 It 's possible that unique expressions could be exposed to the user through a language feature, but it's not immediately obvious that there is a benefit to doing so.651 652 Tuple member expressions are recursively expanded into a list of memberaccess expressions.649 Currently, the \CFA translator has a very broad, imprecise definition of impurity (side-effects), where every function call is assumed to be impure. 650 This notion could be made more precise for certain intrinsic, auto-generated, and built-in functions, and could analyze function bodies, when they are available, to recursively detect impurity, to eliminate some unique expressions. 651 It is possible that lazy evaluation could be exposed to the user through a lazy keyword with little additional effort. 652 653 Tuple-member expressions are recursively expanded into a list of member-access expressions. 
653 654 \begin{cfacode} 654 655 [int, [double, int, double], int]] x; 655 656 x.[0, 1.[0, 2]]; 656 657 \end{cfacode} 657 Whichbecomes658 becomes 658 659 \begin{cfacode} 659 660 [x.0, [x.1.0, x.1.2]]; 660 661 \end{cfacode} 661 Tuple 662 Tuple-member expressions also take advantage of unique expressions in the case of possible impurity. 662 663 663 664 Finally, the various kinds of tuple assignment, constructors, and destructors generate GNU C statement expressions. … … 670 671 [x, y, z] = 1.5; // mass assignment 671 672 \end{cfacode} 672 Generates the following673 generates the following 673 674 \begin{cfacode} 674 675 // [x, y, z] = 1.5; 675 676 _tuple3_(int, double, int) _tmp_stmtexpr_ret0; 676 ({ 677 ({ // GNU C statement expression 677 678 // assign LHS address temporaries 678 679 int *__massassign_L0 = &x; // ?{} … … 689 690 int *__multassign_L2 = (int *)&_tmp_stmtexpr_ret0.2; // ?{} 690 691 691 // assign RHS value temporaries and perform mass assignmentto L0, L1, L2692 int __multassign_R0 = (*__massassign_L0=(int)__massassign_R0); 693 double __multassign_R1 = (*__massassign_L1=__massassign_R0); 694 int __multassign_R2 = (*__massassign_L2=(int)__massassign_R0); 692 // assign RHS value temporaries and mass-assign to L0, L1, L2 693 int __multassign_R0 = (*__massassign_L0=(int)__massassign_R0); // ?{} 694 double __multassign_R1 = (*__massassign_L1=__massassign_R0); // ?{} 695 int __multassign_R2 = (*__massassign_L2=(int)__massassign_R0); // ?{} 695 696 696 697 // perform construction of statement expr return variable using … … 711 712 }); 712 713 \end{cfacode} 713 A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to be referred to multiple time, e.g.in a unique expression.714 A variable is generated to store the value produced by a statement expression, since its fields may need to be constructed with a non-trivial constructor and it may need to 
be referred to multiple time, \eg, in a unique expression. 714 715 $N$ LHS variables are generated and constructed using the address of the tuple components, and a single RHS variable is generated to store the value of the RHS without any loss of precision. 715 716 A nested statement expression is generated that performs the individual assignments and constructs the return value using the results of the individual assignments. … … 720 721 [x, y, z] = [f(), 3]; // multiple assignment 721 722 \end{cfacode} 722 Generates 723 generates the following 723 724 \begin{cfacode} 724 725 // [x, y, z] = [f(), 3]; … … 741 742 _tmp_cp_ret0 : 742 743 (_tmp_cp_ret0=f(), _unq0_finished_=1, _tmp_cp_ret0)).1; // ?{} 743 ({ // tuple destruction - destruct f() return temporary - tuple destruction744 ({ // tuple destruction - destruct f() return temporary 744 745 // assign LHS address temporaries 745 746 double *__massassign_L3 = (double *)&_tmp_cp_ret0.0; // ?{} … … 757 758 int *__multassign_L5 = (int *)&_tmp_stmtexpr_ret0.2; // ?{} 758 759 759 // assign RHS value temporaries and perform multiple assignmentto L0, L1, L2760 // assign RHS value temporaries and multiple-assign to L0, L1, L2 760 761 int __multassign_R3 = (*__multassign_L0=(int)__multassign_R0); // ?{} 761 762 double __multassign_R4 = (*__multassign_L1=__multassign_R1); // ?{} … … 785 786 The use of statement expressions allows the translator to arbitrarily generate additional temporary variables as needed, but binds the implementation to a non-standard extension of the C language. 786 787 There are other places where the \CFA translator makes use of GNU C extensions, such as its use of nested functions, so this is not a new restriction. 
787 788 \section{Variadic Functions}
789 % TODO: should this maybe be its own chapter?
790 C provides variadic functions through the manipulation of @va_list@ objects.
791 A variadic function is one which contains at least one parameter, followed by @...@ as the last token in the parameter list.
792 In particular, some form of \emph{argument descriptor} is needed to inform the function of the number of arguments and their types.
793 Two common argument descriptors are format strings and counter parameters.
794 It's important to note that both of these mechanisms are inherently redundant, because they require the user to specify information that the compiler knows explicitly.
795 This required repetition is error prone, because it's easy for the user to add or remove arguments without updating the argument descriptor.
796 In addition, C requires the programmer to hard code all of the possible expected types.
797 As a result, it is cumbersome to write a function that is open to extension.
798 For example, a simple function which sums $N$ @int@s,
799 \begin{cfacode}
800 int sum(int N, ...)
{801 va_list args;802 va_start(args, N);803 int ret = 0;804 while(N) {805 ret += va_arg(args, int); // have to specify type806 N--;807 }808 va_end(args);809 return ret;810 }811 sum(3, 10, 20, 30); // need to keep counter in sync812 \end{cfacode}813 The @va_list@ type is a special C data type that abstracts variadic argument manipulation.814 The @va_start@ macro initializes a @va_list@, given the last named parameter.815 Each use of the @va_arg@ macro allows access to the next variadic argument, given a type.816 Since the function signature does not provide any information on what types can be passed to a variadic function, the compiler does not perform any error checks on a variadic call.817 As such, it is possible to pass any value to the @sum@ function, including pointers, floating-point numbers, and structures.818 In the case where the provided type is not compatible with the argument's actual type after default argument promotions, or if too many arguments are accessed, the behaviour is undefined \cite{C11}.819 Furthermore, there is no way to perform the necessary error checks in the @sum@ function at run-time, since type information is not carried into the function body.820 Since they rely on programmer convention rather than compile-time checks, variadic functions are generally unsafe.821 822 In practice, compilers can provide warnings to help mitigate some of the problems.823 For example, GCC provides the @format@ attribute to specify that a function uses a format string, which allows the compiler to perform some checks related to the standard format specifiers.824 Unfortunately, this does not permit extensions to the format string syntax, so a programmer cannot extend the attribute to warn for mismatches with custom types.825 826 Needless to say, C's variadic functions are a deficient language feature.827 Two options were examined to provide better, type-safe variadic functions in \CFA.828 \subsection{Whole Tuple Matching}829 Option 1 is to change the 
argument matching algorithm, so that type parameters can match whole tuples, rather than just their components.830 This option could be implemented with two phases of argument matching when a function contains type parameters and the argument list contains tuple arguments.831 If flattening and structuring fail to produce a match, a second attempt at matching the function and argument combination is made where tuple arguments are not expanded and structure must match exactly, modulo non-tuple implicit conversions.832 For example:833 \begin{cfacode}834 forall(otype T, otype U | { T g(U); })835 void f(T, U);836 837 [int, int] g([int, int, int, int]);838 839 f([1, 2], [3, 4, 5, 6]);840 \end{cfacode}841 With flattening and structuring, the call is first transformed into @f(1, 2, 3, 4, 5, 6)@.842 Since the first argument of type @T@ does not have a tuple type, unification decides that @T=int@ and @1@ is matched as the first parameter.843 Likewise, @U@ does not have a tuple type, so @U=int@ and @2@ is accepted as the second parameter.844 There are now no remaining formal parameters, but there are remaining arguments and the function is not variadic, so the match fails.845 846 With the addition of an exact matching attempt, @T=[int,int]@ and @U=[int,int,int,int]@ and so the arguments type check.847 Likewise, when inferring assertion @g@, an exact match is found.848 849 This approach is strict with respect to argument structure by nature, which makes it syntactically awkward to use in ways that the existing tuple design is not.850 For example, consider a @new@ function which allocates memory using @malloc@ and constructs the result, using arbitrary arguments.851 \begin{cfacode}852 struct Array;853 void ?{}(Array *, int, int, int);854 855 forall(dtype T, otype Params | sized(T) | { void ?{}(T *, Params); })856 T * new(Params p) {857 return malloc(){ p };858 }859 Array(int) * x = new([1, 2, 3]);860 \end{cfacode}861 The call to @new@ is not particularly appealing, since it 
requires the use of square brackets at the call-site, which is not required in any other function call.862 This shifts the burden from the compiler to the programmer, which is almost always wrong, and creates an odd inconsistency within the language.863 Similarly, in order to pass 0 variadic arguments, an explicit empty tuple must be passed into the argument list, otherwise the exact matching rule would not have an argument to bind against.864 865 It should be otherwise noted that the addition of an exact matching rule only affects the outcome for polymorphic type binding when tuples are involved.866 For non-tuple arguments, exact matching and flattening \& structuring are equivalent.867 For tuple arguments to a function without polymorphic formal parameters, flattening and structuring work whenever an exact match would have worked, since the tuple is flattened and implicitly restructured to its original structure.868 Thus there is nothing to be gained from permitting the exact matching rule to take effect when a function does not contain polymorphism and none of the arguments are tuples.869 870 Overall, this option takes a step in the right direction, but is contrary to the flexibility of the existing tuple design.871 872 \subsection{A New Typeclass}873 A second option is the addition of another kind of type parameter, @ttype@.874 Matching against a @ttype@ parameter consumes all remaining argument components and packages them into a tuple, binding to the resulting tuple of types.875 In a given parameter list, there should be at most one @ttype@ parameter that must occur last, otherwise the call can never resolve, given the previous rule.876 % TODO: a similar rule exists in C/C++ for "..."877 This idea essentially matches normal variadic semantics, with a strong feeling of similarity to \CCeleven variadic templates.878 As such, @ttype@ variables will also be referred to as argument packs.879 This is the option that has been added to \CFA.880 881 Like variadic 
templates, the main way to manipulate @ttype@ polymorphic functions is through recursion.
882 Since nothing is known about a parameter pack by default, assertion parameters are key to doing anything meaningful.
883 Unlike variadic templates, @ttype@ polymorphic functions can be separately compiled.
884 
885 For example, a simple translation of the C sum function using @ttype@ would look like
886 \begin{cfacode}
887 int sum(){ return 0; } // (0)
888 forall(ttype Params | { int sum(Params); })
889 int sum(int x, Params rest) { // (1)
890 return x+sum(rest);
891 }
892 sum(10, 20, 30);
893 \end{cfacode}
894 Since (0) does not accept any arguments, it is not a valid candidate function for the call @sum(10, 20, 30)@.
895 In order to call (1), @10@ is matched with @x@, and the argument resolution moves on to the argument pack @rest@, which consumes the remainder of the argument list and @Params@ is bound to @[20, 30]@.
896 In order to finish the resolution of @sum@, an assertion parameter which matches @int sum(int, int)@ is required.
897 Like in the previous iteration, (0) is not a valid candidate, so (1) is examined with @Params@ bound to @[int]@, requiring the assertion @int sum(int)@.
898 Next, (0) fails, and to satisfy (1) @Params@ is bound to @[]@, requiring an assertion @int sum()@.
899 Finally, (0) matches and (1) fails, which terminates the recursion.
900 Effectively, this traces as @sum(10, 20, 30)@ $\rightarrow$ @10+sum(20, 30)@ $\rightarrow$ @10+(20+sum(30))@ $\rightarrow$ @10+(20+(30+sum()))@ $\rightarrow$ @10+(20+(30+0))@.
901 
902 A point of note is that this version does not require any form of argument descriptor, since the \CFA type system keeps track of all of these details.
903 It might be reasonable to take the @sum@ function a step further to enforce a minimum number of arguments, which could be done simply
904 \begin{cfacode}
905 int sum(int x, int y){
906 return x+y;
907 }
908 forall(ttype Params | { int sum(int, Params); })
909 int sum(int x, int y, Params rest) {
910 return
sum(x+y, rest);911 }912 sum(10, 20, 30);913 \end{cfacode}914 915 One more iteration permits the summation of any summable type, as long as all arguments are the same type.916 \begin{cfacode}917 trait summable(otype T) {918 T ?+?(T, T);919 };920 forall(otype R | summable(R))921 R sum(R x, R y){922 return x+y;923 }924 forall(otype R, ttype Params925 | summable(R)926 | { R sum(R, Params); })927 R sum(R x, R y, Params rest) {928 return sum(x+y, rest);929 }930 sum(3, 10, 20, 30);931 \end{cfacode}932 Unlike C, it is not necessary to hard code the expected type.933 This is naturally open to extension, in that any user-defined type with a @?+?@ operator is automatically able to be used with the @sum@ function.934 That is to say, the programmer who writes @sum@ does not need full program knowledge of every possible data type, unlike what is necessary to write an equivalent function using the standard C mechanisms.935 936 Going one last step, it is possible to achieve full generality in \CFA, allowing the summation of arbitrary lists of summable types.937 \begin{cfacode}938 trait summable(otype T1, otype T2, otype R) {939 R ?+?(T1, T2);940 };941 forall(otype T1, otype T2, otype R | summable(T1, T2, R))942 R sum(T1 x, T2 y) {943 return x+y;944 }945 forall(otype T1, otype T2, otype T3, ttype Params, otype R946 | summable(T1, T2, T3)947 | { R sum(T3, Params); })948 R sum(T1 x, T2 y, Params rest ) {949 return sum(x+y, rest);950 }951 sum(3, 10.5, 20, 30.3);952 \end{cfacode}953 The \CFA translator requires adding explicit @double ?+?(int, double)@ and @double ?+?(double, int)@ functions for this call to work, since implicit conversions are not supported for assertions.954 955 C variadic syntax and @ttype@ polymorphism probably should not be mixed, since it is not clear where to draw the line to decide which arguments belong where.956 Furthermore, it might be desirable to disallow polymorphic functions to use C variadic syntax to encourage a Cforall style.957 Aside from calling C 
variadic functions, it is not obvious that there is anything that can be done with C variadics that could not also be done with @ttype@ parameters.958 959 Variadic templates in \CC require an ellipsis token to express that a parameter is a parameter pack and to expand a parameter pack.960 \CFA does not need an ellipsis in either case, since the type class @ttype@ is only used for variadics.961 An alternative design could have used an ellipsis combined with an existing type class.962 This approach was not taken because the largest benefit of the ellipsis token in \CC is the ability to expand a parameter pack within an expression, e.g. in fold expressions, which requires compile-time knowledge of the structure of the parameter pack, which is not available in \CFA.963 \begin{cppcode}964 template<typename... Args>965 void f(Args &... args) {966 g(&args...); // expand to addresses of pack elements967 }968 \end{cppcode}969 As such, the addition of an ellipsis token would be purely an aesthetic change in \CFA today.970 971 It is possible to write a type-safe variadic print routine, which can replace @printf@972 \begin{cfacode}973 struct S { int x, y; };974 forall(otype T, ttype Params |975 { void print(T); void print(Params); })976 void print(T arg, Params rest) {977 print(arg);978 print(rest);979 }980 void print(char * x) { printf("%s", x); }981 void print(int x) { printf("%d", x); }982 void print(S s) { print("{ ", s.x, ",", s.y, " }"); }983 print("s = ", (S){ 1, 2 }, "\n");984 \end{cfacode}985 This example routine showcases a variadic-template-like decomposition of the provided argument list.986 The individual @print@ routines allow printing a single element of a type.987 The polymorphic @print@ allows printing any list of types, as long as each individual type has a @print@ function.988 The individual print functions can be used to build up more complicated @print@ routines, such as for @S@, which is something that cannot be done with @printf@ in C.989 990 It is also 
possible to use @ttype@ polymorphism to provide arbitrary argument forwarding functions.991 For example, it is possible to write @new@ as a library function.992 Example 2: new (i.e. type-safe malloc + constructors)993 \begin{cfacode}994 struct Array;995 void ?{}(Array *, int, int, int);996 997 forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); })998 T * new(Params p) {999 return malloc(){ p }; // construct result of malloc1000 }1001 Array * x = new(1, 2, 3);1002 \end{cfacode}1003 The @new@ function provides the combination of type-safe @malloc@ with a constructor call, so that it becomes impossible to forget to construct dynamically allocated objects.1004 This provides the type-safety of @new@ in \CC, without the need to specify the allocated type, thanks to return-type inference.1005 1006 In the call to @new@, @Array@ is selected to match @T@, and @Params@ is expanded to match @[int, int, int, int]@. To satisfy the assertions, a constructor with an interface compatible with @void ?{}(Array *, int, int, int)@ must exist in the current scope.1007 1008 \subsection{Implementation}1009 1010 The definition of @new@1011 \begin{cfacode}1012 forall(dtype T | sized(T)) T * malloc();1013 1014 forall(dtype T, ttype Params | sized(T) | { void ?{}(T *, Params); })1015 T * new(Params p) {1016 return malloc(){ p }; // construct result of malloc1017 }1018 \end{cfacode}1019 Generates the following1020 \begin{cfacode}1021 void *malloc(long unsigned int _sizeof_T, long unsigned int _alignof_T);1022 1023 void *new(1024 void (*_adapter_)(void (*)(), void *, void *),1025 long unsigned int _sizeof_T,1026 long unsigned int _alignof_T,1027 long unsigned int _sizeof_Params,1028 long unsigned int _alignof_Params,1029 void (* _ctor_T)(void *, void *),1030 void *p1031 ){1032 void *_retval_new;1033 void *_tmp_cp_ret0;1034 void *_tmp_ctor_expr0;1035 _retval_new=1036 (_adapter_(_ctor_T,1037 (_tmp_ctor_expr0=(_tmp_cp_ret0=malloc(_sizeof_2tT, _alignof_2tT),1038 _tmp_cp_ret0)),1039 
p),1040 _tmp_ctor_expr0); // ?{}1041 *(void **)&_tmp_cp_ret0; // ^?{}1042 return _retval_new;1043 }1044 \end{cfacode}1045 The constructor for @T@ is called indirectly through the adapter function on the result of @malloc@ and the parameter pack.1046 The variable that was allocated and constructed is then returned from @new@.1047 1048 A call to @new@1049 \begin{cfacode}1050 struct S { int x, y; };1051 void ?{}(S *, int, int);1052 1053 S * s = new(3, 4);1054 \end{cfacode}1055 Generates the following1056 \begin{cfacode}1057 struct _tuple2_ { // _tuple2_(T0, T1)1058 void *field_0;1059 void *field_1;1060 };1061 struct _conc__tuple2_0 { // _tuple2_(int, int)1062 int field_0;1063 int field_1;1064 };1065 struct _conc__tuple2_0 _tmp_cp1; // tuple argument to new1066 struct S *_tmp_cp_ret1; // return value from new1067 void _thunk0( // ?{}(S *, [int, int])1068 struct S *_p0,1069 struct _conc__tuple2_0 _p11070 ){1071 _ctor_S(_p0, _p1.field_0, _p1.field_1); // restructure tuple parameter1072 }1073 void _adapter(void (*_adaptee)(), void *_p0, void *_p1){1074 // apply adaptee to arguments after casting to actual types1075 ((void (*)(struct S *, struct _conc__tuple2_0))_adaptee)(1076 _p0,1077 *(struct _conc__tuple2_0 *)_p11078 );1079 }1080 struct S *s = (struct S *)(_tmp_cp_ret1=1081 new(1082 _adapter,1083 sizeof(struct S),1084 __alignof__(struct S),1085 sizeof(struct _conc__tuple2_0),1086 __alignof__(struct _conc__tuple2_0),1087 (void (*)(void *, void *))&_thunk0,1088 (({ // copy construct tuple argument to new1089 int *__multassign_L0 = (int *)&_tmp_cp1.field_0;1090 int *__multassign_L1 = (int *)&_tmp_cp1.field_1;1091 int __multassign_R0 = 3;1092 int __multassign_R1 = 4;1093 ((*__multassign_L0=__multassign_R0 /* ?{} */) ,1094 (*__multassign_L1=__multassign_R1 /* ?{} */));1095 }), &_tmp_cp1)1096 ), _tmp_cp_ret1);1097 *(struct S **)&_tmp_cp_ret1; // ^?{} // destroy return value from new1098 ({ // destroy argument temporary1099 int *__massassign_L0 = (int *)&_tmp_cp1.field_0;1100 
int *__massassign_L1 = (int *)&_tmp_cp1.field_1;1101 ((*__massassign_L0 /* ^?{} */) , (*__massassign_L1 /* ^?{} */));1102 });1103 \end{cfacode}1104 Of note, @_thunk0@ is generated to translate calls to @?{}(S *, [int, int])@ into calls to @?{}(S *, int, int)@.1105 The call to @new@ constructs a tuple argument using the supplied arguments.1106 1107 The @print@ function1108 \begin{cfacode}1109 forall(otype T, ttype Params |1110 { void print(T); void print(Params); })1111 void print(T arg, Params rest) {1112 print(arg);1113 print(rest);1114 }1115 \end{cfacode}1116 Generates1117 \begin{cfacode}1118 void print_variadic(1119 void (*_adapterF_7tParams__P)(void (*)(), void *),1120 void (*_adapterF_2tT__P)(void (*)(), void *),1121 void (*_adapterF_P2tT2tT__MP)(void (*)(), void *, void *),1122 void (*_adapterF2tT_P2tT2tT_P_MP)(void (*)(), void *, void *, void *),1123 long unsigned int _sizeof_T,1124 long unsigned int _alignof_T,1125 long unsigned int _sizeof_Params,1126 long unsigned int _alignof_Params,1127 void *(*_assign_TT)(void *, void *),1128 void (*_ctor_T)(void *),1129 void (*_ctor_TT)(void *, void *),1130 void (*_dtor_T)(void *),1131 void (*print_T)(void *),1132 void (*print_Params)(void *),1133 void *arg,1134 void *rest1135 ){1136 void *_tmp_cp0 = __builtin_alloca(_sizeof_T);1137 _adapterF_2tT__P( // print(arg)1138 ((void (*)())print_T),1139 (_adapterF_P2tT2tT__MP( // copy construct argument1140 ((void (*)())_ctor_TT),1141 _tmp_cp0,1142 arg1143 ), _tmp_cp0)1144 );1145 _dtor_T(_tmp_cp0); // destroy argument temporary1146 _adapterF_7tParams__P( // print(rest)1147 ((void (*)())print_Params),1148 rest1149 );1150 }1151 \end{cfacode}1152 The @print_T@ routine is called indirectly through an adapter function with a copy constructed argument, followed by an indirect call to @print_Params@.1153 1154 A call to print1155 \begin{cfacode}1156 void print(const char * x) { printf("%s", x); }1157 void print(int x) { printf("%d", x); }1158 1159 print("x = ", 123, ".\n");1160 
\end{cfacode}1161 Generates the following1162 \begin{cfacode}1163 void print_string(const char *x){1164 int _tmp_cp_ret0;1165 (_tmp_cp_ret0=printf("%s", x)) , _tmp_cp_ret0;1166 *(int *)&_tmp_cp_ret0; // ^?{}1167 }1168 void print_int(int x){1169 int _tmp_cp_ret1;1170 (_tmp_cp_ret1=printf("%d", x)) , _tmp_cp_ret1;1171 *(int *)&_tmp_cp_ret1; // ^?{}1172 }1173 1174 struct _tuple2_ { // _tuple2_(T0, T1)1175 void *field_0;1176 void *field_1;1177 };1178 struct _conc__tuple2_0 { // _tuple2_(int, const char *)1179 int field_0;1180 const char *field_1;1181 };1182 struct _conc__tuple2_0 _tmp_cp6; // _tuple2_(int, const char *)1183 const char *_thunk0(const char **_p0, const char *_p1){1184 // const char * ?=?(const char **, const char *)1185 return *_p0=_p1;1186 }1187 void _thunk1(const char **_p0){ // void ?{}(const char **)1188 *_p0; // ?{}1189 }1190 void _thunk2(const char **_p0, const char *_p1){1191 // void ?{}(const char **, const char *)1192 *_p0=_p1; // ?{}1193 }1194 void _thunk3(const char **_p0){ // void ^?{}(const char **)1195 *_p0; // ^?{}1196 }1197 void _thunk4(struct _conc__tuple2_0 _p0){ // void print([int, const char *])1198 struct _tuple1_ { // _tuple1_(T0)1199 void *field_0;1200 };1201 struct _conc__tuple1_1 { // _tuple1_(const char *)1202 const char *field_0;1203 };1204 void _thunk5(struct _conc__tuple1_1 _pp0){ // void print([const char *])1205 print_string(_pp0.field_0); // print(rest.0)1206 }1207 void _adapter_i_pii_(void (*_adaptee)(), void *_ret, void *_p0, void *_p1){1208 *(int *)_ret=((int (*)(int *, int))_adaptee)(_p0, *(int *)_p1);1209 }1210 void _adapter_pii_(void (*_adaptee)(), void *_p0, void *_p1){1211 ((void (*)(int *, int ))_adaptee)(_p0, *(int *)_p1);1212 }1213 void _adapter_i_(void (*_adaptee)(), void *_p0){1214 ((void (*)(int))_adaptee)(*(int *)_p0);1215 }1216 void _adapter_tuple1_5_(void (*_adaptee)(), void *_p0){1217 ((void (*)(struct _conc__tuple1_1 ))_adaptee)(*(struct _conc__tuple1_1 *)_p0);1218 }1219 print_variadic(1220 
_adapter_tuple1_5,1221 _adapter_i_,1222 _adapter_pii_,1223 _adapter_i_pii_,1224 sizeof(int),1225 __alignof__(int),1226 sizeof(struct _conc__tuple1_1),1227 __alignof__(struct _conc__tuple1_1),1228 (void *(*)(void *, void *))_assign_i, // int ?=?(int *, int)1229 (void (*)(void *))_ctor_i, // void ?{}(int *)1230 (void (*)(void *, void *))_ctor_ii, // void ?{}(int *, int)1231 (void (*)(void *))_dtor_ii, // void ^?{}(int *)1232 (void (*)(void *))print_int, // void print(int)1233 (void (*)(void *))&_thunk5, // void print([const char *])1234 &_p0.field_0, // rest.01235 &(struct _conc__tuple1_1 ){ _p0.field_1 } // [rest.1]1236 );1237 }1238 struct _tuple1_ { // _tuple1_(T0)1239 void *field_0;1240 };1241 struct _conc__tuple1_6 { // _tuple_1(const char *)1242 const char *field_0;1243 };1244 const char *_temp0;1245 _temp0="x = ";1246 void _adapter_pstring_pstring_string(1247 void (*_adaptee)(),1248 void *_ret,1249 void *_p0,1250 void *_p11251 ){1252 *(const char **)_ret=1253 ((const char *(*)(const char **, const char *))_adaptee)(1254 _p0,1255 *(const char **)_p11256 );1257 }1258 void _adapter_pstring_string(void (*_adaptee)(), void *_p0, void *_p1){1259 ((void (*)(const char **, const char *))_adaptee)(_p0, *(const char **)_p1);1260 }1261 void _adapter_string_(void (*_adaptee)(), void *_p0){1262 ((void (*)(const char *))_adaptee)(*(const char **)_p0);1263 }1264 void _adapter_tuple2_0_(void (*_adaptee)(), void *_p0){1265 ((void (*)(struct _conc__tuple2_0 ))_adaptee)(*(struct _conc__tuple2_0 *)_p0);1266 }1267 print_variadic(1268 _adapter_tuple2_0_,1269 _adapter_string_,1270 _adapter_pstring_string_,1271 _adapter_pstring_pstring_string_,1272 sizeof(const char *),1273 __alignof__(const char *),1274 sizeof(struct _conc__tuple2_0 ),1275 __alignof__(struct _conc__tuple2_0 ),1276 (void *(*)(void *, void *))&_thunk0, // const char * ?=?(const char **, const char *)1277 (void (*)(void *))&_thunk1, // void ?{}(const char **)1278 (void (*)(void *, void *))&_thunk2, // void ?{}(const 
char **, const char *)1279 (void (*)(void *))&_thunk3, // void ^?{}(const char **)1280 (void (*)(void *))print_string, // void print(const char *)1281 (void (*)(void *))&_thunk4, // void print([int, const char *])1282 &_temp0, // "x = "1283 (({ // copy construct tuple argument to print1284 int *__multassign_L0 = (int *)&_tmp_cp6.field_0;1285 const char **__multassign_L1 = (const char **)&_tmp_cp6.field_1;1286 int __multassign_R0 = 123;1287 const char *__multassign_R1 = ".\n";1288 ((*__multassign_L0=__multassign_R0 /* ?{} */),1289 (*__multassign_L1=__multassign_R1 /* ?{} */));1290 }), &_tmp_cp6) // [123, ".\n"]1291 );1292 ({ // destroy argument temporary1293 int *__massassign_L0 = (int *)&_tmp_cp6.field_0;1294 const char **__massassign_L1 = (const char **)&_tmp_cp6.field_1;1295 ((*__massassign_L0 /* ^?{} */) , (*__massassign_L1 /* ^?{} */));1296 });1297 \end{cfacode}1298 The type @_tuple2_@ is generated to allow passing the @rest@ argument to @print_variadic@.1299 Thunks 0 through 3 provide wrappers for the @otype@ parameters for @const char *@, while @_thunk4@ translates a call to @print([int, const char *])@ into a call to @print_variadic(int, [const char *])@.1300 This all builds to a call to @print_variadic@, with the appropriate copy construction of the tuple argument.1301 1302 \section{Future Work} -
doc/user/user.tex
r221c2de7 r154fdc8 11 11 %% Created On : Wed Apr 6 14:53:29 2016 12 12 %% Last Modified By : Peter A. Buhr 13 %% Last Modified On : Wed Apr 5 23:19:40201714 %% Update Count : 141 213 %% Last Modified On : Wed Apr 12 12:18:58 2017 14 %% Update Count : 1415 15 15 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 16 … … 64 64 % Names used in the document. 65 65 \newcommand{\Version}{\input{../../version}} 66 \newcommand{\CS}{C\raisebox{-0.9ex}{\large$^\sharp$}\xspace}67 68 66 \newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}} 69 67 \newcommand{\Emph}[2][red]{{\color{#1}\textbf{\emph{#2}}}} … … 195 193 For system programming, where direct access to hardware and dealing with real-time issues is a requirement, C is usually the language of choice. 196 194 As well, there are millions of lines of C legacy code, forming the base for many software development projects (especially on UNIX systems). 197 The TIOBE index (\url{http://www.tiobe.com/tiobe_index}) for March 2016 shows programming-language popularity, with \Index*{Java} 20.5\%, C 14.5\%, \Index*[C++]{\CC} 6.7\%, \C S4.3\%, \Index*{Python} 4.3\%, and all other programming languages below 3\%.195 The TIOBE index (\url{http://www.tiobe.com/tiobe_index}) for March 2016 shows programming-language popularity, with \Index*{Java} 20.5\%, C 14.5\%, \Index*[C++]{\CC} 6.7\%, \Csharp 4.3\%, \Index*{Python} 4.3\%, and all other programming languages below 3\%. 198 196 As well, for 30 years, C has been the number 1 and 2 most popular programming language: 199 197 \begin{center} -
src/ControlStruct/LabelGenerator.cc
r221c2de7 r154fdc8 20 20 #include "SynTree/Label.h" 21 21 #include "SynTree/Attribute.h" 22 #include "SynTree/Statement.h" 22 23 23 24 namespace ControlStruct { … … 31 32 } 32 33 33 Label LabelGenerator::newLabel( std::string suffix ) {34 Label LabelGenerator::newLabel( std::string suffix, Statement * stmt ) { 34 35 std::ostringstream os; 35 36 os << "__L" << current++ << "__" << suffix; 37 if ( stmt && ! stmt->get_labels().empty() ) { 38 os << "_" << stmt->get_labels().front() << "__"; 39 } 36 40 std::string ret = os.str(); 37 41 Label l( ret ); -
src/ControlStruct/LabelGenerator.h
r221c2de7 r154fdc8 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // LabelGenerator.h -- 7 // LabelGenerator.h -- 8 8 // 9 9 // Author : Rodolfo G. Esteves … … 24 24 public: 25 25 static LabelGenerator *getGenerator(); 26 Label newLabel(std::string suffix = "");26 Label newLabel(std::string suffix, Statement * stmt = nullptr); 27 27 void reset() { current = 0; } 28 28 void rewind() { current--; } -
src/ControlStruct/MLEMutator.cc
r221c2de7 r154fdc8 56 56 bool labeledBlock = !(cmpndStmt->get_labels().empty()); 57 57 if ( labeledBlock ) { 58 Label brkLabel = generator->newLabel("blockBreak" );58 Label brkLabel = generator->newLabel("blockBreak", cmpndStmt); 59 59 enclosingControlStructures.push_back( Entry( cmpndStmt, brkLabel ) ); 60 60 } // if … … 80 80 // whether brkLabel and contLabel are used with branch statements and will recursively do the same to nested 81 81 // loops 82 Label brkLabel = generator->newLabel("loopBreak" );83 Label contLabel = generator->newLabel("loopContinue" );82 Label brkLabel = generator->newLabel("loopBreak", loopStmt); 83 Label contLabel = generator->newLabel("loopContinue", loopStmt); 84 84 enclosingControlStructures.push_back( Entry( loopStmt, brkLabel, contLabel ) ); 85 85 loopStmt->set_body ( loopStmt->get_body()->acceptMutator( *this ) ); 86 86 87 assert( ! enclosingControlStructures.empty() ); 87 88 Entry &e = enclosingControlStructures.back(); 88 89 // sanity check that the enclosing loops have been popped correctly … … 108 109 bool labeledBlock = !(ifStmt->get_labels().empty()); 109 110 if ( labeledBlock ) { 110 Label brkLabel = generator->newLabel("blockBreak" );111 Label brkLabel = generator->newLabel("blockBreak", ifStmt); 111 112 enclosingControlStructures.push_back( Entry( ifStmt, brkLabel ) ); 112 113 } // if 113 114 114 115 Parent::mutate( ifStmt ); 115 116 116 117 if ( labeledBlock ) { 117 118 if ( ! 
enclosingControlStructures.back().useBreakExit().empty() ) { … … 126 127 Statement *MLEMutator::handleSwitchStmt( SwitchClass *switchStmt ) { 127 128 // generate a label for breaking out of a labeled switch 128 Label brkLabel = generator->newLabel("switchBreak" );129 Label brkLabel = generator->newLabel("switchBreak", switchStmt); 129 130 enclosingControlStructures.push_back( Entry(switchStmt, brkLabel) ); 130 131 mutateAll( switchStmt->get_statements(), *this ); … … 158 159 159 160 std::list< Entry >::reverse_iterator targetEntry; 160 if ( branchStmt->get_type() == BranchStmt::Goto ) { 161 return branchStmt; 162 } else if ( branchStmt->get_type() == BranchStmt::Continue) { 163 // continue target must be a loop 164 if ( branchStmt->get_target() == "" ) { 165 targetEntry = std::find_if( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), [](Entry &e) { return isLoop( e.get_controlStructure() ); } ); 166 } else { 167 // labelled continue - lookup label in table ot find attached control structure 168 targetEntry = std::find( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), (*targetTable)[branchStmt->get_target()] ); 169 } // if 170 if ( targetEntry == enclosingControlStructures.rend() || ! 
isLoop( targetEntry->get_controlStructure() ) ) { 171 throw SemanticError( "'continue' target must be an enclosing loop: " + originalTarget ); 172 } // if 173 } else if ( branchStmt->get_type() == BranchStmt::Break ) { 174 if ( enclosingControlStructures.empty() ) throw SemanticError( "'break' outside a loop, switch, or labelled block" ); 175 targetEntry = enclosingControlStructures.rbegin(); 176 } else { 177 assert( false ); 178 } // if 179 180 if ( branchStmt->get_target() != "" && targetTable->find( branchStmt->get_target() ) == targetTable->end() ) { 181 throw SemanticError("The label defined in the exit loop statement does not exist: " + originalTarget ); // shouldn't happen (since that's already checked) 182 } // if 161 switch ( branchStmt->get_type() ) { 162 case BranchStmt::Goto: 163 return branchStmt; 164 case BranchStmt::Continue: 165 case BranchStmt::Break: { 166 bool isContinue = branchStmt->get_type() == BranchStmt::Continue; 167 // unlabeled break/continue 168 if ( branchStmt->get_target() == "" ) { 169 if ( isContinue ) { 170 // continue target is outermost loop 171 targetEntry = std::find_if( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), [](Entry &e) { return isLoop( e.get_controlStructure() ); } ); 172 } else { 173 // break target is outmost control structure 174 if ( enclosingControlStructures.empty() ) throw SemanticError( "'break' outside a loop, switch, or labelled block" ); 175 targetEntry = enclosingControlStructures.rbegin(); 176 } // if 177 } else { 178 // labeled break/continue - lookup label in table to find attached control structure 179 targetEntry = std::find( enclosingControlStructures.rbegin(), enclosingControlStructures.rend(), (*targetTable)[branchStmt->get_target()] ); 180 } // if 181 // ensure that selected target is valid 182 if ( targetEntry == enclosingControlStructures.rend() || (isContinue && ! isLoop( targetEntry->get_controlStructure() ) ) ) { 183 throw SemanticError( toString( (isContinue ? 
"'continue'" : "'break'"), " target must be an enclosing ", (isContinue ? "loop: " : "control structure: "), originalTarget ) ); 184 } // if 185 break; 186 } 187 default: 188 assert( false ); 189 } // switch 183 190 184 191 // branch error checks, get the appropriate label name and create a goto … … 197 204 } // switch 198 205 199 if ( branchStmt->get_target() == "" && branchStmt->get_type() != BranchStmt::Continue ) { 200 // unlabelled break/continue - can keep branch as break/continue for extra semantic information, but add 201 // exitLabel as its destination so that label passes can easily determine where the break/continue goes to 202 branchStmt->set_target( exitLabel ); 203 return branchStmt; 204 } else { 205 // labelled break/continue - can't easily emulate this with break and continue, so transform into a goto 206 delete branchStmt; 207 return new BranchStmt( std::list<Label>(), exitLabel, BranchStmt::Goto ); 208 } // if 206 // transform break/continue statements into goto to simplify later handling of branches 207 delete branchStmt; 208 return new BranchStmt( std::list<Label>(), exitLabel, BranchStmt::Goto ); 209 209 } 210 210 -
src/GenPoly/Box.cc
r221c2de7 r154fdc8 34 34 #include "Parser/ParseNode.h" 35 35 36 #include "SynTree/Attribute.h" 36 37 #include "SynTree/Constant.h" 37 38 #include "SynTree/Declaration.h" … … 165 166 using Parent::mutate; 166 167 168 PolyGenericCalculator(); 169 167 170 template< typename DeclClass > 168 171 DeclClass *handleDecl( DeclClass *decl, Type *type ); … … 198 201 ScopedSet< std::string > knownLayouts; ///< Set of generic type layouts known in the current scope, indexed by sizeofName 199 202 ScopedSet< std::string > knownOffsets; ///< Set of non-generic types for which the offset array exists in the current scope, indexed by offsetofName 203 UniqueName bufNamer; ///< Namer for VLA buffers 200 204 }; 201 205 … … 1452 1456 ////////////////////////////////////////// PolyGenericCalculator //////////////////////////////////////////////////// 1453 1457 1458 PolyGenericCalculator::PolyGenericCalculator() 1459 : Parent(), knownLayouts(), knownOffsets(), bufNamer( "_buf" ) {} 1460 1454 1461 void PolyGenericCalculator::beginTypeScope( Type *ty ) { 1455 1462 scopeTyVars.beginScope(); … … 1528 1535 if ( ObjectDecl *objectDecl = dynamic_cast< ObjectDecl *>( declStmt->get_decl() ) ) { 1529 1536 if ( findGeneric( objectDecl->get_type() ) ) { 1530 // change initialization of a polymorphic value object 1531 // to allocate storage with alloca1537 // change initialization of a polymorphic value object to allocate via a VLA 1538 // (alloca was previously used, but can't be safely used in loops) 1532 1539 Type *declType = objectDecl->get_type(); 1533 UntypedExpr *alloc = new UntypedExpr( new NameExpr( "__builtin_alloca" ) ); 1534 alloc->get_args().push_back( new NameExpr( sizeofName( mangleType( declType ) ) ) ); 1540 std::string bufName = bufNamer.newName(); 1541 ObjectDecl *newBuf = new ObjectDecl( bufName, Type::StorageClasses(), LinkageSpec::C, 0, 1542 new ArrayType( Type::Qualifiers(), new BasicType( Type::Qualifiers(), BasicType::Kind::Char), new NameExpr( sizeofName( mangleType(declType) 
) ), 1543 true, false, std::list<Attribute*>{ new Attribute( std::string{"aligned"}, std::list<Expression*>{ new ConstantExpr( Constant::from_int(8) ) } ) } ), 0 ); 1544 stmtsToAdd.push_back( new DeclStmt( noLabels, newBuf ) ); 1535 1545 1536 1546 delete objectDecl->get_init(); 1537 1547 1538 std::list<Expression*> designators; 1539 objectDecl->set_init( new SingleInit( alloc, designators, false ) ); // not constructed 1548 objectDecl->set_init( new SingleInit( new NameExpr( bufName ) ) ); 1540 1549 } 1541 1550 } -
src/Parser/ParseNode.h
r221c2de7 r154fdc8 107 107 public: 108 108 ExpressionNode( Expression * expr = nullptr ) : expr( expr ) {} 109 ExpressionNode( const ExpressionNode &other );110 109 virtual ~ExpressionNode() {} 111 virtual ExpressionNode * clone() const override { return expr ? new ExpressionNode( expr->clone()) : nullptr; }110 virtual ExpressionNode * clone() const override { return expr ? static_cast<ExpressionNode*>((new ExpressionNode( expr->clone() ))->set_next( maybeClone( get_next() ) )) : nullptr; } 112 111 113 112 bool get_extension() const { return extension; } -
src/ResolvExpr/AlternativeFinder.cc
r221c2de7 r154fdc8 211 211 } 212 212 213 // std::unordered_map< Expression *, UniqueExpr * > ; 213 void AlternativeFinder::addAnonConversions( const Alternative & alt ) { 214 // adds anonymous member interpretations whenever an aggregate value type is seen. 215 Expression * expr = alt.expr->clone(); 216 std::unique_ptr< Expression > manager( expr ); // RAII for expr 217 alt.env.apply( expr->get_result() ); 218 if ( StructInstType *structInst = dynamic_cast< StructInstType* >( expr->get_result() ) ) { 219 NameExpr nameExpr( "" ); 220 addAggMembers( structInst, expr, alt.cost+Cost( 0, 0, 1 ), alt.env, &nameExpr ); 221 } else if ( UnionInstType *unionInst = dynamic_cast< UnionInstType* >( expr->get_result() ) ) { 222 NameExpr nameExpr( "" ); 223 addAggMembers( unionInst, expr, alt.cost+Cost( 0, 0, 1 ), alt.env, &nameExpr ); 224 } // if 225 } 214 226 215 227 template< typename StructOrUnionType > … … 220 232 std::list< Declaration* > members; 221 233 aggInst->lookup( name, members ); 234 222 235 for ( std::list< Declaration* >::const_iterator i = members.begin(); i != members.end(); ++i ) { 223 236 if ( DeclarationWithType *dwt = dynamic_cast< DeclarationWithType* >( *i ) ) { 224 237 alternatives.push_back( Alternative( new MemberExpr( dwt, expr->clone() ), env, newCost ) ); 225 238 renameTypes( alternatives.back().expr ); 239 addAnonConversions( alternatives.back() ); // add anonymous member interpretations whenever an aggregate value type is seen as a member expression. 226 240 } else { 227 241 assert( false ); … … 730 744 if ( candidates.empty() && ! 
errors.isEmpty() ) { throw errors; } 731 745 746 // compute conversionsion costs 732 747 for ( AltList::iterator withFunc = candidates.begin(); withFunc != candidates.end(); ++withFunc ) { 733 748 Cost cvtCost = computeConversionCost( *withFunc, indexer ); … … 751 766 } // if 752 767 } // for 768 // function may return struct or union value, in which case we need to add alternatives for implicit conversions to each of the anonymous members 769 for ( const Alternative & alt : alternatives ) { 770 addAnonConversions( alt ); 771 } 772 753 773 candidates.clear(); 754 774 candidates.splice( candidates.end(), alternatives ); … … 885 905 ) 886 906 renameTypes( alternatives.back().expr ); 887 if ( StructInstType *structInst = dynamic_cast< StructInstType* >( (*i)->get_type() ) ) { 888 NameExpr nameExpr( "" ); 889 addAggMembers( structInst, &newExpr, Cost( 0, 0, 1 ), env, &nameExpr ); 890 } else if ( UnionInstType *unionInst = dynamic_cast< UnionInstType* >( (*i)->get_type() ) ) { 891 NameExpr nameExpr( "" ); 892 addAggMembers( unionInst, &newExpr, Cost( 0, 0, 1 ), env, &nameExpr ); 893 } // if 907 addAnonConversions( alternatives.back() ); // add anonymous member interpretations whenever an aggregate value type is seen as a name expression. 894 908 } // for 895 909 } -
src/ResolvExpr/AlternativeFinder.h
r221c2de7 r154fdc8 78 78 void findSubExprs( InputIterator begin, InputIterator end, OutputIterator out ); 79 79 80 /// Adds alternatives for anonymous members 81 void addAnonConversions( const Alternative & alt ); 80 82 /// Adds alternatives for member expressions, given the aggregate, conversion cost for that aggregate, and name of the member 81 83 template< typename StructOrUnionType > void addAggMembers( StructOrUnionType *aggInst, Expression *expr, const Cost &newCost, const TypeEnvironment & env, Expression * member ); -
src/SymTab/Autogen.cc
r221c2de7 r154fdc8 498 498 makeUnionFieldsAssignment( srcParam, dstParam, back_inserter( funcDecl->get_statements()->get_kids() ) ); 499 499 if ( returnVal ) { 500 if ( isDynamicLayout ) makeUnionFieldsAssignment( srcParam, returnVal, back_inserter( funcDecl->get_statements()->get_kids() ) ); 501 else funcDecl->get_statements()->get_kids().push_back( new ReturnStmt( noLabels, new VariableExpr( srcParam ) ) ); 500 funcDecl->get_statements()->get_kids().push_back( new ReturnStmt( noLabels, new VariableExpr( srcParam ) ) ); 502 501 } 503 502 } -
src/SymTab/Validate.cc
r221c2de7 r154fdc8 208 208 }; 209 209 210 class ArrayLength : public Visitor { 211 public: 212 /// for array types without an explicit length, compute the length and store it so that it 213 /// is known to the rest of the phases. For example, 214 /// int x[] = { 1, 2, 3 }; 215 /// int y[][2] = { { 1, 2, 3 }, { 1, 2, 3 } }; 216 /// here x and y are known at compile-time to have length 3, so change this into 217 /// int x[3] = { 1, 2, 3 }; 218 /// int y[3][2] = { { 1, 2, 3 }, { 1, 2, 3 } }; 219 static void computeLength( std::list< Declaration * > & translationUnit ); 220 221 virtual void visit( ObjectDecl * objDecl ); 222 }; 223 210 224 class CompoundLiteral final : public GenPoly::DeclMutator { 211 225 Type::StorageClasses storageClasses; … … 235 249 acceptAll( translationUnit, pass3 ); 236 250 VerifyCtorDtorAssign::verify( translationUnit ); 251 ArrayLength::computeLength( translationUnit ); 237 252 } 238 253 … … 869 884 } 870 885 } 886 887 void ArrayLength::computeLength( std::list< Declaration * > & translationUnit ) { 888 ArrayLength len; 889 acceptAll( translationUnit, len ); 890 } 891 892 void ArrayLength::visit( ObjectDecl * objDecl ) { 893 if ( ArrayType * at = dynamic_cast< ArrayType * >( objDecl->get_type() ) ) { 894 if ( at->get_dimension() != nullptr ) return; 895 if ( ListInit * init = dynamic_cast< ListInit * >( objDecl->get_init() ) ) { 896 at->set_dimension( new ConstantExpr( Constant::from_ulong( init->get_initializers().size() ) ) ); 897 } 898 } 899 } 871 900 } // namespace SymTab 872 901 -
src/SynTree/Expression.cc
r221c2de7 r154fdc8 339 339 return TypeSubstitution( aggInst->get_baseParameters()->begin(), aggInst->get_baseParameters()->end(), aggInst->get_parameters().begin() ); 340 340 } else { 341 assertf( false, "makeSub expects struct or union type for aggregate ");341 assertf( false, "makeSub expects struct or union type for aggregate, but got: %s", toString( t ).c_str() ); 342 342 } 343 343 } -
src/libcfa/Makefile.am
r221c2de7 r154fdc8 41 41 CC = ${abs_top_srcdir}/src/driver/cfa 42 42 43 headers = limits stdlib math iostream fstream iterator rational assert containers/ vector43 headers = limits stdlib math iostream fstream iterator rational assert containers/pair containers/vector 44 44 45 45 # not all platforms support concurrency, add option do disable it -
src/libcfa/Makefile.in
r221c2de7 r154fdc8 99 99 am__libcfa_d_a_SOURCES_DIST = libcfa-prelude.c interpose.c \ 100 100 libhdr/libdebug.c limits.c stdlib.c math.c iostream.c \ 101 fstream.c iterator.c rational.c assert.c containers/vector.c \ 102 concurrency/coroutine.c concurrency/thread.c \ 103 concurrency/kernel.c concurrency/monitor.c \ 104 concurrency/CtxSwitch-@MACHINE_TYPE@.S concurrency/invoke.c 101 fstream.c iterator.c rational.c assert.c containers/pair.c \ 102 containers/vector.c concurrency/coroutine.c \ 103 concurrency/thread.c concurrency/kernel.c \ 104 concurrency/monitor.c concurrency/CtxSwitch-@MACHINE_TYPE@.S \ 105 concurrency/invoke.c 105 106 am__dirstamp = $(am__leading_dot)dirstamp 106 107 @BUILD_CONCURRENCY_TRUE@am__objects_1 = concurrency/libcfa_d_a-coroutine.$(OBJEXT) \ … … 113 114 libcfa_d_a-iterator.$(OBJEXT) libcfa_d_a-rational.$(OBJEXT) \ 114 115 libcfa_d_a-assert.$(OBJEXT) \ 116 containers/libcfa_d_a-pair.$(OBJEXT) \ 115 117 containers/libcfa_d_a-vector.$(OBJEXT) $(am__objects_1) 116 118 @BUILD_CONCURRENCY_TRUE@am__objects_3 = concurrency/CtxSwitch-@MACHINE_TYPE@.$(OBJEXT) \ … … 126 128 am__libcfa_a_SOURCES_DIST = libcfa-prelude.c interpose.c \ 127 129 libhdr/libdebug.c limits.c stdlib.c math.c iostream.c \ 128 fstream.c iterator.c rational.c assert.c containers/vector.c \ 129 concurrency/coroutine.c concurrency/thread.c \ 130 concurrency/kernel.c concurrency/monitor.c \ 131 concurrency/CtxSwitch-@MACHINE_TYPE@.S concurrency/invoke.c 130 fstream.c iterator.c rational.c assert.c containers/pair.c \ 131 containers/vector.c concurrency/coroutine.c \ 132 concurrency/thread.c concurrency/kernel.c \ 133 concurrency/monitor.c concurrency/CtxSwitch-@MACHINE_TYPE@.S \ 134 concurrency/invoke.c 132 135 @BUILD_CONCURRENCY_TRUE@am__objects_5 = concurrency/libcfa_a-coroutine.$(OBJEXT) \ 133 136 @BUILD_CONCURRENCY_TRUE@ concurrency/libcfa_a-thread.$(OBJEXT) \ … … 138 141 libcfa_a-fstream.$(OBJEXT) libcfa_a-iterator.$(OBJEXT) \ 139 142 libcfa_a-rational.$(OBJEXT) 
libcfa_a-assert.$(OBJEXT) \ 143 containers/libcfa_a-pair.$(OBJEXT) \ 140 144 containers/libcfa_a-vector.$(OBJEXT) $(am__objects_5) 141 145 @BUILD_CONCURRENCY_TRUE@am__objects_7 = concurrency/CtxSwitch-@MACHINE_TYPE@.$(OBJEXT) \ … … 176 180 $(am__libcfa_a_SOURCES_DIST) 177 181 am__nobase_cfa_include_HEADERS_DIST = limits stdlib math iostream \ 178 fstream iterator rational assert containers/ vector \179 con currency/coroutine concurrency/thread concurrency/kernel\180 concurrency/ monitor ${shell echo stdhdr/*} \182 fstream iterator rational assert containers/pair \ 183 containers/vector concurrency/coroutine concurrency/thread \ 184 concurrency/kernel concurrency/monitor ${shell echo stdhdr/*} \ 181 185 concurrency/invoke.h 182 186 HEADERS = $(nobase_cfa_include_HEADERS) … … 310 314 AM_CCASFLAGS = @CFA_FLAGS@ 311 315 headers = limits stdlib math iostream fstream iterator rational assert \ 312 containers/ vector $(am__append_3)316 containers/pair containers/vector $(am__append_3) 313 317 libobjs = ${headers:=.o} 314 318 libsrc = libcfa-prelude.c interpose.c libhdr/libdebug.c ${headers:=.c} \ … … 400 404 @$(MKDIR_P) containers/$(DEPDIR) 401 405 @: > containers/$(DEPDIR)/$(am__dirstamp) 406 containers/libcfa_d_a-pair.$(OBJEXT): containers/$(am__dirstamp) \ 407 containers/$(DEPDIR)/$(am__dirstamp) 402 408 containers/libcfa_d_a-vector.$(OBJEXT): containers/$(am__dirstamp) \ 403 409 containers/$(DEPDIR)/$(am__dirstamp) … … 428 434 libhdr/libcfa_a-libdebug.$(OBJEXT): libhdr/$(am__dirstamp) \ 429 435 libhdr/$(DEPDIR)/$(am__dirstamp) 436 containers/libcfa_a-pair.$(OBJEXT): containers/$(am__dirstamp) \ 437 containers/$(DEPDIR)/$(am__dirstamp) 430 438 containers/libcfa_a-vector.$(OBJEXT): containers/$(am__dirstamp) \ 431 439 containers/$(DEPDIR)/$(am__dirstamp) … … 458 466 -rm -f concurrency/libcfa_d_a-monitor.$(OBJEXT) 459 467 -rm -f concurrency/libcfa_d_a-thread.$(OBJEXT) 468 -rm -f containers/libcfa_a-pair.$(OBJEXT) 460 469 -rm -f containers/libcfa_a-vector.$(OBJEXT) 470 
-rm -f containers/libcfa_d_a-pair.$(OBJEXT) 461 471 -rm -f containers/libcfa_d_a-vector.$(OBJEXT) 462 472 -rm -f libhdr/libcfa_a-libdebug.$(OBJEXT) … … 497 507 @AMDEP_TRUE@@am__include@ @am__quote@concurrency/$(DEPDIR)/libcfa_d_a-monitor.Po@am__quote@ 498 508 @AMDEP_TRUE@@am__include@ @am__quote@concurrency/$(DEPDIR)/libcfa_d_a-thread.Po@am__quote@ 509 @AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_a-pair.Po@am__quote@ 499 510 @AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_a-vector.Po@am__quote@ 511 @AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_d_a-pair.Po@am__quote@ 500 512 @AMDEP_TRUE@@am__include@ @am__quote@containers/$(DEPDIR)/libcfa_d_a-vector.Po@am__quote@ 501 513 @AMDEP_TRUE@@am__include@ @am__quote@libhdr/$(DEPDIR)/libcfa_a-libdebug.Po@am__quote@ … … 681 693 @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -c -o libcfa_d_a-assert.obj `if test -f 'assert.c'; then $(CYGPATH_W) 'assert.c'; else $(CYGPATH_W) '$(srcdir)/assert.c'; fi` 682 694 695 containers/libcfa_d_a-pair.o: containers/pair.c 696 @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_d_a-pair.o -MD -MP -MF containers/$(DEPDIR)/libcfa_d_a-pair.Tpo -c -o containers/libcfa_d_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c 697 @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_d_a-pair.Tpo containers/$(DEPDIR)/libcfa_d_a-pair.Po 698 @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_d_a-pair.o' libtool=no @AMDEPBACKSLASH@ 699 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 700 @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) 
$(CFLAGS) -c -o containers/libcfa_d_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c 701 702 containers/libcfa_d_a-pair.obj: containers/pair.c 703 @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_d_a-pair.obj -MD -MP -MF containers/$(DEPDIR)/libcfa_d_a-pair.Tpo -c -o containers/libcfa_d_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi` 704 @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_d_a-pair.Tpo containers/$(DEPDIR)/libcfa_d_a-pair.Po 705 @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_d_a-pair.obj' libtool=no @AMDEPBACKSLASH@ 706 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 707 @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_d_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi` 708 683 709 containers/libcfa_d_a-vector.o: containers/vector.c 684 710 @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_d_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_d_a-vector.o -MD -MP -MF containers/$(DEPDIR)/libcfa_d_a-vector.Tpo -c -o containers/libcfa_d_a-vector.o `test -f 'containers/vector.c' || echo '$(srcdir)/'`containers/vector.c … … 904 930 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 905 931 @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -c -o libcfa_a-assert.obj `if test -f 'assert.c'; then $(CYGPATH_W) 'assert.c'; else $(CYGPATH_W) 
'$(srcdir)/assert.c'; fi` 932 933 containers/libcfa_a-pair.o: containers/pair.c 934 @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_a-pair.o -MD -MP -MF containers/$(DEPDIR)/libcfa_a-pair.Tpo -c -o containers/libcfa_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c 935 @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_a-pair.Tpo containers/$(DEPDIR)/libcfa_a-pair.Po 936 @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_a-pair.o' libtool=no @AMDEPBACKSLASH@ 937 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 938 @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_a-pair.o `test -f 'containers/pair.c' || echo '$(srcdir)/'`containers/pair.c 939 940 containers/libcfa_a-pair.obj: containers/pair.c 941 @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -MT containers/libcfa_a-pair.obj -MD -MP -MF containers/$(DEPDIR)/libcfa_a-pair.Tpo -c -o containers/libcfa_a-pair.obj `if test -f 'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi` 942 @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) containers/$(DEPDIR)/libcfa_a-pair.Tpo containers/$(DEPDIR)/libcfa_a-pair.Po 943 @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='containers/pair.c' object='containers/libcfa_a-pair.obj' libtool=no @AMDEPBACKSLASH@ 944 @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 945 @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcfa_a_CFLAGS) $(CFLAGS) -c -o containers/libcfa_a-pair.obj `if test -f 
'containers/pair.c'; then $(CYGPATH_W) 'containers/pair.c'; else $(CYGPATH_W) '$(srcdir)/containers/pair.c'; fi` 906 946 907 947 containers/libcfa_a-vector.o: containers/vector.c -
src/libcfa/stdlib.c
r221c2de7 r154fdc8 10 10 // Created On : Thu Jan 28 17:10:29 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : S at Apr 1 18:31:26201713 // Update Count : 18 112 // Last Modified On : Sun Apr 16 10:41:05 2017 13 // Update Count : 189 14 14 // 15 15 … … 78 78 } // posix_memalign 79 79 80 forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params); } )80 forall( dtype T, ttype Params | sized(T) | { void ?{}( T *, Params ); } ) 81 81 T * new( Params p ) { 82 return ((T *)malloc()){ p };82 return ((T *)malloc()){ p }; 83 83 } 84 84 … … 229 229 forall( otype T | { int ?<?( T, T ); } ) 230 230 unsigned int bsearch( T key, const T * arr, size_t dimension ) { 231 int comp( const void * t1, const void * t2 ) { return *(T *)t1 < *(T *)t2 ? -1 : *(T *)t2 < *(T *)t1 ? 1 : 0; } 232 T *result = (T *)bsearch( &key, arr, dimension, sizeof(T), comp ); 231 T *result = bsearch( key, arr, dimension ); 233 232 return result ? result - arr : dimension; // pointer subtraction includes sizeof(T) 234 233 } // bsearch -
src/prelude/prelude.cf
r221c2de7 r154fdc8 217 217 signed int ?<?( _Bool, _Bool ), ?<=?( _Bool, _Bool ), 218 218 ?>?( _Bool, _Bool ), ?>=?( _Bool, _Bool ); 219 signed int ?<?( char, char ), ?<=?( char, char ), 220 ?>?( char, char ), ?>=?( char, char ); 221 signed int ?<?( signed char, signed char ), ?<=?( signed char, signed char ), 222 ?>?( signed char, signed char ), ?>=?( signed char, signed char ); 219 223 signed int ?<?( unsigned char, unsigned char ), ?<=?( unsigned char, unsigned char ), 220 224 ?>?( unsigned char, unsigned char ), ?>=?( unsigned char, unsigned char ); 225 signed int ?<?( signed short, signed short ), ?<=?( signed short, signed short ), 226 ?>?( signed short, signed short ), ?>=?( signed short, signed short ); 227 signed int ?<?( unsigned short, unsigned short ), ?<=?( unsigned short, unsigned short ), 228 ?>?( unsigned short, unsigned short ), ?>=?( unsigned short, unsigned short ); 221 229 signed int ?<?( signed int, signed int ), ?<=?( signed int, signed int ), 222 230 ?>?( signed int, signed int ), ?>=?( signed int, signed int ); … … 265 273 266 274 signed int ?==?( _Bool, _Bool ), ?!=?( _Bool, _Bool ); 275 signed int ?==?( char, char ), ?!=?( char, char ); 276 signed int ?==?( signed char, signed char ), ?!=?( signed char, signed char ); 277 signed int ?==?( unsigned char, unsigned char ), ?!=?( unsigned char, unsigned char ); 278 signed int ?==?( signed short, signed short ), ?!=?( signed short, signed short ); 279 signed int ?==?( unsigned short, unsigned short ), ?!=?( unsigned short, unsigned short ); 267 280 signed int ?==?( signed int, signed int ), ?!=?( signed int, signed int ); 268 281 signed int ?==?( unsigned int, unsigned int ), ?!=?( unsigned int, unsigned int );
Note: See TracChangeset
for help on using the changeset viewer.