# Changeset f343c6b

Ignore:
Timestamp:
Apr 25, 2019, 2:43:23 PM (3 years ago)
Branches:
arm-eh, cleanup-dtors, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr
Children:
98b4b12
Parents:
ffaedcd (diff), 69c37cc (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge remote-tracking branch 'origin/aaron-thesis'

Location:
doc
Files:
18 edited

Unmodified
Removed
• ## doc/bibliography/pl.bib

 rffaedcd contributer = {pabuhr@plg}, author      = {Aaron Moss and Robert Schluntz and Peter A. Buhr}, title       = {\textsf{C}$\mathbf{\forall}$ : Adding Modern Programming Language Features to C}, title       = {\textsf{C}$\mathbf{\forall}$ : Adding Modern Programming Language Features to {C}}, journal     = spe, volume      = 48, } @techreport{Prokopec11, keywords = {ctrie, concurrent map}, contributer = {a3moss@uwaterloo.ca}, title={Cache-aware lock-free concurrent hash tries}, author={Prokopec, Aleksandar and Bagwell, Phil and Odersky, Martin}, institution={EPFL}, year={2011} } @article{Buhr85, keywords    = {goto, multi-exit loop}, year        = 1998, note        = {{\small\textsf{ftp://\-plg.uwaterloo.ca/\-pub/\-Cforall/\-refrat.ps.gz}}}, } @phdthesis{Norrish98, title={C formalised in HOL}, author={Norrish, Michael}, year={1998}, school={University of Cambridge} } contributer = {a3moss@uwaterloo.ca}, title       = {Clang: a {C} language family frontend for {LLVM}}, howpublished= {\href{https://clang.llvm.org/}{https://\-clang.llvm.org/}}, note        = {Accessed 2019-02-22} howpublished= {\href{https://clang.llvm.org/}{https://\-clang.llvm.org/}} } number      = 11, pages       = {853-860}, } @inproceedings{Odersky01, keywords = {Scala}, contributer = {a3moss@uwaterloo.ca}, author = {Odersky, Martin and Zenger, Christoph and Zenger, Matthias}, title = {Colored Local Type Inference}, booktitle = {Proceedings of the 28th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, series = {POPL '01}, year = {2001}, isbn = {1-58113-336-7}, location = {London, United Kingdom}, pages = {41--53}, numpages = {13}, url = {http://doi.acm.org/10.1145/360204.360207}, doi = {10.1145/360204.360207}, acmid = {360207}, publisher = {ACM}, address = {New York, NY, USA}, } } @inproceedings{Prokopec12, keywords={ctrie, hash trie, concurrent map}, contributer={a3moss@uwaterloo.ca}, title={Concurrent tries with efficient non-blocking snapshots}, author={Prokopec, 
Aleksandar and Bronson, Nathan Grasso and Bagwell, Phil and Odersky, Martin}, booktitle={ACM SIGPLAN Notices}, volume={47}, number={8}, pages={151--160}, year={2012}, organization={ACM} } @article{Buhr05a, keywords    = {concurrency, myths}, year        = 2018, } @article{Leroy09, keywords = {C formalization}, contributer = {a3moss@uwaterloo.ca}, author = {Leroy, Xavier}, title = {Formal Verification of a Realistic Compiler}, journal = {Commun. ACM}, issue_date = {July 2009}, volume = {52}, number = {7}, month = jul, year = {2009}, issn = {0001-0782}, pages = {107--115}, numpages = {9}, url = {http://doi.acm.org/10.1145/1538788.1538814}, doi = {10.1145/1538788.1538814}, acmid = {1538814}, publisher = {ACM}, address = {New York, NY, USA}, } @manual{Fortran95, number      = 1, pages       = {1-15}, } @article{Morgado13, keywords = {expression resolution}, contributer = {a3moss@uwaterloo.ca}, title={Iterative and core-guided {MaxSAT} solving: A survey and assessment}, author={Morgado, Antonio and Heras, Federico and Liffiton, Mark and Planes, Jordi and Marques-Silva, Joao}, journal={Constraints}, volume={18}, number={4}, pages={478--534}, year={2013}, publisher={Springer} } } @article{Pierce00, keywords = {Scala}, contributer = {a3moss@uwaterloo.ca}, author = {Pierce, Benjamin C. and Turner, David N.}, title = {Local Type Inference}, journal = {ACM Trans. Program. Lang. Syst.}, issue_date = {Jan. 
2000}, volume = {22}, number = {1}, month = jan, year = {2000}, issn = {0164-0925}, pages = {1--44}, numpages = {44}, url = {http://doi.acm.org/10.1145/345099.345100}, doi = {10.1145/345099.345100}, acmid = {345100}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {polymorphism, subtyping, type inference}, } @article{Sundell08, keywords    = {lock free, deque}, note        = {\href{https://www.openmp.org/wp-content/uploads/openmp-4.5.pdf}{https://\-www.openmp.org/\-wp-content/\-uploads/\-openmp-4.5.pdf}}, } @inproceedings{Krebbers14, keywords = {c formalization}, contributer = {a3moss@uwaterloo.ca}, author = {Krebbers, Robbert}, title = {An Operational and Axiomatic Semantics for Non-determinism and Sequence Points in C}, booktitle = {Proceedings of the 41st ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, series = {POPL '14}, year = {2014}, isbn = {978-1-4503-2544-8}, location = {San Diego, California, USA}, pages = {101--112}, numpages = {12}, url = {http://doi.acm.org/10.1145/2535838.2535878}, doi = {10.1145/2535838.2535878}, acmid = {2535878}, publisher = {ACM}, address = {New York, NY, USA}, } @book{Deitel04, } @article{SysVABI, keywords = {System V ABI}, contributer = {a3moss@uwaterloo.ca}, title={System {V} application binary interface}, author={Matz, Michael and Hubicka, Jan and Jaeger, Andreas and Mitchell, Mark}, journal={AMD64 Architecture Processor Supplement, Draft v0}, volume={99}, year={2013} } % T Argues against declaring exceptions on routine definitions. }, } @techreport{Black90, title={Typechecking polymorphism in {Emerald}}, author={Black, Andrew P and Hutchinson, Norman C}, year={1990}, institution={Cambridge Research Laboratory, Digital Equipment Corporation} }
• ## doc/theses/aaron_moss_PhD/phd/background.tex

 rffaedcd To provide background for the contributions in subsequent chapters, this chapter provides a summary of the features of \CFA{} at the time this work was conducted. The core design of \CFA{} is laid out in Glen Ditchfield's 1992 PhD thesis, \emph{Contextual Polymorphism} \cite{Ditchfield92}; in that thesis, Ditchfield presents the theoretical underpinnings of the \CFA{} polymorphism model. Glen Ditchfield laid out the core design of \CFA{} in his 1992 PhD thesis, \emph{Contextual Polymorphism} \cite{Ditchfield92}; in that thesis, Ditchfield presents the theoretical underpinnings of the \CFA{} polymorphism model. Building on Ditchfield's design for contextual polymorphism as well as KW-C \cite{Buhr94a}, an earlier set of (largely syntactic) extensions to C, Richard Bilson \cite{Bilson03} built the first version of the \CFA{} compiler, \CFACC{}, in the early 2000's. This early \CFACC{} provided basic functionality, but incorporated a number of algorithmic choices that have failed to scale as \CFA{} has developed, lacking the runtime performance for practical use; this thesis is substantially concerned with rectifying those deficits. Notable features added during this period include generic types (Chapter~\ref{generic-chap}), constructors and destructors \cite{Schluntz17}, improved support for tuples \cite{Schluntz17}, reference types \cite{Moss18}, first-class concurrent and parallel programming support \cite{Delisle18}, as well as numerous pieces of syntactic sugar and the start of an idiomatic standard library \cite{Moss18}. The selection of features presented in this chapter are chosen to elucidate the design constraints of the work presented in this thesis. \cbstart This thesis is primarily concerned with the \emph{expression resolution} portion of \CFA{} type-checking; resolution is discussed in more detail in Chapter~\ref{resolution-chap}, but is essentially determining which declarations the identifiers in each expression correspond to. 
Given that no simultaneously-visible C declarations share identifiers, expression resolution in C is not difficult, but the added features of \CFA{} make its resolution algorithms significantly more complex. Due to this complexity, the expression-resolution pass in \CFACC{} requires 95\% of compiler runtime on some source files, making an efficient procedure for expression resolution a requirement for a performant \CFA{} compiler. \cbend The features presented in this chapter are chosen to elucidate the design constraints of the work presented in this thesis. In some cases the interactions of multiple features make this design a significantly more complex problem than any individual feature; in other cases a feature that does not by itself add any complexity to expression resolution triggers previously rare edge cases more frequently. Another aspect of \CFA{}'s procedural paradigm is that it retains C's translation-unit-based encapsulation model, rather than class-based encapsulation such as in \CC{}. This design choice implies that separate compilation must be maintained to allow headers to act as an encapsulation boundary, rather than the header-only libraries used by \CC{} templates. As such, any language feature that requires code to be exposed in header files (\eg{} \CC{} templates) also eliminates encapsulation in \CFA{}. Given this constraint, \CFA{} is carefully designed to allow separate compilation for its added language features under the existing C usage patterns. \section{Name Overloading} \label{overloading-sec} The addition of !one_t! allows generic algorithms to handle the unit value uniformly for types where it is meaningful; a simple example of this is that polymorphic functions\footnote{discussed in Section~\ref{poly-func-sec}} in the \CFA{} prelude define !++x! and !x++! in terms of !x += 1!, allowing users to idiomatically define all forms of increment for a type !T! 
by defining the single function !T& ?+=?(T&, one_t)!; analogous overloads for the decrement operators are also present, and programmers can override any of these functions for a particular type if desired. \CFA{} previously allowed !0! and !1! to be the names of polymorphic variables, with separate overloads for !int 0!, !int 1!, and !forall(dtype T) T* 0!. \CFA{} previously allowed !0! and !1! to be the names of polymorphic variables, with separate overloads for !int 0!, !int 1!, and the polymorphic variable !forall(dtype T) T* 0!. While designing \CFA{} generic types (see Chapter~\ref{generic-chap}), it was discovered that the parametric polymorphic zero variable is not generalizable to other types; though all null pointers have the same in-memory representation, the same cannot be said of the zero values of arbitrary types. As such, variables that could represent !0! and !1! were phased out in favour of functions that could generate those values for a given type as appropriate. As such, polymorphic variables, and in particular variables for !0! and !1!, were phased out in favour of functions that could generate those values for a given type as appropriate. \section{Polymorphic Functions} \label{poly-func-sec} The type variable !T! is transformed into a set of additional implicit parameters to !identity!, which encode sufficient information about !T! to create and return a variable of that type. \CFA{} passes the size and alignment of the type represented by an !otype! parameter, as well as a default constructor, copy constructor, assignment operator, and destructor. Types which do not have one or more of these operators visible cannot be bound to !otype! parameters, but may be bound to un-constrained !dtype! (data type'') type variables. Types that do not have one or more of these operators visible cannot be bound to !otype! parameters, but may be bound to un-constrained !dtype! (data type'') type variables. 
In this design, the runtime cost of polymorphism is spread over each polymorphic call, due to passing more arguments to polymorphic functions; the experiments in Chapter~\ref{generic-chap} indicate that this overhead is comparable to that of \CC{} virtual function calls. % \TODO{rerun experiments, possibly look at vtable variant} \subsection{Type Assertions} Since bare polymorphic types do not provide a great range of available operations, \CFA{} provides a \emph{type assertion} mechanism to provide further information about a type\footnote{Subscript not included in source code.\label{sub-foot}}: Since bare polymorphic types do not provide a great range of available operations, \CFA{} provides a \emph{type assertion} mechanism to provide further information about a type\footnote{This example introduces a convention used throughout this thesis of disambiguating overloaded names with subscripts; the subscripts do not appear in \CFA{} source code.}: \begin{cfa} Monomorphic specializations of polymorphic functions can themselves be used to satisfy type assertions. For instance, !twice! could have been defined like this\footref{sub-foot}: For instance, !twice! could have been defined like this: \begin{cfa} \end{cfa} Specializing this polymorphic function with !S = double! produces a monomorphic function which can  be used to satisfy the type assertion on !four_times!. Specializing this polymorphic function with !S = double! produces a monomorphic function which can be used to satisfy the type assertion on !four_times!. \CFACC{} accomplishes this by creating a wrapper function calling !twice!$_2$ with !S! bound to !double!, then providing this wrapper function to !four_times!\footnote{\lstinline{twice}$_2$ could also have had a type parameter named \lstinline{T}; \CFA{} specifies renaming of the type parameters, which would avoid the name conflict with the type variable \lstinline{T} of \lstinline{four_times}}. However, !twice!$_2$ also works for any type !S! 
that has an addition operator defined for it. Finding appropriate functions to satisfy type assertions is essentially a recursive case of expression resolution, as it takes a name (that of the type assertion) and attempts to match it to a suitable declaration in the current scope. Finding appropriate functions to satisfy type assertions is essentially a recursive case of expression resolution, as it takes a name (that of the type assertion) and attempts to match it to a suitable declaration in the current scope\footnote{\CFACC{} actually performs a type-unification computation for assertion satisfaction rather than an expression resolution computation; see Section~\ref{assn-sat-sec} for details.}. If a polymorphic function can be used to satisfy one of its own type assertions, this recursion may not terminate, as it is possible that that function is examined as a candidate for its own assertion unboundedly repeatedly. To avoid such infinite loops, \CFACC{} imposes a fixed limit on the possible depth of recursion, similar to that employed by most \CC{} compilers for template expansion; this restriction means that there are some otherwise semantically well-typed expressions that cannot be resolved by \CFACC{}. % \subsection{Deleted Declarations} % Particular type combinations can be exempted from matching a given polymorphic function through use of a \emph{deleted function declaration}: % \begin{cfa} % int somefn(char) = void; % \end{cfa} % This feature is based on a \CCeleven{} feature typically used to make a type non-copyable by deleting its copy constructor and assignment operator\footnote{In previous versions of \CC{}, a type could be made non-copyable by declaring a private copy constructor and assignment operator, but not defining either. 
This idiom is well-known, but depends on some rather subtle and \CC{}-specific rules about private members and implicitly-generated functions; the deleted function form is both clearer and less verbose.} or forbidding some interpretations of a polymorphic function by specifically deleting the forbidden overloads\footnote{Specific polymorphic function overloads can also be forbidden in previous \CC{} versions through use of template metaprogramming techniques, though this advanced usage is beyond the skills of many programmers. A similar effect can be produced on an ad-hoc basis at the appropriate call sites through use of casts to determine the function type. In both cases, the deleted-function form is clearer and more concise.}. % Deleted function declarations are implemented in \CFACC{} by adding them to the symbol table as usual, but with a flag set that indicates that the function is deleted. % If this deleted declaration is selected as the unique minimal-cost interpretation of an expression than an error is produced. \subsection{Traits} Traits, however, are significantly more powerful than nominal-inheritance interfaces; firstly, due to the scoping rules of the declarations that satisfy a trait's type assertions, a type may not satisfy a trait everywhere that that type is declared, as with !char! and the !nominal! trait above. Secondly, because \CFA{} is not object-oriented and types do not have a closed set of methods, existing C library types can be extended to implement a trait simply by writing the requisite functions\footnote{\CC{} only allows partial extension of C types, because constructors, destructors, conversions, and the assignment, indexing, and function-call operators may only be defined in a class; \CFA{} does not have any of these restrictions.}. 
Secondly, because \CFA{} is not object-oriented and types do not have a closed set of methods, existing C library types can be extended to implement a trait simply by writing the requisite functions\footnote{\CC{} only allows partial extension of C types, because constructors, destructors, conversions, and the assignment, indexing, and function-call operators may only be defined in a class; \CFA{} does not have any of these restrictions.}. Finally, traits may be used to declare a relationship among multiple types, a property that may be difficult or impossible to represent in nominal-inheritance type systems\footnote{This example uses \CFA{}'s reference types, described in Section~\ref{type-features-sec}.}: In this example above, !(list_iterator, int)! satisfies !pointer_like! by the user-defined dereference function, and !(list_iterator, list)! also satisfies !pointer_like! by the built-in dereference operator for pointers. Given a declaration !list_iterator it!, !*it! can be either an !int! or a !list!, with the meaning disambiguated by context (\eg{} !int x = *it;! interprets !*it! as !int!, while !(*it).value = 42;! interprets !*it! as !list!). While a nominal-inheritance system with associated types could model one of those two relationships by making !El! an associated type of !Ptr! in the !pointer_like! implementation, few such systems could model both relationships simultaneously. While a nominal-inheritance system with associated types could model one of those two relationships by making !El! an associated type of !Ptr! in the !pointer_like! implementation, \cbstart the author is unaware of any nominal-inheritance system that could model both relationships simultaneously. Further comparison of \CFA{} polymorphism with other languages can be found in Section~\ref{generic-related-sec}. 
\cbend The flexibility of \CFA{}'s implicit trait-satisfaction mechanism provides programmers with a great deal of power, but also blocks some optimization approaches for expression resolution. The ability of types to begin or cease to satisfy traits when declarations go into or out of scope makes caching of trait satisfaction judgments difficult, and the ability of traits to take multiple type parameters can lead to a combinatorial explosion of work in any attempt to pre-compute trait satisfaction relationships. The ability of types to begin or cease to satisfy traits when declarations go into or out of scope makes caching of trait satisfaction judgments difficult, and the ability of traits to take multiple type parameters can lead to a combinatorial explosion of work in any attempt to pre-compute trait satisfaction relationships. \cbstart \subsection{Deleted Declarations} Particular type combinations can be exempted from matching a given polymorphic function through use of a \emph{deleted function declaration}: \begin{cfa} int somefn(char) = void; \end{cfa} This feature is based on a \CCeleven{} feature typically used to make a type non-copyable by deleting its copy constructor and assignment operator\footnote{In previous versions of \CC{}, a type could be made non-copyable by declaring a private copy constructor and assignment operator, but not defining either. This idiom is well-known, but depends on some rather subtle and \CC{}-specific rules about private members and implicitly-generated functions; the deleted function form is both clearer and less verbose.} or forbidding some interpretations of a polymorphic function by specifically deleting the forbidden overloads\footnote{Specific polymorphic function overloads can also be forbidden in previous \CC{} versions through use of template metaprogramming techniques, though this advanced usage is beyond the skills of many programmers.}. 
Deleted function declarations are implemented in \CFACC{} by adding them to the symbol table as usual, but with a flag set that indicates that the function is deleted. If this deleted declaration is selected as the unique minimal-cost interpretation of an expression then an error is produced, allowing \CFA{} programmers to guide the expression resolver away from undesirable solutions. \cbend \section{Implicit Conversions} \label{implicit-conv-sec} One contribution of this thesis, discussed in Section~\ref{conv-cost-sec}, is a number of refinements to this cost model to more efficiently resolve polymorphic function calls. The expression resolution problem, which is the focus of Chapter~\ref{resolution-chap}, is to find the unique minimal-cost interpretation of each expression in the program, where all identifiers must be matched to a declaration, and implicit conversions or polymorphic bindings of the result of an expression may increase the cost of the expression. In the context of these implicit conversions, the expression resolution problem can be restated as finding the unique minimal-cost interpretation of each expression in the program, where all identifiers must be matched to a declaration, and implicit conversions or polymorphic bindings of the result of an expression may increase the cost of the expression. While semantically valid \CFA{} code always has such a unique minimal-cost interpretation, \CFACC{} must also be able to detect and report as errors expressions that have either no interpretation or multiple ambiguous minimal-cost interpretations. Note that which subexpression interpretation is minimal-cost may require contextual information to disambiguate. Given some type !T!, a !T&! (reference to !T!'') is essentially an automatically dereferenced pointer. 
These types allow seamless pass-by-reference for function parameters, without the extraneous dereferencing syntax present in C; they also allow easy aliasing of nested values with a similarly convenient syntax. A particular improvement is removing syntactic special cases for operators that take or return mutable values; for example, the use !a += b! of a compound assignment operator now matches its signature, !int& ?+=?(int&, int)!, as opposed to the syntactic special cases in earlier versions of \CFA{} to automatically take the address of the first argument to !+=! and to mark its return value as mutable. \cbstart The addition of reference types also eliminated two syntactic special-cases present in previous versions of \CFA{}. Considering a call !a += b! to a compound assignment operator, the previous declaration for that operator was !lvalue int ?+=?(int*, int)! -- to mutate the left argument, the built-in operators were special-cased to implicitly take the address of that argument, while the special !lvalue! syntax was used to mark the return type of a function as a mutable reference. With references, this declaration can be re-written as !int& ?+=?(int&, int)! -- the reference semantics generalize the implicit address-of on the left argument and allow it to be used in user-declared functions, while also subsuming the (now removed) !lvalue! syntax for function return types. \cbend The C standard makes heavy use of the concept of \emph{lvalue}, an expression with a memory address; its complement, \emph{rvalue} (a non-addressable expression) is not explicitly named in the standard. In \CFA{}, the distinction between lvalue and rvalue can be re-framed in terms of reference and non-reference types, with the benefit of being able to express the difference in user code. \CFA{} references preserve the existing qualifier-dropping implicit lvalue-to-rvalue conversion from C (\eg{} a !const volatile int&! can be implicitly copied to a bare !int!) 
\CFA{} references preserve the existing qualifier-dropping implicit lvalue-to-rvalue conversion from C (\eg{} a !const volatile int&! can be implicitly copied to a bare !int!). To make reference types more easily usable in legacy pass-by-value code, \CFA{} also adds an implicit rvalue-to-lvalue conversion, implemented by storing the value in a compiler-generated temporary variable and passing a reference to that temporary. To mitigate the !const! hell'' problem present in \CC{}, there is also a qualifier-dropping lvalue-to-lvalue conversion implemented by copying into a temporary: \begin{cfa} const int magic = 42; void inc_print( int& x ) { printf("%d\n", ++x); } print_inc( magic ); $\C{// legal; implicitly generated code in red below:}$ inc_print( magic ); $\C{// legal; implicitly generated code in red below:}$ int tmp = magic; $\C{// to safely strip const-qualifier}$ print_inc( tmp ); $\C{// tmp is incremented, magic is unchanged}$ inc_print( tmp ); $\C{// tmp is incremented, magic is unchanged}$ \end{cfa} \CFA{} supports all of these use cases without further added syntax. The key to this syntax-free feature support is an observation made by the author that the address of a reference is a lvalue. In C, the address-of operator !&x! can only be applied to lvalue expressions, and always produces an immutable rvalue; \CFA{} supports reference re-binding by assignment to the address of a reference, and pointers to references by repeating the address-of operator: In C, the address-of operator !&x! can only be applied to lvalue expressions, and always produces an immutable rvalue; \CFA{} supports reference re-binding by assignment to the address of a reference\footnote{\cbstart The syntactic difference between reference initialization and reference assignment is unfortunate, but preserves the ability to pass function arguments by reference (a reference initialization context) without added syntax. 
\cbend }, and pointers to references by repeating the address-of operator: \begin{cfa} \end{cfa} For better compatibility with C, the \CFA{} team has chosen not to differentiate function overloads based on top-level reference types, and as such their contribution to the difficulty of \CFA{} expression resolution is largely restricted to the implementation details of normalization conversions and adapters. \subsection{Resource Management} For better compatibility with C, the \CFA{} team has chosen not to differentiate function overloads based on top-level reference types, and as such their contribution to the difficulty of \CFA{} expression resolution is largely restricted to the implementation details of matching reference to non-reference types during type-checking. \subsection{Resource Management} \label{ctor-sec} \CFA{} also supports the RAII (Resource Acquisition is Initialization'') idiom originated by \CC{}, thanks to the object lifetime work of Robert Schluntz \cite{Schluntz17}.
• ## doc/theses/aaron_moss_PhD/phd/conclusion.tex

 rffaedcd The combination of these practical improvements and added features significantly improve the viability of \CFA{} as a practical programming language. Further improvements to the \CFA{} type-system are still possible, however. Further improvements to the \CFA{} type system are still possible, however. One area suggested by this work is development of a scheme for user-defined conversions; to integrate properly with the \CFA{} conversion model, there would need to be a distinction between safe and unsafe conversions, and possibly a way to denote conversions as explicit-only or non-chainable. Another place for ongoing effort is improvement of compilation performance; I believe the most promising direction for that is rebuilding the \CFA{} compiler on a different framework than Bilson's \CFACC{}. Another place for ongoing effort is improvement of compilation performance; I believe the most promising direction for that effort is rebuilding the \CFA{} compiler on a different framework than Bilson's \CFACC{}. The resolver prototype presented in this work has good performance and already has the basics of \CFA{} semantics implemented, as well as many of the necessary core data structures, and would be a viable candidate for a new compiler architecture. An alternate approach would be to fork an existing C compiler such as Clang~\cite{Clang}, which would need to be modified to use one of the resolution algorithms discussed here, as well as various other features introduced by Bilson~\cite{Bilson03}. \cbstart More generally, the algorithmic techniques described in this thesis may be useful to implementors of other programming languages. In particular, the demonstration of practical performance for polymorphic return-type inference suggests the possibility of eliding return-type-only template parameters in \CC{} function calls, though integrating such an extension into \CC{} expression resolution in a backwards-compatible manner may be challenging. 
The \CFA{} expression resolution problem also bears some similarity to the \emph{local type inference} model put forward by Pierce \& Turner \cite{Pierce00} and Odersky \etal{} \cite{Odersky01}; compiler implementors for languages such as Scala \cite{Scala} that perform type inference based on this model may be able to profitably adapt the algorithms and data structures presented in this thesis. \cbend
• ## doc/theses/aaron_moss_PhD/phd/evaluation/algo-summary.dat

 rffaedcd "\\textsc{co-dca-inc}"  101     50      2824    2 "\\textsc{co-dca-per}"  103     12      1339    2 "\\textsc{td-imm-bas}"  64      125     2620    3 "\\textsc{td-imm-inc}"  62      322     2717    3 "\\textsc{td-def-bas}"  67      18      189     3 "\\textsc{td-def-inc}"  66      40      263     3 "\\textsc{td-imm-bas}"  64      125     2620    3 "\\textsc{td-imm-inc}"  62      322     2717    3 "\\textsc{td-dca-bas}"  67      18      191     3 "\\textsc{td-dca-inc}"  66      40      258     3
• ## doc/theses/aaron_moss_PhD/phd/evaluation/algo-summary.gp

 rffaedcd set yrange [0:80] set label "125" at 20.25,82 set label "322" at 21.75,82 set label "125" at 18.25,82 set label "322" at 19.75,82 plot 'evaluation/algo-summary.dat' using ($5 == 1 ?$3 : 1/0):xtic(1) with boxes, \ '' using ($5 == 2 ?$3 : 1/0):xtic(1) with boxes, \
• ## doc/theses/aaron_moss_PhD/phd/evaluation/cfa-plots.gp

 rffaedcd set output BUILD."cfa-time.tex" set xlabel "\\textsc{cfa-co} runtime (ms)" set ylabel "runtime (ms)" set xlabel "\\textsc{cfa-co} runtime (s)" set ylabel "runtime (s)" set logscale xy set key outside plot for [i=2:6] 'evaluation/cfa-time.tsv' using 2:i title columnheader plot for [i=2:5] 'evaluation/cfa-time.tsv' using 2:i title columnheader # MEMORY # set ylabel "peak memory (MB)" plot for [i=2:6] 'evaluation/cfa-mem-by-time.tsv' using 7:(column(i)/1000) title columnheader plot for [i=2:5] 'evaluation/cfa-mem-by-time.tsv' using 7:(column(i)/1000) title columnheader # SPEEDUP # set output BUILD."cfa-speedup.tex" set ylabel "speedup w.r.t. baseline" unset logscale y plot 'evaluation/cfa-time.tsv' using 2:(column(2)/column(2)) title 'baseline', \ '' using 2:(column(2)/column(3)) title columnheader, \ '' using 2:(column(3)/column(4)) title 'inter-round', \ '' using 2:(column(4)/column(5)) title columnheader # # RUNTIME SPEEDUP #
• ## doc/theses/aaron_moss_PhD/phd/evaluation/per-prob-scatter.gp

 rffaedcd set output BUILD."per-prob-assns.tex" set xlabel "assertions resolved" set xlabel "assertions satisfied" plot for [i=1:words(tests)] 'evaluation/per_prob/'.word(tests, i).'-per-prob.csv' using 6:(column(7)/1000) title word(tests, i)

Note: See TracChangeset for help on using the changeset viewer.