Changeset b21c77a


Timestamp: Jun 29, 2018, 4:14:15 PM (5 years ago)
Author: Aaron Moss <a3moss@…>
Branches: new-env
Children: 184557e
Parents: 97397a26 (diff), 28f3a19 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge remote-tracking branch 'origin/with_gc' into new-env

Files: 19 added, 4 deleted, 121 edited, 3 moved

  • Jenkins/TestRegen

    r97397a26 rb21c77a  
    3333                        email()
    3434                }
    35         } 
     35        }
    3636        catch (Exception caughtError) {
    3737                email_error()
     
    6565
    6666        def install_dir = pwd tmp: true
    67                
     67
    6868        //Configure the conpilation (Output is not relevant)
    6969        //Use the current directory as the installation target so nothing
     
    101101                def email_subject = "[cforall dashboard][TEST REGEN# ${env.BUILD_NUMBER}] - Result"
    102102                def email_body = """This is an automated email from the Jenkins build machine. It was
    103 generated http://plg2:8082/dashboard.
     103generated https://cforall.uwaterloo.ca:8082/dashboard.html.
    104104
    105105Please apply the required changes using the following method :
     
    118118        def email_subject = "[cforall dashboard][TEST REGEN# ${env.BUILD_NUMBER}] - FAILURE"
    119119        def email_body = """This is an automated email from the Jenkins build machine. It was
    120 generated http://plg2:8082/dashboard.
     120generated https://cforall.uwaterloo.ca:8082/dashboard.html.
    121121
    122122Test generation encoutered an error please see attached logs
  • Jenkinsfile

    r97397a26 rb21c77a  
    174174
    175175def notify_server(int wait) {
    176         sh """curl --data "wait=${wait}" -X POST http://plg2:8082/jenkins/notify > /dev/null || true"""
     176        sh """curl --data "wait=${wait}" -X POST https://cforall.uwaterloo.ca:8082/jenkins/notify > /dev/null || true"""
    177177        return
    178178}
     
    329329
    330330                //Then publish the results
    331                 sh 'curl -H \'Content-Type: application/json\' --data @bench.json http://plg2:8082/jenkins/publish > /dev/null || true'
     331                sh 'curl -H \'Content-Type: application/json\' --data @bench.json https://cforall.uwaterloo.ca:8082/jenkins/publish > /dev/null || true'
    332332        }
    333333}
  • README

    r97397a26 rb21c77a  
    107107- the implicit coercion of structure types to the type of their first member is
    108108  not implemented
    109        
     109
    110110Who is responsible for cfa-cc?
    111111------------------------------
     
    115115The Cforall project maintains a web page:
    116116
    117         http://plg.uwaterloo.ca/~cforall
     117        https://cforall.uwaterloo.ca
  • doc/papers/OOPSLA17/Makefile

    r97397a26 rb21c77a  
    3333
    3434DOCUMENT = generic_types.pdf
     35BASE = ${basename ${DOCUMENT}}
    3536
    3637# Directives #
     
    4142
    4243clean :
    43         @rm -frv ${DOCUMENT} ${basename ${DOCUMENT}}.ps ${Build}
     44        @rm -frv ${DOCUMENT} ${BASE}.ps ${Build}
    4445
    4546# File Dependencies #
    4647
    47 ${DOCUMENT} : ${basename ${DOCUMENT}}.ps
     48${DOCUMENT} : ${BASE}.ps
    4849        ps2pdf $<
    4950
    50 ${basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi
     51${BASE}.ps : ${BASE}.dvi
    5152        dvips ${Build}/$< -o $@
    5253
    53 ${basename ${DOCUMENT}}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ../../bibliography/pl.bib
     54${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     55                ../../bibliography/pl.bib | ${Build}
    5456        # Must have *.aux file containing citations for bibtex
    5557        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
     
    6365## Define the default recipes.
    6466
    65 ${Build}:
     67${Build} :
    6668        mkdir -p ${Build}
    6769
     
    6971        gnuplot -e Build="'${Build}/'" evaluation/timing.gp
    7072
    71 %.tex : %.fig
     73%.tex : %.fig | ${Build}
    7274        fig2dev -L eepic $< > ${Build}/$@
    7375
    74 %.ps : %.fig
     76%.ps : %.fig | ${Build}
    7577        fig2dev -L ps $< > ${Build}/$@
    7678
    77 %.pstex : %.fig
     79%.pstex : %.fig | ${Build}
    7880        fig2dev -L pstex $< > ${Build}/$@
    7981        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
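
    The recurring edit in this Makefile (and in the concurrency Makefile below) moves ${Build} behind the `|` in each prerequisite list, turning it into an order-only prerequisite: the build directory must exist before a recipe runs, but its timestamp no longer marks the target out of date. A minimal sketch of the idiom, assuming GNU make and using placeholder names (BUILD, paper.tex, paper.pdf) rather than this repository's actual targets; recipe lines are tab-indented:

        BUILD = build

        # Everything after the | is an order-only prerequisite: make builds
        # ${BUILD} if it is missing, but ignores its timestamp, so files
        # written into the directory do not retrigger this rule.
        paper.pdf : paper.tex | ${BUILD}
        	pdflatex -output-directory=${BUILD} paper.tex
        	cp ${BUILD}/paper.pdf .

        ${BUILD} :
        	mkdir -p ${BUILD}

    With a normal prerequisite, every file written into ${BUILD} would bump the directory's mtime and force a rebuild, which is why the changeset drops ${Build} from the .dvi dependency lists and re-adds it after the |.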
  • doc/papers/concurrency/Makefile

    r97397a26 rb21c77a  
    2020
    2121FIGURES = ${addsuffix .tex, \
    22 monitor \
    23 ext_monitor \
    2422int_monitor \
    2523dependency \
     
    2725
    2826PICTURES = ${addsuffix .pstex, \
     27monitor \
     28ext_monitor \
    2929system \
    3030monitor_structs \
     
    5959        dvips ${Build}/$< -o $@
    6060
    61 ${BASE}.dvi : Makefile ${Build} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    62                 annex/local.bib ../../bibliography/pl.bib
     61${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     62                annex/local.bib ../../bibliography/pl.bib | ${Build}
    6363        # Must have *.aux file containing citations for bibtex
    6464        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
    65         ${BibTeX} ${Build}/${basename $@}
     65        -${BibTeX} ${Build}/${basename $@}
    6666        # Some citations reference others so run again to resolve these citations
    6767        ${LaTeX} ${basename $@}.tex
    68         ${BibTeX} ${Build}/${basename $@}
     68        -${BibTeX} ${Build}/${basename $@}
    6969        # Run again to finish citations
    7070        ${LaTeX} ${basename $@}.tex
     
    7272## Define the default recipes.
    7373
    74 ${Build}:
     74${Build} :
    7575        mkdir -p ${Build}
    7676
    77 ${BASE}.out.ps: ${Build}
     77${BASE}.out.ps : | ${Build}
    7878        ln -fs ${Build}/Paper.out.ps .
    7979
    80 WileyNJD-AMA.bst:
     80WileyNJD-AMA.bst :
    8181        ln -fs ../AMA/AMA-stix/ama/WileyNJD-AMA.bst .
    8282
    83 %.tex : %.fig ${Build}
     83%.tex : %.fig | ${Build}
    8484        fig2dev -L eepic $< > ${Build}/$@
    8585
    86 %.ps : %.fig ${Build}
     86%.ps : %.fig | ${Build}
    8787        fig2dev -L ps $< > ${Build}/$@
    8888
    89 %.pstex : %.fig ${Build}
     89%.pstex : %.fig | ${Build}
    9090        fig2dev -L pstex $< > ${Build}/$@
    9191        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
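
    The other recipe-level change in this Makefile prefixes the ${BibTeX} commands with `-`, which tells make to report a failing command but carry on with the rest of the recipe instead of aborting. That is a plausible fit for bibtex, which can exit non-zero on a first pass (no citations yet) or on warnings. A small illustrative sketch under those assumptions, with placeholder file names, not this repository's actual rule:

        # The leading - makes make ignore a non-zero exit status from that
        # command, so a bibtex pass that only produces warnings (or finds no
        # citations yet) does not abort the remaining latex passes.
        report.dvi : report.tex
        	latex report.tex
        	-bibtex report
        	latex report.tex
        	latex report.tex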
  • doc/papers/concurrency/Paper.tex

    r97397a26 rb21c77a  
    5656\newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}}
    5757\newcommand{\Emph}[2][red]{{\color{#1}\textbf{\emph{#2}}}}
    58 \newcommand{\R}[1]{\Textbf{#1}}
    59 \newcommand{\B}[1]{{\Textbf[blue]{#1}}}
    60 \newcommand{\G}[1]{{\Textbf[OliveGreen]{#1}}}
    6158\newcommand{\uC}{$\mu$\CC}
    62 \newcommand{\cit}{\textsuperscript{[Citation Needed]}\xspace}
    63 \newcommand{\TODO}{{\Textbf{TODO}}}
     59\newcommand{\TODO}[1]{{\Textbf{#1}}}
    6460
    6561%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     
    258254\section{Introduction}
    259255
    260 This paper provides a minimal concurrency \newterm{Abstract Program Interface} (API) that is simple, efficient and can be used to build other concurrency features.
     256This paper provides a minimal concurrency \newterm{Application Program Interface} (API) that is simple, efficient and can be used to build other concurrency features.
    261257While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master.
    262258An easier approach for programmers is to support higher-level constructs as the basis of concurrency.
     
    275271Hence, there are two problems to be solved: concurrency and parallelism.
    276272While these two concepts are often combined, they are distinct, requiring different tools~\cite[\S~2]{Buhr05a}.
    277 Concurrency tools handle synchronization and mutual exclusion, while parallelism tools handle performance, cost and resource utilization.
     273Concurrency tools handle mutual exclusion and synchronization, while parallelism tools handle performance, cost, and resource utilization.
    278274
    279275The proposed concurrency API is implemented in a dialect of C, called \CFA.
     
    286282Extended versions and explanation of the following code examples are available at the \CFA website~\cite{Cforall} or in Moss~\etal~\cite{Moss18}.
    287283
    288 \CFA is an extension of ISO-C, and hence, supports all C paradigms.
     284\CFA is a non-object-oriented extension of ISO-C, and hence, supports all C paradigms.
    289285%It is a non-object-oriented system-language, meaning most of the major abstractions have either no runtime overhead or can be opted out easily.
    290 Like C, the basics of \CFA revolve around structures and functions.
     286Like C, the building blocks of \CFA are structures and routines.
    291287Virtually all of the code generated by the \CFA translator respects C memory layouts and calling conventions.
    292288While \CFA is not an object-oriented language, lacking the concept of a receiver (\eg @this@) and nominal inheritance-relationships, C does have a notion of objects: ``region of data storage in the execution environment, the contents of which can represent values''~\cite[3.15]{C11}.
     
    300296int x = 1, y = 2, z = 3;
    301297int * p1 = &x, ** p2 = &p1,  *** p3 = &p2,      $\C{// pointers to x}$
    302         `&` r1 = x,  `&&` r2 = r1,  `&&&` r3 = r2;      $\C{// references to x}$
     298    `&` r1 = x,   `&&` r2 = r1,   `&&&` r3 = r2;        $\C{// references to x}$
    303299int * p4 = &z, `&` r4 = z;
    304300
     
    349345        'with' '(' $\emph{expression-list}$ ')' $\emph{compound-statement}$
    350346\end{cfa}
    351 and may appear as the body of a function or nested within a function body.
     347and may appear as the body of a routine or nested within a routine body.
    352348Each expression in the expression-list provides a type and object.
    353349The type must be an aggregate type.
     
    360356
    361357\CFA maximizes the ability to reuse names via overloading to aggressively address the naming problem.
    362 Both variables and functions may be overloaded, where selection is based on types, and number of returns (as in Ada~\cite{Ada}) and arguments.
     358Both variables and routines may be overloaded, where selection is based on types, and number of returns (as in Ada~\cite{Ada}) and arguments.
    363359\begin{cquote}
    364360\vspace*{-\baselineskip}%???
     
    415411\end{cquote}
    416412Overloading is important for \CFA concurrency since the runtime system relies on creating different types to represent concurrency objects.
    417 Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions to prevent name clashes.
    418 As seen in Section~\ref{basics}, function @main@ is heavily overloaded.
    419 
    420 Variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name:
     413Therefore, overloading eliminates long prefixes and other naming conventions to prevent name clashes.
     414As seen in Section~\ref{basics}, routine @main@ is heavily overloaded.
     415For example, variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name:
    421416\begin{cfa}
    422417struct S { int `i`; int j; double m; } s;
     
    432427}
    433428\end{cfa}
    434 For parallel semantics, both @s.i@ and @t.i@ are visible the same type, so only @i@ is ambiguous without qualification.
     429For parallel semantics, both @s.i@ and @t.i@ are visible with the same type, so only @i@ is ambiguous without qualification.
    435430
    436431
     
    438433
    439434Overloading also extends to operators.
    440 Operator-overloading syntax names a routine with the operator symbol and question marks for the operands:
     435Operator-overloading syntax creates a routine name with an operator symbol and question marks for the operands:
    441436\begin{cquote}
    442437\lstDeleteShortInline@%
     
    472467\end{cquote}
    473468While concurrency does not use operator overloading directly, it provides an introduction for the syntax of constructors.
    474 
    475 
    476 \subsection{Parametric Polymorphism}
    477 \label{s:ParametricPolymorphism}
    478 
    479 The signature feature of \CFA is parametric-polymorphic functions~\cite{} with functions generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types.
    480 For example, the following sum function works for any type that supports construction from 0 and addition:
    481 \begin{cfa}
    482 forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and +
    483 T sum( T a[$\,$], size_t size ) {
    484         `T` total = { `0` };                                    $\C{// initialize by 0 constructor}$
    485         for ( size_t i = 0; i < size; i += 1 )
    486                 total = total `+` a[i];                         $\C{// select appropriate +}$
    487         return total;
    488 }
    489 S sa[5];
    490 int i = sum( sa, 5 );                                           $\C{// use S's 0 construction and +}$
    491 \end{cfa}
    492 
    493 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each function declaration:
    494 \begin{cfa}
    495 trait `sumable`( otype T ) {
    496         void `?{}`( T &, zero_t );                              $\C{// 0 literal constructor}$
    497         T `?+?`( T, T );                                                $\C{// assortment of additions}$
    498         T ?+=?( T &, T );
    499         T ++?( T & );
    500         T ?++( T & );
    501 };
    502 forall( otype T `| sumable( T )` )                      $\C{// use trait}$
    503 T sum( T a[$\,$], size_t size );
    504 \end{cfa}
    505 
    506 Assertions can be @otype@ or @dtype@.
    507 @otype@ refers to a ``complete'' object, \ie an object has a size, default constructor, copy constructor, destructor and an assignment operator.
    508 @dtype@ only guarantees an object has a size and alignment.
    509 
    510 Using the return type for discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@:
    511 \begin{cfa}
    512 forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); }
    513 int * ip = alloc();                                                     $\C{// select type and size from left-hand side}$
    514 double * dp = alloc();
    515 struct S {...} * sp = alloc();
    516 \end{cfa}
    517 where the return type supplies the type/size of the allocation, which is impossible in most type systems.
    518469
    519470
     
    544495\CFA also provides @new@ and @delete@, which behave like @malloc@ and @free@, in addition to constructing and destructing objects:
    545496\begin{cfa}
    546 {       struct S s = {10};                                              $\C{// allocation, call constructor}$
    547         ...
     497{
     498        ... struct S s = {10}; ...                              $\C{// allocation, call constructor}$
    548499}                                                                                       $\C{// deallocation, call destructor}$
    549500struct S * s = new();                                           $\C{// allocation, call constructor}$
     
    551502delete( s );                                                            $\C{// deallocation, call destructor}$
    552503\end{cfa}
    553 \CFA concurrency uses object lifetime as a means of synchronization and/or mutual exclusion.
     504\CFA concurrency uses object lifetime as a means of mutual exclusion and/or synchronization.
     505
     506
     507\subsection{Parametric Polymorphism}
     508\label{s:ParametricPolymorphism}
     509
     510The signature feature of \CFA is parametric-polymorphic routines~\cite{} with routines generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types.
     511For example, the following sum routine works for any type that supports construction from 0 and addition:
     512\begin{cfa}
     513forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and +
     514T sum( T a[$\,$], size_t size ) {
     515        `T` total = { `0` };                                    $\C{// initialize by 0 constructor}$
     516        for ( size_t i = 0; i < size; i += 1 )
     517                total = total `+` a[i];                         $\C{// select appropriate +}$
     518        return total;
     519}
     520S sa[5];
     521int i = sum( sa, 5 );                                           $\C{// use S's 0 construction and +}$
     522\end{cfa}
     523The builtin type @zero_t@ (and @one_t@) overload constant 0 (and 1) for a new types, where both 0 and 1 have special meaning in C.
     524
     525\CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each routine declaration:
     526\begin{cfa}
     527trait `sumable`( otype T ) {
     528        void `?{}`( T &, zero_t );                              $\C{// 0 literal constructor}$
     529        T `?+?`( T, T );                                                $\C{// assortment of additions}$
     530        T ?+=?( T &, T );
     531        T ++?( T & );
     532        T ?++( T & );
     533};
     534forall( otype T `| sumable( T )` )                      $\C{// use trait}$
     535T sum( T a[$\,$], size_t size );
     536\end{cfa}
     537
     538Assertions can be @otype@ or @dtype@.
     539@otype@ refers to a ``complete'' object, \ie an object has a size, default constructor, copy constructor, destructor and an assignment operator.
     540@dtype@ only guarantees an object has a size and alignment.
     541
     542Using the return type for discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@:
     543\begin{cfa}
     544forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); }
     545int * ip = alloc();                                                     $\C{// select type and size from left-hand side}$
     546double * dp = alloc();
     547struct S {...} * sp = alloc();
     548\end{cfa}
     549where the return type supplies the type/size of the allocation, which is impossible in most type systems.
    554550
    555551
     
    584580\subsection{\protect\CFA's Thread Building Blocks}
    585581
    586 An important missing feature in C is threading\footnote{While the C11 standard defines a ``threads.h'' header, it is minimal and defined as optional.
     582An important missing feature in C is threading\footnote{While the C11 standard defines a \protect\lstinline@threads.h@ header, it is minimal and defined as optional.
    587583As such, library support for threading is far from widespread.
    588 At the time of writing the paper, neither \protect\lstinline|gcc| nor \protect\lstinline|clang| support ``threads.h'' in their standard libraries.}.
    589 On modern architectures, a lack of threading is unacceptable~\cite{Sutter05, Sutter05b}, and therefore existing and new programming languages must have tools for writing efficient concurrent programs to take advantage of parallelism.
     584At the time of writing the paper, neither \protect\lstinline@gcc@ nor \protect\lstinline@clang@ support \protect\lstinline@threads.h@ in their standard libraries.}.
     585In modern programming languages, a lack of threading is unacceptable~\cite{Sutter05, Sutter05b}, and therefore existing and new programming languages must have tools for writing efficient concurrent programs to take advantage of parallelism.
    590586As an extension of C, \CFA needs to express these concepts in a way that is as natural as possible to programmers familiar with imperative languages.
    591587Furthermore, because C is a system-level language, programmers expect to choose precisely which features they need and which cost they are willing to pay.
     
    625621\newbox\myboxA
    626622\begin{lrbox}{\myboxA}
    627 \begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     623\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    628624`int f1, f2, state = 1;`   // single global variables
    629625int fib() {
     
    642638        }
    643639}
    644 \end{lstlisting}
     640\end{cfa}
    645641\end{lrbox}
    646642
    647643\newbox\myboxB
    648644\begin{lrbox}{\myboxB}
    649 \begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     645\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    650646#define FIB_INIT `{ 0, 1 }`
    651647typedef struct { int f2, f1; } Fib;
     
    664660        }
    665661}
    666 \end{lstlisting}
     662\end{cfa}
    667663\end{lrbox}
    668664
     
    677673\newbox\myboxA
    678674\begin{lrbox}{\myboxA}
    679 \begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     675\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    680676`coroutine` Fib { int fn; };
    681677void main( Fib & fib ) with( fib ) {
     
    697693        }
    698694}
    699 \end{lstlisting}
     695\end{cfa}
    700696\end{lrbox}
    701697\newbox\myboxB
    702698\begin{lrbox}{\myboxB}
    703 \begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     699\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    704700`coroutine` Fib { int ret; };
    705701void main( Fib & f ) with( fib ) {
     
    721717
    722718
    723 \end{lstlisting}
     719\end{cfa}
    724720\end{lrbox}
    725721\subfloat[3 States, internal variables]{\label{f:Coroutine3States}\usebox\myboxA}
     
    731727
    732728Using a coroutine, it is possible to express the Fibonacci formula directly without any of the C problems.
    733 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type:
    734 \begin{cfa}
    735 `coroutine` Fib { int fn; };
    736 \end{cfa}
    737 which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface functions, @next@.
     729Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, @`coroutine` Fib { int fn; }@, which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines, \eg @next@.
    738730Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main.
    739 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code has the three suspend points, representing the three states in the Fibonacci formula, to context switch back to the caller's resume.
    740 The interface function, @next@, takes a Fibonacci instance and context switches to it using @resume@;
     731The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code has the three suspend points, representing the three states in the Fibonacci formula, to context switch back to the caller's @resume@.
     732The interface routine @next@, takes a Fibonacci instance and context switches to it using @resume@;
    741733on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned.
    742734The first @resume@ is special because it cocalls the coroutine at its coroutine main and allocates the stack;
     
    769761\newbox\myboxA
    770762\begin{lrbox}{\myboxA}
    771 \begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     763\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    772764`coroutine` Format {
    773765        char ch;   // used for communication
     
    801793        }
    802794}
    803 \end{lstlisting}
     795\end{cfa}
    804796\end{lrbox}
    805797
    806798\newbox\myboxB
    807799\begin{lrbox}{\myboxB}
    808 \begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     800\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    809801struct Format {
    810802        char ch;
     
    838830        format( &fmt );
    839831}
    840 \end{lstlisting}
     832\end{cfa}
    841833\end{lrbox}
    842834\subfloat[\CFA Coroutine]{\label{f:CFAFmt}\usebox\myboxA}
     
    847839\end{figure}
    848840
    849 The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming function for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal functions.
    850 However, there is no stack growth because @resume@/@suspend@ context switch to existing stack-frames rather than create new ones.
    851 \newterm{Symmetric (full) coroutine}s have a coroutine call a resuming function for another coroutine, which eventually forms a resuming-call cycle.
     841The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming routine for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal routines.
     842However,@resume@/@suspend@ context switch to existing stack-frames rather than create new ones so there is no stack growth.
     843\newterm{Symmetric (full) coroutine}s have a coroutine call a resuming routine for another coroutine, which eventually forms a resuming-call cycle.
    852844(The trivial cycle is a coroutine resuming itself.)
    853845This control flow is similar to recursion for normal routines, but again there is no stack growth from the context switch.
     
    931923Figure~\ref{f:ProdCons} shows a producer/consumer symmetric-coroutine performing bi-directional communication.
    932924Since the solution involves a full-coroutining cycle, the program main creates one coroutine in isolation, passes this coroutine to its partner, and closes the cycle at the call to @start@.
    933 The @start@ function communicates both the number of elements to be produced and the consumer into the producer's coroutine structure.
     925The @start@ routine communicates both the number of elements to be produced and the consumer into the producer's coroutine structure.
    934926Then the @resume@ to @prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it.
    935927@prod@'s coroutine main starts, creates local variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer.
     
    937929The producer call to @delivery@ transfers values into the consumer's communication variables, resumes the consumer, and returns the consumer status.
    938930For the first resume, @cons@'s stack is initialized, creating local variables retained between subsequent activations of the coroutine.
    939 The consumer iterates until the @done@ flag is set, prints, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation).
     931The consumer iterates until the @done@ flag is set, prints the values delivered by the producer, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation).
    940932The call from the consumer to the @payment@ introduces the cycle between producer and consumer.
    941933When @payment@ is called, the consumer copies values into the producer's communication variable and a resume is executed.
     
    967959\end{cfa}
    968960and the programming language (and possibly its tool set, \eg debugger) may need to understand @baseCoroutine@ because of the stack.
    969 Furthermore, the execution of constructs/destructors is in the wrong order for certain operations, \eg for threads;
    970 \eg, if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived.
     961Furthermore, the execution of constructs/destructors is in the wrong order for certain operations.
     962For example, for threads if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived.
    971963
    972964An alternatively is composition:
     
    980972However, there is nothing preventing wrong placement or multiple declarations.
    981973
    982 For coroutines as for threads, many implementations are based on routine pointers or function objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}.
     974For coroutines as for threads, many implementations are based on routine pointers or routine objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}.
    983975For example, Boost implements coroutines in terms of four functor object-types:
    984976\begin{cfa}
     
    988980symmetric_coroutine<>::yield_type
    989981\end{cfa}
    990 Similarly, the canonical threading paradigm is often based on function pointers, \eg @pthread@~\cite{pthreads}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}.
     982Similarly, the canonical threading paradigm is often based on routine pointers, \eg @pthreads@~\cite{pthreads}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}.
    991983However, the generic thread-handle (identifier) is limited (few operations), unless it is wrapped in a custom type.
    992984\begin{cfa}
     
    1002994\end{cfa}
    1003995Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda-based coroutines adds very little.
     996
     997Note, the type @coroutine_t@ must be an abstract handle to the coroutine, because the coroutine descriptor and its stack are non-copyable.
     998Copying the coroutine descriptor results in copies being out of date with the current state of the stack.
     999Correspondingly, copying the stack results is copies being out of date with the coroutine descriptor, and pointers in the stack being out of date to data on the stack.
     1000(There is no mechanism in C to find all stack-specific pointers and update them as part of a copy.)
    10041001
    10051002The selected approach is to use language support by introducing a new kind of aggregate (structure):
     
    10141011Furthermore, implementing coroutines without language supports also displays the power of a programming language.
    10151012While this is ultimately the option used for idiomatic \CFA code, coroutines and threads can still be constructed without using the language support.
    1016 The reserved keyword eases use for the common cases.
    1017 
    1018 Part of the mechanism to generalize coroutines is using a \CFA trait, which defines a coroutine as anything satisfying the trait @is_coroutine@, and this trait is used to restrict coroutine-manipulation functions:
    1019 \begin{cfa}
    1020 trait is_coroutine( dtype T ) {
    1021       void main( T & this );
    1022       coroutine_desc * get_coroutine( T & this );
     1013The reserved keyword simply eases use for the common cases.
     1014
     1015Part of the mechanism to generalize coroutines is using a \CFA trait, which defines a coroutine as anything satisfying the trait @is_coroutine@, and this trait is used to restrict coroutine-manipulation routines:
     1016\begin{cfa}
     1017trait is_coroutine( `dtype` T ) {
     1018        void main( T & );
     1019        coroutine_desc * get_coroutine( T & );
    10231020};
    1024 forall( dtype T | is_coroutine(T) ) void get_coroutine( T & );
    1025 forall( dtype T | is_coroutine(T) ) void suspend( T & );
    1026 forall( dtype T | is_coroutine(T) ) void resume( T & );
    1027 \end{cfa}
    1028 This definition ensures there is a statically-typed @main@ function that is the starting point (first stack frame) of a coroutine.
    1029 No return value or additional parameters are necessary for this function, because the coroutine type allows an arbitrary number of interface functions with corresponding arbitrary typed input/output values.
    1030 As well, any object passed to @suspend@ and @resume@ is a coroutine since it must satisfy the @is_coroutine@ trait to compile.
    1031 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine.
     1021forall( `dtype` T | is_coroutine(T) ) void suspend( T & );
     1022forall( `dtype` T | is_coroutine(T) ) void resume( T & );
     1023\end{cfa}
     1024The @dtype@ property of the trait ensures the coroutine descriptor is non-copyable, so all coroutines must be passed by reference (pointer).
     1025The routine definitions ensures there is a statically-typed @main@ routine that is the starting point (first stack frame) of a coroutine, and a mechanism to get (read) the currently executing coroutine handle.
     1026The @main@ routine has no return value or additional parameters because the coroutine type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values versus fixed ones.
     1027The generic routines @suspend@ and @resume@ can be redefined, but any object passed to them is a coroutine since it must satisfy the @is_coroutine@ trait to compile.
     1028The advantage of this approach is that users can easily create different types of coroutines, \eg changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine, and possibly redefining @suspend@ and @resume@.
    10321029The \CFA keyword @coroutine@ implicitly implements the getter and forward declarations required for implementing the coroutine main:
    10331030\begin{cquote}
     
    10391036};
    10401037\end{cfa}
    1041 & {\Large $\Rightarrow$} &
     1038&
     1039{\Large $\Rightarrow$}
     1040&
    10421041\begin{tabular}{@{}ccc@{}}
    10431042\begin{cfa}
     
    10731072Like coroutines and for the same design reasons, the selected approach for user threads is to use language support by introducing a new kind of aggregate (structure) and a \CFA trait:
    10741073\begin{cquote}
    1075 \begin{tabular}{@{}c@{\hspace{2\parindentlnth}}c@{}}
     1074\begin{tabular}{@{}c@{\hspace{3\parindentlnth}}c@{}}
    10761075\begin{cfa}
    10771076thread myThread {
     
    10831082&
    10841083\begin{cfa}
    1085 trait is_thread( dtype T ) {
    1086       void main( T & this );
    1087       thread_desc * get_thread( T & this );
    1088       void ^?{}( T & `mutex` this );
     1084trait is_thread( `dtype` T ) {
     1085      void main( T & );
     1086      thread_desc * get_thread( T & );
     1087      void ^?{}( T & `mutex` );
    10891088};
    10901089\end{cfa}
     
    10921091\end{cquote}
    10931092(The qualifier @mutex@ for the destructor parameter is discussed in Section~\ref{s:Monitors}.)
    1094 Like a coroutine, the statically-typed @main@ function is the starting point (first stack frame) of a user thread.
     1093Like a coroutine, the statically-typed @main@ routine is the starting point (first stack frame) of a user thread.
    10951094The difference is that a coroutine borrows a thread from its caller, so the first thread resuming a coroutine creates an instance of @main@;
    10961095whereas, a user thread receives its own thread from the runtime system, which starts in @main@ as some point after the thread constructor is run.\footnote{
    1097 The \lstinline@main@ function is already a special routine in C (where the program begins), so it is a natural extension of the semantics to use overloading to declare mains for different coroutines/threads (the normal main being the main of the initial thread).}
    1098 No return value or additional parameters are necessary for this function, because the task type allows an arbitrary number of interface functions with corresponding arbitrary typed input/output values.
     1096The \lstinline@main@ routine is already a special routine in C, \ie where the program's initial thread begins, so it is a natural extension of this semantics to use overloading to declare \lstinline@main@s for user coroutines and threads.}
     1097No return value or additional parameters are necessary for this routine because the task type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values.
    10991098
    11001099\begin{comment} % put in appendix with coroutine version ???
     
    11091108
    11101109In this example, threads of type @foo@ start execution in the @void main(foo &)@ routine, which prints @"Hello World!".@ While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity.
    1111 With the static semantics it is trivial to write a thread type that takes a function pointer as a parameter and executes it on its stack asynchronously.
    1112 \begin{cfa}
    1113 typedef void (*voidFunc)(int);
    1114 
    1115 thread FuncRunner {
    1116         voidFunc func;
     1110With the static semantics it is trivial to write a thread type that takes a routine pointer as a parameter and executes it on its stack asynchronously.
     1111\begin{cfa}
     1112typedef void (*voidRtn)(int);
     1113
     1114thread RtnRunner {
     1115        voidRtn func;
    11171116        int arg;
    11181117};
    11191118
    1120 void ?{}(FuncRunner & this, voidFunc inFunc, int arg) {
    1121         this.func = inFunc;
     1119void ?{}(RtnRunner & this, voidRtn inRtn, int arg) {
     1120        this.func = inRtn;
    11221121        this.arg  = arg;
    11231122}
    11241123
    1125 void main(FuncRunner & this) {
    1126         // thread starts here and runs the function
     1124void main(RtnRunner & this) {
     1125        // thread starts here and runs the routine
    11271126        this.func( this.arg );
    11281127}
     
    11331132
    11341133int main() {
    1135         FuncRunner f = {hello, 42};
     1134        RtnRunner f = {hello, 42};
    11361135        return 0?
    11371136}
    11381137\end{cfa}
    1139 A consequence of the strongly typed approach to main is that memory layout of parameters and return values to/from a thread are now explicitly specified in the \textbf{api}.
     1138A consequence of the strongly typed approach to main is that memory layout of parameters and return values to/from a thread are now explicitly specified in the \textbf{API}.
    11401139\end{comment}
    11411140
     
    11861185void main( Adder & adder ) with( adder ) {
    11871186    subtotal = 0;
    1188     for ( int c = 0; c < cols; c += 1 ) {
    1189                 subtotal += row[c];
    1190     }
     1187    for ( int c = 0; c < cols; c += 1 ) { subtotal += row[c]; }
    11911188}
    11921189int main() {
     
    12101207
    12111208
    1212 \section{Synchronization / Mutual Exclusion}
     1209\section{Mutual Exclusion / Synchronization}
    12131210
    12141211Uncontrolled non-deterministic execution is meaningless.
    1215 To reestablish meaningful execution requires mechanisms to reintroduce determinism (control non-determinism), called synchronization and mutual exclusion, where synchronization is a timing relationship among threads and mutual exclusion is an access-control mechanism on data shared by threads.
    1216 Since many deterministic challenges appear with the use of mutable shared state, some languages/libraries disallow it (Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka~\cite{Akka} (Scala)).
    1217 In these paradigms, interaction among concurrent objects is performed by stateless message-passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts (\eg channels~\cite{CSP,Go}).
    1218 However, in call/return-based languages, these approaches force a clear distinction (\ie introduce a new programming paradigm) between non-concurrent and concurrent computation (\ie function call versus message passing).
    1219 This distinction means a programmers needs to learn two sets of design patterns.
     1212To reestablish meaningful execution requires mechanisms to reintroduce determinism, \ie restrict non-determinism, called mutual exclusion and synchronization, where mutual exclusion is an access-control mechanism on data shared by threads, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}.
     1213Since many deterministic challenges appear with the use of mutable shared state, some languages/libraries disallow it, \eg Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka~\cite{Akka} (Scala).
     1214In these paradigms, interaction among concurrent objects is performed by stateless message-passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts, \eg channels~\cite{CSP,Go}.
     1215However, in call/return-based languages, these approaches force a clear distinction, \ie introduce a new programming paradigm, between regular and concurrent computation, \eg routine call versus message passing.
     1216Hence, a programmer must learn and manipulate two sets of design patterns.
    12201217While this distinction can be hidden away in library code, effective use of the library still has to take both paradigms into account.
    12211218In contrast, approaches based on statefull models more closely resemble the standard call/return programming-model, resulting in a single programming paradigm.
    12221219
    1223 At the lowest level, concurrent control is implemented as atomic operations, upon which different kinds of locks mechanism are constructed, \eg semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}.
     1220At the lowest level, concurrent control is implemented by atomic operations, upon which different kinds of locks mechanism are constructed, \eg semaphores~\cite{Dijkstra68b}, barriers, and path expressions~\cite{Campbell74}.
    12241221However, for productivity it is always desirable to use the highest-level construct that provides the necessary efficiency~\cite{Hochstein05}.
    1225 A newer approach is transactional memory~\cite{Herlihy93}.
     1222A newer approach for restricting non-determinism is transactional memory~\cite{Herlihy93}.
    12261223While this approach is pursued in hardware~\cite{Nakaike15} and system languages, like \CC~\cite{Cpp-Transactions}, the performance and feature set is still too restrictive to be the main concurrency paradigm for system languages, which is why it was rejected as the core paradigm for concurrency in \CFA.
    12271224
    1228 One of the most natural, elegant, and efficient mechanisms for synchronization and mutual exclusion for shared-memory systems is the \emph{monitor}.
    1229 Monitors were first proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}.
    1230 Many programming languages -- \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java} -- provide monitors as explicit language constructs.
    1231 In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as semaphores or locks to simulate monitors.
    1232 For these reasons, this project proposes monitors as the core concurrency construct, upon which even higher-level approaches can be easily constructed..
     1225One of the most natural, elegant, and efficient mechanisms for mutual exclusion and synchronization for shared-memory systems is the \emph{monitor}.
     1226First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, many concurrent programming-languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}.
     1227In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as mutex locks or semaphores to simulate monitors.
     1228For these reasons, \CFA selected monitors as the core high-level concurrency-construct, upon which higher-level approaches can be easily constructed.
    12331229
    12341230
     
    12361232
    12371233A group of instructions manipulating a specific instance of shared data that must be performed atomically is called an (individual) \newterm{critical-section}~\cite{Dijkstra65}.
    1238 A generalization is a \newterm{group critical-section}~\cite{Joung00}, where multiple tasks with the same session may use the resource simultaneously, but different sessions may not use the resource simultaneously.
     1234The generalization is called a \newterm{group critical-section}~\cite{Joung00}, where multiple tasks with the same session may use the resource simultaneously, but different sessions may not use the resource simultaneously.
    12391235The readers/writer problem~\cite{Courtois71} is an instance of a group critical-section, where readers have the same session and all writers have a unique session.
    1240 \newterm{Mutual exclusion} enforces the correction number of threads are using a critical section at the same time.
     1236\newterm{Mutual exclusion} enforces that the correct kind and number of threads are using a critical section.
    12411237
    12421238However, many solutions exist for mutual exclusion, which vary in terms of performance, flexibility and ease of use.
    12431239Methods range from low-level locks, which are fast and flexible but require significant attention for correctness, to higher-level concurrency techniques, which sacrifice some performance to improve ease of use.
    1244 Ease of use comes by either guaranteeing some problems cannot occur (\eg deadlock free), or by offering a more explicit coupling between shared data and critical section.
    1245 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (\eg reading/writing large types atomically).
    1246 However, a significant challenge with (low-level) locks is composability because it takes careful organization for multiple locks to be used while preventing deadlock.
    1247 Easing composability is another feature higher-level mutual-exclusion mechanisms offer.
     1240Ease of use comes by either guaranteeing some problems cannot occur, \eg deadlock free, or by offering a more explicit coupling between shared data and critical section.
     1241For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations, \eg reading/writing, for numerical types.
     1242However, a significant challenge with locks is composability because it takes careful organization for multiple locks to be used while preventing deadlock.
     1243Easing composability is another feature higher-level mutual-exclusion mechanisms can offer.
    12481244
    12491245
     
    12511247
    12521248Synchronization enforces relative ordering of execution, and synchronization tools provide numerous mechanisms to establish these timing relationships.
    1253 Low-level synchronization primitives offer good performance and flexibility at the cost of ease of use.
    1254 Higher-level mechanisms often simplify usage by adding better coupling between synchronization and data (\eg message passing), or offering a simpler solution to otherwise involved challenges, \eg barrier lock.
    1255 As mentioned above, synchronization can be expressed as guaranteeing that event \textit{X} always happens before \textit{Y}.
    1256 Often synchronization is used to order access to a critical section, \eg ensuring the next kind of thread to enter a critical section is a reader thread
    1257 If a writer thread is scheduled for next access, but another reader thread acquires the critical section first, the reader has \newterm{barged}.
    1258 Barging can result in staleness/freshness problems, where a reader barges ahead of a write and reads temporally stale data, or a writer barges ahead of another writer overwriting data with a fresh value preventing the previous value from having an opportunity to be read.
     1249Low-level synchronization primitives offer good performance and flexibility at the cost of ease of use;
     1250higher-level mechanisms often simplify usage by adding better coupling between synchronization and data, \eg message passing, or offering a simpler solution to otherwise involved challenges, \eg barrier lock.
     1251Often synchronization is used to order access to a critical section, \eg ensuring a reader thread is the next kind of thread to enter a critical section.
     1252If a writer thread is scheduled for next access, but another reader thread acquires the critical section first, that reader has \newterm{barged}.
     1253Barging can result in staleness/freshness problems, where a reader barges ahead of a writer and reads temporally stale data, or a writer barges ahead of another writer overwriting data with a fresh value preventing the previous value from ever being read (lost computation).
    12591254Preventing or detecting barging is an involved challenge with low-level locks, which can be made much easier by higher-level constructs.
    1260 This challenge is often split into two different approaches, barging avoidance and barging prevention.
    1261 Algorithms that allow a barger but divert it until later are avoiding the barger, while algorithms that preclude a barger from entering during synchronization in the critical section prevent the barger completely.
    1262 baton-pass locks~\cite{Andrews89} between threads instead of releasing the locks are said to be using barging prevention.
     1255This challenge is often split into two different approaches: barging avoidance and barging prevention.
     1256Algorithms that allow a barger, but divert it until later using current synchronization state (flags), are avoiding the barger;
     1257algorithms that preclude a barger from entering during synchronization in the critical section prevent barging completely.
     1258Techniques like baton-pass locks~\cite{Andrews89} between threads instead of unconditionally releasing locks is an example of barging prevention.
    12631259
    12641260
     
    12661262\label{s:Monitors}
    12671263
    1268 A \textbf{monitor} is a set of routines that ensure mutual-exclusion when accessing shared state.
    1269 More precisely, a monitor is a programming technique that associates mutual-exclusion to routine scopes, as opposed to mutex locks, where mutual-exclusion is defined by lock/release calls independently of any scoping of the calling routine.
    1270 This strong association eases readability and maintainability, at the cost of flexibility.
    1271 Note that both monitors and mutex locks, require an abstract handle to identify them.
    1272 This concept is generally associated with object-oriented languages like Java~\cite{Java} or \uC~\cite{uC++book} but does not strictly require OO semantics.
    1273 The only requirement is the ability to declare a handle to a shared object and a set of routines that act on it:
    1274 \begin{cfa}
    1275 typedef /*some monitor type*/ monitor;
    1276 int f(monitor & m);
    1277 
    1278 int main() {
    1279         monitor m;  // Handle m
    1280         f(m);       // Routine using handle
    1281 }
    1282 \end{cfa}
    1283 
    1284 % ======================================================================
    1285 % ======================================================================
    1286 \subsection{Call Semantics} \label{call}
    1287 % ======================================================================
    1288 % ======================================================================
    1289 The above monitor example displays some of the intrinsic characteristics.
    1290 First, it is necessary to use pass-by-reference over pass-by-value for monitor routines.
    1291 This semantics is important, because at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied.
    1292 Therefore, monitors are non-copy-able objects (@dtype@).
    1293 
    1294 Another aspect to consider is when a monitor acquires its mutual exclusion.
    1295 For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry.
    1296 Passthrough can occur for generic helper routines (@swap@, @sort@, \etc) or specific helper routines like the following to implement an atomic counter:
    1297 
    1298 \begin{cfa}
    1299 monitor counter_t { /*...see section $\ref{data}$...*/ };
    1300 
    1301 void ?{}(counter_t & nomutex this); // constructor
    1302 size_t ++?(counter_t & mutex this); // increment
    1303 
    1304 // need for mutex is platform dependent
    1305 void ?{}(size_t * this, counter_t & mutex cnt); // conversion
    1306 \end{cfa}
    1307 This counter is used as follows:
    1308 \begin{center}
    1309 \begin{tabular}{c @{\hskip 0.35in} c @{\hskip 0.35in} c}
    1310 \begin{cfa}
    1311 // shared counter
    1312 counter_t cnt1, cnt2;
    1313 
    1314 // multiple threads access counter
    1315 thread 1 : cnt1++; cnt2++;
    1316 thread 2 : cnt1++; cnt2++;
    1317 thread 3 : cnt1++; cnt2++;
    1318         ...
    1319 thread N : cnt1++; cnt2++;
    1320 \end{cfa}
    1321 \end{tabular}
    1322 \end{center}
    1323 Notice how the counter is used without any explicit synchronization and yet supports thread-safe semantics for both reading and writing, which is similar in usage to the \CC template @std::atomic@.
    1324 
    1325 Here, the constructor (@?{}@) uses the @nomutex@ keyword to signify that it does not acquire the monitor mutual-exclusion when constructing.
    1326 This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion.
    1327 Furthermore, it allows the implementation greater freedom when it initializes the monitor locking.
    1328 The prefix increment operator uses @mutex@ to protect the incrementing process from race conditions.
    1329 Finally, there is a conversion operator from @counter_t@ to @size_t@.
    1330 This conversion may or may not require the @mutex@ keyword depending on whether or not reading a @size_t@ is an atomic operation.
    1331 
    1332 For maximum usability, monitors use \textbf{multi-acq} semantics, which means a single thread can acquire the same monitor multiple times without deadlock.
    1333 For example, listing \ref{fig:search} uses recursion and \textbf{multi-acq} to print values inside a binary tree.
    1334 \begin{figure}
    1335 \begin{cfa}[caption={Recursive printing algorithm using \textbf{multi-acq}.},label={fig:search}]
    1336 monitor printer { ... };
    1337 struct tree {
    1338         tree * left, right;
    1339         char * value;
     1264A \textbf{monitor} is a set of routines that ensure mutual exclusion when accessing shared state.
     1265More precisely, a monitor is a programming technique that binds mutual exclusion to routine scope, as opposed to locks, where mutual-exclusion is defined by acquire/release calls, independent of lexical context (analogous to block and heap storage allocation).
     1266The strong association with the call/return paradigm eases programmability, readability and maintainability, at a slight cost in flexibility and efficiency.
     1267
     1268Note, like coroutines/threads, both locks and monitors require an abstract handle to reference them, because at their core, both mechanisms are manipulating non-copyable shared-state.
     1269Copying a lock is insecure because it is possible to copy an open lock and then use the open copy when the original lock is closed to simultaneously access the shared data.
     1270Copying a monitor is secure because both the lock and shared data are copies, but copying the shared data is meaningless because it no longer represents a unique entity.
     1271As for coroutines/tasks, a non-copyable (@dtype@) trait is used to capture this requirement, so all locks/monitors must be passed by reference (pointer).
     1272\begin{cfa}
     1273trait is_monitor( `dtype` T ) {
     1274        monitor_desc * get_monitor( T & );
     1275        void ^?{}( T & mutex );
    13401276};
    1341 void print(printer & mutex p, char * v);
    1342 
    1343 void print(printer & mutex p, tree * t) {
    1344         print(p, t->value);
    1345         print(p, t->left );
    1346         print(p, t->right);
    1347 }
    1348 \end{cfa}
    1349 \end{figure}
    1350 
    1351 Having both @mutex@ and @nomutex@ keywords can be redundant, depending on the meaning of a routine having neither of these keywords.
    1352 For example, it is reasonable that it should default to the safest option (@mutex@) when given a routine without qualifiers @void foo(counter_t & this)@, whereas assuming @nomutex@ is unsafe and may cause subtle errors.
    1353 On the other hand, @nomutex@ is the ``normal'' parameter behaviour, it effectively states explicitly that ``this routine is not special''.
    1354 Another alternative is making exactly one of these keywords mandatory, which provides the same semantics but without the ambiguity of supporting routines with neither keyword.
    1355 Mandatory keywords would also have the added benefit of being self-documented but at the cost of extra typing.
    1356 While there are several benefits to mandatory keywords, they do bring a few challenges.
    1357 Mandatory keywords in \CFA would imply that the compiler must know without doubt whether or not a parameter is a monitor or not.
     1277\end{cfa}
     1278
     1279
     1280\subsection{Mutex Acquisition}
     1281\label{s:MutexAcquisition}
     1282
     1283While correctness implicitly implies a monitor's mutual exclusion is acquired and released, there are implementation options about when and where the locking/unlocking occurs.
     1284(Much of this discussion also applies to basic locks.)
     1285For example, a monitor may need to be passed through multiple helper routines before it becomes necessary to acquire the monitor mutual-exclusion.
     1286\begin{cfa}[morekeywords=nomutex]
     1287monitor Aint { int cnt; };                                      $\C{// atomic integer counter}$
     1288void ?{}( Aint & `nomutex` this ) with( this ) { cnt = 0; } $\C{// constructor}$
     1289int ?=?( Aint & `mutex`$\(_{opt}\)$ lhs, int rhs ) with( lhs ) { cnt = rhs; } $\C{// conversions}$
     1290void ?{}( int & this, Aint & `mutex`$\(_{opt}\)$ v ) { this = v.cnt; }
     1291int ?=?( int & lhs, Aint & `mutex`$\(_{opt}\)$ rhs ) with( rhs ) { lhs = cnt; }
     1292int ++?( Aint & `mutex`$\(_{opt}\)$ this ) with( this ) { return ++cnt; } $\C{// increment}$
     1293\end{cfa}
     1294The @Aint@ constructor, @?{}@, uses the \lstinline[morekeywords=nomutex]@nomutex@ qualifier indicating mutual exclusion is unnecessary during construction because an object is inaccessible (private) until after it is initialized.
     1295(While a constructor may publish its address into a global variable, doing so generates a race-condition.)
     1296The conversion operators for initializing and assigning with a normal integer only need @mutex@, if reading/writing the implementation type is not atomic.
      1297Finally, the prefix increment operator, @++?@, is normally @mutex@ to protect the incrementing from race conditions, unless there is an atomic increment instruction for the implementation type.
     1298
     1299The atomic counter is used without any explicit mutual-exclusion and provides thread-safe semantics, which is similar to the \CC template @std::atomic@.
     1300\begin{cfa}
     1301Aint x, y, z;
     1302++x; ++y; ++z;                                                          $\C{// safe increment by multiple threads}$
     1303x = 2; y = 2; z = 2;                                            $\C{// conversions}$
     1304int i = x, j = y, k = z;
     1305i = x; j = y; k = z;
     1306\end{cfa}
     1307
     1308For maximum usability, monitors have \newterm{multi-acquire} semantics allowing a thread to acquire it multiple times without deadlock.
     1309For example, atomically printing the contents of a binary tree:
     1310\begin{cfa}
     1311monitor Tree {
      1312	Tree * left, * right;
     1313        // value
     1314};
     1315void print( Tree & mutex tree ) {                       $\C{// prefix traversal}$
     1316        // write value
     1317        print( tree->left );                                    $\C{// multiply acquire monitor lock on each recursion}$
     1318        print( tree->right );
     1319}
     1320\end{cfa}
     1321
      1322Mandatory monitor qualifiers have the benefit of being self-documenting, but requiring both @mutex@ and \lstinline[morekeywords=nomutex]@nomutex@ for all monitor parameters is redundant.
      1323Instead, one of the qualifier semantics can be the default, and the other stated explicitly.
     1324For example, assume the safe @mutex@ option for a monitor parameter because assuming \lstinline[morekeywords=nomutex]@nomutex@ may cause subtle errors.
     1325On the other hand, assuming \lstinline[morekeywords=nomutex]@nomutex@ is the \emph{normal} parameter behaviour, stating explicitly ``this parameter is not special''.
     1326Providing a default qualifier implies knowing whether a parameter is a monitor.
    13581327Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred.
    1359 For this reason, \CFA only has the @mutex@ keyword and uses no keyword to mean @nomutex@.
    1360 
    1361 The next semantic decision is to establish when @mutex@ may be used as a type qualifier.
    1362 Consider the following declarations:
    1363 \begin{cfa}
    1364 int f1(monitor & mutex m);
    1365 int f2(const monitor & mutex m);
    1366 int f3(monitor ** mutex m);
    1367 int f4(monitor * mutex m []);
    1368 int f5(graph(monitor *) & mutex m);
    1369 \end{cfa}
    1370 The problem is to identify which object(s) should be acquired.
    1371 Furthermore, each object needs to be acquired only once.
    1372 In the case of simple routines like @f1@ and @f2@ it is easy to identify an exhaustive list of objects to acquire on entry.
    1373 Adding indirections (@f3@) still allows the compiler and programmer to identify which object is acquired.
    1374 However, adding in arrays (@f4@) makes it much harder.
    1375 Array lengths are not necessarily known in C, and even then, making sure objects are only acquired once becomes non-trivial.
    1376 This problem can be extended to absurd limits like @f5@, which uses a graph of monitors.
    1377 To make the issue tractable, this project imposes the requirement that a routine may only acquire one monitor per parameter and it must be the type of the parameter with at most one level of indirection (ignoring potential qualifiers).
    1378 Also note that while routine @f3@ can be supported, meaning that monitor @**m@ is acquired, passing an array to this routine would be type-safe and yet result in undefined behaviour because only the first element of the array is acquired.
    1379 However, this ambiguity is part of the C type-system with respect to arrays.
    1380 For this reason, @mutex@ is disallowed in the context where arrays may be passed:
    1381 \begin{cfa}
    1382 int f1(monitor & mutex m);    // Okay : recommended case
    1383 int f2(monitor * mutex m);    // Not Okay : Could be an array
    1384 int f3(monitor mutex m []);  // Not Okay : Array of unknown length
    1385 int f4(monitor ** mutex m);   // Not Okay : Could be an array
    1386 int f5(monitor * mutex m []); // Not Okay : Array of unknown length
    1387 \end{cfa}
    1388 Note that not all array functions are actually distinct in the type system.
    1389 However, even if the code generation could tell the difference, the extra information is still not sufficient to extend meaningfully the monitor call semantic.
    1390 
    1391 Unlike object-oriented monitors, where calling a mutex member \emph{implicitly} acquires mutual-exclusion of the receiver object, \CFA uses an explicit mechanism to specify the object that acquires mutual-exclusion.
    1392 A consequence of this approach is that it extends naturally to multi-monitor calls.
    1393 \begin{cfa}
    1394 int f(MonitorA & mutex a, MonitorB & mutex b);
    1395 
    1396 MonitorA a;
    1397 MonitorB b;
    1398 f(a,b);
    1399 \end{cfa}
    1400 While OO monitors could be extended with a mutex qualifier for multiple-monitor calls, no example of this feature could be found.
    1401 The capability to acquire multiple locks before entering a critical section is called \emph{\textbf{bulk-acq}}.
    1402 In practice, writing multi-locking routines that do not lead to deadlocks is tricky.
     1328For this reason, \CFA requires programmers to identify the kind of parameter with the @mutex@ keyword and uses no keyword to mean \lstinline[morekeywords=nomutex]@nomutex@.
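As an illustrative sketch (the routine names are hypothetical), the default applies to ordinary helper routines that merely pass the monitor along, while @mutex@ marks the routines that enter the critical section:
\begin{cfa}
monitor M { ... };
void log_access( M & m );                       $\C{// no qualifier: nomutex, monitor is only passed through}$
void update( M & mutex m ) {                    $\C{// mutex: acquire m on entry, release on return}$
	log_access( m );                            $\C{// helper executes inside the critical section}$
	// modify the shared state of m
}
\end{cfa}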
     1329
     1330The next semantic decision is establishing which parameter \emph{types} may be qualified with @mutex@.
     1331Given:
     1332\begin{cfa}
     1333monitor M { ... }
     1334int f1( M & mutex m );
     1335int f2( M * mutex m );
     1336int f3( M * mutex m[] );
     1337int f4( stack( M * ) & mutex m );
     1338\end{cfa}
     1339the issue is that some of these parameter types are composed of multiple objects.
     1340For @f1@, there is only a single parameter object.
     1341Adding indirection in @f2@ still identifies a single object.
      1342However, the array in @f3@ introduces multiple objects.
      1343While shown shortly, acquiring multiple monitors is possible;
      1344however, array lengths are often unknown in C.
     1345This issue is exacerbated in @f4@, where the data structure must be safely traversed to acquire all of its elements.
     1346
     1347To make the issue tractable, \CFA only acquires one monitor per parameter with at most one level of indirection.
      1348However, the C type-system has an ambiguity with respect to arrays.
     1349Is the argument for @f2@ a single object or an array of objects?
     1350If it is an array, only the first element of the array is acquired, which seems unsafe;
     1351hence, @mutex@ is disallowed for array parameters.
     1352\begin{cfa}
     1353int f1( M & mutex m );                                          $\C{// allowed: recommended case}$
     1354int f2( M * mutex m );                                          $\C{// disallowed: could be an array}$
     1355int f3( M mutex m[$\,$] );                                      $\C{// disallowed: array length unknown}$
     1356int f4( M ** mutex m );                                         $\C{// disallowed: could be an array}$
     1357int f5( M * mutex m[$\,$] );                            $\C{// disallowed: array length unknown}$
     1358\end{cfa}
     1359% Note, not all array routines have distinct types: @f2@ and @f3@ have the same type, as do @f4@ and @f5@.
     1360% However, even if the code generation could tell the difference, the extra information is still not sufficient to extend meaningfully the monitor call semantic.
     1361
     1362For object-oriented monitors, calling a mutex member \emph{implicitly} acquires mutual exclusion of the receiver object, @`rec`.foo(...)@.
     1363\CFA has no receiver, and hence, must use an explicit mechanism to specify which object has mutual exclusion acquired.
     1364A positive consequence of this design decision is the ability to support multi-monitor routines.
     1365\begin{cfa}
     1366int f( M & mutex x, M & mutex y );              $\C{// multiple monitor parameter of any type}$
     1367M m1, m2;
     1368f( m1, m2 );
     1369\end{cfa}
     1370(While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.)
     1371In practice, writing multi-locking routines that do not deadlock is tricky.
    14031372Having language support for such a feature is therefore a significant asset for \CFA.
    1404 In the case presented above, \CFA guarantees that the order of acquisition is consistent across calls to different routines using the same monitors as arguments.
    1405 This consistent ordering means acquiring multiple monitors is safe from deadlock when using \textbf{bulk-acq}.
    1406 However, users can still force the acquiring order.
    1407 For example, notice which routines use @mutex@/@nomutex@ and how this affects acquiring order:
    1408 \begin{cfa}
    1409 void foo(A& mutex a, B& mutex b) { // acquire a & b
    1410         ...
    1411 }
    1412 
    1413 void bar(A& mutex a, B& /*nomutex*/ b) { // acquire a
    1414         ... foo(a, b); ... // acquire b
    1415 }
    1416 
    1417 void baz(A& /*nomutex*/ a, B& mutex b) { // acquire b
    1418         ... foo(a, b); ... // acquire a
    1419 }
    1420 \end{cfa}
    1421 The \textbf{multi-acq} monitor lock allows a monitor lock to be acquired by both @bar@ or @baz@ and acquired again in @foo@.
    1422 In the calls to @bar@ and @baz@ the monitors are acquired in opposite order.
    1423 
    1424 However, such use leads to lock acquiring order problems.
    1425 In the example above, the user uses implicit ordering in the case of function @foo@ but explicit ordering in the case of @bar@ and @baz@.
    1426 This subtle difference means that calling these routines concurrently may lead to deadlock and is therefore undefined behaviour.
    1427 As shown~\cite{Lister77}, solving this problem requires:
    1428 \begin{enumerate}
    1429         \item Dynamically tracking the monitor-call order.
    1430         \item Implement rollback semantics.
    1431 \end{enumerate}
    1432 While the first requirement is already a significant constraint on the system, implementing a general rollback semantics in a C-like language is still prohibitively complex~\cite{Dice10}.
    1433 In \CFA, users simply need to be careful when acquiring multiple monitors at the same time or only use \textbf{bulk-acq} of all the monitors.
    1434 While \CFA provides only a partial solution, most systems provide no solution and the \CFA partial solution handles many useful cases.
    1435 
    1436 For example, \textbf{multi-acq} and \textbf{bulk-acq} can be used together in interesting ways:
    1437 \begin{cfa}
    1438 monitor bank { ... };
    1439 
    1440 void deposit( bank & mutex b, int deposit );
    1441 
    1442 void transfer( bank & mutex mybank, bank & mutex yourbank, int me2you) {
    1443         deposit( mybank, -me2you );
    1444         deposit( yourbank, me2you );
     1373
     1374The capability to acquire multiple locks before entering a critical section is called \newterm{bulk acquire}.
     1375In the previous example, \CFA guarantees the order of acquisition is consistent across calls to different routines using the same monitors as arguments.
     1376This consistent ordering means acquiring multiple monitors is safe from deadlock.
     1377However, users can force the acquiring order.
     1378For example, notice the use of @mutex@/\lstinline[morekeywords=nomutex]@nomutex@ and how this affects the acquiring order:
     1379\begin{cfa}
     1380void foo( M & mutex m1, M & mutex m2 );         $\C{// acquire m1 and m2}$
     1381void bar( M & mutex m1, M & /* nomutex */ m2 ) { $\C{// acquire m1}$
     1382        ... foo( m1, m2 ); ...                                  $\C{// acquire m2}$
     1383}
     1384void baz( M & /* nomutex */ m1, M & mutex m2 ) { $\C{// acquire m2}$
     1385        ... foo( m1, m2 ); ...                                  $\C{// acquire m1}$
     1386}
     1387\end{cfa}
     1388The multi-acquire semantics allows @bar@ or @baz@ to acquire a monitor lock and reacquire it in @foo@.
     1389In the calls to @bar@ and @baz@, the monitors are acquired in opposite order.
     1390
      1391However, such use leads to lock acquiring-order problems resulting in deadlock~\cite{Lister77}, where detecting it requires dynamic tracking of monitor calls, and dealing with it requires rollback semantics~\cite{Dice10}.
      1392In \CFA, safety is guaranteed by using bulk acquire for all monitors protecting the shared objects, whereas other monitor systems provide no aid.
     1393While \CFA provides only a partial solution, the \CFA partial solution handles many useful cases.
     1394\begin{cfa}
     1395monitor Bank { ... };
     1396void deposit( Bank & `mutex` b, int deposit );
     1397void transfer( Bank & `mutex` mybank, Bank & `mutex` yourbank, int me2you) {
     1398        deposit( mybank, `-`me2you );                   $\C{// debit}$
     1399        deposit( yourbank, me2you );                    $\C{// credit}$
    14451400}
    14461401\end{cfa}
    14471402This example shows a trivial solution to the bank-account transfer problem~\cite{BankTransfer}.
    1448 Without \textbf{multi-acq} and \textbf{bulk-acq}, the solution to this problem is much more involved and requires careful engineering.
     1403Without multi- and bulk acquire, the solution to this problem requires careful engineering.
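For instance (a sketch with hypothetical account names), two threads transferring in opposite directions are the classic deadlock scenario with individual locking; with bulk acquire, both accounts are acquired atomically, so the opposing calls cannot interleave their acquisitions and deadlock:
\begin{cfa}
Bank mine, yours;
thread 1 : transfer( mine, yours, 100 );        $\C{// acquires mine and yours as one bulk operation}$
thread 2 : transfer( yours, mine, 50 );         $\C{// opposite order, but still safe}$
\end{cfa}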
    14491404
    14501405
    14511406\subsection{\protect\lstinline|mutex| statement} \label{mutex-stmt}
    14521407
    1453 The call semantics discussed above have one software engineering issue: only a routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the @mutex@ statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}.
    1454 Table \ref{f:mutex-stmt} shows an example of the @mutex@ statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired.
    1455 Beyond naming, the @mutex@ statement has no semantic difference from a routine call with @mutex@ parameters.
    1456 
    1457 \begin{table}
    1458 \begin{center}
    1459 \begin{tabular}{|c|c|}
    1460 function call & @mutex@ statement \\
    1461 \hline
    1462 \begin{cfa}[tabsize=3]
      1408The monitor call-semantics associate all locking with routines.
      1409Like Java's @synchronized@ statement, \CFA offers an alternative @mutex@ statement to reduce refactoring and naming.
     1410\begin{cquote}
     1411\begin{tabular}{@{}c|@{\hspace{\parindentlnth}}c@{}}
     1412routine call & @mutex@ statement \\
     1413\begin{cfa}
    14631414monitor M {};
    14641415void foo( M & mutex m1, M & mutex m2 ) {
    14651416        // critical section
    14661417}
    1467 
    14681418void bar( M & m1, M & m2 ) {
    14691419        foo( m1, m2 );
    14701420}
    1471 \end{cfa}&\begin{cfa}[tabsize=3]
    1472 monitor M {};
     1421\end{cfa}
     1422&
     1423\begin{cfa}
     1424
    14731425void bar( M & m1, M & m2 ) {
    1474         mutex(m1, m2) {
     1426        mutex( m1, m2 ) {       // remove refactoring and naming
    14751427                // critical section
    14761428        }
    14771429}
    14781430
    1479 
    14801431\end{cfa}
    14811432\end{tabular}
    1482 \end{center}
    1483 \caption{Regular call semantics vs. \protect\lstinline|mutex| statement}
    1484 \label{f:mutex-stmt}
    1485 \end{table}
    1486 
    1487 % ======================================================================
    1488 % ======================================================================
    1489 \subsection{Data semantics} \label{data}
    1490 % ======================================================================
    1491 % ======================================================================
    1492 Once the call semantics are established, the next step is to establish data semantics.
    1493 Indeed, until now a monitor is used simply as a generic handle but in most cases monitors contain shared data.
    1494 This data should be intrinsic to the monitor declaration to prevent any accidental use of data without its appropriate protection.
    1495 For example, here is a complete version of the counter shown in section \ref{call}:
    1496 \begin{cfa}
    1497 monitor counter_t {
    1498         int value;
     1433\end{cquote}
     1434
     1435
     1436\section{Scheduling}
     1437\label{s:Scheduling}
     1438
     1439While monitor mutual-exclusion provides safe access to shared data, the monitor data may indicate that a thread accessing it cannot proceed.
      1440For example, Figure~\ref{f:GenericBoundedBuffer} shows a bounded buffer that may be full/empty so producer/consumer threads must block.
     1441Leaving the monitor and trying again (busy waiting) is impractical for high-level programming.
     1442Monitors eliminate busy waiting by providing internal synchronization to schedule threads needing access to the shared data, where the synchronization is blocking (threads are parked) versus spinning.
     1443Synchronization is generally achieved with internal~\cite{Hoare74} or external~\cite[\S~2.9.2]{uC++} scheduling, where \newterm{scheduling} defines which thread acquires the critical section next.
     1444\newterm{Internal scheduling} is characterized by each thread entering the monitor and making an individual decision about proceeding or blocking, while \newterm{external scheduling} is characterized by an entering thread making a decision about proceeding for itself and on behalf of other threads attempting entry.
     1445
     1446Figure~\ref{f:BBInt} shows a \CFA bounded-buffer with internal scheduling, where producers/consumers enter the monitor, see the buffer is full/empty, and block on an appropriate condition lock, @full@/@empty@.
     1447The @wait@ routine atomically blocks the calling thread and implicitly releases the monitor lock(s) for all monitors in the routine's parameter list.
     1448The appropriate condition lock is signalled to unblock an opposite kind of thread after an element is inserted/removed from the buffer.
     1449Signalling is unconditional, because signalling an empty condition lock does nothing.
     1450
     1451Signalling semantics cannot have the signaller and signalled thread in the monitor simultaneously, which means:
     1452\begin{enumerate}
     1453\item
     1454The signalling thread returns immediately, and the signalled thread continues.
     1455\item
     1456The signalling thread continues and the signalled thread is marked for urgent unblocking at the next scheduling point (exit/wait).
     1457\item
      1458The signalling thread blocks but is marked for urgent unblocking at the next scheduling point and the signalled thread continues.
     1459\end{enumerate}
     1460The first approach is too restrictive, as it precludes solving a reasonable class of problems, \eg dating service.
     1461\CFA supports the next two semantics as both are useful.
     1462Finally, while it is common to store a @condition@ as a field of the monitor, in \CFA, a @condition@ variable can be created/stored independently.
     1463Furthermore, a condition variable is tied to a \emph{group} of monitors on first use (called \newterm{branding}), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors.
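As a sketch of this rule (routine names are hypothetical), waiting with distinct sets of monitors requires distinct conditions:
\begin{cfa}
monitor M { ... };
condition c1, c2;                               $\C{// one condition per monitor set}$
void f( M & mutex m1, M & mutex m2 ) {
	... wait( c1 ); ...                         $\C{// first use brands c1 with the set m1, m2}$
}
void g( M & mutex m3 ) {
	... wait( c2 ); ...                         $\C{// different monitor set, so a separate condition}$
}
\end{cfa}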
     1464
     1465\begin{figure}
     1466\centering
     1467\newbox\myboxA
     1468\begin{lrbox}{\myboxA}
     1469\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1470forall( otype T ) { // distribute forall
     1471        monitor Buffer {
     1472                `condition` full, empty;
     1473                int front, back, count;
     1474                T elements[10];
     1475        };
     1476        void ?{}( Buffer(T) & buffer ) with(buffer) {
     1477                [front, back, count] = 0;
     1478        }
     1479
     1480        void insert( Buffer(T) & mutex buffer, T elem )
     1481                                with(buffer) {
     1482                if ( count == 10 ) `wait( empty )`;
     1483                // insert elem into buffer
     1484                `signal( full )`;
     1485        }
     1486        T remove( Buffer(T) & mutex buffer ) with(buffer) {
     1487                if ( count == 0 ) `wait( full )`;
     1488                // remove elem from buffer
     1489                `signal( empty )`;
     1490                return elem;
     1491        }
     1492}
     1493\end{cfa}
     1494\end{lrbox}
     1495
     1496\newbox\myboxB
     1497\begin{lrbox}{\myboxB}
     1498\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1499forall( otype T ) { // distribute forall
     1500        monitor Buffer {
     1501
     1502                int front, back, count;
     1503                T elements[10];
     1504        };
     1505        void ?{}( Buffer(T) & buffer ) with(buffer) {
     1506                [front, back, count] = 0;
     1507        }
     1508        T remove( Buffer(T) & mutex buffer ); // forward
     1509        void insert( Buffer(T) & mutex buffer, T elem )
     1510                                with(buffer) {
     1511                if ( count == 10 ) `waitfor( remove, buffer )`;
     1512                // insert elem into buffer
     1513
     1514        }
     1515        T remove( Buffer(T) & mutex buffer ) with(buffer) {
     1516                if ( count == 0 ) `waitfor( insert, buffer )`;
     1517                // remove elem from buffer
     1518
     1519                return elem;
     1520        }
     1521}
     1522\end{cfa}
     1523\end{lrbox}
     1524
     1525\subfloat[Internal Scheduling]{\label{f:BBInt}\usebox\myboxA}
     1526%\qquad
     1527\subfloat[External Scheduling]{\label{f:BBExt}\usebox\myboxB}
     1528\caption{Generic Bounded-Buffer}
     1529\label{f:GenericBoundedBuffer}
     1530\end{figure}
     1531
     1532Figure~\ref{f:BBExt} shows a \CFA bounded-buffer with external scheduling, where producers/consumers detecting a full/empty buffer block and prevent more producers/consumers from entering the monitor until the buffer has a free/empty slot.
     1533External scheduling is controlled by the @waitfor@ statement, which atomically blocks the calling thread, releases the monitor lock, and restricts the routine calls that can next acquire mutual exclusion.
     1534If the buffer is full, only calls to @remove@ can acquire the buffer, and if the buffer is empty, only calls to @insert@ can acquire the buffer.
     1535Threads making calls to routines that are currently excluded block outside (external) of the monitor on a calling queue, versus blocking on condition queues inside (internal) of the monitor.
     1536% External scheduling is more constrained and explicit, which helps programmers reduce the non-deterministic nature of concurrency.
     1537External scheduling allows users to wait for events from other threads without concern of unrelated events occurring.
      1538The mechanism can be expressed in terms of control flow, \eg Ada @accept@ or \uC @_Accept@, or in terms of data, \eg Go channels.
     1539While both mechanisms have strengths and weaknesses, this project uses a control-flow mechanism to stay consistent with other language semantics.
     1540Two challenges specific to \CFA for external scheduling are loose object-definitions (see Section~\ref{s:LooseObjectDefinitions}) and multiple-monitor routines (see Section~\ref{s:Multi-MonitorScheduling}).
     1541
     1542For internal scheduling, non-blocking signalling (as in the producer/consumer example) is used when the signaller is providing the cooperation for a waiting thread;
      1543the signaller enters the monitor and changes state, detects a waiting thread that can use the state, performs a non-blocking signal on the condition queue for the waiting thread, and exits the monitor to run concurrently.
     1544The waiter unblocks next, uses/takes the state, and exits the monitor.
     1545Blocking signalling is the reverse, where the waiter is providing the cooperation for the signalling thread;
      1546the signaller enters the monitor, detects a waiting thread providing the necessary state, and performs a blocking signal that places the signaller on the urgent queue and unblocks the waiter.
     1547The waiter changes state and exits the monitor, and the signaller unblocks next from the urgent queue to use/take the state.
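The following sketch (hypothetical names, schematic bodies) contrasts the two forms of cooperation:
\begin{cfa}
monitor M { condition c; int data; };
void waiter( M & mutex m ) with( m ) {
	wait( c );                                  $\C{// block until cooperation arrives}$
	data += 1;                                  $\C{// use/update the shared state}$
}
void signaller( M & mutex m ) with( m ) {
	data = 0;                                   $\C{// non-blocking: signaller provides the state}$
	signal( c );                                $\C{// waiter runs after the signaller exits}$
}
void signaller_block( M & mutex m ) with( m ) {
	signal_block( c );                          $\C{// blocking: waiter runs first, signaller parks on urgent queue}$
	// signaller resumes and can use the state updated by the waiter
}
\end{cfa}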
     1548
     1549Figure~\ref{f:DatingService} shows a dating service demonstrating the two forms of signalling: non-blocking and blocking.
     1550The dating service matches girl and boy threads with matching compatibility codes so they can exchange phone numbers.
     1551A thread blocks until an appropriate partner arrives.
      1552The complexity is exchanging phone numbers in the monitor, because the monitor's mutual-exclusion property prevents both partners from being in the monitor simultaneously to perform the exchange.
      1553For internal scheduling, the @exchange@ condition is necessary to block the thread finding the match, while the matcher unblocks to take the opposite number, post its phone number, and unblock the partner.
      1554For external scheduling, the implicit urgent-condition replaces the explicit @exchange@-condition, and @signal_block@ puts the finding thread on the urgent condition and unblocks the matcher.
     1555
     1556The dating service is an example of a monitor that cannot be written using external scheduling because it requires knowledge of calling parameters to make scheduling decisions, and parameters of waiting threads are unavailable;
     1557as well, an arriving thread may not find a partner and must wait, which requires a condition variable, and condition variables imply internal scheduling.
     1558
     1559\begin{figure}
     1560\centering
     1561\newbox\myboxA
     1562\begin{lrbox}{\myboxA}
     1563\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1564enum { CCodes = 20 };
     1565monitor DS {
     1566        int GirlPhNo, BoyPhNo;
     1567        condition Girls[CCodes], Boys[CCodes];
     1568        condition exchange;
    14991569};
    1500 
    1501 void ?{}(counter_t & this) {
    1502         this.cnt = 0;
    1503 }
    1504 
    1505 int ?++(counter_t & mutex this) {
    1506         return ++this.value;
    1507 }
    1508 
    1509 // need for mutex is platform dependent here
    1510 void ?{}(int * this, counter_t & mutex cnt) {
    1511         *this = (int)cnt;
    1512 }
    1513 \end{cfa}
    1514 
    1515 Like threads and coroutines, monitors are defined in terms of traits with some additional language support in the form of the @monitor@ keyword.
    1516 The monitor trait is:
    1517 \begin{cfa}
    1518 trait is_monitor(dtype T) {
    1519         monitor_desc * get_monitor( T & );
    1520         void ^?{}( T & mutex );
     1570int girl( DS & mutex ds, int phNo, int ccode ) {
     1571        if ( is_empty( Boys[ccode] ) ) {
     1572                wait( Girls[ccode] );
     1573                GirlPhNo = phNo;
     1574                exchange.signal();
     1575        } else {
     1576                GirlPhNo = phNo;
     1577                signal( Boys[ccode] );
     1578                exchange.wait();
     1579        } // if
     1580        return BoyPhNo;
     1581}
     1582int boy( DS & mutex ds, int phNo, int ccode ) {
     1583        // as above with boy/girl interchanged
     1584}
     1585\end{cfa}
     1586\end{lrbox}
     1587
     1588\newbox\myboxB
     1589\begin{lrbox}{\myboxB}
     1590\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1591
     1592monitor DS {
     1593        int GirlPhNo, BoyPhNo;
     1594        condition Girls[CCodes], Boys[CCodes];
     1595
    15211596};
    1522 \end{cfa}
    1523 Note that the destructor of a monitor must be a @mutex@ routine to prevent deallocation while a thread is accessing the monitor.
    1524 As with any object, calls to a monitor, using @mutex@ or otherwise, is undefined behaviour after the destructor has run.
    1525 
    1526 % ======================================================================
    1527 % ======================================================================
    1528 \section{Internal Scheduling} \label{intsched}
    1529 % ======================================================================
    1530 % ======================================================================
    1531 In addition to mutual exclusion, the monitors at the core of \CFA's concurrency can also be used to achieve synchronization.
    1532 With monitors, this capability is generally achieved with internal or external scheduling as in~\cite{Hoare74}.
    1533 With \textbf{scheduling} loosely defined as deciding which thread acquires the critical section next, \textbf{internal scheduling} means making the decision from inside the critical section (\ie with access to the shared state), while \textbf{external scheduling} means making the decision when entering the critical section (\ie without access to the shared state).
    1534 Since internal scheduling within a single monitor is mostly a solved problem, this paper concentrates on extending internal scheduling to multiple monitors.
    1535 Indeed, like the \textbf{bulk-acq} semantics, internal scheduling extends to multiple monitors in a way that is natural to the user but requires additional complexity on the implementation side.
    1536 
    1537 First, here is a simple example of internal scheduling:
    1538 
    1539 \begin{cfa}
    1540 monitor A {
    1541         condition e;
    1542 }
    1543 
    1544 void foo(A& mutex a1, A& mutex a2) {
     1597int girl( DS & mutex ds, int phNo, int ccode ) {
     1598        if ( is_empty( Boys[ccode] ) ) { // no compatible
     1599                wait( Girls[ccode] ); // wait for boy
     1600                GirlPhNo = phNo; // make phone number available
     1601
     1602        } else {
     1603                GirlPhNo = phNo; // make phone number available
     1604                signal_block( Boys[ccode] ); // restart boy
     1605
     1606        } // if
     1607        return BoyPhNo;
     1608}
     1609int boy( DS & mutex ds, int phNo, int ccode ) {
     1610        // as above with boy/girl interchanged
     1611}
     1612\end{cfa}
     1613\end{lrbox}
     1614
     1615\subfloat[\lstinline@signal@]{\label{f:DatingSignal}\usebox\myboxA}
     1616\qquad
     1617\subfloat[\lstinline@signal_block@]{\label{f:DatingSignalBlock}\usebox\myboxB}
     1618\caption{Dating service. }
     1619\label{f:DatingService}
     1620\end{figure}
     1621
     1622Both internal and external scheduling extend to multiple monitors in a natural way.
     1623\begin{cquote}
     1624\begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}}
     1625\begin{cfa}
     1626monitor M { `condition e`; ... };
     1627void foo( M & mutex m1, M & mutex m2 ) {
     1628        ... wait( `e` ); ...   // wait( e, m1, m2 )
     1629        ... wait( `e, m1` ); ...
     1630        ... wait( `e, m2` ); ...
     1631}
     1632\end{cfa}
     1633&
     1634\begin{cfa}
     1635void rtn$\(_1\)$( M & mutex m1, M & mutex m2 );
     1636void rtn$\(_2\)$( M & mutex m1 );
     1637void bar( M & mutex m1, M & mutex m2 ) {
     1638        ... waitfor( `rtn` ); ...       // $\LstCommentStyle{waitfor( rtn\(_1\), m1, m2 )}$
     1639        ... waitfor( `rtn, m1` ); ... // $\LstCommentStyle{waitfor( rtn\(_2\), m1 )}$
     1640}
     1641\end{cfa}
     1642\end{tabular}
     1643\end{cquote}
      1644For @wait( e )@, the default semantics is to atomically block the waiting thread and release all acquired mutex types in the parameter list, \ie @wait( e, m1, m2 )@.
     1645To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@.
     1646Wait statically verifies the released monitors are the acquired mutex-parameters so unconditional release is safe.
     1647Finally, a signaller,
     1648\begin{cfa}
     1649void baz( M & mutex m1, M & mutex m2 ) {
     1650        ... signal( e ); ...
     1651}
     1652\end{cfa}
      1653must have acquired at least as many monitor locks as the waiting thread being signalled from the condition queue.
     1654
     1655Similarly, for @waitfor( rtn )@, the default semantics is to atomically block the acceptor and release all acquired mutex types in the parameter list, \ie @waitfor( rtn, m1, m2 )@.
     1656To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn, m1 )@.
     1657Waitfor statically verifies the released monitors are the same as the acquired mutex-parameters of the given routine or routine pointer.
     1658To statically verify the released monitors match with the accepted routine's mutex parameters, the routine (pointer) prototype must be accessible.
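For example (a sketch based on Figure~\ref{f:BBExt}, eliding the enclosing @forall@ block), a forward declaration makes the mutex prototype of the accepted routine visible at the @waitfor@:
\begin{cfa}
T remove( Buffer(T) & mutex buffer );           $\C{// forward declaration of the accepted routine}$
void insert( Buffer(T) & mutex buffer, T elem ) with( buffer ) {
	if ( count == 10 ) waitfor( remove, buffer ); $\C{// prototype checked against released monitors}$
	// insert elem into buffer
}
\end{cfa}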
     1659
      1660The ability to release a subset of acquired monitors can result in a \newterm{nested monitor}~\cite{Lister77} deadlock.
     1661\begin{cfa}
     1662void foo( M & mutex m1, M & mutex m2 ) {
      1663	... wait( `e, m1` ); ...				$\C{// release m1, keeping m2 acquired}$
}
      1664void bar( M & mutex m1, M & mutex m2 ) {	$\C{// must acquire m1 and m2}$
      1665	... signal( `e` ); ...
}
     1666\end{cfa}
      1667The @wait@ only releases @m1@ so the signalling thread cannot acquire both @m1@ and @m2@ to enter @bar@ to get to the @signal@.
     1668While deadlock issues can occur with multiple/nesting acquisition, this issue results from the fact that locks, and by extension monitors, are not perfectly composable.
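As with locks, careful nesting can avoid the deadlock; one possible restructuring (a sketch) is to have the waiter release exactly the monitor the signaller acquires:
\begin{cfa}
void foo( M & mutex m1, M & mutex m2 ) {
	... wait( `e, m2` ); ...                    $\C{// release only m2, keeping m1 acquired}$
}
void bar( M & mutex m2 ) {                      $\C{// acquire only m2, which the waiter released}$
	... signal( `e` ); ...
}
\end{cfa}
However, such refactoring is not always possible, forcing more complex restructuring.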
     1669
     1670Finally, an important aspect of monitor implementation is barging, \ie can calling threads barge ahead of signalled threads?
      1671If barging is allowed, synchronization between a signaller and signallee is difficult, often requiring multiple unblock/block cycles (looping around a wait rechecking if a condition is met).
     1672\begin{quote}
     1673However, we decree that a signal operation be followed immediately by resumption of a waiting program, without possibility of an intervening procedure call from yet a third program.
     1674It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signalling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74}
     1675\end{quote}
     1676\CFA scheduling \emph{precludes} barging, which simplifies synchronization among threads in the monitor and increases correctness.
     1677For example, there are no loops in either bounded buffer solution in Figure~\ref{f:GenericBoundedBuffer}.
     1678Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design and implementation of \CFA concurrency.
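For example (a sketch referring to the buffer in Figure~\ref{f:BBInt}), with barging a waiter must recheck its condition in a loop, whereas the barging-free \CFA semantics makes a single check sufficient:
\begin{cfa}
while ( count == 10 ) wait( empty );            $\C{// barging possible: recheck the condition after every wakeup}$
if ( count == 10 ) wait( empty );               $\C{// \CFA: the signalled thread is the next into the monitor}$
\end{cfa}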
     1679
     1680
     1681\subsection{Barging Prevention}
     1682
     1683Figure~\ref{f:BargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signalling semantics.
     1684The complexity begins at the end of the inner @mutex@ statement, where the semantics of internal scheduling need to be extended for multiple monitors.
     1685The problem is that bulk acquire is used in the inner @mutex@ statement where one of the monitors is already acquired.
     1686When the signalling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting threads to prevent barging into the outer @mutex@ statement by another thread.
     1687However, both the signalling and waiting thread W1 still need monitor @m1@.
     1688
     1689\begin{figure}
     1690\newbox\myboxA
     1691\begin{lrbox}{\myboxA}
     1692\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1693monitor M m1, m2;
     1694condition c;
     1695mutex( m1 ) { // $\LstCommentStyle{\color{red}outer}$
    15451696        ...
    1546         // Wait for cooperation from bar()
    1547         wait(a1.e);
     1697        mutex( m1, m2 ) { // $\LstCommentStyle{\color{red}inner}$
     1698                ... `signal( c )`; ...
     1699                // m1, m2 acquired
     1700        } // $\LstCommentStyle{\color{red}release m2}$
     1701        // m1 acquired
     1702} // release m1
     1703\end{cfa}
     1704\end{lrbox}
     1705
     1706\newbox\myboxB
     1707\begin{lrbox}{\myboxB}
     1708\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1709
     1710
     1711mutex( m1 ) {
    15481712        ...
    1549 }
    1550 
    1551 void bar(A& mutex a1, A& mutex a2) {
    1552         // Provide cooperation for foo()
    1553         ...
    1554         // Unblock foo
    1555         signal(a1.e);
    1556 }
    1557 \end{cfa}
    1558 There are two details to note here.
    1559 First, @signal@ is a delayed operation; it only unblocks the waiting thread when it reaches the end of the critical section.
    1560 This semantics is needed to respect mutual-exclusion, \ie the signaller and signalled thread cannot be in the monitor simultaneously.
    1561 The alternative is to return immediately after the call to @signal@, which is significantly more restrictive.
    1562 Second, in \CFA, while it is common to store a @condition@ as a field of the monitor, a @condition@ variable can be stored/created independently of a monitor.
    1563 Here routine @foo@ waits for the @signal@ from @bar@ before making further progress, ensuring a basic ordering.
    1564 
    1565 An important aspect of the implementation is that \CFA does not allow barging, which means that once function @bar@ releases the monitor, @foo@ is guaranteed to be the next thread to acquire the monitor (unless some other thread waited on the same condition).
    1566 This guarantee offers the benefit of not having to loop around waits to recheck that a condition is met.
    1567 The main reason \CFA offers this guarantee is that users can easily introduce barging if it becomes a necessity but adding barging prevention or barging avoidance is more involved without language support.
    1568 Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design and implementation of \CFA concurrency.
    1569 
    1570 % ======================================================================
    1571 % ======================================================================
    1572 \subsection{Internal Scheduling - Multi-Monitor}
    1573 % ======================================================================
    1574 % ======================================================================
    1575 It is easy to understand the problem of multi-monitor scheduling using a series of pseudo-code examples.
    1576 Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors.
    1577 Indeed, @wait@ statements always use the implicit condition variable as parameters and explicitly name the monitors (A and B) associated with the condition.
    1578 Note that in \CFA, condition variables are tied to a \emph{group} of monitors on first use (called branding), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors.
    1579 The example below shows the simple case of having two threads (one for each column) and a single monitor A.
    1580 
    1581 \begin{multicols}{2}
    1582 thread 1
    1583 \begin{cfa}
    1584 acquire A
    1585         wait A
    1586 release A
    1587 \end{cfa}
    1588 
    1589 \columnbreak
    1590 
    1591 thread 2
    1592 \begin{cfa}
    1593 acquire A
    1594         signal A
    1595 release A
    1596 \end{cfa}
    1597 \end{multicols}
    1598 One thread acquires before waiting (atomically blocking and releasing A) and the other acquires before signalling.
    1599 It is important to note here that both @wait@ and @signal@ must be called with the proper monitor(s) already acquired.
    1600 This semantic is a logical requirement for barging prevention.
    1601 
    1602 A direct extension of the previous example is a \textbf{bulk-acq} version:
    1603 \begin{multicols}{2}
    1604 \begin{cfa}
    1605 acquire A & B
    1606         wait A & B
    1607 release A & B
    1608 \end{cfa}
    1609 \columnbreak
    1610 \begin{cfa}
    1611 acquire A & B
    1612         signal A & B
    1613 release A & B
    1614 \end{cfa}
    1615 \end{multicols}
    1616 \noindent This version uses \textbf{bulk-acq} (denoted using the {\sf\&} symbol), but the presence of multiple monitors does not add a particularly new meaning.
    1617 Synchronization happens between the two threads in exactly the same way and order.
    1618 The only difference is that mutual exclusion covers a group of monitors.
    1619 On the implementation side, handling multiple monitors does add a degree of complexity as the next few examples demonstrate.
    1620 
    1621 While deadlock issues can occur when nesting monitors, these issues are only a symptom of the fact that locks, and by extension monitors, are not perfectly composable.
    1622 For monitors, a well-known deadlock problem is the Nested Monitor Problem~\cite{Lister77}, which occurs when a @wait@ is made by a thread that holds more than one monitor.
    1623 For example, the following cfa-code runs into the nested-monitor problem:
    1624 \begin{multicols}{2}
    1625 \begin{cfa}
    1626 acquire A
    1627         acquire B
    1628                 wait B
    1629         release B
    1630 release A
    1631 \end{cfa}
    1632 
    1633 \columnbreak
    1634 
    1635 \begin{cfa}
    1636 acquire A
    1637         acquire B
    1638                 signal B
    1639         release B
    1640 release A
    1641 \end{cfa}
    1642 \end{multicols}
    1643 \noindent The @wait@ only releases monitor @B@ so the signalling thread cannot acquire monitor @A@ to get to the @signal@.
    1644 Attempting release of all acquired monitors at the @wait@ introduces a different set of problems, such as releasing monitor @C@, which has nothing to do with the @signal@.
    1645 
    1646 However, for monitors as for locks, it is possible to write a program using nesting without encountering any problems if nesting is done correctly.
    1647 For example, the next cfa-code snippet acquires monitors {\sf A} then {\sf B} before waiting, while only acquiring {\sf B} when signalling, effectively avoiding the Nested Monitor Problem~\cite{Lister77}.
    1648 
    1649 \begin{multicols}{2}
    1650 \begin{cfa}
    1651 acquire A
    1652         acquire B
    1653                 wait B
    1654         release B
    1655 release A
    1656 \end{cfa}
    1657 
    1658 \columnbreak
    1659 
    1660 \begin{cfa}
    1661 
    1662 acquire B
    1663         signal B
    1664 release B
    1665 
    1666 \end{cfa}
    1667 \end{multicols}
    1668 
    1669 \noindent However, this simple refactoring may not be possible, forcing more complex restructuring.
    1670 
    1671 % ======================================================================
    1672 % ======================================================================
    1673 \subsection{Internal Scheduling - In Depth}
    1674 % ======================================================================
    1675 % ======================================================================
    1676 
    1677 A larger example is presented to show complex issues for \textbf{bulk-acq} and its implementation options are analyzed.
    1678 Figure~\ref{f:int-bulk-cfa} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{f:int-bulk-cfa} shows the corresponding \CFA code to implement the cfa-code in listing \ref{f:int-bulk-cfa}.
    1679 For the purpose of translating the given cfa-code into \CFA-code, any method of introducing a monitor is acceptable, \eg @mutex@ parameters, global variables, pointer parameters, or using locals with the @mutex@ statement.
    1680 
    1681 \begin{figure}
    1682 \begin{multicols}{2}
    1683 Waiting thread
    1684 \begin{cfa}[numbers=left]
    1685 acquire A
    1686         // Code Section 1
    1687         acquire A & B
    1688                 // Code Section 2
    1689                 wait A & B
    1690                 // Code Section 3
    1691         release A & B
    1692         // Code Section 4
    1693 release A
    1694 \end{cfa}
    1695 \columnbreak
    1696 Signalling thread
    1697 \begin{cfa}[numbers=left, firstnumber=10,escapechar=|]
    1698 acquire A
    1699         // Code Section 5
    1700         acquire A & B
    1701                 // Code Section 6
    1702                 |\label{line:signal1}|signal A & B
    1703                 // Code Section 7
    1704         |\label{line:releaseFirst}|release A & B
    1705         // Code Section 8
    1706 |\label{line:lastRelease}|release A
    1707 \end{cfa}
    1708 \end{multicols}
    1709 \begin{cfa}[caption={Internal scheduling with \textbf{bulk-acq}},label={f:int-bulk-cfa}]
    1710 \end{cfa}
    1711 \begin{center}
    1712 \begin{cfa}[xleftmargin=.4\textwidth]
    1713 monitor A a;
    1714 monitor B b;
    1715 condition c;
    1716 \end{cfa}
    1717 \end{center}
    1718 \begin{multicols}{2}
    1719 Waiting thread
    1720 \begin{cfa}
    1721 mutex(a) {
    1722         // Code Section 1
    1723         mutex(a, b) {
    1724                 // Code Section 2
    1725                 wait(c);
    1726                 // Code Section 3
    1727         }
    1728         // Code Section 4
    1729 }
    1730 \end{cfa}
    1731 \columnbreak
    1732 Signalling thread
    1733 \begin{cfa}
    1734 mutex(a) {
    1735         // Code Section 5
    1736         mutex(a, b) {
    1737                 // Code Section 6
    1738                 signal(c);
    1739                 // Code Section 7
    1740         }
    1741         // Code Section 8
    1742 }
    1743 \end{cfa}
    1744 \end{multicols}
    1745 \begin{cfa}[caption={Equivalent \CFA code for listing \ref{f:int-bulk-cfa}},label={f:int-bulk-cfa}]
    1746 \end{cfa}
    1747 \begin{multicols}{2}
    1748 Waiter
    1749 \begin{cfa}[numbers=left]
    1750 acquire A
    1751         acquire A & B
    1752                 wait A & B
    1753         release A & B
    1754 release A
    1755 \end{cfa}
    1756 
    1757 \columnbreak
    1758 
    1759 Signaller
    1760 \begin{cfa}[numbers=left, firstnumber=6,escapechar=|]
    1761 acquire A
    1762         acquire A & B
    1763                 signal A & B
    1764         release A & B
    1765         |\label{line:secret}|// Secretly keep B here
    1766 release A
    1767 // Wakeup waiter and transfer A & B
    1768 \end{cfa}
    1769 \end{multicols}
    1770 \begin{cfa}[caption={Figure~\ref{f:int-bulk-cfa}, with delayed signalling comments},label={f:int-secret}]
    1771 \end{cfa}
     1713        mutex( m1, m2 ) {
     1714                ... `wait( c )`; // block and release m1, m2
     1715                // m1, m2 acquired
     1716        } // $\LstCommentStyle{\color{red}release m2}$
     1717        // m1 acquired
     1718} // release m1
     1719\end{cfa}
     1720\end{lrbox}
     1721
     1722\newbox\myboxC
     1723\begin{lrbox}{\myboxC}
     1724\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1725
     1726
     1727mutex( m2 ) {
     1728        ... `wait( c )`; ...
     1729        // m2 acquired
     1730} // $\LstCommentStyle{\color{red}release m2}$
     1731
     1732
     1733
     1734
     1735\end{cfa}
     1736\end{lrbox}
     1737
     1738\begin{cquote}
     1739\subfloat[Signalling Thread]{\label{f:SignallingThread}\usebox\myboxA}
     1740\hspace{2\parindentlnth}
     1741\subfloat[Waiting Thread (W1)]{\label{f:WaitingThread}\usebox\myboxB}
     1742\hspace{2\parindentlnth}
     1743\subfloat[Waiting Thread (W2)]{\label{f:OtherWaitingThread}\usebox\myboxC}
     1744\end{cquote}
     1745\caption{Barging Prevention}
     1746\label{f:BargingPrevention}
    17721747\end{figure}
    17731748
    1774 The complexity begins at code sections 4 and 8 in listing \ref{f:int-bulk-cfa}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors.
    1775 The root of the problem is that \textbf{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous cfa-code.
    1776 When the signaller thread reaches the location where it should ``release @A & B@'' (listing \ref{f:int-bulk-cfa} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor @B@ to the waiting thread.
    1777 This ownership transfer is required in order to prevent barging into @B@ by another thread, since both the signalling and signalled threads still need monitor @A@.
    1778 There are three options:
    1779 
    1780 \subsubsection{Delaying Signals}
    1781 The obvious solution to the problem of multi-monitor scheduling is to keep ownership of all locks until the last lock is ready to be transferred.
    1782 It can be argued that that moment is when the last lock is no longer needed, because this semantics fits most closely to the behaviour of single-monitor scheduling.
    1783 This solution has the main benefit of transferring ownership of groups of monitors, which simplifies the semantics from multiple objects to a single group of objects, effectively making the existing single-monitor semantic viable by simply changing monitors to monitor groups.
    1784 This solution releases the monitors once every monitor in a group can be released.
    1785 However, since some monitors are never released (\eg the monitor of a thread), this interpretation means a group might never be released.
    1786 A more interesting interpretation is to transfer the group until all its monitors are released, which means the group is not passed further and a thread can retain its locks.
    1787 
    1788 However, listing \ref{f:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors.
     1749One scheduling solution is for the signaller to keep ownership of all locks until the last lock is ready to be transferred, because this semantics fits most closely to the behaviour of single-monitor scheduling.
      1750However, Figure~\ref{f:OtherWaitingThread} shows this solution is complex depending on other waiters, resulting in choices when the signaller finishes the inner mutex-statement.
      1751The signaller can retain @m2@ until completion of the outer mutex statement and pass the locks to waiter W1, or it can pass @m2@ to waiter W2 after completing the inner mutex-statement, while continuing to hold @m1@.
     1752In the latter case, waiter W2 must eventually pass @m2@ to waiter W1, which is complex because W1 may have waited before W2, so W2 is unaware of it.
     1753Furthermore, there is an execution sequence where the signaller always finds waiter W2, and hence, waiter W1 starves.
     1754
      1755While a number of approaches were examined~\cite[\S~4.3]{Delisle18}, the solution chosen for \CFA is a novel technique called \newterm{partial signalling}.
     1756Signalled threads are moved to an urgent queue and the waiter at the front defines the set of monitors necessary for it to unblock.
     1757Partial signalling transfers ownership of monitors to the front waiter.
      1758When the signaller thread exits or waits in the monitor, the front waiter is unblocked if all its monitors are released.
     1759This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met.
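Applied to Figure~\ref{f:BargingPrevention}, partial signalling proceeds roughly as follows (a sketch of the runtime behaviour, not user code):
\begin{cfa}
mutex( m1 ) {
	mutex( m1, m2 ) {
		signal( c );                            $\C{// move front waiter W1, needing m1 and m2, to urgent queue}$
	}                                           $\C{// transfer m2 to W1; W1 still needs m1, so it stays blocked}$
	...                                         $\C{// signaller continues, still holding m1}$
}                                               $\C{// transfer m1 to W1; all its monitors owned, so W1 unblocks}$
\end{cfa}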
     1760
     1761\begin{comment}
    17891762Figure~\ref{f:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable.
    17901763Because the third thread is signalled when secretly holding @B@, the goal  becomes unreachable.
     
    18001773In both cases, the threads need to be able to distinguish, on a per monitor basis, which ones need to be released and which ones need to be transferred, which means knowing when to release a group becomes complex and inefficient (see next section) and therefore effectively precludes this approach.
    18011774
     1775
    18021776\subsubsection{Dependency graphs}
    1803 
    18041777
    18051778\begin{figure}
     
    18781851The extra challenge is that this dependency graph is effectively post-mortem, but the runtime system needs to be able to build and solve these graphs as the dependencies unfold.
    18791852Resolving dependency graphs being a complex and expensive endeavour, this solution is not the preferred one.
    1880 
    1881 \subsubsection{Partial Signalling} \label{partial-sig}
    1882 Finally, the solution that is chosen for \CFA is to use partial signalling.
    1883 Again using listing \ref{f:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@.
    1884 Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread.
    1885 This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met.
    1886 This solution has a much simpler implementation than a dependency graph solving algorithms, which is why it was chosen.
    1887 Furthermore, after being fully implemented, this solution does not appear to have any significant downsides.
    1888 
    1889 Using partial signalling, listing \ref{f:dependency} can be solved easily:
    1890 \begin{itemize}
    1891         \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor @B@ to thread $\alpha$ and continues to hold monitor @A@.
    1892         \item When thread $\gamma$ reaches line \ref{line:release-a}  it transfers monitor @A@ to thread $\beta$  and wakes it up.
    1893         \item When thread $\beta$  reaches line \ref{line:release-aa} it transfers monitor @A@ to thread $\alpha$ and wakes it up.
    1894 \end{itemize}
    1895 
    1896 % ======================================================================
    1897 % ======================================================================
    1898 \subsection{Signalling: Now or Later}
    1899 % ======================================================================
    1900 % ======================================================================
    1901 \begin{table}
    1902 \begin{tabular}{|c|c|}
    1903 @signal@ & @signal_block@ \\
    1904 \hline
    1905 \begin{cfa}[tabsize=3]
    1906 monitor DatingService {
    1907         // compatibility codes
    1908         enum{ CCodes = 20 };
    1909 
    1910         int girlPhoneNo
    1911         int boyPhoneNo;
    1912 };
    1913 
    1914 condition girls[CCodes];
    1915 condition boys [CCodes];
    1916 condition exchange;
    1917 
    1918 int girl(int phoneNo, int cfa) {
    1919         // no compatible boy ?
    1920         if(empty(boys[cfa])) {
    1921                 wait(girls[cfa]);               // wait for boy
    1922                 girlPhoneNo = phoneNo;          // make phone number available
    1923                 signal(exchange);               // wake boy from chair
    1924         } else {
    1925                 girlPhoneNo = phoneNo;          // make phone number available
    1926                 signal(boys[cfa]);              // wake boy
    1927                 wait(exchange);         // sit in chair
    1928         }
    1929         return boyPhoneNo;
    1930 }
    1931 int boy(int phoneNo, int cfa) {
    1932         // same as above
    1933         // with boy/girl interchanged
    1934 }
    1935 \end{cfa}&\begin{cfa}[tabsize=3]
    1936 monitor DatingService {
    1937 
    1938         enum{ CCodes = 20 };    // compatibility codes
    1939 
    1940         int girlPhoneNo;
    1941         int boyPhoneNo;
    1942 };
    1943 
    1944 condition girls[CCodes];
    1945 condition boys [CCodes];
    1946 // exchange is not needed
    1947 
    1948 int girl(int phoneNo, int cfa) {
    1949         // no compatible boy ?
    1950         if(empty(boys[cfa])) {
    1951                 wait(girls[cfa]);               // wait for boy
    1952                 girlPhoneNo = phoneNo;          // make phone number available
    1953                 signal(exchange);               // wake boy from chair
    1954         } else {
    1955                 girlPhoneNo = phoneNo;          // make phone number available
    1956                 signal_block(boys[cfa]);                // wake boy
    1957 
    1958                 // second handshake unnecessary
    1959 
    1960         }
    1961         return boyPhoneNo;
    1962 }
    1963 
    1964 int boy(int phoneNo, int cfa) {
    1965         // same as above
    1966         // with boy/girl interchanged
    1967 }
    1968 \end{cfa}
    1969 \end{tabular}
    1970 \caption{Dating service example using \protect\lstinline|signal| and \protect\lstinline|signal_block|. }
    1971 \label{tbl:datingservice}
    1972 \end{table}
    1973 An important note is that, until now, signalling a monitor was a delayed operation.
    1974 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the @signal@ statement.
    1975 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the @signal_block@ routine.
    1976 
    1977 The example in table \ref{tbl:datingservice} highlights the difference in behaviour.
    1978 As mentioned, @signal@ only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed.
    1979 To avoid this explicit synchronization, the @condition@ type offers the @signal_block@ routine, which handles the two-way handshake as shown in the example.
    1980 This feature removes the need for a second condition variables and simplifies programming.
    1981 Like every other monitor semantic, @signal_block@ uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to @signal_block@, meaning no other thread can acquire the monitor either before or after the call.
    1982 
    1983 % ======================================================================
    1984 % ======================================================================
     1853\end{comment}
     1854
     1855
     1856\begin{comment}
    19851857\section{External scheduling} \label{extsched}
    1986 % ======================================================================
    1987 % ======================================================================
    1988 An alternative to internal scheduling is external scheduling (see Table~\ref{tbl:sched}).
     1858
    19891859\begin{table}
    19901860\begin{tabular}{|c|c|c|}
     
    20501920\label{tbl:sched}
    20511921\end{table}
    2052 This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency.
    2053 Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring.
    2054 External scheduling can generally be done either in terms of control flow (\eg Ada with @accept@, \uC with @_Accept@) or in terms of data (\eg Go with channels).
    2055 Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the languages semantics.
    2056 Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines.
    2057 The previous example shows a simple use @_Accept@ versus @wait@/@signal@ and its advantages.
    2058 Note that while other languages often use @accept@/@select@ as the core external scheduling keyword, \CFA uses @waitfor@ to prevent name collisions with existing socket \textbf{api}s.
    20591922
    20601923For the @P@ member above using internal scheduling, the call to @wait@ only guarantees that @V@ is the last routine to access the monitor, allowing a third routine, say @isInUse()@, acquire mutual exclusion several times while routine @P@ is waiting.
    20611924On the other hand, external scheduling guarantees that while routine @P@ is waiting, no other routine than @V@ can acquire the monitor.
    2062 
    2063 % ======================================================================
    2064 % ======================================================================
     1925\end{comment}
     1926
     1927
    20651928\subsection{Loose Object Definitions}
    2066 % ======================================================================
    2067 % ======================================================================
    2068 In \uC, a monitor class declaration includes an exhaustive list of monitor operations.
    2069 Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user:
    2070 
    2071 \begin{cfa}
    2072 monitor A {};
    2073 
    2074 void f(A & mutex a);
    2075 void g(A & mutex a) {
    2076         waitfor(f); // Obvious which f() to wait for
    2077 }
    2078 
    2079 void f(A & mutex a, int); // New different F added in scope
    2080 void h(A & mutex a) {
    2081         waitfor(f); // Less obvious which f() to wait for
    2082 }
    2083 \end{cfa}
    2084 
    2085 Furthermore, external scheduling is an example where implementation constraints become visible from the interface.
    2086 Here is the cfa-code for the entering phase of a monitor:
    2087 \begin{center}
    2088 \begin{tabular}{l}
    2089 \begin{cfa}
    2090         if monitor is free
    2091                 enter
    2092         elif already own the monitor
    2093                 continue
    2094         elif monitor accepts me
    2095                 enter
    2096         else
    2097                 block
    2098 \end{cfa}
    2099 \end{tabular}
    2100 \end{center}
     1929\label{s:LooseObjectDefinitions}
     1930
      1931In an object-oriented programming language, a class includes an exhaustive list of operations.
      1932However, new members can be added via static inheritance or dynamic members, \eg JavaScript~\cite{JavaScript}.
     1933Similarly, monitor routines can be added at any time in \CFA, making it less clear for programmers and more difficult to implement.
     1934\begin{cfa}
     1935monitor M {};
     1936void `f`( M & mutex m );
     1937void g( M & mutex m ) { waitfor( `f` ); }       $\C{// clear which f}$
     1938void `f`( M & mutex m, int );                           $\C{// different f}$
     1939void h( M & mutex m ) { waitfor( `f` ); }       $\C{// unclear which f}$
     1940\end{cfa}
      1941Hence, the cfa-code for entering a monitor looks like:
     1942\begin{cfa}
     1943if ( $\textrm{\textit{monitor is free}}$ ) $\LstCommentStyle{// \color{red}enter}$
     1944else if ( $\textrm{\textit{already own monitor}}$ ) $\LstCommentStyle{// \color{red}continue}$
     1945else if ( $\textrm{\textit{monitor accepts me}}$ ) $\LstCommentStyle{// \color{red}enter}$
     1946else $\LstCommentStyle{// \color{red}block}$
     1947\end{cfa}
    21011948For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions.
    2102 However, a fast check for @monitor accepts me@ is much harder to implement depending on the constraints put on the monitors.
    2103 Indeed, monitors are often expressed as an entry queue and some acceptor queue as in Figure~\ref{fig:ClassicalMonitor}.
     1949However, a fast check for \emph{monitor accepts me} is much harder to implement depending on the constraints put on the monitors.
     1950Figure~\ref{fig:ClassicalMonitor} shows monitors are often expressed as an entry (calling) queue, some acceptor queues, and an urgent stack/queue.
    21041951
    21051952\begin{figure}
    21061953\centering
    2107 \subfloat[Classical Monitor] {
     1954\subfloat[Classical monitor] {
    21081955\label{fig:ClassicalMonitor}
    2109 {\resizebox{0.45\textwidth}{!}{\input{monitor}}}
     1956{\resizebox{0.45\textwidth}{!}{\input{monitor.pstex_t}}}
    21101957}% subfloat
    2111 \qquad
    2112 \subfloat[\textbf{bulk-acq} Monitor] {
     1958\quad
     1959\subfloat[Bulk acquire monitor] {
    21131960\label{fig:BulkMonitor}
    2114 {\resizebox{0.45\textwidth}{!}{\input{ext_monitor}}}
     1961{\resizebox{0.45\textwidth}{!}{\input{ext_monitor.pstex_t}}}
    21151962}% subfloat
    2116 \caption{External Scheduling Monitor}
     1963\caption{Monitor Implementation}
     1964\label{f:MonitorImplementation}
    21171965\end{figure}
    21181966
    2119 There are other alternatives to these pictures, but in the case of the left picture, implementing a fast accept check is relatively easy.
    2120 Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (\eg 128) of mutex members.
    2121 This approach requires a unique dense ordering of routines with an upper-bound and that ordering must be consistent across translation units.
    2122 For OO languages these constraints are common, since objects only offer adding member routines consistently across translation units via inheritance.
    2123 However, in \CFA users can extend objects with mutex routines that are only visible in certain translation unit.
    2124 This means that establishing a program-wide dense-ordering among mutex routines can only be done in the program linking phase, and still could have issues when using dynamically shared objects.
    2125 
    2126 The alternative is to alter the implementation as in Figure~\ref{fig:BulkMonitor}.
    2127 Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separate.
    2128 Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions.
    2129 Storing an array of accepted function pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search.
    2130 Furthermore, supporting nested external scheduling (\eg listing \ref{f:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued.
    2131 
     1967For a fixed (small) number of mutex routines (\eg 128), the accept check reduces to a bitmask of allowed callers, which can be checked with a single instruction.
     1968This approach requires a unique dense ordering of routines with a small upper-bound and the ordering must be consistent across translation units.
      1969For object-oriented languages these constraints are common, but \CFA mutex routines can be added in any scope and are only visible in certain translation units, precluding a program-wide dense ordering among mutex routines.
     1970
     1971Figure~\ref{fig:BulkMonitor} shows the \CFA monitor implementation.
     1972The mutex routine called is associated with each thread on the entry queue, while a list of acceptable routines is kept separately.
     1973The accepted list is a variable-sized array of accepted routine pointers, so the single instruction bitmask comparison is replaced by dereferencing a pointer followed by a linear search.
     1974
     1975\begin{comment}
    21321976\begin{figure}
    21331977\begin{cfa}[caption={Example of nested external scheduling},label={f:nest-ext}]
     
    21451989\end{figure}
    21461990
    2147 Note that in the right picture, tasks need to always keep track of the monitors associated with mutex routines, and the routine mask needs to have both a function pointer and a set of monitors, as is discussed in the next section.
     1991Note that in the right picture, tasks need to always keep track of the monitors associated with mutex routines, and the routine mask needs to have both a routine pointer and a set of monitors, as is discussed in the next section.
    21481992These details are omitted from the picture for the sake of simplicity.
    21491993
     
    21531997In the end, the most flexible approach has been chosen since it allows users to write programs that would otherwise be  hard to write.
    21541998This decision is based on the assumption that writing fast but inflexible locks is closer to a solved problem than writing locks that are as flexible as external scheduling in \CFA.
    2155 
    2156 % ======================================================================
    2157 % ======================================================================
     1999\end{comment}
     2000
     2001
    21582002\subsection{Multi-Monitor Scheduling}
    2159 % ======================================================================
    2160 % ======================================================================
     2003\label{s:Multi-MonitorScheduling}
    21612004
    21622005External scheduling, like internal scheduling, becomes significantly more complex when introducing multi-monitor syntax.
    2163 Even in the simplest possible case, some new semantics needs to be established:
     2006Even in the simplest possible case, new semantics needs to be established:
    21642007\begin{cfa}
    21652008monitor M {};
    2166 
    2167 void f(M & mutex a);
    2168 
    2169 void g(M & mutex b, M & mutex c) {
    2170         waitfor(f); // two monitors M => unknown which to pass to f(M & mutex)
    2171 }
    2172 \end{cfa}
    2173 The obvious solution is to specify the correct monitor as follows:
    2174 
     2009void f( M & mutex m1 );
     2010void g( M & mutex m1, M & mutex m2 ) {
     2011        waitfor( f );                                                   $\C{// pass m1 or m2 to f?}$
     2012}
     2013\end{cfa}
     2014The solution is for the programmer to disambiguate:
     2015\begin{cfa}
     2016        waitfor( f, m2 );                                               $\C{// wait for call to f with argument m2}$
     2017\end{cfa}
     2018Routine @g@ has acquired both locks, so when routine @f@ is called, the lock for monitor @m2@ is passed from @g@ to @f@ (while @g@ still holds lock @m1@).
     2019This behaviour can be extended to the multi-monitor @waitfor@ statement.
    21752020\begin{cfa}
    21762021monitor M {};
    2177 
    2178 void f(M & mutex a);
    2179 
    2180 void g(M & mutex a, M & mutex b) {
    2181         // wait for call to f with argument b
    2182         waitfor(f, b);
    2183 }
    2184 \end{cfa}
    2185 This syntax is unambiguous.
    2186 Both locks are acquired and kept by @g@.
    2187 When routine @f@ is called, the lock for monitor @b@ is temporarily transferred from @g@ to @f@ (while @g@ still holds lock @a@).
    2188 This behaviour can be extended to the multi-monitor @waitfor@ statement as follows.
    2189 
    2190 \begin{cfa}
    2191 monitor M {};
    2192 
    2193 void f(M & mutex a, M & mutex b);
    2194 
    2195 void g(M & mutex a, M & mutex b) {
    2196         // wait for call to f with arguments a and b
    2197         waitfor(f, a, b);
    2198 }
    2199 \end{cfa}
    2200 
    2201 Note that the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired in the routine. @waitfor@ used in any other context is undefined behaviour.
     2022void f( M & mutex m1, M & mutex m2 );
     2023void g( M & mutex m1, M & mutex m2 ) {
     2024        waitfor( f, m1, m2 );                                   $\C{// wait for call to f with arguments m1 and m2}$
     2025}
     2026\end{cfa}
      2027Again, the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired by the accepting routine.
    22022028
     22032029An important behaviour to note is when a set of monitors only matches partially:
    2204 
    22052030\begin{cfa}
    22062031mutex struct A {};
    2207 
    22082032mutex struct B {};
    2209 
    2210 void g(A & mutex a, B & mutex b) {
    2211         waitfor(f, a, b);
    2212 }
    2213 
     2033void g( A & mutex m1, B & mutex m2 ) {
     2034        waitfor( f, m1, m2 );
     2035}
    22142036A a1, a2;
    22152037B b;
    2216 
    22172038void foo() {
    2218         g(a1, b); // block on accept
    2219 }
    2220 
     2039        g( a1, b ); // block on accept
     2040}
    22212041void bar() {
    2222         f(a2, b); // fulfill cooperation
     2042        f( a2, b ); // fulfill cooperation
    22232043}
    22242044\end{cfa}
     
     22272047It is also important to note that, in the case of external scheduling, the order of parameters is irrelevant; @waitfor(f,a,b)@ and @waitfor(f,b,a)@ are indistinguishable waiting conditions.
    22282048
    2229 % ======================================================================
    2230 % ======================================================================
     2049
    22312050\subsection{\protect\lstinline|waitfor| Semantics}
    2232 % ======================================================================
    2233 % ======================================================================
    2234 
    2235 Syntactically, the @waitfor@ statement takes a function identifier and a set of monitors.
    2236 While the set of monitors can be any list of expressions, the function name is more restricted because the compiler validates at compile time the validity of the function type and the parameters used with the @waitfor@ statement.
    2237 It checks that the set of monitors passed in matches the requirements for a function call.
     2051
     2052Syntactically, the @waitfor@ statement takes a routine identifier and a set of monitors.
      2053While the set of monitors can be any list of expressions, the routine name is more restricted because the compiler validates, at compile time, the routine type and the parameters used with the @waitfor@ statement.
     2054It checks that the set of monitors passed in matches the requirements for a routine call.
    22382055Figure~\ref{f:waitfor} shows various usages of the waitfor statement and which are acceptable.
    2239 The choice of the function type is made ignoring any non-@mutex@ parameter.
     2056The choice of the routine type is made ignoring any non-@mutex@ parameter.
    22402057One limitation of the current implementation is that it does not handle overloading, but overloading is possible.
    22412058\begin{figure}
     
    22632080        waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match
    22642081        waitfor(f1, 1);      // Incorrect : 1 not a mutex argument
    2265         waitfor(f9, a1);     // Incorrect : f9 function does not exist
     2082        waitfor(f9, a1);     // Incorrect : f9 routine does not exist
    22662083        waitfor(*fp, a1 );   // Incorrect : fp not an identifier
    22672084        waitfor(f4, a1);     // Incorrect : f4 ambiguous
     
    22732090
    22742091Finally, for added flexibility, \CFA supports constructing a complex @waitfor@ statement using the @or@, @timeout@ and @else@.
    2275 Indeed, multiple @waitfor@ clauses can be chained together using @or@; this chain forms a single statement that uses baton pass to any function that fits one of the function+monitor set passed in.
    2276 To enable users to tell which accepted function executed, @waitfor@s are followed by a statement (including the null statement @;@) or a compound statement, which is executed after the clause is triggered.
    2277 A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks for a matching function call already arrived and otherwise continues.
     2092Indeed, multiple @waitfor@ clauses can be chained together using @or@; this chain forms a single statement that uses baton pass to any routine that fits one of the routine+monitor set passed in.
     2093To enable users to tell which accepted routine executed, @waitfor@s are followed by a statement (including the null statement @;@) or a compound statement, which is executed after the clause is triggered.
     2094A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks for a matching routine call already arrived and otherwise continues.
    22782095Any and all of these clauses can be preceded by a @when@ condition to dynamically toggle the accept clauses on or off based on some current state.
    22792096Figure~\ref{f:waitfor2} demonstrates several complex masks and some incorrect ones.
     
    23302147\end{figure}
    23312148
    2332 % ======================================================================
    2333 % ======================================================================
     2149
    23342150\subsection{Waiting For The Destructor}
    2335 % ======================================================================
    2336 % ======================================================================
     2151
    23372152An interesting use for the @waitfor@ statement is destructor semantics.
    23382153Indeed, the @waitfor@ statement can accept any @mutex@ routine, which includes the destructor (see section \ref{data}).
     
    23612176
    23622177
    2363 % ######     #    ######     #    #       #       ####### #       ###  #####  #     #
    2364 % #     #   # #   #     #   # #   #       #       #       #        #  #     # ##   ##
    2365 % #     #  #   #  #     #  #   #  #       #       #       #        #  #       # # # #
    2366 % ######  #     # ######  #     # #       #       #####   #        #   #####  #  #  #
    2367 % #       ####### #   #   ####### #       #       #       #        #        # #     #
    2368 % #       #     # #    #  #     # #       #       #       #        #  #     # #     #
    2369 % #       #     # #     # #     # ####### ####### ####### ####### ###  #####  #     #
    23702178\section{Parallelism}
     2179
    23712180Historically, computer performance was about processor speeds and instruction counts.
    23722181However, with heat dissipation being a direct consequence of speed increase, parallelism has become the new source for increased performance~\cite{Sutter05, Sutter05b}.
     
    23782187While there are many variations of the presented paradigms, most of these variations do not actually change the guarantees or the semantics, they simply move costs in order to achieve better performance for certain workloads.
    23792188
     2189
    23802190\section{Paradigms}
     2191
     2192
    23812193\subsection{User-Level Threads}
     2194
    23822195A direct improvement on the \textbf{kthread} approach is to use \textbf{uthread}.
    23832196These threads offer most of the same features that the operating system already provides but can be used on a much larger scale.
     
    23882201Examples of languages that support \textbf{uthread} are Erlang~\cite{Erlang} and \uC~\cite{uC++book}.
    23892202
     2203
    23902204\subsection{Fibers : User-Level Threads Without Preemption} \label{fibers}
     2205
    23912206A popular variant of \textbf{uthread} is what is often referred to as \textbf{fiber}.
    23922207However, \textbf{fiber} do not present meaningful semantic differences with \textbf{uthread}.
     
    23972212An example of a language that uses fibers is Go~\cite{Go}
    23982213
     2214
    23992215\subsection{Jobs and Thread Pools}
     2216
    24002217An approach on the opposite end of the spectrum is to base parallelism on \textbf{pool}.
    24012218Indeed, \textbf{pool} offer limited flexibility but at the benefit of a simpler user interface.
     
    24082225The gold standard of this implementation is Intel's TBB library~\cite{TBB}.
    24092226
     2227
    24102228\subsection{Paradigm Performance}
     2229
    24112230While the choice between the three paradigms listed above may have significant performance implications, it is difficult to pin down the performance implications of choosing a model at the language level.
    24122231Indeed, in many situations one of these paradigms may show better performance but it all strongly depends on the workload.
     
     24162235Finally, if the units of uninterrupted work are large enough, the paradigm choice is largely amortized by the actual work done.
    24172236
     2237
    24182238\section{The \protect\CFA\ Kernel : Processors, Clusters and Threads}\label{kernel}
     2239
    24192240A \textbf{cfacluster} is a group of \textbf{kthread} executed in isolation. \textbf{uthread} are scheduled on the \textbf{kthread} of a given \textbf{cfacluster}, allowing organization between \textbf{uthread} and \textbf{kthread}.
     24202241It is important that \textbf{kthread} belonging to the same \textbf{cfacluster} have homogeneous settings, otherwise migrating a \textbf{uthread} from one \textbf{kthread} to another can cause issues.
     
    24242245Currently \CFA only supports one \textbf{cfacluster}, the initial one.
    24252246
     2247
    24262248\subsection{Future Work: Machine Setup}\label{machine}
     2249
    24272250While this was not done in the context of this paper, another important aspect of clusters is affinity.
    24282251While many common desktop and laptop PCs have homogeneous CPUs, other devices often have more heterogeneous setups.
     
    24302253OS support for CPU affinity is now common~\cite{affinityLinux, affinityWindows, affinityFreebsd, affinityNetbsd, affinityMacosx}, which means it is both possible and desirable for \CFA to offer an abstraction mechanism for portable CPU affinity.
    24312254
     2255
    24322256\subsection{Paradigms}\label{cfaparadigms}
     2257
    24332258Given these building blocks, it is possible to reproduce all three of the popular paradigms.
    24342259Indeed, \textbf{uthread} is the default paradigm in \CFA.
     
    24382263
    24392264
    2440 
    24412265\section{Behind the Scenes}
     2266
    24422267There are several challenges specific to \CFA when implementing concurrency.
    2443 These challenges are a direct result of \textbf{bulk-acq} and loose object definitions.
     2268These challenges are a direct result of bulk acquire and loose object definitions.
    24442269These two constraints are the root cause of most design decisions in the implementation.
    24452270Furthermore, to avoid contention from dynamically allocating memory in a concurrent environment, the internal-scheduling design is (almost) entirely free of mallocs.
     
    24492274The main memory concern for concurrency is queues.
    24502275All blocking operations are made by parking threads onto queues and all queues are designed with intrusive nodes, where each node has pre-allocated link fields for chaining, to avoid the need for memory allocation.
    2451 Since several concurrency operations can use an unbound amount of memory (depending on \textbf{bulk-acq}), statically defining information in the intrusive fields of threads is insufficient.The only way to use a variable amount of memory without requiring memory allocation is to pre-allocate large buffers of memory eagerly and store the information in these buffers.
      2276Since several concurrency operations can use an unbound amount of memory (depending on bulk acquire), statically defining information in the intrusive fields of threads is insufficient. The only way to use a variable amount of memory without requiring memory allocation is to pre-allocate large buffers of memory eagerly and store the information in these buffers.
    24522277Conveniently, the call stack fits that description and is easy to use, which is why it is used heavily in the implementation of internal scheduling, particularly variable-length arrays.
    24532278Since stack allocation is based on scopes, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable-length array.
    24542279The threads and the condition both have a fixed amount of memory, while @mutex@ routines and blocking calls allow for an unbound amount, within the stack size.
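As a small illustration of the intrusive-node idea (the type and field names are invented for this sketch), the links live inside the blocked entity itself, so enqueuing never allocates:
\begin{cfa}
struct blocked_thread {
	blocked_thread * next;          // intrusive link, pre-allocated inside the thread descriptor
	// ... remaining per-thread state ...
};
struct blocked_queue {
	blocked_thread * head, * tail;
};
void append( blocked_queue & q, blocked_thread & t ) {
	t.next = 0;                     // no allocation: only pointer updates
	if ( q.tail ) q.tail->next = &t; else q.head = &t;
	q.tail = &t;
}
\end{cfa}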
    24552280
    2456 Note that since the major contributions of this paper are extending monitor semantics to \textbf{bulk-acq} and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed.
    2457 
    2458 % ======================================================================
    2459 % ======================================================================
     2281Note that since the major contributions of this paper are extending monitor semantics to bulk acquire and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed.
     2282
     2283
    24602284\section{Mutex Routines}
    2461 % ======================================================================
    2462 % ======================================================================
    24632285
    24642286The first step towards the monitor implementation is simple @mutex@ routines.
     
    24952317\end{figure}
    24962318
     2319
    24972320\subsection{Details: Interaction with polymorphism}
     2321
    24982322Depending on the choice of semantics for when monitor locks are acquired, interaction between monitors and \CFA's concept of polymorphism can be more complex to support.
    24992323However, it is shown that entry-point locking solves most of the issues.
     
    25642388void foo(T * mutex t);
    25652389
    2566 // Correct: this function only works on monitors (any monitor)
     2390// Correct: this routine only works on monitors (any monitor)
    25672391forall(dtype T | is_monitor(T))
     25682392void bar(T * mutex t);
     
    25712395Both entry point and \textbf{callsite-locking} are feasible implementations.
    25722396The current \CFA implementation uses entry-point locking because it requires less work when using \textbf{raii}, effectively transferring the burden of implementation to object construction/destruction.
    2573 It is harder to use \textbf{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, \ie the function body.
     2397It is harder to use \textbf{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, \ie the routine body.
    25742398For example, the monitor call can appear in the middle of an expression.
    25752399Furthermore, entry-point locking requires less code generation since any useful routine is called multiple times but there is only one entry point for many call sites.
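Conceptually, entry-point locking wraps the body of each @mutex@ routine with a guard object whose construction acquires the monitor set and whose destruction releases it; the guard type below is illustrative only, not the actual generated runtime code:
\begin{cfa}
void f( M & mutex m ) {
	// generated prologue (illustrative): RAII guard acquires m, or the whole set for bulk acquire
	mutex_guard guard = { & m };
	// ... user-written body executes with mutual exclusion ...
}	$\C{// generated epilogue: guard destruction releases m, even on exceptional return}$
\end{cfa}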
    25762400
    2577 % ======================================================================
    2578 % ======================================================================
     2401
    25792402\section{Threading} \label{impl:thread}
    2580 % ======================================================================
    2581 % ======================================================================
    25822403
     25832404Figure \ref{fig:system1} shows a high-level picture of the \CFA runtime system with respect to concurrency.
     
    25922413\end{figure}
    25932414
     2415
    25942416\subsection{Processors}
     2417
    25952418Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically @pthread@s in the current implementation of \CFA.
    25962419Indeed, any parallelism must go through operating-system libraries.
     
    26002423Processors internally use coroutines to take advantage of the existing context-switching semantics.
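As a minimal sketch, and assuming the declaration-style interface implied by the wrapper description above (constructor parameters elided), adding parallelism is simply declaring processor objects:
\begin{cfa}
processor p;                    $\C{// add one kernel thread to the current cluster}$
processor others[2];            $\C{// or several at once; released again at end of scope}$
\end{cfa}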
    26012424
     2425
    26022426\subsection{Stack Management}
     2427
    26032428One of the challenges of this system is to reduce the footprint as much as possible.
    26042429Specifically, all @pthread@s created also have a stack created with them, which should be used as much as possible.
     
    26072432In order to respect C user expectations, the stack of the initial kernel thread, the main stack of the program, is used by the main user thread rather than the main processor, which can grow very large.
    26082433
     2434
    26092435\subsection{Context Switching}
     2436
    26102437As mentioned in section \ref{coroutine}, coroutines are a stepping stone for implementing threading, because they share the same mechanism for context-switching between different stacks.
    2611 To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific function call.
     2438To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific routine call.
    26122439This assumption means that the context-switch only has to copy the callee-saved registers onto the stack and then switch the stack registers with the ones of the target coroutine/thread.
    2613 Note that the instruction pointer can be left untouched since the context-switch is always inside the same function.
      2440Note that the instruction pointer can be left untouched since the context-switch is always inside the same routine.
    26142441Threads, however, do not context-switch between each other directly.
    26152442They context-switch to the scheduler.
     
    26212448This option is not currently present in \CFA, but the changes required to add it are strictly additive.
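A conceptual outline of the scheduler-mediated switch described above, with invented helper and type names rather than the actual runtime interface, is:
\begin{cfa}
void park_and_schedule( thread_desc & this_thread, processor & proc ) {
	// step 1: switch from the user thread's stack to the processor's scheduler coroutine
	context_switch( this_thread.context, proc.scheduler_context );
}
void scheduler_loop( processor & proc ) {
	for ( ;; ) {
		thread_desc & next = next_ready_thread( proc );
		// step 2: switch from the scheduler coroutine to the chosen thread's stack
		context_switch( proc.scheduler_context, next.context );
	}
}
\end{cfa}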
    26222449
     2450
    26232451\subsection{Preemption} \label{preemption}
     2452
    26242453Finally, an important aspect for any complete threading system is preemption.
    26252454As mentioned in section \ref{basics}, preemption introduces an extra degree of uncertainty, which enables users to have multiple threads interleave transparently, rather than having to cooperate among threads for proper scheduling and CPU distribution.
     
    26482477As a result, a signal handler can start on one kernel thread and terminate on a second kernel thread (but the same user thread).
    26492478It is important to note that signal handlers save and restore signal masks because user-thread migration can cause a signal mask to migrate from one kernel thread to another.
    2650 This behaviour is only a problem if all kernel threads, among which a user thread can migrate, differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' functions from other functions.}.
      2479This behaviour is only a problem if all kernel threads, among which a user thread can migrate, differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' routines from other routines.}.
    26512480However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks.
    26522481For this reason, the alarm thread is in a tight loop around a system call to @sigwaitinfo@, requiring very little CPU time for preemption.
     
    26552484Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel.
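The following simplified sketch shows the overall shape of such an alarm thread using standard POSIX calls; the signal numbers and the victim-selection helper are illustrative choices, not the actual \CFA runtime code:
\begin{cfa}
#define _GNU_SOURCE                     // pthread_sigqueue is a GNU extension
#include <signal.h>
#include <pthread.h>

pthread_t choose_victim( void );        // hypothetical helper: pick the processor to preempt

void * alarm_loop( void * arg ) {
	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, SIGALRM );        // timer signal, assumed blocked in every thread
	siginfo_t info;
	for ( ;; ) {
		sigwaitinfo( &mask, &info );    // cheap blocking wait for the next alarm
		union sigval val = { .sival_int = 1 };
		// forward preemption to one kernel thread; its handler forces the running user thread to yield
		pthread_sigqueue( choose_victim(), SIGUSR1, val );
	}
	return 0;
}
\end{cfa}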
    26562485
     2486
    26572487\subsection{Scheduler}
    26582488Finally, an aspect that was not mentioned yet is the scheduling algorithm.
     
    26602490Further discussion on scheduling is present in section \ref{futur:sched}.
    26612491
    2662 % ======================================================================
    2663 % ======================================================================
     2492
    26642493\section{Internal Scheduling} \label{impl:intsched}
    2665 % ======================================================================
    2666 % ======================================================================
     2494
    26672495The following figure is the traditional illustration of a monitor (repeated from page~\pageref{fig:ClassicalMonitor} for convenience):
    26682496
    26692497\begin{figure}
    26702498\begin{center}
    2671 {\resizebox{0.4\textwidth}{!}{\input{monitor}}}
     2499{\resizebox{0.4\textwidth}{!}{\input{monitor.pstex_t}}}
    26722500\end{center}
    26732501\caption{Traditional illustration of a monitor}
     
    26782506
    26792507For \CFA, this picture does not have support for blocking multiple monitors on a single condition.
    2680 To support \textbf{bulk-acq} two changes to this picture are required.
     2508To support bulk acquire two changes to this picture are required.
    26812509First, it is no longer helpful to attach the condition to \emph{a single} monitor.
    26822510Secondly, the thread waiting on the condition has to be separated across multiple monitors, seen in figure \ref{fig:monitor_cfa}.
     
    27272555\end{figure}
    27282556
    2729 The solution discussed in \ref{intsched} can be seen in the exit routine of listing \ref{f:entry2}.
      2557The solution discussed in \ref{s:InternalScheduling} can be seen in the exit routine of listing \ref{f:entry2}.
    27302558Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has transferred ownership.
    27312559This solution is deadlock safe as well as preventing any potential barging.
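A conceptual outline of this exit behaviour, with invented helper names, is:
\begin{cfa}
void leave( monitor_desc & this ) {
	thread_desc * next = head_urgent( this );          // front signalled waiter, if any
	if ( next ) {
		pass_ownership( this, *next );                  // unconditionally transfer this monitor
		// wake the waiter only once it owns every monitor it is waiting for
		if ( owns_all_needed_monitors( *next ) ) unpark( *next );
	} else {
		wake_from_entry_queue( this );                  // otherwise admit a new caller
	}
}
\end{cfa}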
     
    29632791}
    29642792\end{cfa}
    2965 This function is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, \ie no preemption.
     2793This routine is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, \ie no preemption.
     29662794However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{f:fiber-uthread}.
    29672795\begin{figure}
     
    31482976For monitors, the simplest approach is to measure how long it takes to enter and leave a monitor routine.
    31492977Figure~\ref{f:mutex} shows the code for \CFA.
    3150 To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured.
     2978To put the results in context, the cost of entering a non-inline routine and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured.
     31512979The results are shown in table \ref{tab:mutex}.
    31522980
     
    33993227Therefore, there is still significant work to improve performance.
    34003228Many of the data structures and algorithms may change in the future to more efficient versions.
    3401 For example, the number of monitors in a single \textbf{bulk-acq} is only bound by the stack size, this is probably unnecessarily generous.
      3229For example, the number of monitors in a single bulk acquire is only bound by the stack size, which is probably unnecessarily generous.
    34023230It may be possible that limiting the number helps increase performance.
    34033231However, it is not obvious that the benefit would be significant.
  • doc/papers/concurrency/figures/ext_monitor.fig

    r97397a26 rb21c77a  
    88-2
    991200 2
    10 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 3150.000 3450.000 3150 3150 2850 3450 3150 3750
    11 5 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 3150.000 4350.000 3150 4050 2850 4350 3150 4650
    12 6 5850 1950 6150 2250
    13 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2100 105 105 6000 2100 6105 2205
    14 4 1 -1 0 0 0 10 0.0000 2 105 90 6000 2160 d\001
     105 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1575.000 3450.000 1575 3150 1275 3450 1575 3750
     115 1 0 1 -1 -1 0 0 -1 0.000 0 1 0 0 1575.000 4350.000 1575 4050 1275 4350 1575 4650
     126 4275 1950 4575 2250
     131 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 2100 105 105 4425 2100 4530 2205
     144 1 -1 0 0 0 10 0.0000 2 105 90 4425 2160 d\001
    1515-6
    16 6 5100 2100 5400 2400
    17 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 5250 2250 105 105 5250 2250 5355 2250
    18 4 1 -1 0 0 0 10 0.0000 2 105 120 5250 2295 X\001
     166 4275 1650 4575 1950
     171 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 1800 105 105 4425 1800 4530 1905
     184 1 -1 0 0 0 10 0.0000 2 105 90 4425 1860 b\001
    1919-6
    20 6 5100 1800 5400 2100
    21 1 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 5250 1950 105 105 5250 1950 5355 1950
    22 4 1 -1 0 0 0 10 0.0000 2 105 120 5250 2010 Y\001
     206 1495 5445 5700 5655
     211 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 1575 5550 80 80 1575 5550 1655 5630
     221 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2925 5550 105 105 2925 5550 3030 5655
     231 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 4425 5550 105 105 4425 5550 4530 5655
     244 0 -1 0 0 0 12 0.0000 2 135 1035 3150 5625 blocked task\001
     254 0 -1 0 0 0 12 0.0000 2 135 870 1725 5625 active task\001
     264 0 -1 0 0 0 12 0.0000 2 135 1050 4650 5625 routine mask\001
    2327-6
    24 6 5850 1650 6150 1950
    25 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 1800 105 105 6000 1800 6105 1905
    26 4 1 -1 0 0 0 10 0.0000 2 105 90 6000 1860 b\001
     286 3525 1800 3825 2400
     291 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3675 1950 105 105 3675 1950 3780 1950
     302 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
     31         3525 1800 3825 1800 3825 2400 3525 2400 3525 1800
     324 1 4 0 0 0 10 0.0000 2 105 120 3675 2010 Y\001
    2733-6
    28 6 3070 5445 7275 5655
    29 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3150 5550 80 80 3150 5550 3230 5630
    30 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4500 5550 105 105 4500 5550 4605 5655
    31 1 3 0 1 -1 -1 0 0 4 0.000 1 0.0000 6000 5550 105 105 6000 5550 6105 5655
    32 4 0 -1 0 0 0 12 0.0000 2 135 1035 4725 5625 blocked task\001
    33 4 0 -1 0 0 0 12 0.0000 2 135 870 3300 5625 active task\001
    34 4 0 -1 0 0 0 12 0.0000 2 135 1050 6225 5625 routine mask\001
     346 3525 2100 3825 2400
     351 3 0 1 -1 -1 1 0 4 0.000 1 0.0000 3675 2250 105 105 3675 2250 3780 2250
     364 1 4 0 0 0 10 0.0000 2 105 120 3675 2295 X\001
    3537-6
    36 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3300 3600 105 105 3300 3600 3405 3705
    37 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 3600 3600 105 105 3600 3600 3705 3705
    38 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6600 3900 105 105 6600 3900 6705 4005
    39 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6900 3900 105 105 6900 3900 7005 4005
    40 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2700 105 105 6000 2700 6105 2805
    41 1 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 6000 2400 105 105 6000 2400 6105 2505
    42 1 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 5100 4575 80 80 5100 4575 5180 4655
     381 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 1725 3600 105 105 1725 3600 1830 3705
     391 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 2025 3600 105 105 2025 3600 2130 3705
     401 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5025 3900 105 105 5025 3900 5130 4005
     411 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 5325 3900 105 105 5325 3900 5430 4005
     421 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 2700 105 105 4425 2700 4530 2805
     431 3 0 1 -1 -1 0 0 -1 0.000 1 0.0000 4425 2400 105 105 4425 2400 4530 2505
     441 3 0 1 -1 -1 0 0 20 0.000 1 0.0000 3525 4575 80 80 3525 4575 3605 4655
    43452 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    44          4050 2925 5475 2925 5475 3225 4050 3225 4050 2925
     46         2475 2925 3900 2925 3900 3225 2475 3225 2475 2925
    45472 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4
    46          3150 3750 3750 3750 3750 4050 3150 4050
     48         1575 3750 2175 3750 2175 4050 1575 4050
    47492 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3
    48          3150 3450 3750 3450 3900 3675
     50         1575 3450 2175 3450 2325 3675
    49512 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2
    50          3750 3150 3600 3375
     52         2175 3150 2025 3375
    51532 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 3
    52          3150 4350 3750 4350 3900 4575
     54         1575 4350 2175 4350 2325 4575
    53552 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2
    54          3750 4050 3600 4275
     56         2175 4050 2025 4275
    55572 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4
    56          3150 4650 3750 4650 3750 4950 4950 4950
     58         1575 4650 2175 4650 2175 4950 3375 4950
    57592 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2
    58          6450 3750 6300 3975
     60         4875 3750 4725 3975
    59612 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 2
    60          4950 4950 5175 5100
     62         3375 4950 3600 5100
    61632 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 9
    62          5250 4950 6450 4950 6450 4050 7050 4050 7050 3750 6450 3750
    63          6450 2850 6150 2850 6150 1650
     64         3675 4950 4875 4950 4875 4050 5475 4050 5475 3750 4875 3750
     65         4875 2850 4575 2850 4575 1650
    64662 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5
    65          5850 4200 5850 3300 4350 3300 4350 4200 5850 4200
    66 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
     67         4275 4200 4275 3300 2775 3300 2775 4200 4275 4200
     682 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
    6769        1 1 1.00 60.00 120.00
    68         7 1 1.00 60.00 120.00
    69          5250 3150 5250 2400
     70         3675 3075 3675 2400
     712 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
     72         4125 2850 4575 3000
    70732 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    71          3150 3150 3750 3150 3750 2850 5700 2850 5700 1650
    72 2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
    73          5700 2850 6150 3000
    74 2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
    75          5100 1800 5400 1800 5400 2400 5100 2400 5100 1800
    76 4 1 -1 0 0 0 10 0.0000 2 75 75 6000 2745 a\001
    77 4 1 -1 0 0 0 10 0.0000 2 75 75 6000 2445 c\001
    78 4 1 -1 0 0 0 12 0.0000 2 135 315 5100 5325 exit\001
    79 4 1 -1 0 0 0 12 0.0000 2 135 135 3300 3075 A\001
    80 4 1 -1 0 0 0 12 0.0000 2 135 795 3300 4875 condition\001
    81 4 1 -1 0 0 0 12 0.0000 2 135 135 3300 5100 B\001
    82 4 0 -1 0 0 0 12 0.0000 2 135 420 6600 3675 stack\001
    83 4 0 -1 0 0 0 12 0.0000 2 180 750 6600 3225 acceptor/\001
    84 4 0 -1 0 0 0 12 0.0000 2 180 750 6600 3450 signalled\001
    85 4 1 -1 0 0 0 12 0.0000 2 135 795 3300 2850 condition\001
    86 4 1 -1 0 0 0 12 0.0000 2 165 420 6000 1350 entry\001
    87 4 1 -1 0 0 0 12 0.0000 2 135 495 6000 1575 queue\001
    88 4 0 -1 0 0 0 12 0.0000 2 135 525 6300 2400 arrival\001
    89 4 0 -1 0 0 0 12 0.0000 2 135 630 6300 2175 order of\001
    90 4 1 -1 0 0 0 12 0.0000 2 135 525 5100 3675 shared\001
    91 4 1 -1 0 0 0 12 0.0000 2 135 735 5100 3975 variables\001
    92 4 0 0 50 -1 0 11 0.0000 2 165 855 4275 3150 Acceptables\001
    93 4 0 0 50 -1 0 11 0.0000 2 120 165 5775 2700 W\001
    94 4 0 0 50 -1 0 11 0.0000 2 120 135 5775 2400 X\001
    95 4 0 0 50 -1 0 11 0.0000 2 120 105 5775 2100 Z\001
    96 4 0 0 50 -1 0 11 0.0000 2 120 135 5775 1800 Y\001
     74         1575 3150 2175 3150 2175 2850 4125 2850 4125 1650
     754 1 -1 0 0 0 10 0.0000 2 75 75 4425 2745 a\001
     764 1 -1 0 0 0 10 0.0000 2 75 75 4425 2445 c\001
     774 1 -1 0 0 0 12 0.0000 2 135 315 3525 5325 exit\001
     784 1 -1 0 0 0 12 0.0000 2 135 135 1725 3075 A\001
     794 1 -1 0 0 0 12 0.0000 2 135 795 1725 4875 condition\001
     804 1 -1 0 0 0 12 0.0000 2 135 135 1725 5100 B\001
     814 0 -1 0 0 0 12 0.0000 2 135 420 5025 3675 stack\001
     824 0 -1 0 0 0 12 0.0000 2 180 750 5025 3225 acceptor/\001
     834 0 -1 0 0 0 12 0.0000 2 180 750 5025 3450 signalled\001
     844 1 -1 0 0 0 12 0.0000 2 135 795 1725 2850 condition\001
     854 1 -1 0 0 0 12 0.0000 2 165 420 4425 1350 entry\001
     864 1 -1 0 0 0 12 0.0000 2 135 495 4425 1575 queue\001
     874 0 -1 0 0 0 12 0.0000 2 135 525 4725 2400 arrival\001
     884 0 -1 0 0 0 12 0.0000 2 135 630 4725 2175 order of\001
     894 1 -1 0 0 0 12 0.0000 2 135 525 3525 3675 shared\001
     904 1 -1 0 0 0 12 0.0000 2 135 735 3525 3975 variables\001
     914 0 4 50 -1 0 11 0.0000 2 120 135 4150 1875 Y\001
     924 0 4 50 -1 0 11 0.0000 2 120 105 4150 2175 Z\001
     934 0 4 50 -1 0 11 0.0000 2 120 135 4150 2475 X\001
     944 0 4 50 -1 0 11 0.0000 2 120 165 4150 2775 W\001
     954 0 -1 0 0 3 12 0.0000 2 150 540 5025 4275 urgent\001
     964 1 0 50 -1 0 11 0.0000 2 165 600 3150 3150 accepted\001
  • doc/papers/concurrency/figures/monitor.fig

    r97397a26 rb21c77a  
    72722 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4
    7373         3600 1500 3600 2100 4200 2100 4200 900
    74 2 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4
    75          2700 1500 2700 2100 3300 2100 3300 1500
    76742 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 9
    7775         3600 4200 4800 4200 4800 3300 5400 3300 5400 3000 4800 3000
     
    79772 2 1 1 -1 -1 0 0 -1 4.000 0 0 0 0 0 5
    8078         4200 3450 4200 2550 2700 2550 2700 3450 4200 3450
     792 1 0 1 -1 -1 0 0 -1 0.000 0 0 -1 0 0 4
     80         2700 1500 2700 2100 3300 2100 3300 1500
    81814 1 -1 0 0 0 10 0.0000 2 75 75 4350 1995 a\001
    82824 1 -1 0 0 0 10 0.0000 2 75 75 4350 1695 c\001
     
    89894 0 -1 0 0 0 12 0.0000 2 180 750 4950 2700 signalled\001
    90904 1 -1 0 0 0 12 0.0000 2 135 795 1650 2100 condition\001
    91 4 1 -1 0 0 0 12 0.0000 2 135 135 2550 1425 X\001
    92 4 1 -1 0 0 0 12 0.0000 2 135 135 3450 1425 Y\001
     914 1 4 0 0 0 12 0.0000 2 135 135 2550 1425 X\001
     924 1 4 0 0 0 12 0.0000 2 135 135 3450 1425 Y\001
    93934 1 -1 0 0 0 12 0.0000 2 165 420 4350 600 entry\001
    94944 1 -1 0 0 0 12 0.0000 2 135 495 4350 825 queue\001
     
    1001004 1 -1 0 0 0 10 0.0000 2 75 75 3450 1995 c\001
    1011014 1 -1 0 0 0 12 0.0000 2 135 570 3000 1200 queues\001
     1024 0 -1 0 0 3 12 0.0000 2 150 540 4950 3525 urgent\001
  • doc/papers/general/Makefile

    r97397a26 rb21c77a  
    5959        dvips ${Build}/$< -o $@
    6060
    61 ${BASE}.dvi : Makefile ${Build} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    62                 ../../bibliography/pl.bib
     61${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     62                ../../bibliography/pl.bib | ${Build}
    6363        # Must have *.aux file containing citations for bibtex
    6464        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
     
    7575        mkdir -p ${Build}
    7676
    77 ${BASE}.out.ps : ${Build}
     77${BASE}.out.ps : | ${Build}
    7878        ln -fs ${Build}/Paper.out.ps .
    7979
     
    8484        gnuplot -e Build="'${Build}/'" evaluation/timing.gp
    8585
    86 %.tex : %.fig ${Build}
     86%.tex : %.fig | ${Build}
    8787        fig2dev -L eepic $< > ${Build}/$@
    8888
    89 %.ps : %.fig ${Build}
     89%.ps : %.fig | ${Build}
    9090        fig2dev -L ps $< > ${Build}/$@
    9191
    92 %.pstex : %.fig ${Build}
     92%.pstex : %.fig | ${Build}
    9393        fig2dev -L pstex $< > ${Build}/$@
    9494        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • doc/papers/general/Paper.tex

    r97397a26 rb21c77a  
    201201
    202202\abstract[Summary]{
    203 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects.
     203The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating-systems.
    204204This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
    205205Nevertheless, C, first standardized almost forty years ago, lacks many features that make programming in more modern languages safer and more productive.
     
    224224\section{Introduction}
    225225
    226 The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from commercial operating-systems to hobby projects.
     226The C programming language is a foundational technology for modern computing with millions of lines of code implementing everything from hobby projects to commercial operating-systems.
    227227This installation base and the programmers producing it represent a massive software-engineering investment spanning decades and likely to continue for decades more.
    228228The TIOBE~\cite{TIOBE} ranks the top 5 most \emph{popular} programming languages as: Java 15\%, \Textbf{C 12\%}, \Textbf{\CC 5.5\%}, Python 5\%, \Csharp 4.5\% = 42\%, where the next 50 languages are less than 4\% each with a long tail.
  • doc/proposals/ctordtor/Makefile

    r97397a26 rb21c77a  
    1 ## Define the appropriate configuration variables.
     1## Define the configuration variables.
    22
    3 MACROS = ../../LaTeXmacros
    4 BIB = ../../bibliography
     3Build = build
     4Figures = figures
     5Macros = ../../LaTeXmacros
     6Bib = ../../bibliography
    57
    6 TeXLIB = .:$(MACROS):$(MACROS)/listings:$(MACROS)/enumitem:$(BIB)/:
    7 LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error
     8TeXLIB = .:${Macros}:${MACROS}/listings:${MACROS}/enumitem:${Bib}/:
     9LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
    810BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
     11
     12MAKEFLAGS = --no-print-directory # --silent
     13VPATH = ${Build} ${Figures}
    914
    1015## Define the text source files.
     
    2934
    3035DOCUMENT = ctor.pdf
     36BASE = ${basename ${DOCUMENT}}
    3137
    3238# Directives #
     39
     40.PHONY : all clean                                      # not file names
    3341
    3442all : ${DOCUMENT}
    3543
    3644clean :
    37         rm -f *.bbl *.aux *.dvi *.idx *.ilg *.ind *.brf *.out *.log *.toc *.blg *.pstex_t *.cf \
    38                 ${FIGURES} ${PICTURES} ${PROGRAMS} ${GRAPHS} ${basename ${DOCUMENT}}.ps ${DOCUMENT}
     45        @rm -frv ${DOCUMENT} ${BASE}.ps ${Build}
    3946
    4047# File Dependencies #
    4148
    42 ${DOCUMENT} : ${basename ${DOCUMENT}}.ps
     49${DOCUMENT} : ${BASE}.ps
    4350        ps2pdf $<
    4451
    45 ${basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi
    46         dvips $< -o $@
     52${BASE}.ps : ${BASE}.dvi
     53        dvips ${Build}/$< -o $@
    4754
    48 ${basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex \
    49                 $(MACROS)/common.tex $(MACROS)/indexstyle $(BIB)/cfa.bib
     55${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     56                ${Macros}/common.tex ${Macros}/indexstyle ${Bib}/pl.bib | ${Build}
    5057        # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run.
    51         if [ ! -r ${basename $@}.ind ] ; then touch ${basename $@}.ind ; fi
     58        #if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi
    5259        # Must have *.aux file containing citations for bibtex
    5360        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
    54         -${BibTeX} ${basename $@}
    55         # Some citations reference others so run steps again to resolve these citations
     61        -${BibTeX} ${Build}/${basename $@}
     62        # Some citations reference others so run again to resolve these citations
    5663        ${LaTeX} ${basename $@}.tex
    57         -${BibTeX} ${basename $@}
     64        -${BibTeX} ${Build}/${basename $@}
    5865        # Make index from *.aux entries and input index at end of document
    59         makeindex -s $(MACROS)/indexstyle ${basename $@}.idx
     66        #makeindex -s ${Macros}/indexstyle ${Build}/${basename $@}.idx
     67        # Run again to finish citations
    6068        ${LaTeX} ${basename $@}.tex
    6169        # Run again to get index title into table of contents
     
    6775## Define the default recipes.
    6876
    69 %.tex : %.fig
    70         fig2dev -L eepic $< > $@
     77${Build}:
     78        mkdir -p ${Build}
    7179
    72 %.ps : %.fig
    73         fig2dev -L ps $< > $@
     80%.tex : %.fig | ${Build}
     81        fig2dev -L eepic $< > ${Build}/$@
    7482
    75 %.pstex : %.fig
    76         fig2dev -L pstex $< > $@
    77         fig2dev -L pstex_t -p $@ $< > $@_t
     83%.ps : %.fig | ${Build}
     84        fig2dev -L ps $< > ${Build}/$@
     85
     86%.pstex : %.fig | ${Build}
     87        fig2dev -L pstex $< > ${Build}/$@
     88        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
    7889
    7990# Local Variables: #
  • doc/proposals/ctordtor/ctor.tex

    r97397a26 rb21c77a  
    1 % inline code ©...© (copyright symbol) emacs: C-q M-)
    2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
    3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
    4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
    5 % LaTex escape §...§ (section symbol) emacs: C-q M-'
    6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
    7 % math escape $...$ (dollar symbol)
    8 
    91\documentclass[twoside,11pt]{article}
    102
     
    157\usepackage{textcomp}
    168\usepackage[latin1]{inputenc}
     9
    1710\usepackage{fullpage,times,comment}
    1811\usepackage{epic,eepic}
    19 \usepackage{upquote}                                                                    % switch curled `'" to straight
     12\usepackage{upquote}                                    % switch curled `'" to straight
    2013\usepackage{calc}
    2114\usepackage{xspace}
    2215\usepackage{graphicx}
    23 \usepackage{varioref}                                                                   % extended references
    24 \usepackage{listings}                                                                   % format program code
    25 \usepackage[flushmargin]{footmisc}                                              % support label/reference in footnote
     16\usepackage{varioref}                                   % extended references
     17\usepackage{listings}                                   % format program code
     18\usepackage[flushmargin]{footmisc}                      % support label/reference in footnote
    2619\usepackage{latexsym}                                   % \Box glyph
    2720\usepackage{mathptmx}                                   % better math font with "times"
     
    3427\renewcommand{\UrlFont}{\small\sf}
    3528
    36 \setlength{\topmargin}{-0.45in}                                                 % move running title into header
     29\setlength{\topmargin}{-0.45in}                         % move running title into header
    3730\setlength{\headsep}{0.25in}
    3831
     
    4336
    4437\interfootnotelinepenalty=10000
     38
     39\CFAStyle                                               % use default CFA format-style
     40% inline code ©...© (copyright symbol) emacs: C-q M-)
     41% red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
     42% blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
     43% green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
     44% LaTex escape §...§ (section symbol) emacs: C-q M-'
     45% keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
     46% math escape $...$ (dollar symbol)
     47
    4548
    4649\title{
     
    8386\thispagestyle{plain}
    8487\pagenumbering{arabic}
    85 
    8688
    8789
  • doc/proposals/tuples/Makefile

    r97397a26 rb21c77a  
    1 ## Define the appropriate configuration variables.
     1## Define the configuration variables.
    22
    3 MACROS = ../../LaTeXmacros
    4 BIB = ../../bibliography
     3Build = build
     4Figures = figures
     5Macros = ../../LaTeXmacros
     6Bib = ../../bibliography
    57
    6 TeXLIB = .:$(MACROS):$(MACROS)/listings:$(MACROS)/enumitem:$(BIB)/:
    7 LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error
     8TeXLIB = .:${Macros}:${MACROS}/listings:${MACROS}/enumitem:${Bib}/:
     9LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
    810BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
     11
     12MAKEFLAGS = --no-print-directory --silent #
     13VPATH = ${Build} ${Figures}
    914
    1015## Define the text source files.
     
    2934
    3035DOCUMENT = tuples.pdf
     36BASE = ${basename ${DOCUMENT}}
    3137
    3238# Directives #
     39
     40.PHONY : all clean                                      # not file names
    3341
    3442all : ${DOCUMENT}
    3543
    3644clean :
    37         rm -f *.bbl *.aux *.dvi *.idx *.ilg *.ind *.brf *.out *.log *.toc *.blg *.pstex_t *.cf \
    38                 ${FIGURES} ${PICTURES} ${PROGRAMS} ${GRAPHS} ${basename ${DOCUMENT}}.ps ${DOCUMENT}
     45        @rm -frv ${DOCUMENT} ${BASE}.ps ${Build}
    3946
    4047# File Dependencies #
    4148
    42 ${DOCUMENT} : ${basename ${DOCUMENT}}.ps
     49${DOCUMENT} : ${BASE}.ps
    4350        ps2pdf $<
    4451
    45 ${basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi
    46         dvips $< -o $@
     52${BASE}.ps : ${BASE}.dvi
     53        dvips ${Build}/$< -o $@
    4754
    48 ${basename ${DOCUMENT}}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${basename ${DOCUMENT}}.tex \
    49                 $(MACROS)/common.tex $(MACROS)/indexstyle $(BIB)/cfa.bib
     55${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     56                ${Macros}/common.tex ${Macros}/indexstyle ${Bib}/pl.bib | ${Build}
    5057        # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run.
    51         if [ ! -r ${basename $@}.ind ] ; then touch ${basename $@}.ind ; fi
     58        #if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi
    5259        # Must have *.aux file containing citations for bibtex
    5360        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
    54         -${BibTeX} ${basename $@}
    55         # Some citations reference others so run steps again to resolve these citations
     61        -${BibTeX} ${Build}/${basename $@}
     62        # Some citations reference others so run again to resolve these citations
    5663        ${LaTeX} ${basename $@}.tex
    57         -${BibTeX} ${basename $@}
     64        -${BibTeX} ${Build}/${basename $@}
    5865        # Make index from *.aux entries and input index at end of document
    59         makeindex -s $(MACROS)/indexstyle ${basename $@}.idx
     66        #makeindex -s ${Macros}/indexstyle ${Build}/${basename $@}.idx
     67        # Run again to finish citations
    6068        ${LaTeX} ${basename $@}.tex
    6169        # Run again to get index title into table of contents
     
    6775## Define the default recipes.
    6876
    69 %.tex : %.fig
    70         fig2dev -L eepic $< > $@
     77${Build}:
     78        mkdir -p ${Build}
    7179
    72 %.ps : %.fig
    73         fig2dev -L ps $< > $@
     80%.tex : %.fig | ${Build}
     81        fig2dev -L eepic $< > ${Build}/$@
    7482
    75 %.pstex : %.fig
    76         fig2dev -L pstex $< > $@
    77         fig2dev -L pstex_t -p $@ $< > $@_t
     83%.ps : %.fig | ${Build}
     84        fig2dev -L ps $< > ${Build}/$@
     85
     86%.pstex : %.fig | ${Build}
     87        fig2dev -L pstex $< > ${Build}/$@
     88        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
    7889
    7990# Local Variables: #
  • doc/proposals/tuples/tuples.tex

    r97397a26 rb21c77a  
    1 % inline code ©...© (copyright symbol) emacs: C-q M-)
    2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
    3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
    4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
    5 % LaTex escape §...§ (section symbol) emacs: C-q M-'
    6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
    7 % math escape $...$ (dollar symbol)
    8 
    91\documentclass[twoside,11pt]{article}
    102
     
    157\usepackage{textcomp}
    168\usepackage[latin1]{inputenc}
     9
    1710\usepackage{fullpage,times,comment}
    1811\usepackage{epic,eepic}
    19 \usepackage{upquote}                                                                    % switch curled `'" to straight
     12\usepackage{upquote}                                    % switch curled `'" to straight
    2013\usepackage{calc}
    2114\usepackage{xspace}
    2215\usepackage{graphicx}
    23 \usepackage{varioref}                                                                   % extended references
    24 \usepackage{listings}                                                                   % format program code
    25 \usepackage[flushmargin]{footmisc}                                              % support label/reference in footnote
     16\usepackage{varioref}                                   % extended references
     17\usepackage{listings}                                   % format program code
     18\usepackage[flushmargin]{footmisc}                      % support label/reference in footnote
    2619\usepackage{latexsym}                                   % \Box glyph
    2720\usepackage{mathptmx}                                   % better math font with "times"
     
    3427\renewcommand{\UrlFont}{\small\sf}
    3528
    36 \setlength{\topmargin}{-0.45in}                                                 % move running title into header
     29\setlength{\topmargin}{-0.45in}                         % move running title into header
    3730\setlength{\headsep}{0.25in}
    3831
     
    4235
    4336\interfootnotelinepenalty=10000
     37
     38\CFAStyle                                               % use default CFA format-style
     39% inline code ©...© (copyright symbol) emacs: C-q M-)
     40% red highlighting ®...® (registered trademark symbol) emacs: C-q M-.
     41% blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_
     42% green highlighting ¢...¢ (cent symbol) emacs: C-q M-"
     43% LaTex escape §...§ (section symbol) emacs: C-q M-'
     44% keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^
     45% math escape $...$ (dollar symbol)
     46
    4447
    4548\title{
  • doc/proposals/user_conversions.md

    r97397a26 rb21c77a  
    55There is also a set of _explicit_ conversions that are only allowed through a
    66cast expression.
    7 Based on Glen's notes on conversions [1], I propose that safe and unsafe
    8 conversions be expressed as constructor variants, though I make explicit
    9 (cast) conversions a constructor variant as well rather than a dedicated
    10 operator.
     7I propose that safe, unsafe, and explicit (cast) conversions be expressed as
     8constructor variants.
    119Throughout this article, I will use the following operator names for
    1210constructors and conversion functions from `From` to `To`:
    1311
    14         void ?{} ( To*, To );            // copy constructor
    15         void ?{} ( To*, From );          // explicit constructor
    16         void ?{explicit} ( To*, From );  // explicit cast conversion
    17         void ?{safe} ( To*, From );      // implicit safe conversion
    18         void ?{unsafe} ( To*, From );    // implicit unsafe conversion
    19 
    20 [1] http://plg.uwaterloo.ca/~cforall/Conversions/index.html
    21 
    22 Glen's design made no distinction between constructors and unsafe implicit
     12        void ?{} ( To&, To );            // copy constructor
     13        void ?{} ( To&, From );          // explicit constructor
     14        void ?{explicit} ( To&, From );  // explicit cast conversion
     15        void ?{safe} ( To&, From );      // implicit safe conversion
     16        void ?{unsafe} ( To&, From );    // implicit unsafe conversion
     17
     18It has been suggested that all constructors would define unsafe implicit
    2319conversions; this is elegant, but interacts poorly with tuples.
    2420Essentially, without making this distinction, a constructor like the following
     
    2622multiplying the space of possible interpretations of all functions:
    2723
    28         void ?{}( Coord *this, int x, int y );
     24        void ?{}( Coord& this, int x, int y );
    2925
    3026That said, it would certainly be possible to make a multiple-argument implicit
     
    3228used infrequently:
    3329
    34         void ?{unsafe}( Coord *this, int x, int y );
     30        void ?{unsafe}( Coord& this, int x, int y );
    3531
    3632An alternate possibility would be to only count two-arg constructors
    37 `void ?{} ( To*, From )` as unsafe conversions; under this semantics, safe and
     33`void ?{} ( To&, From )` as unsafe conversions; under this semantics, safe and
    3834explicit conversions should also have a compiler-enforced restriction to
    3935ensure that they are two-arg functions (this restriction may be valuable
     
     4339 is convertible to `To`.
    4440If user-defined conversions are not added to the language,
    45 `void ?{} ( To*, From )` may be a suitable representation, relying on
     41`void ?{} ( To&, From )` may be a suitable representation, relying on
    4642conversions on the argument types to account for transitivity.
    47 On the other hand, `To*` should perhaps match its target type exactly, so
    48 another assertion syntax specific to conversions may be required, e.g.
    49 `From -> To`.
     43Since `To&` should be an exact match on `To`, this should put all the implicit
     44conversions on the RHS.
     45On the other hand, under some models (like [1]), implicit conversions are not
     46allowed in assertion parameters, so another assertion syntax specific to
     47conversions may be required, e.g. `From -> To`.
     48It has also been suggested that, for programmer control, no implicit
     49conversions (except, possibly, for polymorphic specialization) should be
     50allowed in resolution of cast operators.
     51
     52[1] ../working/assertion_resolution.md
    5053
    5154### Constructor Idiom ###
     
    5356that we can use the full range of Cforall features for conversions, including
    5457polymorphism.
    55 Glen [1] defines a _constructor idiom_ that can be used to create chains of
    56 safe conversions without duplicating code; given a type `Safe` which members
    57 of another type `From` can be directly converted to, the constructor idiom
    58 allows us to write a conversion for any type `To` which `Safe` converts to:
    59 
    60         forall(otype To | { void ?{safe}( To*, Safe ) })
    61         void ?{safe}( To *this, From that ) {
     58In an earlier version of this proposal, Glen Ditchfield defines a
     59_constructor idiom_ that can be used to create chains of safe conversions
     60without duplicating code; given a type `Safe` which members of another type
     61`From` can be directly converted to, the constructor idiom allows us to write
     62a conversion for any type `To` which `Safe` converts to:
     63
     64        forall(otype To | { void ?{safe}( To&, Safe ) })
     65        void ?{safe}( To& this, From that ) {
    6266                Safe tmp = /* some expression involving that */;
    63                 *this = tmp; // uses assertion parameter
     67                this{ tmp }; // initialize from assertion parameter
    6468        }
    6569
     
    6771unsafe conversions.
    6872
     73Glen's original suggestion said the copy constructor for `To` should also be
     74accepted as a resolution for `void ?{safe}( To&, Safe )` (`Safe` == `To`),
     75allowing this same code to be used for the single-step conversion as well.
     76This proposal does come at the cost of an extra copy initialization of the
     77target value, though.
     78
     79Contrariwise, if a monomorphic conversion from `From` to `Safe` is written,
     80e.g:
     81
     82        void ?{safe}( Safe& this, From that ) {
     83                this{ /* some parameters involving that */ };
     84        }
     85
     86Then the code for a transitive conversion from `From` to any `To` type
      87convertible from `Safe` is written:
     88
     89        forall(otype To | { void ?{safe}( To&, Safe ) })
     90        void ?{safe}( To& this, From that ) {
     91                Safe tmp = that;  // uses monomorphic conversion
     92                this{ tmp };      // initialize from assertion parameter
     93        }
     94
     95Given the entirely-boilerplate nature of this code, but negative performance
     96implications of the unmodified constructor idiom, it might be fruitful to have
     97transitive and single step conversion operators, and let CFA build the
     98transitive conversions; some possible names:
     99
     100        void ?{safe}  (To&, From);    void ?{final safe} (To&, From);  // single-step
     101        void ?{safe*} (To&, From);    void ?{safe}       (To&, From);  // transitive
     102
    69103What selective non-use of the constructor idiom gives us is the ability to
    70104define a conversion that may only be the *last* conversion in a chain of such.
    71 Constructing a conversion graph able to unambiguously represent the full
    72 hierarchy of implicit conversions in C is provably impossible using only
    73 single-step conversions with no additional information (see Appendix A), but
    74 this mechanism is sufficiently powerful (see [1], though the design there has
    75 some minor bugs; the general idea is to use the constructor idiom to define
    76 two chains of conversions, one among the signed integral types, another among
    77 the unsigned, and to use monomorphic conversions to allow conversions between
    78 signed and unsigned integer types).
     105One use for this is to solve the problem that `explicit` conversions were
     106added to C++ for, that of conversions to `bool` chaining to become conversions
     107to any arithmetic type.
     108Another use is to unambiguously represent the full hierarchy of implicit
     109conversions in C by making sign conversions non-transitive, allowing the
     110compiler to resolve e.g. `int -> unsigned long` as
     111`int -> long -> unsigned long` over `int -> unsigned int -> unsigned long`.
     112See [2] for more details.
     113
     114[2] ../working/glen_conversions/index.html#usual
    79115
    80116### Appendix A: Partial and Total Orders ###
     
    153189convert from `int` to `unsigned long`, so we just put in a direct conversion
    154190and make the compiler smart enough to figure out the costs" - this is the
    155 approach taken by the existing compipler, but given that in a user-defined
     191approach taken by the existing compiler, but given that in a user-defined
    156192conversion proposal the users can build an arbitrary graph of conversions,
    157193this case still needs to be handled.
     
    160196exists a chain of conversions from `a` to `b` (see Appendix A for description
    161197of preorders and related constructs).
    162 This preorder corresponds roughly to a more usual type-theoretic concept of
     198This preorder roughly corresponds to a more usual type-theoretic concept of
    163199subtyping ("if I can convert `a` to `b`, `a` is a more specific type than
    164200`b`"); however, since this graph is arbitrary, it may contain cycles, so if
     
    192228and so is considered to be the nearer type.
    193229By transitivity, then, the conversion from `X` to `Y2` should be cheaper than
    194 the conversion from `X` to `W`, but in this case the `X` and `W` are
     230the conversion from `X` to `W`, but in this case the `Y2` and `W` are
    195231incomparable by the conversion preorder, so the tie is broken by the shorter
    196232path from `X` to `W` in favour of `W`, contradicting the transitivity property
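
A toy illustration of the point above that, once users can build an arbitrary graph of conversions, the resolver still has to pick among competing chains by cost. Everything here is an assumption for illustration (the type names, the edge costs, and the `cheapestChain` helper); it is not the CFA resolver's actual cost model, only the chain-selection step.

	#include <iostream>
	#include <limits>
	#include <map>
	#include <string>
	#include <vector>

	// adjacency list: type -> (type it converts to, assumed cost of that edge)
	using Graph = std::map< std::string, std::vector< std::pair< std::string, int > > >;

	// cheapest total cost of a conversion chain from `from` to `to` (tiny Dijkstra)
	static int cheapestChain( const Graph & g, const std::string & from, const std::string & to ) {
		const int INF = std::numeric_limits< int >::max();
		std::map< std::string, int > dist;
		std::map< std::string, bool > done;
		for ( const auto & n : g ) dist[ n.first ] = INF;
		dist[ from ] = 0;
		for ( std::size_t i = 0; i < dist.size(); ++i ) {
			std::string u; int best = INF;
			for ( const auto & d : dist )
				if ( ! done[ d.first ] && d.second < best ) { best = d.second; u = d.first; }
			if ( u.empty() ) break;                 // remaining types unreachable
			done[ u ] = true;
			for ( const auto & e : g.at( u ) )
				if ( best + e.second < dist[ e.first ] ) dist[ e.first ] = best + e.second;
		}
		return dist[ to ] == INF ? -1 : dist[ to ];
	}

	int main() {
		Graph g;                                    // hand-written graph with assumed costs
		g[ "int" ]           = { { "long", 1 }, { "unsigned int", 2 } };
		g[ "long" ]          = { { "unsigned long", 2 } };
		g[ "unsigned int" ]  = { { "unsigned long", 2 } };
		g[ "unsigned long" ] = {};
		// int -> long -> unsigned long costs 3; int -> unsigned int -> unsigned long costs 4
		std::cout << cheapestChain( g, "int", "unsigned long" ) << "\n";   // prints 3
	}

The sketch only shows cost selection; the surrounding discussion of preorders, cycles, and incomparable types is what makes the real design harder than a shortest-path query.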
  • doc/refrat/Makefile

    r97397a26 rb21c77a  
    5353        dvips ${Build}/$< -o $@
    5454
    55 ${BASE}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    56                 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib
     55${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     56                ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib | ${Build}
    5757        # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run.
    5858        if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi
     
    7878        mkdir -p ${Build}
    7979
    80 %.tex : %.fig ${Build}
     80%.tex : %.fig | ${Build}
    8181        fig2dev -L eepic $< > ${Build}/$@
    8282
    83 %.ps : %.fig ${Build}
     83%.ps : %.fig | ${Build}
    8484        fig2dev -L ps $< > ${Build}/$@
    8585
    86 %.pstex : %.fig ${Build}
     86%.pstex : %.fig | ${Build}
    8787        fig2dev -L pstex $< > ${Build}/$@
    8888        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • doc/theses/aaron_moss/comp_II/Makefile

    r97397a26 rb21c77a  
    3232
    3333DOCUMENT = comp_II.pdf
     34BASE = ${basename ${DOCUMENT}}
    3435
    3536# Directives #
     
    4041
    4142clean :
    42         @rm -frv ${DOCUMENT} ${basename ${DOCUMENT}}.ps ${Build}
     43        @rm -frv ${DOCUMENT} ${BASE}.ps ${Build}
    4344
    4445# File Dependencies #
    4546
    46 ${DOCUMENT} : ${basename ${DOCUMENT}}.ps
     47${DOCUMENT} : ${BASE}.ps
    4748        ps2pdf $<
    4849
    49 ${basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi
     50${BASE}.ps : ${BASE}.dvi
    5051        dvips ${Build}/$< -o $@
    5152
    52 ${basename ${DOCUMENT}}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    53                 ${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib
     53${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     54                ${Macros}/common.tex ${Macros}/indexstyle ../../../bibliography/pl.bib | ${Build}
    5455        # Must have *.aux file containing citations for bibtex
    5556        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
     
    6667        mkdir -p ${Build}
    6768
    68 %.tex : %.fig
     69%.tex : %.fig ${Build}
    6970        fig2dev -L eepic $< > ${Build}/$@
    7071
    71 %.ps : %.fig
     72%.ps : %.fig | ${Build}
    7273        fig2dev -L ps $< > ${Build}/$@
    7374
    74 %.pstex : %.fig
     75%.pstex : %.fig | ${Build}
    7576        fig2dev -L pstex $< > ${Build}/$@
    7677        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • doc/theses/thierry_delisle/.gitignore

    r97397a26 rb21c77a  
    2525*.pdf
    2626*.png
     27*.ps
    2728figures/*.tex
    2829
  • doc/theses/thierry_delisle/Makefile

    r97397a26 rb21c77a  
    5151
    5252DOCUMENT = thesis.pdf
     53BASE = ${basename ${DOCUMENT}}
    5354
    5455# Directives #
     
    5960
    6061clean :
    61         @rm -frv ${DOCUMENT} ${basename ${DOCUMENT}}.ps ${Build}
     62        @rm -frv ${DOCUMENT} ${BASE}.ps ${Build}
    6263
    6364# File Dependencies #
    6465
    65 ${DOCUMENT} : ${basename ${DOCUMENT}}.ps
     66${DOCUMENT} : ${BASE}.ps
    6667        ps2pdf $<
    6768
    68 ${basename ${DOCUMENT}}.ps : ${basename ${DOCUMENT}}.dvi
     69${BASE}.ps : ${BASE}.dvi
    6970        dvips ${Build}/$< -o $@
    7071
    71 ${basename ${DOCUMENT}}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    72                 ${Macros}/common.tex ${Macros}/indexstyle annex/local.bib ../../bibliography/pl.bib
     72${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     73                ${Macros}/common.tex ${Macros}/indexstyle annex/local.bib ../../bibliography/pl.bib | ${Build}
    7374        # Must have *.aux file containing citations for bibtex
    7475        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
     
    8889        mkdir -p ${Build}
    8990
    90 %.tex : %.fig
     91%.tex : %.fig ${Build}
    9192        fig2dev -L eepic $< > ${Build}/$@
    9293
    93 %.ps : %.fig
     94%.ps : %.fig | ${Build}
    9495        fig2dev -L ps $< > ${Build}/$@
    9596
    96 %.pstex : %.fig
     97%.pstex : %.fig | ${Build}
    9798        fig2dev -L pstex $< > ${Build}/$@
    9899        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
     
    101102# Tools to generate png files
    102103# to create a png we create a pdf and convert it to png
    103 %.png : build/%.pstex figures/%.tex
     104%.png : build/%.pstex figures/%.tex ${Build}
    104105        echo ${basename $@}
    105106        ${LaTeX} figures/${basename $@}.tex
     
    109110
    110111# creating a pdf of a figure requires generating some latex that just includes the figure
    111 figures/%.tex: build/%.pstex
     112figures/%.tex: build/%.pstex ${Build}
    112113        echo -n         "\documentclass[preview]{standalone}\n"         \
    113114                        "\usepackage[T1]{fontenc}\n"                    \
  • doc/user/Makefile

    r97397a26 rb21c77a  
    5757        dvips ${Build}/$< -o $@
    5858
    59 ${BASE}.dvi : Makefile ${Build} ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    60                 ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib
     59${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     60                ${Macros}/common.tex ${Macros}/lstlang.sty ${Macros}/indexstyle ../bibliography/pl.bib | ${Build}
    6161        # Conditionally create an empty *.ind (index) file for inclusion until makeindex is run.
    6262        if [ ! -r ${basename $@}.ind ] ; then touch ${Build}/${basename $@}.ind ; fi
     
    7979        mkdir -p ${Build}
    8080
    81 %.tex : %.fig ${Build}
     81%.tex : %.fig | ${Build}
    8282        fig2dev -L eepic $< > ${Build}/$@
    8383
    84 %.ps : %.fig ${Build}
     84%.ps : %.fig | ${Build}
    8585        fig2dev -L ps $< > ${Build}/$@
    8686
    87 %.pstex : %.fig ${Build}
     87%.pstex : %.fig | ${Build}
    8888        fig2dev -L pstex $< > ${Build}/$@
    8989        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • src/CodeGen/CodeGenerator.cc

    r97397a26 rb21c77a  
    133133                output << "__attribute__ ((";
    134134                for ( list< Attribute * >::iterator attr( attributes.begin() );; ) {
    135                         output << (*attr)->get_name();
    136                         if ( ! (*attr)->get_parameters().empty() ) {
     135                        output << (*attr)->name;
     136                        if ( ! (*attr)->parameters.empty() ) {
    137137                                output << "(";
    138                                 genCommaList( (*attr)->get_parameters().begin(), (*attr)->get_parameters().end() );
     138                                genCommaList( (*attr)->parameters.begin(), (*attr)->parameters.end() );
    139139                                output << ")";
    140140                        } // if
     
    172172        // *** Declarations
    173173        void CodeGenerator::postvisit( FunctionDecl * functionDecl ) {
     174                // deleted decls should never be used, so don't print them
     175                if ( functionDecl->isDeleted && genC ) return;
    174176                extension( functionDecl );
    175177                genAttributes( functionDecl->get_attributes() );
     
    185187                        functionDecl->get_statements()->accept( *visitor );
    186188                } // if
     189                if ( functionDecl->isDeleted ) {
     190                        output << " = void";
     191                }
    187192        }
    188193
    189194        void CodeGenerator::postvisit( ObjectDecl * objectDecl ) {
     195                // deleted decls should never be used, so don't print them
     196                if ( objectDecl->isDeleted && genC ) return;
    190197                if (objectDecl->get_name().empty() && genC ) {
    191198                        // only generate an anonymous name when generating C code, otherwise it clutters the output too much
     
    206213                        objectDecl->get_init()->accept( *visitor );
    207214                } // if
     215                if ( objectDecl->isDeleted ) {
     216                        output << " = void";
     217                }
    208218
    209219                if ( objectDecl->get_bitfieldWidth() ) {
     
    827837                expr->expr->accept( *visitor );
    828838        }
     839
     840        void CodeGenerator::postvisit( DefaultArgExpr * arg ) {
     841                assertf( ! genC, "Default argument expressions should not reach code generation." );
     842                arg->expr->accept( *visitor );
     843        }
     844
     845        void CodeGenerator::postvisit( GenericExpr * expr ) {
     846                assertf( ! genC, "C11 _Generic expressions should not reach code generation." );
     847                output << "_Generic(";
     848                expr->control->accept( *visitor );
     849                output << ", ";
     850                unsigned int numAssocs = expr->associations.size();
     851                unsigned int i = 0;
     852                for ( GenericExpr::Association & assoc : expr->associations ) {
     853                        if (assoc.isDefault) {
     854                                output << "default: ";
     855                        } else {
     856                                output << genType( assoc.type, "", pretty, genC ) << ": ";
     857                        }
     858                        assoc.expr->accept( *visitor );
     859                        if ( i+1 != numAssocs ) {
     860                                output << ", ";
     861                        }
     862                        i++;
     863                }
     864                output << ")";
     865        }
     866
    829867
    830868        // *** Statements
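
A stand-alone sketch of the output shape the new postvisit( GenericExpr * ) above emits for C11 _Generic expressions. The `Assoc` struct and `genGeneric` helper are hypothetical stand-ins using plain strings instead of AST nodes; only the comma placement and the `default:` / `type:` labels mirror the visitor.

	#include <iostream>
	#include <string>
	#include <vector>

	struct Assoc { bool isDefault; std::string type; std::string expr; };

	// builds "_Generic(control, type1: expr1, ..., default: exprN)"
	static std::string genGeneric( const std::string & control, const std::vector< Assoc > & assocs ) {
		std::string out = "_Generic(" + control;
		for ( const Assoc & a : assocs ) {
			out += ", ";
			out += a.isDefault ? std::string( "default: " ) : a.type + ": ";
			out += a.expr;
		}
		return out + ")";
	}

	int main() {
		std::cout << genGeneric( "x", { { false, "int", "f(x)" }, { true, "", "g(x)" } } ) << "\n";
		// prints: _Generic(x, int: f(x), default: g(x))
	}

The real visitor asserts ! genC first, so this shape only appears when printing intermediate (non-C) output.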
  • src/CodeGen/CodeGenerator.h

    r97397a26 rb21c77a  
    9494                void postvisit( ConstructorExpr * );
    9595                void postvisit( DeletedExpr * );
     96                void postvisit( DefaultArgExpr * );
     97                void postvisit( GenericExpr * );
    9698
    9799                //*** Statements
  • src/Common/Debug.h

    r97397a26 rb21c77a  
    2828namespace Debug {
    2929        /// debug codegen a translation unit
    30         static inline void codeGen( __attribute__((unused)) const std::list< Declaration * > & translationUnit, __attribute__((unused)) const std::string & label, __attribute__((unused)) LinkageSpec::Spec linkageFilter = LinkageSpec::Compiler ) {
     30        static inline void codeGen( __attribute__((unused)) const std::list< Declaration * > & translationUnit, __attribute__((unused)) const std::string & label, __attribute__((unused)) LinkageSpec::Spec linkageFilter = LinkageSpec::Builtin ) {
    3131        #ifdef DEBUG
    3232                std::list< Declaration * > decls;
  • src/Common/PassVisitor.h

    r97397a26 rb21c77a  
    125125        virtual void visit( InitExpr *  initExpr ) override final;
    126126        virtual void visit( DeletedExpr *  delExpr ) override final;
     127        virtual void visit( DefaultArgExpr * argExpr ) override final;
     128        virtual void visit( GenericExpr * genExpr ) override final;
    127129
    128130        virtual void visit( VoidType * basicType ) override final;
     
    225227        virtual Expression * mutate( InitExpr *  initExpr ) override final;
    226228        virtual Expression * mutate( DeletedExpr *  delExpr ) override final;
     229        virtual Expression * mutate( DefaultArgExpr * argExpr ) override final;
     230        virtual Expression * mutate( GenericExpr * genExpr ) override final;
    227231
    228232        virtual Type * mutate( VoidType * basicType ) override final;
     
    258262
    259263private:
     264        bool inFunction = false;
     265
    260266        template<typename pass_t> friend void acceptAll( std::list< Declaration* > &decls, PassVisitor< pass_t >& visitor );
    261267        template<typename pass_t> friend void mutateAll( std::list< Declaration* > &decls, PassVisitor< pass_t >& visitor );
     
    313319        void indexerAddUnionFwd ( UnionDecl                 * node  ) { indexer_impl_addUnionFwd ( pass, 0, node ); }
    314320        void indexerAddTrait    ( TraitDecl                 * node  ) { indexer_impl_addTrait    ( pass, 0, node ); }
    315         void indexerAddWith     ( std::list< Expression * > & exprs, BaseSyntaxNode * withStmt ) { indexer_impl_addWith     ( pass, 0, exprs, withStmt ); }
     321        void indexerAddWith     ( std::list< Expression * > & exprs, BaseSyntaxNode * withStmt ) { indexer_impl_addWith( pass, 0, exprs, withStmt ); }
    316322
    317323
  • src/Common/PassVisitor.impl.h

    r97397a26 rb21c77a  
    404404                        indexerAddId( func );
    405405                        maybeAccept_impl( node->type, *this );
     406                        // function body needs to have the same scope as parameters - CompoundStmt will not enter
     407                        // a new scope if inFunction is true
     408                        ValueGuard< bool > oldInFunction( inFunction );
     409                        inFunction = true;
    406410                        maybeAccept_impl( node->statements, *this );
    407411                        maybeAccept_impl( node->attributes, *this );
     
    434438                        indexerAddId( func );
    435439                        maybeMutate_impl( node->type, *this );
     440                        // function body needs to have the same scope as parameters - CompoundStmt will not enter
     441                        // a new scope if inFunction is true
     442                        ValueGuard< bool > oldInFunction( inFunction );
     443                        inFunction = true;
    436444                        maybeMutate_impl( node->statements, *this );
    437445                        maybeMutate_impl( node->attributes, *this );
     
    712720        VISIT_START( node );
    713721        {
    714                 auto guard1 = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } );
     722                // do not enter a new scope if inFunction is true - needs to check old state before the assignment
     723                ValueGuard< bool > oldInFunction( inFunction );
     724                auto guard1 = makeFuncGuard( [this, &oldInFunction]() { if ( ! oldInFunction.old ) indexerScopeEnter(); }, [this, &oldInFunction]() { if ( ! oldInFunction.old ) indexerScopeLeave(); } );
    715725                auto guard2 = makeFuncGuard( [this]() { call_beginScope();   }, [this]() { call_endScope();     } );
     726                inFunction = false;
    716727                visitStatementList( node->kids );
    717728        }
     
    723734        MUTATE_START( node );
    724735        {
    725                 auto guard1 = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } );
     736                // do not enter a new scope if inFunction is true - needs to check old state before the assignment
     737                ValueGuard< bool > oldInFunction( inFunction );
     738                auto guard1 = makeFuncGuard( [this, &oldInFunction]() { if ( ! oldInFunction.old ) indexerScopeEnter(); }, [this, &oldInFunction]() { if ( ! oldInFunction.old ) indexerScopeLeave(); } );
    726739                auto guard2 = makeFuncGuard( [this]() { call_beginScope();   }, [this]() { call_endScope();     } );
     740                inFunction = false;
    727741                mutateStatementList( node->kids );
    728742        }
     
    828842        VISIT_START( node );
    829843
    830         visitExpression( node->condition );
    831         node->body = visitStatement( node->body );
     844        {
     845                // while statements introduce a level of scope (for the initialization)
     846                auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } );
     847                maybeAccept_impl( node->initialization, *this );
     848                visitExpression ( node->condition );
     849                node->body = visitStatement( node->body );
     850        }
    832851
    833852        VISIT_END( node );
     
    838857        MUTATE_START( node );
    839858
    840         node->condition = mutateExpression( node->condition );
    841         node->body      = mutateStatement ( node->body      );
     859        {
     860                // while statements introduce a level of scope (for the initialization)
     861                auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } );
     862                maybeMutate_impl( node->initialization, *this );
     863                node->condition = mutateExpression( node->condition );
     864                node->body      = mutateStatement ( node->body      );
     865        }
     866
    842867
    843868        MUTATE_END( Statement, node );
     
    20742099
    20752100//--------------------------------------------------------------------------
     2101// DefaultArgExpr
     2102template< typename pass_type >
     2103void PassVisitor< pass_type >::visit( DefaultArgExpr * node ) {
     2104        VISIT_START( node );
     2105
     2106        indexerScopedAccept( node->result, *this );
     2107        maybeAccept_impl( node->expr, *this );
     2108
     2109        VISIT_END( node );
     2110}
     2111
     2112template< typename pass_type >
     2113Expression * PassVisitor< pass_type >::mutate( DefaultArgExpr * node ) {
     2114        MUTATE_START( node );
     2115
     2116        indexerScopedMutate( node->env, *this );
     2117        indexerScopedMutate( node->result, *this );
     2118        maybeMutate_impl( node->expr, *this );
     2119
     2120        MUTATE_END( Expression, node );
     2121}
     2122
     2123//--------------------------------------------------------------------------
     2124// GenericExpr
     2125template< typename pass_type >
     2126void PassVisitor< pass_type >::visit( GenericExpr * node ) {
     2127        VISIT_START( node );
     2128
     2129        indexerScopedAccept( node->result, *this );
     2130        maybeAccept_impl( node->control, *this );
     2131        for ( GenericExpr::Association & assoc : node->associations ) {
     2132                indexerScopedAccept( assoc.type, *this );
     2133                maybeAccept_impl( assoc.expr, *this );
     2134        }
     2135
     2136        VISIT_END( node );
     2137}
     2138
     2139template< typename pass_type >
     2140Expression * PassVisitor< pass_type >::mutate( GenericExpr * node ) {
     2141        MUTATE_START( node );
     2142
     2143        indexerScopedMutate( node->env, *this );
     2144        indexerScopedMutate( node->result, *this );
     2145        maybeMutate_impl( node->control, *this );
     2146        for ( GenericExpr::Association & assoc : node->associations ) {
     2147                indexerScopedMutate( assoc.type, *this );
     2148                maybeMutate_impl( assoc.expr, *this );
     2149        }
     2150
     2151        MUTATE_END( Expression, node );
     2152}
     2153
     2154//--------------------------------------------------------------------------
    20762155// VoidType
    20772156template< typename pass_type >
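
A stand-alone sketch of the save/restore pattern behind the new inFunction flag above. The `Guard` here is a simplified, hypothetical stand-in for the ValueGuard used in the real pass (only the `.old` member and destructor-restore behaviour are mirrored): a function body's CompoundStmt reuses the scope that already holds the parameters, while nested compound statements still open their own scope.

	#include <cassert>
	#include <iostream>

	template< typename T >
	struct Guard {
		T old;                         // value at construction, consulted via .old
		T & ref;                       // guarded variable, restored on destruction
		Guard( T & v ) : old( v ), ref( v ) {}
		~Guard() { ref = old; }
	};

	static bool inFunction = false;    // set by the function visit, cleared by its body
	static int scopeDepth = 0;

	static void visitCompound() {
		Guard< bool > guard( inFunction );
		bool openScope = ! guard.old;  // skip the extra scope directly under a function
		inFunction = false;            // any nested compound opens its own scope
		if ( openScope ) ++scopeDepth;
		// ... visit child statements here ...
		if ( openScope ) --scopeDepth;
	}

	static void visitFunction() {
		Guard< bool > guard( inFunction );
		// parameters are added to the current scope here
		inFunction = true;             // the body must reuse that same scope
		visitCompound();
	}

	int main() {
		visitFunction();
		assert( ! inFunction && scopeDepth == 0 );
		std::cout << "function body shared the parameter scope\n";
	}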
  • src/Common/SemanticError.cc

    r97397a26 rb21c77a  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed May 16 15:01:20 2018
    13 // Update Count     : 9
     12// Last Modified On : Thu Jun  7 08:05:26 2018
     13// Update Count     : 10
    1414//
    1515
     
    9797void SemanticError( CodeLocation location, std::string error ) {
    9898        SemanticErrorThrow = true;
    99         throw SemanticErrorException(location, error);
     99        throw SemanticErrorException( location, error );
    100100}
    101101
  • src/Concurrency/Keywords.cc

    r97397a26 rb21c77a  
    192192                void postvisit(   StructDecl * decl );
    193193
    194                 std::list<DeclarationWithType*> findMutexArgs( FunctionDecl* );
     194                std::list<DeclarationWithType*> findMutexArgs( FunctionDecl*, bool & first );
    195195                void validate( DeclarationWithType * );
    196196                void addDtorStatments( FunctionDecl* func, CompoundStmt *, const std::list<DeclarationWithType * > &);
     
    431431        void MutexKeyword::postvisit(FunctionDecl* decl) {
    432432
    433                 std::list<DeclarationWithType*> mutexArgs = findMutexArgs( decl );
    434                 if( mutexArgs.empty() ) return;
    435 
    436                 if( CodeGen::isConstructor(decl->name) ) SemanticError( decl, "constructors cannot have mutex parameters" );
    437 
     433                bool first = false;
     434                std::list<DeclarationWithType*> mutexArgs = findMutexArgs( decl, first );
    438435                bool isDtor = CodeGen::isDestructor( decl->name );
    439436
     437                // Is this function relevant to monitors
     438                if( mutexArgs.empty() ) {
     439                        // If this is the destructor for a monitor it must be mutex
     440                        if(isDtor) {
     441                                Type* ty = decl->get_functionType()->get_parameters().front()->get_type();
     442
     443                                // If it's a copy, it's not a mutex
     444                                ReferenceType* rty = dynamic_cast< ReferenceType * >( ty );
     445                                if( ! rty ) return;
     446
     447                                // If we are not pointing directly to a type, it's not a mutex
     448                                Type* base = rty->get_base();
     449                                if( dynamic_cast< ReferenceType * >( base ) ) return;
     450                                if( dynamic_cast< PointerType * >( base ) ) return;
     451
     452                                // Check if its a struct
     453                                StructInstType * baseStruct = dynamic_cast< StructInstType * >( base );
     454                                if( !baseStruct ) return;
     455
     456                                // Check if its a monitor
     457                                if(baseStruct->baseStruct->is_monitor() || baseStruct->baseStruct->is_thread())
     458                                        SemanticError( decl, "destructors for structures declared as \"monitor\" must use mutex parameters\n" );
     459                        }
     460                        return;
     461                }
     462
     463                // Monitors can't be constructed with mutual exclusion
     464                if( CodeGen::isConstructor(decl->name) && !first ) SemanticError( decl, "constructors cannot have mutex parameters" );
     465
     466                // It makes no sense to have multiple mutex parameters for the destructor
    440467                if( isDtor && mutexArgs.size() != 1 ) SemanticError( decl, "destructors can only have 1 mutex argument" );
    441468
     469                // Make sure all the mutex arguments are monitors
    442470                for(auto arg : mutexArgs) {
    443471                        validate( arg );
    444472                }
    445473
     474                // Check if we need to instrument the body
    446475                CompoundStmt* body = decl->get_statements();
    447476                if( ! body ) return;
    448477
     478                // Do we have the required headers
    449479                if( !monitor_decl || !guard_decl || !dtor_guard_decl )
    450                         SemanticError( decl, "mutex keyword requires monitors to be in scope, add #include <monitor>" );
    451 
     480                        SemanticError( decl, "mutex keyword requires monitors to be in scope, add #include <monitor>\n" );
     481
     482                // Instrument the body
    452483                if( isDtor ) {
    453484                        addDtorStatments( decl, body, mutexArgs );
     
    474505        }
    475506
    476         std::list<DeclarationWithType*> MutexKeyword::findMutexArgs( FunctionDecl* decl ) {
     507        std::list<DeclarationWithType*> MutexKeyword::findMutexArgs( FunctionDecl* decl, bool & first ) {
    477508                std::list<DeclarationWithType*> mutexArgs;
    478509
     510                bool once = true;
    479511                for( auto arg : decl->get_functionType()->get_parameters()) {
    480512                        //Find mutex arguments
    481513                        Type* ty = arg->get_type();
    482514                        if( ! ty->get_mutex() ) continue;
     515
     516                        if(once) {first = true;}
     517                        once = false;
    483518
    484519                        //Append it to the list
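
A condensed restatement, with toy node types rather than the real SynTree classes, of the destructor check added above: when a destructor has no mutex parameters, the pass unwraps the parameter type, and if it is a plain reference to a struct declared monitor (or thread) the missing mutex is reported as an error. All class and member names below are simplified stand-ins.

	#include <iostream>

	struct Type { virtual ~Type() = default; };
	struct PointerType   : Type { Type * base = nullptr; };
	struct ReferenceType : Type { Type * base = nullptr; };
	struct StructDecl { bool isMonitor = false, isThread = false; };
	struct StructInstType : Type { StructDecl * baseStruct = nullptr; };

	// true when the destructor's parameter type means the destructor must be mutex
	static bool dtorMustBeMutex( Type * paramType ) {
		auto * ref = dynamic_cast< ReferenceType * >( paramType );
		if ( ! ref ) return false;                                    // passed by value: a copy, not a mutex
		Type * base = ref->base;
		if ( dynamic_cast< ReferenceType * >( base ) ) return false;  // not pointing directly at a type
		if ( dynamic_cast< PointerType * >( base ) ) return false;
		auto * inst = dynamic_cast< StructInstType * >( base );
		if ( ! inst ) return false;                                   // not a struct at all
		return inst->baseStruct->isMonitor || inst->baseStruct->isThread;
	}

	int main() {
		StructDecl monitorDecl; monitorDecl.isMonitor = true;
		StructInstType inst;    inst.baseStruct = &monitorDecl;
		ReferenceType param;    param.base = &inst;
		if ( dtorMustBeMutex( &param ) )
			std::cout << "destructors for structures declared as \"monitor\" must use mutex parameters\n";
	}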
  • src/ControlStruct/ForExprMutator.cc

    r97397a26 rb21c77a  
    4545                return hoist( forStmt, forStmt->initialization );
    4646        }
     47        Statement *ForExprMutator::postmutate( WhileStmt *whileStmt ) {
     48                return hoist( whileStmt, whileStmt->initialization );
     49        }
    4750} // namespace ControlStruct
    4851
  • src/ControlStruct/ForExprMutator.h

    r97397a26 rb21c77a  
    1818class IfStmt;
    1919class ForStmt;
     20class WhileStmt;
    2021class Statement;
    2122
     
    2526                Statement *postmutate( IfStmt * );
    2627                Statement *postmutate( ForStmt * );
     28                Statement *postmutate( WhileStmt * );
    2729        };
    2830} // namespace ControlStruct
  • src/ControlStruct/Mutate.cc

    r97397a26 rb21c77a  
    2727#include "SynTree/Visitor.h"       // for acceptAll
    2828
    29 using namespace std;
     29namespace ControlStruct {
     30        void fixLabels( std::list< Declaration * > & translationUnit ) {
     31                PassVisitor<LabelFixer> lfix;
     32                acceptAll( translationUnit, lfix );
     33        }
    3034
    31 namespace ControlStruct {
    32         void mutate( std::list< Declaration * > translationUnit ) {
    33                 // hoist initialization out of for statements
     35        void hoistControlDecls( std::list< Declaration * > & translationUnit ) {
    3436                PassVisitor<ForExprMutator> formut;
    35 
    36                 // normalizes label definitions and generates multi-level exit labels
    37                 PassVisitor<LabelFixer> lfix;
    38 
    3937                mutateAll( translationUnit, formut );
    40                 acceptAll( translationUnit, lfix );
    4138        }
    4239} // namespace CodeGen
  • src/ControlStruct/Mutate.h

    r97397a26 rb21c77a  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // Mutate.h -- 
     7// Mutate.h --
    88//
    99// Author           : Rodolfo G. Esteves
     
    2020class Declaration;
    2121
     22/// Desugars Cforall control structures
    2223namespace ControlStruct {
    23         /// Desugars Cforall control structures
    24         void mutate( std::list< Declaration* > translationUnit );
     24        /// normalizes label definitions and generates multi-level exit labels
     25        void fixLabels( std::list< Declaration * > & translationUnit );
     26
     27        /// hoist initialization out of for statements
     28        void hoistControlDecls( std::list< Declaration * > & translationUnit );
    2529} // namespace ControlStruct
    2630
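
A sketch, with toy nodes and a hypothetical hoist helper rather than the real ForExprMutator, of what hoistControlDecls now does for while statements as well as for statements: declarations written in the loop header are moved out into an enclosing compound statement so they get a scope of their own.

	#include <list>

	struct Statement { virtual ~Statement() = default; };
	struct CompoundStmt : Statement { std::list< Statement * > kids; };
	struct WhileStmt : Statement {
		std::list< Statement * > initialization;   // e.g. while ( int i = f(); i < n ) ...
		Statement * body = nullptr;
	};

	// if the loop header declares anything, wrap the loop: { decls... ; while (...) ... }
	static Statement * hoist( WhileStmt * loop ) {
		if ( loop->initialization.empty() ) return loop;
		CompoundStmt * block = new CompoundStmt;
		block->kids.splice( block->kids.end(), loop->initialization );
		block->kids.push_back( loop );
		return block;
	}

	int main() {
		WhileStmt loop;                                       // imagine: while ( int i = f(); i < n ) ...
		loop.initialization.push_back( new CompoundStmt );    // stand-in for the header declaration
		Statement * result = hoist( &loop );
		// result is now a CompoundStmt holding the declaration followed by the loop
		return dynamic_cast< CompoundStmt * >( result ) ? 0 : 1;
	}

The same wrapping was already applied to for statements; the change above reuses it for the new while-statement initialization, and the PassVisitor change earlier gives the hoisted declarations the expected scope.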
  • src/GenPoly/Lvalue.cc

    r97397a26 rb21c77a  
    145145
    146146        namespace {
    147                 // true for intrinsic function calls that return a reference
     147                // true for intrinsic function calls that return an lvalue in C
    148148                bool isIntrinsicReference( Expression * expr ) {
     149                        // known intrinsic-reference prelude functions
     150                        static std::set<std::string> lvalueFunctions = { "*?", "?[?]" };
    149151                        if ( UntypedExpr * untyped = dynamic_cast< UntypedExpr * >( expr ) ) {
    150152                                std::string fname = InitTweak::getFunctionName( untyped );
    151                                 // known intrinsic-reference prelude functions
    152                                 return fname == "*?" || fname == "?[?]";
     153                                return lvalueFunctions.count(fname);
    153154                        } else if ( ApplicationExpr * appExpr = dynamic_cast< ApplicationExpr * > ( expr ) ) {
    154155                                if ( DeclarationWithType * func = InitTweak::getFunction( appExpr ) ) {
    155                                         // use type of return variable rather than expr result type, since it may have been changed to a pointer type
    156                                         FunctionType * ftype = GenPoly::getFunctionType( func->get_type() );
    157                                         Type * ret = ftype->returnVals.empty() ? nullptr : ftype->returnVals.front()->get_type();
    158                                         return func->linkage == LinkageSpec::Intrinsic && dynamic_cast<ReferenceType *>( ret );
     156                                        return func->linkage == LinkageSpec::Intrinsic && lvalueFunctions.count(func->name);
    159157                                }
    160158                        }
     
    210208                                                // TODO: it's likely that the second condition should be ... && ! isIntrinsicReference( arg ), but this requires investigation.
    211209
    212                                                 if ( function->get_linkage() != LinkageSpec::Intrinsic && isIntrinsicReference( arg ) ) {
     210                                                if ( function->linkage != LinkageSpec::Intrinsic && isIntrinsicReference( arg ) ) {
    213211                                                        // needed for definition of prelude functions, etc.
    214212                                                        // if argument is dereference or array subscript, the result isn't REALLY a reference, but non-intrinsic functions expect a reference: take address
     
    226224                                                        arg = new AddressExpr( arg );
    227225                                                // } else if ( function->get_linkage() == LinkageSpec::Intrinsic && InitTweak::getPointerBase( arg->result ) ) {
    228                                                 } else if ( function->get_linkage() == LinkageSpec::Intrinsic && arg->result->referenceDepth() != 0 ) {
     226                                                } else if ( function->linkage == LinkageSpec::Intrinsic && arg->result->referenceDepth() != 0 ) {
    229227                                                        // argument is a 'real' reference, but function expects a C lvalue: add a dereference to the reference-typed argument
    230228                                                        PRINT(
  • src/Parser/DeclarationNode.cc

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 12:34:05 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue May 22 08:39:29 2018
    13 // Update Count     : 1074
     12// Last Modified On : Thu Jun  7 12:08:55 2018
     13// Update Count     : 1079
    1414//
    1515
     
    173173}
    174174
    175 DeclarationNode * DeclarationNode::newFunction( string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body ) {
     175DeclarationNode * DeclarationNode::newFunction( const string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body ) {
    176176        DeclarationNode * newnode = new DeclarationNode;
    177177        newnode->name = name;
     
    244244} // DeclarationNode::newForall
    245245
    246 DeclarationNode * DeclarationNode::newFromTypedef( string * name ) {
     246DeclarationNode * DeclarationNode::newFromTypedef( const string * name ) {
    247247        DeclarationNode * newnode = new DeclarationNode;
    248248        newnode->type = new TypeData( TypeData::SymbolicInst );
     
    267267} // DeclarationNode::newAggregate
    268268
    269 DeclarationNode * DeclarationNode::newEnum( string * name, DeclarationNode * constants, bool body ) {
     269DeclarationNode * DeclarationNode::newEnum( const string * name, DeclarationNode * constants, bool body ) {
    270270        assert( name );
    271271        DeclarationNode * newnode = new DeclarationNode;
     
    277277} // DeclarationNode::newEnum
    278278
    279 DeclarationNode * DeclarationNode::newEnumConstant( string * name, ExpressionNode * constant ) {
     279DeclarationNode * DeclarationNode::newEnumConstant( const string * name, ExpressionNode * constant ) {
    280280        DeclarationNode * newnode = new DeclarationNode;
    281281        newnode->name = name;
     
    284284} // DeclarationNode::newEnumConstant
    285285
    286 DeclarationNode * DeclarationNode::newName( string * name ) {
     286DeclarationNode * DeclarationNode::newName( const string * name ) {
    287287        DeclarationNode * newnode = new DeclarationNode;
    288288        newnode->name = name;
     
    290290} // DeclarationNode::newName
    291291
    292 DeclarationNode * DeclarationNode::newFromTypeGen( string * name, ExpressionNode * params ) {
     292DeclarationNode * DeclarationNode::newFromTypeGen( const string * name, ExpressionNode * params ) {
    293293        DeclarationNode * newnode = new DeclarationNode;
    294294        newnode->type = new TypeData( TypeData::SymbolicInst );
     
    299299} // DeclarationNode::newFromTypeGen
    300300
    301 DeclarationNode * DeclarationNode::newTypeParam( TypeClass tc, string * name ) {
     301DeclarationNode * DeclarationNode::newTypeParam( TypeClass tc, const string * name ) {
    302302        DeclarationNode * newnode = new DeclarationNode;
    303303        newnode->type = nullptr;
     
    330330} // DeclarationNode::newTraitUse
    331331
    332 DeclarationNode * DeclarationNode::newTypeDecl( string * name, DeclarationNode * typeParams ) {
     332DeclarationNode * DeclarationNode::newTypeDecl( const string * name, DeclarationNode * typeParams ) {
    333333        DeclarationNode * newnode = new DeclarationNode;
    334334        newnode->name = name;
     
    405405} // DeclarationNode::newBuiltinType
    406406
    407 DeclarationNode * DeclarationNode::newAttr( string * name, ExpressionNode * expr ) {
     407DeclarationNode * DeclarationNode::newAttr( const string * name, ExpressionNode * expr ) {
    408408        DeclarationNode * newnode = new DeclarationNode;
    409409        newnode->type = nullptr;
     
    414414}
    415415
    416 DeclarationNode * DeclarationNode::newAttr( string * name, DeclarationNode * type ) {
     416DeclarationNode * DeclarationNode::newAttr( const string * name, DeclarationNode * type ) {
    417417        DeclarationNode * newnode = new DeclarationNode;
    418418        newnode->type = nullptr;
     
    423423}
    424424
    425 DeclarationNode * DeclarationNode::newAttribute( string * name, ExpressionNode * expr ) {
     425DeclarationNode * DeclarationNode::newAttribute( const string * name, ExpressionNode * expr ) {
    426426        DeclarationNode * newnode = new DeclarationNode;
    427427        newnode->type = nullptr;
     
    544544                                        type->aggregate.params->appendList( q->type->forall ); // augment forall qualifier
    545545                                } else {                                                                // not polymorphic
    546                                         type->aggregate.params = q->type->forall; // make polymorphic type
    547                                         // change implicit typedef from TYPEDEFname to TYPEGENname
    548                                         typedefTable.changeKind( *type->aggregate.name, TYPEGENname );
     546                                        type->aggregate.params = q->type->forall; // set forall qualifier
    549547                                } // if
    550548                        } else {                                                                        // not polymorphic
     
    10651063                        SemanticError( this, "invalid function specifier for " );
    10661064                } // if
    1067                 return buildDecl( type, name ? *name : string( "" ), storageClasses, maybeBuild< Expression >( bitfieldWidth ), funcSpecs, linkage, asmName, maybeBuild< Initializer >(initializer), attributes )->set_extension( extension );
     1065                bool isDelete = initializer && initializer->get_isDelete();
     1066                Declaration * decl = buildDecl( type, name ? *name : string( "" ), storageClasses, maybeBuild< Expression >( bitfieldWidth ), funcSpecs, linkage, asmName, isDelete ? nullptr : maybeBuild< Initializer >(initializer), attributes )->set_extension( extension );
     1067                if ( isDelete ) {
     1068                        DeclarationWithType * dwt = strict_dynamic_cast<DeclarationWithType *>( decl );
     1069                        dwt->isDeleted = true;
     1070                }
     1071                return decl;
    10681072        } // if
    10691073
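
The buildDecl change above routes a deleted initializer around the normal initializer-building path and instead marks the resulting declaration as deleted. A rough stand-alone sketch of that control flow, where Decl and Initializer are simplified stand-ins for the compiler's Declaration hierarchy rather than its real API:

    #include <memory>
    #include <string>

    struct Initializer { std::string expr; };

    struct Decl {
        std::string name;
        std::unique_ptr<Initializer> init;   // only built for real initializers
        bool isDeleted = false;              // set instead when the initializer is "delete"
    };

    static Decl buildDecl( const std::string & name, const std::string * initExpr, bool isDelete ) {
        Decl d;
        d.name = name;
        if ( isDelete ) {
            d.isDeleted = true;              // no Initializer object is constructed
        } else if ( initExpr ) {
            d.init = std::make_unique<Initializer>( Initializer{ *initExpr } );
        }
        return d;
    }

    int main() {
        std::string zero = "0";
        Decl a = buildDecl( "x", &zero, false );   // ordinary initializer
        Decl b = buildDecl( "f", nullptr, true );  // deleted: flagged, no initializer
        return ( a.isDeleted || ! b.isDeleted ) ? 1 : 0;
    }
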
  • src/Parser/ExpressionNode.cc

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 13:17:07 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Mar 22 11:57:39 2018
    13 // Update Count     : 801
     12// Last Modified On : Mon Jun  4 21:24:45 2018
     13// Update Count     : 802
    1414//
    1515
     
    314314
    315315Expression * build_constantStr( string & str ) {
     316        assert( str.length() > 0 );
    316317        string units;                                                                           // units
    317318        sepString( str, units, '"' );                                           // separate constant from units
  • src/Parser/InitializerNode.cc

    r97397a26 rb21c77a  
    2727
    2828InitializerNode::InitializerNode( ExpressionNode * _expr, bool aggrp, ExpressionNode * des )
    29                 : expr( _expr ), aggregate( aggrp ), designator( des ), kids( nullptr ), maybeConstructed( true ) {
     29                : expr( _expr ), aggregate( aggrp ), designator( des ), kids( nullptr ), maybeConstructed( true ), isDelete( false ) {
    3030        if ( aggrp )
    3131                kids = dynamic_cast< InitializerNode * >( get_next() );
     
    3636
    3737InitializerNode::InitializerNode( InitializerNode * init, bool aggrp, ExpressionNode * des )
    38                 : expr( nullptr ), aggregate( aggrp ), designator( des ), kids( nullptr ), maybeConstructed( true ) {
     38                : expr( nullptr ), aggregate( aggrp ), designator( des ), kids( nullptr ), maybeConstructed( true ), isDelete( false ) {
    3939        if ( init )
    4040                set_last( init );
     
    4646                set_next( nullptr );
    4747} // InitializerNode::InitializerNode
     48
     49InitializerNode::InitializerNode( bool isDelete ) : expr( nullptr ), aggregate( false ), designator( nullptr ), kids( nullptr ), maybeConstructed( false ), isDelete( isDelete ) {}
    4850
    4951InitializerNode::~InitializerNode() {
     
    8486
    8587Initializer * InitializerNode::build() const {
     88        assertf( ! isDelete, "Should not build delete stmt InitializerNode" );
    8689        if ( aggregate ) {
    8790                // steal designators from children
  • src/Parser/ParseNode.h

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 13:28:16 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Apr 30 09:19:17 2018
    13 // Update Count     : 831
     12// Last Modified On : Wed Jun  6 16:17:18 2018
     13// Update Count     : 843
    1414//
    1515
     
    7777
    7878        ParseNode * next = nullptr;
    79         std::string * name = nullptr;
     79        const std::string * name = nullptr;
    8080        CodeLocation location = yylloc;
    8181}; // ParseNode
     
    8787        InitializerNode( ExpressionNode *, bool aggrp = false,  ExpressionNode * des = nullptr );
    8888        InitializerNode( InitializerNode *, bool aggrp = false, ExpressionNode * des = nullptr );
     89        InitializerNode( bool isDelete );
    8990        ~InitializerNode();
    9091        virtual InitializerNode * clone() const { assert( false ); return nullptr; }
     
    9798        InitializerNode * set_maybeConstructed( bool value ) { maybeConstructed = value; return this; }
    9899        bool get_maybeConstructed() const { return maybeConstructed; }
     100
     101        bool get_isDelete() const { return isDelete; }
    99102
    100103        InitializerNode * next_init() const { return kids; }
     
    110113        InitializerNode * kids;
    111114        bool maybeConstructed;
     115        bool isDelete;
    112116}; // InitializerNode
    113117
     
    167171};
    168172
    169 Expression * build_constantInteger( std::string &str );
    170 Expression * build_constantFloat( std::string &str );
    171 Expression * build_constantChar( std::string &str );
    172 Expression * build_constantStr( std::string &str );
     173Expression * build_constantInteger( std::string & str ); // these 4 routines modify the string
     174Expression * build_constantFloat( std::string & str );
     175Expression * build_constantChar( std::string & str );
     176Expression * build_constantStr( std::string & str );
    173177Expression * build_field_name_FLOATING_FRACTIONconstant( const std::string & str );
    174178Expression * build_field_name_FLOATING_DECIMALconstant( const std::string & str );
     
    226230        static DeclarationNode * newBuiltinType( BuiltinType );
    227231        static DeclarationNode * newForall( DeclarationNode * );
    228         static DeclarationNode * newFromTypedef( std::string * );
    229         static DeclarationNode * newFunction( std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body );
     232        static DeclarationNode * newFromTypedef( const std::string * );
     233        static DeclarationNode * newFunction( const std::string * name, DeclarationNode * ret, DeclarationNode * param, StatementNode * body );
    230234        static DeclarationNode * newAggregate( Aggregate kind, const std::string * name, ExpressionNode * actuals, DeclarationNode * fields, bool body );
    231         static DeclarationNode * newEnum( std::string * name, DeclarationNode * constants, bool body );
    232         static DeclarationNode * newEnumConstant( std::string * name, ExpressionNode * constant );
    233         static DeclarationNode * newName( std::string * );
    234         static DeclarationNode * newFromTypeGen( std::string *, ExpressionNode * params );
    235         static DeclarationNode * newTypeParam( TypeClass, std::string * );
     235        static DeclarationNode * newEnum( const std::string * name, DeclarationNode * constants, bool body );
     236        static DeclarationNode * newEnumConstant( const std::string * name, ExpressionNode * constant );
     237        static DeclarationNode * newName( const std::string * );
     238        static DeclarationNode * newFromTypeGen( const std::string *, ExpressionNode * params );
     239        static DeclarationNode * newTypeParam( TypeClass, const std::string * );
    236240        static DeclarationNode * newTrait( const std::string * name, DeclarationNode * params, DeclarationNode * asserts );
    237241        static DeclarationNode * newTraitUse( const std::string * name, ExpressionNode * params );
    238         static DeclarationNode * newTypeDecl( std::string * name, DeclarationNode * typeParams );
     242        static DeclarationNode * newTypeDecl( const std::string * name, DeclarationNode * typeParams );
    239243        static DeclarationNode * newPointer( DeclarationNode * qualifiers, OperKinds kind );
    240244        static DeclarationNode * newArray( ExpressionNode * size, DeclarationNode * qualifiers, bool isStatic );
     
    243247        static DeclarationNode * newTuple( DeclarationNode * members );
    244248        static DeclarationNode * newTypeof( ExpressionNode * expr );
    245         static DeclarationNode * newAttr( std::string *, ExpressionNode * expr ); // @ attributes
    246         static DeclarationNode * newAttr( std::string *, DeclarationNode * type ); // @ attributes
    247         static DeclarationNode * newAttribute( std::string *, ExpressionNode * expr = nullptr ); // gcc attributes
     249        static DeclarationNode * newAttr( const std::string *, ExpressionNode * expr ); // @ attributes
     250        static DeclarationNode * newAttr( const std::string *, DeclarationNode * type ); // @ attributes
     251        static DeclarationNode * newAttribute( const std::string *, ExpressionNode * expr = nullptr ); // gcc attributes
    248252        static DeclarationNode * newAsmStmt( StatementNode * stmt ); // gcc external asm statement
    249253        static DeclarationNode * newStaticAssert( ExpressionNode * condition, Expression * message );
     
    399403};
    400404
     405Expression * build_if_control( IfCtl * ctl, std::list< Statement * > & init );
    401406Statement * build_if( IfCtl * ctl, StatementNode * then_stmt, StatementNode * else_stmt );
    402407Statement * build_switch( bool isSwitch, ExpressionNode * ctl, StatementNode * stmt );
    403408Statement * build_case( ExpressionNode * ctl );
    404409Statement * build_default();
    405 Statement * build_while( ExpressionNode * ctl, StatementNode * stmt, bool kind = false );
     410Statement * build_while( IfCtl * ctl, StatementNode * stmt );
     411Statement * build_do_while( ExpressionNode * ctl, StatementNode * stmt );
    406412Statement * build_for( ForCtl * forctl, StatementNode * stmt );
    407413Statement * build_branch( BranchStmt::Type kind );
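
The new comment on the build_constant* prototypes above notes that these routines modify their string argument in place, which is why the parameters remain non-const references. A small illustrative sketch of that idiom; the suffix handling here is hypothetical, and the real routines use sepString, rm_underscore, and related helpers:

    #include <iostream>
    #include <string>

    // Hypothetical helper: split a trailing units suffix off a string-literal
    // spelling, editing the argument in place (hence the non-const reference).
    static std::string takeUnits( std::string & spelling ) {
        std::string units;
        std::string::size_type close = spelling.rfind( '"' );  // end of the literal proper
        if ( close != std::string::npos && close + 1 < spelling.size() ) {
            units = spelling.substr( close + 1 );               // e.g. "km"
            spelling.erase( close + 1 );                        // caller's string is modified
        }
        return units;
    }

    int main() {
        std::string lit = "\"10\"km";
        std::cout << takeUnits( lit ) << ' ' << lit << std::endl;   // prints: km "10"
        return 0;
    }
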
  • src/Parser/StatementNode.cc

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 14:59:41 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Apr 30 09:21:16 2018
    13 // Update Count     : 354
     12// Last Modified On : Tue Jun  5 08:58:34 2018
     13// Update Count     : 362
    1414//
    1515
     
    6969        caseStmt->get_statements().splice( caseStmt->get_statements().end(), stmts );
    7070        return this;
    71 }
     71} // StatementNode::append_last_case
    7272
    7373Statement * build_expr( ExpressionNode * ctl ) {
    7474        Expression * e = maybeMoveBuild< Expression >( ctl );
    7575
    76         if ( e )
    77                 return new ExprStmt( e );
    78         else
    79                 return new NullStmt();
    80 }
    81 
    82 Statement * build_if( IfCtl * ctl, StatementNode * then_stmt, StatementNode * else_stmt ) {
    83         Statement * thenb, * elseb = 0;
    84         std::list< Statement * > branches;
    85         buildMoveList< Statement, StatementNode >( then_stmt, branches );
    86         assert( branches.size() == 1 );
    87         thenb = branches.front();
    88 
    89         if ( else_stmt ) {
    90                 std::list< Statement * > branches;
    91                 buildMoveList< Statement, StatementNode >( else_stmt, branches );
    92                 assert( branches.size() == 1 );
    93                 elseb = branches.front();
    94         } // if
    95 
    96         std::list< Statement * > init;
     76        if ( e ) return new ExprStmt( e );
     77        else return new NullStmt();
     78} // build_expr
     79
     80Expression * build_if_control( IfCtl * ctl, std::list< Statement * > & init ) {
    9781        if ( ctl->init != 0 ) {
    9882                buildMoveList( ctl->init, init );
     
    10286        if ( ctl->condition ) {
    10387                // compare the provided condition against 0
    104                 cond =  notZeroExpr( maybeMoveBuild< Expression >(ctl->condition) );
     88                cond = notZeroExpr( maybeMoveBuild< Expression >(ctl->condition) );
    10589        } else {
    10690                for ( Statement * stmt : init ) {
     
    11397        }
    11498        delete ctl;
     99        return cond;
     100} // build_if_control
     101
     102Statement * build_if( IfCtl * ctl, StatementNode * then_stmt, StatementNode * else_stmt ) {
     103        Statement * thenb, * elseb = nullptr;
     104        std::list< Statement * > branches;
     105        buildMoveList< Statement, StatementNode >( then_stmt, branches );
     106        assert( branches.size() == 1 );
     107        thenb = branches.front();
     108
     109        if ( else_stmt ) {
     110                std::list< Statement * > branches;
     111                buildMoveList< Statement, StatementNode >( else_stmt, branches );
     112                assert( branches.size() == 1 );
     113                elseb = branches.front();
     114        } // if
     115
     116        std::list< Statement * > init;
     117        Expression * cond = build_if_control( ctl, init );
    115118        return new IfStmt( cond, thenb, elseb, init );
    116 }
     119} // build_if
    117120
    118121Statement * build_switch( bool isSwitch, ExpressionNode * ctl, StatementNode * stmt ) {
     
    130133        // branches.size() == 0 for switch (...) {}, i.e., no declaration or statements
    131134        return new SwitchStmt( maybeMoveBuild< Expression >(ctl), branches );
    132 }
     135} // build_switch
     136
    133137Statement * build_case( ExpressionNode * ctl ) {
    134138        std::list< Statement * > branches;
    135139        return new CaseStmt( maybeMoveBuild< Expression >(ctl), branches );
    136 }
     140} // build_case
     141
    137142Statement * build_default() {
    138143        std::list< Statement * > branches;
    139144        return new CaseStmt( nullptr, branches, true );
    140 }
    141 
    142 Statement * build_while( ExpressionNode * ctl, StatementNode * stmt, bool kind ) {
    143         std::list< Statement * > branches;
    144         buildMoveList< Statement, StatementNode >( stmt, branches );
    145         assert( branches.size() == 1 );
    146         return new WhileStmt( notZeroExpr( maybeMoveBuild< Expression >(ctl) ), branches.front(), kind );
    147 }
     145} // build_default
     146
     147Statement * build_while( IfCtl * ctl, StatementNode * stmt ) {
     148        std::list< Statement * > branches;
     149        buildMoveList< Statement, StatementNode >( stmt, branches );
     150        assert( branches.size() == 1 );
     151
     152        std::list< Statement * > init;
     153        Expression * cond = build_if_control( ctl, init );
     154        return new WhileStmt( cond, branches.front(), init, false );
     155} // build_while
     156
     157Statement * build_do_while( ExpressionNode * ctl, StatementNode * stmt ) {
     158        std::list< Statement * > branches;
     159        buildMoveList< Statement, StatementNode >( stmt, branches );
     160        assert( branches.size() == 1 );
     161
     162        std::list< Statement * > init;
     163        return new WhileStmt( notZeroExpr( maybeMoveBuild< Expression >(ctl) ), branches.front(), init, true );
     164} // build_do_while
    148165
    149166Statement * build_for( ForCtl * forctl, StatementNode * stmt ) {
     
    167184        delete forctl;
    168185        return new ForStmt( init, cond, incr, branches.front() );
    169 }
     186} // build_for
    170187
    171188Statement * build_branch( BranchStmt::Type kind ) {
    172189        Statement * ret = new BranchStmt( "", kind );
    173190        return ret;
    174 }
     191} // build_branch
     192
    175193Statement * build_branch( std::string * identifier, BranchStmt::Type kind ) {
    176194        Statement * ret = new BranchStmt( * identifier, kind );
    177195        delete identifier;                                                                      // allocated by lexer
    178196        return ret;
    179 }
     197} // build_branch
     198
    180199Statement * build_computedgoto( ExpressionNode * ctl ) {
    181200        return new BranchStmt( maybeMoveBuild< Expression >(ctl), BranchStmt::Goto );
    182 }
     201} // build_computedgoto
    183202
    184203Statement * build_return( ExpressionNode * ctl ) {
     
    186205        buildMoveList( ctl, exps );
    187206        return new ReturnStmt( exps.size() > 0 ? exps.back() : nullptr );
    188 }
     207} // build_return
    189208
    190209Statement * build_throw( ExpressionNode * ctl ) {
     
    193212        assertf( exps.size() < 2, "This means we are leaking memory");
    194213        return new ThrowStmt( ThrowStmt::Terminate, !exps.empty() ? exps.back() : nullptr );
    195 }
     214} // build_throw
    196215
    197216Statement * build_resume( ExpressionNode * ctl ) {
     
    200219        assertf( exps.size() < 2, "This means we are leaking memory");
    201220        return new ThrowStmt( ThrowStmt::Resume, !exps.empty() ? exps.back() : nullptr );
    202 }
     221} // build_resume
    203222
    204223Statement * build_resume_at( ExpressionNode * ctl, ExpressionNode * target ) {
     
    206225        (void)target;
    207226        assertf( false, "resume at (non-local throw) is not yet supported," );
    208 }
     227} // build_resume_at
    209228
    210229Statement * build_try( StatementNode * try_stmt, StatementNode * catch_stmt, StatementNode * finally_stmt ) {
     
    214233        FinallyStmt * finallyBlock = dynamic_cast< FinallyStmt * >(maybeMoveBuild< Statement >(finally_stmt) );
    215234        return new TryStmt( tryBlock, branches, finallyBlock );
    216 }
     235} // build_try
     236
    217237Statement * build_catch( CatchStmt::Kind kind, DeclarationNode * decl, ExpressionNode * cond, StatementNode * body ) {
    218238        std::list< Statement * > branches;
     
    220240        assert( branches.size() == 1 );
    221241        return new CatchStmt( kind, maybeMoveBuild< Declaration >(decl), maybeMoveBuild< Expression >(cond), branches.front() );
    222 }
     242} // build_catch
     243
    223244Statement * build_finally( StatementNode * stmt ) {
    224245        std::list< Statement * > branches;
     
    226247        assert( branches.size() == 1 );
    227248        return new FinallyStmt( dynamic_cast< CompoundStmt * >( branches.front() ) );
    228 }
     249} // build_finally
    229250
    230251WaitForStmt * build_waitfor( ExpressionNode * targetExpr, StatementNode * stmt, ExpressionNode * when ) {
     
    247268
    248269        return node;
    249 }
     270} // build_waitfor
    250271
    251272WaitForStmt * build_waitfor( ExpressionNode * targetExpr, StatementNode * stmt, ExpressionNode * when, WaitForStmt * node ) {
     
    266287
    267288        return node;
    268 }
     289} // build_waitfor
    269290
    270291WaitForStmt * build_waitfor_timeout( ExpressionNode * timeout, StatementNode * stmt, ExpressionNode * when ) {
     
    275296                node->timeout.statement = maybeMoveBuild<Statement >( stmt    );
    276297                node->timeout.condition = notZeroExpr( maybeMoveBuild<Expression>( when ) );
    277         }
    278         else {
     298        } else {
    279299                node->orelse.statement  = maybeMoveBuild<Statement >( stmt );
    280300                node->orelse.condition  = notZeroExpr( maybeMoveBuild<Expression>( when ) );
    281         }
     301        } // if
    282302
    283303        return node;
    284 }
     304} // build_waitfor_timeout
    285305
    286306WaitForStmt * build_waitfor_timeout( ExpressionNode * timeout, StatementNode * stmt, ExpressionNode * when,  StatementNode * else_stmt, ExpressionNode * else_when ) {
     
    295315
    296316        return node;
    297 }
     317} // build_waitfor_timeout
    298318
    299319WithStmt * build_with( ExpressionNode * exprs, StatementNode * stmt ) {
     
    302322        Statement * s = maybeMoveBuild<Statement>( stmt );
    303323        return new WithStmt( e, s );
    304 }
     324} // build_with
    305325
    306326Statement * build_compound( StatementNode * first ) {
     
    308328        buildMoveList( first, cs->get_kids() );
    309329        return cs;
    310 }
     330} // build_compound
    311331
    312332Statement * build_asm( bool voltile, Expression * instruction, ExpressionNode * output, ExpressionNode * input, ExpressionNode * clobber, LabelNode * gotolabels ) {
     
    318338        buildMoveList( clobber, clob );
    319339        return new AsmStmt( voltile, instruction, out, in, clob, gotolabels ? gotolabels->labels : noLabels );
    320 }
     340} // build_asm
    321341
    322342Statement * build_directive( string * directive ) {
    323343        return new DirectiveStmt( *directive );
    324 }
     344} // build_directive
    325345
    326346// Local Variables: //
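
The refactoring above pulls the shared control-clause handling into build_if_control, so build_if, the new build_while, and build_do_while all turn an optional declaration list plus condition into an (init, cond) pair the same way. A self-contained miniature of that factoring, using simplified stand-ins for IfCtl and the AST nodes:

    #include <iostream>
    #include <string>
    #include <vector>

    struct Ctl {                               // stand-in for IfCtl: optional decls + condition
        std::vector<std::string> init;
        std::string condition;
    };

    // Shared helper: hand the declarations to the caller and return the condition.
    static std::string buildControl( Ctl & ctl, std::vector<std::string> & init ) {
        init = ctl.init;
        return ctl.condition;
    }

    static void buildIf( Ctl ctl ) {
        std::vector<std::string> init;
        std::cout << "if(" << buildControl( ctl, init ) << ") with " << init.size() << " init decl(s)\n";
    }

    static void buildWhile( Ctl ctl ) {        // reuses exactly the same helper
        std::vector<std::string> init;
        std::cout << "while(" << buildControl( ctl, init ) << ") with " << init.size() << " init decl(s)\n";
    }

    int main() {
        buildIf(    { { "int x = f()" }, "x != 0" } );
        buildWhile( { { "int i = 0"   }, "i < 10" } );
        return 0;
    }
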
  • src/Parser/TypeData.cc

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 15:12:51 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Apr 26 13:46:07 2018
    13 // Update Count     : 603
     12// Last Modified On : Wed Jun  6 17:40:33 2018
     13// Update Count     : 604
    1414//
    1515
     
    6565          case Aggregate:
    6666                // aggregate = new Aggregate_t;
     67                aggregate.kind = DeclarationNode::NoAggregate;
    6768                aggregate.name = nullptr;
    6869                aggregate.params = nullptr;
     
    7071                aggregate.fields = nullptr;
    7172                aggregate.body = false;
     73                aggregate.tagged = false;
     74                aggregate.parent = nullptr;
    7275                break;
    7376          case AggregateInst:
     
    198201                break;
    199202          case Aggregate:
     203                newtype->aggregate.kind = aggregate.kind;
    200204                newtype->aggregate.name = aggregate.name ? new string( *aggregate.name ) : nullptr;
    201205                newtype->aggregate.params = maybeClone( aggregate.params );
    202206                newtype->aggregate.actuals = maybeClone( aggregate.actuals );
    203207                newtype->aggregate.fields = maybeClone( aggregate.fields );
    204                 newtype->aggregate.kind = aggregate.kind;
    205208                newtype->aggregate.body = aggregate.body;
    206209                newtype->aggregate.tagged = aggregate.tagged;
     
    575578
    576579          case DeclarationNode::Int128:
    577                 ret = td->signedness == 1 ? BasicType::UnsignedInt128 : BasicType::SignedInt128;
     580                ret = td->signedness == DeclarationNode::Unsigned ? BasicType::UnsignedInt128 : BasicType::SignedInt128;
    578581                if ( td->length != DeclarationNode::NoLength ) {
    579582                        genTSError( DeclarationNode::lengthNames[ td->length ], td->basictype );
     
    599602                        genTSError( DeclarationNode::lengthNames[ td->length ], td->basictype );
    600603                } // if
    601                 if ( td->basictype == DeclarationNode::Float && td->length == DeclarationNode::Long ) {
     604                if ( td->basictype != DeclarationNode::Double && td->length == DeclarationNode::Long ) {
    602605                        genTSError( DeclarationNode::lengthNames[ td->length ], td->basictype );
    603606                } // if
     
    605608                        const_cast<TypeData *>(td)->basictype = DeclarationNode::LongDouble;
    606609                } // if
     610
     611                if ( td->basictype == DeclarationNode::Float80 || td->basictype == DeclarationNode::Float128 ) {
     612                        // if ( td->complextype != DeclarationNode::NoComplexType ) {
     613                        //      genTSError( DeclarationNode::complexTypeNames[ td->complextype ], td->basictype );
     614                        // }
     615                        if ( td->basictype == DeclarationNode::Float80 ) ret = BasicType::Float80;
     616                        else ret = BasicType::Float128;
     617                        break;
     618                }
    607619
    608620                ret = floattype[ td->complextype ][ td->basictype - DeclarationNode::Float ];
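
The tightened length check above now rejects "long" with every floating basic type except double (previously only "long float" was caught), while the new branch maps __float80/__float128 directly to their own BasicType kinds. A minimal restatement of the length rule, using stand-in enumerators and a plain exception in place of genTSError:

    #include <stdexcept>

    enum FloatKind { Float, Double, Float80, Float128 };   // simplified stand-ins

    // Mirrors the new condition: among the floating types only "double" may take
    // the "long" length specifier (giving long double); "long float",
    // "long __float80", "long __float128", ... are all errors.
    static void checkLongSpecifier( FloatKind basic, bool isLong ) {
        if ( basic != Double && isLong ) {
            throw std::invalid_argument( "long specifier invalid for this floating type" );
        }
    }

    int main() {
        checkLongSpecifier( Double, true );                      // ok: long double
        try { checkLongSpecifier( Float, true ); return 1; }     // rejected: long float
        catch ( const std::invalid_argument & ) { return 0; }
    }
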
  • src/Parser/TypedefTable.cc

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 15:20:13 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue May 22 08:40:01 2018
    13 // Update Count     : 121
     12// Last Modified On : Fri Jun 22 06:14:39 2018
     13// Update Count     : 206
    1414//
    1515
     
    1717#include "TypedefTable.h"
    1818#include <cassert>                                                                              // for assert
     19#include <iostream>
    1920
    2021#if 0
    21 #include <iostream>
    22 #define debugPrint( x ) cerr << x
     22#define debugPrint( code ) code
    2323#else
    24 #define debugPrint( x )
     24#define debugPrint( code )
    2525#endif
    2626
    2727using namespace std;                                                                    // string, iostream
    2828
     29debugPrint(
     30static const char *kindName( int kind ) {
     31        switch ( kind ) {
     32          case IDENTIFIER: return "identifier";
     33          case TYPEDEFname: return "typedef";
     34          case TYPEGENname: return "typegen";
     35          default:
     36                cerr << "Error: cfa-cpp internal error, invalid kind of identifier" << endl;
     37                abort();
     38        } // switch
     39} // kindName
     40)
     41
    2942TypedefTable::~TypedefTable() {
    3043        if ( ! SemanticErrorThrow && kindTable.currentScope() != 0 ) {
    31                 // std::cerr << "scope failure " << kindTable.currentScope() << endl;
     44                cerr << "Error: cfa-cpp internal error, scope failure " << kindTable.currentScope() << endl;
     45                abort();
    3246        } // if
    3347} // TypedefTable::~TypedefTable
     
    4458} // TypedefTable::isKind
    4559
    46 void TypedefTable::changeKind( const string & identifier, int kind ) {
    47         KindTable::iterator posn = kindTable.find( identifier );
    48         if ( posn != kindTable.end() ) posn->second = kind;     // exists => update
    49 } // TypedefTable::changeKind
    50 
    5160// SKULLDUGGERY: Generate a typedef for the aggregate name so the aggregate does not have to be qualified by
    5261// "struct". Only generate the typedef, if the name is not in use. The typedef is implicitly (silently) removed if the
    5362// name is explicitly used.
    54 void TypedefTable::makeTypedef( const string & name ) {
     63void TypedefTable::makeTypedef( const string & name, int kind ) {
     64//    Check for existence is necessary to handle:
     65//        struct Fred {};
     66//        void Fred();
     67//        void fred() {
     68//           struct Fred act; // do not add as type in this scope
     69//           Fred();
     70//        }
    5571        if ( ! typedefTable.exists( name ) ) {
    56                 typedefTable.addToEnclosingScope( name, TYPEDEFname );
     72                typedefTable.addToEnclosingScope( name, kind, "MTD" );
    5773        } // if
    5874} // TypedefTable::makeTypedef
    5975
    60 void TypedefTable::addToEnclosingScope( const std::string & identifier, int kind ) {
    61         assert( kindTable.currentScope() >= 1 );
    62         auto scope = kindTable.currentScope() - 1;
    63         debugPrint( "Adding " << identifier << " as kind " << kind << " scope " << scope << endl );
     76void TypedefTable::addToScope( const string & identifier, int kind, const char * locn __attribute__((unused)) ) {
     77        auto scope = kindTable.currentScope();
     78        debugPrint( cerr << "Adding current at " << locn << " " << identifier << " as " << kindName( kind ) << " scope " << scope << endl );
     79        auto ret = kindTable.insertAt( scope, identifier, kind );
     80        //if ( ! ret.second ) ret.first->second = kind;         // exists => update
     81        assert( ret.first->second == kind );                            // exists
     82} // TypedefTable::addToScope
     83
     84void TypedefTable::addToEnclosingScope( const string & identifier, int kind, const char * locn __attribute__((unused)) ) {
     85        assert( kindTable.currentScope() >= 1 + level );
     86        auto scope = kindTable.currentScope() - 1 - level;
     87        debugPrint( cerr << "Adding enclosing at " << locn << " " << identifier << " as " << kindName( kind ) << " scope " << scope << " level " << level << endl );
    6488        auto ret = kindTable.insertAt( scope, identifier, kind );
    6589        if ( ! ret.second ) ret.first->second = kind;           // exists => update
     
    6892void TypedefTable::enterScope() {
    6993        kindTable.beginScope();
    70         debugPrint( "Entering scope " << kindTable.currentScope() << endl );
     94        debugPrint( cerr << "Entering scope " << kindTable.currentScope() << endl; print() );
    7195} // TypedefTable::enterScope
    7296
    7397void TypedefTable::leaveScope() {
    74         debugPrint( "Leaving scope " << kindTable.currentScope() << endl );
     98        debugPrint( cerr << "Leaving scope " << kindTable.currentScope() << endl; print() );
    7599        kindTable.endScope();
    76100} // TypedefTable::leaveScope
    77101
    78 // void TypedefTable::print( void ) const {
    79 //      for ( KindTable::const_iterator i = table.begin(); i != table.end(); i++) {
    80 //              debugPrint( (*i ).first << ": " );
    81 //              list< Entry > declList = (*i).second;
    82 //              for ( list< Entry >::const_iterator j = declList.begin(); j != declList.end(); j++ ) {
    83 //                      debugPrint( "(" << (*j).scope << " " << (*j).kind << ") " );
    84 //              }
    85 //              debugPrint( endl );
    86 //      } // for
    87 // }
     102void TypedefTable::print( void ) const {
     103        KindTable::size_type scope = kindTable.currentScope();
     104        debugPrint( cerr << "[" << scope << "]" );
     105        for ( KindTable::const_iterator i = kindTable.begin(); i != kindTable.end(); i++ ) {
     106                while ( i.get_level() != scope ) {
     107                        --scope;
     108                        debugPrint( cerr << endl << "[" << scope << "]" );
     109                } // while
     110                debugPrint( cerr << " " << (*i).first << ":" << kindName( (*i).second ) );
     111        } // for
     112        while ( scope > 0 ) {
     113                --scope;
     114                debugPrint( cerr << endl << "[" << scope << "]" );
     115        } // while
     116        debugPrint( cerr << endl );
     117} // TypedefTable::print
    88118
    89119// Local Variables: //
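
The debugPrint macro above changes from printing a single expression to splicing an arbitrary chunk of code in or out, which is what lets the kindName helper and the print() calls vanish entirely in non-debug builds. A tiny stand-alone sketch of that idiom; DEBUG_TYPEDEF is an invented toggle, whereas the real code simply flips the #if 0:

    #include <iostream>

    #define DEBUG_TYPEDEF 1                 // hypothetical switch; set to 0 to compile tracing out
    #if DEBUG_TYPEDEF
    #define debugPrint( code ) code         // splice the statements in
    #else
    #define debugPrint( code )              // compile them away
    #endif

    int main() {
        int scope = 1;
        debugPrint( std::cerr << "Entering scope " << scope << std::endl; );
        ++scope;                            // real work continues either way
        debugPrint( std::cerr << "Leaving scope " << scope << std::endl; );
        return 0;
    }
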
  • src/Parser/TypedefTable.h

    r97397a26 rb21c77a  
    1010// Created On       : Sat May 16 15:24:36 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue May 22 08:39:29 2018
    13 // Update Count     : 77
     12// Last Modified On : Fri Jun 22 05:29:58 2018
     13// Update Count     : 86
    1414//
    1515
     
    2525        typedef ScopedMap< std::string, int > KindTable;
    2626        KindTable kindTable;   
     27        unsigned int level = 0;
    2728  public:
    2829        ~TypedefTable();
     
    3031        bool exists( const std::string & identifier );
    3132        int isKind( const std::string & identifier ) const;
    32         void changeKind( const std::string & identifier, int kind );
    33         void makeTypedef( const std::string & name );
    34         void addToEnclosingScope( const std::string & identifier, int kind );
     33        void makeTypedef( const std::string & name, int kind = TYPEDEFname );
     34        void addToScope( const std::string & identifier, int kind, const char * );
     35        void addToEnclosingScope( const std::string & identifier, int kind, const char * );
    3536
    3637        void enterScope();
    3738        void leaveScope();
     39
     40        void up() { level += 1; }
     41        void down() { level -= 1; }
     42
     43        void print( void ) const;
    3844}; // TypedefTable
    3945
  • src/Parser/lex.ll

    r97397a26 rb21c77a  
    1010 * Created On       : Sat Sep 22 08:58:10 2001
    1111 * Last Modified By : Peter A. Buhr
    12  * Last Modified On : Thu May  3 13:42:40 2018
    13  * Update Count     : 676
     12 * Last Modified On : Wed Jun 20 09:08:28 2018
     13 * Update Count     : 682
    1414 */
    1515
     
    2525//**************************** Includes and Defines ****************************
    2626
     27// trigger before each matching rule's action
     28#define YY_USER_ACTION \
     29        yylloc.first_line = yylineno; \
     30        yylloc.first_column = column; \
     31        column += yyleng; \
     32        yylloc.last_column = column; \
     33        yylloc.last_line = yylineno; \
     34        yylloc.filename = yyfilename ? yyfilename : "";
    2735unsigned int column = 0;                                                                // position of the end of the last token parsed
    28 #define YY_USER_ACTION yylloc.first_line = yylineno; yylloc.first_column = column; column += yyleng; yylloc.last_column = column; yylloc.last_line = yylineno; yylloc.filename = yyfilename ? yyfilename : "";                          // trigger before each matching rule's action
    2936
    3037#include <string>
     
    4956#define NUMERIC_RETURN(x)       rm_underscore(); RETURN_VAL( x ) // numeric constant
    5057#define KEYWORD_RETURN(x)       RETURN_CHAR( x )                        // keyword
    51 #define QKEYWORD_RETURN(x)      typedefTable.isKind( yytext ); RETURN_VAL(x); // quasi-keyword
     58#define QKEYWORD_RETURN(x)      RETURN_VAL(x);                          // quasi-keyword
    5259#define IDENTIFIER_RETURN()     RETURN_VAL( typedefTable.isKind( yytext ) )
    5360#define ATTRIBUTE_RETURN()      RETURN_VAL( ATTR_IDENTIFIER )
     
    232239finally                 { KEYWORD_RETURN(FINALLY); }                    // CFA
    233240float                   { KEYWORD_RETURN(FLOAT); }
     241_Float32                { KEYWORD_RETURN(FLOAT); }                              // GCC
     242_Float32x               { KEYWORD_RETURN(FLOAT); }                              // GCC
     243_Float64                { KEYWORD_RETURN(DOUBLE); }                             // GCC
     244_Float64x               { KEYWORD_RETURN(DOUBLE); }                             // GCC
    234245__float80               { KEYWORD_RETURN(FLOAT80); }                    // GCC
    235246float80                 { KEYWORD_RETURN(FLOAT80); }                    // GCC
     247_Float128               { KEYWORD_RETURN(FLOAT128); }                   // GCC
     248_Float128x              { KEYWORD_RETURN(FLOAT128); }                   // GCC
    236249__float128              { KEYWORD_RETURN(FLOAT128); }                   // GCC
    237250float128                { KEYWORD_RETURN(FLOAT128); }                   // GCC
     
    446459
    447460%%
     461
    448462// ----end of lexer----
    449463
    450464void yyerror( const char * errmsg ) {
     465        SemanticErrorThrow = true;
    451466        cout << (yyfilename ? yyfilename : "*unknown file*") << ':' << yylineno << ':' << column - yyleng + 1
    452467                 << ": " << ErrorHelpers::error_str() << errmsg << " at token \"" << (yytext[0] == '\0' ? "EOF" : yytext) << '"' << endl;
  • src/Parser/parser.yy

    r97397a26 rb21c77a  
    1010// Created On       : Sat Sep  1 20:22:55 2001
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu May 24 18:11:59 2018
    13 // Update Count     : 3369
     12// Last Modified On : Fri Jun 22 13:59:11 2018
     13// Update Count     : 3586
    1414//
    1515
     
    136136} // build_postfix_name
    137137