Changes in / [b2da0574:2ae16219]


Files: 122 added, 235 deleted, 16 edited

  • doc/LaTeXmacros/lstlang.sty

    rb2da0574 r2ae16219  
    88%% Created On       : Sat May 13 16:34:42 2017
    99%% Last Modified By : Peter A. Buhr
    10 %% Last Modified On : Fri Mar 16 22:18:12 2018
    11 %% Update Count     : 16
     10%% Last Modified On : Fri Apr  6 23:44:50 2018
     11%% Update Count     : 20
    1212%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1313
     
    112112\lstdefinelanguage{CFA}[ANSI]{C}{
    113113        morekeywords={
    114                 _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, _At, __attribute,
    115                 __attribute__, auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__,
    116                 __const, __const__, disable, dtype, enable, __extension__, fallthrough, fallthru,
    117                 finally, forall, ftype, _Generic, _Imaginary, inline, __label__, lvalue, _Noreturn, one_t,
    118                 otype, restrict, _Static_assert, throw, throwResume, trait, try, ttype, typeof, __typeof,
    119                 __typeof__, virtual, with, zero_t},
    120         morekeywords=[2]{
    121                 _Atomic, coroutine, is_coroutine, is_monitor, is_thread, monitor, mutex, nomutex, or,
    122                 resume, suspend, thread, _Thread_local, waitfor, when, yield},
     114                _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, __attribute, __attribute__,
     115                auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__,
     116                coroutine, disable, dtype, enable, __extension__, exception, fallthrough, fallthru, finally,
     117                __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,
     118                inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or,
     119                otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread,
     120                _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__,
     121                virtual, __volatile, __volatile__, waitfor, when, with, zero_t,
     122    },
    123123        moredirectives={defined,include_next}%
    124124}
  • doc/papers/concurrency/Makefile

    rb2da0574 r2ae16219  
    33Build = build
    44Figures = figures
    5 Macros = AMA/AMA-stix/ama
     5Macros = ../AMA/AMA-stix/ama
    66TeXLIB = .:annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography:
    77LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
     
    7979
    8080WileyNJD-AMA.bst:
    81         ln -fs AMA/AMA-stix/ama/WileyNJD-AMA.bst .
     81        ln -fs ../AMA/AMA-stix/ama/WileyNJD-AMA.bst .
    8282
    8383%.tex : %.fig
  • doc/papers/concurrency/Paper.tex

    rb2da0574 r2ae16219  
    1212
    1313% Latex packages used in the document.
     14
    1415\usepackage{epic,eepic}
    1516\usepackage{xspace}
     
    1718\usepackage{upquote}                                            % switch curled `'" to straight
    1819\usepackage{listings}                                           % format program code
    19 \usepackage[labelformat=simple]{subfig}
     20\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt]{subfig}
    2021\renewcommand{\thesubfigure}{(\alph{subfigure})}
    2122\usepackage{siunitx}
    2223\sisetup{ binary-units=true }
    23 %\input{style}                                                          % bespoke macros used in the document
    2424
    2525\hypersetup{breaklinks=true}
     
    3131\renewcommand{\linenumberfont}{\scriptsize\sffamily}
    3232
    33 \lefthyphenmin=4                                                        % hyphen only after 4 characters
    34 \righthyphenmin=4
     33\renewcommand{\textfraction}{0.0}       % the entire page maybe devoted to floats with no text on the page at all
     34
     35\lefthyphenmin=3                                                        % hyphen only after 4 characters
     36\righthyphenmin=3
    3537
    3638%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
     
    9597% Latin abbreviation
    9698\newcommand{\abbrevFont}{\textit}                       % set empty for no italics
     99\@ifundefined{eg}{
    97100\newcommand{\EG}{\abbrevFont{e}.\abbrevFont{g}.}
    98101\newcommand*{\eg}{%
     
    100103                {\@ifnextchar{:}{\EG}%
    101104                        {\EG,\xspace}}%
    102 }%
     105}}{}%
     106\@ifundefined{ie}{
    103107\newcommand{\IE}{\abbrevFont{i}.\abbrevFont{e}.}
    104108\newcommand*{\ie}{%
     
    106110                {\@ifnextchar{:}{\IE}%
    107111                        {\IE,\xspace}}%
    108 }%
     112}}{}%
     113\@ifundefined{etc}{
    109114\newcommand{\ETC}{\abbrevFont{etc}}
    110115\newcommand*{\etc}{%
    111116        \@ifnextchar{.}{\ETC}%
    112117        {\ETC.\xspace}%
    113 }%
     118}}{}%
     119\@ifundefined{etal}{
    114120\newcommand{\ETAL}{\abbrevFont{et}~\abbrevFont{al}}
    115 \renewcommand*{\etal}{%
     121\newcommand*{\etal}{%
    116122        \@ifnextchar{.}{\protect\ETAL}%
    117123                {\protect\ETAL.\xspace}%
    118 }%
     124}}{}%
     125\@ifundefined{viz}{
    119126\newcommand{\VIZ}{\abbrevFont{viz}}
    120127\newcommand*{\viz}{%
    121128        \@ifnextchar{.}{\VIZ}%
    122129                {\VIZ.\xspace}%
    123 }%
     130}}{}%
    124131\makeatother
    125132
     
    134141\lstdefinelanguage{CFA}[ANSI]{C}{
    135142        morekeywords={
    136                 _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, _At, __attribute,
    137                 __attribute__, auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__,
    138                 __const, __const__, disable, dtype, enable, exception, __extension__, fallthrough, fallthru,
    139                 finally, forall, ftype, _Generic, _Imaginary, inline, __label__, lvalue, _Noreturn, one_t,
    140                 otype, restrict, _Static_assert, throw, throwResume, trait, try, ttype, typeof, __typeof,
    141                 __typeof__, virtual, with, zero_t},
    142         morekeywords=[2]{
    143                 _Atomic, coroutine, is_coroutine, is_monitor, is_thread, monitor, mutex, nomutex, or,
    144                 resume, suspend, thread, _Thread_local, waitfor, when, yield},
     143                _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, __attribute, __attribute__,
     144                auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__,
     145                coroutine, disable, dtype, enable, __extension__, exception, fallthrough, fallthru, finally,
     146                __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,
     147                inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or,
     148                otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread,
     149                _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__,
     150                virtual, __volatile, __volatile__, waitfor, when, with, zero_t},
    145151        moredirectives={defined,include_next}%
    146152}
     
    212218\authormark{Thierry Delisle \textsc{et al}}
    213219
    214 \address[1]{\orgdiv{David R. Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Ontario}, \country{Canada}}}
     220\address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Ontario}, \country{Canada}}}
    215221
    216222\corres{*Peter A. Buhr, \email{pabuhr{\char`\@}uwaterloo.ca}}
    217 \presentaddress{David R. Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, N2L 3G1, Canada}
     223\presentaddress{Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, N2L 3G1, Canada}
    218224
    219225
     
    229235}%
    230236
    231 \keywords{concurrency, runtime, coroutines, threads, C, Cforall}
     237\keywords{concurrency, parallelism, coroutines, threads, monitors, runtime, C, Cforall}
    232238
    233239
     
    243249% ======================================================================
    244250
    245 This paper provides a minimal concurrency \newterm{API} that is simple, efficient and can be used to build other concurrency features.
     251This paper provides a minimal concurrency \newterm{Application Program Interface} (API) that is simple, efficient and can be used to build other concurrency features.
    246252While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master.
    247253An easier approach for programmers is to support higher-level constructs as the basis of concurrency.
     
    249255Examples of high-level approaches are task based~\cite{TBB}, message passing~\cite{Erlang,MPI}, and implicit threading~\cite{OpenMP}.
    250256
    251 The terminology used in this paper is as follows.
     257This paper uses the following terminology.
    252258A \newterm{thread} is a fundamental unit of execution that runs a sequence of code and requires a stack to maintain state.
    253 Multiple simultaneous threads gives rise to \newterm{concurrency}, which requires locking to ensure safe access to shared data.
    254 % Correspondingly, concurrency is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, etc.) concurrent threads are introduced.
     259Multiple simultaneous threads give rise to \newterm{concurrency}, which requires locking to ensure safe communication and access to shared data.
     260% Correspondingly, concurrency is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, \etc) concurrent threads are introduced.
    255261\newterm{Locking}, and by extension locks, are defined as a mechanism to prevent progress of threads to provide safety.
    256262\newterm{Parallelism} is running multiple threads simultaneously.
     
    259265
    260266Hence, there are two problems to be solved in the design of concurrency for a programming language: concurrency and parallelism.
    261 While these two concepts are often combined, they are in fact distinct, requiring different tools~\cite{Buhr05a}.
    262 Concurrency tools handle mutual exclusion and synchronization, while parallelism tools handle performance, cost and resource utilization.
     267While these two concepts are often combined, they are in fact distinct, requiring different tools~\cite[\S~2]{Buhr05a}.
     268Concurrency tools handle synchronization and mutual exclusion, while parallelism tools handle performance, cost and resource utilization.
    263269
    264270The proposed concurrency API is implemented in a dialect of C, called \CFA.
     
    277283Like C, the basics of \CFA revolve around structures and routines, which are thin abstractions over machine code.
    278284The vast majority of the code produced by the \CFA translator respects memory layouts and calling conventions laid out by C.
    279 Interestingly, while \CFA is not an object-oriented language, lacking the concept of a receiver (e.g., {\tt this}), it does have some notion of objects\footnote{C defines the term objects as : ``region of data storage in the execution environment, the contents of which can represent
     285Interestingly, while \CFA is not an object-oriented language, lacking the concept of a receiver (\eg {\tt this}), it does have some notion of objects\footnote{C defines the term objects as : ``region of data storage in the execution environment, the contents of which can represent
    280286values''~\cite[3.15]{C11}}, most importantly construction and destruction of objects.
    281287Most of the following code examples can be found on the \CFA website~\cite{Cforall}.
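To make the object notion concrete, the following is a minimal sketch of \CFA's constructor and destructor operators, @?{}@ and @^?{}@, which appear throughout the later examples; the type @Counter@ and its field are illustrative only.
\begin{cfa}
struct Counter { int cnt; };
void ?{}( Counter & c ) { c.cnt = 0; }                  // default constructor, run at declaration
void ?{}( Counter & c, int start ) { c.cnt = start; }   // overloaded constructor
void ^?{}( Counter & c ) {}                             // destructor, run at end of scope
int main() {
	Counter a, b = { 10 };                              // a uses the default constructor, b the overloaded one
}                                                       // destructors run here, in reverse declaration order
\end{cfa}
As in the later coroutine examples, construction and destruction are ordinary routines and therefore overloadable.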
     
    329335\subsection{Operators}
    330336Overloading also extends to operators.
    331 The syntax for denoting operator-overloading is to name a routine with the symbol of the operator and question marks where the arguments of the operation appear, e.g.:
     337The syntax for denoting operator-overloading is to name a routine with the symbol of the operator and question marks where the arguments of the operation appear, \eg:
    332338\begin{cfa}
    333339int ++? (int op);                       $\C{// unary prefix increment}$
     
    420426
    421427Note that the type used for assertions can be either an @otype@ or a @dtype@.
    422 Types declared as @otype@ refer to ``complete'' objects, i.e., objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator.
     428Types declared as @otype@ refer to ``complete'' objects, \ie objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator.
    423429Using @dtype@, on the other hand, has none of these assumptions but is extremely restrictive: it only guarantees the object is addressable.
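For illustration, a small sketch of the difference, assuming hypothetical routines @dup@ and @addr_of@: an @otype@ parameter can be declared, copied, and assigned, whereas a @dtype@ parameter can only be manipulated through pointers or references.
\begin{cfa}
forall( otype T )                       // T is a complete object: size, constructors, destructor, assignment
T dup( T x ) {
	T copy = x;                         // declaring and copying a T is allowed
	return copy;
}
forall( dtype T )                       // T is only guaranteed to be addressable
T * addr_of( T & obj ) {
	return &obj;                        // only pointers/references to T can be formed
}
\end{cfa}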
    424430
     
    458464% ======================================================================
    459465% ======================================================================
    460 Before any detailed discussion of the concurrency and parallelism in \CFA, it is important to describe the basics of concurrency and how they are expressed in \CFA user code.
    461 
    462 \section{Basics of concurrency}
     466
    463467At its core, concurrency is based on having multiple call-stacks and scheduling among threads of execution executing on these stacks.
    464 Concurrency without parallelism only requires having multiple call stacks (or contexts) for a single thread of execution.
    465 
    466 Execution with a single thread and multiple stacks where the thread is self-scheduling deterministically across the stacks is called coroutining.
    467 Execution with a single and multiple stacks but where the thread is scheduled by an oracle (non-deterministic from the thread's perspective) across the stacks is called concurrency.
    468 
    469 Therefore, a minimal concurrency system can be achieved by creating coroutines (see Section \ref{coroutine}), which instead of context-switching among each other, always ask an oracle where to context-switch next.
     468Multiple call stacks (or contexts) and a single thread of execution does \emph{not} imply concurrency.
     469Execution with a single thread and multiple stacks where the thread is deterministically self-scheduling across the stacks is called \newterm{coroutining};
     470execution with a single thread and multiple stacks but where the thread is scheduled by an oracle (non-deterministic from the thread's perspective) across the stacks is called concurrency~\cite[\S~3]{Buhr05a}.
     471Therefore, a minimal concurrency system can be achieved using coroutines (see Section \ref{coroutine}), which instead of context-switching among each other, always defer to an oracle for where to context-switch next.
     472
    470473While coroutines can execute on the caller's stack-frame, stack-full coroutines allow full generality and are sufficient as the basis for concurrency.
    471474The aforementioned oracle is a scheduler and the whole system now follows a cooperative threading-model (a.k.a., non-preemptive scheduling).
     
    480483
    481484
    482 \section{\protect\CFA's Thread Building Blocks}
     485\subsection{\protect\CFA's Thread Building Blocks}
    483486
    484487One of the important features that are missing in C is threading\footnote{While the C11 standard defines a ``threads.h'' header, it is minimal and defined as optional.
     
    490493
    491494
    492 \section{Coroutines: A Stepping Stone}\label{coroutine}
    493 
    494 While the main focus of this proposal is concurrency and parallelism, it is important to address coroutines, which are actually a significant building block of a concurrency system. \textbf{Coroutine}s are generalized routines which have predefined points where execution is suspended and can be resumed at a later time.
    495 Therefore, they need to deal with context switches and other context-management operations.
    496 This proposal includes coroutines both as an intermediate step for the implementation of threads, and a first-class feature of \CFA.
    497 Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant.
    498 The core \textbf{api} of coroutines revolves around two features: independent call-stacks and @suspend@/@resume@.
     495\subsection{Coroutines: A Stepping Stone}\label{coroutine}
     496
     497While the focus of this proposal is concurrency and parallelism, it is important to address coroutines, which are a significant building block of a concurrency system.
     498\newterm{Coroutine}s are generalized routines with points where execution is suspended and resumed at a later time.
      499Suspend/resume is a context switch, and coroutines have other context-management operations.
     500Many design challenges of threads are partially present in designing coroutines, which makes the design effort relevant.
      501The core \textbf{API} of coroutines has two features: independent call-stacks and @suspend@/@resume@.
     502
     503A coroutine handles the class of problems that need to retain state between calls (\eg plugin, device driver, finite-state machine).
     504For example, a problem made easier with coroutines is unbounded generators, \eg generating an infinite sequence of Fibonacci numbers:
     505\begin{displaymath}
     506f(n) = \left \{
     507\begin{array}{ll}
     5080                               & n = 0         \\
     5091                               & n = 1         \\
     510f(n-1) + f(n-2) & n \ge 2       \\
     511\end{array}
     512\right.
     513\end{displaymath}
     514Figure~\ref{f:C-fibonacci} shows conventional approaches for writing a Fibonacci generator in C.
     515
     516Figure~\ref{f:GlobalVariables} illustrates the following problems:
     517unencapsulated global variables necessary to retain state between calls;
      518only one Fibonacci generator can run at a time;
     519execution state must be explicitly retained.
     520Figure~\ref{f:ExternalState} addresses these issues:
     521unencapsulated program global variables become encapsulated structure variables;
      522multiple Fibonacci generators can run at a time by declaring multiple Fibonacci objects;
     523explicit execution state is removed by precomputing the first two Fibonacci numbers and returning $f(n-2)$.
    499524
    500525\begin{figure}
    501 \begin{center}
    502 \begin{tabular}{@{}lll@{}}
    503 \multicolumn{1}{c}{\textbf{callback}} & \multicolumn{1}{c}{\textbf{output array}} & \multicolumn{1}{c}{\textbf{external state}} \\
    504 \begin{cfa}
    505 void fib_func(
    506         int n, void (* callback)( int )
    507 ) {
    508         int fn, f1 = 0, f2 = 1;
    509         for ( int i = 0; i < n; i++ ) {
    510                 callback( f1 );
    511                 fn = f1 + f2;
    512                 f1 = f2;  f2 = fn;
     526\centering
     527\newbox\myboxA
     528\begin{lrbox}{\myboxA}
     529\begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     530`int f1, f2, state = 1;`   // single global variables
     531int fib() {
     532        int fn;
     533        `switch ( state )` {  // explicit execution state
     534          case 1: fn = 0;  f1 = fn;  state = 2;  break;
     535          case 2: fn = 1;  f2 = f1;  f1 = fn;  state = 3;  break;
     536          case 3: fn = f1 + f2;  f2 = f1;  f1 = fn;  break;
    513537        }
     538        return fn;
    514539}
    515540int main() {
    516         void print_fib( int n ) {
    517                 printf( "%d\n", n );
     541
     542        for ( int i = 0; i < 10; i += 1 ) {
     543                printf( "%d\n", fib() );
    518544        }
    519         fib_func( 10, print_fib );
    520 }
    521 
    522 \end{cfa}
    523 &
    524 \begin{cfa}
    525 void fib_array(
    526         int n, int * array
    527 ) {
    528         int fn, f1 = 0, f2 = 1;
    529         for ( int i = 0; i < n; i++ ) {
    530                 array[i] = f1;
    531                 fn = f1 + f2;
    532                 f1 = f2;  f2 = fn;
     545}
     546\end{lstlisting}
     547\end{lrbox}
     548
     549\newbox\myboxB
     550\begin{lrbox}{\myboxB}
     551\begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     552#define FIB_INIT `{ 0, 1 }`
     553typedef struct { int f2, f1; } Fib;
     554int fib( Fib * f ) {
     555
     556        int ret = f->f2;
     557        int fn = f->f1 + f->f2;
     558        f->f2 = f->f1; f->f1 = fn;
     559
     560        return ret;
     561}
     562int main() {
     563        Fib f1 = FIB_INIT, f2 = FIB_INIT;
     564        for ( int i = 0; i < 10; i += 1 ) {
     565                printf( "%d %d\n", fib( &f1 ), fib( &f2 ) );
    533566        }
    534567}
     568\end{lstlisting}
     569\end{lrbox}
     570
     571\subfloat[3 States: global variables]{\label{f:GlobalVariables}\usebox\myboxA}
     572\qquad
     573\subfloat[1 State: external variables]{\label{f:ExternalState}\usebox\myboxB}
     574\caption{C Fibonacci Implementations}
     575\label{f:C-fibonacci}
     576
     577\bigskip
     578
     579\newbox\myboxA
     580\begin{lrbox}{\myboxA}
     581\begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     582`coroutine` Fib { int fn; };
     583void main( Fib & f ) with( f ) {
     584        int f1, f2;
     585        fn = 0;  f1 = fn;  `suspend()`;
     586        fn = 1;  f2 = f1;  f1 = fn;  `suspend()`;
     587        for ( ;; ) {
     588                fn = f1 + f2;  f2 = f1;  f1 = fn;  `suspend()`;
     589        }
     590}
     591int next( Fib & fib ) with( fib ) {
     592        `resume( fib );`
     593        return fn;
     594}
    535595int main() {
    536         int a[10];
    537         fib_array( 10, a );
    538         for ( int i = 0; i < 10; i++ ) {
    539                 printf( "%d\n", a[i] );
     596        Fib f1, f2;
     597        for ( int i = 1; i <= 10; i += 1 ) {
     598                sout | next( f1 ) | next( f2 ) | endl;
    540599        }
    541600}
    542 \end{cfa}
    543 &
    544 \begin{cfa}
    545 
    546 typedef struct { int f1, f2; } Fib;
    547 int fib_state(
    548         Fib * fib
    549 ) {
    550         int ret = fib->f1;
    551         int fn = fib->f1 + fib->f2;
    552         fib->f2 = fib->f1; fib->f1 = fn;
     601\end{lstlisting}
     602\end{lrbox}
     603\newbox\myboxB
     604\begin{lrbox}{\myboxB}
     605\begin{lstlisting}[aboveskip=0pt,belowskip=0pt]
     606`coroutine` Fib { int ret; };
     607void main( Fib & f ) with( f ) {
     608        int fn, f1 = 1, f2 = 0;
     609        for ( ;; ) {
     610                ret = f2;
     611
     612                fn = f1 + f2;  f2 = f1;  f1 = fn; `suspend();`
     613        }
     614}
     615int next( Fib & fib ) with( fib ) {
     616        `resume( fib );`
    553617        return ret;
    554618}
    555 int main() {
    556         Fib fib = { 0, 1 };
    557 
    558         for ( int i = 0; i < 10; i++ ) {
    559                 printf( "%d\n", fib_state( &fib ) );
    560         }
    561 }
    562 \end{cfa}
    563 \end{tabular}
    564 \end{center}
    565 \caption{Fibonacci Implementations in C}
    566 \label{lst:fib-c}
     619
     620
     621
     622
     623
     624
     625\end{lstlisting}
     626\end{lrbox}
     627\subfloat[3 States, internal variables]{\label{f:Coroutine3States}\usebox\myboxA}
     628\qquad
     629\subfloat[1 State, internal variables]{\label{f:Coroutine1State}\usebox\myboxB}
     630\caption{\CFA Coroutine Fibonacci Implementations}
     631\label{f:fibonacci-cfa}
    567632\end{figure}
    568633
    569 A good example of a problem made easier with coroutines is generators, e.g., generating the Fibonacci sequence.
    570 This problem comes with the challenge of decoupling how a sequence is generated and how it is used.
    571 Listing \ref{lst:fibonacci-c} shows conventional approaches to writing generators in C.
    572 All three of these approach suffer from strong coupling.
    573 The left and centre approaches require that the generator have knowledge of how the sequence is used, while the rightmost approach requires holding internal state between calls on behalf of the generator and makes it much harder to handle corner cases like the Fibonacci seed.
    574 
    575 Listing \ref{lst:fibonacci-cfa} is an example of a solution to the Fibonacci problem using \CFA coroutines, where the coroutine stack holds sufficient state for the next generation.
     634Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, which provides communication for multiple interface functions, and the \newterm{coroutine main}, which runs on the coroutine stack.
     635\begin{cfa}
     636`coroutine C { char c; int i; _Bool s; };`      $\C{// used for communication}$
     637void ?{}( C & c ) { s = false; }                        $\C{// constructor}$
     638void main( C & cor ) with( cor ) {                      $\C{// actual coroutine}$
     639        while ( ! s ) // process c
     640        if ( v == ... ) s = false;
     641}
     642// interface functions
     643char cont( C & cor, char ch ) { c = ch; resume( cor ); return c; }
     644_Bool stop( C & cor, int v ) { s = true; i = v; resume( cor ); return s; }
     645\end{cfa}
     646
      647Figure~\ref{f:fibonacci-cfa} encapsulates the Fibonacci state in the coroutine type and shows a solution to the Fibonacci problem using \CFA coroutines, where the coroutine stack holds sufficient state for the next generation.
    576648This solution has the advantage of having very strong decoupling between how the sequence is generated and how it is used.
    577649Indeed, this version is as easy to use as the @fibonacci_state@ solution, while the implementation is very similar to the @fibonacci_func@ example.
    578650
     651Figure~\ref{f:fmt-line} shows the @Format@ coroutine for restructuring text into groups of character blocks of fixed size.
     652The example takes advantage of resuming coroutines in the constructor to simplify the code and highlights the idea that interesting control flow can occur in the constructor.
     653
    579654\begin{figure}
    580 \begin{cfa}
    581 coroutine Fibonacci { int fn; };                                $\C{// used for communication}$
    582 
    583 void ?{}( Fibonacci & fib ) with( fib ) { fn = 0; } $\C{// constructor}$
    584 
    585 void main( Fibonacci & fib ) with( fib ) {              $\C{// main called on first resume}$
    586         int fn1, fn2;                                                           $\C{// retained between resumes}$
    587         fn = 0;  fn1 = fn;                                                      $\C{// 1st case}$
    588         suspend();                                                                      $\C{// restart last resume}$
    589         fn = 1;  fn2 = fn1;  fn1 = fn;                          $\C{// 2nd case}$
    590         suspend();                                                                      $\C{// restart last resume}$
    591         for ( ;; ) {
    592                 fn = fn1 + fn2; fn2 = fn1;  fn1 = fn;   $\C{// general case}$
    593                 suspend();                                                              $\C{// restart last resume}$
     655\centering
     656\begin{cfa}
     657`coroutine` Format {
     658        char ch;                                                                $\C{// used for communication}$
     659        int g, b;                                                               $\C{// global because used in destructor}$
     660};
     661void ?{}( Format & fmt ) { `resume( fmt );` } $\C{// prime (start) coroutine}$
     662void ^?{}( Format & fmt ) with( fmt ) { if ( g != 0 || b != 0 ) sout | endl; }
     663void main( Format & fmt ) with( fmt ) {
     664        for ( ;; ) {                                                    $\C{// for as many characters}$
     665                for ( g = 0; g < 5; g += 1 ) {          $\C{// groups of 5 blocks}$
     666                        for ( b = 0; b < 4; b += 1 ) {  $\C{// blocks of 4 characters}$
     667                                `suspend();`
     668                                sout | ch;                                      $\C{// print character}$
     669                        }
     670                        sout | "  ";                                    $\C{// print block separator}$
     671                }
     672                sout | endl;                                            $\C{// print group separator}$
    594673        }
    595674}
    596 int next( Fibonacci & fib ) with( fib ) {
    597         resume( fib );                                                          $\C{// restart last suspend}$
    598         return fn;
    599 }
    600 int main() {
    601         Fibonacci f1, f2;
    602         for ( int i = 1; i <= 10; i++ ) {
    603                 sout | next( f1 ) | next( f2 ) | endl;
    604         }
    605 }
    606 \end{cfa}
    607 \caption{Coroutine Fibonacci }
    608 \label{lst:fibonacci-cfa}
    609 \end{figure}
    610 
    611 Listing \ref{lst:fmt-line} shows the @Format@ coroutine for restructuring text into groups of character blocks of fixed size.
    612 The example takes advantage of resuming coroutines in the constructor to simplify the code and highlights the idea that interesting control flow can occur in the constructor.
    613 
    614 \begin{figure}
    615 \begin{cfa}[tabsize=3,caption={Formatting text into lines of 5 blocks of 4 characters.},label={lst:fmt-line}]
    616 // format characters into blocks of 4 and groups of 5 blocks per line
    617 coroutine Format {
    618         char ch;                                                                        // used for communication
    619         int g, b;                                                               // global because used in destructor
    620 };
    621 
    622 void  ?{}(Format& fmt) {
    623         resume( fmt );                                                  // prime (start) coroutine
    624 }
    625 
    626 void ^?{}(Format& fmt) with fmt {
    627         if ( fmt.g != 0 || fmt.b != 0 )
    628         sout | endl;
    629 }
    630 
    631 void main(Format& fmt) with fmt {
    632         for ( ;; ) {                                                    // for as many characters
    633                 for(g = 0; g < 5; g++) {                // groups of 5 blocks
    634                         for(b = 0; b < 4; fb++) {       // blocks of 4 characters
    635                                 suspend();
    636                                 sout | ch;                                      // print character
    637                         }
    638                         sout | "  ";                                    // print block separator
    639                 }
    640                 sout | endl;                                            // print group separator
    641         }
    642 }
    643 
    644 void prt(Format & fmt, char ch) {
     675void prt( Format & fmt, char ch ) {
    645676        fmt.ch = ch;
    646         resume(fmt);
    647 }
    648 
     677        `resume( fmt );`
     678}
    649679int main() {
    650680        Format fmt;
    651681        char ch;
    652         Eof: for ( ;; ) {                                               // read until end of file
    653                 sin | ch;                                                       // read one character
    654                 if(eof(sin)) break Eof;                 // eof ?
    655                 prt(fmt, ch);                                           // push character for formatting
     682        for ( ;; ) {                                                    $\C{// read until end of file}$
     683                sin | ch;                                                       $\C{// read one character}$
     684          if ( eof( sin ) ) break;                              $\C{// eof ?}$
     685                prt( fmt, ch );                                         $\C{// push character for formatting}$
    656686        }
    657687}
    658688\end{cfa}
     689\caption{Formatting text into lines of 5 blocks of 4 characters.}
     690\label{f:fmt-line}
    659691\end{figure}
    660692
    661 \subsection{Construction}
     693\begin{figure}
     694\centering
     695\lstset{language=CFA,escapechar={},moredelim=**[is][\protect\color{red}]{`}{`}}
     696\begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{}}
     697\begin{cfa}
     698`coroutine` Prod {
     699        Cons & c;
     700        int N, money, receipt;
     701};
     702void main( Prod & prod ) with( prod ) {
     703        // 1st resume starts here
     704        for ( int i = 0; i < N; i += 1 ) {
     705                int p1 = random( 100 ), p2 = random( 100 );
     706                sout | p1 | " " | p2 | endl;
     707                int status = delivery( c, p1, p2 );
     708                sout | " $" | money | endl | status | endl;
     709                receipt += 1;
     710        }
     711        stop( c );
     712        sout | "prod stops" | endl;
     713}
     714int payment( Prod & prod, int money ) {
     715        prod.money = money;
     716        `resume( prod );`
     717        return prod.receipt;
     718}
     719void start( Prod & prod, int N, Cons &c ) {
     720        &prod.c = &c;
     721        prod.[N, receipt] = [N, 0];
     722        `resume( prod );`
     723}
     724int main() {
     725        Prod prod;
     726        Cons cons = { prod };
     727        srandom( getpid() );
     728        start( prod, 5, cons );
     729}
     730\end{cfa}
     731&
     732\begin{cfa}
     733`coroutine` Cons {
     734        Prod & p;
     735        int p1, p2, status;
     736        _Bool done;
     737};
     738void ?{}( Cons & cons, Prod & p ) {
     739        &cons.p = &p;
     740        cons.[status, done ] = [0, false];
     741}
     742void ^?{}( Cons & cons ) {}
     743void main( Cons & cons ) with( cons ) {
     744        // 1st resume starts here
     745        int money = 1, receipt;
     746        for ( ; ! done; ) {
     747                sout | p1 | " " | p2 | endl | " $" | money | endl;
     748                status += 1;
     749                receipt = payment( p, money );
     750                sout | " #" | receipt | endl;
     751                money += 1;
     752        }
     753        sout | "cons stops" | endl;
     754}
     755int delivery( Cons & cons, int p1, int p2 ) {
     756        cons.[p1, p2] = [p1, p2];
     757        `resume( cons );`
     758        return cons.status;
     759}
     760void stop( Cons & cons ) {
     761        cons.done = true;
     762        `resume( cons );`
     763}
     764
     765\end{cfa}
     766\end{tabular}
     767\caption{Producer / consumer: resume-resume cycle, bi-directional communication}
     768\label{f:ProdCons}
     769\end{figure}
     770
     771
     772\subsubsection{Construction}
     773
    662774One important design challenge for implementing coroutines and threads (shown in section \ref{threads}) is that the runtime system needs to run code after the user-constructor runs to connect the fully constructed object into the system.
    663775In the case of coroutines, this challenge is simpler since there is no non-determinism from preemption or scheduling.
     
    702814}
    703815\end{cfa}
    704 The problem in this example is a storage management issue, the function pointer @_thunk0@ is only valid until the end of the block, which limits the viable solutions because storing the function pointer for too long causes undefined behaviour; i.e., the stack-based thunk being destroyed before it can be used.
      816The problem in this example is a storage management issue: the function pointer @_thunk0@ is only valid until the end of the block, which limits the viable solutions because storing the function pointer for too long causes undefined behaviour; \ie the stack-based thunk being destroyed before it can be used.
    705817This challenge is an extension of challenges that come with second-class routines.
    706818Indeed, GCC nested routines also have the limitation that a nested routine cannot be passed outside of its declaration scope.
    707819The case of coroutines and threads is simply an extension of this problem to multiple call stacks.
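As a concrete illustration of the lifetime problem, the following C sketch (routine names are hypothetical) stores a pointer to a GCC nested routine; the nested routine is implemented with a thunk on @bar@'s stack frame, so calling it after @bar@ returns is undefined behaviour.
\begin{cfa}
void (*saved)( void );                          // dangles once bar returns
void invoke( void (*cb)( void ) ) { cb(); }
void bar() {
	int i = 0;
	void nested() { i += 1; }                   // GCC nested routine: thunk lives on bar's stack
	invoke( nested );                           // safe: bar's frame is still live
	saved = nested;                             // unsafe: thunk outlives bar's frame
}
int main() {
	bar();
	// saved();                                 // undefined behaviour: stack-based thunk already destroyed
}
\end{cfa}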
    708820
    709 \subsection{Alternative: Composition}
     821
     822\subsubsection{Alternative: Composition}
     823
    710824One solution to this challenge is to use composition/containment, where coroutine fields are added to manage the coroutine.
    711825
     
    731845This opens the door for user errors and requires extra runtime storage to pass at runtime information that can be known statically.
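For reference, a rough sketch of what composition might look like; the bookkeeping type @coroutine_desc@ and the routine name are illustrative assumptions, not the actual library interface.
\begin{cfa}
struct Fibonacci {
	int fn;                                     // communication variable
	coroutine_desc c;                           // coroutine bookkeeping embedded by the user
};
void fibonacci_main( Fibonacci * f );           // user must manually associate this routine with the field
\end{cfa}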
    732846
    733 \subsection{Alternative: Reserved keyword}
     847
     848\subsubsection{Alternative: Reserved keyword}
     849
    734850The next alternative is to use language support to annotate coroutines as follows:
    735 
    736851\begin{cfa}
    737852coroutine Fibonacci {
     
    746861The reserved keywords are only present to improve ease of use for the common cases.
    747862
    748 \subsection{Alternative: Lambda Objects}
     863
     864\subsubsection{Alternative: Lambda Objects}
    749865
    750866For coroutines as for threads, many implementations are based on routine pointers or function objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}.
     
    776892As discussed in section \ref{threads}, this approach is superseded by static approaches in terms of expressivity.
    777893
    778 \subsection{Alternative: Trait-Based Coroutines}
     894
     895\subsubsection{Alternative: Trait-Based Coroutines}
    779896
    780897Finally, the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines.
     
    821938The combination of these two approaches allows users new to coroutining and concurrency to have an easy and concise specification, while more advanced users have tighter control on memory layout and initialization.
    822939
    823 \section{Thread Interface}\label{threads}
     940\subsection{Thread Interface}\label{threads}
    824941The basic building blocks of multithreading in \CFA are \textbf{cfathread}.
    825942Both user and kernel threads are supported, where user threads are the concurrency mechanism and kernel threads are the parallel mechanism.
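As a brief sketch of the interface described below (the type name @MyThread@ is illustrative), a @thread@ type defines its behaviour in a @main@ routine, starts when an object of that type is constructed, and is implicitly joined when the object is destroyed.
\begin{cfa}
thread MyThread {};                             // thread type
void main( MyThread & ) {                       // thread main: runs on its own stack
	sout | "hello from a thread" | endl;
}
int main() {
	MyThread t[4];                              // four threads start after construction
	// program main continues to run concurrently
}                                               // implicit join: block until each thread finishes, in reverse order
\end{cfa}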
     
    9291046\end{cfa}
    9301047
    931 However, one of the drawbacks of this approach is that threads always form a tree where nodes must always outlive their children, i.e., they are always destroyed in the opposite order of construction because of C scoping rules.
     1048However, one of the drawbacks of this approach is that threads always form a tree where nodes must always outlive their children, \ie they are always destroyed in the opposite order of construction because of C scoping rules.
    9321049This restriction is relaxed by using dynamic allocation, so threads can outlive the scope in which they are created, much like dynamically allocating memory lets objects outlive the scope in which they are created.
    9331050
     
    9701087Since many of these challenges appear with the use of mutable shared state, some languages and libraries simply disallow mutable shared state (Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka (Scala)~\cite{Akka}).
    9711088In these paradigms, interaction among concurrent objects relies on message passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts (channels~\cite{CSP,Go} for example).
    972 However, in languages that use routine calls as their core abstraction mechanism, these approaches force a clear distinction between concurrent and non-concurrent paradigms (i.e., message passing versus routine calls).
     1089However, in languages that use routine calls as their core abstraction mechanism, these approaches force a clear distinction between concurrent and non-concurrent paradigms (\ie message passing versus routine calls).
    9731090This distinction in turn means that, in order to be effective, programmers need to learn two sets of design patterns.
    9741091While this distinction can be hidden away in library code, effective use of the library still has to take both paradigms into account.
     
    9841101One of the most natural, elegant, and efficient mechanisms for synchronization and communication, especially for shared-memory systems, is the \emph{monitor}.
    9851102Monitors were first proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}.
    986 Many programming languages---e.g., Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}---provide monitors as explicit language constructs.
     1103Many programming languages---\eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}---provide monitors as explicit language constructs.
    9871104In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as semaphores or locks to simulate monitors.
    9881105For these reasons, this project proposes monitors as the core concurrency construct.
    9891106
    990 \section{Basics}
     1107
     1108\subsection{Basics}
     1109
    9911110Non-determinism requires concurrent systems to offer support for mutual-exclusion and synchronization.
    9921111Mutual-exclusion is the concept that only a fixed number of threads can access a critical section at any given time, where a critical section is a group of instructions on an associated portion of data that requires the restricted access.
    9931112On the other hand, synchronization enforces relative ordering of execution and synchronization tools provide numerous mechanisms to establish timing relationships among threads.
    9941113
    995 \subsection{Mutual-Exclusion}
     1114
     1115\subsubsection{Mutual-Exclusion}
     1116
    9961117As mentioned above, mutual-exclusion is the guarantee that only a fixed number of threads can enter a critical section at once.
    9971118However, many solutions exist for mutual exclusion, which vary in terms of performance, flexibility and ease of use.
    9981119Methods range from low-level locks, which are fast and flexible but require significant attention to be correct, to  higher-level concurrency techniques, which sacrifice some performance in order to improve ease of use.
    999 Ease of use comes by either guaranteeing some problems cannot occur (e.g., being deadlock free) or by offering a more explicit coupling between data and corresponding critical section.
    1000 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (e.g., reading/writing large types atomically).
     1120Ease of use comes by either guaranteeing some problems cannot occur (\eg being deadlock free) or by offering a more explicit coupling between data and corresponding critical section.
     1121For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (\eg reading/writing large types atomically).
    10011122Another challenge with low-level locks is composability.
    10021123Locks have restricted composability because it takes careful organizing for multiple locks to be used while preventing deadlocks.
    10031124Easing composability is another feature higher-level mutual-exclusion mechanisms often offer.
    10041125
    1005 \subsection{Synchronization}
     1126
     1127\subsubsection{Synchronization}
     1128
    10061129As with mutual-exclusion, low-level synchronization primitives often offer good performance and good flexibility at the cost of ease of use.
    1007 Again, higher-level mechanisms often simplify usage by adding either better coupling between synchronization and data (e.g., message passing) or offering a simpler solution to otherwise involved challenges.
     1130Again, higher-level mechanisms often simplify usage by adding either better coupling between synchronization and data (\eg message passing) or offering a simpler solution to otherwise involved challenges.
    10081131As mentioned above, synchronization can be expressed as guaranteeing that event \textit{X} always happens before \textit{Y}.
    10091132Most of the time, synchronization happens within a critical section, where threads must acquire mutual-exclusion in a certain order.
     
    10161139Algorithms that use flag variables to detect barging threads are said to be using barging avoidance, while algorithms that baton-pass locks~\cite{Andrews89} between threads instead of releasing the locks are said to be using barging prevention.
    10171140
     1141
    10181142% ======================================================================
    10191143% ======================================================================
     
    10491173Another aspect to consider is when a monitor acquires its mutual exclusion.
    10501174For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry.
    1051 Passthrough can occur for generic helper routines (@swap@, @sort@, etc.) or specific helper routines like the following to implement an atomic counter:
     1175Passthrough can occur for generic helper routines (@swap@, @sort@, \etc) or specific helper routines like the following to implement an atomic counter:
    10521176
    10531177\begin{cfa}
     
    12071331
    12081332The call semantics discussed above have one software engineering issue: only a routine can acquire the mutual-exclusion of a set of monitors. \CFA offers the @mutex@ statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}.
    1209 Table \ref{lst:mutex-stmt} shows an example of the @mutex@ statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired.
     1333Table \ref{f:mutex-stmt} shows an example of the @mutex@ statement, which introduces a new scope in which the mutual-exclusion of a set of monitors is acquired.
    12101334Beyond naming, the @mutex@ statement has no semantic difference from a routine call with @mutex@ parameters.
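For orientation, a minimal sketch of the equivalence (monitor and routine names are illustrative): the first routine acquires mutual exclusion through a @mutex@ parameter, the second through the @mutex@ statement, with no semantic difference beyond avoiding the extra routine name.
\begin{cfa}
monitor M { int cnt; };
void inc( M & mutex m ) {                       // acquisition via mutex parameter
	m.cnt += 1;
}
void inc2( M & m ) {
	mutex( m ) {                                // acquisition via mutex statement, new scope
		m.cnt += 1;
	}
}
\end{cfa}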
    12111335
     
    12371361\end{center}
    12381362\caption{Regular call semantics vs. \protect\lstinline|mutex| statement}
    1239 \label{lst:mutex-stmt}
     1363\label{f:mutex-stmt}
    12401364\end{table}
    12411365
     
    12861410In addition to mutual exclusion, the monitors at the core of \CFA's concurrency can also be used to achieve synchronization.
    12871411With monitors, this capability is generally achieved with internal or external scheduling as in~\cite{Hoare74}.
    1288 With \textbf{scheduling} loosely defined as deciding which thread acquires the critical section next, \textbf{internal scheduling} means making the decision from inside the critical section (i.e., with access to the shared state), while \textbf{external scheduling} means making the decision when entering the critical section (i.e., without access to the shared state).
     1412With \textbf{scheduling} loosely defined as deciding which thread acquires the critical section next, \textbf{internal scheduling} means making the decision from inside the critical section (\ie with access to the shared state), while \textbf{external scheduling} means making the decision when entering the critical section (\ie without access to the shared state).
    12891413Since internal scheduling within a single monitor is mostly a solved problem, this paper concentrates on extending internal scheduling to multiple monitors.
    12901414Indeed, like the \textbf{bulk-acq} semantics, internal scheduling extends to multiple monitors in a way that is natural to the user but requires additional complexity on the implementation side.
     
    13131437There are two details to note here.
    13141438First, @signal@ is a delayed operation; it only unblocks the waiting thread when it reaches the end of the critical section.
    1315 This semantics is needed to respect mutual-exclusion, i.e., the signaller and signalled thread cannot be in the monitor simultaneously.
     1439This semantics is needed to respect mutual-exclusion, \ie the signaller and signalled thread cannot be in the monitor simultaneously.
    13161440The alternative is to return immediately after the call to @signal@, which is significantly more restrictive.
    13171441Second, in \CFA, while it is common to store a @condition@ as a field of the monitor, a @condition@ variable can be stored/created independently of a monitor.
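To ground these two points, a minimal single-monitor sketch of internal scheduling (the buffer type and routine names are illustrative); the @signal@ in @put@ is delayed, so the waiter in @take@ only restarts once @put@ has released the monitor.
\begin{cfa}
monitor Buffer {
	condition notEmpty;                         // condition stored as a monitor field
	int elem;
	_Bool full;
};
void put( Buffer & mutex buf, int v ) with( buf ) {
	elem = v;  full = true;
	signal( notEmpty );                         // delayed until put exits the critical section
}
int take( Buffer & mutex buf ) with( buf ) {
	if ( ! full ) wait( notEmpty );             // atomically blocks and releases the monitor
	full = false;
	return elem;
}
\end{cfa}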
     
    14311555
    14321556A larger example is presented to show complex issues for \textbf{bulk-acq} and its implementation options are analyzed.
    1433 Listing \ref{lst:int-bulk-cfa} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{lst:int-bulk-cfa} shows the corresponding \CFA code to implement the cfa-code in listing \ref{lst:int-bulk-cfa}.
    1434 For the purpose of translating the given cfa-code into \CFA-code, any method of introducing a monitor is acceptable, e.g., @mutex@ parameters, global variables, pointer parameters, or using locals with the @mutex@ statement.
     1557Figure~\ref{f:int-bulk-cfa} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{f:int-bulk-cfa} shows the corresponding \CFA code to implement the cfa-code in listing \ref{f:int-bulk-cfa}.
     1558For the purpose of translating the given cfa-code into \CFA-code, any method of introducing a monitor is acceptable, \eg @mutex@ parameters, global variables, pointer parameters, or using locals with the @mutex@ statement.
    14351559
    14361560\begin{figure}
     
    14621586\end{cfa}
    14631587\end{multicols}
    1464 \begin{cfa}[caption={Internal scheduling with \textbf{bulk-acq}},label={lst:int-bulk-cfa}]
     1588\begin{cfa}[caption={Internal scheduling with \textbf{bulk-acq}},label={f:int-bulk-cfa}]
    14651589\end{cfa}
    14661590\begin{center}
     
    14981622\end{cfa}
    14991623\end{multicols}
    1500 \begin{cfa}[caption={Equivalent \CFA code for listing \ref{lst:int-bulk-cfa}},label={lst:int-bulk-cfa}]
     1624\begin{cfa}[caption={Equivalent \CFA code for listing \ref{f:int-bulk-cfa}},label={f:int-bulk-cfa}]
    15011625\end{cfa}
    15021626\begin{multicols}{2}
     
    15231647\end{cfa}
    15241648\end{multicols}
    1525 \begin{cfa}[caption={Listing \ref{lst:int-bulk-cfa}, with delayed signalling comments},label={lst:int-secret}]
     1649\begin{cfa}[caption={Figure~\ref{f:int-bulk-cfa}, with delayed signalling comments},label={f:int-secret}]
    15261650\end{cfa}
    15271651\end{figure}
    15281652
    1529 The complexity begins at code sections 4 and 8 in listing \ref{lst:int-bulk-cfa}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors.
     1653The complexity begins at code sections 4 and 8 in listing \ref{f:int-bulk-cfa}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors.
    15301654The root of the problem is that \textbf{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous cfa-code.
    1531 When the signaller thread reaches the location where it should ``release @A & B@'' (listing \ref{lst:int-bulk-cfa} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor @B@ to the waiting thread.
     1655When the signaller thread reaches the location where it should ``release @A & B@'' (listing \ref{f:int-bulk-cfa} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor @B@ to the waiting thread.
    15321656This ownership transfer is required in order to prevent barging into @B@ by another thread, since both the signalling and signalled threads still need monitor @A@.
    15331657There are three options:
     
    15381662This solution has the main benefit of transferring ownership of groups of monitors, which simplifies the semantics from multiple objects to a single group of objects, effectively making the existing single-monitor semantic viable by simply changing monitors to monitor groups.
    15391663This solution releases the monitors once every monitor in a group can be released.
    1540 However, since some monitors are never released (e.g., the monitor of a thread), this interpretation means a group might never be released.
     1664However, since some monitors are never released (\eg the monitor of a thread), this interpretation means a group might never be released.
    15411665A more interesting interpretation is to transfer the group until all its monitors are released, which means the group is not passed further and a thread can retain its locks.
    15421666
    1543 However, listing \ref{lst:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors.
    1544 Listing \ref{lst:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable.
     1667However, listing \ref{f:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors.
     1668Figure~\ref{f:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable.
    15451669Because the third thread is signalled when secretly holding @B@, the goal  becomes unreachable.
    1546 Depending on the order of signals (listing \ref{lst:dependency} line \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen:
     1670Depending on the order of signals (listing \ref{f:dependency} line \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen:
    15471671
    15481672\paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor @A@ needs to be passed to thread $\beta$ when thread $\alpha$ is done with it.
     
    15511675
    15521676Note that ordering is not determined by a race condition but by whether signalled threads are enqueued in FIFO or FILO order.
    1553 However, regardless of the answer, users can move line \ref{line:signal-a} before line \ref{line:signal-ab} and get the reverse effect for listing \ref{lst:dependency}.
     1677However, regardless of the answer, users can move line \ref{line:signal-a} before line \ref{line:signal-ab} and get the reverse effect for listing \ref{f:dependency}.
    15541678
    15551679In both cases, the threads need to be able to distinguish, on a per monitor basis, which ones need to be released and which ones need to be transferred, which means knowing when to release a group becomes complex and inefficient (see next section) and therefore effectively precludes this approach.
     
    15861710\end{cfa}
    15871711\end{multicols}
    1588 \begin{cfa}[caption={Pseudo-code for the three thread example.},label={lst:dependency}]
     1712\begin{cfa}[caption={Pseudo-code for the three thread example.},label={f:dependency}]
    15891713\end{cfa}
    15901714\begin{center}
    15911715\input{dependency}
    15921716\end{center}
    1593 \caption{Dependency graph of the statements in listing \ref{lst:dependency}}
     1717\caption{Dependency graph of the statements in listing \ref{f:dependency}}
    15941718\label{fig:dependency}
    15951719\end{figure}
    15961720
    1597 In listing \ref{lst:int-bulk-cfa}, there is a solution that satisfies both barging prevention and mutual exclusion.
     1721In listing \ref{f:int-bulk-cfa}, there is a solution that satisfies both barging prevention and mutual exclusion.
     15981722If ownership of both monitors is transferred to the waiter when the signaller releases @A & B@ and then the waiter transfers ownership of @A@ back to the signaller when it releases it, then the problem is solved (@B@ is no longer in use at this point).
    15991723Dynamically finding the correct order is therefore the second possible solution.
    16001724The problem is effectively resolving a dependency graph of ownership requirements.
    16011725Here even the simplest of code snippets requires two transfers and has a super-linear complexity.
    1602 This complexity can be seen in listing \ref{lst:explosion}, which is just a direct extension to three monitors, requires at least three ownership transfer and has multiple solutions.
      1726This complexity can be seen in listing \ref{f:explosion}, which is just a direct extension to three monitors, requires at least three ownership transfers, and has multiple solutions.
     16031727Furthermore, the presence of multiple solutions for ownership transfer can cause deadlock problems if a specific solution is not consistently picked, in the same way that inconsistent lock-acquisition orders can cause deadlocks.
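
To make the locking analogy concrete, the following plain-C sketch (an illustrative example only, not \CFA runtime code; the thread bodies @t1@ and @t2@ are hypothetical) shows how two threads acquiring the same two mutexes in opposite orders can deadlock, which is the same cycle created when ownership-transfer orders are chosen inconsistently.
\begin{cfa}
// Plain C: two threads lock A and B in opposite orders, so each can end up
// holding one lock while waiting for the other, a classic deadlock cycle.
#include <pthread.h>
#include <stdio.h>

pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

void * t1( void * arg ) {
	pthread_mutex_lock( &A );		// t1 holds A
	pthread_mutex_lock( &B );		// blocks if t2 already holds B
	pthread_mutex_unlock( &B );
	pthread_mutex_unlock( &A );
	return NULL;
}
void * t2( void * arg ) {
	pthread_mutex_lock( &B );		// t2 holds B
	pthread_mutex_lock( &A );		// blocks if t1 already holds A, closing the cycle
	pthread_mutex_unlock( &A );
	pthread_mutex_unlock( &B );
	return NULL;
}
int main() {
	pthread_t p1, p2;
	pthread_create( &p1, NULL, t1, NULL );
	pthread_create( &p2, NULL, t2, NULL );
	pthread_join( p1, NULL );		// may never return when the cycle occurs
	pthread_join( p2, NULL );
}
\end{cfa}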
    16041728\begin{figure}
     
    16261750\end{cfa}
    16271751\end{multicols}
    1628 \begin{cfa}[caption={Extension to three monitors of listing \ref{lst:int-bulk-cfa}},label={lst:explosion}]
     1752\begin{cfa}[caption={Extension to three monitors of listing \ref{f:int-bulk-cfa}},label={f:explosion}]
    16291753\end{cfa}
    16301754\end{figure}
    16311755
    1632 Given the three threads example in listing \ref{lst:dependency}, figure \ref{fig:dependency} shows the corresponding dependency graph that results, where every node is a statement of one of the three threads, and the arrows the dependency of that statement (e.g., $\alpha1$ must happen before $\alpha2$).
     1756Given the three threads example in listing \ref{f:dependency}, figure \ref{fig:dependency} shows the corresponding dependency graph that results, where every node is a statement of one of the three threads, and the arrows the dependency of that statement (\eg $\alpha1$ must happen before $\alpha2$).
    16331757The extra challenge is that this dependency graph is effectively post-mortem, but the runtime system needs to be able to build and solve these graphs as the dependencies unfold.
    16341758Resolving dependency graphs being a complex and expensive endeavour, this solution is not the preferred one.
     
    16361760\subsubsection{Partial Signalling} \label{partial-sig}
    16371761Finally, the solution that is chosen for \CFA is to use partial signalling.
    1638 Again using listing \ref{lst:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@.
      1762Again using listing \ref{f:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at line \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@.
    16391763Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread.
    16401764This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met.
     
    16421766Furthermore, after being fully implemented, this solution does not appear to have any significant downsides.
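
The following plain-C sketch outlines the bookkeeping implied by these two actions; the structure and names (@monitor@, @waiter@, @destined@, @pending@, @release@, @unblock@) are assumptions chosen for illustration and do not correspond to the \CFA runtime source.
\begin{cfa}
// Sketch of partial signalling: each waiter counts the monitors it still lacks;
// releasing a monitor destined for a waiter transfers ownership and decrements
// the count, and the waiter is unblocked only when the count reaches zero.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct waiter {
	size_t pending;					// monitors not yet transferred to this waiter
	bool ready;						// set when the waiter may run again
};
struct monitor {
	struct waiter * destined;		// waiter that should receive this monitor on release
};

static void unblock( struct waiter * w ) {		// stand-in for the real scheduler wake-up
	w->ready = true;
}

static void release( struct monitor * m ) {
	struct waiter * w = m->destined;
	if ( w != NULL ) {							// pass ownership instead of unlocking
		m->destined = NULL;
		if ( --w->pending == 0 ) unblock( w );	// last missing monitor transferred
	} // else: normal unlock path
}

int main() {
	struct waiter w = { .pending = 2, .ready = false };
	struct monitor A = { .destined = &w }, B = { .destined = &w };
	release( &B );								// B transferred, waiter still needs A
	printf( "after B: ready=%d\n", w.ready );	// 0
	release( &A );								// A transferred, waiter can now run
	printf( "after A: ready=%d\n", w.ready );	// 1
}
\end{cfa}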
    16431767
    1644 Using partial signalling, listing \ref{lst:dependency} can be solved easily:
     1768Using partial signalling, listing \ref{f:dependency} can be solved easily:
    16451769\begin{itemize}
    16461770        \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor @B@ to thread $\alpha$ and continues to hold monitor @A@.
     
    18071931This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency.
    18081932Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring.
    1809 External scheduling can generally be done either in terms of control flow (e.g., Ada with @accept@, \uC with @_Accept@) or in terms of data (e.g., Go with channels).
     1933External scheduling can generally be done either in terms of control flow (\eg Ada with @accept@, \uC with @_Accept@) or in terms of data (\eg Go with channels).
     18101934Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the language's semantics.
    18111935Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines.
     
    18731997
    18741998There are other alternatives to these pictures, but in the case of the left picture, implementing a fast accept check is relatively easy.
    1875 Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (e.g., 128) of mutex members.
     1999Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (\eg 128) of mutex members.
    18762000This approach requires a unique dense ordering of routines with an upper-bound and that ordering must be consistent across translation units.
     18762000For OO languages these constraints are common, since objects can only add member routines consistently across translation units via inheritance.
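
A plain-C sketch of such a fixed-size accept check follows; the 64-member limit, the @accept_mask@ type, and the routine names are illustrative assumptions rather than the \CFA implementation.
\begin{cfa}
// Sketch of a fixed-size accept check: each mutex member has a dense index,
// the acceptor state is a bitmask, and the membership test is a single AND.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t accept_mask;		// up to 64 mutex members; use two words for 128

static accept_mask accepted;		// rebuilt whenever the acceptor queue changes

static void accept_member( unsigned member ) { accepted |= (accept_mask)1 << member; }
static void clear_accepts( void ) { accepted = 0; }

static bool is_accepted( unsigned member ) {	// the fast-path check
	return ( accepted & ((accept_mask)1 << member) ) != 0;
}

int main() {
	enum { PUSH = 0, POP = 1, EMPTY = 2 };		// dense ordering of mutex members
	accept_member( POP );
	printf( "push accepted? %d\n", is_accepted( PUSH ) );	// 0
	printf( "pop  accepted? %d\n", is_accepted( POP ) );	// 1
	clear_accepts();
}
\end{cfa}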
     
    18832007Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions.
    18842008Storing an array of accepted function pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search.
    1885 Furthermore, supporting nested external scheduling (e.g., listing \ref{lst:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued.
     2009Furthermore, supporting nested external scheduling (\eg listing \ref{f:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued.
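
The following plain-C sketch illustrates the dynamic alternative, where the accepted set is an array of function pointers searched linearly; as before, the names (@accept_set@, @is_accepted@) are assumptions for illustration only.
\begin{cfa}
// Sketch of a dynamically generated mask: the accepted set is built per waitfor
// as an array of function pointers, so the membership test is a linear search.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*mutex_fn)( void );

struct accept_set {
	const mutex_fn * fns;			// functions accepted by the pending waitfor
	size_t cnt;
};

static bool is_accepted( const struct accept_set * s, mutex_fn caller ) {
	for ( size_t i = 0; i < s->cnt; i += 1 ) {	// linear search replaces the single AND
		if ( s->fns[i] == caller ) return true;
	}
	return false;
}

static void push( void ) {}			// placeholder mutex members
static void pop( void ) {}

int main() {
	mutex_fn accepted[] = { pop };
	struct accept_set s = { accepted, 1 };
	printf( "push accepted? %d\n", is_accepted( &s, push ) );	// 0
	printf( "pop  accepted? %d\n", is_accepted( &s, pop ) );	// 1
}
\end{cfa}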
    18862010
    18872011\begin{figure}
    1888 \begin{cfa}[caption={Example of nested external scheduling},label={lst:nest-ext}]
     2012\begin{cfa}[caption={Example of nested external scheduling},label={f:nest-ext}]
    18892013monitor M {};
    18902014void foo( M & mutex a ) {}
     
     19912115While the set of monitors can be any list of expressions, the function name is more restricted because the compiler validates at compile time the function type and the parameters used with the @waitfor@ statement.
    19922116It checks that the set of monitors passed in matches the requirements for a function call.
    1993 Listing \ref{lst:waitfor} shows various usages of the waitfor statement and which are acceptable.
     2117Figure~\ref{f:waitfor} shows various usages of the waitfor statement and which are acceptable.
    19942118The choice of the function type is made ignoring any non-@mutex@ parameter.
    19952119One limitation of the current implementation is that it does not handle overloading, but overloading is possible.
    19962120\begin{figure}
    1997 \begin{cfa}[caption={Various correct and incorrect uses of the waitfor statement},label={lst:waitfor}]
     2121\begin{cfa}[caption={Various correct and incorrect uses of the waitfor statement},label={f:waitfor}]
    19982122monitor A{};
    19992123monitor B{};
     
     20322156A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks whether a matching function call has already arrived and otherwise continues.
    20332157Any and all of these clauses can be preceded by a @when@ condition to dynamically toggle the accept clauses on or off based on some current state.
    2034 Listing \ref{lst:waitfor2} demonstrates several complex masks and some incorrect ones.
     2158Figure~\ref{f:waitfor2} demonstrates several complex masks and some incorrect ones.
    20352159
    20362160\begin{figure}
     
    20822206\end{cfa}
    20832207\caption{Correct and incorrect uses of the or, else, and timeout clause around a waitfor statement}
    2084 \label{lst:waitfor2}
     2208\label{f:waitfor2}
    20852209\end{figure}
    20862210
     
     20962220However, a more expressive approach is to flip the ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current @mutex@ routine, similarly to how a condition is signalled.
    20972221\begin{figure}
    2098 \begin{cfa}[caption={Example of an executor which executes action in series until the destructor is called.},label={lst:dtor-order}]
      2222\begin{cfa}[caption={Example of an executor which executes actions in series until the destructor is called.},label={f:dtor-order}]
    20992223monitor Executer {};
    21002224struct  Action;
     
    21122236\end{cfa}
    21132237\end{figure}
    2114 For example, listing \ref{lst:dtor-order} shows an example of an executor with an infinite loop, which waits for the destructor to break out of this loop.
      2238For example, listing \ref{f:dtor-order} shows an executor with an infinite loop, which waits for the destructor to break out of this loop.
    21152239Switching the semantic meaning introduces an idiomatic way to terminate a task and/or wait for its termination via destruction.
    21162240
     
    21282252In this decade, it is no longer reasonable to create a high-performance application without caring about parallelism.
    21292253Indeed, parallelism is an important aspect of performance and more specifically throughput and hardware utilization.
    2130 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like @fork@, @join@, etc.
     2254The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like @fork@, @join@, \etc.
     21312255However, since these have significant costs and limitations, \textbf{kthread} are now mostly used as an implementation tool rather than a user-oriented one.
    21322256There are several alternatives to solve these issues that all have strengths and weaknesses.
     
     21662290While the choice between the three paradigms listed above may have significant performance implications, it is difficult to pin down the implications of choosing a model at the language level.
    21672291Indeed, in many situations one of these paradigms may show better performance but it all strongly depends on the workload.
    2168 Having a large amount of mostly independent units of work to execute almost guarantees equivalent performance across paradigms and that the \textbf{pool}-based system has the best efficiency thanks to the lower memory overhead (i.e., no thread stack per job).
     2292Having a large amount of mostly independent units of work to execute almost guarantees equivalent performance across paradigms and that the \textbf{pool}-based system has the best efficiency thanks to the lower memory overhead (\ie no thread stack per job).
    21692293However, interactions among jobs can easily exacerbate contention.
    21702294User-level threads allow fine-grain context switching, which results in better resource utilization, but a context switch is more expensive and the extra control means users need to tweak more variables to get the desired performance.
     
    22182342
    22192343The first step towards the monitor implementation is simple @mutex@ routines.
    2220 In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{lst:entry1}.
     2344In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{f:entry1}.
    22212345The entry/exit procedures do not have to be extended to support multiple monitors.
     22222346Indeed, it is sufficient to enter/leave monitors one-by-one as long as the order is correct to prevent deadlock~\cite{Havender68}.
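
For illustration, the following plain-C sketch (not the \CFA entry routine; @lock_group@ and @unlock_group@ are hypothetical helpers) acquires a group of @pthread@ mutexes in one canonical order, by address, so every caller obtains the same order and no deadlock cycle can form.
\begin{cfa}
// Sketch of deadlock-free one-by-one acquisition: sort the group into a single
// global order (here, by address) before locking, so acquisition never cycles.
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static int by_address( const void * a, const void * b ) {
	uintptr_t x = (uintptr_t)*(pthread_mutex_t * const *)a;
	uintptr_t y = (uintptr_t)*(pthread_mutex_t * const *)b;
	return ( x > y ) - ( x < y );
}

static void lock_group( pthread_mutex_t ** locks, size_t n ) {
	qsort( locks, n, sizeof locks[0], by_address );		// one consistent order for every caller
	for ( size_t i = 0; i < n; i += 1 ) pthread_mutex_lock( locks[i] );
}
static void unlock_group( pthread_mutex_t ** locks, size_t n ) {
	for ( size_t i = n; i > 0; i -= 1 ) pthread_mutex_unlock( locks[i - 1] );
}

int main() {
	pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER, B = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t * g1[] = { &A, &B }, * g2[] = { &B, &A };	// callers list them differently
	lock_group( g1, 2 ); unlock_group( g1, 2 );
	lock_group( g2, 2 ); unlock_group( g2, 2 );					// still acquired in the same order
}
\end{cfa}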
     
    22462370\end{cfa}
    22472371\end{multicols}
    2248 \begin{cfa}[caption={Initial entry and exit routine for monitors},label={lst:entry1}]
     2372\begin{cfa}[caption={Initial entry and exit routine for monitors},label={f:entry1}]
    22492373\end{cfa}
    22502374\end{figure}
     
    22562380First of all, interaction between @otype@ polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying.
    22572381Therefore, the main question is how to support @dtype@ polymorphism.
    2258 It is important to present the difference between the two acquiring options: \textbf{callsite-locking} and entry-point locking, i.e., acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call.
     2382It is important to present the difference between the two acquiring options: \textbf{callsite-locking} and entry-point locking, \ie acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call.
    22592383For example:
    22602384\begin{table}
     
    23132437\end{table}
    23142438
    2315 Note the @mutex@ keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, e.g.:
     2439Note the @mutex@ keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, \eg:
    23162440\begin{cfa}
    23172441// Incorrect: T may not be monitor
     
     23262450Both entry-point locking and \textbf{callsite-locking} are feasible implementations.
    23272451The current \CFA implementation uses entry-point locking because it requires less work when using \textbf{raii}, effectively transferring the burden of implementation to object construction/destruction.
    2328 It is harder to use \textbf{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, i.e., the function body.
     2452It is harder to use \textbf{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, \ie the function body.
    23292453For example, the monitor call can appear in the middle of an expression.
    23302454Furthermore, entry-point locking requires less code generation since any useful routine is called multiple times but there is only one entry point for many call sites.
     
    23592483Specifically, all @pthread@s created also have a stack created with them, which should be used as much as possible.
     23602484Normally, coroutines also create their own stack to run on; however, in the case of the coroutines used for processors, these coroutines run directly on the \textbf{kthread} stack, effectively stealing the processor stack.
    2361 The exception to this rule is the Main Processor, i.e., the initial \textbf{kthread} that is given to any program.
     2485The exception to this rule is the Main Processor, \ie the initial \textbf{kthread} that is given to any program.
     23622486In order to respect C user expectations, the stack of the initial kernel thread, the main stack of the program, is used by the main user thread rather than the main processor, since the main stack can grow very large.
    23632487
     
    23902514When the preemption system receives a change in preemption, it inserts the time in a sorted order and sets a kernel timer for the closest one, effectively stepping through preemption events on each signal sent by the timer.
    23912515These timers use the Linux signal {\tt SIGALRM}, which is delivered to the process rather than the kernel-thread.
    2392 This results in an implementation problem, because when delivering signals to a process, the kernel can deliver the signal to any kernel thread for which the signal is not blocked, i.e.:
     2516This results in an implementation problem, because when delivering signals to a process, the kernel can deliver the signal to any kernel thread for which the signal is not blocked, \ie:
    23932517\begin{quote}
    23942518A process-directed signal may be delivered to any one of the threads that does not currently have the signal blocked.
     
    24062530However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks.
    24072531For this reason, the alarm thread is in a tight loop around a system call to @sigwaitinfo@, requiring very little CPU time for preemption.
    2408 One final detail about the alarm thread is how to wake it when additional communication is required (e.g., on thread termination).
     2532One final detail about the alarm thread is how to wake it when additional communication is required (\eg on thread termination).
     24092533This unblocking is also done using {\tt SIGALRM}, but sent through @pthread_sigqueue@.
    24102534Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel.
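
The following plain-C sketch mirrors this arrangement using standard POSIX/GNU calls (@pthread_sigmask@, @sigwaitinfo@, @pthread_sigqueue@); it illustrates the described design but is not the \CFA kernel code, and the helper name @alarm_thread@ is assumed.
\begin{cfa}
// Sketch: SIGALRM is blocked in every thread, one dedicated thread loops on
// sigwaitinfo, and si_code distinguishes timer expiry from an explicit wake-up.
#define _GNU_SOURCE						// pthread_sigqueue is a GNU extension
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void * alarm_thread( void * arg ) {
	sigset_t set;
	sigemptyset( &set );
	sigaddset( &set, SIGALRM );
	for ( ;; ) {
		siginfo_t info;
		if ( sigwaitinfo( &set, &info ) == -1 ) continue;
		if ( info.si_code == SI_QUEUE ) {		// sent via pthread_sigqueue
			printf( "wake-up for other communication\n" );
			return NULL;
		}
		printf( "timer expiry: deliver preemption\n" );
	}
}

int main() {
	sigset_t set;								// block SIGALRM in the whole process ...
	sigemptyset( &set );
	sigaddset( &set, SIGALRM );
	pthread_sigmask( SIG_BLOCK, &set, NULL );	// ... inherited by threads created afterwards

	pthread_t alarm_thr;
	pthread_create( &alarm_thr, NULL, alarm_thread, NULL );
	alarm( 1 );									// process-directed SIGALRM from a kernel timer
	sleep( 2 );
	pthread_sigqueue( alarm_thr, SIGALRM, (union sigval){ .sival_int = 0 } );	// explicit wake-up
	pthread_join( alarm_thr, NULL );
}
\end{cfa}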
     
    24452569\end{figure}
    24462570
    2447 This picture and the proper entry and leave algorithms (see listing \ref{lst:entry2}) is the fundamental implementation of internal scheduling.
     2571This picture and the proper entry and leave algorithms (see listing \ref{f:entry2}) is the fundamental implementation of internal scheduling.
    24482572Note that when a thread is moved from the condition to the AS-stack, it is conceptually split into N pieces, where N is the number of monitors specified in the parameter list.
     24492573The thread is woken up when all the pieces have been popped from the AS-stacks and made active.
     
    24782602\end{cfa}
    24792603\end{multicols}
    2480 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling},label={lst:entry2}]
     2604\begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling},label={f:entry2}]
    24812605\end{cfa}
    24822606\end{figure}
    24832607
    2484 The solution discussed in \ref{intsched} can be seen in the exit routine of listing \ref{lst:entry2}.
     2608The solution discussed in \ref{intsched} can be seen in the exit routine of listing \ref{f:entry2}.
    24852609Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has transferred ownership.
     24862610This solution is deadlock safe and prevents any potential barging.
     
     24982622The main idea behind them is that a thread cannot contain an arbitrary number of intrusive ``next'' pointers for linking onto monitors.
     24992623The @condition node@ is the data structure that is queued onto a condition variable; when signalled, the condition queue is popped and each @condition criterion@ is moved to the AS-stack.
    2500 Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{lst:entry2}.
     2624Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{f:entry2}.
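
For illustration, one possible layout of these structures is sketched below in plain C; every field name is an assumption for exposition and does not correspond to the actual \CFA runtime source.
\begin{cfa}
// Sketch: one condition_node per waiting thread containing one condition_criterion
// per monitor, so each monitor's AS-stack links criteria rather than threads.
#include <stdbool.h>
#include <stddef.h>

struct monitor;							// incomplete types, details irrelevant here
struct thread_desc;
struct condition_node;

struct condition_criterion {
	struct monitor * target;				// monitor this criterion is queued on
	struct condition_node * owner;			// node this criterion belongs to
	struct condition_criterion * next;		// intrusive link for the monitor's AS-stack
	bool ready;								// set when popped from the AS-stack
};

struct condition_node {
	struct thread_desc * waiter;			// thread to wake once every criterion is ready
	struct condition_criterion * criteria;	// one criterion per monitor in the wait
	size_t count;							// number of monitors, N
	struct condition_node * next;			// intrusive link for the condition queue
};

// The waiter may run only after the last criterion is made ready.
static bool node_ready( const struct condition_node * node ) {
	for ( size_t i = 0; i < node->count; i += 1 ) {
		if ( ! node->criteria[i].ready ) return false;
	}
	return true;
}

int main() {
	struct condition_criterion crit[2] = { { .ready = false }, { .ready = false } };
	struct condition_node node = { .criteria = crit, .count = 2 };
	crit[0].ready = true;					// first monitor popped from its AS-stack
	crit[1].ready = true;					// second monitor popped: waiter may run
	return node_ready( &node ) ? 0 : 1;
}
\end{cfa}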
    25012625
    25022626% ======================================================================
     
    25062630% ======================================================================
    25072631Similarly to internal scheduling, external scheduling for multiple monitors relies on the idea that waiting-thread queues are no longer specific to a single monitor, as mentioned in section \ref{extsched}.
    2508 For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (i.e., no signal statement uses multiple conditions).
     2632For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (\ie no signal statement uses multiple conditions).
    25092633However, in the case of external scheduling, there is no equivalent object which is associated with @waitfor@ statements.
    25102634This absence means the queues holding the waiting threads must be stored inside at least one of the monitors that is acquired.
     
    25332657Note that if a thread has acquired two monitors but executes a @waitfor@ with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless.
    25342658This becomes relevant when @when@ clauses affect the number of monitors passed to a @waitfor@ statement.
    2535         \item The entry/exit routines need to be updated as shown in listing \ref{lst:entry3}.
     2659        \item The entry/exit routines need to be updated as shown in listing \ref{f:entry3}.
    25362660\end{itemize}
    25372661
     
    25412665Indeed, when waiting for the destructors, storage is needed for the waiting context and the lifetime of said storage needs to outlive the waiting operation it is needed for.
     25422666For regular @waitfor@ statements, the call stack of the routine itself matches this requirement, but this is no longer the case when waiting for the destructor, since the waiting context is pushed onto the AS-stack for later.
    2543 The @waitfor@ semantics can then be adjusted correspondingly, as seen in listing \ref{lst:entry-dtor}
      2667The @waitfor@ semantics can then be adjusted correspondingly, as seen in listing \ref{f:entry-dtor}.
    25442668
    25452669\begin{figure}
     
    25752699\end{cfa}
    25762700\end{multicols}
    2577 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={lst:entry3}]
     2701\begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={f:entry3}]
    25782702\end{cfa}
    25792703\end{figure}
     
    26212745\end{cfa}
    26222746\end{multicols}
    2623 \begin{cfa}[caption={Pseudo code for the \protect\lstinline|waitfor| routine and the \protect\lstinline|mutex| entry routine for destructors},label={lst:entry-dtor}]
     2747\begin{cfa}[caption={Pseudo code for the \protect\lstinline|waitfor| routine and the \protect\lstinline|mutex| entry routine for destructors},label={f:entry-dtor}]
    26242748\end{cfa}
    26252749\end{figure}
     
     26372761For example, here is a very simple two-thread pipeline that could be used for a simulator of a game engine:
    26382762\begin{figure}
    2639 \begin{cfa}[caption={Toy simulator using \protect\lstinline|thread|s and \protect\lstinline|monitor|s.},label={lst:engine-v1}]
     2763\begin{cfa}[caption={Toy simulator using \protect\lstinline|thread|s and \protect\lstinline|monitor|s.},label={f:engine-v1}]
    26402764// Visualization declaration
    26412765thread Renderer {} renderer;
     
    26692793Luckily, the monitor semantics can also be used to clearly enforce a shutdown order in a concise manner:
    26702794\begin{figure}
    2671 \begin{cfa}[caption={Same toy simulator with proper termination condition.},label={lst:engine-v2}]
     2795\begin{cfa}[caption={Same toy simulator with proper termination condition.},label={f:engine-v2}]
    26722796// Visualization declaration
    26732797thread Renderer {} renderer;
     
    27182842}
    27192843\end{cfa}
    2720 This function is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, i.e., no preemption.
    2721 However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{lst:fiber-uthread}
     2844This function is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, \ie no preemption.
      2845However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{f:fiber-uthread}.
    27222846\begin{figure}
    27232847\lstset{language=CFA,deletedelim=**[is][]{`}{`}}
    2724 \begin{cfa}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={lst:fiber-uthread}]
     2848\begin{cfa}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={f:fiber-uthread}]
    27252849// Cluster forward declaration
    27262850struct cluster;
     
    28312955Yielding causes the thread to context-switch to the scheduler and back, more precisely: from the \textbf{uthread} to the \textbf{kthread} then from the \textbf{kthread} back to the same \textbf{uthread} (or a different one in the general case).
     28322956In order to make the comparison fair, coroutines also execute a 2-step context-switch by resuming another coroutine, which does nothing but suspend in a tight loop, giving a resume/suspend cycle instead of a yield.
    2833 Listing \ref{lst:ctx-switch} shows the code for coroutines and threads with the results in table \ref{tab:ctx-switch}.
     2957Figure~\ref{f:ctx-switch} shows the code for coroutines and threads with the results in table \ref{tab:ctx-switch}.
    28342958All omitted tests are functionally identical to one of these tests.
    28352959The difference between coroutines and threads can be attributed to the cost of scheduling.
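
For readers who want to reproduce the measurement style, the following plain-C harness shows the assumed methodology: time a large number of back-to-back operations and report the average; the operation timed here (@sched_yield@) is only a stand-in for the \CFA yield or resume/suspend cycle being benchmarked.
\begin{cfa}
// Sketch of a benchmark harness: repeat the operation N times between two
// monotonic clock readings and report the average cost per operation.
#include <sched.h>
#include <stdio.h>
#include <time.h>

#define N 1000000

static long long elapsed_ns( struct timespec s, struct timespec e ) {
	return ( e.tv_sec - s.tv_sec ) * 1000000000LL + ( e.tv_nsec - s.tv_nsec );
}

int main() {
	struct timespec start, end;
	clock_gettime( CLOCK_MONOTONIC, &start );
	for ( int i = 0; i < N; i += 1 ) {
		sched_yield();					// stand-in for the primitive being timed
	}
	clock_gettime( CLOCK_MONOTONIC, &end );
	printf( "avg %.1f ns per operation\n", (double)elapsed_ns( start, end ) / N );
}
\end{cfa}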
     
    28742998\end{cfa}
    28752999\end{multicols}
    2876 \begin{cfa}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={lst:ctx-switch}]
     3000\begin{cfa}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={f:ctx-switch}]
    28773001\end{cfa}
    28783002\end{figure}
     
    29023026The next interesting benchmark is to measure the overhead to enter/leave a critical-section.
    29033027For monitors, the simplest approach is to measure how long it takes to enter and leave a monitor routine.
    2904 Listing \ref{lst:mutex} shows the code for \CFA.
     3028Figure~\ref{f:mutex} shows the code for \CFA.
     29053029To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a @pthread_mutex@ lock are also measured.
     29063030The results are shown in table \ref{tab:mutex}.
    29073031
    29083032\begin{figure}
    2909 \begin{cfa}[caption={\CFA benchmark code used to measure mutex routines.},label={lst:mutex}]
     3033\begin{cfa}[caption={\CFA benchmark code used to measure mutex routines.},label={f:mutex}]
    29103034monitor M {};
    29113035void __attribute__((noinline)) call( M & mutex m /*, m2, m3, m4*/ ) {}
     
    29483072\subsection{Internal Scheduling}
    29493073The internal-scheduling benchmark measures the cost of waiting on and signalling a condition variable.
    2950 Listing \ref{lst:int-sched} shows the code for \CFA, with results table \ref{tab:int-sched}.
      3074Figure~\ref{f:int-sched} shows the code for \CFA, with results in table \ref{tab:int-sched}.
    29513075As with all other benchmarks, all omitted tests are functionally identical to one of these tests.
    29523076
    29533077\begin{figure}
    2954 \begin{cfa}[caption={Benchmark code for internal scheduling},label={lst:int-sched}]
     3078\begin{cfa}[caption={Benchmark code for internal scheduling},label={f:int-sched}]
    29553079volatile int go = 0;
    29563080condition c;
     
    30073131\subsection{External Scheduling}
     30083132The external-scheduling benchmark measures the cost of the @waitfor@ statement (@_Accept@ in \uC).
    3009 Listing \ref{lst:ext-sched} shows the code for \CFA, with results in table \ref{tab:ext-sched}.
     3133Figure~\ref{f:ext-sched} shows the code for \CFA, with results in table \ref{tab:ext-sched}.
    30103134As with all other benchmarks, all omitted tests are functionally identical to one of these tests.
    30113135
    30123136\begin{figure}
    3013 \begin{cfa}[caption={Benchmark code for external scheduling},label={lst:ext-sched}]
     3137\begin{cfa}[caption={Benchmark code for external scheduling},label={f:ext-sched}]
    30143138volatile int go = 0;
    30153139monitor M {};
     
    30613185\end{table}
    30623186
     3187
    30633188\subsection{Object Creation}
    30643189Finally, the last benchmark measures the cost of creation for concurrent objects.
    3065 Listing \ref{lst:creation} shows the code for @pthread@s and \CFA threads, with results shown in table \ref{tab:creation}.
     3190Figure~\ref{f:creation} shows the code for @pthread@s and \CFA threads, with results shown in table \ref{tab:creation}.
    30663191As with all other benchmarks, all omitted tests are functionally identical to one of these tests.
     30673192The only note here is that the call stacks of \CFA coroutines are lazily created; therefore, without priming the coroutine, the creation cost is very low.
     
    31073232\end{center}
    31083233\caption{Benchmark code for \protect\lstinline|pthread|s and \CFA to measure object creation}
    3109 \label{lst:creation}
     3234\label{f:creation}
    31103235\end{figure}
    31113236
     
     31693294While most of the parallelism tools are aimed at data parallelism and control-flow parallelism, many modern workloads are bound not by computation but by IO operations, a common case being web servers and XaaS (anything as a service).
    31703295These types of workloads often require significant engineering around amortizing costs of blocking IO operations.
    3171 At its core, non-blocking I/O is an operating system level feature that allows queuing IO operations (e.g., network operations) and registering for notifications instead of waiting for requests to complete.
     3296At its core, non-blocking I/O is an operating system level feature that allows queuing IO operations (\eg network operations) and registering for notifications instead of waiting for requests to complete.
     31723297In this context, the role of the language is to make non-blocking IO easily available with low overhead.
    31733298The current trend is to use asynchronous programming using tools like callbacks and/or futures and promises, which can be seen in frameworks like Node.js~\cite{NodeJs} for JavaScript, Spring MVC~\cite{SpringMVC} for Java and Django~\cite{Django} for Python.
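
As a concrete example of registering for notifications rather than blocking on each request, the following plain-C sketch uses Linux @epoll@ (one possible OS mechanism, independent of \CFA) to wait for a descriptor to become readable.
\begin{cfa}
// Sketch: register a descriptor once, then wait for a readiness notification
// instead of blocking inside every read call.
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main() {
	int ep = epoll_create1( 0 );
	if ( ep == -1 ) { perror( "epoll_create1" ); return 1; }

	struct epoll_event ev = { .events = EPOLLIN, .data.fd = STDIN_FILENO };
	epoll_ctl( ep, EPOLL_CTL_ADD, STDIN_FILENO, &ev );	// register for notifications

	struct epoll_event ready[1];
	int n = epoll_wait( ep, ready, 1, 5000 );			// wait up to 5 seconds for readiness
	if ( n > 0 ) printf( "fd %d is readable\n", ready[0].data.fd );
	else printf( "no IO ready\n" );
	close( ep );
}
\end{cfa}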
     
    31843309This type of parallelism can be achieved both at the language level and at the library level.
     31853310The canonical example of implicit parallelism is parallel for loops, which are the simplest example of divide-and-conquer algorithms~\cite{uC++book}.
    3186 Table \ref{lst:parfor} shows three different code examples that accomplish point-wise sums of large arrays.
     3311Table \ref{f:parfor} shows three different code examples that accomplish point-wise sums of large arrays.
    31873312Note that none of these examples explicitly declare any concurrency or parallelism objects.
    31883313
     
    32673392\end{center}
    32683393\caption{For loop to sum numbers: Sequential, using library parallelism and language parallelism.}
    3269 \label{lst:parfor}
     3394\label{f:parfor}
    32703395\end{table}
    32713396
  • doc/papers/general/Makefile

    rb2da0574 r2ae16219  
    33Build = build
    44Figures = figures
    5 Macros = AMA/AMA-stix/ama
     5Macros = ../AMA/AMA-stix/ama
    66TeXLIB = .:${Macros}:${Build}:../../bibliography:
    77LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
     
    7373
    7474WileyNJD-AMA.bst:
    75         ln -fs AMA/AMA-stix/ama/WileyNJD-AMA.bst .
     75        ln -fs ../AMA/AMA-stix/ama/WileyNJD-AMA.bst .
    7676
    7777${GRAPHS} : timing.gp timing.dat
  • doc/papers/general/Paper.tex

    rb2da0574 r2ae16219  
    1 \documentclass[AMA,STIX1COL]{WileyNJD-v2}
    2 
     1\documentclass[AMA,STIX1COL,STIXSMALL]{WileyNJD-v2}
    32\articletype{RESEARCH ARTICLE}%
    43
     
    8685% Latin abbreviation
    8786\newcommand{\abbrevFont}{\textit}                       % set empty for no italics
     87\@ifundefined{eg}{
    8888\newcommand{\EG}{\abbrevFont{e}.\abbrevFont{g}.}
    8989\newcommand*{\eg}{%
     
    9191                {\@ifnextchar{:}{\EG}%
    9292                        {\EG,\xspace}}%
    93 }%
     93}}{}%
     94\@ifundefined{ie}{
    9495\newcommand{\IE}{\abbrevFont{i}.\abbrevFont{e}.}
    9596\newcommand*{\ie}{%
     
    9798                {\@ifnextchar{:}{\IE}%
    9899                        {\IE,\xspace}}%
    99 }%
     100}}{}%
     101\@ifundefined{etc}{
    100102\newcommand{\ETC}{\abbrevFont{etc}}
    101103\newcommand*{\etc}{%
    102104        \@ifnextchar{.}{\ETC}%
    103105        {\ETC.\xspace}%
    104 }%
     106}}{}%
     107\@ifundefined{etal}{
    105108\newcommand{\ETAL}{\abbrevFont{et}~\abbrevFont{al}}
    106 \renewcommand*{\etal}{%
     109\newcommand*{\etal}{%
    107110        \@ifnextchar{.}{\protect\ETAL}%
    108111                {\protect\ETAL.\xspace}%
    109 }%
     112}}{}%
     113\@ifundefined{viz}{
    110114\newcommand{\VIZ}{\abbrevFont{viz}}
    111115\newcommand*{\viz}{%
    112116        \@ifnextchar{.}{\VIZ}%
    113117                {\VIZ.\xspace}%
    114 }%
     118}}{}%
    115119\makeatother
    116120
     
    174178\author[1]{Robert Schluntz}
    175179\author[1]{Peter A. Buhr*}
    176 \authormark{Aaron Moss \textsc{et al}}
     180\authormark{MOSS \textsc{et al}}
    177181
    178182\address[1]{\orgdiv{David R. Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Ontario}, \country{Canada}}}
     
    20432047\subsection{Integral Suffixes}
    20442048
    2045 Additional integral suffixes are added to cover all the integral types and lengths.
      2049New integral suffixes are added: @hh@ (half of half of @int@) for @char@, @h@ (half of @int@) for @short@, @z@ for @size_t@, and length suffixes for 8, 16, 32, 64, and 128 bit integers.
     2050%Additional integral suffixes are added to cover all the integral types and lengths.
    20462051\begin{cquote}
    20472052\lstDeleteShortInline@%
     
    20492054\begin{cfa}
    2050205520_`hh`     // signed char
    2051 21_`hhu`   // unsigned char
     205621_`hh`u    // unsigned char
    2052205722_`h`       // signed short int
    2053 23_`uh`     // unsigned short int
    2054 24_`z`       // size_t
     205823_u`h`     // unsigned short int
     205924`z`       // size_t
    20552060\end{cfa}
    20562061&
    20572062\begin{cfa}
    2058206320_`L8`      // int8_t
    2059 21_`ul8`     // uint8_t
     206421_u`l8`     // uint8_t
    2060206522_`l16`     // int16_t
    2061 23_`ul16`   // uint16_t
     206623_u`l16`   // uint16_t
    2062206724_`l32`     // int32_t
    20632068\end{cfa}
    20642069&
    20652070\begin{cfa}
    2066 25_`ul32`      // uint32_t
     207125_u`l32`      // uint32_t
    2067207226_`l64`        // int64_t
    2068 27_`l64u`      // uint64_t
     207327_`l64`u      // uint64_t
    2069207426_`L128`     // int128
    2070 27_`L128u`   // unsigned int128
     207527_`L128`u   // unsigned int128
    20712076\end{cfa}
    20722077\end{tabular}
     
    21152120\multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{postfix function}}        & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{constant}}      & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{variable/expression}}   & \multicolumn{1}{c}{\textbf{postfix pointer}}  \\
    21162121\begin{cfa}
    2117 int ?`h( int s );
    2118 int ?`h( double s );
    2119 int ?`m( char c );
    2120 int ?`m( const char * s );
    2121 int ?`t( int a, int b, int c );
    2122 \end{cfa}
    2123 &
    2124 \begin{cfa}
    2125 0 `h;
    2126 3.5`h;
    2127 '1'`m;
    2128 "123" "456"`m;
    2129 [1,2,3]`t;
     2122int |?`h|( int s );
     2123int |?`h|( double s );
     2124int |?`m|( char c );
     2125int |?`m|( const char * s );
     2126int |?`t|( int a, int b, int c );
     2127\end{cfa}
     2128&
     2129\begin{cfa}
     21300 |`h|;
     21313.5|`h|;
     2132'1'|`m|;
     2133"123" "456"|`m|;
     2134[1,2,3]|`t|;
    21302135\end{cfa}
    21312136&
    21322137\begin{cfa}
    21332138int i = 7;
    2134 i`h;
    2135 (i + 3)`h;
    2136 (i + 3.5)`h;
    2137 
    2138 \end{cfa}
    2139 &
    2140 \begin{cfa}
    2141 int (* ?`p)( int i );
    2142 ?`p = ?`h;
    2143 3`p;
    2144 i`p;
    2145 (i + 3)`p;
     2139i|`h|;
     2140(i + 3)|`h|;
     2141(i + 3.5)|`h|;
     2142
     2143\end{cfa}
     2144&
     2145\begin{cfa}
     2146int (* |?`p|)( int i );
     2147|?`p| = |?`h|;
     21483|`p|;
     2149i|`p|;
     2150(i + 3)|`p|;
    21462151\end{cfa}
    21472152\end{tabular}
  • doc/refrat/keywords.tex

    rb2da0574 r2ae16219  
    1111%% Created On       : Sun Aug  6 08:17:27 2017
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Wed Aug 30 22:10:10 2017
    14 %% Update Count     : 5
     13%% Last Modified On : Fri Apr  6 15:16:11 2018
     14%% Update Count     : 7
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616\begin{tabular}{@{}llllll@{}}
    1717\begin{tabular}{@{}l@{}}
    18 ©_At©                   \\
    1918©catch©                 \\
    2019©catchResume©   \\
    2120©choose©                \\
    2221©coroutine©             \\
     22©disable©               \\
    2323\end{tabular}
    2424&
    2525\begin{tabular}{@{}l@{}}
    26 ©disable©               \\
    2726©dtype©                 \\
    2827©enable©                \\
     28©exception©             \\
    2929©fallthrough©   \\
    3030©fallthru©              \\
     
    3535©forall©                \\
    3636©ftype©                 \\
    37 ©lvalue©                \\
    3837©monitor©               \\
     38©mutex©                 \\
    3939\end{tabular}
    4040&
    4141\begin{tabular}{@{}l@{}}
    42 ©mutex©                 \\
    4342©one_t©                 \\
    4443©otype©                 \\
    4544©throw©                 \\
    4645©throwResume©   \\
     46©trait©                 \\
    4747\end{tabular}
    4848&
    4949\begin{tabular}{@{}l@{}}
    50 ©trait©                 \\
    5150©try©                   \\
    5251©ttype©                 \\
    5352©virtual©               \\
    5453©waitfor©               \\
     54©when©                  \\
    5555\end{tabular}
    5656&
    5757\begin{tabular}{@{}l@{}}
    58 ©when©                  \\
    5958©with©                  \\
    6059©zero_t©                \\
     60                                \\
    6161                                \\
    6262                                \\
  • doc/user/user.tex

    rb2da0574 r2ae16219  
    1111%% Created On       : Wed Apr  6 14:53:29 2016
    1212%% Last Modified By : Peter A. Buhr
    13 %% Last Modified On : Tue Feb 13 08:31:21 2018
    14 %% Update Count     : 3161
     13%% Last Modified On : Sat Apr 14 19:04:30 2018
     14%% Update Count     : 3318
    1515%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    1616
     
    283283
    284284double key = 5.0, vals[10] = { /* 10 sorted floating values */ };
    285 double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp );      $\C{// search sorted array}$
     285double * val = (double *)bsearch( &key, vals, 10, sizeof(vals[0]), comp );      §\C{// search sorted array}§
    286286\end{lstlisting}
    287287which can be augmented simply with a polymorphic, type-safe, \CFA-overloaded wrappers:
     
    292292
    293293forall( otype T | { int ?<?( T, T ); } ) unsigned int bsearch( T key, const T * arr, size_t size ) {
    294         T * result = bsearch( key, arr, size ); $\C{// call first version}$
    295         return result ? result - arr : size; }  $\C{// pointer subtraction includes sizeof(T)}$
    296 
    297 double * val = bsearch( 5.0, vals, 10 );        $\C{// selection based on return type}$
     294        T * result = bsearch( key, arr, size ); §\C{// call first version}§
     295        return result ? result - arr : size; }  §\C{// pointer subtraction includes sizeof(T)}§
     296
     297double * val = bsearch( 5.0, vals, 10 );        §\C{// selection based on return type}§
    298298int posn = bsearch( 5.0, vals, 10 );
    299299\end{lstlisting}
     
    353353The 1999 C standard plus GNU extensions.
    354354\item
    355 \Indexc[deletekeywords=inline]{-fgnu89-inline}\index{compilation option!-fgnu89-inline@{\lstinline[deletekeywords=inline]$-fgnu89-inline$}}
     355\Indexc[deletekeywords=inline]{-fgnu89-inline}\index{compilation option!-fgnu89-inline@{\lstinline[deletekeywords=inline]@-fgnu89-inline@}}
    356356Use the traditional GNU semantics for inline routines in C99 mode, which allows inline routines in header files.
    357357\end{description}
     
    506506
    507507C, \CC, and Java (and many other programming languages) have no exponentiation operator\index{exponentiation!operator}\index{operator!exponentiation}, \ie $x^y$, and instead use a routine, like \Indexc{pow}, to perform the exponentiation operation.
    508 \CFA extends the basic operators with the exponentiation operator ©?\?©\index{?\\?@\lstinline$?\?$} and ©?\=?©\index{?\\=?@\lstinline$?\=?$}, as in, ©x \ y© and ©x \= y©, which means $x^y$ and $x \leftarrow x^y$.
     508\CFA extends the basic operators with the exponentiation operator ©?\?©\index{?\\?@\lstinline@?\?@} and ©?\=?©\index{?\\=?@\lstinline@?\=?@}, as in, ©x \ y© and ©x \= y©, which means $x^y$ and $x \leftarrow x^y$.
    509509The priority of the exponentiation operator is between the cast and multiplicative operators, so that ©w * (int)x \ (int)y * z© is parenthesized as ©((w * (((int)x) \ ((int)y))) * z)©.
    510510
     
    524524
    525525
    526 \section{\texorpdfstring{Labelled \LstKeywordStyle{continue} / \LstKeywordStyle{break}}{Labelled continue / break}}
     526\section{\texorpdfstring{Labelled \protect\lstinline@continue@ / \protect\lstinline@break@}{Labelled continue / break}}
    527527
    528528While C provides ©continue© and ©break© statements for altering control flow, both are restricted to one level of nesting for a particular control structure.
    529529Unfortunately, this restriction forces programmers to use \Indexc{goto} to achieve the equivalent control-flow for more than one level of nesting.
    530 To prevent having to switch to the ©goto©, \CFA extends the \Indexc{continue}\index{continue@\lstinline $continue$!labelled}\index{labelled!continue@©continue©} and \Indexc{break}\index{break@\lstinline $break$!labelled}\index{labelled!break@©break©} with a target label to support static multi-level exit\index{multi-level exit}\index{static multi-level exit}~\cite{Buhr85}, as in Java.
     530To prevent having to switch to the ©goto©, \CFA extends the \Indexc{continue}\index{continue@\lstinline@continue@!labelled}\index{labelled!continue@©continue©} and \Indexc{break}\index{break@\lstinline@break@!labelled}\index{labelled!break@©break©} with a target label to support static multi-level exit\index{multi-level exit}\index{static multi-level exit}~\cite{Buhr85}, as in Java.
    531531For both ©continue© and ©break©, the target label must be directly associated with a ©for©, ©while© or ©do© statement;
    532532for ©break©, the target label can also be associated with a ©switch©, ©if© or compound (©{}©) statement.
     
    613613\end{figure}
    614614
    615 Both labelled ©continue© and ©break© are a ©goto©\index{goto@\lstinline $goto$!restricted} restricted in the following ways:
     615Both labelled ©continue© and ©break© are a ©goto©\index{goto@\lstinline@goto@!restricted} restricted in the following ways:
    616616\begin{itemize}
    617617\item
     
    629629
    630630
    631 \section{\texorpdfstring{\LstKeywordStyle{switch} Statement}{switch Statement}}
     631\section{\texorpdfstring{\protect\lstinline@switch@ Statement}{switch Statement}}
    632632
    633633C allows a number of questionable forms for the ©switch© statement:
     
    834834
    835835
    836 \section{\texorpdfstring{\LstKeywordStyle{case} Clause}{case Clause}}
     836\section{\texorpdfstring{\protect\lstinline@case@ Clause}{case Clause}}
    837837
    838838C restricts the ©case© clause of a ©switch© statement to a single value.
     
    871871\end{tabular}
    872872\end{cquote}
    873 In addition, two forms of subranges are allowed to specify case values: a new \CFA form and an existing GNU C form.\footnote{
    874 The GNU C form \emph{requires} spaces around the ellipse.}
    875 \begin{cquote}
    876 \begin{tabular}{@{}l@{\hspace{3em}}l@{\hspace{2em}}l@{}}
    877 \multicolumn{1}{c@{\hspace{3em}}}{\textbf{\CFA}}        & \multicolumn{1}{c@{\hspace{2em}}}{\textbf{GNU C}}     \\
     873In addition, subranges are allowed to specify case values.\footnote{
     874gcc has the same mechanism but awkward syntax, \lstinline@2 ...42@, because a space is required after a number, otherwise the period is a decimal point.}
    878875\begin{cfa}
    879876switch ( i ) {
    880   case ®1~5:®
     877  case ®1~5:®                                   §\C{// 1, 2, 3, 4, 5}§
    881878        ...
    882   case ®10~15:®
     879  case ®10~15:®                                 §\C{// 10, 11, 12, 13, 14, 15}§
    883880        ...
    884881}
    885882\end{cfa}
    886 &
    887 \begin{cfa}
    888 switch ( i )
    889   case ®1 ... 5®:
    890         ...
    891   case ®10 ... 15®:
    892         ...
    893 }
    894 \end{cfa}
    895 &
    896 \begin{cfa}
    897 
    898 // 1, 2, 3, 4, 5
    899 
    900 // 10, 11, 12, 13, 14, 15
    901 
    902 
    903 \end{cfa}
    904 \end{tabular}
    905 \end{cquote}
    906883Lists of subranges are also allowed.
    907884\begin{cfa}
     
    910887
    911888
    912 \section{\texorpdfstring{\LstKeywordStyle{with} Clause / Statement}{with Clause / Statement}}
    913 \label{s:WithClauseStatement}
     889\section{\texorpdfstring{\protect\lstinline@with@ Statement}{with Statement}}
     890\label{s:WithStatement}
     891
     892Grouping heterogeneous data into \newterm{aggregate}s (structure/union) is a common programming practice, and an aggregate can be further organized into more complex structures, such as arrays and containers:
     893\begin{cfa}
     894struct S {                                                                      §\C{// aggregate}§
     895        char c;                                                                 §\C{// fields}§
     896        int i;
     897        double d;
     898};
     899S s, as[10];
     900\end{cfa}
     901However, functions manipulating aggregates must repeat the aggregate name to access its containing fields:
     902\begin{cfa}
     903void f( S s ) {
     904        `s.`c; `s.`i; `s.`d;                                    §\C{// access containing fields}§
     905}
     906\end{cfa}
     907which extends to multiple levels of qualification for nested aggregates.
     908A similar situation occurs in object-oriented programming, \eg \CC:
     909\begin{C++}
     910struct S {
     911        char c;                                                                 §\C{// fields}§
     912        int i;
     913        double d;
     914        void f() {                                                              §\C{// implicit ``this'' aggregate}§
     915                `this->`c; `this->`i; `this->`d;        §\C{// access containing fields}§
     916        }
     917}
     918\end{C++}
     919Object-oriented nesting of member functions in a \lstinline[language=C++]@class/struct@ allows eliding \lstinline[language=C++]$this->$ because of lexical scoping.
     920However, for other aggregate parameters, qualification is necessary:
     921\begin{cfa}
     922struct T { double m, n; };
     923int S::f( T & t ) {                                                     §\C{// multiple aggregate parameters}§
     924        c; i; d;                                                                §\C{\color{red}// this--{\textgreater}.c, this--{\textgreater}.i, this--{\textgreater}.d}§
     925        `t.`m; `t.`n;                                                   §\C{// must qualify}§
     926}
     927\end{cfa}
     928
     929To simplify the programmer experience, \CFA provides a @with@ statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate qualification to fields by opening a scope containing the field identifiers.
      930Hence, the qualified fields become variables, with the side-effect that it is easier to optimize field references in a block.
     931\begin{cfa}
     932void f( S & this ) `with ( this )` {            §\C{// with statement}§
     933        c; i; d;                                                                §\C{\color{red}// this.c, this.i, this.d}§
     934}
     935\end{cfa}
     936with the generality of opening multiple aggregate-parameters:
     937\begin{cfa}
     938void f( S & s, T & t ) `with ( s, t )` {                §\C{// multiple aggregate parameters}§
     939        c; i; d;                                                                §\C{\color{red}// s.c, s.i, s.d}§
     940        m; n;                                                                   §\C{\color{red}// t.m, t.n}§
     941}
     942\end{cfa}
     943
     944In detail, the @with@ statement has the form:
     945\begin{cfa}
     946§\emph{with-statement}§:
     947        'with' '(' §\emph{expression-list}§ ')' §\emph{compound-statement}§
     948\end{cfa}
     949and may appear as the body of a function or nested within a function body.
     950Each expression in the expression-list provides a type and object.
     951The type must be an aggregate type.
     952(Enumerations are already opened.)
     953The object is the implicit qualifier for the open structure-fields.
     954
     955All expressions in the expression list are open in parallel within the compound statement.
     956This semantic is different from Pascal, which nests the openings from left to right.
     957The difference between parallel and nesting occurs for fields with the same name and type:
     958\begin{cfa}
     959struct S { int `i`; int j; double m; } s, w;
     960struct T { int `i`; int k; int m; } t, w;
     961with ( s, t ) {
     962        j + k;                                                                  §\C{// unambiguous, s.j + t.k}§
     963        m = 5.0;                                                                §\C{// unambiguous, t.m = 5.0}§
     964        m = 1;                                                                  §\C{// unambiguous, s.m = 1}§
     965        int a = m;                                                              §\C{// unambiguous, a = s.i }§
     966        double b = m;                                                   §\C{// unambiguous, b = t.m}§
     967        int c = s.i + t.i;                                              §\C{// unambiguous, qualification}§
     968        (double)m;                                                              §\C{// unambiguous, cast}§
     969}
     970\end{cfa}
     971For parallel semantics, both @s.i@ and @t.i@ are visible, so @i@ is ambiguous without qualification;
     972for nested semantics, @t.i@ hides @s.i@, so @i@ implies @t.i@.
     973\CFA's ability to overload variables means fields with the same name but different types are automatically disambiguated, eliminating most qualification when opening multiple aggregates.
     974Qualification or a cast is used to disambiguate.
     975
     976There is an interesting problem between parameters and the function-body @with@, \eg:
     977\begin{cfa}
     978void ?{}( S & s, int i ) with ( s ) {           §\C{// constructor}§
     979        `s.i = i;`  j = 3;  m = 5.5;                    §\C{// initialize fields}§
     980}
     981\end{cfa}
     982Here, the assignment @s.i = i@ means @s.i = s.i@, which is meaningless, and there is no mechanism to qualify the parameter @i@, making the assignment impossible using the function-body @with@.
     983To solve this problem, parameters are treated like an initialized aggregate:
     984\begin{cfa}
     985struct Params {
     986        S & s;
     987        int i;
     988} params;
     989\end{cfa}
     990and implicitly opened \emph{after} a function-body open, to give them higher priority:
     991\begin{cfa}
     992void ?{}( S & s, int `i` ) with ( s ) `with( §\emph{\color{red}params}§ )` {
     993        s.i = `i`; j = 3; m = 5.5;
     994}
     995\end{cfa}
     996Finally, a cast may be used to disambiguate among overload variables in a @with@ expression:
     997\begin{cfa}
     998with ( w ) { ... }                                                      §\C{// ambiguous, same name and no context}§
     999with ( (S)w ) { ... }                                           §\C{// unambiguous, cast}§
     1000\end{cfa}
     1001and @with@ expressions may be complex expressions with type reference (see Section~\ref{s:References}) to aggregate:
     1002% \begin{cfa}
     1003% struct S { int i, j; } sv;
     1004% with ( sv ) {                                                         §\C{// implicit reference}§
     1005%       S & sr = sv;
     1006%       with ( sr ) {                                                   §\C{// explicit reference}§
     1007%               S * sp = &sv;
     1008%               with ( *sp ) {                                          §\C{// computed reference}§
     1009%                       i = 3; j = 4;                                   §\C{\color{red}// sp--{\textgreater}i, sp--{\textgreater}j}§
     1010%               }
     1011%               i = 2; j = 3;                                           §\C{\color{red}// sr.i, sr.j}§
     1012%       }
     1013%       i = 1; j = 2;                                                   §\C{\color{red}// sv.i, sv.j}§
     1014% }
     1015% \end{cfa}
    9141016
     9141016In \Index{object-oriented} programming, there is an implicit first parameter, often named \textbf{©self©} or \textbf{©this©}, which is elided.
     
     9351037\CFA provides a ©with© clause/statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide the "©this.©" by opening a scope containing field identifiers, changing the qualified fields into variables and giving an opportunity for optimizing qualified references.
    9361038\begin{cfa}
    937 int mem( S & this ) ®with this® { §\C{// with clause}§
     1039int mem( S & this ) ®with( this )® { §\C{// with clause}§
    9381040        i = 1;                                          §\C{\color{red}// this.i}§
    9391041        j = 2;                                          §\C{\color{red}// this.j}§
     
    9431045\begin{cfa}
    9441046struct T { double m, n; };
    945 int mem2( S & this1, T & this2 ) ®with this1, this2® {
     1047int mem2( S & this1, T & this2 ) ®with( this1, this2 )® {
    9461048        i = 1; j = 2;
    9471049        m = 1.0; n = 2.0;
     
    9541056        struct S1 { ... } s1;
    9551057        struct S2 { ... } s2;
    956         ®with s1® {                                     §\C{// with statement}§
     1058        ®with( s1 )® {                          §\C{// with statement}§
    9571059                // access fields of s1 without qualification
    9581060                ®with s2® {                             §\C{// nesting}§
     
    9711073struct S { int i; int j; double m; } a, c;
     9721074struct T { int i; int k; int m; } b, c;
    973 ®with a, b® {
    974         j + k;                                          §\C{// unambiguous, unique names define unique types}§
    975         i;                                                      §\C{// ambiguous, same name and type}§
    976         a.i + b.i;                                      §\C{// unambiguous, qualification defines unique names}§
    977         m;                                                      §\C{// ambiguous, same name and no context to define unique type}§
    978         m = 5.0;                                        §\C{// unambiguous, same name and context defines unique type}§
    979         m = 1;                                          §\C{// unambiguous, same name and context defines unique type}§
    980 }
    981 ®with c® { ... }                                §\C{// ambiguous, same name and no context}§
    982 ®with (S)c® { ... }                             §\C{// unambiguous, same name and cast defines unique type}§
    983 \end{cfa}
    984 
     1075with( a, b )
     1076{
     1077}
     1078\end{cfa}
     1079
     1080\begin{comment}
    9851081The components in the "with" clause
    9861082
     
    10071103the "with" to be implemented because I hate having to type all those object
    10081104names for fields. It's a great way to drive people away from the language.
     1105\end{comment}
    10091106
    10101107
     
    15951692
    15961693\item
    1597 lvalue to reference conversion: \lstinline[deletekeywords=lvalue]$lvalue-type cv1 T$ converts to ©cv2 T &©, which allows implicitly converting variables to references.
     1694lvalue to reference conversion: \lstinline[deletekeywords=lvalue]@lvalue-type cv1 T@ converts to ©cv2 T &©, which allows implicitly converting variables to references.
    15981695\begin{cfa}
    15991696int x, &r = ®x®, f( int & p ); // lvalue variable (int) convert to reference (int &)
     
    63616458
    63626459
     6460\section{Time}
     6461\label{s:TimeLib}
     6462
     6463
     6464%\subsection{\texorpdfstring{\protect\lstinline@Duration@}{Duration}}
     6465\subsection{\texorpdfstring{\LstKeywordStyle{\textmd{Duration}}}{Duration}}
     6466\label{s:Duration}
     6467
     6468\leavevmode
     6469\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6470struct Duration {
     6471        int64_t tv;                                                     §\C{// nanoseconds}§
     6472};
     6473
     6474void ?{}( Duration & dur );
     6475void ?{}( Duration & dur, zero_t );
     6476
     6477Duration ?=?( Duration & dur, zero_t );
     6478
     6479Duration +?( Duration rhs );
     6480Duration ?+?( Duration & lhs, Duration rhs );
     6481Duration ?+=?( Duration & lhs, Duration rhs );
     6482
     6483Duration -?( Duration rhs );
     6484Duration ?-?( Duration & lhs, Duration rhs );
     6485Duration ?-=?( Duration & lhs, Duration rhs );
     6486
     6487Duration ?*?( Duration lhs, int64_t rhs );
     6488Duration ?*?( int64_t lhs, Duration rhs );
     6489Duration ?*=?( Duration & lhs, int64_t rhs );
     6490
     6491int64_t ?/?( Duration lhs, Duration rhs );
     6492Duration ?/?( Duration lhs, int64_t rhs );
     6493Duration ?/=?( Duration & lhs, int64_t rhs );
     6494double div( Duration lhs, Duration rhs );
     6495
     6496Duration ?%?( Duration lhs, Duration rhs );
     6497Duration ?%=?( Duration & lhs, Duration rhs );
     6498
     6499_Bool ?==?( Duration lhs, Duration rhs );
     6500_Bool ?!=?( Duration lhs, Duration rhs );
     6501_Bool ?<? ( Duration lhs, Duration rhs );
     6502_Bool ?<=?( Duration lhs, Duration rhs );
     6503_Bool ?>? ( Duration lhs, Duration rhs );
     6504_Bool ?>=?( Duration lhs, Duration rhs );
     6505
     6506_Bool ?==?( Duration lhs, zero_t );
     6507_Bool ?!=?( Duration lhs, zero_t );
     6508_Bool ?<? ( Duration lhs, zero_t );
     6509_Bool ?<=?( Duration lhs, zero_t );
     6510_Bool ?>? ( Duration lhs, zero_t );
     6511_Bool ?>=?( Duration lhs, zero_t );
     6512
     6513Duration abs( Duration rhs );
     6514
     6515forall( dtype ostype | ostream( ostype ) ) ostype & ?|?( ostype & os, Duration dur );
     6516
     6517Duration ?`ns( int64_t nsec );
     6518Duration ?`us( int64_t usec );
     6519Duration ?`ms( int64_t msec );
     6520Duration ?`s( int64_t sec );
     6521Duration ?`s( double sec );
     6522Duration ?`m( int64_t min );
     6523Duration ?`m( double min );
     6524Duration ?`h( int64_t hours );
     6525Duration ?`h( double hours );
     6526Duration ?`d( int64_t days );
     6527Duration ?`d( double days );
     6528Duration ?`w( int64_t weeks );
     6529Duration ?`w( double weeks );
     6530
     6531int64_t ?`ns( Duration dur );
     6532int64_t ?`us( Duration dur );
     6533int64_t ?`ms( Duration dur );
     6534int64_t ?`s( Duration dur );
     6535int64_t ?`m( Duration dur );
     6536int64_t ?`h( Duration dur );
     6537int64_t ?`d( Duration dur );
     6538int64_t ?`w( Duration dur );
     6539\end{cfa}
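As a usage sketch only (the variable names below are hypothetical and not part of the library interface), the literal constructors, arithmetic operators, and conversions above compose as follows; the postfix conversion applied to a variable is assumed to behave like the literal form:
\begin{cfa}
Duration d1 = 2`s;                              // literal constructor ?`s
Duration d2 = 500`ms;                           // literal constructor ?`ms
Duration timeout = d1 + d2;                     // Duration addition, 2.5 seconds
Duration slice = timeout / 4;                   // Duration ?/? int64_t
if ( slice > 0 ) slice -= 1`ms;                 // zero_t comparison, compound assignment
int64_t nsecs = slice`ns;                       // ?`ns conversion (postfix call on a variable is assumed to work)
\end{cfa}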
     6540
     6541
     6542%\subsection{\texorpdfstring{\protect\lstinline@\timeval@}{timeval}}
     6543\subsection{\texorpdfstring{\LstKeywordStyle{\textmd{timeval}}}{timeval}}
     6544\label{s:timeval}
     6545
     6546\leavevmode
     6547\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6548void ?{}( timeval & t );
     6549void ?{}( timeval & t, time_t sec, suseconds_t usec );
     6550void ?{}( timeval & t, time_t sec );
     6551void ?{}( timeval & t, zero_t );
     6552void ?{}( timeval & t, Time time );
     6553
     6554timeval ?=?( timeval & t, zero_t );
     6555timeval ?+?( timeval & lhs, timeval rhs );
     6556timeval ?-?( timeval & lhs, timeval rhs );
     6557_Bool ?==?( timeval lhs, timeval rhs );
     6558_Bool ?!=?( timeval lhs, timeval rhs );
     6559\end{cfa}
     6560
     6561
     6562\subsection{\texorpdfstring{\protect\lstinline@timespec@}{timespec}}
     6563\label{s:timespec}
     6564
     6565\leavevmode
     6566\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6567void ?{}( timespec & t );
     6568void ?{}( timespec & t, time_t sec, __syscall_slong_t nsec );
     6569void ?{}( timespec & t, time_t sec );
     6570void ?{}( timespec & t, zero_t );
     6571void ?{}( timespec & t, Time time );
     6572
     6573timespec ?=?( timespec & t, zero_t );
     6574timespec ?+?( timespec & lhs, timespec rhs );
     6575timespec ?-?( timespec & lhs, timespec rhs );
     6576_Bool ?==?( timespec lhs, timespec rhs );
     6577_Bool ?!=?( timespec lhs, timespec rhs );
     6578\end{cfa}
     6579
     6580
     6581\subsection{\texorpdfstring{\protect\lstinline@itimerval@}{itimerval}}
     6582\label{s:itimerval}
     6583
     6584\leavevmode
     6585\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6586void ?{}( itimerval & itv, Duration alarm );
     6587void ?{}( itimerval & itv, Duration alarm, Duration interval );
     6588\end{cfa}
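A short sketch of constructing these C-compatible structures via the constructors above (all values are illustrative):
\begin{cfa}
timeval tv = { 10, 500000 };                    // 10 s and 500000 us
timespec ts = { 10, 500000000 };                // 10 s and 500000000 ns
itimerval itv = { 1`s, 500`ms };                // alarm after 1 s, repeating every 500 ms
\end{cfa}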
     6589
     6590
     6591\subsection{\texorpdfstring{\protect\lstinline@Time@}{Time}}
     6592\label{s:Time}
     6593
     6594\leavevmode
     6595\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6596struct Time {
     6597        uint64_t tv;                                            §\C{// nanoseconds since UNIX epoch}§
     6598};
     6599
     6600void ?{}( Time & time );
     6601void ?{}( Time & time, zero_t );
     6602void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 );
     6603Time ?=?( Time & time, zero_t );
     6604
     6605void ?{}( Time & time, timeval t );
     6606Time ?=?( Time & time, timeval t );
     6607
     6608void ?{}( Time & time, timespec t );
     6609Time ?=?( Time & time, timespec t );
     6610
     6611Time ?+?( Time & lhs, Duration rhs ) { return (Time)@{ lhs.tv + rhs.tv }; }
     6612Time ?+?( Duration lhs, Time rhs ) { return rhs + lhs; }
     6613Time ?+=?( Time & lhs, Duration rhs ) { lhs = lhs + rhs; return lhs; }
     6614
     6615Duration ?-?( Time lhs, Time rhs ) { return (Duration)@{ lhs.tv - rhs.tv }; }
     6616Time ?-?( Time lhs, Duration rhs ) { return (Time)@{ lhs.tv - rhs.tv }; }
     6617Time ?-=?( Time & lhs, Duration rhs ) { lhs = lhs - rhs; return lhs; }
     6618_Bool ?==?( Time lhs, Time rhs ) { return lhs.tv == rhs.tv; }
     6619_Bool ?!=?( Time lhs, Time rhs ) { return lhs.tv != rhs.tv; }
     6620_Bool ?<?( Time lhs, Time rhs ) { return lhs.tv < rhs.tv; }
     6621_Bool ?<=?( Time lhs, Time rhs ) { return lhs.tv <= rhs.tv; }
     6622_Bool ?>?( Time lhs, Time rhs ) { return lhs.tv > rhs.tv; }
     6623_Bool ?>=?( Time lhs, Time rhs ) { return lhs.tv >= rhs.tv; }
     6624
     6625forall( dtype ostype | ostream( ostype ) ) ostype & ?|?( ostype & os, Time time );
     6626
     6627char * yy_mm_dd( Time time, char * buf );
     6628char * ?`ymd( Time time, char * buf ) { // short form
     6629        return yy_mm_dd( time, buf );
     6630} // ymd
     6631
     6632char * mm_dd_yy( Time time, char * buf );
     6633char * ?`mdy( Time time, char * buf ) { // short form
     6634        return mm_dd_yy( time, buf );
     6635} // mdy
     6636
     6637char * dd_mm_yy( Time time, char * buf );
     6638char * ?`dmy( Time time, char * buf ) { // short form
      6639        return dd_mm_yy( time, buf );
     6640} // dmy
     6641
     6642size_t strftime( char * buf, size_t size, const char * fmt, Time time );
     6643\end{cfa}
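The following sketch combines Time, Duration, and the formatting routines above (variable names and field values are illustrative only):
\begin{cfa}
Time start = { 2018, 4, 17 };                   // year/month/day constructor
Time deadline = start + 90`m;                   // Time + Duration
Duration remaining = deadline - start;          // Time - Time yields a Duration
char buf[32];
char * date = yy_mm_dd( deadline, buf );        // format into caller-supplied buffer
\end{cfa}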
     6644
     6645
     6646\section{Clock}
     6647
     6648\subsection{C time}
     6649\label{s:Ctime}
     6650
     6651\leavevmode
     6652\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6653char * ctime( time_t tp );
     6654char * ctime_r( time_t tp, char * buf );
     6655tm * gmtime( time_t tp );
     6656tm * gmtime_r( time_t tp, tm * result );
     6657tm * localtime( time_t tp );
     6658tm * localtime_r( time_t tp, tm * result );
     6659\end{cfa}
     6660
     6661
     6662%\subsection{\texorpdfstring{\protect\lstinline@Clock@}{Clock}}
     6663\subsection{\texorpdfstring{\LstKeywordStyle{\textmd{Clock}}}{Clock}}
     6664\label{s:Clock}
     6665
     6666\leavevmode
     6667\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     6668struct Clock {
     6669        Duration offset;                                        §\C{// for virtual clock: contains offset from real-time}§
     6670        int clocktype;                                          §\C{// implementation only -1 (virtual), CLOCK\_REALTIME}§
     6671};
     6672
     6673void resetClock( Clock & clk );
     6674void resetClock( Clock & clk, Duration adj );
     6675void ?{}( Clock & clk );
     6676void ?{}( Clock & clk, Duration adj );
     6677Duration getRes();
     6678Time getTimeNsec();                                             §\C{// with nanoseconds}§
     6679Time getTime();                                                 §\C{// without nanoseconds}§
     6680Time getTime( Clock & clk );
     6681Time ?()( Clock & clk );
     6682timeval getTime( Clock & clk );
     6683tm getTime( Clock & clk );
     6684\end{cfa}
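A usage sketch of the clock interface (the adjustment value is illustrative):
\begin{cfa}
Clock clk = { 5`m };                            // virtual clock, offset 5 minutes from real time
Time now = getTime();                           // real-time clock, without nanoseconds
Time vnow = getTime( clk );                     // reading of the virtual clock
Duration res = getRes();                        // clock resolution
\end{cfa}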
     6685
     6686
    63636687\section{Multi-precision Integers}
    63646688\label{s:MultiPrecisionIntegers}
     
    66586982\end{cfa}
    66596983
    6660 
    66616984\bibliographystyle{plain}
    66626985\bibliography{pl}
  • src/Common/SemanticError.cc

    rb2da0574 r2ae16219  
    6868}
    6969
    70 void SemanticWarningImpl( CodeLocation location, Warning, const char * const fmt, ... ) {
    71         va_list args;
    72         va_start(args, fmt);
    73         std::string msg = fmtToString( fmt, args );
    74         va_end(args);
    75         std::cerr << ErrorHelpers::bold() << location << ErrorHelpers::warning_str() << ErrorHelpers::reset_font() << msg << std::endl;
     70void SemanticWarningImpl( CodeLocation location, Warning warning, const char * const fmt, ... ) {
     71        Severity severity = WarningFormats[(int)warning].severity;
     72        switch(severity) {
     73        case Severity::Suppress :
     74                break;
     75        case Severity::Warn :
     76                {
     77                        va_list args;
     78                        va_start(args, fmt);
     79                        std::string msg = fmtToString( fmt, args );
     80                        va_end(args);
     81                        std::cerr << ErrorHelpers::bold() << location << ErrorHelpers::warning_str() << ErrorHelpers::reset_font() << msg << std::endl;
     82                }
     83                break;
     84        case Severity::Error :
     85                {
     86                        va_list args;
     87                        va_start(args, fmt);
     88                        std::string msg = fmtToString( fmt, args );
     89                        va_end(args);
     90                        SemanticError(location, msg);
     91                }
     92                break;
     93        case Severity::Critical :
     94                assertf(false, "Critical errors not implemented yet");
     95                break;
     96        }
    7697}
    7798
  • src/Common/SemanticError.h

    rb2da0574 r2ae16219  
    3636// Warnings
    3737
    38 constexpr const char * const WarningFormats[] = {
    39         "self assignment of expression: %s",
    40         "rvalue to reference conversion of rvalue: %s",
     38enum class Severity {
     39        Suppress,
     40        Warn,
     41        Error,
     42        Critical
     43};
     44
     45struct WarningData {
     46        const char * const name;
     47        const char * const message;
     48        mutable Severity severity;
     49};
     50
     51constexpr const WarningData WarningFormats[] = {
     52        {"self-assign"         , "self assignment of expression: %s"           , Severity::Warn},
     53        {"reference-conversion", "rvalue to reference conversion of rvalue: %s", Severity::Warn},
    4154};
    4255
     
    5265);
    5366
    54 // ## used here to allow empty __VA_ARGS__
    55 #define SemanticWarning(loc, id, ...) SemanticWarningImpl(loc, id, WarningFormats[(int)id], ## __VA_ARGS__)
     67#define SemanticWarning(loc, id, ...) SemanticWarningImpl(loc, id, WarningFormats[(int)id].message, __VA_ARGS__)
    5668
    5769void SemanticWarningImpl (CodeLocation loc, Warning warn, const char * const fmt, ...) __attribute__((format(printf, 3, 4)));
  • src/Parser/parser.yy

    rb2da0574 r2ae16219  
    1010// Created On       : Sat Sep  1 20:22:55 2001
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Mar 28 17:52:24 2018
    13 // Update Count     : 3130
     12// Last Modified On : Tue Apr 17 17:10:30 2018
     13// Update Count     : 3144
    1414//
    1515
     
    391391%precedence '('
    392392
    393 %locations                      // support location tracking for error messages
     393%locations                                                                                              // support location tracking for error messages
    394394
    395395%start translation_unit                                                                 // parse-tree root
     
    17081708        | LONG
    17091709                { $$ = DeclarationNode::newLength( DeclarationNode::Long ); }
    1710         | ZERO_T
    1711                 { $$ = DeclarationNode::newBuiltinType( DeclarationNode::Zero ); }
    1712         | ONE_T
    1713                 { $$ = DeclarationNode::newBuiltinType( DeclarationNode::One ); }
    17141710        | VALIST                                                                                        // GCC, __builtin_va_list
    17151711                { $$ = DeclarationNode::newBuiltinType( DeclarationNode::Valist ); }
     
    17311727basic_type_specifier:
    17321728        direct_type
     1729                // Cannot have type modifiers, e.g., short, long, etc.
    17331730        | type_qualifier_list_opt indirect_type type_qualifier_list_opt
    17341731                { $$ = $2->addQualifiers( $1 )->addQualifiers( $3 ); }
     
    17361733
    17371734direct_type:
    1738                 // A semantic check is necessary for conflicting type qualifiers.
    17391735        basic_type_name
    17401736        | type_qualifier_list basic_type_name
     
    17551751        | ATTR_TYPEGENname '(' comma_expression ')'                     // CFA: e.g., @type(a+b) y;
    17561752                { $$ = DeclarationNode::newAttr( $1, $3 ); }
     1753        | ZERO_T                                                                                        // CFA
     1754                { $$ = DeclarationNode::newBuiltinType( DeclarationNode::Zero ); }
     1755        | ONE_T                                                                                         // CFA
     1756                { $$ = DeclarationNode::newBuiltinType( DeclarationNode::One ); }
    17571757        ;
    17581758
  • src/libcfa/time

    rb2da0574 r2ae16219  
    1010// Created On       : Wed Mar 14 23:18:57 2018
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Apr 13 07:51:52 2018
    13 // Update Count     : 634
     12// Last Modified On : Sat Apr 14 17:48:23 2018
     13// Update Count     : 636
    1414//
    1515
     
    2929
    3030//######################### Duration #########################
    31 
    32 static inline void ?{}( Duration & dur, Duration d ) with( dur ) { tv = d.tv; }
    3331
    3432static inline Duration ?=?( Duration & dur, zero_t ) { return dur{ 0 }; }
     
    137135//######################### Time #########################
    138136
    139 static inline void ?{}( Time & time, Time t ) with( time ) { tv = t.tv; }
    140137void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 );
     138static inline Time ?=?( Time & time, zero_t ) { return time{ 0 }; }
     139
    141140static inline void ?{}( Time & time, timeval t ) with( time ) { tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; }
    142 
    143 static inline Time ?=?( Time & time, zero_t ) { return time{ 0 }; }
    144 
    145141static inline Time ?=?( Time & time, timeval t ) with( time ) {
    146142        tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * (TIMEGRAN / 1_000_000LL);
     
    148144} // ?=?
    149145
    150 static inline void ?{}( Time & time, timespec t ) with( time ) {
    151         tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec;
    152 } // Time
    153 
     146static inline void ?{}( Time & time, timespec t ) with( time ) { tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec; }
    154147static inline Time ?=?( Time & time, timespec t ) with( time ) {
    155148        tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_nsec;
  • src/tests/concurrent/preempt.c

    rb2da0574 r2ae16219  
    1111}
    1212
     13#ifdef LONG_TEST
     14static const unsigned long N = 30_000ul;
     15#else
     16static const unsigned long N = 500ul;
     17#endif
     18
    1319static volatile int counter = 0;
    1420
     
    2228
    2329void main(worker_t & this) {
    24         while(counter < 1000) {
     30        while(counter < N) {
    2531                if( (counter % 7) == this.value ) {
    2632                        int next = __atomic_add_fetch_4(&counter, 1, __ATOMIC_SEQ_CST);
  • tools/prettyprinter/Makefile.am

    rb2da0574 r2ae16219  
    1111## Created On       : Wed Jun 28 12:07:10 2017
    1212## Last Modified By : Peter A. Buhr
    13 ## Last Modified On : Wed Jun 28 23:11:56 2017
    14 ## Update Count     : 15
     13## Last Modified On : Mon Apr 16 09:43:23 2018
     14## Update Count     : 20
    1515###############################################################################
    1616
  • tools/prettyprinter/lex.ll

    rb2da0574 r2ae16219  
    1010 * Created On       : Sat Dec 15 11:45:59 2001
    1111 * Last Modified By : Peter A. Buhr
    12  * Last Modified On : Tue Aug 29 17:33:36 2017
    13  * Update Count     : 268
     12 * Last Modified On : Sun Apr 15 21:28:33 2018
     13 * Update Count     : 271
    1414 */
    1515
     
    5050<INITIAL,C_CODE>"/*" {                                                                  // C style comments */
    5151#if defined(DEBUG_ALL) | defined(DEBUG_COMMENT)
    52     cerr << "\"/*\" : " << yytext << endl;
     52                cerr << "\"/*\" : " << yytext << endl;
    5353#endif
    54     if ( YYSTATE == C_CODE ) code_str += yytext;
    55     else comment_str += yytext;
    56     yy_push_state(C_COMMENT);
     54                if ( YYSTATE == C_CODE ) code_str += yytext;
     55                else comment_str += yytext;
     56                yy_push_state(C_COMMENT);
    5757}
    5858<C_COMMENT>(.|"\n")     {                                                                       // C style comments
    5959#if defined(DEBUG_ALL) | defined(DEBUG_COMMENT)
    60     cerr << "<C_COMMENT>(.|\\n) : " << yytext << endl;
     60                cerr << "<C_COMMENT>(.|\\n) : " << yytext << endl;
    6161#endif
    62     if ( yy_top_state() == C_CODE ) code_str += yytext;
    63     else comment_str += yytext;
     62                if ( yy_top_state() == C_CODE ) code_str += yytext;
     63                else comment_str += yytext;
    6464}
    6565<C_COMMENT>"*/" {                                                                               // C style comments
     
    123123<C_CODE>"%}"    { RETURN_TOKEN( RCURL ) }
    124124
    125 ^"%union"       { RETURN_TOKEN( UNION ) }
    126 ^"%start"       { RETURN_TOKEN( START ) }
    127 ^"%token"       { RETURN_TOKEN( TOKEN ) }
    128 ^"%type"            { RETURN_TOKEN( TYPE ) }
    129 ^"%left"            { RETURN_TOKEN( LEFT ) }
    130 ^"%right"           { RETURN_TOKEN( RIGHT ) }
    131 ^"%nonassoc"    { RETURN_TOKEN( NONASSOC ) }
    132 ^"%precedence"  { RETURN_TOKEN( PRECEDENCE ) }
     125^"%define"[^\n]*"\n" { RETURN_TOKEN( DEFINE ) }
     126^"%expect"              { RETURN_TOKEN( EXPECT ) }
     127^"%left"                { RETURN_TOKEN( LEFT ) }
     128^"%locations"   { RETURN_TOKEN( LOCATIONS ) }
     129^"%nonassoc"    { RETURN_TOKEN( NONASSOC ) }
     130^"%precedence"  { RETURN_TOKEN( PRECEDENCE ) }
    133131^"%pure_parser" { RETURN_TOKEN( PURE_PARSER ) }
     132^"%right"               { RETURN_TOKEN( RIGHT ) }
    134133^"%semantic_parser"     { RETURN_TOKEN( SEMANTIC_PARSER ) }
    135 ^"%expect"      { RETURN_TOKEN( EXPECT ) }
    136 ^"%thong"               { RETURN_TOKEN( THONG ) }
     134^"%start"               { RETURN_TOKEN( START ) }
     135^"%thong"               { RETURN_TOKEN( THONG ) }
     136^"%token"               { RETURN_TOKEN( TOKEN ) }
     137^"%type"                { RETURN_TOKEN( TYPE ) }
     138^"%union"               { RETURN_TOKEN( UNION ) }
    137139
    138 "%prec"                 { RETURN_TOKEN( PREC ) }
     140"%prec"                 { RETURN_TOKEN( PREC ) }
    139141
    140 {integer}           { RETURN_TOKEN( INTEGER ); }
    141 [']{c_char}[']  { RETURN_TOKEN( CHARACTER ); }
    142 {identifier}    { RETURN_TOKEN( IDENTIFIER ); }
     142{integer}               { RETURN_TOKEN( INTEGER ); }
     143[']{c_char}[']  { RETURN_TOKEN( CHARACTER ); }
     144{identifier}    { RETURN_TOKEN( IDENTIFIER ); }
    143145
    144146<C_CODE>["]{s_char}*["] {                                                               // hide braces "{}" in strings
     
    160162%%
    161163void lexC(void) {
    162     BEGIN(C_CODE);
     164        BEGIN(C_CODE);
    163165}
    164166
    165167string lexYacc(void) {
    166     BEGIN(INITIAL);
    167     //cerr << "CODE: " << endl << code_str << endl;
    168     string temp( code_str );
    169     code_str = "";
    170     return temp;
     168        BEGIN(INITIAL);
     169        //cerr << "CODE: " << endl << code_str << endl;
     170        string temp( code_str );
     171        code_str = "";
     172        return temp;
    171173}
    172174
  • tools/prettyprinter/parser.yy

    rb2da0574 r2ae16219  
    1010// Created On       : Sat Dec 15 13:44:21 2001
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Aug 29 16:34:10 2017
    13 // Update Count     : 1047
     12// Last Modified On : Sun Apr 15 21:40:30 2018
     13// Update Count     : 1052
    1414//
    1515
     
    6161%token<tokenp>  CODE                                                                    // C code
    6262
    63 %token<tokenp>  START                                                                   // %start
    64 %token<tokenp>  UNION                                                                   // %union
    65 %token<tokenp>  TOKEN                                                                   // %token
     63%token<tokenp>  DEFINE                                                                  // %define
     64%token<tokenp>  EXPECT                                                                  // %expect
    6665%token<tokenp>  LEFT                                                                    // %left
    67 %token<tokenp>  RIGHT                                                                   // %right
     66%token<tokenp>  LOCATIONS                                                               // %locations
    6867%token<tokenp>  NONASSOC                                                                // %nonassoc
    6968%token<tokenp>  PRECEDENCE                                                              // %precedence
     69%token<tokenp>  PURE_PARSER                                                             // %pure_parser
     70%token<tokenp>  RIGHT                                                                   // %right
     71%token<tokenp>  SEMANTIC_PARSER                                                 // %semantic_parser
     72%token<tokenp>  START                                                                   // %start
     73%token<tokenp>  THONG                                                                   // %thong
     74%token<tokenp>  TOKEN                                                                   // %token
    7075%token<tokenp>  TYPE                                                                    // %type
    71 %token<tokenp>  PURE_PARSER                                                             // %pure_parser
    72 %token<tokenp>  SEMANTIC_PARSER                                                 // %semantic_parser
    73 %token<tokenp>  EXPECT                                                                  // %expect
    74 %token<tokenp>  THONG                                                                   // %thong
     76%token<tokenp>  UNION                                                                   // %union
    7577
    7678%token<tokenp>  PREC                                                                    // %prec
    7779
    78 %token          END_TERMINALS                                                           // ALL TERMINAL TOKEN NAMES MUST APPEAR BEFORE THIS
     80%token                  END_TERMINALS                                                   // ALL TERMINAL TOKEN NAMES MUST APPEAR BEFORE THIS
    7981
    8082%type<tokenp>   sections
    81 %token          _SECTIONS
     83%token                  _SECTIONS
    8284%type<tokenp>   mark
    8385%type<tokenp>   defsection_opt
    84 %token          _DEFSECTION_OPT
     86%token                  _DEFSECTION_OPT
    8587%type<tokenp>   declarations
    8688%type<tokenp>   literalblock
    87 %token          _LITERALBLOCK
     89%token                  _LITERALBLOCK
    8890%type<tokenp>   declaration
    89 %token          _DECLARATION
     91%token                  _DECLARATION
    9092%type<tokenp>   union
    9193%type<tokenp>   rword
    9294%type<tokenp>   tag_opt
    93 %token          _TAG_OPT
     95%token                  _TAG_OPT
    9496%type<tokenp>   namenolist
    95 %token          _NAMENOLIST
     97%token                  _NAMENOLIST
    9698%type<tokenp>   nameno
    97 %token          _NAMENO
     99%token                  _NAMENO
    98100%type<tokenp>   namelist
    99 %token          _NAMELIST
     101%token                  _NAMELIST
    100102%type<tokenp>   name
    101103%type<tokenp>   rulesection
    102 %token          _RULESECTION
     104%token                  _RULESECTION
    103105%type<tokenp>   rules
    104 %token          _RULE
     106%token                  _RULE
    105107%type<tokenp>   lhs
    106 %token          _LHS
     108%token                  _LHS
    107109%type<tokenp>   rhs
    108 %token          _RHS
     110%token                  _RHS
    109111%type<tokenp>   prod
    110112%type<tokenp>   prec
    111 %token          _PREC
     113%token                  _PREC
    112114%type<tokenp>   action
    113 %token          _ACTION
     115%token                  _ACTION
    114116%type<tokenp>   usersection_opt
    115 %token          _USERSECTION_OPT
     117%token                  _USERSECTION_OPT
    116118%type<tokenp>   ccode_opt
    117119%type<tokenp>   blocks
     
    234236                    $$ = $1;
    235237                }
     238        | DEFINE                                                                                        // bison
     239        | LOCATIONS
    236240        | THONG                                                                                         // bison
    237241        ;
  • tools/prettyprinter/test.y

    rb2da0574 r2ae16219  
    66
    77/* adsad2 */
    8 
     8%locations
     9%define parse.error verbose
    910%%
    1011