Ignore:
Timestamp:
Oct 29, 2019, 4:01:24 PM (3 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
arm-eh, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
773db65, 9421f3d8
Parents:
7951100 (diff), 8364209 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
doc/papers/concurrency
Files:
45 added
1 deleted
5 edited

Legend:

Unmodified
Added
Removed
  • doc/papers/concurrency/Makefile

    r7951100 rb067d9b  
    44Figures = figures
    55Macros = ../AMA/AMA-stix/ama
    6 TeXLIB = .:annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography:
     6TeXLIB = .:../../LaTeXmacros:${Macros}:${Build}:
    77LaTeX  = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
    8 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
     8BibTeX = BIBINPUTS=annex:../../bibliography: && export BIBINPUTS && bibtex
    99
    1010MAKEFLAGS = --no-print-directory # --silent
     
    1515SOURCES = ${addsuffix .tex, \
    1616Paper \
    17 style/style \
    18 style/cfa-format \
    1917}
    2018
    2119FIGURES = ${addsuffix .tex, \
    22 monitor \
    23 ext_monitor \
    2420int_monitor \
    2521dependency \
     22RunTimeStructure \
    2623}
    2724
    2825PICTURES = ${addsuffix .pstex, \
     26FullProdConsStack \
     27FullCoroutinePhases \
     28corlayout \
     29CondSigWait \
     30monitor \
     31ext_monitor \
    2932system \
    3033monitor_structs \
     
    5962        dvips ${Build}/$< -o $@
    6063
    61 ${BASE}.dvi : Makefile ${Build} ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
    62                 annex/local.bib ../../bibliography/pl.bib
     64${BASE}.dvi : Makefile ${BASE}.out.ps WileyNJD-AMA.bst ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \
     65                annex/local.bib ../../bibliography/pl.bib | ${Build}
    6366        # Must have *.aux file containing citations for bibtex
    6467        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
    65         ${BibTeX} ${Build}/${basename $@}
     68        -${BibTeX} ${Build}/${basename $@}
    6669        # Some citations reference others so run again to resolve these citations
    6770        ${LaTeX} ${basename $@}.tex
    68         ${BibTeX} ${Build}/${basename $@}
     71        -${BibTeX} ${Build}/${basename $@}
    6972        # Run again to finish citations
    7073        ${LaTeX} ${basename $@}.tex
     
    7275## Define the default recipes.
    7376
    74 ${Build}:
     77${Build} :
    7578        mkdir -p ${Build}
    7679
    77 ${BASE}.out.ps: ${Build}
     80${BASE}.out.ps : | ${Build}
    7881        ln -fs ${Build}/Paper.out.ps .
    7982
    80 WileyNJD-AMA.bst:
     83WileyNJD-AMA.bst :
    8184        ln -fs ../AMA/AMA-stix/ama/WileyNJD-AMA.bst .
    8285
    83 %.tex : %.fig ${Build}
     86%.tex : %.fig | ${Build}
    8487        fig2dev -L eepic $< > ${Build}/$@
    8588
    86 %.ps : %.fig ${Build}
     89%.ps : %.fig | ${Build}
    8790        fig2dev -L ps $< > ${Build}/$@
    8891
    89 %.pstex : %.fig ${Build}
     92%.pstex : %.fig | ${Build}
    9093        fig2dev -L pstex $< > ${Build}/$@
    9194        fig2dev -L pstex_t -p ${Build}/$@ $< > ${Build}/$@_t
  • doc/papers/concurrency/Paper.tex

    r7951100 rb067d9b  
    33\articletype{RESEARCH ARTICLE}%
    44
    5 \received{26 April 2016}
    6 \revised{6 June 2016}
    7 \accepted{6 June 2016}
     5% Referees
     6% Doug Lea, dl@cs.oswego.edu, SUNY Oswego
     7% Herb Sutter, hsutter@microsoft.com, Microsoft Corp
     8% Gor Nishanov, gorn@microsoft.com, Microsoft Corp
     9% James Noble, kjx@ecs.vuw.ac.nz, Victoria University of Wellington, School of Engineering and Computer Science
     10
     11\received{XXXXX}
     12\revised{XXXXX}
     13\accepted{XXXXX}
    814
    915\raggedbottom
     
    1521\usepackage{epic,eepic}
    1622\usepackage{xspace}
     23\usepackage{enumitem}
    1724\usepackage{comment}
    1825\usepackage{upquote}                                            % switch curled `'" to straight
     
    2128\renewcommand{\thesubfigure}{(\Alph{subfigure})}
    2229\captionsetup{justification=raggedright,singlelinecheck=false}
    23 \usepackage{siunitx}
    24 \sisetup{binary-units=true}
     30\usepackage{dcolumn}                                            % align decimal points in tables
     31\usepackage{capt-of}
     32\setlength{\multicolsep}{6.0pt plus 2.0pt minus 1.5pt}
    2533
    2634\hypersetup{breaklinks=true}
     
    3240\renewcommand{\linenumberfont}{\scriptsize\sffamily}
    3341
     42\renewcommand{\topfraction}{0.8}                        % float must be greater than X of the page before it is forced onto its own page
     43\renewcommand{\bottomfraction}{0.8}                     % float must be greater than X of the page before it is forced onto its own page
     44\renewcommand{\floatpagefraction}{0.8}          % float must be greater than X of the page before it is forced onto its own page
    3445\renewcommand{\textfraction}{0.0}                       % the entire page maybe devoted to floats with no text on the page at all
    3546
     
    132143\makeatother
    133144
    134 \newenvironment{cquote}{%
    135         \list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}%
    136         \item\relax
    137 }{%
    138         \endlist
    139 }% cquote
     145\newenvironment{cquote}
     146               {\list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}%
     147                \item\relax}
     148               {\endlist}
     149
     150%\newenvironment{cquote}{%
     151%\list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}%
     152%\item\relax%
     153%}{%
     154%\endlist%
     155%}% cquote
    140156
    141157% CFA programming language, based on ANSI C (with some gcc additions)
     
    145161                auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, __const, __const__,
    146162                coroutine, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, finally,
    147                 __float80, float80, __float128, float128, forall, ftype, _Generic, _Imaginary, __imag, __imag__,
     163                __float80, float80, __float128, float128, forall, ftype, generator, _Generic, _Imaginary, __imag, __imag__,
    148164                inline, __inline, __inline__, __int128, int128, __label__, monitor, mutex, _Noreturn, one_t, or,
    149165                otype, restrict, __restrict, __restrict__, __signed, __signed__, _Static_assert, thread,
    150166                _Thread_local, throw, throwResume, timeout, trait, try, ttype, typeof, __typeof, __typeof__,
    151167                virtual, __volatile, __volatile__, waitfor, when, with, zero_t},
    152         moredirectives={defined,include_next}%
     168        moredirectives={defined,include_next},
     169        % replace/adjust listing characters that look bad in sanserif
     170        literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1
     171                {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
     172                {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1
     173                {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2,
    153174}
    154175
     
    167188aboveskip=4pt,                                                                                  % spacing above/below code block
    168189belowskip=3pt,
    169 % replace/adjust listing characters that look bad in sanserif
    170 literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1
    171         {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
    172         {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1
    173         {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textrm{\textgreater}}}2,
    174190moredelim=**[is][\color{red}]{`}{`},
    175191}% lstset
     
    197213}
    198214
     215% Go programming language: https://github.com/julienc91/listings-golang/blob/master/listings-golang.sty
     216\lstdefinelanguage{Golang}{
     217        morekeywords=[1]{package,import,func,type,struct,return,defer,panic,recover,select,var,const,iota,},
     218        morekeywords=[2]{string,uint,uint8,uint16,uint32,uint64,int,int8,int16,int32,int64,
     219                bool,float32,float64,complex64,complex128,byte,rune,uintptr, error,interface},
     220        morekeywords=[3]{map,slice,make,new,nil,len,cap,copy,close,true,false,delete,append,real,imag,complex,chan,},
     221        morekeywords=[4]{for,break,continue,range,goto,switch,case,fallthrough,if,else,default,},
     222        morekeywords=[5]{Println,Printf,Error,},
     223        sensitive=true,
     224        morecomment=[l]{//},
     225        morecomment=[s]{/*}{*/},
     226        morestring=[b]',
     227        morestring=[b]",
     228        morestring=[s]{`}{`},
     229        % replace/adjust listing characters that look bad in sanserif
     230        literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1
     231                {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1
     232                {<}{\textrm{\textless}}1 {>}{\textrm{\textgreater}}1
     233                {<-}{\makebox[2ex][c]{\textrm{\textless}\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}}2,
     234}
     235
    199236\lstnewenvironment{cfa}[1][]
    200237{\lstset{#1}}
     
    207244{}
    208245\lstnewenvironment{Go}[1][]
    209 {\lstset{#1}}
     246{\lstset{language=Golang,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
     247{}
     248\lstnewenvironment{python}[1][]
     249{\lstset{language=python,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}}
    210250{}
    211251
     
    222262}
    223263
    224 \title{\texorpdfstring{Concurrency in \protect\CFA}{Concurrency in Cforall}}
     264\newbox\myboxA
     265\newbox\myboxB
     266\newbox\myboxC
     267\newbox\myboxD
     268
     269\title{\texorpdfstring{Advanced Control-flow and Concurrency in \protect\CFA}{Advanced Control-flow in Cforall}}
    225270
    226271\author[1]{Thierry Delisle}
     
    232277\corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON, N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}
    233278
    234 \fundingInfo{Natural Sciences and Engineering Research Council of Canada}
     279% \fundingInfo{Natural Sciences and Engineering Research Council of Canada}
    235280
    236281\abstract[Summary]{
    237 \CFA is a modern, polymorphic, \emph{non-object-oriented} extension of the C programming language.
    238 This paper discusses the design of the concurrency and parallelism features in \CFA, and the concurrent runtime-system.
    239 These features are created from scratch as ISO C lacks concurrency, relying largely on the pthreads library.
    240 Coroutines and lightweight (user) threads are introduced into the language.
    241 In addition, monitors are added as a high-level mechanism for mutual exclusion and synchronization.
    242 A unique contribution is allowing multiple monitors to be safely acquired simultaneously.
    243 All features respect the expectations of C programmers, while being fully integrated with the \CFA polymorphic type-system and other language features.
    244 Finally, experimental results are presented to compare the performance of the new features with similar mechanisms in other concurrent programming-languages.
     282\CFA is a polymorphic, non-object-oriented, concurrent, backwards-compatible extension of the C programming language.
     283This paper discusses the design philosophy and implementation of its advanced control-flow and concurrent/parallel features, along with the supporting runtime written in \CFA.
     284These features are created from scratch as ISO C has only low-level and/or unimplemented concurrency, so C programmers continue to rely on library features like pthreads.
     285\CFA introduces modern language-level control-flow mechanisms, like generators, coroutines, user-level threading, and monitors for mutual exclusion and synchronization.
     286% Library extension for executors, futures, and actors are built on these basic mechanisms.
     287The runtime provides significant programmer simplification and safety by eliminating spurious wakeup and monitor barging.
     288The runtime also ensures multiple monitors can be safely acquired \emph{simultaneously} (deadlock free), and this feature is fully integrated with all monitor synchronization mechanisms.
     289All control-flow features integrate with the \CFA polymorphic type-system and exception handling, while respecting the expectations and style of C programmers.
     290Experimental results show comparable performance of the new features with similar mechanisms in other concurrent programming languages.
    245291}%
    246292
    247 \keywords{concurrency, parallelism, coroutines, threads, monitors, runtime, C, Cforall}
     293\keywords{generator, coroutine, concurrency, parallelism, thread, monitor, runtime, C, \CFA (Cforall)}
    248294
    249295
     
    256302\section{Introduction}
    257303
    258 This paper provides a minimal concurrency \newterm{Application Program Interface} (API) that is simple, efficient and can be used to build other concurrency features.
    259 While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master.
    260 An easier approach for programmers is to support higher-level constructs as the basis of concurrency.
    261 Indeed, for highly productive concurrent programming, high-level approaches are much more popular~\cite{Hochstein05}.
    262 Examples of high-level approaches are task (work) based~\cite{TBB}, implicit threading~\cite{OpenMP}, monitors~\cite{Java}, channels~\cite{CSP,Go}, and message passing~\cite{Erlang,MPI}.
    263 
    264 The following terminology is used.
    265 A \newterm{thread} is a fundamental unit of execution that runs a sequence of code and requires a stack to maintain state.
    266 Multiple simultaneous threads give rise to \newterm{concurrency}, which requires locking to ensure safe communication and access to shared data.
    267 % Correspondingly, concurrency is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, \etc) concurrent threads are introduced.
    268 \newterm{Locking}, and by extension \newterm{locks}, are defined as a mechanism to prevent progress of threads to provide safety.
    269 \newterm{Parallelism} is running multiple threads simultaneously.
    270 Parallelism implies \emph{actual} simultaneous execution, where concurrency only requires \emph{apparent} simultaneous execution.
    271 As such, parallelism only affects performance, which is observed through differences in space and/or time at runtime.
    272 
    273 Hence, there are two problems to be solved: concurrency and parallelism.
    274 While these two concepts are often combined, they are distinct, requiring different tools~\cite[\S~2]{Buhr05a}.
    275 Concurrency tools handle synchronization and mutual exclusion, while parallelism tools handle performance, cost and resource utilization.
    276 
    277 The proposed concurrency API is implemented in a dialect of C, called \CFA.
    278 The paper discusses how the language features are added to the \CFA translator with respect to parsing, semantic, and type checking, and the corresponding high-performance runtime-library to implement the concurrency features.
    279 
    280 
    281 \section{\CFA Overview}
    282 
    283 The following is a quick introduction to the \CFA language, specifically tailored to the features needed to support concurrency.
    284 Extended versions and explanation of the following code examples are available at the \CFA website~\cite{Cforall} or in Moss~\etal~\cite{Moss18}.
    285 
    286 \CFA is an extension of ISO-C, and hence, supports all C paradigms.
    287 %It is a non-object-oriented system-language, meaning most of the major abstractions have either no runtime overhead or can be opted out easily.
    288 Like C, the basics of \CFA revolve around structures and routines.
    289 Virtually all of the code generated by the \CFA translator respects C memory layouts and calling conventions.
    290 While \CFA is not an object-oriented language, lacking the concept of a receiver (\eg @this@) and nominal inheritance-relationships, C does have a notion of objects: ``region of data storage in the execution environment, the contents of which can represent values''~\cite[3.15]{C11}.
    291 While some \CFA features are common in object-oriented programming-languages, they are an independent capability allowing \CFA to adopt them while retaining a procedural paradigm.
    292 
    293 
    294 \subsection{References}
    295 
    296 \CFA provides multi-level rebindable references, as an alternative to pointers, which significantly reduces syntactic noise.
    297 \begin{cfa}
    298 int x = 1, y = 2, z = 3;
    299 int * p1 = &x, ** p2 = &p1,  *** p3 = &p2,      $\C{// pointers to x}$
    300         `&` r1 = x,  `&&` r2 = r1,  `&&&` r3 = r2;      $\C{// references to x}$
    301 int * p4 = &z, `&` r4 = z;
    302 
    303 *p1 = 3; **p2 = 3; ***p3 = 3;       // change x
    304 r1 =  3;     r2 = 3;      r3 = 3;        // change x: implicit dereferences *r1, **r2, ***r3
    305 **p3 = &y; *p3 = &p4;                // change p1, p2
    306 `&`r3 = &y; `&&`r3 = &`&`r4;             // change r1, r2: cancel implicit dereferences (&*)**r3, (&(&*)*)*r3, &(&*)r4
    307 \end{cfa}
    308 A reference is a handle to an object, like a pointer, but is automatically dereferenced by the specified number of levels.
    309 Referencing (address-of @&@) a reference variable cancels one of the implicit dereferences, until there are no more implicit references, after which normal expression behaviour applies.
    310 
    311 
    312 \subsection{\texorpdfstring{\protect\lstinline{with} Statement}{with Statement}}
    313 \label{s:WithStatement}
    314 
    315 Heterogeneous data is aggregated into a structure/union.
    316 To reduce syntactic noise, \CFA provides a @with@ statement (see Pascal~\cite[\S~4.F]{Pascal}) to elide aggregate field-qualification by opening a scope containing the field identifiers.
    317 \begin{cquote}
    318 \vspace*{-\baselineskip}%???
    319 \lstDeleteShortInline@%
    320 \begin{cfa}
    321 struct S { char c; int i; double d; };
    322 struct T { double m, n; };
    323 // multiple aggregate parameters
    324 \end{cfa}
    325 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}}
    326 \begin{cfa}
    327 void f( S & s, T & t ) {
    328         `s.`c; `s.`i; `s.`d;
    329         `t.`m; `t.`n;
    330 }
    331 \end{cfa}
    332 &
    333 \begin{cfa}
    334 void f( S & s, T & t ) `with ( s, t )` {
    335         c; i; d;                // no qualification
    336         m; n;
    337 }
    338 \end{cfa}
    339 \end{tabular}
    340 \lstMakeShortInline@%
    341 \end{cquote}
    342 Object-oriented programming languages only provide implicit qualification for the receiver.
    343 
    344 In detail, the @with@ statement has the form:
    345 \begin{cfa}
    346 $\emph{with-statement}$:
    347         'with' '(' $\emph{expression-list}$ ')' $\emph{compound-statement}$
    348 \end{cfa}
    349 and may appear as the body of a routine or nested within a routine body.
    350 Each expression in the expression-list provides a type and object.
    351 The type must be an aggregate type.
    352 (Enumerations are already opened.)
    353 The object is the implicit qualifier for the open structure-fields.
    354 All expressions in the expression list are open in parallel within the compound statement, which is different from Pascal, which nests the openings from left to right.
    355 
    356 
    357 \subsection{Overloading}
    358 
    359 \CFA maximizes the ability to reuse names via overloading to aggressively address the naming problem.
    360 Both variables and routines may be overloaded, where selection is based on types, and number of returns (as in Ada~\cite{Ada}) and arguments.
    361 \begin{cquote}
    362 \vspace*{-\baselineskip}%???
    363 \lstDeleteShortInline@%
    364 \begin{cfa}
    365 // selection based on type
    366 \end{cfa}
    367 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}}
    368 \begin{cfa}
    369 const short int `MIN` = -32768;
    370 const int `MIN` = -2147483648;
    371 const long int `MIN` = -9223372036854775808L;
    372 \end{cfa}
    373 &
    374 \begin{cfa}
    375 short int si = `MIN`;
    376 int i = `MIN`;
    377 long int li = `MIN`;
    378 \end{cfa}
    379 \end{tabular}
    380 \begin{cfa}
    381 // selection based on type and number of parameters
    382 \end{cfa}
    383 \begin{tabular}{@{}l@{\hspace{2.7\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}}
    384 \begin{cfa}
    385 void `f`( void );
    386 void `f`( char );
    387 void `f`( int, double );
    388 \end{cfa}
    389 &
    390 \begin{cfa}
    391 `f`();
    392 `f`( 'a' );
    393 `f`( 3, 5.2 );
    394 \end{cfa}
    395 \end{tabular}
    396 \begin{cfa}
    397 // selection based on type and number of returns
    398 \end{cfa}
    399 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}|@{\hspace{2\parindentlnth}}l@{}}
    400 \begin{cfa}
    401 char `f`( int );
    402 double `f`( int );
    403 [char, double] `f`( int );
    404 \end{cfa}
    405 &
    406 \begin{cfa}
    407 char c = `f`( 3 );
    408 double d = `f`( 3 );
    409 [d, c] = `f`( 3 );
    410 \end{cfa}
    411 \end{tabular}
    412 \lstMakeShortInline@%
    413 \end{cquote}
    414 Overloading is important for \CFA concurrency since the runtime system relies on creating different types to represent concurrency objects.
    415 Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions to prevent name clashes.
    416 As seen in Section~\ref{basics}, routine @main@ is heavily overloaded.
    417 
    418 Variable overloading is useful in the parallel semantics of the @with@ statement for fields with the same name:
    419 \begin{cfa}
    420 struct S { int `i`; int j; double m; } s;
    421 struct T { int `i`; int k; int m; } t;
    422 with ( s, t ) {
    423         j + k;                                                                  $\C{// unambiguous, s.j + t.k}$
    424         m = 5.0;                                                                $\C{// unambiguous, s.m = 5.0}$
    425         m = 1;                                                                  $\C{// unambiguous, t.m = 1}$
    426         int a = m;                                                              $\C{// unambiguous, a = t.m }$
    427         double b = m;                                                   $\C{// unambiguous, b = s.m}$
    428         int c = `s.i` + `t.i`;                                  $\C{// unambiguous, qualification}$
    429         (double)m;                                                              $\C{// unambiguous, cast s.m}$
    430 }
    431 \end{cfa}
    432 For parallel semantics, both @s.i@ and @t.i@ are visible with the same type, so only @i@ is ambiguous without qualification.
    433 
    434 
    435 \subsection{Operators}
    436 
    437 Overloading also extends to operators.
    438 Operator-overloading syntax creates a routine name with an operator symbol and question marks for the operands:
    439 \begin{cquote}
    440 \lstDeleteShortInline@%
    441 \begin{tabular}{@{}ll@{\hspace{\parindentlnth}}|@{\hspace{\parindentlnth}}l@{}}
    442 \begin{cfa}
    443 int ++? (int op);
    444 int ?++ (int op);
    445 int `?+?` (int op1, int op2);
    446 int ?<=?(int op1, int op2);
    447 int ?=? (int & op1, int op2);
    448 int ?+=?(int & op1, int op2);
    449 \end{cfa}
    450 &
    451 \begin{cfa}
    452 // unary prefix increment
    453 // unary postfix increment
    454 // binary plus
    455 // binary less than
    456 // binary assignment
    457 // binary plus-assignment
    458 \end{cfa}
    459 &
    460 \begin{cfa}
    461 struct S { int i, j; };
    462 S `?+?`( S op1, S op2) { // add two structures
    463         return (S){op1.i + op2.i, op1.j + op2.j};
    464 }
    465 S s1 = {1, 2}, s2 = {2, 3}, s3;
    466 s3 = s1 `+` s2;         // compute sum: s3 == {2, 5}
    467 \end{cfa}
    468 \end{tabular}
    469 \lstMakeShortInline@%
    470 \end{cquote}
    471 While concurrency does not use operator overloading directly, it provides an introduction for the syntax of constructors.
    472 
    473 
    474 \subsection{Parametric Polymorphism}
    475 \label{s:ParametricPolymorphism}
    476 
    477 The signature feature of \CFA is parametric-polymorphic routines~\cite{} with routines generalized using a @forall@ clause (giving the language its name), which allow separately compiled routines to support generic usage over multiple types.
    478 For example, the following sum routine works for any type that supports construction from 0 and addition \commenttd{constructors have not been introduced yet.}:
    479 \begin{cfa}
    480 forall( otype T | { void `?{}`( T *, zero_t ); T `?+?`( T, T ); } ) // constraint type, 0 and +
    481 T sum( T a[$\,$], size_t size ) {
    482         `T` total = { `0` };                                    $\C{// initialize by 0 constructor}$
    483         for ( size_t i = 0; i < size; i += 1 )
    484                 total = total `+` a[i];                         $\C{// select appropriate +}$
    485         return total;
    486 }
    487 S sa[5];
    488 int i = sum( sa, 5 );                                           $\C{// use S's 0 construction and +}$
    489 \end{cfa}
    490 
    491 \CFA provides \newterm{traits} to name a group of type assertions, where the trait name allows specifying the same set of assertions in multiple locations, preventing repetition mistakes at each routine declaration:
    492 \begin{cfa}
    493 trait `sumable`( otype T ) {
    494         void `?{}`( T &, zero_t );                              $\C{// 0 literal constructor}$
    495         T `?+?`( T, T );                                                $\C{// assortment of additions}$
    496         T ?+=?( T &, T );
    497         T ++?( T & );
    498         T ?++( T & );
    499 };
    500 forall( otype T `| sumable( T )` )                      $\C{// use trait}$
    501 T sum( T a[$\,$], size_t size );
    502 \end{cfa}
    503 
    504 Assertions can be @otype@ or @dtype@.
    505 @otype@ refers to a ``complete'' object, \ie an object has a size, default constructor, copy constructor, destructor and an assignment operator.
    506 @dtype@ only guarantees an object has a size and alignment.
    507 
    508 Using the return type for discrimination, it is possible to write a type-safe @alloc@ based on the C @malloc@:
    509 \begin{cfa}
    510 forall( dtype T | sized(T) ) T * alloc( void ) { return (T *)malloc( sizeof(T) ); }
    511 int * ip = alloc();                                                     $\C{// select type and size from left-hand side}$
    512 double * dp = alloc();
    513 struct S {...} * sp = alloc();
    514 \end{cfa}
    515 where the return type supplies the type/size of the allocation, which is impossible in most type systems.
    516 
    517 
    518 \subsection{Constructors / Destructors}
    519 
    520 Object lifetime is a challenge in non-managed programming languages.
    521 \CFA responds with \CC-like constructors and destructors:
    522 \begin{cfa}
    523 struct VLA { int len, * data; };                        $\C{// variable length array of integers}$
    524 void ?{}( VLA & vla ) with ( vla ) { len = 10;  data = alloc( len ); }  $\C{// default constructor}$
    525 void ?{}( VLA & vla, int size, char fill ) with ( vla ) { len = size;  data = alloc( len, fill ); } // initialization
    526 void ?{}( VLA & vla, VLA other ) { vla.len = other.len;  vla.data = other.data; } $\C{// copy, shallow}$
    527 void ^?{}( VLA & vla ) with ( vla ) { free( data ); } $\C{// destructor}$
    528 {
    529         VLA  x,            y = { 20, 0x01 },     z = y; $\C{// z points to y}$
    530         //    x{};         y{ 20, 0x01 };          z{ z, y };
    531         ^x{};                                                                   $\C{// deallocate x}$
    532         x{};                                                                    $\C{// reallocate x}$
    533         z{ 5, 0xff };                                                   $\C{// reallocate z, not pointing to y}$
    534         ^y{};                                                                   $\C{// deallocate y}$
    535         y{ x };                                                                 $\C{// reallocate y, points to x}$
    536         x{};                                                                    $\C{// reallocate x, not pointing to y}$
    537         //  ^z{};  ^y{};  ^x{};
    538 }
    539 \end{cfa}
    540 Like \CC, construction is implicit on allocation (stack/heap) and destruction is implicit on deallocation.
    541 The object and all their fields are constructed/destructed.
    542 \CFA also provides @new@ and @delete@, which behave like @malloc@ and @free@, in addition to constructing and destructing objects:
    543 \begin{cfa}
    544 {       struct S s = {10};                                              $\C{// allocation, call constructor}$
    545         ...
    546 }                                                                                       $\C{// deallocation, call destructor}$
    547 struct S * s = new();                                           $\C{// allocation, call constructor}$
    548 ...
    549 delete( s );                                                            $\C{// deallocation, call destructor}$
    550 \end{cfa}
    551 \CFA concurrency uses object lifetime as a means of synchronization and/or mutual exclusion.
    552 
    553 
    554 \section{Concurrency Basics}\label{basics}
    555 
    556 At its core, concurrency is based on multiple call-stacks and scheduling threads executing on these stacks.
    557 Multiple call stacks (or contexts) and a single thread of execution, called \newterm{coroutining}~\cite{Conway63,Marlin80}, does \emph{not} imply concurrency~\cite[\S~2]{Buhr05a}.
    558 In coroutining, the single thread is self-scheduling across the stacks, so execution is deterministic, \ie given fixed inputs, the execution path to the outputs is fixed and predictable.
    559 A \newterm{stackless} coroutine executes on the caller's stack~\cite{Python} but this approach is restrictive, \eg preventing modularization and supporting only iterator/generator-style programming;
    560 a \newterm{stackful} coroutine executes on its own stack, allowing full generality.
    561 Only stackful coroutines are a stepping-stone to concurrency.
    562 
    563 The transition to concurrency, even for execution with a single thread and multiple stacks, occurs when coroutines also context switch to a scheduling oracle, introducing non-determinism from the coroutine perspective~\cite[\S~3]{Buhr05a}.
    564 Therefore, a minimal concurrency system is possible using coroutines (see Section \ref{coroutine}) in conjunction with a scheduler to decide where to context switch next.
    565 The resulting execution system now follows a cooperative threading-model, called \newterm{non-preemptive scheduling}.
    566 
    567 Because the scheduler is special, it can either be a stackless or stackful coroutine. \commenttd{I dislike this sentence, it seems to imply 1-step vs 2-step but also seems to say that some kind of coroutine is required, which is not the case.}
    568 For stackless, the scheduler performs scheduling on the stack of the current coroutine and switches directly to the next coroutine, so there is one context switch.
    569 For stackful, the current coroutine switches to the scheduler, which performs scheduling, and it then switches to the next coroutine, so there are two context switches.
    570 A stackful scheduler is often used for simplicity and security, even though there is a slightly higher runtime-cost. \commenttd{I'm not a fan of the fact that we don't quantify this but yet imply it is negligible.}
    571 
    572 Regardless of the approach used, a subset of concurrency related challenges start to appear.
    573 For the complete set of concurrency challenges to occur, the missing feature is \newterm{preemption}, where context switching occurs randomly between any two instructions, often based on a timer interrupt, called \newterm{preemptive scheduling}.
    574 While a scheduler introduces uncertainty in the order of execution, preemption introduces uncertainty where context switches occur.
    575 Interestingly, uncertainty is necessary for the runtime (operating) system to give the illusion of parallelism on a single processor and increase performance on multiple processors.
    576 The reason is that only the runtime has complete knowledge about resources and how to best utilize them.
    577 However, the introduction of unrestricted non-determinism results in the need for \newterm{mutual exclusion} and \newterm{synchronization} to restrict non-determinism for correctness;
    578 otherwise, it is impossible to write meaningful programs.
    579 Optimal performance in concurrent applications is often obtained by having as much non-determinism as correctness allows.
    580 
    581 
    582 \subsection{\protect\CFA's Thread Building Blocks}
    583 
    584 An important missing feature in C is threading\footnote{While the C11 standard defines a ``threads.h'' header, it is minimal and defined as optional.
    585 As such, library support for threading is far from widespread.
    586 At the time of writing the paper, neither \protect\lstinline|gcc| nor \protect\lstinline|clang| support ``threads.h'' in their standard libraries.}.
    587 In modern programming languages, a lack of threading is unacceptable~\cite{Sutter05, Sutter05b}, and therefore existing and new programming languages must have tools for writing efficient concurrent programs to take advantage of parallelism.
    588 As an extension of C, \CFA needs to express these concepts in a way that is as natural as possible to programmers familiar with imperative languages.
    589 Furthermore, because C is a system-level language, programmers expect to choose precisely which features they need and which cost they are willing to pay.
    590 Hence, concurrent programs should be written using high-level mechanisms, and only step down to lower-level mechanisms when performance bottlenecks are encountered.
    591 
    592 
    593 \subsection{Coroutines: A Stepping Stone}\label{coroutine}
    594 
    595 While the focus of this discussion is concurrency and parallelism, it is important to address coroutines, which are a significant building block of a concurrency system.
    596 Coroutines are generalized routines allowing execution to be temporarily suspended and later resumed.
    597 Hence, unlike a normal routine, a coroutine may not terminate when it returns to its caller, allowing it to be restarted with the values and execution location present at the point of suspension.
    598 This capability is accomplished via the coroutine's stack, where suspend/resume context switch among stacks.
    599 Because threading design-challenges are present in coroutines, their design effort is relevant, and this effort can be easily exposed to programmers giving them a useful new programming paradigm because a coroutine handles the class of problems that need to retain state between calls, \eg plugins, device drivers, and finite-state machines.
    600 Therefore, the core \CFA coroutine-API has two fundamental features: independent call-stacks and @suspend@/@resume@ operations.
    601 
    602 For example, a problem made easier with coroutines is unbounded generators, \eg generating an infinite sequence of Fibonacci numbers, where Figure~\ref{f:C-fibonacci} shows conventional approaches for writing a Fibonacci generator in C.
    603 \begin{displaymath}
    604 \mathsf{fib}(n) = \left \{
    605 \begin{array}{ll}
    606 0                                       & n = 0         \\
    607 1                                       & n = 1         \\
    608 \mathsf{fib}(n-1) + \mathsf{fib}(n-2)   & n \ge 2       \\
    609 \end{array}
    610 \right.
    611 \end{displaymath}
    612 Figure~\ref{f:GlobalVariables} illustrates the following problems:
    613 unique unencapsulated global variables necessary to retain state between calls;
    614 only one Fibonacci generator;
    615 execution state must be explicitly retained via explicit state variables.
    616 Figure~\ref{f:ExternalState} addresses these issues:
    617 unencapsulated program global variables become encapsulated structure variables;
    618 unique global variables are replaced by multiple Fibonacci objects;
    619 explicit execution state is removed by precomputing the first two Fibonacci numbers and returning $\mathsf{fib}(n-2)$.
     304This paper discusses the design philosophy and implementation of advanced language-level control-flow and concurrent/parallel features in \CFA~\cite{Moss18,Cforall} and its runtime, which is written entirely in \CFA.
     305\CFA is a modern, polymorphic, non-object-oriented\footnote{
     306\CFA has features often associated with object-oriented programming languages, such as constructors, destructors, virtuals and simple inheritance.
     307However, functions \emph{cannot} be nested in structures, so there is no lexical binding between a structure and set of functions (member/method) implemented by an implicit \lstinline@this@ (receiver) parameter.},
     308backwards-compatible extension of the C programming language.
     309In many ways, \CFA is to C as Scala~\cite{Scala} is to Java, providing a \emph{research vehicle} for new typing and control-flow capabilities on top of a highly popular programming language allowing immediate dissemination.
     310Within the \CFA framework, new control-flow features are created from scratch because ISO \Celeven defines only a subset of the \CFA extensions, where the overlapping features are concurrency~\cite[\S~7.26]{C11}.
     311However, \Celeven concurrency is largely wrappers for a subset of the pthreads library~\cite{Butenhof97,Pthreads}, and \Celeven and pthreads concurrency is simple, based on thread fork/join in a function and mutex/condition locks, which is low-level and error-prone;
     312no high-level language concurrency features are defined.
     313Interestingly, almost a decade after publication of the \Celeven standard, neither gcc-8, clang-9 nor msvc-19 (most recent versions) support the \Celeven include @threads.h@, indicating little interest in the C11 concurrency approach (possibly because of the effort to add concurrency to \CC).
     314Finally, while the \Celeven standard does not state a threading model, the historical association with pthreads suggests implementations would adopt kernel-level threading (1:1)~\cite{ThreadModel}.
     315
     316In contrast, there has been a renewed interest during the past decade in user-level (M:N, green) threading in old and new programming languages.
     317As multi-core hardware became available in the 1980/90s, both user and kernel threading were examined.
     318Kernel threading was chosen, largely because of its simplicity and fit with the simpler operating systems and hardware architectures at the time, which gave it a performance advantage~\cite{Drepper03}.
     319Libraries like pthreads were developed for C, and the Solaris operating-system switched from user (JDK 1.1~\cite{JDK1.1}) to kernel threads.
     320As a result, languages like Java, Scala, Objective-C~\cite{obj-c-book}, \CCeleven~\cite{C11}, and C\#~\cite{Csharp} adopt the 1:1 kernel-threading model, with a variety of presentation mechanisms.
     321From 2000 onwards, languages like Go~\cite{Go}, Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, D~\cite{D}, and \uC~\cite{uC++,uC++book} have championed the M:N user-threading model, and many user-threading libraries have appeared~\cite{Qthreads,MPC,Marcel}, including putting green threads back into Java~\cite{Quasar}.
     322The main argument for user-level threading is that it is lighter weight than kernel threading (locking and context switching do not cross the kernel boundary), so there is less restriction on programming styles that encourage large numbers of threads performing medium work units to facilitate load balancing by the runtime~\cite{Verch12}.
     323As well, user-threading facilitates a simpler concurrency approach using thread objects that leverage sequential patterns versus events with call-backs~\cite{Adya02,vonBehren03}.
     324Finally, performant user-threading implementations (both time and space) meet or exceed direct kernel-threading implementations, while achieving the programming advantages of high concurrency levels and safety.
     325
     326A further effort over the past two decades is the development of language memory models to deal with the conflict between language features and compiler/hardware optimizations, \ie some language features are unsafe in the presence of aggressive sequential optimizations~\cite{Buhr95a,Boehm05}.
     327The consequence is that a language must provide sufficient tools to program around safety issues, as inline and library code is all sequential to the compiler.
     328One solution is low-level qualifiers and functions (\eg @volatile@ and atomics) allowing \emph{programmers} to explicitly write safe (race-free~\cite{Boehm12}) programs.
     329A safer solution is high-level language constructs so the \emph{compiler} knows the optimization boundaries, and hence, provides implicit safety.
     330This problem is best known with respect to concurrency, but applies to other complex control-flow, like exceptions\footnote{
     331\CFA exception handling will be presented in a separate paper.
     332The key feature that dovetails with this paper is nonlocal exceptions allowing exceptions to be raised across stacks, with synchronous exceptions raised among coroutines and asynchronous exceptions raised among threads, similar to that in \uC~\cite[\S~5]{uC++}
     333} and coroutines.
     334Finally, language solutions allow matching constructs with language paradigm, \ie imperative and functional languages often have different presentations of the same concept to fit their programming model.
     335
     336Finally, it is important for a language to provide safety over performance \emph{as the default}, allowing careful reduction of safety for performance when necessary.
     337Two concurrency violations of this philosophy are \emph{spurious wakeup} (random wakeup~\cite[\S~8]{Buhr05a}) and \emph{barging}\footnote{
     338The notion of competitive succession instead of direct handoff, \ie a lock owner releases the lock and an arriving thread acquires it ahead of preexisting waiter threads.
     339} (signals-as-hints~\cite[\S~8]{Buhr05a}), where one is a consequence of the other, \ie once there is spurious wakeup, signals-as-hints follow.
     340However, spurious wakeup is \emph{not} a foundational concurrency property~\cite[\S~8]{Buhr05a}, it is a performance design choice.
     341Similarly, signals-as-hints are often a performance decision.
     342We argue removing spurious wakeup and signals-as-hints makes concurrent programming significantly safer because it removes local non-determinism and matches with programmer expectation.
     343(Author experience teaching concurrency is that students are highly confused by these semantics.)
     344Clawing back performance, when local non-determinism is unimportant, should be an option not the default.
     345
     346\begin{comment}
     347Most augmented traditional (Fortran 18~\cite{Fortran18}, Cobol 14~\cite{Cobol14}, Ada 12~\cite{Ada12}, Java 11~\cite{Java11}) and new languages (Go~\cite{Go}, Rust~\cite{Rust}, and D~\cite{D}), except \CC, diverge from C with different syntax and semantics, only interoperate indirectly with C, and are not systems languages, for those with managed memory.
     348As a result, there is a significant learning curve to move to these languages, and C legacy-code must be rewritten.
     349While \CC, like \CFA, takes an evolutionary approach to extend C, \CC's constantly growing complex and interdependent features-set (\eg objects, inheritance, templates, etc.) mean idiomatic \CC code is difficult to use from C, and C programmers must expend significant effort learning \CC.
     350Hence, rewriting and retraining costs for these languages, even \CC, are prohibitive for companies with a large C software-base.
     351\CFA with its orthogonal feature-set, its high-performance runtime, and direct access to all existing C libraries circumvents these problems.
     352\end{comment}
     353
     354\CFA embraces user-level threading, language extensions for advanced control-flow, and safety as the default.
     355We present comparative examples so the reader can judge if the \CFA control-flow extensions are better and safer than those in other concurrent, imperative programming languages, and perform experiments to show the \CFA runtime is competitive with other similar mechanisms.
     356The main contributions of this work are:
     357\begin{itemize}[topsep=3pt,itemsep=1pt]
     358\item
     359language-level generators, coroutines and user-level threading, which respect the expectations of C programmers.
     360\item
     361monitor synchronization without barging, and the ability to safely acquire multiple monitors \emph{simultaneously} (deadlock free), while seamlessly integrating these capabilities with all monitor synchronization mechanisms.
     362\item
     363providing statically type-safe interfaces that integrate with the \CFA polymorphic type-system and other language features.
     364% \item
     365% library extensions for executors, futures, and actors built on the basic mechanisms.
     366\item
     367a runtime system with no spurious wakeup.
     368\item
     369a dynamic partitioning mechanism to segregate the execution environment for specialized requirements.
     370% \item
     371% a non-blocking I/O library
     372\item
     373experimental results showing comparable performance of the new features with similar mechanisms in other programming languages.
     374\end{itemize}
     375
     376Section~\ref{s:StatefulFunction} begins advanced control by introducing sequential functions that retain data and execution state between calls, which produces constructs @generator@ and @coroutine@.
     377Section~\ref{s:Concurrency} begins concurrency, or how to create (fork) and destroy (join) a thread, which produces the @thread@ construct.
     378Section~\ref{s:MutualExclusionSynchronization} discusses the two mechanisms to restrict nondeterminism when controlling shared access to resources (mutual exclusion) and timing relationships among threads (synchronization).
     379Section~\ref{s:Monitor} shows how both mutual exclusion and synchronization are safely embedded in the @monitor@ and @thread@ constructs.
     380Section~\ref{s:CFARuntimeStructure} describes the large-scale mechanism to structure (cluster) threads and virtual processors (kernel threads).
     381Section~\ref{s:Performance} uses a series of microbenchmarks to compare \CFA threading with pthreads, Java OpenJDK-9, Go 1.12.6 and \uC 7.0.0.
     382
     383
     384\section{Stateful Function}
     385\label{s:StatefulFunction}
     386
     387The stateful function is an old idea~\cite{Conway63,Marlin80} that is new again~\cite{C++20Coroutine19}, where execution is temporarily suspended and later resumed, \eg plugin, device driver, finite-state machine.
     388Hence, a stateful function may not end when it returns to its caller, allowing it to be restarted with the data and execution location present at the point of suspension.
     389This capability is accomplished by retaining a data/execution \emph{closure} between invocations.
     390If the closure is fixed size, we call it a \emph{generator} (or \emph{stackless}), and its control flow is restricted, \eg suspending outside the generator is prohibited.
     391If the closure is variable size, we call it a \emph{coroutine} (or \emph{stackful}), and as the name implies, often implemented with a separate stack with no programming restrictions.
     392Hence, refactoring a stackless coroutine may require changing it to stackful.
     393A foundational property of all \emph{stateful functions} is that resume/suspend \emph{do not} cause incremental stack growth, \ie resume/suspend operations are remembered through the closure not the stack.
     394As well, activating a stateful function is \emph{asymmetric} or \emph{symmetric}, identified by resume/suspend (no cycles) and resume/resume (cycles).
     395A fixed closure activated by modified call/return is faster than a variable closure activated by context switching.
     396Additionally, any storage management for the closure (especially in unmanaged languages, \ie no garbage collection) must also be factored into design and performance.
     397Therefore, selecting between stackless and stackful semantics is a tradeoff between programming requirements and performance, where stackless is faster and stackful is more general.
     398Note, creation cost is amortized across usage, so activation cost is usually the dominant factor.
    620399
    621400\begin{figure}
    622401\centering
    623 \newbox\myboxA
    624402\begin{lrbox}{\myboxA}
    625403\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    626 `int f1, f2, state = 1;`   // single global variables
    627 int fib() {
    628         int fn;
    629         `switch ( state )` {  // explicit execution state
    630           case 1: fn = 0;  f1 = fn;  state = 2;  break;
    631           case 2: fn = 1;  f2 = f1;  f1 = fn;  state = 3;  break;
    632           case 3: fn = f1 + f2;  f2 = f1;  f1 = fn;  break;
    633         }
     404typedef struct {
     405        int fn1, fn;
     406} Fib;
     407#define FibCtor { 1, 0 }
     408int fib( Fib * f ) {
     409
     410
     411
     412        int fn = f->fn; f->fn = f->fn1;
     413                f->fn1 = f->fn + fn;
    634414        return fn;
     415
    635416}
    636417int main() {
    637 
    638         for ( int i = 0; i < 10; i += 1 ) {
    639                 printf( "%d\n", fib() );
    640         }
     418        Fib f1 = FibCtor, f2 = FibCtor;
     419        for ( int i = 0; i < 10; i += 1 )
     420                printf( "%d %d\n",
     421                           fib( &f1 ), fib( &f2 ) );
    641422}
    642423\end{cfa}
    643424\end{lrbox}
    644425
    645 \newbox\myboxB
    646426\begin{lrbox}{\myboxB}
    647427\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    648 #define FIB_INIT `{ 0, 1 }`
    649 typedef struct { int f2, f1; } Fib;
    650 int fib( Fib * f ) {
    651 
    652         int ret = f->f2;
    653         int fn = f->f1 + f->f2;
    654         f->f2 = f->f1; f->f1 = fn;
    655 
    656         return ret;
     428`generator` Fib {
     429        int fn1, fn;
     430};
     431
     432void `main(Fib & fib)` with(fib) {
     433
     434        [fn1, fn] = [1, 0];
     435        for () {
     436                `suspend;`
     437                [fn1, fn] = [fn, fn + fn1];
     438
     439        }
    657440}
    658441int main() {
    659         Fib f1 = FIB_INIT, f2 = FIB_INIT;
    660         for ( int i = 0; i < 10; i += 1 ) {
    661                 printf( "%d %d\n", fib( &f1 ), fib( &f2 ) );
     442        Fib f1, f2;
     443        for ( 10 )
     444                sout | `resume( f1 )`.fn
     445                         | `resume( f2 )`.fn;
     446}
     447\end{cfa}
     448\end{lrbox}
     449
     450\begin{lrbox}{\myboxC}
     451\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     452typedef struct {
     453        int fn1, fn;  void * `next`;
     454} Fib;
     455#define FibCtor { 1, 0, NULL }
     456Fib * comain( Fib * f ) {
     457        if ( f->next ) goto *f->next;
     458        f->next = &&s1;
     459        for ( ;; ) {
     460                return f;
     461          s1:; int fn = f->fn + f->fn1;
     462                        f->fn1 = f->fn; f->fn = fn;
    662463        }
    663464}
     465int main() {
     466        Fib f1 = FibCtor, f2 = FibCtor;
     467        for ( int i = 0; i < 10; i += 1 )
     468                printf("%d %d\n",comain(&f1)->fn,
     469                                 comain(&f2)->fn);
     470}
    664471\end{cfa}
    665472\end{lrbox}
    666473
    667 \subfloat[3 States: global variables]{\label{f:GlobalVariables}\usebox\myboxA}
    668 \qquad
    669 \subfloat[1 State: external variables]{\label{f:ExternalState}\usebox\myboxB}
    670 \caption{C Fibonacci Implementations}
    671 \label{f:C-fibonacci}
     474\subfloat[C asymmetric generator]{\label{f:CFibonacci}\usebox\myboxA}
     475\hspace{3pt}
     476\vrule
     477\hspace{3pt}
     478\subfloat[\CFA asymmetric generator]{\label{f:CFAFibonacciGen}\usebox\myboxB}
     479\hspace{3pt}
     480\vrule
     481\hspace{3pt}
     482\subfloat[C generator implementation]{\label{f:CFibonacciSim}\usebox\myboxC}
     483\caption{Fibonacci (output) asymmetric generator}
     484\label{f:FibonacciAsymmetricGenerator}
    672485
    673486\bigskip
    674487
    675 \newbox\myboxA
    676488\begin{lrbox}{\myboxA}
    677489\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    678 `coroutine` Fib { int fn; };
    679 void main( Fib & fib ) with( fib ) {
    680         int f1, f2;
    681         fn = 0;  f1 = fn;  `suspend()`;
    682         fn = 1;  f2 = f1;  f1 = fn;  `suspend()`;
    683         for ( ;; ) {
    684                 fn = f1 + f2;  f2 = f1;  f1 = fn;  `suspend()`;
     490`generator Fmt` {
     491        char ch;
     492        int g, b;
     493};
     494void ?{}( Fmt & fmt ) { `resume(fmt);` } // constructor
     495void ^?{}( Fmt & f ) with(f) { $\C[1.75in]{// destructor}$
     496        if ( g != 0 || b != 0 ) sout | nl; }
     497void `main( Fmt & f )` with(f) {
     498        for () { $\C{// until destructor call}$
     499                for ( ; g < 5; g += 1 ) { $\C{// groups}$
     500                        for ( ; b < 4; b += 1 ) { $\C{// blocks}$
     501                                `suspend;` $\C{// wait for character}$
     502                                while ( ch == '\n' ) `suspend;` // ignore
     503                                sout | ch;                                              // newline
     504                        } sout | " ";  // block spacer
     505                } sout | nl; // group newline
    685506        }
    686507}
    687 int next( Fib & fib ) with( fib ) {
    688         `resume( fib );`
    689         return fn;
    690 }
    691508int main() {
    692         Fib f1, f2;
    693         for ( int i = 1; i <= 10; i += 1 ) {
    694                 sout | next( f1 ) | next( f2 ) | endl;
     509        Fmt fmt; $\C{// fmt constructor called}$
     510        for () {
     511                sin | fmt.ch; $\C{// read into generator}$
     512          if ( eof( sin ) ) break;
     513                `resume( fmt );`
    695514        }
    696 }
     515
     516} $\C{// fmt destructor called}\CRT$
    697517\end{cfa}
    698518\end{lrbox}
    699 \newbox\myboxB
     519
    700520\begin{lrbox}{\myboxB}
    701521\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    702 `coroutine` Fib { int ret; };
    703 void main( Fib & f ) with( fib ) {
    704         int fn, f1 = 1, f2 = 0;
     522typedef struct {
     523        void * next;
     524        char ch;
     525        int g, b;
     526} Fmt;
     527void comain( Fmt * f ) {
     528        if ( f->next ) goto *f->next;
     529        f->next = &&s1;
    705530        for ( ;; ) {
    706                 ret = f2;
    707 
    708                 fn = f1 + f2;  f2 = f1;  f1 = fn; `suspend();`
     531                for ( f->g = 0; f->g < 5; f->g += 1 ) {
     532                        for ( f->b = 0; f->b < 4; f->b += 1 ) {
     533                                return;
     534                          s1:;  while ( f->ch == '\n' ) return;
     535                                printf( "%c", f->ch );
     536                        } printf( " " );
     537                } printf( "\n" );
    709538        }
    710539}
    711 int next( Fib & fib ) with( fib ) {
    712         `resume( fib );`
    713         return ret;
    714 }
    715 
    716 
    717 
    718 
    719 
    720 
     540int main() {
     541        Fmt fmt = { NULL };  comain( &fmt ); // prime
     542        for ( ;; ) {
     543                scanf( "%c", &fmt.ch );
     544          if ( feof( stdin ) ) break;
     545                comain( &fmt );
     546        }
     547        if ( fmt.g != 0 || fmt.b != 0 ) printf( "\n" );
     548}
    721549\end{cfa}
    722550\end{lrbox}
    723 \subfloat[3 States, internal variables]{\label{f:Coroutine3States}\usebox\myboxA}
    724 \qquad\qquad
    725 \subfloat[1 State, internal variables]{\label{f:Coroutine1State}\usebox\myboxB}
    726 \caption{\CFA Coroutine Fibonacci Implementations}
    727 \label{f:fibonacci-cfa}
     551
     552\subfloat[\CFA asymmetric generator]{\label{f:CFAFormatGen}\usebox\myboxA}
     553\hspace{3pt}
     554\vrule
     555\hspace{3pt}
     556\subfloat[C generator simulation]{\label{f:CFormatSim}\usebox\myboxB}
     557\hspace{3pt}
     558\caption{Formatter (input) asymmetric generator}
     559\label{f:FormatterAsymmetricGenerator}
    728560\end{figure}
    729561
    730 Using a coroutine, it is possible to express the Fibonacci formula directly without any of the C problems.
    731 Figure~\ref{f:Coroutine3States} creates a @coroutine@ type:
    732 \begin{cfa}
    733 `coroutine` Fib { int fn; };
    734 \end{cfa}
    735 which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface routines @next@.
    736 Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main.
    737 The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code has the three suspend points, representing the three states in the Fibonacci formula, to context switch back to the caller's resume.
    738 The interface routine @next@, takes a Fibonacci instance and context switches to it using @resume@;
    739 on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned.
    740 The first @resume@ is special because it cocalls the coroutine at its coroutine main and allocates the stack;
    741 when the coroutine main returns, its stack is deallocated.
    742 Hence, @Fib@ is an object at creation, transitions to a coroutine on its first resume, and transitions back to an object when the coroutine main finishes.
    743 Figure~\ref{f:Coroutine1State} shows the coroutine version of the C version in Figure~\ref{f:ExternalState}.
    744 Coroutine generators are called \newterm{output coroutines} because values are only returned.
    745 
    746 Figure~\ref{f:CFAFmt} shows an \newterm{input coroutine}, @Format@, for restructuring text into groups of characters of fixed-size blocks.
    747 For example, the input of the left is reformatted into the output on the right.
    748 \begin{quote}
     562Stateful functions appear as generators, coroutines, and threads, where presentations are based on function objects or pointers~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}.
     563For example, Python presents generators as a function object:
     564\begin{python}
     565def Gen():
     566        ... `yield val` ...
     567gen = Gen()
     568for i in range( 10 ):
     569        print( next( gen ) )
     570\end{python}
     571Boost presents coroutines in terms of four functor object-types:
     572\begin{cfa}
     573asymmetric_coroutine<>::pull_type
     574asymmetric_coroutine<>::push_type
     575symmetric_coroutine<>::call_type
     576symmetric_coroutine<>::yield_type
     577\end{cfa}
     578and many languages present threading using function pointers, @pthreads@~\cite{Butenhof97}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}, \eg pthreads:
     579\begin{cfa}
     580void * rtn( void * arg ) { ... }
     581int i = 3, rc;
     582pthread_t t; $\C{// thread id}$
     583`rc = pthread_create( &t, rtn, (void *)i );` $\C{// create and initialized task, type-unsafe input parameter}$
     584\end{cfa}
     585% void mycor( pthread_t cid, void * arg ) {
     586%       int * value = (int *)arg;                               $\C{// type unsafe, pointer-size only}$
     587%       // thread body
     588% }
     589% int main() {
     590%       int input = 0, output;
     591%       coroutine_t cid = coroutine_create( &mycor, (void *)&input ); $\C{// type unsafe, pointer-size only}$
     592%       coroutine_resume( cid, (void *)input, (void **)&output ); $\C{// type unsafe, pointer-size only}$
     593% }
     594\CFA's preferred presentation model for generators/coroutines/threads is a hybrid of objects and functions, with an object-oriented flavour.
     595Essentially, the generator/coroutine/thread function is semantically coupled with a generator/coroutine/thread custom type.
     596The custom type solves several issues, while accessing the underlying mechanisms used by the custom types is still allowed.
     597
     598
     599\subsection{Generator}
     600
     601Stackless generators have the potential to be very small and fast, \ie as small and fast as function call/return for both creation and execution.
     602The \CFA goal is to achieve this performance target, possibly at the cost of some semantic complexity.
     603A series of different kinds of generators and their implementation demonstrate how this goal is accomplished.
     604
     605Figure~\ref{f:FibonacciAsymmetricGenerator} shows an unbounded asymmetric generator for an infinite sequence of Fibonacci numbers written in C and \CFA, with a simple C implementation for the \CFA version.
     606This generator is an \emph{output generator}, producing a new result on each resumption.
     607To compute Fibonacci, the previous two values in the sequence are retained to generate the next value, \ie @fn1@ and @fn@, plus the execution location where control restarts when the generator is resumed, \ie top or middle.
     608An additional requirement is the ability to create an arbitrary number of generators (of any kind), \ie retaining one state in global variables is insufficient;
     609hence, state is retained in a closure between calls.
     610Figure~\ref{f:CFibonacci} shows the C approach of manually creating the closure in structure @Fib@, and multiple instances of this closure provide multiple Fibonacci generators.
     611The C version only has the middle execution state because the top execution state is declaration initialization.
     612Figure~\ref{f:CFAFibonacciGen} shows the \CFA approach, which also has a manual closure, but replaces the structure with a custom \CFA @generator@ type.
     613This generator type is then connected to a function that \emph{must be named \lstinline|main|},\footnote{
     614The name \lstinline|main| has special meaning in C, specifically the function where a program starts execution.
     615Hence, overloading this name for other starting points (generator/coroutine/thread) is a logical extension.}
      616called a \emph{generator main}, which takes as its only parameter a reference to the generator type.
     617The generator main contains @suspend@ statements that suspend execution without ending the generator versus @return@.
     618For the Fibonacci generator-main,\footnote{
     619The \CFA \lstinline|with| opens an aggregate scope making its fields directly accessible, like Pascal \lstinline|with|, but using parallel semantics.
     620Multiple aggregates may be opened.}
     621the top initialization state appears at the start and the middle execution state is denoted by statement @suspend@.
     622Any local variables in @main@ \emph{are not retained} between calls;
     623hence local variables are only for temporary computations \emph{between} suspends.
     624All retained state \emph{must} appear in the generator's type.
     625As well, generator code containing a @suspend@ cannot be refactored into a helper function called by the generator, because @suspend@ is implemented via @return@, so a return from the helper function goes back to the current generator not the resumer.
     626The generator is started by calling function @resume@ with a generator instance, which begins execution at the top of the generator main, and subsequent @resume@ calls restart the generator at its point of last suspension.
     627Resuming an ended (returned) generator is undefined.
     628Function @resume@ returns its argument generator so it can be cascaded in an expression, in this case to print the next Fibonacci value @fn@ computed in the generator instance.
     629Figure~\ref{f:CFibonacciSim} shows the C implementation of the \CFA generator only needs one additional field, @next@, to handle retention of execution state.
     630The computed @goto@ at the start of the generator main, which branches after the previous suspend, adds very little cost to the resume call.
     631Finally, an explicit generator type provides both design and performance benefits, such as multiple type-safe interface functions taking and returning arbitrary types.\footnote{
     632The \CFA operator syntax uses \lstinline|?| to denote operands, which allows precise definitions for pre, post, and infix operators, \eg \lstinline|++?|, \lstinline|?++|, and \lstinline|?+?|, in addition \lstinline|?\{\}| denotes a constructor, as in \lstinline|foo `f` = `\{`...`\}`|, \lstinline|^?\{\}| denotes a destructor, and \lstinline|?()| is \CC function call \lstinline|operator()|.
     633}%
     634\begin{cfa}
     635int ?()( Fib & fib ) { return `resume( fib )`.fn; } $\C[3.9in]{// function-call interface}$
     636int ?()( Fib & fib, int N ) { for ( N - 1 ) `fib()`; return `fib()`; } $\C{// use function-call interface to skip N values}$
     637double ?()( Fib & fib ) { return (int)`fib()` / 3.14159; } $\C{// different return type, cast prevents recursive call}\CRT$
     638sout | (int)f1() | (double)f1() | f2( 2 ); // alternative interface, cast selects call based on return type, step 2 values
     639\end{cfa}
     640Now, the generator can be a separately compiled opaque-type only accessed through its interface functions.
     641For contrast, Figure~\ref{f:PythonFibonacci} shows the equivalent Python Fibonacci generator, which does not use a generator type, and hence only has a single interface, but an implicit closure.
     642
     643Having to manually create the generator closure by moving local-state variables into the generator type is an additional programmer burden.
     644(This restriction is removed by the coroutine in Section~\ref{s:Coroutine}.)
     645This requirement follows from the generality of variable-size local-state, \eg local state with a variable-length array requires dynamic allocation because the array size is unknown at compile time.
     646However, dynamic allocation significantly increases the cost of generator creation/destruction and is a showstopper for embedded real-time programming.
     647But more importantly, the size of the generator type is tied to the local state in the generator main, which precludes separate compilation of the generator main, \ie a generator must be inlined or local state must be dynamically allocated.
     648With respect to safety, we believe static analysis can discriminate local state from temporary variables in a generator, \ie variable usage spanning @suspend@, and generate a compile-time error.
     649Finally, our current experience is that most generator problems have simple data state, including local state, but complex execution state, so the burden of creating the generator type is small.
     650As well, C programmers are not afraid of this kind of semantic programming requirement, if it results in very small, fast generators.
     651
     652Figure~\ref{f:CFAFormatGen} shows an asymmetric \newterm{input generator}, @Fmt@, for restructuring text into groups of characters of fixed-size blocks, \ie the input on the left is reformatted into the output on the right, where newlines are ignored.
     653\begin{center}
    749654\tt
    750655\begin{tabular}{@{}l|l@{}}
    751656\multicolumn{1}{c|}{\textbf{\textrm{input}}} & \multicolumn{1}{c}{\textbf{\textrm{output}}} \\
    752 abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
     657\begin{tabular}[t]{@{}ll@{}}
     658abcdefghijklmnopqrstuvwxyz \\
     659abcdefghijklmnopqrstuvwxyz
     660\end{tabular}
    753661&
    754662\begin{tabular}[t]{@{}lllll@{}}
     
    758666\end{tabular}
    759667\end{tabular}
    760 \end{quote}
    761 The example takes advantage of resuming a coroutine in the constructor to prime the loops so the first character sent for formatting appears inside the nested loops.
    762 The destruction provides a newline if formatted text ends with a full line.
    763 Figure~\ref{f:CFmt} shows the C equivalent formatter, where the loops of the coroutine are flatten (linearized) and rechecked on each call because execution location is not retained between calls.
     668\end{center}
     669The example takes advantage of resuming a generator in the constructor to prime the loops so the first character sent for formatting appears inside the nested loops.
      670The destructor provides a newline if formatted text ends with a full line.
     671Figure~\ref{f:CFormatSim} shows the C implementation of the \CFA input generator with one additional field and the computed @goto@.
     672For contrast, Figure~\ref{f:PythonFormatter} shows the equivalent Python format generator with the same properties as the Fibonacci generator.
     673
     674Figure~\ref{f:DeviceDriverGen} shows a \emph{killer} asymmetric generator, a device-driver, because device drivers caused 70\%-85\% of failures in Windows/Linux~\cite{Swift05}.
      675Device drivers follow the pattern of simple data state but complex execution state, \ie a finite state-machine (FSM) parsing a protocol.
     676For example, the following protocol:
     677\begin{center}
     678\ldots\, STX \ldots\, message \ldots\, ESC ETX \ldots\, message \ldots\, ETX 2-byte crc \ldots
     679\end{center}
     680is a network message beginning with the control character STX, ending with an ETX, and followed by a 2-byte cyclic-redundancy check.
     681Control characters may appear in a message if preceded by an ESC.
     682When a message byte arrives, it triggers an interrupt, and the operating system services the interrupt by calling the device driver with the byte read from a hardware register.
     683The device driver returns a status code of its current state, and when a complete message is obtained, the operating system knows the message is in the message buffer.
     684Hence, the device driver is an input/output generator.
     685
     686Note, the cost of creating and resuming the device-driver generator, @Driver@, is virtually identical to call/return, so performance in an operating-system kernel is excellent.
      687As well, the data state is small, where variables @byte@ and @msg@ are communication variables for passing in message bytes and returning the message, and variables @lnth@, @crc@, and @sum@ are local variables that must be retained between calls and are manually hoisted into the generator type.
     688% Manually, detecting and hoisting local-state variables is easy when the number is small.
     689In contrast, the execution state is large, with one @resume@ and seven @suspend@s.
     690Hence, the key benefits of the generator are correctness, safety, and maintenance because the execution states are transcribed directly into the programming language rather than using a table-driven approach.
     691Because FSMs can be complex and frequently occur in important domains, direct generator support is important in a system programming language.
    764692
    765693\begin{figure}
     
    767695\newbox\myboxA
    768696\begin{lrbox}{\myboxA}
     697\begin{python}[aboveskip=0pt,belowskip=0pt]
     698def Fib():
     699        fn1, fn = 0, 1
     700        while True:
     701                `yield fn1`
     702                fn1, fn = fn, fn1 + fn
     703f1 = Fib()
     704f2 = Fib()
     705for i in range( 10 ):
     706        print( next( f1 ), next( f2 ) )
     707
     708
     709
     710
     711
     712
     713\end{python}
     714\end{lrbox}
     715
     716\newbox\myboxB
     717\begin{lrbox}{\myboxB}
     718\begin{python}[aboveskip=0pt,belowskip=0pt]
     719def Fmt():
     720        try:
     721                while True:
     722                        for g in range( 5 ):
     723                                for b in range( 4 ):
     724                                        print( `(yield)`, end='' )
     725                                print( '  ', end='' )
     726                        print()
     727        except GeneratorExit:
     728                if g != 0 | b != 0:
     729                        print()
     730fmt = Fmt()
     731`next( fmt )`                    # prime, next prewritten
     732for i in range( 41 ):
     733        `fmt.send( 'a' );`      # send to yield
     734\end{python}
     735\end{lrbox}
     736\subfloat[Fibonacci]{\label{f:PythonFibonacci}\usebox\myboxA}
     737\hspace{3pt}
     738\vrule
     739\hspace{3pt}
     740\subfloat[Formatter]{\label{f:PythonFormatter}\usebox\myboxB}
     741\caption{Python generator}
     742\label{f:PythonGenerator}
     743
     744\bigskip
     745
     746\begin{tabular}{@{}l|l@{}}
    769747\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    770 `coroutine` Format {
    771         char ch;   // used for communication
    772         int g, b;  // global because used in destructor
     748enum Status { CONT, MSG, ESTX,
     749                                ELNTH, ECRC };
     750`generator` Driver {
     751        Status status;
     752        unsigned char byte, * msg; // communication
     753        unsigned int lnth, sum;      // local state
     754        unsigned short int crc;
    773755};
    774 void main( Format & fmt ) with( fmt ) {
    775         for ( ;; ) {
    776                 for ( g = 0; g < 5; g += 1 ) {      // group
    777                         for ( b = 0; b < 4; b += 1 ) { // block
    778                                 `suspend();`
    779                                 sout | ch;              // separator
     756void ?{}( Driver & d, char * m ) { d.msg = m; }
     757Status next( Driver & d, char b ) with( d ) {
     758        byte = b; `resume( d );` return status;
     759}
     760void main( Driver & d ) with( d ) {
     761        enum { STX = '\002', ESC = '\033',
     762                        ETX = '\003', MaxMsg = 64 };
     763  msg: for () { // parse message
     764                status = CONT;
     765                lnth = 0; sum = 0;
     766                while ( byte != STX ) `suspend;`
     767          emsg: for () {
     768                        `suspend;` // process byte
     769\end{cfa}
     770&
     771\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     772                        choose ( byte ) { // switch with implicit break
     773                          case STX:
     774                                status = ESTX; `suspend;` continue msg;
     775                          case ETX:
     776                                break emsg;
     777                          case ESC:
     778                                `suspend;`
    780779                        }
    781                         sout | "  ";               // separator
     780                        if ( lnth >= MaxMsg ) { // buffer full ?
     781                                status = ELNTH; `suspend;` continue msg; }
     782                        msg[lnth++] = byte;
     783                        sum += byte;
    782784                }
    783                 sout | endl;
     785                msg[lnth] = '\0'; // terminate string
     786                `suspend;`
     787                crc = byte << 8;
     788                `suspend;`
     789                status = (crc | byte) == sum ? MSG : ECRC;
     790                `suspend;`
    784791        }
    785792}
    786 void ?{}( Format & fmt ) { `resume( fmt );` }
    787 void ^?{}( Format & fmt ) with( fmt ) {
    788         if ( g != 0 || b != 0 ) sout | endl;
    789 }
    790 void format( Format & fmt ) {
    791         `resume( fmt );`
     793\end{cfa}
     794\end{tabular}
     795\caption{Device-driver generator for communication protocol}
     796\label{f:DeviceDriverGen}
     797\end{figure}
     798
     799Figure~\ref{f:CFAPingPongGen} shows a symmetric generator, where the generator resumes another generator, forming a resume/resume cycle.
     800(The trivial cycle is a generator resuming itself.)
     801This control flow is similar to recursion for functions but without stack growth.
     802The steps for symmetric control-flow are creating, executing, and terminating the cycle.
     803Constructing the cycle must deal with definition-before-use to close the cycle, \ie, the first generator must know about the last generator, which is not within scope.
     804(This issue occurs for any cyclic data structure.)
     805% The example creates all the generators and then assigns the partners that form the cycle.
     806% Alternatively, the constructor can assign the partners as they are declared, except the first, and the first-generator partner is set after the last generator declaration to close the cycle.
     807Once the cycle is formed, the program main resumes one of the generators, and the generators can then traverse an arbitrary cycle using @resume@ to activate partner generator(s).
     808Terminating the cycle is accomplished by @suspend@ or @return@, both of which go back to the stack frame that started the cycle (program main in the example).
     809The starting stack-frame is below the last active generator because the resume/resume cycle does not grow the stack.
      810Also, since local variables are not retained in the generator function, it does not contain any objects with destructors that must be called, so the cost is the same as a function return.
     811Destructor cost occurs when the generator instance is deallocated, which is easily controlled by the programmer.
     812
     813Figure~\ref{f:CPingPongSim} shows the implementation of the symmetric generator, where the complexity is the @resume@, which needs an extension to the calling convention to perform a forward rather than backward jump.
     814This jump-starts at the top of the next generator main to re-execute the normal calling convention to make space on the stack for its local variables.
     815However, before the jump, the caller must reset its stack (and any registers) equivalent to a @return@, but subsequently jump forward.
     816This semantics is basically a tail-call optimization, which compilers already perform.
     817The example shows the assembly code to undo the generator's entry code before the direct jump.
     818This assembly code depends on what entry code is generated, specifically if there are local variables and the level of optimization.
     819To provide this new calling convention requires a mechanism built into the compiler, which is beyond the scope of \CFA at this time.
     820Nevertheless, it is possible to hand generate any symmetric generators for proof of concept and performance testing.
     821A compiler could also eliminate other artifacts in the generator simulation to further increase performance, \eg LLVM has various coroutine support~\cite{CoroutineTS}, and \CFA can leverage this support should it fork @clang@.
     822
     823\begin{figure}
     824\centering
     825\begin{lrbox}{\myboxA}
     826\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     827`generator PingPong` {
     828        const char * name;
     829        int N;
     830        int i;                          // local state
     831        PingPong & partner; // rebindable reference
     832};
     833
     834void `main( PingPong & pp )` with(pp) {
     835        for ( ; i < N; i += 1 ) {
     836                sout | name | i;
     837                `resume( partner );`
     838        }
    792839}
    793840int main() {
    794         Format fmt;
    795         eof: for ( ;; ) {
    796                 sin | fmt.ch;
    797           if ( eof( sin ) ) break eof;
    798                 format( fmt );
     841        enum { N = 5 };
     842        PingPong ping = {"ping",N,0}, pong = {"pong",N,0};
     843        &ping.partner = &pong;  &pong.partner = &ping;
     844        `resume( ping );`
     845}
     846\end{cfa}
     847\end{lrbox}
     848
     849\begin{lrbox}{\myboxB}
     850\begin{cfa}[escapechar={},aboveskip=0pt,belowskip=0pt]
     851typedef struct PingPong {
     852        const char * name;
     853        int N, i;
     854        struct PingPong * partner;
     855        void * next;
     856} PingPong;
     857#define PPCtor(name, N) {name,N,0,NULL,NULL}
     858void comain( PingPong * pp ) {
     859        if ( pp->next ) goto *pp->next;
     860        pp->next = &&cycle;
     861        for ( ; pp->i < pp->N; pp->i += 1 ) {
     862                printf( "%s %d\n", pp->name, pp->i );
     863                asm( "mov  %0,%%rdi" : "=m" (pp->partner) );
     864                asm( "mov  %rdi,%rax" );
     865                asm( "popq %rbx" );
     866                asm( "jmp  comain" );
     867          cycle: ;
     868        }
     869}
     870\end{cfa}
     871\end{lrbox}
     872
     873\subfloat[\CFA symmetric generator]{\label{f:CFAPingPongGen}\usebox\myboxA}
     874\hspace{3pt}
     875\vrule
     876\hspace{3pt}
     877\subfloat[C generator simulation]{\label{f:CPingPongSim}\usebox\myboxB}
     878\hspace{3pt}
     879\caption{Ping-Pong symmetric generator}
     880\label{f:PingPongSymmetricGenerator}
     881\end{figure}
     882
     883Finally, part of this generator work was inspired by the recent \CCtwenty generator proposal~\cite{C++20Coroutine19} (which they call coroutines).
     884Our work provides the same high-performance asymmetric generators as \CCtwenty, and extends their work with symmetric generators.
     885An additional \CCtwenty generator feature allows @suspend@ and @resume@ to be followed by a restricted compound statement that is executed after the current generator has reset its stack but before calling the next generator, specified with \CFA syntax:
     886\begin{cfa}
     887... suspend`{ ... }`;
     888... resume( C )`{ ... }` ...
     889\end{cfa}
     890Since the current generator's stack is released before calling the compound statement, the compound statement can only reference variables in the generator's type.
     891This feature is useful when a generator is used in a concurrent context to ensure it is stopped before releasing a lock in the compound statement, which might immediately allow another thread to resume the generator.
     892Hence, this mechanism provides a general and safe handoff of the generator among competing threads.
     893
     894
     895\subsection{Coroutine}
     896\label{s:Coroutine}
     897
     898Stackful coroutines extend generator semantics, \ie there is an implicit closure and @suspend@ may appear in a helper function called from the coroutine main.
     899A coroutine is specified by replacing @generator@ with @coroutine@ for the type.
      900Coroutine generality results in higher cost for creation, due to dynamic stack allocation; execution, due to context switching among stacks; and termination, due to possible stack unwinding and dynamic stack deallocation.
     901A series of different kinds of coroutines and their implementations demonstrate how coroutines extend generators.
     902
     903First, the previous generator examples are converted to their coroutine counterparts, allowing local-state variables to be moved from the generator type into the coroutine main.
     904\begin{description}
     905\item[Fibonacci]
     906Move the declaration of @fn1@ to the start of coroutine main.
     907\begin{cfa}[xleftmargin=0pt]
     908void main( Fib & fib ) with(fib) {
     909        `int fn1;`
     910\end{cfa}
     911\item[Formatter]
     912Move the declaration of @g@ and @b@ to the for loops in the coroutine main.
     913\begin{cfa}[xleftmargin=0pt]
     914for ( `g`; 5 ) {
     915        for ( `b`; 4 ) {
     916\end{cfa}
     917\item[Device Driver]
     918Move the declaration of @lnth@ and @sum@ to their points of initialization.
     919\begin{cfa}[xleftmargin=0pt]
     920        status = CONT;
     921        `unsigned int lnth = 0, sum = 0;`
     922        ...
     923        `unsigned short int crc = byte << 8;`
     924\end{cfa}
     925\item[PingPong]
     926Move the declaration of @i@ to the for loop in the coroutine main.
     927\begin{cfa}[xleftmargin=0pt]
     928void main( PingPong & pp ) with(pp) {
     929        for ( `i`; N ) {
     930\end{cfa}
     931\end{description}
     932It is also possible to refactor code containing local-state and @suspend@ statements into a helper function, like the computation of the CRC for the device driver.
     933\begin{cfa}
     934unsigned int Crc() {
     935        `suspend;`
     936        unsigned short int crc = byte << 8;
     937        `suspend;`
     938        status = (crc | byte) == sum ? MSG : ECRC;
     939        return crc;
     940}
     941\end{cfa}
     942A call to this function is placed at the end of the driver's coroutine-main.
     943For complex finite-state machines, refactoring is part of normal program abstraction, especially when code is used in multiple places.
     944Again, this complexity is usually associated with execution state rather than data state.
     945
     946\begin{comment}
     947Figure~\ref{f:Coroutine3States} creates a @coroutine@ type, @`coroutine` Fib { int fn; }@, which provides communication, @fn@, for the \newterm{coroutine main}, @main@, which runs on the coroutine stack, and possibly multiple interface functions, \eg @next@.
     948Like the structure in Figure~\ref{f:ExternalState}, the coroutine type allows multiple instances, where instances of this type are passed to the (overloaded) coroutine main.
     949The coroutine main's stack holds the state for the next generation, @f1@ and @f2@, and the code represents the three states in the Fibonacci formula via the three suspend points, to context switch back to the caller's @resume@.
     950The interface function @next@, takes a Fibonacci instance and context switches to it using @resume@;
     951on restart, the Fibonacci field, @fn@, contains the next value in the sequence, which is returned.
     952The first @resume@ is special because it allocates the coroutine stack and cocalls its coroutine main on that stack;
     953when the coroutine main returns, its stack is deallocated.
     954Hence, @Fib@ is an object at creation, transitions to a coroutine on its first resume, and transitions back to an object when the coroutine main finishes.
     955Figure~\ref{f:Coroutine1State} shows the coroutine version of the C version in Figure~\ref{f:ExternalState}.
     956Coroutine generators are called \newterm{output coroutines} because values are only returned.
     957
     958\begin{figure}
     959\centering
     960\newbox\myboxA
     961% \begin{lrbox}{\myboxA}
     962% \begin{cfa}[aboveskip=0pt,belowskip=0pt]
     963% `int fn1, fn2, state = 1;`   // single global variables
     964% int fib() {
     965%       int fn;
     966%       `switch ( state )` {  // explicit execution state
     967%         case 1: fn = 0;  fn1 = fn;  state = 2;  break;
     968%         case 2: fn = 1;  fn2 = fn1;  fn1 = fn;  state = 3;  break;
     969%         case 3: fn = fn1 + fn2;  fn2 = fn1;  fn1 = fn;  break;
     970%       }
     971%       return fn;
     972% }
     973% int main() {
     974%
     975%       for ( int i = 0; i < 10; i += 1 ) {
     976%               printf( "%d\n", fib() );
     977%       }
     978% }
     979% \end{cfa}
     980% \end{lrbox}
     981\begin{lrbox}{\myboxA}
     982\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     983#define FibCtor { 0, 1 }
     984typedef struct { int fn1, fn; } Fib;
     985int fib( Fib * f ) {
     986
     987        int ret = f->fn1;
     988        f->fn1 = f->fn;
     989        f->fn = ret + f->fn;
     990        return ret;
     991}
     992
     993
     994
     995int main() {
     996        Fib f1 = FibCtor, f2 = FibCtor;
     997        for ( int i = 0; i < 10; i += 1 ) {
     998                printf( "%d %d\n",
     999                                fib( &f1 ), fib( &f2 ) );
    7991000        }
    8001001}
     
    8051006\begin{lrbox}{\myboxB}
    8061007\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    807 struct Format {
    808         char ch;
    809         int g, b;
    810 };
    811 void format( struct Format * fmt ) {
    812         if ( fmt->ch != -1 ) {      // not EOF ?
    813                 printf( "%c", fmt->ch );
    814                 fmt->b += 1;
    815                 if ( fmt->b == 4 ) {  // block
    816                         printf( "  " );      // separator
    817                         fmt->b = 0;
    818                         fmt->g += 1;
    819                 }
    820                 if ( fmt->g == 5 ) {  // group
    821                         printf( "\n" );     // separator
    822                         fmt->g = 0;
    823                 }
    824         } else {
    825                 if ( fmt->g != 0 || fmt->b != 0 ) printf( "\n" );
     1008`coroutine` Fib { int fn1; };
     1009void main( Fib & fib ) with( fib ) {
     1010        int fn;
     1011        [fn1, fn] = [0, 1];
     1012        for () {
     1013                `suspend;`
     1014                [fn1, fn] = [fn, fn1 + fn];
    8261015        }
    8271016}
     1017int ?()( Fib & fib ) with( fib ) {
     1018        return `resume( fib )`.fn1;
     1019}
    8281020int main() {
    829         struct Format fmt = { 0, 0, 0 };
    830         for ( ;; ) {
    831                 scanf( "%c", &fmt.ch );
    832           if ( feof( stdin ) ) break;
    833                 format( &fmt );
    834         }
    835         fmt.ch = -1;
    836         format( &fmt );
    837 }
     1021        Fib f1, f2;
     1022        for ( 10 ) {
     1023                sout | f1() | f2();
     1024}
     1025
     1026
    8381027\end{cfa}
    8391028\end{lrbox}
    840 \subfloat[\CFA Coroutine]{\label{f:CFAFmt}\usebox\myboxA}
     1029
     1030\newbox\myboxC
     1031\begin{lrbox}{\myboxC}
     1032\begin{python}[aboveskip=0pt,belowskip=0pt]
     1033
     1034def Fib():
     1035
     1036        fn1, fn = 0, 1
     1037        while True:
     1038                `yield fn1`
     1039                fn1, fn = fn, fn1 + fn
     1040
     1041
     1042// next prewritten
     1043
     1044
     1045f1 = Fib()
     1046f2 = Fib()
     1047for i in range( 10 ):
     1048        print( next( f1 ), next( f2 ) )
     1049
     1050
     1051
     1052\end{python}
     1053\end{lrbox}
     1054
     1055\subfloat[C]{\label{f:GlobalVariables}\usebox\myboxA}
     1056\hspace{3pt}
     1057\vrule
     1058\hspace{3pt}
     1059\subfloat[\CFA]{\label{f:ExternalState}\usebox\myboxB}
     1060\hspace{3pt}
     1061\vrule
     1062\hspace{3pt}
     1063\subfloat[Python]{\label{f:ExternalState}\usebox\myboxC}
     1064\caption{Fibonacci generator}
     1065\label{f:C-fibonacci}
     1066\end{figure}
     1067
     1068\bigskip
     1069
     1070\newbox\myboxA
     1071\begin{lrbox}{\myboxA}
     1072\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1073`coroutine` Fib { int fn; };
     1074void main( Fib & fib ) with( fib ) {
     1075        fn = 0;  int fn1 = fn; `suspend`;
     1076        fn = 1;  int fn2 = fn1;  fn1 = fn; `suspend`;
     1077        for () {
     1078                fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `suspend`; }
     1079}
     1080int next( Fib & fib ) with( fib ) { `resume( fib );` return fn; }
     1081int main() {
     1082        Fib f1, f2;
     1083        for ( 10 )
     1084                sout | next( f1 ) | next( f2 );
     1085}
     1086\end{cfa}
     1087\end{lrbox}
     1088\newbox\myboxB
     1089\begin{lrbox}{\myboxB}
     1090\begin{python}[aboveskip=0pt,belowskip=0pt]
     1091
     1092def Fibonacci():
     1093        fn = 0; fn1 = fn; `yield fn`  # suspend
     1094        fn = 1; fn2 = fn1; fn1 = fn; `yield fn`
     1095        while True:
     1096                fn = fn1 + fn2; fn2 = fn1; fn1 = fn; `yield fn`
     1097
     1098
     1099f1 = Fibonacci()
     1100f2 = Fibonacci()
     1101for i in range( 10 ):
     1102        print( `next( f1 )`, `next( f2 )` ) # resume
     1103
     1104\end{python}
     1105\end{lrbox}
     1106\subfloat[\CFA]{\label{f:Coroutine3States}\usebox\myboxA}
    8411107\qquad
    842 \subfloat[C Linearized]{\label{f:CFmt}\usebox\myboxB}
    843 \caption{Formatting text into lines of 5 blocks of 4 characters.}
    844 \label{f:fmt-line}
     1108\subfloat[Python]{\label{f:Coroutine1State}\usebox\myboxB}
     1109\caption{Fibonacci input coroutine, 3 states, internal variables}
     1110\label{f:cfa-fibonacci}
    8451111\end{figure}
    846 
    847 The previous examples are \newterm{asymmetric (semi) coroutine}s because one coroutine always calls a resuming routine for another coroutine, and the resumed coroutine always suspends back to its last resumer, similar to call/return for normal routines
    848 However, there is no stack growth because @resume@/@suspend@ context switch to existing stack-frames rather than create new ones.
    849 \newterm{Symmetric (full) coroutine}s have a coroutine call a resuming routine for another coroutine, which eventually forms a resuming-call cycle.
    850 (The trivial cycle is a coroutine resuming itself.)
    851 This control flow is similar to recursion for normal routines, but again there is no stack growth from the context switch.
     1112\end{comment}
    8521113
    8531114\begin{figure}
     
    8571118\begin{cfa}
    8581119`coroutine` Prod {
    859         Cons & c;
     1120        Cons & c;                       // communication
    8601121        int N, money, receipt;
    8611122};
    8621123void main( Prod & prod ) with( prod ) {
    8631124        // 1st resume starts here
    864         for ( int i = 0; i < N; i += 1 ) {
     1125        for ( i; N ) {
    8651126                int p1 = random( 100 ), p2 = random( 100 );
    866                 sout | p1 | " " | p2 | endl;
     1127                sout | p1 | " " | p2;
    8671128                int status = delivery( c, p1, p2 );
    868                 sout | " $" | money | endl | status | endl;
     1129                sout | " $" | money | nl | status;
    8691130                receipt += 1;
    8701131        }
    8711132        stop( c );
    872         sout | "prod stops" | endl;
     1133        sout | "prod stops";
    8731134}
    8741135int payment( Prod & prod, int money ) {
     
    8911152\begin{cfa}
    8921153`coroutine` Cons {
    893         Prod & p;
     1154        Prod & p;                       // communication
    8941155        int p1, p2, status;
    895         _Bool done;
     1156        bool done;
    8961157};
    8971158void ?{}( Cons & cons, Prod & p ) {
    898         &cons.p = &p;
     1159        &cons.p = &p; // reassignable reference
    8991160        cons.[status, done ] = [0, false];
    9001161}
    901 void ^?{}( Cons & cons ) {}
    9021162void main( Cons & cons ) with( cons ) {
    9031163        // 1st resume starts here
    9041164        int money = 1, receipt;
    9051165        for ( ; ! done; ) {
    906                 sout | p1 | " " | p2 | endl | " $" | money | endl;
     1166                sout | p1 | " " | p2 | nl | " $" | money;
    9071167                status += 1;
    9081168                receipt = payment( p, money );
    909                 sout | " #" | receipt | endl;
     1169                sout | " #" | receipt;
    9101170                money += 1;
    9111171        }
    912         sout | "cons stops" | endl;
     1172        sout | "cons stops";
    9131173}
    9141174int delivery( Cons & cons, int p1, int p2 ) {
     
    9211181        `resume( cons );`
    9221182}
     1183
    9231184\end{cfa}
    9241185\end{tabular}
    925 \caption{Producer / consumer: resume-resume cycle, bi-directional communication}
     1186\caption{Producer / consumer: resume-resume cycle, bidirectional communication}
    9261187\label{f:ProdCons}
    9271188\end{figure}
    9281189
    929 Figure~\ref{f:ProdCons} shows a producer/consumer symmetric-coroutine performing bi-directional communication.
    930 Since the solution involves a full-coroutining cycle, the program main creates one coroutine in isolation, passes this coroutine to its partner, and closes the cycle at the call to @start@.
    931 The @start@ routine communicates both the number of elements to be produced and the consumer into the producer's coroutine structure.
    932 Then the @resume@ to @prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it.
    933 @prod@'s coroutine main starts, creates local variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer.
     1190Figure~\ref{f:ProdCons} shows the ping-pong example in Figure~\ref{f:CFAPingPongGen} extended into a producer/consumer symmetric-coroutine performing bidirectional communication.
     1191This example is illustrative because both producer/consumer have two interface functions with @resume@s that suspend execution in these interface (helper) functions.
     1192The program main creates the producer coroutine, passes it to the consumer coroutine in its initialization, and closes the cycle at the call to @start@ along with the number of items to be produced.
     1193The first @resume@ of @prod@ creates @prod@'s stack with a frame for @prod@'s coroutine main at the top, and context switches to it.
     1194@prod@'s coroutine main starts, creates local-state variables that are retained between coroutine activations, and executes $N$ iterations, each generating two random values, calling the consumer to deliver the values, and printing the status returned from the consumer.
    9341195
    9351196The producer call to @delivery@ transfers values into the consumer's communication variables, resumes the consumer, and returns the consumer status.
    936 For the first resume, @cons@'s stack is initialized, creating local variables retained between subsequent activations of the coroutine.
    937 The consumer iterates until the @done@ flag is set, prints, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation).
    938 The call from the consumer to the @payment@ introduces the cycle between producer and consumer.
     1197On the first resume, @cons@'s stack is created and initialized, holding local-state variables retained between subsequent activations of the coroutine.
     1198The consumer iterates until the @done@ flag is set, prints the values delivered by the producer, increments status, and calls back to the producer via @payment@, and on return from @payment@, prints the receipt from the producer and increments @money@ (inflation).
     1199The call from the consumer to @payment@ introduces the cycle between producer and consumer.
    9391200When @payment@ is called, the consumer copies values into the producer's communication variable and a resume is executed.
    940 The context switch restarts the producer at the point where it was last context switched, so it continues in @delivery@ after the resume.
    941 
     1201The context switch restarts the producer at the point where it last context switched, so it continues in @delivery@ after the resume.
    9421202@delivery@ returns the status value in @prod@'s coroutine main, where the status is printed.
    9431203The loop then repeats calling @delivery@, where each call resumes the consumer coroutine.
     
    9451205The consumer increments and returns the receipt to the call in @cons@'s coroutine main.
    9461206The loop then repeats calling @payment@, where each call resumes the producer coroutine.
    947 
    948 After iterating $N$ times, the producer calls @stop@.
    949 The @done@ flag is set to stop the consumer's execution and a resume is executed.
    950 The context switch restarts @cons@ in @payment@ and it returns with the last receipt.
    951 The consumer terminates its loops because @done@ is true, its @main@ terminates, so @cons@ transitions from a coroutine back to an object, and @prod@ reactivates after the resume in @stop@.
    952 @stop@ returns and @prod@'s coroutine main terminates.
    953 The program main restarts after the resume in @start@.
    954 @start@ returns and the program main terminates.
    955 
    956 
    957 \subsection{Coroutine Implementation}
    958 
    959 A significant implementation challenge for coroutines (and threads, see section \ref{threads}) is adding extra fields and executing code after/before the coroutine constructor/destructor and coroutine main to create/initialize/de-initialize/destroy extra fields and the stack.
    960 There are several solutions to this problem and the chosen option forced the \CFA coroutine design.
    961 
    962 Object-oriented inheritance provides extra fields and code in a restricted context, but it requires programmers to explicitly perform the inheritance:
    963 \begin{cfa}
    964 struct mycoroutine $\textbf{\textsf{inherits}}$ baseCoroutine { ... }
    965 \end{cfa}
    966 and the programming language (and possibly its tool set, \eg debugger) may need to understand @baseCoroutine@ because of the stack.
    967 Furthermore, the execution of constructs/destructors is in the wrong order for certain operations, \eg for threads;
    968 \eg, if the thread is implicitly started, it must start \emph{after} all constructors, because the thread relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived.
    969 
    970 An alternatively is composition:
    971 \begin{cfa}
    972 struct mycoroutine {
    973         ... // declarations
     1207Figure~\ref{f:ProdConsRuntimeStacks} shows the runtime stacks of the program main, and the coroutine mains for @prod@ and @cons@ during the cycling.
     1208
     1209\begin{figure}
     1210\begin{center}
     1211\input{FullProdConsStack.pstex_t}
     1212\end{center}
     1213\vspace*{-10pt}
     1214\caption{Producer / consumer runtime stacks}
     1215\label{f:ProdConsRuntimeStacks}
     1216
     1217\medskip
     1218
     1219\begin{center}
     1220\input{FullCoroutinePhases.pstex_t}
     1221\end{center}
     1222\vspace*{-10pt}
     1223\caption{Ping / Pong coroutine steps}
     1224\label{f:PingPongFullCoroutineSteps}
     1225\end{figure}
     1226
      1227Terminating a coroutine cycle is more complex than a generator cycle, because it requires context switching to the program main's \emph{stack} to shut down the program, whereas generators started by the program main run on its stack.
      1228Furthermore, each deallocated coroutine must guarantee all destructors are run for objects allocated in the coroutine type \emph{and} allocated on the coroutine's stack at the point of suspension, which can be arbitrarily deep.
     1229When a coroutine's main ends, its stack is already unwound so any stack allocated objects with destructors have been finalized.
     1230The na\"{i}ve semantics for coroutine-cycle termination is to context switch to the last resumer, like executing a @suspend@/@return@ in a generator.
     1231However, for coroutines, the last resumer is \emph{not} implicitly below the current stack frame, as for generators, because each coroutine's stack is independent.
     1232Unfortunately, it is impossible to determine statically if a coroutine is in a cycle and unrealistic to check dynamically (graph-cycle problem).
     1233Hence, a compromise solution is necessary that works for asymmetric (acyclic) and symmetric (cyclic) coroutines.
     1234
     1235Our solution is to context switch back to the first resumer (starter) once the coroutine ends.
     1236This semantics works well for the most common asymmetric and symmetric coroutine usage patterns.
     1237For asymmetric coroutines, it is common for the first resumer (starter) coroutine to be the only resumer.
     1238All previous generators converted to coroutines have this property.
     1239For symmetric coroutines, it is common for the cycle creator to persist for the lifetime of the cycle.
     1240Hence, the starter coroutine is remembered on the first resume and ending the coroutine resumes the starter.
     1241Figure~\ref{f:ProdConsRuntimeStacks} shows this semantic by the dashed lines from the end of the coroutine mains: @prod@ starts @cons@ so @cons@ resumes @prod@ at the end, and the program main starts @prod@ so @prod@ resumes the program main at the end.
     1242For other scenarios, it is always possible to devise a solution with additional programming effort, such as forcing the cycle forward (backward) to a safe point before starting termination.
     1243
     1244The producer/consumer example does not illustrate the full power of the starter semantics because @cons@ always ends first.
     1245Assume generator @PingPong@ is converted to a coroutine.
     1246Figure~\ref{f:PingPongFullCoroutineSteps} shows the creation, starter, and cyclic execution steps of the coroutine version.
     1247The program main creates (declares) coroutine instances @ping@ and @pong@.
     1248Next, program main resumes @ping@, making it @ping@'s starter, and @ping@'s main resumes @pong@'s main, making it @pong@'s starter.
     1249Execution forms a cycle when @pong@ resumes @ping@, and cycles $N$ times.
     1250By adjusting $N$ for either @ping@/@pong@, it is possible to have either one finish first, instead of @pong@ always ending first.
     1251If @pong@ ends first, it resumes its starter @ping@ in its coroutine main, then @ping@ ends and resumes its starter the program main in function @start@.
     1252If @ping@ ends first, it resumes its starter the program main in function @start@.
     1253Regardless of the cycle complexity, the starter stack always leads back to the program main, but the stack can be entered at an arbitrary point.
     1254Once back at the program main, coroutines @ping@ and @pong@ are deallocated.
     1255For generators, deallocation runs the destructors for all objects in the generator type.
     1256For coroutines, deallocation deals with objects in the coroutine type and must also run the destructors for any objects pending on the coroutine's stack for any unterminated coroutine.
      1257Hence, if a coroutine's destructor detects the coroutine is not ended, it implicitly raises a cancellation exception (uncatchable exception) at the coroutine and resumes it so the cancellation exception can propagate to the root of the coroutine's stack, destroying all local variables on the stack.
      1258So the \CFA semantics for the generator and coroutine ensure both can be safely deallocated at any time, regardless of their current state, like any other aggregate object.
     1259Explicitly raising normal exceptions at another coroutine can replace flag variables, like @stop@, \eg @prod@ raises a @stop@ exception at @cons@ after it finishes generating values and resumes @cons@, which catches the @stop@ exception to terminate its loop.
     1260
     1261Finally, there is an interesting effect for @suspend@ with symmetric coroutines.
     1262A coroutine must retain its last resumer to suspend back because the resumer is on a different stack.
     1263These reverse pointers allow @suspend@ to cycle \emph{backwards}, which may be useful in certain cases.
     1264However, there is an anomaly if a coroutine resumes itself, because it overwrites its last resumer with itself, losing the ability to resume the last external resumer.
     1265To prevent losing this information, a self-resume does not overwrite the last resumer.
     1266
     1267
     1268\subsection{Generator / Coroutine Implementation}
     1269
     1270A significant implementation challenge for generators/coroutines (and threads in Section~\ref{s:threads}) is adding extra fields to the custom types and related functions, \eg inserting code after/before the coroutine constructor/destructor and @main@ to create/initialize/de-initialize/destroy any extra fields, \eg stack.
      1271There are several solutions to this problem, which follow from the object-oriented flavour of adopting custom types.
     1272
     1273For object-oriented languages, inheritance is used to provide extra fields and code via explicit inheritance:
     1274\begin{cfa}[morekeywords={class,inherits}]
     1275class myCoroutine inherits baseCoroutine { ... }
     1276\end{cfa}
     1277% The problem is that the programming language and its tool chain, \eg debugger, @valgrind@, need to understand @baseCoroutine@ because it infers special property, so type @baseCoroutine@ becomes a de facto keyword and all types inheriting from it are implicitly custom types.
     1278The problem is that some special properties are not handled by existing language semantics, \eg the execution of constructors/destructors is in the wrong order to implicitly start threads because the thread must start \emph{after} all constructors as it relies on a completely initialized object, but the inherited constructor runs \emph{before} the derived.
     1279Alternatives, such as explicitly starting threads as in Java, are repetitive and forgetting to call start is a common source of errors.
     1280An alternative is composition:
     1281\begin{cfa}
     1282struct myCoroutine {
     1283        ... // declaration/communication variables
    9741284        baseCoroutine dummy; // composition, last declaration
    9751285}
    9761286\end{cfa}
    977 which also requires an explicit declaration that must be the last one to ensure correct initialization order.
     1287which also requires an explicit declaration that must be last to ensure correct initialization order.
    9781288However, there is nothing preventing wrong placement or multiple declarations.
    9791289
    980 For coroutines as for threads, many implementations are based on routine pointers or routine objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}.
    981 For example, Boost implements coroutines in terms of four functor object-types:
    982 \begin{cfa}
    983 asymmetric_coroutine<>::pull_type
    984 asymmetric_coroutine<>::push_type
    985 symmetric_coroutine<>::call_type
    986 symmetric_coroutine<>::yield_type
    987 \end{cfa}
    988 Similarly, the canonical threading paradigm is often based on routine pointers, \eg @pthread@~\cite{pthreads}, \Csharp~\cite{Csharp}, Go~\cite{Go}, and Scala~\cite{Scala}.
    989 However, the generic thread-handle (identifier) is limited (few operations), unless it is wrapped in a custom type.
    990 \begin{cfa}
    991 void mycor( coroutine_t cid, void * arg ) {
    992         int * value = (int *)arg;                               $\C{// type unsafe, pointer-size only}$
    993         // Coroutine body
    994 }
    995 int main() {
    996         int input = 0, output;
    997         coroutine_t cid = coroutine_create( &mycor, (void *)&input ); $\C{// type unsafe, pointer-size only}$
    998         coroutine_resume( cid, (void *)input, (void **)&output ); $\C{// type unsafe, pointer-size only}$
    999 }
    1000 \end{cfa}
    1001 Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda-based coroutines adds very little.
    1002 
    1003 Note, the type @coroutine_t@ must be an abstract handle to the coroutine, because the coroutine descriptor and its stack are non-copyable.
    1004 Copying the coroutine descriptor results in copies being out of date with the current state of the stack.
    1005 Correspondingly, copying the stack results is copies being out of date with coroutine descriptor, and pointers in the stack being out of date to data on the stack.
    1006 (There is no mechanism in C to find all stack-specific pointers and update them as part of a copy.)
    1007 
    1008 The selected approach is to use language support by introducing a new kind of aggregate (structure):
    1009 \begin{cfa}
    1010 coroutine Fibonacci {
    1011         int fn; // communication variables
    1012 };
    1013 \end{cfa}
    1014 The @coroutine@ keyword means the compiler (and tool set) can find and inject code where needed.
    1015 The downside of this approach is that it makes coroutine a special case in the language.
    1016 Users wanting to extend coroutines or build their own for various reasons can only do so in ways offered by the language.
    1017 Furthermore, implementing coroutines without language supports also displays the power of a programming language.
    1018 While this is ultimately the option used for idiomatic \CFA code, coroutines and threads can still be constructed without using the language support.
    1019 The reserved keyword eases use for the common cases.
    1020 
    1021 Part of the mechanism to generalize coroutines is using a \CFA trait, which defines a coroutine as anything satisfying the trait @is_coroutine@, and this trait is used to restrict coroutine-manipulation routines:
     1290\CFA custom types make any special properties explicit to the language and its tool chain, \eg the language code-generator knows where to inject code
     1291% and when it is unsafe to perform certain optimizations,
     1292and IDEs using simple parsing can find and manipulate types with special properties.
     1293The downside of this approach is that it makes custom types a special case in the language.
     1294Users wanting to extend custom types or build their own can only do so in ways offered by the language.
     1295Furthermore, implementing custom types without language support may display the power of a programming language.
     1296\CFA blends the two approaches, providing custom type for idiomatic \CFA code, while extending and building new custom types is still possible, similar to Java concurrency with builtin and library.
     1297
     1298Part of the mechanism to generalize custom types is the \CFA trait~\cite[\S~2.3]{Moss18}, \eg the definition for custom-type @coroutine@ is anything satisfying the trait @is_coroutine@, and this trait both enforces and restricts the coroutine-interface functions.
    10221299\begin{cfa}
    10231300trait is_coroutine( `dtype` T ) {
     
    10251302        coroutine_desc * get_coroutine( T & );
    10261303};
    1027 forall( `dtype` T | is_coroutine(T) ) void suspend( T & );
    1028 forall( `dtype` T | is_coroutine(T) ) void resume( T & );
    1029 \end{cfa}
    1030 The @dtype@ property of the trait ensures the coroutine descriptor is non-copyable, so all coroutines must be passed by reference (pointer).
    1031 The routine definitions ensures there is a statically-typed @main@ routine that is the starting point (first stack frame) of a coroutine, and a mechanism to get (read) the currently executing coroutine handle.
    1032 The @main@ routine has no return value or additional parameters because the coroutine type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values versus fixed ones.
    1033 The generic routines @suspend@ and @resume@ can be redefined, but any object passed to them is a coroutine since it must satisfy the @is_coroutine@ trait to compile.
    1034 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine, and possibly redefining @suspend@ and @resume@.
    1035 The \CFA keyword @coroutine@ implicitly implements the getter and forward declarations required for implementing the coroutine main:
     1304forall( `dtype` T | is_coroutine(T) ) void $suspend$( T & ), resume( T & );
     1305\end{cfa}
     1306Note, copying generators/coroutines/threads is not meaningful.
     1307For example, both the resumer and suspender descriptors can have bidirectional pointers;
     1308copying these coroutines does not update the internal pointers so behaviour of both copies would be difficult to understand.
     1309Furthermore, two coroutines cannot logically execute on the same stack.
      1310A deep coroutine copy, which copies the stack, is also meaningless in an unmanaged language (no garbage collection), like C, because the stack may contain pointers to objects within it that require updating for the copy.
     1311The \CFA @dtype@ property provides no \emph{implicit} copying operations and the @is_coroutine@ trait provides no \emph{explicit} copying operations, so all coroutines must be passed by reference (pointer).
     1312The function definitions ensure there is a statically typed @main@ function that is the starting point (first stack frame) of a coroutine, and a mechanism to get (read) the coroutine descriptor from its handle.
     1313The @main@ function has no return value or additional parameters because the coroutine type allows an arbitrary number of interface functions with corresponding arbitrary typed input/output values versus fixed ones.
     1314The advantage of this approach is that users can easily create different types of coroutines, \eg changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ function, and possibly redefining \textsf{suspend} and @resume@.
     1315
     1316The \CFA custom-type @coroutine@ implicitly implements the getter and forward declarations for the coroutine main.
    10361317\begin{cquote}
    10371318\begin{tabular}{@{}ccc@{}}
     
    10691350\end{tabular}
    10701351\end{cquote}
    1071 The combination of these two approaches allows an easy and concise specification to coroutining (and concurrency) for normal users, while more advanced users have tighter control on memory layout and initialization.
    1072 
    1073 
    1074 \subsection{Thread Interface}
    1075 \label{threads}
    1076 
    1077 Both user and kernel threads are supported, where user threads provide concurrency and kernel threads provide parallelism.
    1078 Like coroutines and for the same design reasons, the selected approach for user threads is to use language support by introducing a new kind of aggregate (structure) and a \CFA trait:
     1352The combination of custom types and fundamental @trait@ description of these types allows a concise specification for programmers and tools, while more advanced programmers can have tighter control over memory layout and initialization.
     1353
     1354Figure~\ref{f:CoroutineMemoryLayout} shows different memory-layout options for a coroutine (where a task is similar).
      1355The coroutine handle is the @coroutine@ instance containing the programmer-specified global/communication variables shared across the interface functions.
     1356The coroutine descriptor contains all implicit declarations needed by the runtime, \eg @suspend@/@resume@, and can be part of the coroutine handle or separate.
     1357The coroutine stack can appear in a number of locations and be fixed or variable sized.
     1358Hence, the coroutine's stack could be a VLS\footnote{
     1359We are examining variable-sized structures (VLS), where fields can be variable-sized structures or arrays.
     1360Once allocated, a VLS is fixed sized.}
     1361on the allocating stack, provided the allocating stack is large enough.
     1362For a VLS stack allocation/deallocation is an inexpensive adjustment of the stack pointer, modulo any stack constructor costs (\eg initial frame setup).
     1363For heap stack allocation, allocation/deallocation is an expensive heap allocation (where the heap can be a shared resource), modulo any stack constructor costs.
     1364With heap stack allocation, it is also possible to use a split (segmented) stack calling convention, available with gcc and clang, so the stack is variable sized.
     1365Currently, \CFA supports stack/heap allocated descriptors but only fixed-sized heap allocated stacks.
     1366In \CFA debug-mode, the fixed-sized stack is terminated with a write-only page, which catches most stack overflows.
     1367Experience teaching concurrency with \uC~\cite{CS343} shows fixed-sized stacks are rarely an issue for students.
     1368Split-stack allocation is under development but requires recompilation of legacy code, which may be impossible.
     1369
     1370\begin{figure}
     1371\centering
     1372\input{corlayout.pstex_t}
     1373\caption{Coroutine memory layout}
     1374\label{f:CoroutineMemoryLayout}
     1375\end{figure}
     1376
     1377
     1378\section{Concurrency}
     1379\label{s:Concurrency}
     1380
     1381Concurrency is nondeterministic scheduling of independent sequential execution paths (threads), where each thread has its own stack.
     1382A single thread with multiple call stacks, \newterm{coroutining}~\cite{Conway63,Marlin80}, does \emph{not} imply concurrency~\cite[\S~2]{Buhr05a}.
     1383In coroutining, coroutines self-schedule the thread across stacks so execution is deterministic.
     1384(It is \emph{impossible} to generate a concurrency error when coroutining.)
     1385However, coroutines are a stepping stone towards concurrency.
     1386
      1387The transition to concurrency, even for a single thread with multiple stacks, occurs when coroutines context switch to a \newterm{scheduling coroutine}, introducing non-determinism from the coroutine perspective~\cite[\S~3]{Buhr05a}.
     1388Therefore, a minimal concurrency system requires coroutines \emph{in conjunction with a nondeterministic scheduler}.
     1389The resulting execution system now follows a cooperative threading model~\cite{Adya02,libdill}, called \newterm{non-preemptive scheduling}.
     1390Adding \newterm{preemption} introduces non-cooperative scheduling, where context switching occurs randomly between any two instructions often based on a timer interrupt, called \newterm{preemptive scheduling}.
     1391While a scheduler introduces uncertain execution among explicit context switches, preemption introduces uncertainty by introducing implicit context switches.
     1392Uncertainty gives the illusion of parallelism on a single processor and provides a mechanism to access and increase performance on multiple processors.
      1393The reason is that the scheduler/runtime have complete knowledge about resources and how to best utilize them.
     1394However, the introduction of unrestricted nondeterminism results in the need for \newterm{mutual exclusion} and \newterm{synchronization}, which restrict nondeterminism for correctness;
     1395otherwise, it is impossible to write meaningful concurrent programs.
     1396Optimal concurrent performance is often obtained by having as much nondeterminism as mutual exclusion and synchronization correctness allow.
     1397
      1398A scheduler can be either stackless or stackful.
     1399For stackless, the scheduler performs scheduling on the stack of the current coroutine and switches directly to the next coroutine, so there is one context switch.
     1400For stackful, the current coroutine switches to the scheduler, which performs scheduling, and it then switches to the next coroutine, so there are two context switches.
     1401The \CFA runtime uses a stackful scheduler for uniformity and security.
     1402
     1403
     1404\subsection{Thread}
     1405\label{s:threads}
     1406
     1407Threading needs the ability to start a thread and wait for its completion.
     1408A common API for this ability is @fork@ and @join@.
     1409\begin{cquote}
     1410\begin{tabular}{@{}lll@{}}
     1411\multicolumn{1}{c}{\textbf{Java}} & \multicolumn{1}{c}{\textbf{\Celeven}} & \multicolumn{1}{c}{\textbf{pthreads}} \\
     1412\begin{cfa}
     1413class MyTask extends Thread {...}
     1414mytask t = new MyTask(...);
     1415`t.start();` // start
     1416// concurrency
     1417`t.join();` // wait
     1418\end{cfa}
     1419&
     1420\begin{cfa}
     1421class MyTask { ... } // functor
     1422MyTask mytask;
     1423`thread t( mytask, ... );` // start
     1424// concurrency
     1425`t.join();` // wait
     1426\end{cfa}
     1427&
     1428\begin{cfa}
     1429void * rtn( void * arg ) {...}
     1430pthread_t t;  int i = 3;
     1431`pthread_create( &t, rtn, (void *)i );` // start
     1432// concurrency
     1433`pthread_join( t, NULL );` // wait
     1434\end{cfa}
     1435\end{tabular}
     1436\end{cquote}
     1437\CFA has a simpler approach using a custom @thread@ type and leveraging declaration semantics (allocation/deallocation), where threads implicitly @fork@ after construction and @join@ before destruction.
     1438\begin{cfa}
     1439thread MyTask {};
     1440void main( MyTask & this ) { ... }
     1441int main() {
     1442        MyTask team`[10]`; $\C[2.5in]{// allocate stack-based threads, implicit start after construction}$
     1443        // concurrency
     1444} $\C{// deallocate stack-based threads, implicit joins before destruction}$
     1445\end{cfa}
      1446This semantic ensures a thread is started and stopped exactly once, eliminating some programming errors, and scales to multiple threads for basic (termination) synchronization.
     1447For block allocation to arbitrary depth, including recursion, threads are created/destroyed in a lattice structure (tree with top and bottom).
     1448Arbitrary topologies are possible using dynamic allocation, allowing threads to outlive their declaration scope, identical to normal dynamic allocation.
     1449\begin{cfa}
     1450MyTask * factory( int N ) { ... return `anew( N )`; } $\C{// allocate heap-based threads, implicit start after construction}$
     1451int main() {
     1452        MyTask * team = factory( 10 );
     1453        // concurrency
     1454        `delete( team );` $\C{// deallocate heap-based threads, implicit joins before destruction}\CRT$
     1455}
     1456\end{cfa}
     1457
     1458Figure~\ref{s:ConcurrentMatrixSummation} shows concurrently adding the rows of a matrix and then totalling the subtotals sequentially, after all the row threads have terminated.
     1459The program uses heap-based threads because each thread needs different constructor values.
     1460(Python provides a simple iteration mechanism to initialize array elements to different values allowing stack allocation.)
     1461The allocation/deallocation pattern appears unusual because allocated objects are immediately deallocated without any intervening code.
     1462However, for threads, the deletion provides implicit synchronization, which is the intervening code.
     1463% While the subtotals are added in linear order rather than completion order, which slightly inhibits concurrency, the computation is restricted by the critical-path thread (\ie the thread that takes the longest), and so any inhibited concurrency is very small as totalling the subtotals is trivial.
     1464
     1465\begin{figure}
     1466\begin{cfa}
     1467`thread` Adder { int * row, cols, & subtotal; } $\C{// communication variables}$
     1468void ?{}( Adder & adder, int row[], int cols, int & subtotal ) {
     1469        adder.[ row, cols, &subtotal ] = [ row, cols, &subtotal ];
     1470}
     1471void main( Adder & adder ) with( adder ) {
     1472        subtotal = 0;
     1473        for ( c; cols ) { subtotal += row[c]; }
     1474}
     1475int main() {
     1476        const int rows = 10, cols = 1000;
     1477        int matrix[rows][cols], subtotals[rows], total = 0;
     1478        // read matrix
     1479        Adder * adders[rows];
     1480        for ( r; rows; ) { $\C{// start threads to sum rows}$
     1481                adders[r] = `new( matrix[r], cols, &subtotals[r] );`
     1482        }
     1483        for ( r; rows ) { $\C{// wait for threads to finish}$
     1484                `delete( adders[r] );` $\C{// termination join}$
     1485                total += subtotals[r]; $\C{// total subtotal}$
     1486        }
     1487        sout | total;
     1488}
     1489\end{cfa}
     1490\caption{Concurrent matrix summation}
     1491\label{s:ConcurrentMatrixSummation}
     1492\end{figure}
     1493
     1494
     1495\subsection{Thread Implementation}
     1496
     1497Threads in \CFA are user level run by runtime kernel threads (see Section~\ref{s:CFARuntimeStructure}), where user threads provide concurrency and kernel threads provide parallelism.
     1498Like coroutines, and for the same design reasons, \CFA provides a custom @thread@ type and a @trait@ to enforce and restrict the task-interface functions.
    10791499\begin{cquote}
    10801500\begin{tabular}{@{}c@{\hspace{3\parindentlnth}}c@{}}
    10811501\begin{cfa}
    10821502thread myThread {
    1083         // communication variables
     1503        ... // declaration/communication variables
    10841504};
    10851505
     
    10891509\begin{cfa}
    10901510trait is_thread( `dtype` T ) {
    1091       void main( T & );
    1092       thread_desc * get_thread( T & );
    1093       void ^?{}( T & `mutex` );
     1511        void main( T & );
     1512        thread_desc * get_thread( T & );
     1513        void ^?{}( T & `mutex` );
    10941514};
    10951515\end{cfa}
    10961516\end{tabular}
    10971517\end{cquote}
    1098 (The qualifier @mutex@ for the destructor parameter is discussed in Section~\ref{s:Monitors}.)
    1099 Like a coroutine, the statically-typed @main@ routine is the starting point (first stack frame) of a user thread.
    1100 The difference is that a coroutine borrows a thread from its caller, so the first thread resuming a coroutine creates an instance of @main@;
     1101 whereas, a user thread receives its own thread from the runtime system, which starts in @main@ at some point after the thread constructor is run.\footnote{
    1102 The \lstinline@main@ routine is already a special routine in C (where the program begins), so it is a natural extension of the semantics to use overloading to declare mains for different coroutines/threads (the normal main being the main of the initial thread).}
    1103 No return value or additional parameters are necessary for this routine because the task type allows an arbitrary number of interface routines with corresponding arbitrary typed input/output values.
    1104 
    1105 \begin{comment} % put in appendix with coroutine version ???
    1106 As such the @main@ routine of a thread can be defined as
    1107 \begin{cfa}
    1108 thread foo {};
    1109 
    1110 void main(foo & this) {
    1111         sout | "Hello World!" | endl;
    1112 }
    1113 \end{cfa}
    1114 
    1115 In this example, threads of type @foo@ start execution in the @void main(foo &)@ routine, which prints @"Hello World!".@ While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity.
    1116 With the static semantics it is trivial to write a thread type that takes a routine pointer as a parameter and executes it on its stack asynchronously.
    1117 \begin{cfa}
    1118 typedef void (*voidRtn)(int);
    1119 
    1120 thread RtnRunner {
    1121         voidRtn func;
    1122         int arg;
    1123 };
    1124 
    1125 void ?{}(RtnRunner & this, voidRtn inRtn, int arg) {
    1126         this.func = inRtn;
    1127         this.arg  = arg;
    1128 }
    1129 
    1130 void main(RtnRunner & this) {
    1131         // thread starts here and runs the routine
    1132         this.func( this.arg );
    1133 }
    1134 
    1135 void hello(/*unused*/ int) {
    1136         sout | "Hello World!" | endl;
    1137 }
    1138 
    1139 int main() {
    1140         RtnRunner f = {hello, 42};
    1141         return 0?
    1142 }
    1143 \end{cfa}
    1144 A consequence of the strongly typed approach to main is that memory layout of parameters and return values to/from a thread are now explicitly specified in the \textbf{api}.
    1145 \end{comment}
    1146 
    1147 For user threads to be useful, it must be possible to start and stop the underlying thread, and wait for it to complete execution.
    1148 While using an API such as @fork@ and @join@ is relatively common, such an interface is awkward and unnecessary.
    1149 A simple approach is to use allocation/deallocation principles, and have threads implicitly @fork@ after construction and @join@ before destruction.
    1150 \begin{cfa}
    1151 thread World {};
    1152 void main( World & this ) {
    1153         sout | "World!" | endl;
    1154 }
    1155 int main() {
    1156         World w`[10]`;                                                  $\C{// implicit forks after creation}$
    1157         sout | "Hello " | endl;                                 $\C{// "Hello " and 10 "World!" printed concurrently}$
    1158 }                                                                                       $\C{// implicit joins before destruction}$
    1159 \end{cfa}
    1160 This semantics ensures a thread is started and stopped exactly once, eliminating some programming error, and scales to multiple threads for basic (termination) synchronization.
    1161 This tree-structure (lattice) create/delete from C block-structure is generalized by using dynamic allocation, so threads can outlive the scope in which they are created, much like dynamically allocating memory lets objects outlive the scope in which they are created.
    1162 \begin{cfa}
    1163 int main() {
    1164         MyThread * heapLived;
    1165         {
    1166                 MyThread blockLived;                            $\C{// fork block-based thread}$
    1167                 heapLived = `new`( MyThread );          $\C{// fork heap-based thread}$
    1168                 ...
    1169         }                                                                               $\C{// join block-based thread}$
    1170         ...
    1171         `delete`( heapLived );                                  $\C{// join heap-based thread}$
    1172 }
    1173 \end{cfa}
    1174 The heap-based approach allows arbitrary thread-creation topologies, with respect to fork/join-style concurrency.
    1175 
     1176 Figure~\ref{s:ConcurrentMatrixSummation} shows concurrently adding the rows of a matrix and then totalling the subtotals sequentially, after all the row threads have terminated.
    1177 The program uses heap-based threads because each thread needs different constructor values.
    1178 (Python provides a simple iteration mechanism to initialize array elements to different values allowing stack allocation.)
    1179 The allocation/deallocation pattern appears unusual because allocated objects are immediately deleted without any intervening code.
    1180 However, for threads, the deletion provides implicit synchronization, which is the intervening code.
     1181 While the subtotals are added in linear order rather than completion order, which slightly inhibits concurrency, the computation is restricted by the critical-path thread (\ie the thread that takes the longest), and so any inhibited concurrency is very small as totalling the subtotals is trivial.
    1182 
    1183 \begin{figure}
    1184 \begin{cfa}
    1185 thread Adder {
    1186     int * row, cols, & subtotal;                        $\C{// communication}$
    1187 };
    1188 void ?{}( Adder & adder, int row[], int cols, int & subtotal ) {
    1189     adder.[ row, cols, &subtotal ] = [ row, cols, &subtotal ];
    1190 }
    1191 void main( Adder & adder ) with( adder ) {
    1192     subtotal = 0;
    1193     for ( int c = 0; c < cols; c += 1 ) {
    1194                 subtotal += row[c];
    1195     }
    1196 }
    1197 int main() {
    1198     const int rows = 10, cols = 1000;
    1199     int matrix[rows][cols], subtotals[rows], total = 0;
    1200     // read matrix
    1201     Adder * adders[rows];
    1202     for ( int r = 0; r < rows; r += 1 ) {       $\C{// start threads to sum rows}$
    1203                 adders[r] = new( matrix[r], cols, &subtotals[r] );
    1204     }
    1205     for ( int r = 0; r < rows; r += 1 ) {       $\C{// wait for threads to finish}$
    1206                 delete( adders[r] );                            $\C{// termination join}$
    1207                 total += subtotals[r];                          $\C{// total subtotal}$
    1208     }
    1209     sout | total | endl;
    1210 }
    1211 \end{cfa}
    1212 \caption{Concurrent Matrix Summation}
    1213 \label{s:ConcurrentMatrixSummation}
    1214 \end{figure}
     1518Like coroutines, the @dtype@ property prevents \emph{implicit} copy operations and the @is_thread@ trait provides no \emph{explicit} copy operations, so threads must be passed by reference (pointer).
     1519Similarly, the function definitions ensure there is a statically typed @main@ function that is the thread starting point (first stack frame), a mechanism to get (read) the thread descriptor from its handle, and a special destructor to prevent deallocation while the thread is executing.
     1520(The qualifier @mutex@ for the destructor parameter is discussed in Section~\ref{s:Monitor}.)
     1521The difference between the coroutine and thread is that a coroutine borrows a thread from its caller, so the first thread resuming a coroutine creates the coroutine's stack and starts running the coroutine main on the stack;
      1522whereas, a thread is scheduled for execution in @main@ immediately after its constructor is run.
     1523No return value or additional parameters are necessary for this function because the @thread@ type allows an arbitrary number of interface functions with corresponding arbitrary typed input/output values.
    12151524
    12161525
    12171526\section{Mutual Exclusion / Synchronization}
    1218 
    1219 Uncontrolled non-deterministic execution is meaningless.
    1220 To reestablish meaningful execution requires mechanisms to reintroduce determinism (\ie restrict non-determinism), called mutual exclusion and synchronization, where mutual exclusion is an access-control mechanism on data shared by threads, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}.
    1221 Since many deterministic challenges appear with the use of mutable shared state, some languages/libraries disallow it, \eg Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, Akka~\cite{Akka} (Scala).
    1222 In these paradigms, interaction among concurrent objects is performed by stateless message-passing~\cite{Thoth,Harmony,V-Kernel} or other paradigms closely relate to networking concepts (\eg channels~\cite{CSP,Go}).
    1223 However, in call/return-based languages, these approaches force a clear distinction (\ie introduce a new programming paradigm) between regular and concurrent computation (\ie routine call versus message passing).
    1224 Hence, a programmer must learn and manipulate two sets of design patterns.
     1527\label{s:MutualExclusionSynchronization}
     1528
     1529Unrestricted nondeterminism is meaningless as there is no way to know when the result is completed without synchronization.
     1530To produce meaningful execution requires clawing back some determinism using mutual exclusion and synchronization, where mutual exclusion provides access control for threads using shared data, and synchronization is a timing relationship among threads~\cite[\S~4]{Buhr05a}.
     1531Some concurrent systems eliminate mutable shared-state by switching to stateless communication like message passing~\cite{Thoth,Harmony,V-Kernel,MPI} (Erlang, MPI), channels~\cite{CSP} (CSP,Go), actors~\cite{Akka} (Akka, Scala), or functional techniques (Haskell).
     1532However, these approaches introduce a new communication mechanism for concurrency different from the standard communication using function call/return.
     1533Hence, a programmer must learn and manipulate two sets of design/programming patterns.
    12251534While this distinction can be hidden away in library code, effective use of the library still has to take both paradigms into account.
    1226 In contrast, approaches based on statefull models more closely resemble the standard call/return programming-model, resulting in a single programming paradigm.
    1227 
    1228 At the lowest level, concurrent control is implemented by atomic operations, upon which different kinds of locks mechanism are constructed, \eg semaphores~\cite{Dijkstra68b}, barriers, and path expressions~\cite{Campbell74}.
     1535In contrast, approaches based on stateful models more closely resemble the standard call/return programming model, resulting in a single programming paradigm.
     1536
     1537At the lowest level, concurrent control is implemented by atomic operations, upon which different kinds of locking mechanisms are constructed, \eg semaphores~\cite{Dijkstra68b}, barriers, and path expressions~\cite{Campbell74}.
    12291538However, for productivity it is always desirable to use the highest-level construct that provides the necessary efficiency~\cite{Hochstein05}.
    12301539A newer approach for restricting non-determinism is transactional memory~\cite{Herlihy93}.
    1231 While this approach is pursued in hardware~\cite{Nakaike15} and system languages, like \CC~\cite{Cpp-Transactions}, the performance and feature set is still too restrictive to be the main concurrency paradigm for system languages, which is why it was rejected as the core paradigm for concurrency in \CFA.
     1540While this approach is pursued in hardware~\cite{Nakaike15} and system languages, like \CC~\cite{Cpp-Transactions}, the performance and feature set is still too restrictive to be the main concurrency paradigm for system languages, which is why it is rejected as the core paradigm for concurrency in \CFA.
    12321541
    12331542One of the most natural, elegant, and efficient mechanisms for mutual exclusion and synchronization for shared-memory systems is the \emph{monitor}.
    1234 First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, many concurrent programming-languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}.
     1543First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, many concurrent programming languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Modula~\cite{Modula-2}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, NeWS~\cite{NeWS}, Emerald~\cite{Emerald}, \uC~\cite{Buhr92a} and Java~\cite{Java}.
    12351544In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as mutex locks or semaphores to simulate monitors.
    1236 For these reasons, \CFA selected monitors as the core high-level concurrency-construct, upon which higher-level approaches can be easily constructed.
     1545For these reasons, \CFA selected monitors as the core high-level concurrency construct, upon which higher-level approaches can be easily constructed.
    12371546
    12381547
    12391548\subsection{Mutual Exclusion}
    12401549
    1241 A group of instructions manipulating a specific instance of shared data that must be performed atomically is called an (individual) \newterm{critical-section}~\cite{Dijkstra65}.
    1242 The generalization is called a \newterm{group critical-section}~\cite{Joung00}, where multiple tasks with the same session may use the resource simultaneously, but different sessions may not use the resource simultaneously.
    1243 The readers/writer problem~\cite{Courtois71} is an instance of a group critical-section, where readers have the same session and all writers have a unique session.
    1244 \newterm{Mutual exclusion} enforces that the correct kind and number of threads are using a critical section.
     1550A group of instructions manipulating a specific instance of shared data that must be performed atomically is called a \newterm{critical section}~\cite{Dijkstra65}, which is enforced by \newterm{simple mutual-exclusion}.
     1551The generalization is called a \newterm{group critical-section}~\cite{Joung00}, where multiple tasks with the same session use the resource simultaneously and different sessions are segregated, which is enforced by \newterm{complex mutual-exclusion} providing the correct kind and number of threads using a group critical-section.
     1552The readers/writer problem~\cite{Courtois71} is an instance of a group critical-section, where readers share a session but writers have a unique session.
    12451553
    12461554However, many solutions exist for mutual exclusion, which vary in terms of performance, flexibility and ease of use.
    12471555Methods range from low-level locks, which are fast and flexible but require significant attention for correctness, to higher-level concurrency techniques, which sacrifice some performance to improve ease of use.
    1248 Ease of use comes by either guaranteeing some problems cannot occur (\eg deadlock free), or by offering a more explicit coupling between shared data and critical section.
    1249 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (\eg reading/writing) for numerical types.
     1556Ease of use comes by either guaranteeing some problems cannot occur, \eg deadlock free, or by offering a more explicit coupling between shared data and critical section.
     1557For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations, \eg reading/writing, for numerical types.
    12501558However, a significant challenge with locks is composability because it takes careful organization for multiple locks to be used while preventing deadlock.
    12511559Easing composability is another feature higher-level mutual-exclusion mechanisms can offer.
     
    12561564Synchronization enforces relative ordering of execution, and synchronization tools provide numerous mechanisms to establish these timing relationships.
    12571565Low-level synchronization primitives offer good performance and flexibility at the cost of ease of use;
    1258 higher-level mechanisms often simplify usage by adding better coupling between synchronization and data (\eg message passing), or offering a simpler solution to otherwise involved challenges, \eg barrier lock.
    1259 Often synchronization is used to order access to a critical section, \eg ensuring a reader thread is the next kind of thread to enter a critical section.
    1260 If a writer thread is scheduled for next access, but another reader thread acquires the critical section first, that reader has \newterm{barged}.
     1566higher-level mechanisms often simplify usage by adding better coupling between synchronization and data, \eg receive-specific versus receive-any thread in message passing or offering specialized solutions, \eg barrier lock.
     1567Often synchronization is used to order access to a critical section, \eg ensuring a waiting writer thread enters the critical section before a calling reader thread.
     1568If the calling reader is scheduled before the waiting writer, the reader has barged.
    12611569Barging can result in staleness/freshness problems, where a reader barges ahead of a writer and reads temporally stale data, or a writer barges ahead of another writer overwriting data with a fresh value preventing the previous value from ever being read (lost computation).
    1262 Preventing or detecting barging is an involved challenge with low-level locks, which can be made much easier by higher-level constructs.
    1263 This challenge is often split into two different approaches: barging avoidance and barging prevention.
    1264 Algorithms that allow a barger, but divert it until later using current synchronization state (flags), are avoiding the barger;
    1265 algorithms that preclude a barger from entering during synchronization in the critical section prevent barging completely.
    1266 Techniques like baton-pass locks~\cite{Andrews89} between threads instead of unconditionally releasing locks is an example of barging prevention.
    1267 
    1268 
    1269 \section{Monitors}
    1270 \label{s:Monitors}
    1271 
    1272 A \textbf{monitor} is a set of routines that ensure mutual exclusion when accessing shared state.
    1273 More precisely, a monitor is a programming technique that binds mutual exclusion to routine scope, as opposed to locks, where mutual-exclusion is defined by acquire/release calls, independent of lexical context (analogous to block and heap storage allocation).
    1274 The strong association with the call/return paradigm eases programmability, readability and maintainability, at a slight cost in flexibility and efficiency.
    1275 
    1276 Note, like coroutines/threads, both locks and monitors require an abstract handle to reference them, because at their core, both mechanisms are manipulating non-copyable shared state.
    1277 Copying a lock is insecure because it is possible to copy an open lock and then use the open copy when the original lock is closed to simultaneously access the shared data.
    1278 Copying a monitor is secure because both the lock and shared data are copies, but copying the shared data is meaningless because it no longer represents a unique entity.
    1279 As for coroutines/tasks, a non-copyable (@dtype@) trait is used to capture this requirement, so all locks/monitors must be passed by reference (pointer).
     1570Preventing or detecting barging is an involved challenge with low-level locks, which is made easier through higher-level constructs.
     1571This challenge is often split into two different approaches: barging avoidance and prevention.
      1572Algorithms that unconditionally release a lock for competing threads to acquire use barging avoidance during synchronization to force a barging thread to wait;
     1573algorithms that conditionally hold locks during synchronization, \eg baton-passing~\cite{Andrews89}, prevent barging completely.
     1574
     1575
     1576\section{Monitor}
     1577\label{s:Monitor}
     1578
     1579A \textbf{monitor} is a set of functions that ensure mutual exclusion when accessing shared state.
     1580More precisely, a monitor is a programming technique that implicitly binds mutual exclusion to static function scope, as opposed to locks, where mutual-exclusion is defined by acquire/release calls, independent of lexical context (analogous to block and heap storage allocation).
     1581Restricting acquire/release points eases programming, comprehension, and maintenance, at a slight cost in flexibility and efficiency.
     1582\CFA uses a custom @monitor@ type and leverages declaration semantics (deallocation) to protect active or waiting threads in a monitor.
     1583
     1584The following is a \CFA monitor implementation of an atomic counter.
     1585\begin{cfa}[morekeywords=nomutex]
     1586`monitor` Aint { int cnt; }; $\C[4.25in]{// atomic integer counter}$
     1587int ++?( Aint & `mutex`$\(_{opt}\)$ this ) with( this ) { return ++cnt; } $\C{// increment}$
     1588int ?=?( Aint & `mutex`$\(_{opt}\)$ lhs, int rhs ) with( lhs ) { cnt = rhs; } $\C{// conversions with int}\CRT$
     1589int ?=?( int & lhs, Aint & `mutex`$\(_{opt}\)$ rhs ) with( rhs ) { lhs = cnt; }
     1590\end{cfa}
     1591% The @Aint@ constructor, @?{}@, uses the \lstinline[morekeywords=nomutex]@nomutex@ qualifier indicating mutual exclusion is unnecessary during construction because an object is inaccessible (private) until after it is initialized.
     1592% (While a constructor may publish its address into a global variable, doing so generates a race-condition.)
     1593The prefix increment operation, @++?@, is normally @mutex@, indicating mutual exclusion is necessary during function execution, to protect the incrementing from race conditions, unless there is an atomic increment instruction for the implementation type.
     1594The assignment operators provide bidirectional conversion between an atomic and normal integer without accessing field @cnt@;
     1595these operations only need @mutex@, if reading/writing the implementation type is not atomic.
     1596The atomic counter is used without any explicit mutual-exclusion and provides thread-safe semantics, which is similar to the \CC template @std::atomic@.
     1597\begin{cfa}
     1598int i = 0, j = 0, k = 5;
     1599Aint x = { 0 }, y = { 0 }, z = { 5 }; $\C{// no mutex required}$
     1600++x; ++y; ++z; $\C{// safe increment by multiple threads}$
     1601x = 2; y = i; z = k; $\C{// conversions}$
     1602i = x; j = y; k = z;
     1603\end{cfa}
     1604
     1605\CFA monitors have \newterm{multi-acquire} semantics so the thread in the monitor may acquire it multiple times without deadlock, allowing recursion and calling other interface functions.
     1606\begin{cfa}
     1607monitor M { ... } m;
     1608void foo( M & mutex m ) { ... } $\C{// acquire mutual exclusion}$
     1609void bar( M & mutex m ) { $\C{// acquire mutual exclusion}$
     1610        ... `bar( m );` ... `foo( m );` ... $\C{// reacquire mutual exclusion}$
     1611}
     1612\end{cfa}
     1613\CFA monitors also ensure the monitor lock is released regardless of how an acquiring function ends (normal or exceptional), and returning a shared variable is safe via copying before the lock is released.
     1614Similar safety is offered by \emph{explicit} mechanisms like \CC RAII;
     1615monitor \emph{implicit} safety ensures no programmer usage errors.
     1616Furthermore, RAII mechanisms cannot handle complex synchronization within a monitor, where the monitor lock may not be released on function exit because it is passed to an unblocking thread;
     1617RAII is purely a mutual-exclusion mechanism (see Section~\ref{s:Scheduling}).
     1618
     1619
     1620\subsection{Monitor Implementation}
     1621
     1622For the same design reasons, \CFA provides a custom @monitor@ type and a @trait@ to enforce and restrict the monitor-interface functions.
     1623\begin{cquote}
     1624\begin{tabular}{@{}c@{\hspace{3\parindentlnth}}c@{}}
     1625\begin{cfa}
     1626monitor M {
     1627        ... // shared data
     1628};
     1629
     1630\end{cfa}
     1631&
    12801632\begin{cfa}
    12811633trait is_monitor( `dtype` T ) {
     
    12841636};
    12851637\end{cfa}
     1638\end{tabular}
     1639\end{cquote}
     1640The @dtype@ property prevents \emph{implicit} copy operations and the @is_monitor@ trait provides no \emph{explicit} copy operations, so monitors must be passed by reference (pointer).
     1641% Copying a lock is insecure because it is possible to copy an open lock and then use the open copy when the original lock is closed to simultaneously access the shared data.
     1642% Copying a monitor is secure because both the lock and shared data are copies, but copying the shared data is meaningless because it no longer represents a unique entity.
      1643Similarly, the function definitions ensure there is a mechanism to get (read) the monitor descriptor from its handle, and a special destructor to prevent deallocation while a thread is using the shared data.
     1644The custom monitor type also inserts any locks needed to implement the mutual exclusion semantics.
    12861645
    12871646
     
    12891648\label{s:MutexAcquisition}
    12901649
    1291 While correctness implicitly implies a monitor's mutual exclusion is acquired and released, there are implementation options about when and where the locking/unlocking occurs.
     1650While the monitor lock provides mutual exclusion for shared data, there are implementation options for when and where the locking/unlocking occurs.
    12921651(Much of this discussion also applies to basic locks.)
    1293 For example, a monitor may need to be passed through multiple helper routines before it becomes necessary to acquire the monitor mutual-exclusion.
    1294 \begin{cfa}[morekeywords=nomutex]
    1295 monitor Aint { int cnt; };                                      $\C{// atomic integer counter}$
    1296 void ?{}( Aint & `nomutex` this ) with( this ) { cnt = 0; } $\C{// constructor}$
    1297 int ?=?( Aint & `mutex`$\(_{opt}\)$ lhs, int rhs ) with( lhs ) { cnt = rhs; } $\C{// conversions}$
    1298 void ?{}( int & this, Aint & `mutex`$\(_{opt}\)$ v ) { this = v.cnt; }
    1299 int ?=?( int & lhs, Aint & `mutex`$\(_{opt}\)$ rhs ) with( rhs ) { lhs = cnt; }
    1300 int ++?( Aint & `mutex`$\(_{opt}\)$ this ) with( this ) { return ++cnt; } $\C{// increment}$
    1301 \end{cfa}
    1302 The @Aint@ constructor, @?{}@, uses the \lstinline[morekeywords=nomutex]@nomutex@ qualifier indicating mutual exclusion is unnecessary during construction because an object is inaccessible (private) until after it is initialized.
    1303 (While a constructor may publish its address into a global variable, doing so generates a race-condition.)
    1304 The conversion operators for initializing and assigning with a normal integer only need @mutex@, if reading/writing the implementation type is not atomic.
     1305 Finally, the prefix increment operator, @++?@, is normally @mutex@ to protect the incrementing from race conditions, unless there is an atomic increment instruction for the implementation type.
    1306 
    1307 The atomic counter is used without any explicit mutual-exclusion and provides thread-safe semantics, which is similar to the \CC template @std::atomic@.
    1308 \begin{cfa}
    1309 Aint x, y, z;
    1310 ++x; ++y; ++z;                                                          $\C{// safe increment by multiple threads}$
    1311 x = 2; y = 2; z = 2;                                            $\C{// conversions}$
    1312 int i = x, j = y, k = z;
    1313 i = x; j = y; k = z;
    1314 \end{cfa}
    1315 
    1316 For maximum usability, monitors have \newterm{multi-acquire} semantics allowing a thread to acquire it multiple times without deadlock.
    1317 For example, atomically printing the contents of a binary tree:
    1318 \begin{cfa}
    1319 monitor Tree {
    1320         Tree * left, right;
    1321         // value
    1322 };
    1323 void print( Tree & mutex tree ) {                       $\C{// prefix traversal}$
    1324         // write value
    1325         print( tree->left );                                    $\C{// multiply acquire monitor lock on each recursion}$
    1326         print( tree->right );
    1327 }
    1328 \end{cfa}
    1329 
     1330 Mandatory monitor qualifiers have the benefit of being self-documented, but requiring both @mutex@ and \lstinline[morekeywords=nomutex]@nomutex@ for all monitor parameters is redundant.
     1331 Instead, one of the qualifier semantics can be the default, and the other required.
    1332 For example, assume the safe @mutex@ option for a monitor parameter because assuming \lstinline[morekeywords=nomutex]@nomutex@ may cause subtle errors.
    1333 On the other hand, assuming \lstinline[morekeywords=nomutex]@nomutex@ is the \emph{normal} parameter behaviour, stating explicitly ``this parameter is not special''.
     1652For example, a monitor may be passed through multiple helper functions before it is necessary to acquire the monitor's mutual exclusion.
     1653
     1654The benefit of mandatory monitor qualifiers is self-documentation, but requiring both @mutex@ and \lstinline[morekeywords=nomutex]@nomutex@ for all monitor parameters is redundant.
     1655Instead, the semantics has one qualifier as the default and the other required.
     1656For example, make the safe @mutex@ qualifier the default because assuming \lstinline[morekeywords=nomutex]@nomutex@ may cause subtle errors.
     1657Alternatively, make the unsafe \lstinline[morekeywords=nomutex]@nomutex@ qualifier the default because it is the \emph{normal} parameter semantics while @mutex@ parameters are rare.
    13341658Providing a default qualifier implies knowing whether a parameter is a monitor.
    1335 Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred.
     1659Since \CFA relies heavily on traits as an abstraction mechanism, types can coincidentally match the monitor trait but not be a monitor, similar to inheritance where a shape and playing card can both be drawable.
    13361660For this reason, \CFA requires programmers to identify the kind of parameter with the @mutex@ keyword and uses no keyword to mean \lstinline[morekeywords=nomutex]@nomutex@.
    13371661
    13381662The next semantic decision is establishing which parameter \emph{types} may be qualified with @mutex@.
    1339 Given:
     1663The following has monitor parameter types that are composed of multiple objects.
    13401664\begin{cfa}
    13411665monitor M { ... }
    1342 int f1( M & mutex m );
    1343 int f2( M * mutex m );
    1344 int f3( M * mutex m[] );
    1345 int f4( stack( M * ) & mutex m );
    1346 \end{cfa}
    1347 the issue is that some of these parameter types are composed of multiple objects.
    1348 For @f1@, there is only a single parameter object.
    1349 Adding indirection in @f2@ still identifies a single object.
    1350 However, the matrix in @f3@ introduces multiple objects.
    1351 While shown shortly, multiple acquisition is possible;
    1352 however array lengths are often unknown in C.
    1353 This issue is exacerbated in @f4@, where the data structure must be safely traversed to acquire all of its elements.
    1354 
    1355 To make the issue tractable, \CFA only acquires one monitor per parameter with at most one level of indirection.
    1356 However, the C type-system has an ambiguity with respects to arrays.
    1357 Is the argument for @f2@ a single object or an array of objects?
    1358 If it is an array, only the first element of the array is acquired, which seems unsafe;
    1359 hence, @mutex@ is disallowed for array parameters.
    1360 \begin{cfa}
    1361 int f1( M & mutex m );                                          $\C{// allowed: recommended case}$
    1362 int f2( M * mutex m );                                          $\C{// disallowed: could be an array}$
    1363 int f3( M mutex m[$\,$] );                                      $\C{// disallowed: array length unknown}$
    1364 int f4( M ** mutex m );                                         $\C{// disallowed: could be an array}$
    1365 int f5( M * mutex m[$\,$] );                            $\C{// disallowed: array length unknown}$
    1366 \end{cfa}
    1367 % Note, not all array routines have distinct types: @f2@ and @f3@ have the same type, as do @f4@ and @f5@.
    1368 % However, even if the code generation could tell the difference, the extra information is still not sufficient to extend meaningfully the monitor call semantic.
    1369 
    1370 For object-oriented monitors, calling a mutex member \emph{implicitly} acquires mutual exclusion of the receiver object, @`rec`.foo(...)@.
    1371 \CFA has no receiver, and hence, must use an explicit mechanism to specify which object has mutual exclusion acquired.
    1372 A positive consequence of this design decision is the ability to support multi-monitor routines.
    1373 \begin{cfa}
    1374 int f( M & mutex x, M & mutex y );              $\C{// multiple monitor parameter of any type}$
    1375 M m1, m2;
    1376 f( m1, m2 );
    1377 \end{cfa}
    1378 (While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.)
     1379 In practice, writing multi-locking routines that do not deadlock is tricky.
    1380 Having language support for such a feature is therefore a significant asset for \CFA.
    1381 
    1382 The capability to acquire multiple locks before entering a critical section is called \newterm{bulk acquire}.
    1383 In previous example, \CFA guarantees the order of acquisition is consistent across calls to different routines using the same monitors as arguments.
    1384 This consistent ordering means acquiring multiple monitors is safe from deadlock.
    1385 However, users can force the acquiring order.
    1386 For example, notice the use of @mutex@/\lstinline[morekeywords=nomutex]@nomutex@ and how this affects the acquiring order:
    1387 \begin{cfa}
    1388 void foo( M & mutex m1, M & mutex m2 );         $\C{// acquire m1 and m2}$
     1666int f1( M & mutex m ); $\C{// single parameter object}$
     1667int f2( M * mutex m ); $\C{// single or multiple parameter object}$
     1668int f3( M * mutex m[$\,$] ); $\C{// multiple parameter object}$
     1669int f4( stack( M * ) & mutex m ); $\C{// multiple parameters object}$
     1670\end{cfa}
     1671Function @f1@ has a single parameter object, while @f2@'s indirection could be a single or multi-element array, where static array size is often unknown in C.
     1672Function @f3@ has a multiple object matrix, and @f4@ a multiple object data structure.
     1673While shown shortly, multiple object acquisition is possible, but the number of objects must be statically known.
     1674Therefore, \CFA only acquires one monitor per parameter with at most one level of indirection, excluding pointers as it is impossible to statically determine the size.
     1675
     1676For object-oriented monitors, \eg Java, calling a mutex member \emph{implicitly} acquires mutual exclusion of the receiver object, @`rec`.foo(...)@.
     1677\CFA has no receiver, and hence, the explicit @mutex@ qualifier is used to specify which objects acquire mutual exclusion.
     1678A positive consequence of this design decision is the ability to support multi-monitor functions,\footnote{
     1679While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.}
     1680called \newterm{bulk acquire}.
     1681\CFA guarantees acquisition order is consistent across calls to @mutex@ functions using the same monitors as arguments, so acquiring multiple monitors is safe from deadlock.
     1682Figure~\ref{f:BankTransfer} shows a trivial solution to the bank transfer problem~\cite{BankTransfer}, where two resources must be locked simultaneously, using \CFA monitors with implicit locking and \CC with explicit locking.
     1683A \CFA programmer only has to manage when to acquire mutual exclusion;
     1684a \CC programmer must select the correct lock and acquisition mechanism from a panoply of locking options.
     1685Making good choices for common cases in \CFA simplifies the programming experience and enhances safety.
     1686
     1687\begin{figure}
     1688\centering
     1689\begin{lrbox}{\myboxA}
     1690\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1691monitor BankAccount {
     1692
     1693        int balance;
     1694} b1 = { 0 }, b2 = { 0 };
     1695void deposit( BankAccount & `mutex` b,
     1696                        int deposit ) with(b) {
     1697        balance += deposit;
     1698}
     1699void transfer( BankAccount & `mutex` my,
     1700        BankAccount & `mutex` your, int me2you ) {
     1701
     1702        deposit( my, -me2you ); // debit
     1703        deposit( your, me2you ); // credit
     1704}
     1705`thread` Person { BankAccount & b1, & b2; };
     1706void main( Person & person ) with(person) {
     1707        for ( 10_000_000 ) {
     1708                if ( random() % 3 ) deposit( b1, 3 );
     1709                if ( random() % 3 ) transfer( b1, b2, 7 );
     1710        }
     1711}
     1712int main() {
     1713        `Person p1 = { b1, b2 }, p2 = { b2, b1 };`
     1714
     1715} // wait for threads to complete
     1716\end{cfa}
     1717\end{lrbox}
     1718
     1719\begin{lrbox}{\myboxB}
     1720\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1721struct BankAccount {
     1722        `recursive_mutex m;`
     1723        int balance = 0;
     1724} b1, b2;
     1725void deposit( BankAccount & b, int deposit ) {
     1726        `scoped_lock lock( b.m );`
     1727        b.balance += deposit;
     1728}
     1729void transfer( BankAccount & my,
     1730                        BankAccount & your, int me2you ) {
     1731        `scoped_lock lock( my.m, your.m );`
     1732        deposit( my, -me2you ); // debit
     1733        deposit( your, me2you ); // credit
     1734}
     1735
     1736void person( BankAccount & b1, BankAccount & b2 ) {
     1737        for ( int i = 0; i < 10$'$000$'$000; i += 1 ) {
     1738                if ( random() % 3 ) deposit( b1, 3 );
     1739                if ( random() % 3 ) transfer( b1, b2, 7 );
     1740        }
     1741}
     1742int main() {
     1743        `thread p1(person, ref(b1), ref(b2)), p2(person, ref(b2), ref(b1));`
     1744        `p1.join(); p2.join();`
     1745}
     1746\end{cfa}
     1747\end{lrbox}
     1748
     1749\subfloat[\CFA]{\label{f:CFABank}\usebox\myboxA}
     1750\hspace{3pt}
     1751\vrule
     1752\hspace{3pt}
     1753\subfloat[\CC]{\label{f:C++Bank}\usebox\myboxB}
     1754\hspace{3pt}
     1755\caption{Bank transfer problem}
     1756\label{f:BankTransfer}
     1757\end{figure}
     1758
     1759Users can still force the acquiring order by using @mutex@/\lstinline[morekeywords=nomutex]@nomutex@.
     1760\begin{cfa}
     1761void foo( M & mutex m1, M & mutex m2 ); $\C{// acquire m1 and m2}$
    13891762void bar( M & mutex m1, M & /* nomutex */ m2 ) { $\C{// acquire m1}$
    1390         ... foo( m1, m2 ); ...                                  $\C{// acquire m2}$
     1763        ... foo( m1, m2 ); ... $\C{// acquire m2}$
    13911764}
    13921765void baz( M & /* nomutex */ m1, M & mutex m2 ) { $\C{// acquire m2}$
    1393         ... foo( m1, m2 ); ...                                  $\C{// acquire m1}$
    1394 }
    1395 \end{cfa}
    1396 The multi-acquire semantics allows @bar@ or @baz@ to acquire a monitor lock and reacquire it in @foo@.
    1397 In the calls to @bar@ and @baz@, the monitors are acquired in opposite order.
    1398 
     1399 However, such use leads to lock acquiring order problems resulting in deadlock~\cite{Lister77}, where detecting it requires dynamic tracking of monitor calls, and dealing with it requires implementing rollback semantics~\cite{Dice10}.
    1400 In \CFA, safety is guaranteed by using bulk acquire of all monitors to shared objects, whereas other monitor systems provide no aid.
    1401 While \CFA provides only a partial solution, the \CFA partial solution handles many useful cases.
    1402 \begin{cfa}
    1403 monitor Bank { ... };
    1404 void deposit( Bank & `mutex` b, int deposit );
    1405 void transfer( Bank & `mutex` mybank, Bank & `mutex` yourbank, int me2you) {
    1406         deposit( mybank, `-`me2you );                   $\C{// debit}$
    1407         deposit( yourbank, me2you );                    $\C{// credit}$
    1408 }
    1409 \end{cfa}
    1410 This example shows a trivial solution to the bank-account transfer problem~\cite{BankTransfer}.
    1411 Without multi- and bulk acquire, the solution to this problem requires careful engineering.
    1412 
    1413 
    1414 \subsection{\protect\lstinline|mutex| statement} \label{mutex-stmt}
    1415 
    1416 The monitor call-semantics associate all locking semantics to routines.
    1417 Like Java, \CFA offers an alternative @mutex@ statement to reduce refactoring and naming.
     1766        ... foo( m1, m2 ); ... $\C{// acquire m1}$
     1767}
     1768\end{cfa}
     1769The bulk-acquire semantics allow @bar@ or @baz@ to acquire a monitor lock and reacquire it in @foo@.
      1770The calls to @bar@ and @baz@ acquire the monitors in opposite order, possibly resulting in deadlock.
     1771However, this case is the simplest instance of the \emph{nested-monitor problem}~\cite{Lister77}, where monitors are acquired in sequence versus bulk.
     1772Detecting the nested-monitor problem requires dynamic tracking of monitor calls, and dealing with it requires rollback semantics~\cite{Dice10}.
     1773\CFA does not deal with this fundamental problem.
     1774
     1775Finally, like Java, \CFA offers an alternative @mutex@ statement to reduce refactoring and naming.
    14181776\begin{cquote}
    1419 \begin{tabular}{@{}c|@{\hspace{\parindentlnth}}c@{}}
    1420 routine call & @mutex@ statement \\
    1421 \begin{cfa}
    1422 monitor M {};
     1777\renewcommand{\arraystretch}{0.0}
     1778\begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}}
     1779\multicolumn{1}{c}{\textbf{\lstinline@mutex@ call}} & \multicolumn{1}{c}{\lstinline@mutex@ \textbf{statement}} \\
     1780\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1781monitor M { ... };
    14231782void foo( M & mutex m1, M & mutex m2 ) {
    14241783        // critical section
     
    14291788\end{cfa}
    14301789&
    1431 \begin{cfa}
     1790\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    14321791
    14331792void bar( M & m1, M & m2 ) {
     
    14421801
    14431802
    1444 \section{Internal Scheduling}
    1445 \label{s:InternalScheduling}
    1446 
     1447 While monitor mutual-exclusion provides safe access to shared data, the monitor data may indicate that a thread accessing it cannot proceed, \eg a bounded buffer, Figure~\ref{f:GenericBoundedBuffer}, may be full/empty so producer/consumer threads must block.
     1803\subsection{Scheduling}
     1804\label{s:Scheduling}
     1805
     1806% There are many aspects of scheduling in a concurrency system, all related to resource utilization by waiting threads, \ie which thread gets the resource next.
     1807% Different forms of scheduling include access to processors by threads (see Section~\ref{s:RuntimeStructureCluster}), another is access to a shared resource by a lock or monitor.
     1808This section discusses monitor scheduling for waiting threads eligible for entry, \ie which thread gets the shared resource next. (See Section~\ref{s:RuntimeStructureCluster} for scheduling threads on virtual processors.)
      1809While monitor mutual-exclusion provides safe access to shared data, the monitor data may indicate that a thread accessing it cannot proceed, \eg a bounded buffer may be full/empty so producer/consumer threads must block.
    14481810Leaving the monitor and trying again (busy waiting) is impractical for high-level programming.
    1449 Monitors eliminate busy waiting by providing internal synchronization to schedule threads needing access to the shared data, where the synchronization is blocking (threads are parked) versus spinning.
    1450 The synchronization is generally achieved with internal~\cite{Hoare74} or external~\cite[\S~2.9.2]{uC++} scheduling, where \newterm{scheduling} is defined as indicating which thread acquires the critical section next.
     1811Monitors eliminate busy waiting by providing synchronization to schedule threads needing access to the shared data, where threads block versus spinning.
     1812Synchronization is generally achieved with internal~\cite{Hoare74} or external~\cite[\S~2.9.2]{uC++} scheduling.
    14511813\newterm{Internal scheduling} is characterized by each thread entering the monitor and making an individual decision about proceeding or blocking, while \newterm{external scheduling} is characterized by an entering thread making a decision about proceeding for itself and on behalf of other threads attempting entry.
    1452 
    1453 Figure~\ref{f:BBInt} shows a \CFA bounded-buffer with internal scheduling, where producers/consumers enter the monitor, see the buffer is full/empty, and block on an appropriate condition lock, @full@/@empty@.
    1454 The @wait@ routine atomically blocks the calling thread and implicitly releases the monitor lock(s) for all monitors in the routine's parameter list.
    1455 The appropriate condition lock is signalled to unblock an opposite kind of thread after an element is inserted/removed from the buffer.
    1456 Signalling is unconditional, because signalling an empty condition lock does nothing.
    1457 Signalling semantics cannot have the signaller and signalled thread in the monitor simultaneously, which means:
    1458 \begin{enumerate}
    1459 \item
    1460 The signalling thread returns immediately, and the signalled thread continues.
    1461 \item
    1462 The signalling thread continues and the signalled thread is marked for urgent unblocking at the next scheduling point (exit/wait).
    1463 \item
     1463 The signalling thread blocks but is marked for urgent unblocking at the next scheduling point and the signalled thread continues.
    1465 \end{enumerate}
    1466 The first approach is too restrictive, as it precludes solving a reasonable class of problems (\eg dating service).
    1467 \CFA supports the next two semantics as both are useful.
    1468 Finally, while it is common to store a @condition@ as a field of the monitor, in \CFA, a @condition@ variable can be created/stored independently.
    1469 Furthermore, a condition variable is tied to a \emph{group} of monitors on first use (called \newterm{branding}), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors.
     1814Finally, \CFA monitors do not allow calling threads to barge ahead of signalled threads, which simplifies synchronization among threads in the monitor and increases correctness.
     1815If barging is allowed, synchronization between a signaller and signallee is difficult, often requiring additional flags and multiple unblock/block cycles.
     1816In fact, signals-as-hints is completely opposite from that proposed by Hoare in the seminal paper on monitors~\cite[p.~550]{Hoare74}.
     1817% \begin{cquote}
     1818% However, we decree that a signal operation be followed immediately by resumption of a waiting program, without possibility of an intervening procedure call from yet a third program.
     1819% It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signalling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74}
     1820% \end{cquote}
     1821Furthermore, \CFA concurrency has no spurious wakeup~\cite[\S~9]{Buhr05a}, which eliminates an implicit form of self barging.
     1822Hence, a \CFA @wait@ statement is not enclosed in a @while@ loop retesting a blocking predicate, which can cause thread starvation due to barging.
     1823
     1824Figure~\ref{f:MonitorScheduling} shows general internal/external scheduling (for the bounded-buffer example in Figure~\ref{f:InternalExternalScheduling}).
     1825External calling threads block on the calling queue, if the monitor is occupied, otherwise they enter in FIFO order.
     1826Internal threads block on condition queues via @wait@ and reenter from the condition in FIFO order.
     1827Alternatively, internal threads block on urgent from the @signal_block@ or @waitfor@, and reenter implicitly when the monitor becomes empty, \ie, the thread in the monitor exits or waits.
     1828
     1829There are three signalling mechanisms to unblock waiting threads to enter the monitor.
     1830Note, signalling cannot have the signaller and signalled thread in the monitor simultaneously because of the mutual exclusion, so either the signaller or signallee can proceed.
     1831For internal scheduling, threads are unblocked from condition queues using @signal@, where the signallee is moved to urgent and the signaller continues (solid line).
     1832Multiple signals move multiple signallees to urgent until the condition is empty.
     1833When the signaller exits or waits, a thread blocked on urgent is processed before calling threads to prevent barging.
     1834(Java conceptually moves the signalled thread to the calling queue, and hence, allows barging.)
     1835The alternative unblock is in the opposite order using @signal_block@, where the signaller is moved to urgent and the signallee continues (dashed line), and is implicitly unblocked from urgent when the signallee exits or waits.
     1836
     1837For external scheduling, the condition queues are not used;
     1838instead threads are unblocked directly from the calling queue using @waitfor@ based on function names requesting mutual exclusion.
     1839(The linear search through the calling queue to locate a particular call can be reduced to $O(1)$.)
     1840The @waitfor@ has the same semantics as @signal_block@, where the signalled thread executes before the signallee, which waits on urgent.
     1841Executing multiple @waitfor@s from different signalled functions causes the calling threads to move to urgent.
     1842External scheduling requires urgent to be a stack, because the signaller expects to execute immediately after the specified monitor call has exited or waited.
     1843Internal scheduling behaves the same for an urgent stack or queue, except for multiple signalling, where the threads unblock from urgent in reverse order from signalling.
     1844If the restart order is important, multiple signalling by a signal thread can be transformed into daisy-chain signalling among threads, where each thread signals the next thread.
     1845We tried both a stack for @waitfor@ and queue for signalling, but that resulted in complex semantics about which thread enters next.
     1846Hence, \CFA uses a single urgent stack to correctly handle @waitfor@ and adequately support both forms of signalling.
     1847
     1848\begin{figure}
     1849\centering
     1850% \subfloat[Scheduling Statements] {
     1851% \label{fig:SchedulingStatements}
     1852% {\resizebox{0.45\textwidth}{!}{\input{CondSigWait.pstex_t}}}
     1853\input{CondSigWait.pstex_t}
     1854% }% subfloat
     1855% \quad
     1856% \subfloat[Bulk acquire monitor] {
     1857% \label{fig:BulkMonitor}
     1858% {\resizebox{0.45\textwidth}{!}{\input{ext_monitor.pstex_t}}}
     1859% }% subfloat
     1860\caption{Monitor Scheduling}
     1861\label{f:MonitorScheduling}
     1862\end{figure}
     1863
     1864Figure~\ref{f:BBInt} shows a \CFA generic bounded-buffer with internal scheduling, where producers/consumers enter the monitor, detect the buffer is full/empty, and block on an appropriate condition variable, @full@/@empty@.
     1865The @wait@ function atomically blocks the calling thread and implicitly releases the monitor lock(s) for all monitors in the function's parameter list.
     1866The appropriate condition variable is signalled to unblock an opposite kind of thread after an element is inserted/removed from the buffer.
     1867Signalling is unconditional, because signalling an empty condition variable does nothing.
     1868It is common to declare condition variables as monitor fields to prevent shared access, hence no locking is required for access as the conditions are protected by the monitor lock.
     1869In \CFA, a condition variable can be created/stored independently.
     1870% To still prevent expensive locking on access, a condition variable is tied to a \emph{group} of monitors on first use, called \newterm{branding}, resulting in a low-cost boolean test to detect sharing from other monitors.
     1871
     1872% Signalling semantics cannot have the signaller and signalled thread in the monitor simultaneously, which means:
     1873% \begin{enumerate}
     1874% \item
     1875% The signalling thread returns immediately and the signalled thread continues.
     1876% \item
     1877% The signalling thread continues and the signalled thread is marked for urgent unblocking at the next scheduling point (exit/wait).
     1878% \item
     1879% The signalling thread blocks but is marked for urgent unblocking at the next scheduling point and the signalled thread continues.
     1880% \end{enumerate}
     1881% The first approach is too restrictive, as it precludes solving a reasonable class of problems, \eg dating service (see Figure~\ref{f:DatingService}).
     1882% \CFA supports the next two semantics as both are useful.
    14701883
    14711884\begin{figure}
     
    14811894        };
    14821895        void ?{}( Buffer(T) & buffer ) with(buffer) {
    1483                 [front, back, count] = 0;
     1896                front = back = count = 0;
    14841897        }
    1485 
    14861898        void insert( Buffer(T) & mutex buffer, T elem )
    14871899                                with(buffer) {
     
    15001912\end{lrbox}
    15011913
     1914% \newbox\myboxB
     1915% \begin{lrbox}{\myboxB}
     1916% \begin{cfa}[aboveskip=0pt,belowskip=0pt]
     1917% forall( otype T ) { // distribute forall
     1918%       monitor Buffer {
     1919%
     1920%               int front, back, count;
     1921%               T elements[10];
     1922%       };
     1923%       void ?{}( Buffer(T) & buffer ) with(buffer) {
     1924%               [front, back, count] = 0;
     1925%       }
     1926%       T remove( Buffer(T) & mutex buffer ); // forward
     1927%       void insert( Buffer(T) & mutex buffer, T elem )
     1928%                               with(buffer) {
     1929%               if ( count == 10 ) `waitfor( remove, buffer )`;
     1930%               // insert elem into buffer
     1931%
     1932%       }
     1933%       T remove( Buffer(T) & mutex buffer ) with(buffer) {
     1934%               if ( count == 0 ) `waitfor( insert, buffer )`;
     1935%               // remove elem from buffer
     1936%
     1937%               return elem;
     1938%       }
     1939% }
     1940% \end{cfa}
     1941% \end{lrbox}
     1942
    15021943\newbox\myboxB
    15031944\begin{lrbox}{\myboxB}
    15041945\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    1505 forall( otype T ) { // distribute forall
    1506         monitor Buffer {
    1507 
    1508                 int front, back, count;
    1509                 T elements[10];
    1510         };
    1511         void ?{}( Buffer(T) & buffer ) with(buffer) {
    1512                 [front, back, count] = 0;
    1513         }
    1514         T remove( Buffer(T) & mutex buffer ); // forward
    1515         void insert( Buffer(T) & mutex buffer, T elem )
    1516                                 with(buffer) {
    1517                 if ( count == 10 ) `waitfor( remove, buffer )`;
    1518                 // insert elem into buffer
    1519 
    1520         }
    1521         T remove( Buffer(T) & mutex buffer ) with(buffer) {
    1522                 if ( count == 0 ) `waitfor( insert, buffer )`;
    1523                 // remove elem from buffer
    1524 
    1525                 return elem;
    1526         }
    1527 }
     1946monitor ReadersWriter {
     1947        int rcnt, wcnt; // readers/writer using resource
     1948};
     1949void ?{}( ReadersWriter & rw ) with(rw) {
     1950        rcnt = wcnt = 0;
     1951}
     1952void EndRead( ReadersWriter & mutex rw ) with(rw) {
     1953        rcnt -= 1;
     1954}
     1955void EndWrite( ReadersWriter & mutex rw ) with(rw) {
     1956        wcnt = 0;
     1957}
     1958void StartRead( ReadersWriter & mutex rw ) with(rw) {
     1959        if ( wcnt > 0 ) `waitfor( EndWrite, rw );`
     1960        rcnt += 1;
     1961}
     1962void StartWrite( ReadersWriter & mutex rw ) with(rw) {
     1963        if ( wcnt > 0 ) `waitfor( EndWrite, rw );`
     1964        else while ( rcnt > 0 ) `waitfor( EndRead, rw );`
     1965        wcnt = 1;
     1966}
     1967
    15281968\end{cfa}
    15291969\end{lrbox}
    15301970
    1531 \subfloat[Internal Scheduling]{\label{f:BBInt}\usebox\myboxA}
    1532 %\qquad
    1533 \subfloat[External Scheduling]{\label{f:BBExt}\usebox\myboxB}
    1534 \caption{Generic Bounded-Buffer}
    1535 \label{f:GenericBoundedBuffer}
     1971\subfloat[Generic bounded buffer, internal scheduling]{\label{f:BBInt}\usebox\myboxA}
     1972\hspace{3pt}
     1973\vrule
     1974\hspace{3pt}
     1975\subfloat[Readers / writer lock, external scheduling]{\label{f:RWExt}\usebox\myboxB}
     1976
     1977\caption{Internal / external scheduling}
     1978\label{f:InternalExternalScheduling}
    15361979\end{figure}
    15371980
    1538 Figure~\ref{f:BBExt} shows a \CFA bounded-buffer with external scheduling, where producers/consumers detecting a full/empty buffer block and prevent more producers/consumers from entering the monitor until the buffer has a free/empty slot.
    1539 External scheduling is controlled by the @waitfor@ statement, which atomically blocks the calling thread, releases the monitor lock, and restricts the routine calls that can next acquire mutual exclusion.
     1981Figure~\ref{f:BBInt} can be transformed into external scheduling by removing the condition variables and signals/waits, and adding the following lines at the locations of the current @wait@s in @insert@/@remove@, respectively.
     1982\begin{cfa}[aboveskip=2pt,belowskip=1pt]
     1983if ( count == 10 ) `waitfor( remove, buffer )`;       |      if ( count == 0 ) `waitfor( insert, buffer )`;
     1984\end{cfa}
      1985Here, the producers/consumers detect a full/\-empty buffer and prevent more producers/consumers from entering the monitor until there is a free/empty slot in the buffer.
     1986External scheduling is controlled by the @waitfor@ statement, which atomically blocks the calling thread, releases the monitor lock, and restricts the function calls that can next acquire mutual exclusion.
    15401987If the buffer is full, only calls to @remove@ can acquire the buffer, and if the buffer is empty, only calls to @insert@ can acquire the buffer.
    1541 Threads making calls to routines that are currently excluded block outside (externally) of the monitor on a calling queue, versus blocking on condition queues inside the monitor.
    1542 
    1543 Both internal and external scheduling extend to multiple monitors in a natural way.
    1544 \begin{cfa}
    1545 monitor M { `condition e`; ... };
    1546 void foo( M & mutex m1, M & mutex m2 ) {
    1547         ... wait( `e` ); ...                                    $\C{// wait( e, m1, m2 )}$
    1548         ... wait( `e, m1` ); ...
    1549         ... wait( `e, m2` ); ...
    1550 }
    1551 
    1552 void rtn$\(_1\)$( M & mutex m1, M & mutex m2 );
    1553 void rtn$\(_2\)$( M & mutex m1 );
    1554 void bar( M & mutex m1, M & mutex m2 ) {
    1555         ... waitfor( `rtn` ); ...                               $\C{// waitfor( rtn\(_1\), m1, m2 )}$
    1556         ... waitfor( `rtn, m1` ); ...                   $\C{// waitfor( rtn\(_2\), m1 )}$
    1557 }
    1558 \end{cfa}
    1559 For @wait( e )@, the default semantics is to atomically block the signaller and release all acquired mutex types in the parameter list, \ie @wait( e, m1, m2 )@.
    1560 To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@.
    1561 Wait statically verifies the released monitors are the acquired mutex-parameters so unconditional release is safe.
    1562 Similarly, for @waitfor( rtn, ... )@, the default semantics is to atomically block the acceptor and release all acquired mutex types in the parameter list, \ie @waitfor( rtn, m1, m2 )@.
    1563 To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn, m1 )@.
    1564 Waitfor statically verifies the released monitors are the same as the acquired mutex-parameters of the given routine or routine pointer.
    1565 To statically verify the released monitors match with the accepted routine's mutex parameters, the routine (pointer) prototype must be accessible.
    1566 
    1567 Given the ability to release a subset of acquired monitors can result in a \newterm{nested monitor}~\cite{Lister77} deadlock.
    1568 \begin{cfa}
    1569 void foo( M & mutex m1, M & mutex m2 ) {
    1570         ... wait( `e, m1` ); ...                                $\C{// release m1, keeping m2 acquired )}$
    1571 void baz( M & mutex m1, M & mutex m2 ) {        $\C{// must acquire m1 and m2 )}$
    1572         ... signal( `e` ); ...
    1573 \end{cfa}
    1574 The @wait@ only releases @m1@ so the signalling thread cannot acquire both @m1@ and @m2@ to  enter @baz@ to get to the @signal@.
    1575 While deadlock issues can occur with multiple/nesting acquisition, this issue results from the fact that locks, and by extension monitors, are not perfectly composable.
    1576 
    1577 Finally, an important aspect of monitor implementation is barging, \ie can calling threads barge ahead of signalled threads?
     1578 If barging is allowed, synchronization between a signaller and signallee is difficult, often requiring multiple unblock/block cycles (looping around a wait rechecking if a condition is met).
    1579 \begin{quote}
    1580 However, we decree that a signal operation be followed immediately by resumption of a waiting program, without possibility of an intervening procedure call from yet a third program.
    1581 It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signalling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74}
    1582 \end{quote}
    1583 \CFA scheduling \emph{precludes} barging, which simplifies synchronization among threads in the monitor and increases correctness.
    1584 For example, there are no loops in either bounded buffer solution in Figure~\ref{f:GenericBoundedBuffer}.
    1585 Supporting barging prevention as well as extending internal scheduling to multiple monitors is the main source of complexity in the design and implementation of \CFA concurrency.
    1586 
    1587 
    1588 \subsection{Barging Prevention}
    1589 
    1590 Figure~\ref{f:BargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signalling semantics.
    1591 The complexity begins at the end of the inner @mutex@ statement, where the semantics of internal scheduling need to be extended for multiple monitors.
    1592 The problem is that bulk acquire is used in the inner @mutex@ statement where one of the monitors is already acquired.
    1593 When the signalling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting thread to prevent barging into the outer @mutex@ statement by another thread.
    1594 However, both the signalling and signalled threads still need monitor @m1@.
    1595 
    1596 \begin{figure}
    1597 \newbox\myboxA
    1598 \begin{lrbox}{\myboxA}
    1599 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
    1600 monitor M m1, m2;
    1601 condition c;
    1602 mutex( m1 ) {
    1603         ...
    1604         mutex( m1, m2 ) {
    1605                 ... `wait( c )`; // block and release m1, m2
    1606                 // m1, m2 acquired
    1607         } // $\LstCommentStyle{\color{red}release m2}$
    1608         // m1 acquired
    1609 } // release m1
    1610 \end{cfa}
    1611 \end{lrbox}
    1612 
    1613 \newbox\myboxB
    1614 \begin{lrbox}{\myboxB}
    1615 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
    1616 
    1617 
    1618 mutex( m1 ) {
    1619         ...
    1620         mutex( m1, m2 ) {
    1621                 ... `signal( c )`; ...
    1622                 // m1, m2 acquired
    1623         } // $\LstCommentStyle{\color{red}release m2}$
    1624         // m1 acquired
    1625 } // release m1
    1626 \end{cfa}
    1627 \end{lrbox}
    1628 
    1629 \newbox\myboxC
    1630 \begin{lrbox}{\myboxC}
    1631 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
    1632 
    1633 
    1634 mutex( m1 ) {
    1635         ... `wait( c )`; ...
    1636         // m1 acquired
    1637 } // $\LstCommentStyle{\color{red}release m1}$
    1638 
    1639 
    1640 
    1641 
    1642 \end{cfa}
    1643 \end{lrbox}
    1644 
    1645 \begin{cquote}
    1646 \subfloat[Waiting Thread]{\label{f:WaitingThread}\usebox\myboxA}
    1647 \hspace{2\parindentlnth}
    1648 \subfloat[Signalling Thread]{\label{f:SignallingThread}\usebox\myboxB}
    1649 \hspace{2\parindentlnth}
     1650 \subfloat[Other Waiting Thread]{\label{f:OtherWaitingThread}\usebox\myboxC}
    1651 \end{cquote}
    1652 \caption{Barging Prevention}
    1653 \label{f:BargingPrevention}
    1654 \end{figure}
    1655 
    1656 The obvious solution to the problem of multi-monitor scheduling is to keep ownership of all locks until the last lock is ready to be transferred.
    1657 It can be argued that that moment is when the last lock is no longer needed, because this semantics fits most closely to the behaviour of single-monitor scheduling.
    1658 This solution has the main benefit of transferring ownership of groups of monitors, which simplifies the semantics from multiple objects to a single group of objects, effectively making the existing single-monitor semantic viable by simply changing monitors to monitor groups.
    1659 This solution releases the monitors once every monitor in a group can be released.
    1660 However, since some monitors are never released (\eg the monitor of a thread), this interpretation means a group might never be released.
    1661 A more interesting interpretation is to transfer the group until all its monitors are released, which means the group is not passed further and a thread can retain its locks.
    1662 
    1663 However, listing \ref{f:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors.
    1664 Figure~\ref{f:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable.
    1665 Because the third thread is signalled when secretly holding @B@, the goal  becomes unreachable.
    1666 Depending on the order of signals (listing \ref{f:dependency} line \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen:
    1667 
    1668 \begin{comment}
    1669 \paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor @A@ needs to be passed to thread $\beta$ when thread $\alpha$ is done with it.
    1670 \paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor @B@ needs to be retained and passed to thread $\alpha$ along with monitor @A@, which can be done directly or possibly using thread $\beta$ as an intermediate.
    1671 \\
    1672 
    1673 Note that ordering is not determined by a race condition but by whether signalled threads are enqueued in FIFO or FILO order.
    1674 However, regardless of the answer, users can move line \ref{line:signal-a} before line \ref{line:signal-ab} and get the reverse effect for listing \ref{f:dependency}.
    1675 
    1676 In both cases, the threads need to be able to distinguish, on a per monitor basis, which ones need to be released and which ones need to be transferred, which means knowing when to release a group becomes complex and inefficient (see next section) and therefore effectively precludes this approach.
    1677 
    1678 
    1679 \subsubsection{Dependency graphs}
    1680 
    1681 \begin{figure}
    1682 \begin{multicols}{3}
    1683 Thread $\alpha$
    1684 \begin{cfa}[numbers=left, firstnumber=1]
    1685 acquire A
    1686         acquire A & B
    1687                 wait A & B
    1688         release A & B
    1689 release A
    1690 \end{cfa}
    1691 \columnbreak
    1692 Thread $\gamma$
    1693 \begin{cfa}[numbers=left, firstnumber=6, escapechar=|]
    1694 acquire A
    1695         acquire A & B
    1696                 |\label{line:signal-ab}|signal A & B
    1697         |\label{line:release-ab}|release A & B
    1698         |\label{line:signal-a}|signal A
    1699 |\label{line:release-a}|release A
    1700 \end{cfa}
    1701 \columnbreak
    1702 Thread $\beta$
    1703 \begin{cfa}[numbers=left, firstnumber=12, escapechar=|]
    1704 acquire A
    1705         wait A
    1706 |\label{line:release-aa}|release A
    1707 \end{cfa}
    1708 \end{multicols}
    1709 \begin{cfa}[caption={Pseudo-code for the three thread example.},label={f:dependency}]
    1710 \end{cfa}
    1711 \begin{center}
    1712 \input{dependency}
    1713 \end{center}
    1714 \caption{Dependency graph of the statements in listing \ref{f:dependency}}
    1715 \label{fig:dependency}
    1716 \end{figure}
    1717 
    1718 In listing \ref{f:int-bulk-cfa}, there is a solution that satisfies both barging prevention and mutual exclusion.
    1719 If ownership of both monitors is transferred to the waiter when the signaller releases @A & B@ and then the waiter transfers back ownership of @A@ back to the signaller when it releases it, then the problem is solved (@B@ is no longer in use at this point).
    1720 Dynamically finding the correct order is therefore the second possible solution.
    1721 The problem is effectively resolving a dependency graph of ownership requirements.
    1722 Here even the simplest of code snippets requires two transfers and has a super-linear complexity.
     1723 This complexity can be seen in listing \ref{f:explosion}, which is just a direct extension to three monitors, requires at least three ownership transfers and has multiple solutions.
     1724 Furthermore, the presence of multiple solutions for ownership transfer can cause deadlock problems if a specific solution is not consistently picked; in the same way, multiple lock-acquisition orders can cause deadlocks.
    1725 \begin{figure}
    1726 \begin{multicols}{2}
    1727 \begin{cfa}
    1728 acquire A
    1729         acquire B
    1730                 acquire C
    1731                         wait A & B & C
    1732                 release C
    1733         release B
    1734 release A
    1735 \end{cfa}
    1736 
    1737 \columnbreak
    1738 
    1739 \begin{cfa}
    1740 acquire A
    1741         acquire B
    1742                 acquire C
    1743                         signal A & B & C
    1744                 release C
    1745         release B
    1746 release A
    1747 \end{cfa}
    1748 \end{multicols}
    1749 \begin{cfa}[caption={Extension to three monitors of listing \ref{f:int-bulk-cfa}},label={f:explosion}]
    1750 \end{cfa}
    1751 \end{figure}
    1752 
    1753 Given the three threads example in listing \ref{f:dependency}, figure \ref{fig:dependency} shows the corresponding dependency graph that results, where every node is a statement of one of the three threads, and the arrows the dependency of that statement (\eg $\alpha1$ must happen before $\alpha2$).
    1754 The extra challenge is that this dependency graph is effectively post-mortem, but the runtime system needs to be able to build and solve these graphs as the dependencies unfold.
    1755 Resolving dependency graphs being a complex and expensive endeavour, this solution is not the preferred one.
    1756 
    1757 \subsubsection{Partial Signalling} \label{partial-sig}
    1758 \end{comment}
    1759 
    1760 Finally, the solution that is chosen for \CFA is to use partial signalling.
    1761 Again using listing \ref{f:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@.
    1762 Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread.
    1763 This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met.
     1764 This solution has a much simpler implementation than a dependency-graph solving algorithm, which is why it was chosen.
    1765 Furthermore, after being fully implemented, this solution does not appear to have any significant downsides.
    1766 
    1767 Using partial signalling, listing \ref{f:dependency} can be solved easily:
    1768 \begin{itemize}
    1769         \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor @B@ to thread $\alpha$ and continues to hold monitor @A@.
    1770         \item When thread $\gamma$ reaches line \ref{line:release-a}  it transfers monitor @A@ to thread $\beta$  and wakes it up.
    1771         \item When thread $\beta$  reaches line \ref{line:release-aa} it transfers monitor @A@ to thread $\alpha$ and wakes it up.
    1772 \end{itemize}
    1773 
    1774 
    1775 \subsection{Signalling: Now or Later}
     1988Threads calling excluded functions block outside of (external to) the monitor on the calling queue, versus blocking on condition queues inside of (internal to) the monitor.
     1989Figure~\ref{f:RWExt} shows a readers/writer lock written using external scheduling, where a waiting reader detects a writer using the resource and restricts further calls until the writer exits by calling @EndWrite@.
     1990The writer does a similar action for each reader or writer using the resource.
      1991Note, no new calls to @StartRead@/@StartWrite@ may occur when waiting for the call to @EndRead@/@EndWrite@.
     1992External scheduling allows waiting for events from other threads while restricting unrelated events, that would otherwise have to wait on conditions in the monitor.
      1993The mechanism can be done in terms of control flow, \eg Ada @accept@ or \uC @_Accept@, or in terms of data, \eg Go @select@ on channels.
     1994While both mechanisms have strengths and weaknesses, this project uses the control-flow mechanism to be consistent with other language features.
     1995% Two challenges specific to \CFA for external scheduling are loose object-definitions (see Section~\ref{s:LooseObjectDefinitions}) and multiple-monitor functions (see Section~\ref{s:Multi-MonitorScheduling}).
     1996
     1997Figure~\ref{f:DatingService} shows a dating service demonstrating non-blocking and blocking signalling.
     1998The dating service matches girl and boy threads with matching compatibility codes so they can exchange phone numbers.
     1999A thread blocks until an appropriate partner arrives.
     2000The complexity is exchanging phone numbers in the monitor because of the mutual-exclusion property.
     2001For signal scheduling, the @exchange@ condition is necessary to block the thread finding the match, while the matcher unblocks to take the opposite number, post its phone number, and unblock the partner.
      2002For signal-block scheduling, the implicit urgent-queue replaces the explicit @exchange@-condition and @signal_block@ puts the finding thread on the urgent condition and unblocks the matcher.
     2003The dating service is an example of a monitor that cannot be written using external scheduling because it requires knowledge of calling parameters to make scheduling decisions, and parameters of waiting threads are unavailable;
     2004as well, an arriving thread may not find a partner and must wait, which requires a condition variable, and condition variables imply internal scheduling.
     2005Furthermore, barging corrupts the dating service during an exchange because a barger may also match and change the phone numbers, invalidating the previous exchange phone number.
     2006Putting loops around the @wait@s does not correct the problem;
     2007the simple solution must be restructured to account for barging.
    17762008
    17772009\begin{figure}
     
    17842016        int GirlPhNo, BoyPhNo;
    17852017        condition Girls[CCodes], Boys[CCodes];
    1786         condition exchange;
     2018        `condition exchange;`
    17872019};
    17882020int girl( DS & mutex ds, int phNo, int ccode ) {
     
    17902022                wait( Girls[ccode] );
    17912023                GirlPhNo = phNo;
    1792                 exchange.signal();
     2024                `signal( exchange );`
    17932025        } else {
    17942026                GirlPhNo = phNo;
    1795                 signal( Boys[ccode] );
    1796                 exchange.wait();
    1797         } // if
     2027                `signal( Boys[ccode] );`
     2028                `wait( exchange );`
     2029        }
    17982030        return BoyPhNo;
    17992031}
     
    18202052        } else {
    18212053                GirlPhNo = phNo; // make phone number available
    1822                 signal_block( Boys[ccode] ); // restart boy
     2054                `signal_block( Boys[ccode] );` // restart boy
    18232055
    18242056        } // if
     
    18342066\qquad
    18352067\subfloat[\lstinline@signal_block@]{\label{f:DatingSignalBlock}\usebox\myboxB}
    1836 \caption{Dating service. }
    1837 \label{f:Dating service}
     2068\caption{Dating service}
     2069\label{f:DatingService}
    18382070\end{figure}
    18392071
    1840 An important note is that, until now, signalling a monitor was a delayed operation.
    1841 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the @signal@ statement.
    1842 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the @signal_block@ routine.
    1843 
    1844 The example in table \ref{tbl:datingservice} highlights the difference in behaviour.
    1845 As mentioned, @signal@ only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed.
    1846 To avoid this explicit synchronization, the @condition@ type offers the @signal_block@ routine, which handles the two-way handshake as shown in the example.
    1847 This feature removes the need for a second condition variables and simplifies programming.
    1848 Like every other monitor semantic, @signal_block@ uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to @signal_block@, meaning no other thread can acquire the monitor either before or after the call.
    1849 
    1850 % ======================================================================
    1851 % ======================================================================
    1852 \section{External scheduling} \label{extsched}
    1853 % ======================================================================
    1854 % ======================================================================
    1855 An alternative to internal scheduling is external scheduling (see Table~\ref{tbl:sched}).
    1856 
    1857 \begin{comment}
    1858 \begin{table}
    1859 \begin{tabular}{|c|c|c|}
    1860 Internal Scheduling & External Scheduling & Go\\
    1861 \hline
    1862 \begin{uC++}[tabsize=3]
    1863 _Monitor Semaphore {
    1864         condition c;
    1865         bool inUse;
    1866 public:
    1867         void P() {
    1868                 if(inUse)
    1869                         wait(c);
    1870                 inUse = true;
    1871         }
    1872         void V() {
    1873                 inUse = false;
    1874                 signal(c);
    1875         }
    1876 }
    1877 \end{uC++}&\begin{uC++}[tabsize=3]
    1878 _Monitor Semaphore {
    1879 
    1880         bool inUse;
    1881 public:
    1882         void P() {
    1883                 if(inUse)
    1884                         _Accept(V);
    1885                 inUse = true;
    1886         }
    1887         void V() {
    1888                 inUse = false;
    1889 
    1890         }
    1891 }
    1892 \end{uC++}&\begin{Go}[tabsize=3]
    1893 type MySem struct {
    1894         inUse bool
    1895         c     chan bool
    1896 }
    1897 
    1898 // acquire
    1899 func (s MySem) P() {
    1900         if s.inUse {
    1901                 select {
    1902                 case <-s.c:
    1903                 }
    1904         }
    1905         s.inUse = true
    1906 }
    1907 
    1908 // release
    1909 func (s MySem) V() {
    1910         s.inUse = false
    1911 
    1912         // This actually deadlocks
    1913         // when single thread
    1914         s.c <- false
    1915 }
    1916 \end{Go}
     2072In summation, for internal scheduling, non-blocking signalling (as in the producer/consumer example) is used when the signaller is providing the cooperation for a waiting thread;
      2073the signaller enters the monitor and changes state, detects a waiting thread that can use the state, performs a non-blocking signal on the condition queue for the waiting thread, and exits the monitor to run concurrently.
     2074The waiter unblocks next from the urgent queue, uses/takes the state, and exits the monitor.
     2075Blocking signal is the reverse, where the waiter is providing the cooperation for the signalling thread;
     2076the signaller enters the monitor, detects a waiting thread providing the necessary state, performs a blocking signal to place it on the urgent queue and unblock the waiter.
     2077The waiter changes state and exits the monitor, and the signaller unblocks next from the urgent queue to use/take the state.
     2078
     2079Both internal and external scheduling extend to multiple monitors in a natural way.
     2080\begin{cquote}
     2081\begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}}
     2082\begin{cfa}
     2083monitor M { `condition e`; ... };
     2084void foo( M & mutex m1, M & mutex m2 ) {
     2085        ... wait( `e` ); ...   // wait( e, m1, m2 )
     2086        ... wait( `e, m1` ); ...
     2087        ... wait( `e, m2` ); ...
     2088}
     2089\end{cfa}
     2090&
     2091\begin{cfa}
     2092void rtn$\(_1\)$( M & mutex m1, M & mutex m2 );
     2093void rtn$\(_2\)$( M & mutex m1 );
     2094void bar( M & mutex m1, M & mutex m2 ) {
     2095        ... waitfor( `rtn` ); ...       // $\LstCommentStyle{waitfor( rtn\(_1\), m1, m2 )}$
     2096        ... waitfor( `rtn, m1` ); ... // $\LstCommentStyle{waitfor( rtn\(_2\), m1 )}$
     2097}
     2098\end{cfa}
    19172099\end{tabular}
    1918 \caption{Different forms of scheduling.}
    1919 \label{tbl:sched}
    1920 \end{table}
    1921 \end{comment}
    1922 
    1923 This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency.
    1924 Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring.
    1925 External scheduling can generally be done either in terms of control flow (\eg Ada with @accept@, \uC with @_Accept@) or in terms of data (\eg Go with channels).
    1926 Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the languages semantics.
    1927 Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines.
    1928 The previous example shows a simple use @_Accept@ versus @wait@/@signal@ and its advantages.
    1929 Note that while other languages often use @accept@/@select@ as the core external scheduling keyword, \CFA uses @waitfor@ to prevent name collisions with existing socket \textbf{api}s.
    1930 
    1931 For the @P@ member above using internal scheduling, the call to @wait@ only guarantees that @V@ is the last routine to access the monitor, allowing a third routine, say @isInUse()@, acquire mutual exclusion several times while routine @P@ is waiting.
    1932 On the other hand, external scheduling guarantees that while routine @P@ is waiting, no other routine than @V@ can acquire the monitor.
    1933 
    1934 % ======================================================================
    1935 % ======================================================================
    1936 \subsection{Loose Object Definitions}
    1937 % ======================================================================
    1938 % ======================================================================
    1939 In \uC, a monitor class declaration includes an exhaustive list of monitor operations.
    1940 Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user:
    1941 
    1942 \begin{cfa}
    1943 monitor A {};
    1944 
    1945 void f(A & mutex a);
    1946 void g(A & mutex a) {
    1947         waitfor(f); // Obvious which f() to wait for
    1948 }
    1949 
    1950 void f(A & mutex a, int); // New different F added in scope
    1951 void h(A & mutex a) {
    1952         waitfor(f); // Less obvious which f() to wait for
    1953 }
    1954 \end{cfa}
    1955 
    1956 Furthermore, external scheduling is an example where implementation constraints become visible from the interface.
    1957 Here is the cfa-code for the entering phase of a monitor:
    1958 \begin{center}
    1959 \begin{tabular}{l}
    1960 \begin{cfa}
    1961         if monitor is free
    1962                 enter
    1963         elif already own the monitor
    1964                 continue
    1965         elif monitor accepts me
    1966                 enter
    1967         else
    1968                 block
    1969 \end{cfa}
    1970 \end{tabular}
    1971 \end{center}
    1972 For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions.
    1973 However, a fast check for @monitor accepts me@ is much harder to implement depending on the constraints put on the monitors.
    1974 Indeed, monitors are often expressed as an entry queue and some acceptor queue as in Figure~\ref{fig:ClassicalMonitor}.
     2100\end{cquote}
     2101For @wait( e )@, the default semantics is to atomically block the signaller and release all acquired mutex parameters, \ie @wait( e, m1, m2 )@.
     2102To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@.
      2103Wait cannot statically verify the released monitors are the acquired mutex-parameters without disallowing separately compiled helper functions calling @wait@.
     2104While \CC supports bulk locking, @wait@ only accepts a single lock for a condition variable, so bulk locking with condition variables is asymmetric.
     2105Finally, a signaller,
     2106\begin{cfa}
     2107void baz( M & mutex m1, M & mutex m2 ) {
     2108        ... signal( e ); ...
     2109}
     2110\end{cfa}
     2111must have acquired at least the same locks as the waiting thread signalled from a condition queue to allow the locks to be passed, and hence, prevent barging.
     2112
     2113Similarly, for @waitfor( rtn )@, the default semantics is to atomically block the acceptor and release all acquired mutex parameters, \ie @waitfor( rtn, m1, m2 )@.
     2114To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn, m1 )@.
     2115@waitfor@ does statically verify the monitor types passed are the same as the acquired mutex-parameters of the given function or function pointer, hence the function (pointer) prototype must be accessible.
     2116% When an overloaded function appears in an @waitfor@ statement, calls to any function with that name are accepted.
     2117% The rationale is that members with the same name should perform a similar function, and therefore, all should be eligible to accept a call.
     2118Overloaded functions can be disambiguated using a cast
     2119\begin{cfa}
     2120void rtn( M & mutex m );
     2121`int` rtn( M & mutex m );
     2122waitfor( (`int` (*)( M & mutex ))rtn, m );
     2123\end{cfa}
     2124
     2125The ability to release a subset of acquired monitors can result in a \newterm{nested monitor}~\cite{Lister77} deadlock.
     2126\begin{cfa}
     2127void foo( M & mutex m1, M & mutex m2 ) {
     2128        ... wait( `e, m1` ); ...                                $\C{// release m1, keeping m2 acquired )}$
     2129void bar( M & mutex m1, M & mutex m2 ) {        $\C{// must acquire m1 and m2 )}$
     2130        ... signal( `e` ); ...
     2131\end{cfa}
     2132The @wait@ only releases @m1@ so the signalling thread cannot acquire @m1@ and @m2@ to enter @bar@ and @signal@ the condition.
     2133While deadlock can occur with multiple/nesting acquisition, this is a consequence of locks, and by extension monitors, not being perfectly composable.
     2134
     2135
     2136
     2137\subsection{\texorpdfstring{Extended \protect\lstinline@waitfor@}{Extended waitfor}}
     2138
     2139Figure~\ref{f:ExtendedWaitfor} shows the extended form of the @waitfor@ statement to conditionally accept one of a group of mutex functions, with an optional statement to be performed \emph{after} the mutex function finishes.
     2140For a @waitfor@ clause to be executed, its @when@ must be true and an outstanding call to its corresponding member(s) must exist.
     2141The \emph{conditional-expression} of a @when@ may call a function, but the function must not block or context switch.
     2142If there are multiple acceptable mutex calls, selection occurs top-to-bottom (prioritized) among the @waitfor@ clauses, whereas some programming languages with similar mechanisms accept nondeterministically for this case, \eg Go \lstinline[morekeywords=select]@select@.
     2143If some accept guards are true and there are no outstanding calls to these members, the acceptor is blocked until a call to one of these members is made.
     2144If there is a @timeout@ clause, it provides an upper bound on waiting.
     2145If all the accept guards are false, the statement does nothing, unless there is a terminating @else@ clause with a true guard, which is executed instead.
     2146Hence, the terminating @else@ clause allows a conditional attempt to accept a call without blocking.
     2147If both @timeout@ and @else@ clause are present, the @else@ must be conditional, or the @timeout@ is never triggered.
     2148There is also a traditional future wait queue (not shown) (\eg Microsoft (@WaitForMultipleObjects@)), to wait for a specified number of future elements in the queue.
    19752149
    19762150\begin{figure}
    19772151\centering
    1978 \subfloat[Classical Monitor] {
    1979 \label{fig:ClassicalMonitor}
    1980 {\resizebox{0.45\textwidth}{!}{\input{monitor}}}
    1981 }% subfloat
    1982 \qquad
    1983 \subfloat[bulk acquire Monitor] {
    1984 \label{fig:BulkMonitor}
    1985 {\resizebox{0.45\textwidth}{!}{\input{ext_monitor}}}
    1986 }% subfloat
    1987 \caption{External Scheduling Monitor}
     2152\begin{cfa}
     2153`when` ( $\emph{conditional-expression}$ )      $\C{// optional guard}$
     2154        waitfor( $\emph{mutex-member-name}$ ) $\emph{statement}$ $\C{// action after call}$
     2155`or` `when` ( $\emph{conditional-expression}$ ) $\C{// any number of functions}$
     2156        waitfor( $\emph{mutex-member-name}$ ) $\emph{statement}$
     2157`or`    ...
     2158`when` ( $\emph{conditional-expression}$ ) $\C{// optional guard}$
     2159        `timeout` $\emph{statement}$ $\C{// optional terminating timeout clause}$
     2160`when` ( $\emph{conditional-expression}$ ) $\C{// optional guard}$
     2161        `else`  $\emph{statement}$ $\C{// optional terminating clause}$
     2162\end{cfa}
     2163\caption{Extended \protect\lstinline@waitfor@}
     2164\label{f:ExtendedWaitfor}
    19882165\end{figure}
    19892166
    1990 There are other alternatives to these pictures, but in the case of the left picture, implementing a fast accept check is relatively easy.
    1991 Restricted to a fixed number of mutex members, N, the accept check reduces to updating a bitmask when the acceptor queue changes, a check that executes in a single instruction even with a fairly large number (\eg 128) of mutex members.
    1992 This approach requires a unique dense ordering of routines with an upper-bound and that ordering must be consistent across translation units.
    1993 For OO languages these constraints are common, since objects only offer adding member routines consistently across translation units via inheritance.
    1994 However, in \CFA users can extend objects with mutex routines that are only visible in certain translation unit.
    1995 This means that establishing a program-wide dense-ordering among mutex routines can only be done in the program linking phase, and still could have issues when using dynamically shared objects.
    1996 
    1997 The alternative is to alter the implementation as in Figure~\ref{fig:BulkMonitor}.
    1998 Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separate.
    1999 Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions.
    2000 Storing an array of accepted routine pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search.
    2001 Furthermore, supporting nested external scheduling (\eg listing \ref{f:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued.
     2167Note, a group of conditional @waitfor@ clauses is \emph{not} the same as a group of @if@ statements, \eg:
     2168\begin{cfa}
     2169if ( C1 ) waitfor( mem1 );                       when ( C1 ) waitfor( mem1 );
     2170else if ( C2 ) waitfor( mem2 );         or when ( C2 ) waitfor( mem2 );
     2171\end{cfa}
     2172The left example only accepts @mem1@ if @C1@ is true or only @mem2@ if @C2@ is true.
     2173The right example accepts either @mem1@ or @mem2@ if @C1@ and @C2@ are true.
     2174
      2175An interesting use of @waitfor@ is accepting the @mutex@ destructor to know when an object is deallocated, \eg assume the bounded buffer is restructured from a monitor to a thread with the following @main@.
     2176\begin{cfa}
     2177void main( Buffer(T) & buffer ) with(buffer) {
     2178        for () {
     2179                `waitfor( ^?{}, buffer )` break;
     2180                or when ( count != 20 ) waitfor( insert, buffer ) { ... }
     2181                or when ( count != 0 ) waitfor( remove, buffer ) { ... }
     2182        }
     2183        // clean up
     2184}
     2185\end{cfa}
     2186When the program main deallocates the buffer, it first calls the buffer's destructor, which is accepted, the destructor runs, and the buffer is deallocated.
     2187However, the buffer thread cannot continue after the destructor call because the object is gone;
     2188hence, clean up in @main@ cannot occur, which means destructors for local objects are not run.
     2189To make this useful capability work, the semantics for accepting the destructor is the same as @signal@, \ie the destructor call is placed on urgent and the acceptor continues execution, which ends the loop, cleans up, and the thread terminates.
     2190Then, the destructor caller unblocks from urgent to deallocate the object.
     2191Accepting the destructor is the idiomatic way in \CFA to terminate a thread performing direct communication.
     2192
     2193
     2194\subsection{Bulk Barging Prevention}
     2195
     2196Figure~\ref{f:BulkBargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signalling semantics.
     2197The complexity begins at the end of the inner @mutex@ statement, where the semantics of internal scheduling need to be extended for multiple monitors.
     2198The problem is that bulk acquire is used in the inner @mutex@ statement where one of the monitors is already acquired.
     2199When the signalling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting threads to prevent barging into the outer @mutex@ statement by another thread.
     2200However, both the signalling and waiting threads W1 and W2 need some subset of monitors @m1@ and @m2@.
     2201\begin{cquote}
     2202condition c: (order 1) W2(@m2@), W1(@m1@,@m2@)\ \ \ or\ \ \ (order 2) W1(@m1@,@m2@), W2(@m2@) \\
     2203S: acq. @m1@ $\rightarrow$ acq. @m1,m2@ $\rightarrow$ @signal(c)@ $\rightarrow$ rel. @m2@ $\rightarrow$ pass @m2@ unblock W2 (order 2) $\rightarrow$ rel. @m1@ $\rightarrow$ pass @m1,m2@ unblock W1 \\
     2204\hspace*{2.75in}$\rightarrow$ rel. @m1@ $\rightarrow$ pass @m1,m2@ unblock W1 (order 1)
     2205\end{cquote}
    20022206
    20032207\begin{figure}
    2004 \begin{cfa}[caption={Example of nested external scheduling},label={f:nest-ext}]
    2005 monitor M {};
    2006 void foo( M & mutex a ) {}
    2007 void bar( M & mutex b ) {
    2008         // Nested in the waitfor(bar, c) call
    2009         waitfor(foo, b);
    2010 }
    2011 void baz( M & mutex c ) {
    2012         waitfor(bar, c);
    2013 }
    2014 
    2015 \end{cfa}
     2208\newbox\myboxA
     2209\begin{lrbox}{\myboxA}
     2210\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2211monitor M m1, m2;
     2212condition c;
     2213mutex( m1 ) { // $\LstCommentStyle{\color{red}outer}$
     2214        ...
     2215        mutex( m1, m2 ) { // $\LstCommentStyle{\color{red}inner}$
     2216                ... `signal( c )`; ...
     2217                // m1, m2 still acquired
     2218        } // $\LstCommentStyle{\color{red}release m2}$
     2219        // m1 acquired
     2220} // release m1
     2221\end{cfa}
     2222\end{lrbox}
     2223
     2224\newbox\myboxB
     2225\begin{lrbox}{\myboxB}
     2226\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2227
     2228
     2229mutex( m1 ) {
     2230        ...
     2231        mutex( m1, m2 ) {
     2232                ... `wait( c )`; // release m1, m2
     2233                // m1, m2 reacquired
     2234        } // $\LstCommentStyle{\color{red}release m2}$
     2235        // m1 acquired
     2236} // release m1
     2237\end{cfa}
     2238\end{lrbox}
     2239
     2240\newbox\myboxC
     2241\begin{lrbox}{\myboxC}
     2242\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2243
     2244
     2245mutex( m2 ) {
     2246        ... `wait( c )`; // release m2
     2247        // m2 reacquired
     2248} // $\LstCommentStyle{\color{red}release m2}$
     2249
     2250
     2251
     2252
     2253\end{cfa}
     2254\end{lrbox}
     2255
     2256\begin{cquote}
     2257\subfloat[Signalling Thread (S)]{\label{f:SignallingThread}\usebox\myboxA}
     2258\hspace{3\parindentlnth}
     2259\subfloat[Waiting Thread (W1)]{\label{f:WaitingThread}\usebox\myboxB}
     2260\hspace{2\parindentlnth}
     2261\subfloat[Waiting Thread (W2)]{\label{f:OtherWaitingThread}\usebox\myboxC}
     2262\end{cquote}
     2263\caption{Bulk Barging Prevention}
     2264\label{f:BulkBargingPrevention}
    20162265\end{figure}
    20172266
    2018 Note that in the right picture, tasks need to always keep track of the monitors associated with mutex routines, and the routine mask needs to have both a routine pointer and a set of monitors, as is discussed in the next section.
    2019 These details are omitted from the picture for the sake of simplicity.
    2020 
    2021 At this point, a decision must be made between flexibility and performance.
    2022 Many design decisions in \CFA achieve both flexibility and performance, for example polymorphic routines add significant flexibility but inlining them means the optimizer can easily remove any runtime cost.
    2023 Here, however, the cost of flexibility cannot be trivially removed.
    2024 In the end, the most flexible approach has been chosen since it allows users to write programs that would otherwise be  hard to write.
    2025 This decision is based on the assumption that writing fast but inflexible locks is closer to a solved problem than writing locks that are as flexible as external scheduling in \CFA.
    2026 
    2027 % ======================================================================
    2028 % ======================================================================
     2267One scheduling solution is for the signaller S to keep ownership of all locks until the last lock is ready to be transferred, because this semantics fits most closely to the behaviour of single-monitor scheduling.
      2268However, this solution is inefficient if W2 waited first and can be immediately passed @m2@ when released, while S retains @m1@ until completion of the outer mutex statement.
      2269If W1 waited first, the signaller must retain @m1@ and @m2@ until completion of the outer mutex statement and then pass both to W1.
     2270% Furthermore, there is an execution sequence where the signaller always finds waiter W2, and hence, waiter W1 starves.
     2271To support this efficient semantics (and prevent barging), the implementation maintains a list of monitors acquired for each blocked thread.
     2272When a signaller exits or waits in a monitor function/statement, the front waiter on urgent is unblocked if all its monitors are released.
     2273Implementing a fast subset check for the necessary released monitors is important.
     2274% The benefit is encapsulating complexity into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met.
     2275
     2276
     2277\subsection{Loose Object Definitions}
     2278\label{s:LooseObjectDefinitions}
     2279
     2280In an object-oriented programming language, a class includes an exhaustive list of operations.
     2281A new class can add members via static inheritance but the subclass still has an exhaustive list of operations.
     2282(Dynamic member adding, \eg JavaScript~\cite{JavaScript}, is not considered.)
     2283In the object-oriented scenario, the type and all its operators are always present at compilation (even separate compilation), so it is possible to number the operations in a bit mask and use an $O(1)$ compare with a similar bit mask created for the operations specified in a @waitfor@.
     2284
     2285However, in \CFA, monitor functions can be statically added/removed in translation units, making a fast subset check difficult.
     2286\begin{cfa}
     2287        monitor M { ... }; // common type, included in .h file
     2288translation unit 1
     2289        void `f`( M & mutex m );
     2290        void g( M & mutex m ) { waitfor( `f`, m ); }
     2291translation unit 2
     2292        void `f`( M & mutex m ); $\C{// replacing f and g for type M in this translation unit}$
     2293        void `g`( M & mutex m );
     2294        void h( M & mutex m ) { waitfor( `f`, m ) or waitfor( `g`, m ); } $\C{// extending type M in this translation unit}$
     2295\end{cfa}
     2296The @waitfor@ statements in each translation unit cannot form a unique bit-mask because the monitor type does not carry that information.
     2297Hence, function pointers are used to identify the functions listed in the @waitfor@ statement, stored in a variable-sized array.
     2298Then, the same implementation approach used for the urgent stack is used for the calling queue.
      2299Each caller has a list of monitors acquired, and the @waitfor@ statement performs a (usually short) linear search matching functions in the @waitfor@ list with called functions, and then verifying the associated mutex locks can be transferred.
     2300(A possible way to construct a dense mapping is at link or load-time.)
     2301
     2302
    20292303\subsection{Multi-Monitor Scheduling}
    2030 % ======================================================================
    2031 % ======================================================================
    2032 
    2033 External scheduling, like internal scheduling, becomes significantly more complex when introducing multi-monitor syntax.
    2034 Even in the simplest possible case, some new semantics needs to be established:
    2035 \begin{cfa}
    2036 monitor M {};
    2037 
    2038 void f(M & mutex a);
    2039 
    2040 void g(M & mutex b, M & mutex c) {
    2041         waitfor(f); // two monitors M => unknown which to pass to f(M & mutex)
    2042 }
    2043 \end{cfa}
    2044 The obvious solution is to specify the correct monitor as follows:
    2045 
    2046 \begin{cfa}
    2047 monitor M {};
    2048 
    2049 void f(M & mutex a);
    2050 
    2051 void g(M & mutex a, M & mutex b) {
    2052         // wait for call to f with argument b
    2053         waitfor(f, b);
    2054 }
    2055 \end{cfa}
    2056 This syntax is unambiguous.
    2057 Both locks are acquired and kept by @g@.
    2058 When routine @f@ is called, the lock for monitor @b@ is temporarily transferred from @g@ to @f@ (while @g@ still holds lock @a@).
    2059 This behaviour can be extended to the multi-monitor @waitfor@ statement as follows.
    2060 
    2061 \begin{cfa}
    2062 monitor M {};
    2063 
    2064 void f(M & mutex a, M & mutex b);
    2065 
    2066 void g(M & mutex a, M & mutex b) {
    2067         // wait for call to f with arguments a and b
    2068         waitfor(f, a, b);
    2069 }
    2070 \end{cfa}
    2071 
    2072 Note that the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired in the routine. @waitfor@ used in any other context is undefined behaviour.
    2073 
    2074 An important behaviour to note is when a set of monitors only match partially:
    2075 
    2076 \begin{cfa}
    2077 mutex struct A {};
    2078 
    2079 mutex struct B {};
    2080 
    2081 void g(A & mutex a, B & mutex b) {
    2082         waitfor(f, a, b);
    2083 }
    2084 
    2085 A a1, a2;
    2086 B b;
    2087 
    2088 void foo() {
    2089         g(a1, b); // block on accept
    2090 }
    2091 
    2092 void bar() {
    2093         f(a2, b); // fulfill cooperation
    2094 }
    2095 \end{cfa}
    2096 While the equivalent can happen when using internal scheduling, the fact that conditions are specific to a set of monitors means that users have to use two different condition variables.
    2097 In both cases, partially matching monitor sets does not wakeup the waiting thread.
    2098 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; @waitfor(f,a,b)@ and @waitfor(f,b,a)@ are indistinguishable waiting condition.
    2099 
    2100 % ======================================================================
    2101 % ======================================================================
    2102 \subsection{\protect\lstinline|waitfor| Semantics}
    2103 % ======================================================================
    2104 % ======================================================================
    2105 
    2106 Syntactically, the @waitfor@ statement takes a routine identifier and a set of monitors.
    2107 While the set of monitors can be any list of expressions, the routine name is more restricted because the compiler validates at compile time the validity of the routine type and the parameters used with the @waitfor@ statement.
    2108 It checks that the set of monitors passed in matches the requirements for a routine call.
    2109 Figure~\ref{f:waitfor} shows various usages of the waitfor statement and which are acceptable.
    2110 The choice of the routine type is made ignoring any non-@mutex@ parameter.
    2111 One limitation of the current implementation is that it does not handle overloading, but overloading is possible.
     2304\label{s:Multi-MonitorScheduling}
     2305
     2306External scheduling, like internal scheduling, becomes significantly more complex for multi-monitor semantics.
     2307Even in the simplest case, new semantics need to be established.
     2308\begin{cfa}
     2309monitor M { ... };
     2310void f( M & mutex m1 );
     2311void g( M & mutex m1, M & mutex m2 ) { `waitfor( f );` } $\C{// pass m1 or m2 to f?}$
     2312\end{cfa}
     2313The solution is for the programmer to disambiguate:
     2314\begin{cfa}
     2315waitfor( f, `m2` ); $\C{// wait for call to f with argument m2}$
     2316\end{cfa}
     2317Both locks are acquired by function @g@, so when function @f@ is called, the lock for monitor @m2@ is passed from @g@ to @f@, while @g@ still holds lock @m1@.
     2318This behaviour can be extended to the multi-monitor @waitfor@ statement.
     2319\begin{cfa}
     2320monitor M { ... };
     2321void f( M & mutex m1, M & mutex m2 );
      2322void g( M & mutex m1, M & mutex m2 ) { waitfor( f, `m1, m2` ); } $\C{// wait for call to f with arguments m1 and m2}$
     2323\end{cfa}
     2324Again, the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired by the accepting function.
     2325Also, the order of the monitors in a @waitfor@ statement is unimportant.
     2326
     2327Figure~\ref{f:UnmatchedMutexSets} shows an example where, for internal and external scheduling with multiple monitors, a signalling or accepting thread must match exactly, \ie partial matching results in waiting.
     2328For both examples, the set of monitors is disjoint so unblocking is impossible.
     2329
    21122330\begin{figure}
    2113 \begin{cfa}[caption={Various correct and incorrect uses of the waitfor statement},label={f:waitfor}]
    2114 monitor A{};
    2115 monitor B{};
    2116 
    2117 void f1( A & mutex );
    2118 void f2( A & mutex, B & mutex );
    2119 void f3( A & mutex, int );
    2120 void f4( A & mutex, int );
    2121 void f4( A & mutex, double );
    2122 
    2123 void foo( A & mutex a1, A & mutex a2, B & mutex b1, B & b2 ) {
    2124         A * ap = & a1;
    2125         void (*fp)( A & mutex ) = f1;
    2126 
    2127         waitfor(f1, a1);     // Correct : 1 monitor case
    2128         waitfor(f2, a1, b1); // Correct : 2 monitor case
    2129         waitfor(f3, a1);     // Correct : non-mutex arguments are ignored
    2130         waitfor(f1, *ap);    // Correct : expression as argument
    2131 
    2132         waitfor(f1, a1, b1); // Incorrect : Too many mutex arguments
    2133         waitfor(f2, a1);     // Incorrect : Too few mutex arguments
    2134         waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match
    2135         waitfor(f1, 1);      // Incorrect : 1 not a mutex argument
    2136         waitfor(f9, a1);     // Incorrect : f9 routine does not exist
    2137         waitfor(*fp, a1 );   // Incorrect : fp not an identifier
    2138         waitfor(f4, a1);     // Incorrect : f4 ambiguous
    2139 
    2140         waitfor(f2, a1, b2); // Undefined behaviour : b2 not mutex
    2141 }
    2142 \end{cfa}
     2331\centering
     2332\begin{lrbox}{\myboxA}
     2333\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2334monitor M1 {} m11, m12;
     2335monitor M2 {} m2;
     2336condition c;
     2337void f( M1 & mutex m1, M2 & mutex m2 ) {
     2338        signal( c );
     2339}
     2340void g( M1 & mutex m1, M2 & mutex m2 ) {
     2341        wait( c );
     2342}
     2343g( `m11`, m2 ); // block on wait
     2344f( `m12`, m2 ); // cannot fulfil
     2345\end{cfa}
     2346\end{lrbox}
     2347
     2348\begin{lrbox}{\myboxB}
     2349\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2350monitor M1 {} m11, m12;
     2351monitor M2 {} m2;
     2352
     2353void f( M1 & mutex m1, M2 & mutex m2 ) {
     2354
     2355}
     2356void g( M1 & mutex m1, M2 & mutex m2 ) {
     2357        waitfor( f, m1, m2 );
     2358}
     2359g( `m11`, m2 ); // block on accept
     2360f( `m12`, m2 ); // cannot fulfil
     2361\end{cfa}
     2362\end{lrbox}
     2363\subfloat[Internal scheduling]{\label{f:InternalScheduling}\usebox\myboxA}
     2364\hspace{3pt}
     2365\vrule
     2366\hspace{3pt}
     2367\subfloat[External scheduling]{\label{f:ExternalScheduling}\usebox\myboxB}
     2368\caption{Unmatched \protect\lstinline@mutex@ sets}
     2369\label{f:UnmatchedMutexSets}
    21432370\end{figure}
    21442371
    2145 Finally, for added flexibility, \CFA supports constructing a complex @waitfor@ statement using the @or@, @timeout@ and @else@.
    2146 Indeed, multiple @waitfor@ clauses can be chained together using @or@; this chain forms a single statement that uses baton pass to any routine that fits one of the routine+monitor set passed in.
    2147 To enable users to tell which accepted routine executed, @waitfor@s are followed by a statement (including the null statement @;@) or a compound statement, which is executed after the clause is triggered.
    2148 A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks for a matching routine call already arrived and otherwise continues.
    2149 Any and all of these clauses can be preceded by a @when@ condition to dynamically toggle the accept clauses on or off based on some current state.
    2150 Figure~\ref{f:waitfor2} demonstrates several complex masks and some incorrect ones.
     2372
     2373\subsection{\texorpdfstring{\protect\lstinline@mutex@ Threads}{mutex Threads}}
     2374
     2375Threads in \CFA can also be monitors to allow \emph{direct communication} among threads, \ie threads can have mutex functions that are called by other threads.
     2376Hence, all monitor features are available when using threads.
     2377Figure~\ref{f:DirectCommunication} shows a comparison of direct call communication in \CFA with direct channel communication in Go.
     2378(Ada provides a similar mechanism to the \CFA direct communication.)
     2379The program main in both programs communicates directly with the other thread versus indirect communication where two threads interact through a passive monitor.
      2380Both direct and indirect thread communication are valuable tools in structuring concurrent programs.
    21512381
    21522382\begin{figure}
    2153 \lstset{language=CFA,deletedelim=**[is][]{`}{`}}
    2154 \begin{cfa}
    2155 monitor A{};
    2156 
    2157 void f1( A & mutex );
    2158 void f2( A & mutex );
    2159 
    2160 void foo( A & mutex a, bool b, int t ) {
    2161         waitfor(f1, a);                                                 $\C{// Correct : blocking case}$
    2162 
    2163         waitfor(f1, a) {                                                $\C{// Correct : block with statement}$
    2164                 sout | "f1" | endl;
     2383\centering
     2384\begin{lrbox}{\myboxA}
     2385\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2386
     2387struct Msg { int i, j; };
     2388thread GoRtn { int i;  float f;  Msg m; };
     2389void mem1( GoRtn & mutex gortn, int i ) { gortn.i = i; }
     2390void mem2( GoRtn & mutex gortn, float f ) { gortn.f = f; }
     2391void mem3( GoRtn & mutex gortn, Msg m ) { gortn.m = m; }
     2392void ^?{}( GoRtn & mutex ) {}
     2393
     2394void main( GoRtn & gortn ) with( gortn ) {  // thread starts
     2395
     2396        for () {
     2397
     2398                `waitfor( mem1, gortn )` sout | i;  // wait for calls
     2399                or `waitfor( mem2, gortn )` sout | f;
     2400                or `waitfor( mem3, gortn )` sout | m.i | m.j;
     2401                or `waitfor( ^?{}, gortn )` break;
     2402
    21652403        }
    2166         waitfor(f1, a) {                                                $\C{// Correct : block waiting for f1 or f2}$
    2167                 sout | "f1" | endl;
    2168         } or waitfor(f2, a) {
    2169                 sout | "f2" | endl;
     2404
     2405}
     2406int main() {
     2407        GoRtn gortn; $\C[2.0in]{// start thread}$
     2408        `mem1( gortn, 0 );` $\C{// different calls}\CRT$
     2409        `mem2( gortn, 2.5 );`
     2410        `mem3( gortn, (Msg){1, 2} );`
     2411
     2412
     2413} // wait for completion
     2414\end{cfa}
     2415\end{lrbox}
     2416
     2417\begin{lrbox}{\myboxB}
     2418\begin{Go}[aboveskip=0pt,belowskip=0pt]
     2419func main() {
     2420        type Msg struct{ i, j int }
     2421
     2422        ch1 := make( chan int )
     2423        ch2 := make( chan float32 )
     2424        ch3 := make( chan Msg )
     2425        hand := make( chan string )
     2426        shake := make( chan string )
     2427        gortn := func() { $\C[1.5in]{// thread starts}$
     2428                var i int;  var f float32;  var m Msg
     2429                L: for {
     2430                        select { $\C{// wait for messages}$
     2431                          case `i = <- ch1`: fmt.Println( i )
     2432                          case `f = <- ch2`: fmt.Println( f )
     2433                          case `m = <- ch3`: fmt.Println( m )
     2434                          case `<- hand`: break L $\C{// sentinel}$
     2435                        }
     2436                }
     2437                `shake <- "SHAKE"` $\C{// completion}$
    21702438        }
    2171         waitfor(f1, a); or else;                                $\C{// Correct : non-blocking case}$
    2172 
    2173         waitfor(f1, a) {                                                $\C{// Correct : non-blocking case}$
    2174                 sout | "blocked" | endl;
    2175         } or else {
    2176                 sout | "didn't block" | endl;
     2439
     2440        go gortn() $\C{// start thread}$
     2441        `ch1 <- 0` $\C{// different messages}$
     2442        `ch2 <- 2.5`
     2443        `ch3 <- Msg{1, 2}`
     2444        `hand <- "HAND"` $\C{// sentinel value}$
     2445        `<- shake` $\C{// wait for completion}\CRT$
     2446}
     2447\end{Go}
     2448\end{lrbox}
     2449
     2450\subfloat[\CFA]{\label{f:CFAwaitfor}\usebox\myboxA}
     2451\hspace{3pt}
     2452\vrule
     2453\hspace{3pt}
     2454\subfloat[Go]{\label{f:Gochannel}\usebox\myboxB}
     2455\caption{Direct communication}
     2456\label{f:DirectCommunication}
     2457\end{figure}
     2458
     2459\begin{comment}
     2460The following shows an example of two threads directly calling each other and accepting calls from each other in a cycle.
     2461\begin{cfa}
     2462\end{cfa}
     2463\vspace{-0.8\baselineskip}
     2464\begin{cquote}
     2465\begin{tabular}{@{}l@{\hspace{3\parindentlnth}}l@{}}
     2466\begin{cfa}
     2467thread Ping {} pi;
     2468void ping( Ping & mutex ) {}
     2469void main( Ping & pi ) {
     2470        for ( 10 ) {
     2471                `waitfor( ping, pi );`
     2472                `pong( po );`
    21772473        }
    2178         waitfor(f1, a) {                                                $\C{// Correct : block at most 10 seconds}$
    2179                 sout | "blocked" | endl;
    2180         } or timeout( 10`s) {
    2181                 sout | "didn't block" | endl;
     2474}
     2475int main() {}
     2476\end{cfa}
     2477&
     2478\begin{cfa}
     2479thread Pong {} po;
     2480void pong( Pong & mutex ) {}
     2481void main( Pong & po ) {
     2482        for ( 10 ) {
     2483                `ping( pi );`
     2484                `waitfor( pong, po );`
    21822485        }
    2183         // Correct : block only if b == true if b == false, don't even make the call
    2184         when(b) waitfor(f1, a);
    2185 
    2186         // Correct : block only if b == true if b == false, make non-blocking call
    2187         waitfor(f1, a); or when(!b) else;
    2188 
    2189         // Correct : block only of t > 1
    2190         waitfor(f1, a); or when(t > 1) timeout(t); or else;
    2191 
    2192         // Incorrect : timeout clause is dead code
    2193         waitfor(f1, a); or timeout(t); or else;
    2194 
    2195         // Incorrect : order must be waitfor [or waitfor... [or timeout] [or else]]
    2196         timeout(t); or waitfor(f1, a); or else;
    2197 }
    2198 \end{cfa}
    2199 \caption{Correct and incorrect uses of the or, else, and timeout clause around a waitfor statement}
    2200 \label{f:waitfor2}
    2201 \end{figure}
    2202 
    2203 % ======================================================================
    2204 % ======================================================================
    2205 \subsection{Waiting For The Destructor}
    2206 % ======================================================================
    2207 % ======================================================================
    2208 An interesting use for the @waitfor@ statement is destructor semantics.
    2209 Indeed, the @waitfor@ statement can accept any @mutex@ routine, which includes the destructor (see section \ref{data}).
    2210 However, with the semantics discussed until now, waiting for the destructor does not make any sense, since using an object after its destructor is called is undefined behaviour.
    2211 The simplest approach is to disallow @waitfor@ on a destructor.
    2212 However, a more expressive approach is to flip ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current @mutex@ routine, similarly to how a condition is signalled.
    2213 \begin{figure}
    2214 \begin{cfa}[caption={Example of an executor which executes action in series until the destructor is called.},label={f:dtor-order}]
    2215 monitor Executer {};
    2216 struct  Action;
    2217 
    2218 void ^?{}   (Executer & mutex this);
    2219 void execute(Executer & mutex this, const Action & );
    2220 void run    (Executer & mutex this) {
    2221         while(true) {
    2222                    waitfor(execute, this);
    2223                 or waitfor(^?{}   , this) {
    2224                         break;
    2225                 }
    2226         }
    2227 }
    2228 \end{cfa}
    2229 \end{figure}
    2230 For example, listing \ref{f:dtor-order} shows an example of an executor with an infinite loop, which waits for the destructor to break out of this loop.
    2231 Switching the semantic meaning introduces an idiomatic way to terminate a task and/or wait for its termination via destruction.
    2232 
    2233 
    2234 % ######     #    ######     #    #       #       ####### #       ###  #####  #     #
    2235 % #     #   # #   #     #   # #   #       #       #       #        #  #     # ##   ##
    2236 % #     #  #   #  #     #  #   #  #       #       #       #        #  #       # # # #
    2237 % ######  #     # ######  #     # #       #       #####   #        #   #####  #  #  #
    2238 % #       ####### #   #   ####### #       #       #       #        #        # #     #
    2239 % #       #     # #    #  #     # #       #       #       #        #  #     # #     #
    2240 % #       #     # #     # #     # ####### ####### ####### ####### ###  #####  #     #
    2241 \section{Parallelism}
    2242 Historically, computer performance was about processor speeds and instruction counts.
    2243 However, with heat dissipation being a direct consequence of speed increase, parallelism has become the new source for increased performance~\cite{Sutter05, Sutter05b}.
    2244 In this decade, it is no longer reasonable to create a high-performance application without caring about parallelism.
    2245 Indeed, parallelism is an important aspect of performance and more specifically throughput and hardware utilization.
    2246 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like @fork@, @join@, \etc.
    2247 However, since these have significant costs and limitations, \textbf{kthread} are now mostly used as an implementation tool rather than a user oriented one.
    2248 There are several alternatives to solve these issues that all have strengths and weaknesses.
    2249 While there are many variations of the presented paradigms, most of these variations do not actually change the guarantees or the semantics, they simply move costs in order to achieve better performance for certain workloads.
    2250 
    2251 \section{Paradigms}
    2252 \subsection{User-Level Threads}
    2253 A direct improvement on the \textbf{kthread} approach is to use \textbf{uthread}.
    2254 These threads offer most of the same features that the operating system already provides but can be used on a much larger scale.
    2255 This approach is the most powerful solution as it allows all the features of multithreading, while removing several of the more expensive costs of kernel threads.
    2256 The downside is that almost none of the low-level threading problems are hidden; users still have to think about data races, deadlocks and synchronization issues.
    2257 These issues can be somewhat alleviated by a concurrency toolkit with strong guarantees, but the parallelism toolkit offers very little to reduce complexity in itself.
    2258 
    2259 Examples of languages that support \textbf{uthread} are Erlang~\cite{Erlang} and \uC~\cite{uC++book}.
    2260 
    2261 \subsection{Fibers : User-Level Threads Without Preemption} \label{fibers}
    2262 A popular variant of \textbf{uthread} is what is often referred to as \textbf{fiber}.
    2263 However, \textbf{fiber} do not present meaningful semantic differences with \textbf{uthread}.
    2264 The significant difference between \textbf{uthread} and \textbf{fiber} is the lack of \textbf{preemption} in the latter.
    2265 Advocates of \textbf{fiber} list their high performance and ease of implementation as major strengths, but the performance difference between \textbf{uthread} and \textbf{fiber} is controversial, and the ease of implementation, while true, is a weak argument in the context of language design.
    2266 Therefore this proposal largely ignores fibers.
    2267 
     2268 An example of a language that uses fibers is Go~\cite{Go}.
    2269 
    2270 \subsection{Jobs and Thread Pools}
    2271 An approach on the opposite end of the spectrum is to base parallelism on \textbf{pool}.
    2272 Indeed, \textbf{pool} offer limited flexibility but at the benefit of a simpler user interface.
    2273 In \textbf{pool} based systems, users express parallelism as units of work, called jobs, and a dependency graph (either explicit or implicit) that ties them together.
     2274 This approach means users need not worry about concurrency but significantly limits the interaction that can occur among jobs.
     2275 Indeed, any \textbf{job} that blocks also blocks the underlying worker, which effectively means the CPU utilization, and therefore throughput, suffers noticeably.
    2276 It can be argued that a solution to this problem is to use more workers than available cores.
     2277 However, unless the number of jobs and the number of workers are comparable, having a significant number of blocked jobs always results in idle cores.
    2278 
    2279 The gold standard of this implementation is Intel's TBB library~\cite{TBB}.
    2280 
    2281 \subsection{Paradigm Performance}
    2282 While the choice between the three paradigms listed above may have significant performance implications, it is difficult to pin down the performance implications of choosing a model at the language level.
    2283 Indeed, in many situations one of these paradigms may show better performance but it all strongly depends on the workload.
    2284 Having a large amount of mostly independent units of work to execute almost guarantees equivalent performance across paradigms and that the \textbf{pool}-based system has the best efficiency thanks to the lower memory overhead (\ie no thread stack per job).
    2285 However, interactions among jobs can easily exacerbate contention.
    2286 User-level threads allow fine-grain context switching, which results in better resource utilization, but a context switch is more expensive and the extra control means users need to tweak more variables to get the desired performance.
     2287 Finally, if the units of uninterrupted work are large enough, the paradigm choice is largely amortized by the actual work done.
    2288 
    2289 \section{The \protect\CFA\ Kernel : Processors, Clusters and Threads}\label{kernel}
    2290 A \textbf{cfacluster} is a group of \textbf{kthread} executed in isolation. \textbf{uthread} are scheduled on the \textbf{kthread} of a given \textbf{cfacluster}, allowing organization between \textbf{uthread} and \textbf{kthread}.
    2291 It is important that \textbf{kthread} belonging to a same \textbf{cfacluster} have homogeneous settings, otherwise migrating a \textbf{uthread} from one \textbf{kthread} to the other can cause issues.
    2292 A \textbf{cfacluster} also offers a pluggable scheduler that can optimize the workload generated by the \textbf{uthread}.
    2293 
    2294 \textbf{cfacluster} have not been fully implemented in the context of this paper.
    2295 Currently \CFA only supports one \textbf{cfacluster}, the initial one.
    2296 
    2297 \subsection{Future Work: Machine Setup}\label{machine}
    2298 While this was not done in the context of this paper, another important aspect of clusters is affinity.
    2299 While many common desktop and laptop PCs have homogeneous CPUs, other devices often have more heterogeneous setups.
    2300 For example, a system using \textbf{numa} configurations may benefit from users being able to tie clusters and/or kernel threads to certain CPU cores.
    2301 OS support for CPU affinity is now common~\cite{affinityLinux, affinityWindows, affinityFreebsd, affinityNetbsd, affinityMacosx}, which means it is both possible and desirable for \CFA to offer an abstraction mechanism for portable CPU affinity.
    2302 
    2303 \subsection{Paradigms}\label{cfaparadigms}
    2304 Given these building blocks, it is possible to reproduce all three of the popular paradigms.
    2305 Indeed, \textbf{uthread} is the default paradigm in \CFA.
    2306 However, disabling \textbf{preemption} on a cluster means threads effectively become fibers.
    2307 Since several \textbf{cfacluster} with different scheduling policy can coexist in the same application, this allows \textbf{fiber} and \textbf{uthread} to coexist in the runtime of an application.
    2308 Finally, it is possible to build executors for thread pools from \textbf{uthread} or \textbf{fiber}, which includes specialized jobs like actors~\cite{Actors}.
    2309 
    2310 
    2311 
    2312 \section{Behind the Scenes}
    2313 There are several challenges specific to \CFA when implementing concurrency.
    2314 These challenges are a direct result of bulk acquire and loose object definitions.
    2315 These two constraints are the root cause of most design decisions in the implementation.
    2316 Furthermore, to avoid contention from dynamically allocating memory in a concurrent environment, the internal-scheduling design is (almost) entirely free of mallocs.
    2317 This approach avoids the chicken and egg problem~\cite{Chicken} of having a memory allocator that relies on the threading system and a threading system that relies on the runtime.
    2318 This extra goal means that memory management is a constant concern in the design of the system.
    2319 
    2320 The main memory concern for concurrency is queues.
    2321 All blocking operations are made by parking threads onto queues and all queues are designed with intrusive nodes, where each node has pre-allocated link fields for chaining, to avoid the need for memory allocation.
     2322 Since several concurrency operations can use an unbound amount of memory (depending on bulk acquire), statically defining information in the intrusive fields of threads is insufficient. The only way to use a variable amount of memory without requiring memory allocation is to pre-allocate large buffers of memory eagerly and store the information in these buffers.
    2323 Conveniently, the call stack fits that description and is easy to use, which is why it is used heavily in the implementation of internal scheduling, particularly variable-length arrays.
    2324 Since stack allocation is based on scopes, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable-length array.
    2325 The threads and the condition both have a fixed amount of memory, while @mutex@ routines and blocking calls allow for an unbound amount, within the stack size.
    2326 
    2327 Note that since the major contributions of this paper are extending monitor semantics to bulk acquire and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed.
    2328 
    2329 % ======================================================================
    2330 % ======================================================================
    2331 \section{Mutex Routines}
    2332 % ======================================================================
    2333 % ======================================================================
    2334 
    2335 The first step towards the monitor implementation is simple @mutex@ routines.
    2336 In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{f:entry1}.
    2337 The entry/exit procedures do not have to be extended to support multiple monitors.
    2338 Indeed it is sufficient to enter/leave monitors one-by-one as long as the order is correct to prevent deadlock~\cite{Havender68}.
    2339 In \CFA, ordering of monitor acquisition relies on memory ordering.
    2340 This approach is sufficient because all objects are guaranteed to have distinct non-overlapping memory layouts and mutual-exclusion for a monitor is only defined for its lifetime, meaning that destroying a monitor while it is acquired is undefined behaviour.
    2341 When a mutex call is made, the concerned monitors are aggregated into a variable-length pointer array and sorted based on pointer values.
     2342 This array persists for the entire duration of the mutual-exclusion and its ordering is reused extensively.
    2343 \begin{figure}
    2344 \begin{multicols}{2}
    2345 Entry
    2346 \begin{cfa}
    2347 if monitor is free
    2348         enter
    2349 elif already own the monitor
    2350         continue
    2351 else
    2352         block
    2353 increment recursions
    2354 \end{cfa}
    2355 \columnbreak
    2356 Exit
    2357 \begin{cfa}
    2358 decrement recursion
    2359 if recursion == 0
    2360         if entry queue not empty
    2361                 wake-up thread
    2362 \end{cfa}
    2363 \end{multicols}
    2364 \begin{cfa}[caption={Initial entry and exit routine for monitors},label={f:entry1}]
    2365 \end{cfa}
    2366 \end{figure}
    2367 
    2368 \subsection{Details: Interaction with polymorphism}
    2369 Depending on the choice of semantics for when monitor locks are acquired, interaction between monitors and \CFA's concept of polymorphism can be more complex to support.
    2370 However, it is shown that entry-point locking solves most of the issues.
    2371 
    2372 First of all, interaction between @otype@ polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying.
    2373 Therefore, the main question is how to support @dtype@ polymorphism.
    2374 It is important to present the difference between the two acquiring options: \textbf{callsite-locking} and entry-point locking, \ie acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call.
    2375 For example:
     2486}
     2487
     2488\end{cfa}
     2489\end{tabular}
     2490\end{cquote}
     2491% \lstMakeShortInline@%
     2492% \caption{Threads ping/pong using external scheduling}
     2493% \label{f:pingpong}
     2494% \end{figure}
     2495Note, the ping/pong threads are globally declared, @pi@/@po@, and hence, start (and possibly complete) before the program main starts.
     2496\end{comment}
     2497
     2498
     2499\subsection{Execution Properties}
     2500
     2501Table~\ref{t:ObjectPropertyComposition} shows how the \CFA high-level constructs cover 3 fundamental execution properties: thread, stateful function, and mutual exclusion.
     2502Case 1 is a basic object, with none of the new execution properties.
     2503Case 2 allows @mutex@ calls to Case 1 to protect shared data.
     2504Case 3 allows stateful functions to suspend/resume but restricts operations because the state is stackless.
     2505Case 4 allows @mutex@ calls to Case 3 to protect shared data.
     2506Cases 5 and 6 are the same as 3 and 4 without restriction because the state is stackful.
     2507Cases 7 and 8 are rejected because a thread cannot execute without a stackful state in a preemptive environment when context switching from the signal handler.
     2508Cases 9 and 10 have a stackful thread without and with @mutex@ calls.
     2509For situations where threads do not require direct communication, case 9 provides faster creation/destruction by eliminating @mutex@ setup.
     2510
    23762511\begin{table}
    2377 \begin{center}
    2378 \begin{tabular}{|c|c|c|}
    2379 Mutex & \textbf{callsite-locking} & \textbf{entry-point-locking} \\
    2380 call & cfa-code & cfa-code \\
     2512\caption{Object property composition}
     2513\centering
     2514\label{t:ObjectPropertyComposition}
     2515\renewcommand{\arraystretch}{1.25}
     2516%\setlength{\tabcolsep}{5pt}
     2517\begin{tabular}{c|c||l|l}
     2518\multicolumn{2}{c||}{object properties} & \multicolumn{2}{c}{mutual exclusion} \\
    23812519\hline
    2382 \begin{cfa}[tabsize=3]
    2383 void foo(monitor& mutex a){
    2384 
    2385         // Do Work
    2386         //...
    2387 
    2388 }
    2389 
    2390 void main() {
    2391         monitor a;
    2392 
    2393         foo(a);
    2394 
    2395 }
    2396 \end{cfa} & \begin{cfa}[tabsize=3]
    2397 foo(& a) {
    2398 
    2399         // Do Work
    2400         //...
    2401 
    2402 }
    2403 
    2404 main() {
    2405         monitor a;
    2406         acquire(a);
    2407         foo(a);
    2408         release(a);
    2409 }
    2410 \end{cfa} & \begin{cfa}[tabsize=3]
    2411 foo(& a) {
    2412         acquire(a);
    2413         // Do Work
    2414         //...
    2415         release(a);
    2416 }
    2417 
    2418 main() {
    2419         monitor a;
    2420 
    2421         foo(a);
    2422 
    2423 }
    2424 \end{cfa}
    2425 \end{tabular}
    2426 \end{center}
    2427 \caption{Call-site vs entry-point locking for mutex calls}
    2428 \label{tbl:locking-site}
    2429 \end{table}
    2430 
    2431 Note the @mutex@ keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, \eg:
    2432 \begin{cfa}
    2433 // Incorrect: T may not be monitor
    2434 forall(dtype T)
    2435 void foo(T * mutex t);
    2436 
    2437 // Correct: this routine only works on monitors (any monitor)
    2438 forall(dtype T | is_monitor(T))
    2439 void bar(T * mutex t));
    2440 \end{cfa}
    2441 
    2442 Both entry point and \textbf{callsite-locking} are feasible implementations.
    2443 The current \CFA implementation uses entry-point locking because it requires less work when using \textbf{raii}, effectively transferring the burden of implementation to object construction/destruction.
    2444 It is harder to use \textbf{raii} for call-site locking, as it does not necessarily have an existing scope that matches exactly the scope of the mutual exclusion, \ie the routine body.
    2445 For example, the monitor call can appear in the middle of an expression.
    2446 Furthermore, entry-point locking requires less code generation since any useful routine is called multiple times but there is only one entry point for many call sites.
    2447 
    2448 % ======================================================================
    2449 % ======================================================================
    2450 \section{Threading} \label{impl:thread}
    2451 % ======================================================================
    2452 % ======================================================================
    2453 
     2454 Figure \ref{fig:system1} shows a high-level picture of the \CFA runtime system with regard to concurrency.
     2455 Each component of the picture is explained in detail in the following sections.
    2456 
    2457 \begin{figure}
    2458 \begin{center}
    2459 {\resizebox{\textwidth}{!}{\input{system.pstex_t}}}
    2460 \end{center}
    2461 \caption{Overview of the entire system}
    2462 \label{fig:system1}
    2463 \end{figure}
    2464 
    2465 \subsection{Processors}
    2466 Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically @pthread@s in the current implementation of \CFA.
    2467 Indeed, any parallelism must go through operating-system libraries.
    2468 However, \textbf{uthread} are still the main source of concurrency, processors are simply the underlying source of parallelism.
     2469 Indeed, processor \textbf{kthread} simply fetch a \textbf{uthread} from the scheduler and run it; they are effectively executors for user-threads.
    2470 The main benefit of this approach is that it offers a well-defined boundary between kernel code and user code, for example, kernel thread quiescing, scheduling and interrupt handling.
    2471 Processors internally use coroutines to take advantage of the existing context-switching semantics.
    2472 
    2473 \subsection{Stack Management}
    2474 One of the challenges of this system is to reduce the footprint as much as possible.
    2475 Specifically, all @pthread@s created also have a stack created with them, which should be used as much as possible.
    2476 Normally, coroutines also create their own stack to run on, however, in the case of the coroutines used for processors, these coroutines run directly on the \textbf{kthread} stack, effectively stealing the processor stack.
    2477 The exception to this rule is the Main Processor, \ie the initial \textbf{kthread} that is given to any program.
    2478 In order to respect C user expectations, the stack of the initial kernel thread, the main stack of the program, is used by the main user thread rather than the main processor, which can grow very large.
    2479 
    2480 \subsection{Context Switching}
    2481 As mentioned in section \ref{coroutine}, coroutines are a stepping stone for implementing threading, because they share the same mechanism for context-switching between different stacks.
    2482 To improve performance and simplicity, context-switching is implemented using the following assumption: all context-switches happen inside a specific routine call.
    2483 This assumption means that the context-switch only has to copy the callee-saved registers onto the stack and then switch the stack registers with the ones of the target coroutine/thread.
     2484 Note that the instruction pointer can be left untouched since the context-switch is always inside the same routine.
    2485 Threads, however, do not context-switch between each other directly.
    2486 They context-switch to the scheduler.
    2487 This method is called a 2-step context-switch and has the advantage of having a clear distinction between user code and the kernel where scheduling and other system operations happen.
    2488 Obviously, this doubles the context-switch cost because threads must context-switch to an intermediate stack.
    2489 The alternative 1-step context-switch uses the stack of the ``from'' thread to schedule and then context-switches directly to the ``to'' thread.
    2490 However, the performance of the 2-step context-switch is still superior to a @pthread_yield@ (see section \ref{results}).
    2491 Additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft @SwitchToFiber@~\cite{switchToWindows} routine).
    2492 This option is not currently present in \CFA, but the changes required to add it are strictly additive.
    2493 
    2494 \subsection{Preemption} \label{preemption}
    2495 Finally, an important aspect for any complete threading system is preemption.
    2496 As mentioned in section \ref{basics}, preemption introduces an extra degree of uncertainty, which enables users to have multiple threads interleave transparently, rather than having to cooperate among threads for proper scheduling and CPU distribution.
    2497 Indeed, preemption is desirable because it adds a degree of isolation among threads.
     2498 In a fully cooperative system, any thread that runs a long loop can starve other threads, while in a preemptive system, starvation can still occur but it does not rely on every thread having to yield or block on a regular basis, which significantly reduces the programmer's burden.
    2499 Obviously, preemption is not optimal for every workload.
    2500 However any preemptive system can become a cooperative system by making the time slices extremely large.
    2501 Therefore, \CFA uses a preemptive threading system.
    2502 
    2503 Preemption in \CFA\footnote{Note that the implementation of preemption is strongly tied with the underlying threading system.
     2504 For this reason, only the Linux implementation is covered; \CFA does not run on Windows at the time of writing} is based on kernel timers, which are used to run a discrete-event simulation.
    2505 Every processor keeps track of the current time and registers an expiration time with the preemption system.
    2506 When the preemption system receives a change in preemption, it inserts the time in a sorted order and sets a kernel timer for the closest one, effectively stepping through preemption events on each signal sent by the timer.
    2507 These timers use the Linux signal {\tt SIGALRM}, which is delivered to the process rather than the kernel-thread.
    2508 This results in an implementation problem, because when delivering signals to a process, the kernel can deliver the signal to any kernel thread for which the signal is not blocked, \ie:
    2509 \begin{quote}
    2510 A process-directed signal may be delivered to any one of the threads that does not currently have the signal blocked.
    2511 If more than one of the threads has the signal unblocked, then the kernel chooses an arbitrary thread to which to deliver the signal.
    2512 SIGNAL(7) - Linux Programmer's Manual
    2513 \end{quote}
    2514 For the sake of simplicity, and in order to prevent the case of having two threads receiving alarms simultaneously, \CFA programs block the {\tt SIGALRM} signal on every kernel thread except one.
    2515 
    2516 Now because of how involuntary context-switches are handled, the kernel thread handling {\tt SIGALRM} cannot also be a processor thread.
    2517 Hence, involuntary context-switching is done by sending signal {\tt SIGUSR1} to the corresponding proces\-sor and having the thread yield from inside the signal handler.
    2518 This approach effectively context-switches away from the signal handler back to the kernel and the signal handler frame is eventually unwound when the thread is scheduled again.
    2519 As a result, a signal handler can start on one kernel thread and terminate on a second kernel thread (but the same user thread).
    2520 It is important to note that signal handlers save and restore signal masks because user-thread migration can cause a signal mask to migrate from one kernel thread to another.
    2521 This behaviour is only a problem if all kernel threads, among which a user thread can migrate, differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' routines from other routines}.
    2522 However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks.
    2523 For this reason, the alarm thread is in a tight loop around a system call to @sigwaitinfo@, requiring very little CPU time for preemption.
    2524 One final detail about the alarm thread is how to wake it when additional communication is required (\eg on thread termination).
    2525 This unblocking is also done using {\tt SIGALRM}, but sent through the @pthread_sigqueue@.
    2526 Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel.
    2527 
    2528 \subsection{Scheduler}
    2529 Finally, an aspect that was not mentioned yet is the scheduling algorithm.
    2530 Currently, the \CFA scheduler uses a single ready queue for all processors, which is the simplest approach to scheduling.
    2531 Further discussion on scheduling is present in section \ref{futur:sched}.
    2532 
    2533 % ======================================================================
    2534 % ======================================================================
    2535 \section{Internal Scheduling} \label{impl:intsched}
    2536 % ======================================================================
    2537 % ======================================================================
    2538 The following figure is the traditional illustration of a monitor (repeated from page~\pageref{fig:ClassicalMonitor} for convenience):
    2539 
    2540 \begin{figure}
    2541 \begin{center}
    2542 {\resizebox{0.4\textwidth}{!}{\input{monitor}}}
    2543 \end{center}
    2544 \caption{Traditional illustration of a monitor}
    2545 \end{figure}
    2546 
    2547 This picture has several components, the two most important being the entry queue and the AS-stack.
    2548 The entry queue is an (almost) FIFO list where threads waiting to enter are parked, while the acceptor/signaller (AS) stack is a FILO list used for threads that have been signalled or otherwise marked as running next.
    2549 
    2550 For \CFA, this picture does not have support for blocking multiple monitors on a single condition.
    2551 To support bulk acquire two changes to this picture are required.
    2552 First, it is no longer helpful to attach the condition to \emph{a single} monitor.
    2553 Secondly, the thread waiting on the condition has to be separated across multiple monitors, seen in figure \ref{fig:monitor_cfa}.
    2554 
    2555 \begin{figure}
    2556 \begin{center}
    2557 {\resizebox{0.8\textwidth}{!}{\input{int_monitor}}}
    2558 \end{center}
    2559 \caption{Illustration of \CFA Monitor}
    2560 \label{fig:monitor_cfa}
    2561 \end{figure}
    2562 
     2563 This picture and the proper entry and leave algorithms (see listing \ref{f:entry2}) are the fundamental implementation of internal scheduling.
    2564 Note that when a thread is moved from the condition to the AS-stack, it is conceptually split into N pieces, where N is the number of monitors specified in the parameter list.
    2565 The thread is woken up when all the pieces have popped from the AS-stacks and made active.
    2566 In this picture, the threads are split into halves but this is only because there are two monitors.
    2567 For a specific signalling operation every monitor needs a piece of thread on its AS-stack.
    2568 
    2569 \begin{figure}
    2570 \begin{multicols}{2}
    2571 Entry
    2572 \begin{cfa}
    2573 if monitor is free
    2574         enter
    2575 elif already own the monitor
    2576         continue
    2577 else
    2578         block
    2579 increment recursion
    2580 
    2581 \end{cfa}
    2582 \columnbreak
    2583 Exit
    2584 \begin{cfa}
    2585 decrement recursion
    2586 if recursion == 0
    2587         if signal_stack not empty
    2588                 set_owner to thread
    2589                 if all monitors ready
    2590                         wake-up thread
    2591 
    2592         if entry queue not empty
    2593                 wake-up thread
    2594 \end{cfa}
    2595 \end{multicols}
    2596 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling},label={f:entry2}]
    2597 \end{cfa}
    2598 \end{figure}
    2599 
    2600 The solution discussed in \ref{s:InternalScheduling} can be seen in the exit routine of listing \ref{f:entry2}.
    2601 Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has transferred ownership.
    2602 This solution is deadlock safe as well as preventing any potential barging.
    2603 The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call stack of the @wait@ and @signal_block@ routines.
    2604 
    2605 \begin{figure}
    2606 \begin{center}
    2607 {\resizebox{0.8\textwidth}{!}{\input{monitor_structs.pstex_t}}}
    2608 \end{center}
    2609 \caption{Data structures involved in internal/external scheduling}
    2610 \label{fig:structs}
    2611 \end{figure}
    2612 
    2613 Figure \ref{fig:structs} shows a high-level representation of these data structures.
    2614 The main idea behind them is that a thread cannot contain an arbitrary number of intrusive ``next'' pointers for linking onto monitors.
    2615 The @condition node@ is the data structure that is queued onto a condition variable and, when signalled, the condition queue is popped and each @condition criterion@ is moved to the AS-stack.
    2616 Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{f:entry2}.
    2617 
    2618 % ======================================================================
    2619 % ======================================================================
    2620 \section{External Scheduling}
    2621 % ======================================================================
    2622 % ======================================================================
    2623 Similarly to internal scheduling, external scheduling for multiple monitors relies on the idea that waiting-thread queues are no longer specific to a single monitor, as mentioned in section \ref{extsched}.
    2624 For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (\ie no signal statement uses multiple conditions).
    2625 However, in the case of external scheduling, there is no equivalent object which is associated with @waitfor@ statements.
    2626 This absence means the queues holding the waiting threads must be stored inside at least one of the monitors that is acquired.
    2627 These monitors are the only objects that have sufficient lifetime and are available on both sides of the @waitfor@ statement.
    2628 This requires an algorithm to choose which monitor holds the relevant queue.
    2629 It is also important that said algorithm be independent of the order in which users list parameters.
    2630 The proposed algorithm is to fall back on monitor lock ordering (sorting by address) and specify that the monitor that is acquired first is the one with the relevant waiting queue.
    2631 This assumes that the lock acquiring order is static for the lifetime of all concerned objects but that is a reasonable constraint.
    2632 
    2633 This algorithm choice has two consequences:
    2634 \begin{itemize}
    2635         \item The queue of the monitor with the lowest address is no longer a true FIFO queue because threads can be moved to the front of the queue.
    2636 These queues need to contain a set of monitors for each of the waiting threads.
    2637 Therefore, another thread whose set contains the same lowest address monitor but different lower priority monitors may arrive first but enter the critical section after a thread with the correct pairing.
    2638         \item The queue of the lowest priority monitor is both required and potentially unused.
    2639 Indeed, since it is not known at compile time which monitor is the monitor which has the lowest address, every monitor needs to have the correct queues even though it is possible that some queues go unused for the entire duration of the program, for example if a monitor is only used in a specific pair.
    2640 \end{itemize}
    2641 Therefore, the following modifications need to be made to support external scheduling:
    2642 \begin{itemize}
    2643         \item The threads waiting on the entry queue need to keep track of which routine they are trying to enter, and using which set of monitors.
    2644 The @mutex@ routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information.
    2645         \item The monitors need to keep a mask of acceptable routines.
    2646 This mask contains for each acceptable routine, a routine pointer and an array of monitors to go with it.
    2647 It also needs storage to keep track of which routine was accepted.
    2648 Since this information is not specific to any monitor, the monitors actually contain a pointer to an integer on the stack of the waiting thread.
    2649 Note that if a thread has acquired two monitors but executes a @waitfor@ with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless.
    2650 This becomes relevant when @when@ clauses affect the number of monitors passed to a @waitfor@ statement.
    2651         \item The entry/exit routines need to be updated as shown in listing \ref{f:entry3}.
    2652 \end{itemize}
    2653 
    2654 \subsection{External Scheduling - Destructors}
    2655 Finally, to support the ordering inversion of destructors, the code generation needs to be modified to use a special entry routine.
    2656 This routine is needed because of the storage requirements of the call order inversion.
    2657 Indeed, when waiting for the destructors, storage is needed for the waiting context and the lifetime of said storage needs to outlive the waiting operation it is needed for.
    2658 For regular @waitfor@ statements, the call stack of the routine itself matches this requirement but it is no longer the case when waiting for the destructor since it is pushed on to the AS-stack for later.
    2659 The @waitfor@ semantics can then be adjusted correspondingly, as seen in listing \ref{f:entry-dtor}.
    2660 
    2661 \begin{figure}
    2662 \begin{multicols}{2}
    2663 Entry
    2664 \begin{cfa}
    2665 if monitor is free
    2666         enter
    2667 elif already own the monitor
    2668         continue
    2669 elif matches waitfor mask
    2670         push criteria to AS-stack
    2671         continue
    2672 else
    2673         block
    2674 increment recursion
    2675 \end{cfa}
    2676 \columnbreak
    2677 Exit
    2678 \begin{cfa}
    2679 decrement recursion
    2680 if recursion == 0
    2681         if signal_stack not empty
    2682                 set_owner to thread
    2683                 if all monitors ready
    2684                         wake-up thread
    2685                 endif
    2686         endif
    2687 
    2688         if entry queue not empty
    2689                 wake-up thread
    2690         endif
    2691 \end{cfa}
    2692 \end{multicols}
    2693 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={f:entry3}]
    2694 \end{cfa}
    2695 \end{figure}
    2696 
    2697 \begin{figure}
    2698 \begin{multicols}{2}
    2699 Destructor Entry
    2700 \begin{cfa}
    2701 if monitor is free
    2702         enter
    2703 elif already own the monitor
    2704         increment recursion
    2705         return
    2706 create wait context
    2707 if matches waitfor mask
    2708         reset mask
    2709         push self to AS-stack
    2710         baton pass
    2711 else
    2712         wait
    2713 increment recursion
    2714 \end{cfa}
    2715 \columnbreak
    2716 Waitfor
    2717 \begin{cfa}
    2718 if matching thread is already there
    2719         if found destructor
    2720                 push destructor to AS-stack
    2721                 unlock all monitors
    2722         else
    2723                 push self to AS-stack
    2724                 baton pass
    2725         endif
    2726         return
    2727 endif
    2728 if non-blocking
    2729         Unlock all monitors
    2730         Return
    2731 endif
    2732 
    2733 push self to AS-stack
    2734 set waitfor mask
    2735 block
    2736 return
    2737 \end{cfa}
    2738 \end{multicols}
    2739 \begin{cfa}[caption={Pseudo code for the \protect\lstinline|waitfor| routine and the \protect\lstinline|mutex| entry routine for destructors},label={f:entry-dtor}]
    2740 \end{cfa}
    2741 \end{figure}
    2742 
    2743 
    2744 % ======================================================================
    2745 % ======================================================================
    2746 \section{Putting It All Together}
    2747 % ======================================================================
    2748 % ======================================================================
    2749 
    2750 
    2751 \section{Threads As Monitors}
    2752 As was subtly alluded to in section \ref{threads}, @thread@s in \CFA are in fact monitors, which means that all monitor features are available when using threads.
    2753 For example, here is a very simple two thread pipeline that could be used for a simulator of a game engine:
    2754 \begin{figure}
    2755 \begin{cfa}[caption={Toy simulator using \protect\lstinline|thread|s and \protect\lstinline|monitor|s.},label={f:engine-v1}]
    2756 // Visualization declaration
    2757 thread Renderer {} renderer;
    2758 Frame * simulate( Simulator & this );
    2759 
    2760 // Simulation declaration
    2761 thread Simulator{} simulator;
    2762 void render( Renderer & this );
    2763 
    2764 // Blocking call used as communication
    2765 void draw( Renderer & mutex this, Frame * frame );
    2766 
    2767 // Simulation loop
    2768 void main( Simulator & this ) {
    2769         while( true ) {
    2770                 Frame * frame = simulate( this );
    2771                 draw( renderer, frame );
    2772         }
    2773 }
    2774 
    2775 // Rendering loop
    2776 void main( Renderer & this ) {
    2777         while( true ) {
    2778                 waitfor( draw, this );
    2779                 render( this );
    2780         }
    2781 }
    2782 \end{cfa}
    2783 \end{figure}
    2784 One of the obvious complaints of the previous code snippet (other than its toy-like simplicity) is that it does not handle exit conditions and just goes on forever.
    2785 Luckily, the monitor semantics can also be used to clearly enforce a shutdown order in a concise manner:
    2786 \begin{figure}
    2787 \begin{cfa}[caption={Same toy simulator with proper termination condition.},label={f:engine-v2}]
    2788 // Visualization declaration
    2789 thread Renderer {} renderer;
    2790 Frame * simulate( Simulator & this );
    2791 
    2792 // Simulation declaration
    2793 thread Simulator{} simulator;
    2794 void render( Renderer & this );
    2795 
    2796 // Blocking call used as communication
    2797 void draw( Renderer & mutex this, Frame * frame );
    2798 
    2799 // Simulation loop
    2800 void main( Simulator & this ) {
    2801         while( true ) {
    2802                 Frame * frame = simulate( this );
    2803                 draw( renderer, frame );
    2804 
    2805                 // Exit main loop after the last frame
    2806                 if( frame->is_last ) break;
    2807         }
    2808 }
    2809 
    2810 // Rendering loop
    2811 void main( Renderer & this ) {
    2812         while( true ) {
    2813                    waitfor( draw, this );
    2814                 or waitfor( ^?{}, this ) {
    2815                         // Add an exit condition
    2816                         break;
    2817                 }
    2818 
    2819                 render( this );
    2820         }
    2821 }
    2822 
    2823 // Call destructor for simulator once simulator finishes
    2824 // Call destructor for renderer to signify shutdown
    2825 \end{cfa}
    2826 \end{figure}
    2827 
    2828 \section{Fibers \& Threads}
    2829 As mentioned in section \ref{preemption}, \CFA uses preemptive threads by default but can use fibers on demand.
    2830 Currently, using fibers is done by adding the following line of code to the program:
    2831 \begin{cfa}
    2832 unsigned int default_preemption() {
    2833         return 0;
    2834 }
    2835 \end{cfa}
    2836 This routine is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, \ie no preemption.
    2837 However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{f:fiber-uthread}.
    2838 \begin{figure}
    2839 \lstset{language=CFA,deletedelim=**[is][]{`}{`}}
    2840 \begin{cfa}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={f:fiber-uthread}]
    2841 // Cluster forward declaration
    2842 struct cluster;
    2843 
    2844 // Processor forward declaration
    2845 struct processor;
    2846 
    2847 // Construct clusters with a preemption rate
    2848 void ?{}(cluster& this, unsigned int rate);
    2849 // Construct processor and add it to cluster
    2850 void ?{}(processor& this, cluster& cluster);
    2851 // Construct thread and schedule it on cluster
    2852 void ?{}(thread& this, cluster& cluster);
    2853 
    2854 // Declare two clusters
    2855 cluster thread_cluster = { 10`ms };                     // Preempt every 10 ms
    2856 cluster fibers_cluster = { 0 };                         // Never preempt
    2857 
    2858 // Construct 4 processors
    2859 processor processors[4] = {
    2860         //2 for the thread cluster
    2861         thread_cluster,
    2862         thread_cluster,
    2863         //2 for the fibers cluster
    2864         fibers_cluster,
    2865         fibers_cluster
    2866 };
    2867 
    2868 // Declares thread
    2869 thread UThread {};
    2870 void ?{}(UThread& this) {
    2871         // Construct underlying thread to automatically
    2872         // be scheduled on the thread cluster
    2873         (this){ thread_cluster };
    2874 }
    2875 
    2876 void main(UThread & this);
    2877 
    2878 // Declares fibers
    2879 thread Fiber {};
    2880 void ?{}(Fiber& this) {
    2881         // Construct underlying thread to automatically
    2882         // be scheduled on the fiber cluster
    2883         (this.__thread){ fibers_cluster };
    2884 }
    2885 
    2886 void main(Fiber & this);
    2887 \end{cfa}
    2888 \end{figure}
    2889 
    2890 
    2891 % ======================================================================
    2892 % ======================================================================
    2893 \section{Performance Results} \label{results}
    2894 % ======================================================================
    2895 % ======================================================================
    2896 \section{Machine Setup}
    2897 Table \ref{tab:machine} shows the characteristics of the machine used to run the benchmarks.
    2898 All tests were made on this machine.
    2899 \begin{table}
    2900 \begin{center}
    2901 \begin{tabular}{| l | r | l | r |}
    2902 \hline
    2903 Architecture            & x86\_64                       & NUMA node(s)  & 8 \\
    2904 \hline
    2905 CPU op-mode(s)          & 32-bit, 64-bit                & Model name    & AMD Opteron\texttrademark  Processor 6380 \\
    2906 \hline
    2907 Byte Order                      & Little Endian                 & CPU Freq              & 2.5\si{\giga\hertz} \\
    2908 \hline
    2909 CPU(s)                  & 64                            & L1d cache     & \SI{16}{\kibi\byte} \\
    2910 \hline
    2911 Thread(s) per core      & 2                             & L1i cache     & \SI{64}{\kibi\byte} \\
    2912 \hline
    2913 Core(s) per socket      & 8                             & L2 cache              & \SI{2048}{\kibi\byte} \\
    2914 \hline
    2915 Socket(s)                       & 4                             & L3 cache              & \SI{6144}{\kibi\byte} \\
     2520thread  & stateful                              & \multicolumn{1}{c|}{No} & \multicolumn{1}{c}{Yes} \\
    29162521\hline
    29172522\hline
    2918 Operating system                & Ubuntu 16.04.3 LTS    & Kernel                & Linux 4.4-97-generic \\
     2523No              & No                                    & \textbf{1}\ \ \ aggregate type                & \textbf{2}\ \ \ @monitor@ aggregate type \\
    29192524\hline
    2920 Compiler                        & GCC 6.3               & Translator    & CFA 1 \\
     2525No              & Yes (stackless)               & \textbf{3}\ \ \ @generator@                   & \textbf{4}\ \ \ @monitor@ @generator@ \\
    29212526\hline
    2922 Java version            & OpenJDK-9             & Go version    & 1.9.2 \\
     2527No              & Yes (stackful)                & \textbf{5}\ \ \ @coroutine@                   & \textbf{6}\ \ \ @monitor@ @coroutine@ \\
    29232528\hline
     2529Yes             & No / Yes (stackless)  & \textbf{7}\ \ \ {\color{red}rejected} & \textbf{8}\ \ \ {\color{red}rejected} \\
     2530\hline
     2531Yes             & Yes (stackful)                & \textbf{9}\ \ \ @thread@                              & \textbf{10}\ \ @monitor@ @thread@ \\
    29242532\end{tabular}
    2925 \end{center}
    2926 \caption{Machine setup used for the tests}
    2927 \label{tab:machine}
    29282533\end{table}
    29292534
    2930 \section{Micro Benchmarks}
    2931 All benchmarks are run using the same harness to produce the results, seen as the @BENCH()@ macro in the following examples.
    2932 This macro uses the following logic to benchmark the code:
    2933 \begin{cfa}
    2934 #define BENCH(run, result) \
    2935         before = gettime(); \
    2936         run; \
    2937         after  = gettime(); \
    2938         result = (after - before) / N;
    2939 \end{cfa}
    2940 The method used to get time is @clock_gettime(CLOCK_THREAD_CPUTIME_ID);@.
    2941 Each benchmark is using many iterations of a simple call to measure the cost of the call.
    2942 The specific number of iterations depends on the specific benchmark.
    2943 
    2944 \subsection{Context-Switching}
    2945 The first interesting benchmark is to measure how long context-switches take.
    2946 The simplest approach to do this is to yield on a thread, which executes a 2-step context switch.
    2947 Yielding causes the thread to context-switch to the scheduler and back, more precisely: from the \textbf{uthread} to the \textbf{kthread} then from the \textbf{kthread} back to the same \textbf{uthread} (or a different one in the general case).
    2948 In order to make the comparison fair, coroutines also execute a 2-step context-switch by resuming another coroutine which does nothing but suspending in a tight loop, which is a resume/suspend cycle instead of a yield.
    2949 Figure~\ref{f:ctx-switch} shows the code for coroutines and threads with the results in table \ref{tab:ctx-switch}.
    2950 All omitted tests are functionally identical to one of these tests.
    2951 The difference between coroutines and threads can be attributed to the cost of scheduling.
     2535
     2536\subsection{Low-level Locks}
     2537
     2538For completeness and efficiency, \CFA provides a standard set of low-level locks: recursive mutex, condition, semaphore, barrier, \etc, and atomic instructions: @fetchAssign@, @fetchAdd@, @testSet@, @compareSet@, \etc.
     2539Some of these low-level mechanism are used in the \CFA runtime, but we strongly advocate using high-level mechanisms whenever possible.
     2540
     2541
     2542% \section{Parallelism}
     2543% \label{s:Parallelism}
     2544%
     2545% Historically, computer performance was about processor speeds.
     2546% However, with heat dissipation being a direct consequence of speed increase, parallelism is the new source for increased performance~\cite{Sutter05, Sutter05b}.
     2547% Therefore, high-performance applications must care about parallelism, which requires concurrency.
     2548% The lowest-level approach of parallelism is to use \newterm{kernel threads} in combination with semantics like @fork@, @join@, \etc.
     2549% However, kernel threads are better as an implementation tool because of complexity and higher cost.
     2550% Therefore, different abstractions are often layered onto kernel threads to simplify them, \eg pthreads.
     2551%
     2552%
     2553% \subsection{User Threads}
     2554%
     2555% A direct improvement on kernel threads is user threads, \eg Erlang~\cite{Erlang} and \uC~\cite{uC++book}.
     2556% This approach provides an interface that matches the language paradigms, gives more control over concurrency by the language runtime, and an abstract (and portable) interface to the underlying kernel threads across operating systems.
     2557% In many cases, user threads can be used on a much larger scale (100,000 threads).
     2558% Like kernel threads, user threads support preemption, which maximizes nondeterminism, but increases the potential for concurrency errors: race, livelock, starvation, and deadlock.
     2559% \CFA adopts user-threads to provide more flexibility and a low-cost mechanism to build any other concurrency approach, \eg thread pools and actors~\cite{Actors}.
     2560%
     2561% A variant of user thread is \newterm{fibres}, which removes preemption, \eg Go~\cite{Go} @goroutine@s.
     2562% Like functional programming, which removes mutation and its associated problems, removing preemption from concurrency reduces nondeterminism, making race and deadlock errors more difficult to generate.
     2563% However, preemption is necessary for fairness and to reduce tail-latency.
     2564% For concurrency that relies on spinning, if all cores spin the system is livelocked, whereas preemption breaks the livelock.
     2565
     2566
     2567\begin{comment}
     2568\subsection{Thread Pools}
     2569
     2570In contrast to direct threading is indirect \newterm{thread pools}, \eg Java @executor@, where small jobs (work units) are inserted into a work pool for execution.
     2571If the jobs are dependent, \ie interact, there is an implicit/explicit dependency graph that ties them together.
     2572While removing direct concurrency, and hence the amount of context switching, thread pools significantly limit the interaction that can occur among jobs.
     2573Indeed, jobs should not block because that also blocks the underlying thread, which effectively means the CPU utilization, and therefore throughput, suffers.
     2574While it is possible to tune the thread pool with sufficient threads, it becomes difficult to obtain high throughput and good core utilization as job interaction increases.
     2576As well, concurrency errors return, which thread pools are supposed to mitigate.
     2576
    29522577\begin{figure}
     2578\centering
     2579\begin{tabular}{@{}l|l@{}}
     2580\begin{cfa}
     2581struct Adder {
     2582    int * row, cols;
     2583};
     2584int operator()() {
     2585        subtotal = 0;
     2586        for ( int c = 0; c < cols; c += 1 )
     2587                subtotal += row[c];
     2588        return subtotal;
     2589}
     2590void ?{}( Adder * adder, int row[$\,$], int cols, int & subtotal ) {
     2591        adder.[rows, cols, subtotal] = [rows, cols, subtotal];
     2592}
     2593
     2594
     2595
     2596
     2597\end{cfa}
     2598&
     2599\begin{cfa}
     2600int main() {
     2601        const int rows = 10, cols = 10;
     2602        int matrix[rows][cols], subtotals[rows], total = 0;
     2603        // read matrix
     2604        Executor executor( 4 ); // kernel threads
     2605        Adder * adders[rows];
     2606        for ( r; rows ) { // send off work for executor
     2607                adders[r] = new( matrix[r], cols, &subtotal[r] );
     2608                executor.send( *adders[r] );
     2609        }
     2610        for ( r; rows ) {       // wait for results
     2611                delete( adders[r] );
     2612                total += subtotals[r];
     2613        }
     2614        sout | total;
     2615}
     2616\end{cfa}
     2617\end{tabular}
     2618\caption{Executor}
     2619\end{figure}
     2620\end{comment}
     2621
     2622
     2623\section{Runtime Structure}
     2624\label{s:CFARuntimeStructure}
     2625
     2626Figure~\ref{f:RunTimeStructure} illustrates the runtime structure of a \CFA program.
     2627In addition to the new kinds of objects introduced by \CFA, there are two more runtime entities used to control parallel execution: cluster and (virtual) processor.
     2628An executing thread is illustrated by its containment in a processor.
     2629
     2630\begin{figure}
     2631\centering
     2632\input{RunTimeStructure}
     2633\caption{\CFA Runtime structure}
     2634\label{f:RunTimeStructure}
     2635\end{figure}
     2636
     2637
     2638\subsection{Cluster}
     2639\label{s:RuntimeStructureCluster}
     2640
     2641A \newterm{cluster} is a collection of threads and virtual processors (abstract kernel-thread) that execute the (user) threads from its own ready queue (like an OS executing kernel threads).
     2642The purpose of a cluster is to control the amount of parallelism that is possible among threads, plus scheduling and other execution defaults.
     2643The default cluster-scheduler is single-queue multi-server, which provides automatic load-balancing of threads on processors.
     2644However, the design allows changing the scheduler, \eg multi-queue multi-server with work-stealing/sharing across the virtual processors.
     2645If several clusters exist, both threads and virtual processors can be explicitly migrated from one cluster to another.
     2646No automatic load balancing among clusters is performed by \CFA.
     2647
     2648When a \CFA program begins execution, it creates a user cluster with a single processor and a special processor to handle preemption that does not execute user threads.
     2649The user cluster is created to contain the application user-threads.
     2650Having all threads execute on the one cluster often maximizes utilization of processors, which minimizes runtime.
     2651However, because of limitations of scheduling requirements (real-time), NUMA architecture, heterogeneous hardware, or issues with the underlying operating system, multiple clusters are sometimes necessary.
     2652
     2653
     2654\subsection{Virtual Processor}
     2655\label{s:RuntimeStructureProcessor}
     2656
     2657A virtual processor is implemented by a kernel thread (\eg UNIX process), which are scheduled for execution on a hardware processor by the underlying operating system.
     2658Programs may use more virtual processors than hardware processors.
     2659On a multiprocessor, kernel threads are distributed across the hardware processors resulting in virtual processors executing in parallel.
     2660(It is possible to use affinity to lock a virtual processor onto a particular hardware processor~\cite{affinityLinux, affinityWindows, affinityFreebsd, affinityNetbsd, affinityMacosx}, which is used when caching issues occur or for heterogeneous hardware processors.)
     2661The \CFA runtime attempts to block unused processors and unblock processors as the system load increases;
     2662balancing the workload with processors is difficult because it requires future knowledge, \ie what will the application workload do next.
     2663Preemption occurs on virtual processors rather than user threads, via operating-system interrupts.
     2664Thus virtual processors execute user threads, where preemption frequency applies to a virtual processor, so preemption occurs randomly across the executed user threads.
     2665Turning off preemption transforms user threads into fibres.
     2666
     2667
     2668\begin{comment}
     2669\section{Implementation}
     2670\label{s:Implementation}
     2671
     2672A primary implementation challenge is avoiding contention from dynamically allocating memory because of bulk acquire, \eg the internal-scheduling design is (almost) free of allocations.
     2673All blocking operations are made by parking threads onto queues, therefore all queues are designed with intrusive nodes, where each node has preallocated link fields for chaining.
     2674Furthermore, several bulk-acquire operations need a variable amount of memory.
     2675This storage is allocated at the base of a thread's stack before blocking, which means programmers must add a small amount of extra space for stacks.
     2676
     2677In \CFA, ordering of monitor acquisition relies on memory ordering to prevent deadlock~\cite{Havender68}, because all objects have distinct non-overlapping memory layouts, and mutual-exclusion for a monitor is only defined for its lifetime.
     2678When a mutex call is made, pointers to the concerned monitors are aggregated into a variable-length array and sorted.
     2679This array persists for the entire duration of the mutual exclusion and is used extensively for synchronization operations.
     2680
     2681To improve performance and simplicity, context switching occurs inside a function call, so only callee-saved registers are copied onto the stack and then the stack register is switched;
     2682the corresponding registers are then restored for the other context.
     2683Note, the instruction pointer is untouched since the context switch is always inside the same function.
     2684Experimental results (not presented) for a stackless or stackful scheduler (1 versus 2 context switches) (see Section~\ref{s:Concurrency}) show the performance is virtually equivalent, because both approaches are dominated by locking to prevent a race condition.
     2685
     2686All kernel threads (@pthreads@) are created with a stack.
     2687Each \CFA virtual processor is implemented as a coroutine and these coroutines run directly on the kernel-thread stack, effectively stealing this stack.
     2688The exception to this rule is the program main, \ie the initial kernel thread that is given to any program.
     2689In order to respect C expectations, the stack of the initial kernel thread is used by program main rather than the main processor, allowing it to grow dynamically as in a normal C program.
     2690\end{comment}
     2691
     2692
     2693\subsection{Preemption}
     2694
     2695Nondeterministic preemption provides fairness from long-running threads, and forces concurrent programmers to write more robust programs, rather than relying on code between cooperative scheduling to be atomic.
     2696This atomic reliance can fail on multi-core machines, because execution across cores is nondeterministic.
     2697A different reason for not supporting preemption is that it significantly complicates the runtime system, \eg Microsoft runtime does not support interrupts and on Linux systems, interrupts are complex (see below).
     2698Preemption is normally handled by setting a countdown timer on each virtual processor.
     2699When the timer expires, an interrupt is delivered, and the interrupt handler resets the countdown timer, and if the virtual processor is executing in user code, the signal handler performs a user-level context-switch, or if executing in the language runtime kernel, the preemption is ignored or rolled forward to the point where the runtime kernel context switches back to user code.
     2700Multiple signal handlers may be pending.
     2701When control eventually switches back to the signal handler, it returns normally, and execution continues in the interrupted user thread, even though the return from the signal handler may be on a different kernel thread than the one where the signal is delivered.
     2702The only issue with this approach is that signal masks from one kernel thread may be restored on another as part of returning from the signal handler;
     2703therefore, the same signal mask is required for all virtual processors in a cluster.
      2704Because the preemption interval is usually long (1 millisecond), the performance cost is negligible.
     2705
     2706Linux switched a decade ago from specific to arbitrary process signal-delivery for applications with multiple kernel threads.
     2707\begin{cquote}
     2708A process-directed signal may be delivered to any one of the threads that does not currently have the signal blocked.
     2709If more than one of the threads has the signal unblocked, then the kernel chooses an arbitrary thread to which it will deliver the signal.
     2710SIGNAL(7) - Linux Programmer's Manual
     2711\end{cquote}
     2712Hence, the timer-expiry signal, which is generated \emph{externally} by the Linux kernel to an application, is delivered to any of its Linux subprocesses (kernel threads).
     2713To ensure each virtual processor receives a preemption signal, a discrete-event simulation is run on a special virtual processor, and only it sets and receives timer events.
     2714Virtual processors register an expiration time with the discrete-event simulator, which is inserted in sorted order.
     2715The simulation sets the countdown timer to the value at the head of the event list, and when the timer expires, all events less than or equal to the current time are processed.
     2716Processing a preemption event sends an \emph{internal} @SIGUSR1@ signal to the registered virtual processor, which is always delivered to that processor.
     2717
     2718
     2719\subsection{Debug Kernel}
     2720
     2721There are two versions of the \CFA runtime kernel: debug and non-debug.
     2722The debugging version has many runtime checks and internal assertions, \eg stack (non-writable) guard page, and checks for stack overflow whenever context switches occur among coroutines and threads, which catches most stack overflows.
     2723After a program is debugged, the non-debugging version can be used to significantly decrease space and increase performance.
     2724
     2725
     2726\section{Performance}
     2727\label{s:Performance}
     2728
     2729To verify the implementation of the \CFA runtime, a series of microbenchmarks are performed comparing \CFA with pthreads, Java OpenJDK-9, Go 1.12.6 and \uC 7.0.0.
      2730For comparison, the package must be multi-processor (M:N), which excludes libdill/libmill~\cite{libdill} (M:1), and use a shared-memory programming model, \eg not message passing.
     2731The benchmark computer is an AMD Opteron\texttrademark\ 6380 NUMA 64-core, 8 socket, 2.5 GHz processor, running Ubuntu 16.04.6 LTS, and \CFA/\uC are compiled with gcc 6.5.
     2732
     2733All benchmarks are run using the following harness. (The Java harness is augmented to circumvent JIT issues.)
     2734\begin{cfa}
     2735unsigned int N = 10_000_000;
     2736#define BENCH( `run` ) Time before = getTimeNsec();  `run;`  Duration result = (getTimeNsec() - before) / N;
     2737\end{cfa}
     2738The method used to get time is @clock_gettime( CLOCK_REALTIME )@.
     2739Each benchmark is performed @N@ times, where @N@ varies depending on the benchmark;
     2740the total time is divided by @N@ to obtain the average time for a benchmark.
     2741Each benchmark experiment is run 31 times.
     2742All omitted tests for other languages are functionally identical to the \CFA tests and available online~\cite{CforallBenchMarks}.
     2743% tar --exclude=.deps --exclude=Makefile --exclude=Makefile.in --exclude=c.c --exclude=cxx.cpp --exclude=fetch_add.c -cvhf benchmark.tar benchmark
     2744
     2745\paragraph{Object Creation}
     2746
     2747Object creation is measured by creating/deleting the specific kind of concurrent object.
     2748Figure~\ref{f:creation} shows the code for \CFA, with results in Table~\ref{tab:creation}.
     2749The only note here is that the call stacks of \CFA coroutines are lazily created, therefore without priming the coroutine to force stack creation, the creation cost is artificially low.
     2750
    29532751\begin{multicols}{2}
    2954 \CFA Coroutines
    2955 \begin{cfa}
    2956 coroutine GreatSuspender {};
    2957 void main(GreatSuspender& this) {
    2958         while(true) { suspend(); }
    2959 }
     2752\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
     2753\begin{cfa}
     2754@thread@ MyThread {};
     2755void @main@( MyThread & ) {}
    29602756int main() {
    2961         GreatSuspender s;
    2962         resume(s);
     2757        BENCH( for ( N ) { @MyThread m;@ } )
     2758        sout | result`ns;
     2759}
     2760\end{cfa}
     2761\captionof{figure}{\CFA object-creation benchmark}
     2762\label{f:creation}
     2763
     2764\columnbreak
     2765
     2766\vspace*{-16pt}
     2767\captionof{table}{Object creation comparison (nanoseconds)}
     2768\label{tab:creation}
     2769
     2770\begin{tabular}[t]{@{}r*{3}{D{.}{.}{5.2}}@{}}
     2771\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     2772\CFA Coroutine Lazy             & 13.2          & 13.1          & 0.44          \\
     2773\CFA Coroutine Eager    & 531.3         & 536.0         & 26.54         \\
     2774\CFA Thread                             & 2074.9        & 2066.5        & 170.76        \\
     2775\uC Coroutine                   & 89.6          & 90.5          & 1.83          \\
     2776\uC Thread                              & 528.2         & 528.5         & 4.94          \\
     2777Goroutine                               & 4068.0        & 4113.1        & 414.55        \\
     2778Java Thread                             & 103848.5      & 104295.4      & 2637.57       \\
     2779Pthreads                                & 33112.6       & 33127.1       & 165.90
     2780\end{tabular}
     2781\end{multicols}
     2782
     2783
     2784\paragraph{Context-Switching}
     2785
     2786In procedural programming, the cost of a function call is important as modularization (refactoring) increases.
     2787(In many cases, a compiler inlines function calls to eliminate this cost.)
     2788Similarly, when modularization extends to coroutines/tasks, the time for a context switch becomes a relevant factor.
     2789The coroutine test is from resumer to suspender and from suspender to resumer, which is two context switches.
     2790The thread test is using yield to enter and return from the runtime kernel, which is two context switches.
     2791The difference in performance between coroutine and thread context-switch is the cost of scheduling for threads, whereas coroutines are self-scheduling.
     2792Figure~\ref{f:ctx-switch} only shows the \CFA code for coroutines/threads (other systems are similar) with all results in Table~\ref{tab:ctx-switch}.
     2793
     2794\begin{multicols}{2}
     2795\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
     2796\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     2797@coroutine@ C {} c;
     2798void main( C & ) { for ( ;; ) { @suspend;@ } }
     2799int main() { // coroutine test
     2800        BENCH( for ( N ) { @resume( c );@ } )
     2801        sout | result`ns;
     2802}
     2803int main() { // task test
     2804        BENCH( for ( N ) { @yield();@ } )
     2805        sout | result`ns;
     2806}
     2807\end{cfa}
     2808\captionof{figure}{\CFA context-switch benchmark}
     2809\label{f:ctx-switch}
     2810
     2811\columnbreak
     2812
     2813\vspace*{-16pt}
     2814\captionof{table}{Context switch comparison (nanoseconds)}
     2815\label{tab:ctx-switch}
     2816\begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
     2817\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     2818C function              & 1.8   & 1.8   & 0.01  \\
     2819\CFA generator  & 2.4   & 2.2   & 0.25  \\
     2820\CFA Coroutine  & 36.2  & 36.2  & 0.25  \\
     2821\CFA Thread             & 93.2  & 93.5  & 2.09  \\
     2822\uC Coroutine   & 52.0  & 52.1  & 0.51  \\
     2823\uC Thread              & 96.2  & 96.3  & 0.58  \\
     2824Goroutine               & 141.0 & 141.3 & 3.39  \\
     2825Java Thread             & 374.0 & 375.8 & 10.38 \\
     2826Pthreads Thread & 361.0 & 365.3 & 13.19
     2827\end{tabular}
     2828\end{multicols}
     2829
     2830
     2831\paragraph{Mutual-Exclusion}
     2832
     2833Uncontented mutual exclusion, which frequently occurs, is measured by entering/leaving a critical section.
     2834For monitors, entering and leaving a monitor function is measured.
     2835To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured.
     2836Figure~\ref{f:mutex} shows the code for \CFA with all results in Table~\ref{tab:mutex}.
     2837Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects.
     2838
     2839\begin{multicols}{2}
     2840\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
     2841\begin{cfa}
     2842@monitor@ M {} m1/*, m2, m3, m4*/;
     2843void __attribute__((noinline))
     2844do_call( M & @mutex m/*, m2, m3, m4*/@ ) {}
     2845int main() {
    29632846        BENCH(
    2964                 for(size_t i=0; i<n; i++) {
    2965                         resume(s);
    2966                 },
    2967                 result
     2847                for( N ) do_call( m1/*, m2, m3, m4*/ );
    29682848        )
    2969         printf("%llu\n", result);
    2970 }
    2971 \end{cfa}
     2849        sout | result`ns;
     2850}
     2851\end{cfa}
     2852\captionof{figure}{\CFA acquire/release mutex benchmark}
     2853\label{f:mutex}
     2854
    29722855\columnbreak
    2973 \CFA Threads
    2974 \begin{cfa}
    2975 
    2976 
    2977 
    2978 
    2979 int main() {
    2980 
    2981 
    2982         BENCH(
    2983                 for(size_t i=0; i<n; i++) {
    2984                         yield();
    2985                 },
    2986                 result
    2987         )
    2988         printf("%llu\n", result);
    2989 }
    2990 \end{cfa}
     2856
     2857\vspace*{-16pt}
     2858\captionof{table}{Mutex comparison (nanoseconds)}
     2859\label{tab:mutex}
     2860\begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
     2861\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     2862test and test-and-test lock             & 19.1  & 18.9  & 0.40  \\
     2863\CFA @mutex@ function, 1 arg.   & 45.9  & 46.6  & 1.45  \\
     2864\CFA @mutex@ function, 2 arg.   & 105.0 & 104.7 & 3.08  \\
     2865\CFA @mutex@ function, 4 arg.   & 165.0 & 167.6 & 5.65  \\
     2866\uC @monitor@ member rtn.               & 54.0  & 53.7  & 0.82  \\
     2867Java synchronized method                & 31.0  & 31.1  & 0.50  \\
     2868Pthreads Mutex Lock                             & 33.6  & 32.6  & 1.14
     2869\end{tabular}
    29912870\end{multicols}
    2992 \begin{cfa}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={f:ctx-switch}]
    2993 \end{cfa}
    2994 \end{figure}
    2995 
    2996 \begin{table}
    2997 \begin{center}
    2998 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |}
    2999 \cline{2-4}
    3000 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\
    3001 \hline
    3002 Kernel Thread   & 241.5 & 243.86        & 5.08 \\
    3003 \CFA Coroutine  & 38            & 38            & 0    \\
    3004 \CFA Thread             & 103           & 102.96        & 2.96 \\
    3005 \uC Coroutine   & 46            & 45.86 & 0.35 \\
    3006 \uC Thread              & 98            & 99.11 & 1.42 \\
    3007 Goroutine               & 150           & 149.96        & 3.16 \\
    3008 Java Thread             & 289           & 290.68        & 8.72 \\
    3009 \hline
    3010 \end{tabular}
    3011 \end{center}
    3012 \caption{Context Switch comparison.
    3013 All numbers are in nanoseconds(\si{\nano\second})}
    3014 \label{tab:ctx-switch}
    3015 \end{table}
    3016 
    3017 \subsection{Mutual-Exclusion}
    3018 The next interesting benchmark is to measure the overhead to enter/leave a critical-section.
    3019 For monitors, the simplest approach is to measure how long it takes to enter and leave a monitor routine.
    3020 Figure~\ref{f:mutex} shows the code for \CFA.
    3021 To put the results in context, the cost of entering a non-inline routine and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured.
    3022 The results can be shown in table \ref{tab:mutex}.
    3023 
    3024 \begin{figure}
    3025 \begin{cfa}[caption={\CFA benchmark code used to measure mutex routines.},label={f:mutex}]
    3026 monitor M {};
    3027 void __attribute__((noinline)) call( M & mutex m /*, m2, m3, m4*/ ) {}
    3028 
    3029 int main() {
    3030         M m/*, m2, m3, m4*/;
    3031         BENCH(
    3032                 for(size_t i=0; i<n; i++) {
    3033                         call(m/*, m2, m3, m4*/);
    3034                 },
    3035                 result
    3036         )
    3037         printf("%llu\n", result);
    3038 }
    3039 \end{cfa}
    3040 \end{figure}
    3041 
    3042 \begin{table}
    3043 \begin{center}
    3044 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |}
    3045 \cline{2-4}
    3046 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\
    3047 \hline
    3048 C routine                                               & 2             & 2             & 0    \\
    3049 FetchAdd + FetchSub                             & 26            & 26            & 0    \\
    3050 Pthreads Mutex Lock                             & 31            & 31.86 & 0.99 \\
    3051 \uC @monitor@ member routine            & 30            & 30            & 0    \\
    3052 \CFA @mutex@ routine, 1 argument        & 41            & 41.57 & 0.9  \\
    3053 \CFA @mutex@ routine, 2 argument        & 76            & 76.96 & 1.57 \\
    3054 \CFA @mutex@ routine, 4 argument        & 145           & 146.68        & 3.85 \\
    3055 Java synchronized routine                       & 27            & 28.57 & 2.6  \\
    3056 \hline
    3057 \end{tabular}
    3058 \end{center}
    3059 \caption{Mutex routine comparison.
    3060 All numbers are in nanoseconds(\si{\nano\second})}
    3061 \label{tab:mutex}
    3062 \end{table}
    3063 
    3064 \subsection{Internal Scheduling}
    3065 The internal-scheduling benchmark measures the cost of waiting on and signalling a condition variable.
    3066 Figure~\ref{f:int-sched} shows the code for \CFA, with results table \ref{tab:int-sched}.
    3067 As with all other benchmarks, all omitted tests are functionally identical to one of these tests.
    3068 
    3069 \begin{figure}
    3070 \begin{cfa}[caption={Benchmark code for internal scheduling},label={f:int-sched}]
     2871
     2872
     2873\paragraph{External Scheduling}
     2874
      2875External scheduling is measured using a cycle of two threads calling and accepting the call via the @waitfor@ statement.
     2876Figure~\ref{f:ext-sched} shows the code for \CFA, with results in Table~\ref{tab:ext-sched}.
     2877Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects.
     2878
     2879\begin{multicols}{2}
     2880\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
     2881\vspace*{-16pt}
     2882\begin{cfa}
    30712883volatile int go = 0;
    3072 condition c;
    3073 monitor M {};
    3074 M m1;
    3075 
    3076 void __attribute__((noinline)) do_call( M & mutex a1 ) { signal(c); }
    3077 
     2884@monitor@ M {} m;
    30782885thread T {};
    3079 void ^?{}( T & mutex this ) {}
    3080 void main( T & this ) {
    3081         while(go == 0) { yield(); }
    3082         while(go == 1) { do_call(m1); }
    3083 }
    3084 int  __attribute__((noinline)) do_wait( M & mutex a1 ) {
    3085         go = 1;
    3086         BENCH(
    3087                 for(size_t i=0; i<n; i++) {
    3088                         wait(c);
    3089                 },
    3090                 result
    3091         )
    3092         printf("%llu\n", result);
    3093         go = 0;
    3094         return 0;
     2886void __attribute__((noinline))
     2887do_call( M & @mutex@ ) {}
     2888void main( T & ) {
     2889        while ( go == 0 ) { yield(); }
     2890        while ( go == 1 ) { do_call( m ); }
     2891}
     2892int __attribute__((noinline))
     2893do_wait( M & @mutex@ m ) {
     2894        go = 1; // continue other thread
     2895        BENCH( for ( N ) { @waitfor( do_call, m );@ } )
     2896        go = 0; // stop other thread
     2897        sout | result`ns;
    30952898}
    30962899int main() {
    30972900        T t;
    3098         return do_wait(m1);
    3099 }
    3100 \end{cfa}
    3101 \end{figure}
    3102 
    3103 \begin{table}
    3104 \begin{center}
    3105 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |}
    3106 \cline{2-4}
    3107 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\
    3108 \hline
    3109 Pthreads Condition Variable                     & 5902.5        & 6093.29       & 714.78 \\
    3110 \uC @signal@                                    & 322           & 323   & 3.36   \\
    3111 \CFA @signal@, 1 @monitor@      & 352.5 & 353.11        & 3.66   \\
    3112 \CFA @signal@, 2 @monitor@      & 430           & 430.29        & 8.97   \\
    3113 \CFA @signal@, 4 @monitor@      & 594.5 & 606.57        & 18.33  \\
    3114 Java @notify@                           & 13831.5       & 15698.21      & 4782.3 \\
    3115 \hline
     2901        do_wait( m );
     2902}
     2903\end{cfa}
     2904\captionof{figure}{\CFA external-scheduling benchmark}
     2905\label{f:ext-sched}
     2906
     2907\columnbreak
     2908
     2909\vspace*{-16pt}
     2910\captionof{table}{External-scheduling comparison (nanoseconds)}
     2911\label{tab:ext-sched}
     2912\begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
     2913\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     2914\CFA @waitfor@, 1 @monitor@     & 376.4 & 376.8 & 7.63  \\
     2915\CFA @waitfor@, 2 @monitor@     & 491.4 & 492.0 & 13.31 \\
     2916\CFA @waitfor@, 4 @monitor@     & 681.0 & 681.7 & 19.10 \\
     2917\uC @_Accept@                           & 331.1 & 331.4 & 2.66
    31162918\end{tabular}
    3117 \end{center}
    3118 \caption{Internal scheduling comparison.
    3119 All numbers are in nanoseconds(\si{\nano\second})}
    3120 \label{tab:int-sched}
    3121 \end{table}
    3122 
    3123 \subsection{External Scheduling}
    3124 The Internal scheduling benchmark measures the cost of the @waitfor@ statement (@_Accept@ in \uC).
    3125 Figure~\ref{f:ext-sched} shows the code for \CFA, with results in table \ref{tab:ext-sched}.
    3126 As with all other benchmarks, all omitted tests are functionally identical to one of these tests.
    3127 
    3128 \begin{figure}
    3129 \begin{cfa}[caption={Benchmark code for external scheduling},label={f:ext-sched}]
     2919\end{multicols}
     2920
     2921
     2922\paragraph{Internal Scheduling}
     2923
     2924Internal scheduling is measured using a cycle of two threads signalling and waiting.
     2925Figure~\ref{f:int-sched} shows the code for \CFA, with results in Table~\ref{tab:int-sched}.
     2926Note, the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects.
      2927Java scheduling is significantly greater because the benchmark explicitly creates multiple threads in order to prevent the JIT from making the program sequential, \ie removing all locking.
     2928
     2929\begin{multicols}{2}
     2930\lstset{language=CFA,moredelim=**[is][\color{red}]{@}{@},deletedelim=**[is][]{`}{`}}
     2931\begin{cfa}
    31302932volatile int go = 0;
    3131 monitor M {};
    3132 M m1;
     2933@monitor@ M { @condition c;@ } m;
     2934void __attribute__((noinline))
     2935do_call( M & @mutex@ a1 ) { @signal( c );@ }
    31332936thread T {};
    3134 
    3135 void __attribute__((noinline)) do_call( M & mutex a1 ) {}
    3136 
    3137 void ^?{}( T & mutex this ) {}
    31382937void main( T & this ) {
    3139         while(go == 0) { yield(); }
    3140         while(go == 1) { do_call(m1); }
    3141 }
    3142 int  __attribute__((noinline)) do_wait( M & mutex a1 ) {
    3143         go = 1;
    3144         BENCH(
    3145                 for(size_t i=0; i<n; i++) {
    3146                         waitfor(call, a1);
    3147                 },
    3148                 result
    3149         )
    3150         printf("%llu\n", result);
    3151         go = 0;
    3152         return 0;
     2938        while ( go == 0 ) { yield(); }
     2939        while ( go == 1 ) { do_call( m ); }
     2940}
     2941int  __attribute__((noinline))
     2942do_wait( M & mutex m ) with(m) {
     2943        go = 1; // continue other thread
     2944        BENCH( for ( N ) { @wait( c );@ } );
     2945        go = 0; // stop other thread
     2946        sout | result`ns;
    31532947}
    31542948int main() {
    31552949        T t;
    3156         return do_wait(m1);
    3157 }
    3158 \end{cfa}
    3159 \end{figure}
    3160 
    3161 \begin{table}
    3162 \begin{center}
    3163 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |}
    3164 \cline{2-4}
    3165 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\
    3166 \hline
    3167 \uC @Accept@                                    & 350           & 350.61        & 3.11  \\
    3168 \CFA @waitfor@, 1 @monitor@     & 358.5 & 358.36        & 3.82  \\
    3169 \CFA @waitfor@, 2 @monitor@     & 422           & 426.79        & 7.95  \\
    3170 \CFA @waitfor@, 4 @monitor@     & 579.5 & 585.46        & 11.25 \\
    3171 \hline
     2950        do_wait( m );
     2951}
     2952\end{cfa}
      2953\captionof{figure}{\CFA internal-scheduling benchmark}
     2954\label{f:int-sched}
     2955
     2956\columnbreak
     2957
     2958\vspace*{-16pt}
     2959\captionof{table}{Internal-scheduling comparison (nanoseconds)}
     2960\label{tab:int-sched}
     2961\bigskip
     2962
     2963\begin{tabular}{@{}r*{3}{D{.}{.}{5.2}}@{}}
     2964\multicolumn{1}{@{}c}{} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     2965\CFA @signal@, 1 @monitor@      & 372.6         & 374.3         & 14.17         \\
     2966\CFA @signal@, 2 @monitor@      & 492.7         & 494.1         & 12.99         \\
     2967\CFA @signal@, 4 @monitor@      & 749.4         & 750.4         & 24.74         \\
     2968\uC @signal@                            & 320.5         & 321.0         & 3.36          \\
     2969Java @notify@                           & 10160.5       & 10169.4       & 267.71        \\
     2970Pthreads Cond. Variable         & 4949.6        & 5065.2        & 363
    31722971\end{tabular}
    3173 \end{center}
    3174 \caption{External scheduling comparison.
    3175 All numbers are in nanoseconds(\si{\nano\second})}
    3176 \label{tab:ext-sched}
    3177 \end{table}
    3178 
    3179 
    3180 \subsection{Object Creation}
    3181 Finally, the last benchmark measures the cost of creation for concurrent objects.
    3182 Figure~\ref{f:creation} shows the code for @pthread@s and \CFA threads, with results shown in table \ref{tab:creation}.
    3183 As with all other benchmarks, all omitted tests are functionally identical to one of these tests.
    3184 The only note here is that the call stacks of \CFA coroutines are lazily created, therefore without priming the coroutine, the creation cost is very low.
    3185 
    3186 \begin{figure}
    3187 \begin{center}
    3188 @pthread@
    3189 \begin{cfa}
    3190 int main() {
    3191         BENCH(
    3192                 for(size_t i=0; i<n; i++) {
    3193                         pthread_t thread;
    3194                         if(pthread_create(&thread,NULL,foo,NULL)<0) {
    3195                                 perror( "failure" );
    3196                                 return 1;
    3197                         }
    3198 
    3199                         if(pthread_join(thread, NULL)<0) {
    3200                                 perror( "failure" );
    3201                                 return 1;
    3202                         }
    3203                 },
    3204                 result
    3205         )
    3206         printf("%llu\n", result);
    3207 }
    3208 \end{cfa}
    3209 
    3210 
    3211 
    3212 \CFA Threads
    3213 \begin{cfa}
    3214 int main() {
    3215         BENCH(
    3216                 for(size_t i=0; i<n; i++) {
    3217                         MyThread m;
    3218                 },
    3219                 result
    3220         )
    3221         printf("%llu\n", result);
    3222 }
    3223 \end{cfa}
    3224 \end{center}
    3225 \caption{Benchmark code for \protect\lstinline|pthread|s and \CFA to measure object creation}
    3226 \label{f:creation}
    3227 \end{figure}
    3228 
    3229 \begin{table}
    3230 \begin{center}
    3231 \begin{tabular}{| l | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] | S[table-format=5.2,table-number-alignment=right] |}
    3232 \cline{2-4}
    3233 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\
    3234 \hline
    3235 Pthreads                        & 26996 & 26984.71      & 156.6  \\
    3236 \CFA Coroutine Lazy     & 6             & 5.71  & 0.45   \\
    3237 \CFA Coroutine Eager    & 708           & 706.68        & 4.82   \\
    3238 \CFA Thread                     & 1173.5        & 1176.18       & 15.18  \\
    3239 \uC Coroutine           & 109           & 107.46        & 1.74   \\
    3240 \uC Thread                      & 526           & 530.89        & 9.73   \\
    3241 Goroutine                       & 2520.5        & 2530.93       & 61,56  \\
    3242 Java Thread                     & 91114.5       & 92272.79      & 961.58 \\
    3243 \hline
    3244 \end{tabular}
    3245 \end{center}
    3246 \caption{Creation comparison.
    3247 All numbers are in nanoseconds(\si{\nano\second}).}
    3248 \label{tab:creation}
    3249 \end{table}
    3250 
     2972\end{multicols}
    32512973
    32522974
    32532975\section{Conclusion}
    3254 This paper has achieved a minimal concurrency \textbf{api} that is simple, efficient and usable as the basis for higher-level features.
    3255 The approach presented is based on a lightweight thread-system for parallelism, which sits on top of clusters of processors.
    3256 This M:N model is judged to be both more efficient and allow more flexibility for users.
    3257 Furthermore, this document introduces monitors as the main concurrency tool for users.
    3258 This paper also offers a novel approach allowing multiple monitors to be accessed simultaneously without running into the Nested Monitor Problem~\cite{Lister77}.
    3259 It also offers a full implementation of the concurrency runtime written entirely in \CFA, effectively the largest \CFA code base to date.
    3260 
    3261 
    3262 % ======================================================================
    3263 % ======================================================================
     2976
     2977Advanced control-flow will always be difficult, especially when there is temporal ordering and nondeterminism.
     2978However, many systems exacerbate the difficulty through their presentation mechanisms.
     2979This paper shows it is possible to present a hierarchy of control-flow features, generator, coroutine, thread, and monitor, providing an integrated set of high-level, efficient, and maintainable control-flow features.
     2980Eliminated from \CFA are spurious wakeup and barging, which are nonintuitive and lead to errors, and having to work with a bewildering set of low-level locks and acquisition techniques.
     2981\CFA high-level race-free monitors and tasks provide the core mechanisms for mutual exclusion and synchronization, without having to resort to magic qualifiers like @volatile@/@atomic@.
     2982Extending these mechanisms to handle high-level deadlock-free bulk acquire across both mutual exclusion and synchronization is a unique contribution.
     2983The \CFA runtime provides concurrency based on a preemptive M:N user-level threading-system, executing in clusters, which encapsulate scheduling of work on multiple kernel threads providing parallelism.
     2984The M:N model is judged to be efficient and provide greater flexibility than a 1:1 threading model.
     2985These concepts and the \CFA runtime-system are written in the \CFA language, extensively leveraging the \CFA type-system, which demonstrates the expressiveness of the \CFA language.
     2986Performance comparisons with other concurrent systems/languages show the \CFA approach is competitive across all low-level operations, which translates directly into good performance in well-written concurrent applications.
     2987C programmers should feel comfortable using these mechanisms for developing complex control-flow in applications, with the ability to obtain maximum available performance by selecting mechanisms at the appropriate level of need.
     2988
     2989
    32642990\section{Future Work}
    3265 % ======================================================================
    3266 % ======================================================================
    3267 
    3268 \subsection{Performance} \label{futur:perf}
    3269 This paper presents a first implementation of the \CFA concurrency runtime.
    3270 Therefore, there is still significant work to improve performance.
    3271 Many of the data structures and algorithms may change in the future to more efficient versions.
    3272 For example, the number of monitors in a single bulk acquire is only bound by the stack size, this is probably unnecessarily generous.
    3273 It may be possible that limiting the number helps increase performance.
    3274 However, it is not obvious that the benefit would be significant.
    3275 
    3276 \subsection{Flexible Scheduling} \label{futur:sched}
     2991
     2992While control flow in \CFA has a strong start, development is still underway to complete a number of missing features.
     2993
     2994\paragraph{Flexible Scheduling}
     2995\label{futur:sched}
     2996
    32772997An important part of concurrency is scheduling.
    32782998Different scheduling algorithms can affect performance (both in terms of average and variation).
    32792999However, no single scheduler is optimal for all workloads and therefore there is value in being able to change the scheduler for given programs.
    3280 One solution is to offer various tweaking options to users, allowing the scheduler to be adjusted to the requirements of the workload.
    3281 However, in order to be truly flexible, it would be interesting to allow users to add arbitrary data and arbitrary scheduling algorithms.
    3282 For example, a web server could attach Type-of-Service information to threads and have a ``ToS aware'' scheduling algorithm tailored to this specific web server.
    3283 This path of flexible schedulers will be explored for \CFA.
    3284 
    3285 \subsection{Non-Blocking I/O} \label{futur:nbio}
     3286 While most of the parallelism tools are aimed at data parallelism and control-flow parallelism, many modern workloads are bound not by computation but by IO operations, a common case being web servers and XaaS (anything as a service).
    3287 These types of workloads often require significant engineering around amortizing costs of blocking IO operations.
    3288 At its core, non-blocking I/O is an operating system level feature that allows queuing IO operations (\eg network operations) and registering for notifications instead of waiting for requests to complete.
     3289 In this context, the role of the language is to make non-blocking IO easily available and with low overhead.
    3290 The current trend is to use asynchronous programming using tools like callbacks and/or futures and promises, which can be seen in frameworks like Node.js~\cite{NodeJs} for JavaScript, Spring MVC~\cite{SpringMVC} for Java and Django~\cite{Django} for Python.
    3291 However, while these are valid solutions, they lead to code that is harder to read and maintain because it is much less linear.
    3292 
    3293 \subsection{Other Concurrency Tools} \label{futur:tools}
    3294 While monitors offer a flexible and powerful concurrent core for \CFA, other concurrency tools are also necessary for a complete multi-paradigm concurrency package.
    3295 Examples of such tools can include simple locks and condition variables, futures and promises~\cite{promises}, executors and actors.
    3296 These additional features are useful when monitors offer a level of abstraction that is inadequate for certain tasks.
    3297 
    3298 \subsection{Implicit Threading} \label{futur:implcit}
    3299 Simpler applications can benefit greatly from having implicit parallelism.
    3300 That is, parallelism that does not rely on the user to write concurrency.
    3301 This type of parallelism can be achieved both at the language level and at the library level.
     3302 The canonical example of implicit parallelism is parallel for loops, which are the simplest example of divide-and-conquer algorithms~\cite{uC++book}.
    3303 Table \ref{f:parfor} shows three different code examples that accomplish point-wise sums of large arrays.
    3304 Note that none of these examples explicitly declare any concurrency or parallelism objects.
    3305 
    3306 \begin{table}
    3307 \begin{center}
    3308 \begin{tabular}[t]{|c|c|c|}
    3309 Sequential & Library Parallel & Language Parallel \\
    3310 \begin{cfa}[tabsize=3]
    3311 void big_sum(
    3312         int* a, int* b,
    3313         int* o,
    3314         size_t len)
    3315 {
    3316         for(
    3317                 int i = 0;
    3318                 i < len;
    3319                 ++i )
    3320         {
    3321                 o[i]=a[i]+b[i];
    3322         }
    3323 }
    3324 
    3325 
    3326 
    3327 
    3328 
    3329 int* a[10000];
    3330 int* b[10000];
    3331 int* c[10000];
    3332 //... fill in a & b
    3333 big_sum(a,b,c,10000);
    3334 \end{cfa} &\begin{cfa}[tabsize=3]
    3335 void big_sum(
    3336         int* a, int* b,
    3337         int* o,
    3338         size_t len)
    3339 {
    3340         range ar(a, a+len);
    3341         range br(b, b+len);
    3342         range or(o, o+len);
    3343         parfor( ai, bi, oi,
    3344         [](     int* ai,
    3345                 int* bi,
    3346                 int* oi)
    3347         {
    3348                 oi=ai+bi;
    3349         });
    3350 }
    3351 
    3352 
    3353 int* a[10000];
    3354 int* b[10000];
    3355 int* c[10000];
    3356 //... fill in a & b
    3357 big_sum(a,b,c,10000);
    3358 \end{cfa}&\begin{cfa}[tabsize=3]
    3359 void big_sum(
    3360         int* a, int* b,
    3361         int* o,
    3362         size_t len)
    3363 {
    3364         parfor (ai,bi,oi)
    3365             in (a, b, o )
    3366         {
    3367                 oi = ai + bi;
    3368         }
    3369 }
    3370 
    3371 
    3372 
    3373 
    3374 
    3375 
    3376 
    3377 int* a[10000];
    3378 int* b[10000];
    3379 int* c[10000];
    3380 //... fill in a & b
    3381 big_sum(a,b,c,10000);
    3382 \end{cfa}
    3383 \end{tabular}
    3384 \end{center}
    3385 \caption{For loop to sum numbers: Sequential, using library parallelism and language parallelism.}