Changes in / [e3282fe:4432b52]


Location: doc
Files: 5 edited

Legend: removed lines are prefixed with "-", added lines with "+"; unprefixed indented lines are unchanged context; "@@ -m +n @@" gives the old/new starting line numbers of each hunk.
  • doc/bibliography/pl.bib

    (e3282fe -> 4432b52)
    @@ -961 +961 @@
         title       = {C Programming Language {ISO/IEC} 9899:1999(E)},
         edition     = {2nd},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://webstore.ansi.org/Standards/INCITS/INCITSISOIEC98991999R2005}{https://webstore.ansi.org/\-Standards/\-INCITS/\-INCITSISOIEC98991999R2005}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 1999,
    +    note        = {\href{https://webstore.ansi.org/Standards/INCITS/INCITSISOIEC98991999R2005}{https://webstore.ansi.org/\-Standards/\-INCITS/\-INCITSISOIEC98991999R2005}},
     }
     
    @@ -972 +973 @@
         title       = {C Programming Language {ISO/IEC} 9889:2011-12},
         edition     = {3rd},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/57853.html}{https://\-www.iso.org/\-standard/\-57853.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2012,
    +    note        = {\href{https://www.iso.org/standard/57853.html}{https://\-www.iso.org/\-standard/\-57853.html}},
     }
     
    @@ -982 +984 @@
         key         = {Concepts},
         title       = {{C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Programming language -- Extensions for concepts {ISO/IEC} {TS} 19217:2015},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/64031.html}{https://\-www.iso.org/\-standard/\-64031.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2015,
    +    note        = {\href{https://www.iso.org/standard/64031.html}{https://\-www.iso.org/\-standard/\-64031.html}},
     }
     
    @@ -1149 +1152 @@
         title       = {C\# Language Specification, Standard ECMA-334},
         organization= {ECMA International Standardizing Information and Communication Systems},
    +    address     = {Geneva, Switzerland},
         month       = jun,
         year        = 2006,
     
    @@ -1298 +1302 @@
         title       = {Programming Languages -- {Cobol} ISO/IEC 1989:2014},
         edition     = {2nd},
    -    institution = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/51416.html}{https://\-www.iso.org/\-standard/\-51416.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2014,
    +    note        = {\href{https://www.iso.org/standard/51416.html}{https://\-www.iso.org/\-standard/\-51416.html}},
     }
     
    @@ -1654 +1659 @@
         title       = {$\mu${C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Annotated Reference Manual, Version 7.0.0},
         organization= {University of Waterloo},
    +    address     = {Waterloo Ontario, Canada},
         month       = sep,
         year        = 2018,
     
    @@ -2086 +2092 @@
         author      = {Walter Bright and Andrei Alexandrescu},
         organization= {Digital Mars},
    +    address     = {Vienna Virginia, U.S.A.},
         year        = 2016,
         note        = {\href{http://dlang.org/spec/spec.html}{http://\-dlang.org/\-spec/\-spec.html}},
     
    @@ -3353 +3360 @@
         title       = {Programming Languages -- {Fortran} Part 1:Base Language ISO/IEC 1539-1:2010},
         edition     = {3rd},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/50459.html}{https://\-www.iso.org/\-standard/\-50459.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2010,
    +    note        = {\href{https://www.iso.org/standard/50459.html}{https://\-www.iso.org/\-standard/\-50459.html}},
     }
     
    @@ -3364 +3372 @@
         title       = {Programming Languages -- {Fortran} Part 1:Base Language ISO/IEC 1539-1:2018},
         edition     = {4rd},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/72320.html}{https://\-www.iso.org/\-standard/\-72320.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2018,
    +    note        = {\href{https://www.iso.org/standard/72320.html}{https://\-www.iso.org/\-standard/\-72320.html}},
     }
     
    @@ -4744 +4753 @@
         address     = {New York, NY, USA},
     }
    +
     @techreport{Mesa,
         keywords    = {monitors, packages},
     
    @@ -4750 +4760 @@
         title       = {Mesa Language Manual},
         institution = {Xerox Palo Alto Research Center},
    +    address     = {Palo Alto, California, U.S.A.},
         number      = {CSL--79--3},
         month       = apr,
     
    @@ -6301 +6312 @@
         title       = {{C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Programming Language ISO/IEC 14882:1998},
         edition     = {1st},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/25845.html}{https://\-www.iso.org/\-standard/\-25845.html}},
    +    organization  = {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 1998,
    +    note        = {\href{https://www.iso.org/standard/25845.html}{https://\-www.iso.org/\-standard/\-25845.html}},
     }
     
    @@ -6312 +6324 @@
         title       = {{C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Programming Language ISO/IEC 14882:2014},
         edition     = {4th},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/64029.html}{https://\-www.iso.org/\-standard/\-64029.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2014,
    +    note        = {\href{https://www.iso.org/standard/64029.html}{https://\-www.iso.org/\-standard/\-64029.html}},
     }
     
    @@ -6323 +6336 @@
         title       = {{C}{\kern-.1em\hbox{\large\texttt{+\kern-.25em+}}} Programming Language ISO/IEC 14882:2017},
         edition     = {5th},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/68564.html}{https://\-www.iso.org/\-standard/\-68564.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2017,
    +    note        = {\href{https://www.iso.org/standard/68564.html}{https://\-www.iso.org/\-standard/\-68564.html}},
     }
     
    @@ -6457 +6471 @@
         title       = {The Programming Language Concurrent Pascal},
         journal     = ieeese,
    -    volume      = 2,
    +    volume      = {SE-1},
    +    number      = 2,
         month       = jun,
         year        = 1975,
    -    pages       = {199-206}
    +    pages       = {199-207}
     }
     
    @@ -6719 +6734 @@
         title       = {Programming languages -- {Ada} ISO/IEC 8652:2012},
         edition     = {3rd},
    -    publisher   = {International Standard Organization},
    -    address     = {\href{https://www.iso.org/standard/61507.html}{https://\-www.iso.org/\-standard/\-61507.html}},
    +    organization= {International Standard Organization},
    +    address     = {Geneva, Switzerland},
         year        = 2012,
    +    note        = {\href{https://www.iso.org/standard/61507.html}{https://\-www.iso.org/\-standard/\-61507.html}},
     }
     
    @@ -7726 +7742 @@
         title       = {The Thoth System: Multi-Process Structuring and Portability},
         publisher   = {American Elsevier},
    +    address     = {New York, New York, U.S.A.},
         year        = 1982
     }
  • doc/papers/concurrency/Paper.tex

    (e3282fe -> 4432b52)
    @@ -110 +110 @@
     \newcommand{\abbrevFont}{\textit}                       % set empty for no italics
     \@ifundefined{eg}{
    -\newcommand{\EG}{\abbrevFont{e}\abbrevFont{g}}
    +%\newcommand{\EG}{\abbrevFont{e}\abbrevFont{g}}
    +\newcommand{\EG}{for example}
     \newcommand*{\eg}{%
     	\@ifnextchar{,}{\EG}%
     
    @@ -117 +118 @@
     }}{}%
     \@ifundefined{ie}{
    -\newcommand{\IE}{\abbrevFont{i}\abbrevFont{e}}
    +%\newcommand{\IE}{\abbrevFont{i}\abbrevFont{e}}
    +\newcommand{\IE}{that is}
     \newcommand*{\ie}{%
     	\@ifnextchar{,}{\IE}%
     
    @@ -264 +266 @@
     \address[1]{\orgdiv{Cheriton School of Computer Science}, \orgname{University of Waterloo}, \orgaddress{\state{Waterloo, ON}, \country{Canada}}}

    -\corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON, N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}
    +\corres{*Peter A. Buhr, Cheriton School of Computer Science, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada. \email{pabuhr{\char`\@}uwaterloo.ca}}

     % \fundingInfo{Natural Sciences and Engineering Research Council of Canada}

     \abstract[Summary]{
    -\CFA is a polymorphic, non-object-oriented, concurrent, backwards compatible extension of the C programming language.
    +\CFA is a polymorphic, nonobject-oriented, concurrent, backwards compatible extension of the C programming language.
     This paper discusses the design philosophy and implementation of its advanced control-flow and concurrent/parallel features, along with the supporting runtime written in \CFA.
     These features are created from scratch as ISO C has only low-level and/or unimplemented concurrency, so C programmers continue to rely on library approaches like pthreads.
     
    @@ -280 +282 @@
     }%

    -\keywords{generator, coroutine, concurrency, parallelism, thread, monitor, runtime, C, \CFA (Cforall)}
    +\keywords{C \CFA (Cforall) coroutine concurrency generator monitor parallelism runtime thread}

     
    @@ -291 +293 @@
     \section{Introduction}

    -\CFA~\cite{Moss18,Cforall} is a modern, polymorphic, non-object-oriented\footnote{
    +\CFA~\cite{Moss18,Cforall} is a modern, polymorphic, nonobject-oriented\footnote{
     \CFA has object-oriented features, such as constructors, destructors, and simple trait/interface inheritance.
     % Go interfaces, Rust traits, Swift Protocols, Haskell Type Classes and Java Interfaces.
     
    @@ -298 +300 @@
     % Java, Rust, and Haskell (not sure about Swift) have nominal inheritance, where there needs to be a specific statement that "this type inherits from this type".
     However, functions \emph{cannot} be nested in structures and there is no mechanism to designate a function parameter as a receiver, \lstinline@this@, parameter.},
    -backwards-compatible extension of the C programming language.
    +, backward-compatible extension of the C programming language.
     In many ways, \CFA is to C as Scala~\cite{Scala} is to Java, providing a vehicle for new typing and control-flow capabilities on top of a highly popular programming language\footnote{
     The TIOBE index~\cite{TIOBE} for May 2020 ranks the top five \emph{popular} programming languages as C 17\%, Java 16\%, Python 9\%, \CC 6\%, and \Csharp 4\% = 52\%, and over the past 30 years, C has always ranked either first or second in popularity.}
     
    @@ -309 +311 @@
     The \CFA control-flow framework extends ISO \Celeven~\cite{C11} with new call/return and concurrent/parallel control-flow.
     Call/return control-flow with argument and parameter passing appeared in the first programming languages.
    -Over the past 50 years, call/return has been augmented with features like static and dynamic call, exceptions (multi-level return) and generators/coroutines (see Section~\ref{s:StatefulFunction}).
    +Over the past 50 years, call/return has been augmented with features like static and dynamic call, exceptions (multilevel return) and generators/coroutines (see Section~\ref{s:StatefulFunction}).
     While \CFA has mechanisms for dynamic call (algebraic effects~\cite{Zhang19}) and exceptions\footnote{
     \CFA exception handling will be presented in a separate paper.
    -The key feature that dovetails with this paper is nonlocal exceptions allowing exceptions to be raised across stacks, with synchronous exceptions raised among coroutines and asynchronous exceptions raised among threads, similar to that in \uC~\cite[\S~5]{uC++}}, this work only discusses retaining state between calls via generators and coroutines.
    -\newterm{Coroutining} was introduced by Conway~\cite{Conway63} (1963), discussed by Knuth~\cite[\S~1.4.2]{Knuth73V1}, implemented in Simula67~\cite{Simula67}, formalized by Marlin~\cite{Marlin80}, and is now popular and appears in old and new programming languages: CLU~\cite{CLU}, \Csharp~\cite{Csharp}, Ruby~\cite{Ruby}, Python~\cite{Python}, JavaScript~\cite{JavaScript}, Lua~\cite{Lua}, \CCtwenty~\cite{C++20Coroutine19}.
    +The key feature that dovetails with this paper is nonlocal exceptions allowing exceptions to be raised across stacks, with synchronous exceptions raised among coroutines and asynchronous exceptions raised among threads, similar to that in \uC~\cite[\S~5]{uC++}}
    +, this work only discusses retaining state between calls via generators and coroutines.
    +\newterm{Coroutining} was introduced by Conway~\cite{Conway63}, discussed by Knuth~\cite[\S~1.4.2]{Knuth73V1}, implemented in Simula67~\cite{Simula67}, formalized by Marlin~\cite{Marlin80}, and is now popular and appears in old and new programming languages: CLU~\cite{CLU}, \Csharp~\cite{Csharp}, Ruby~\cite{Ruby}, Python~\cite{Python}, JavaScript~\cite{JavaScript}, Lua~\cite{Lua}, \CCtwenty~\cite{C++20Coroutine19}.
     Coroutining is sequential execution requiring direct handoff among coroutines, \ie only the programmer is controlling execution order.
     If coroutines transfer to an internal event-engine for scheduling the next coroutines (as in async-await), the program transitions into the realm of concurrency~\cite[\S~3]{Buhr05a}.
    -Coroutines are only a stepping stone towards concurrency where the commonality is that coroutines and threads retain state between calls.
    +Coroutines are only a stepping stone toward concurrency where the commonality is that coroutines and threads retain state between calls.

     \Celeven and \CCeleven define concurrency~\cite[\S~7.26]{C11}, but it is largely wrappers for a subset of the pthreads library~\cite{Pthreads}.\footnote{Pthreads concurrency is based on simple thread fork and join in a function and mutex or condition locks, which is low-level and error-prone}
     
    @@ -322 +325 @@
     While the \Celeven standard does not state a threading model, the historical association with pthreads suggests implementations would adopt kernel-level threading (1:1)~\cite{ThreadModel}, as for \CC.
     In contrast, there has been a renewed interest during the past decade in user-level (M:N, green) threading in old and new programming languages.
    -As multi-core hardware became available in the 1980/90s, both user and kernel threading were examined.
    +As multicore hardware became available in the 1980/1990s, both user and kernel threading were examined.
     Kernel threading was chosen, largely because of its simplicity and fit with the simpler operating systems and hardware architectures at the time, which gave it a performance advantage~\cite{Drepper03}.
     Libraries like pthreads were developed for C, and the Solaris operating-system switched from user (JDK 1.1~\cite{JDK1.1}) to kernel threads.
     As a result, many languages adopt the 1:1 kernel-threading model, like Java (Scala), Objective-C~\cite{obj-c-book}, \CCeleven~\cite{C11}, C\#~\cite{Csharp} and Rust~\cite{Rust}, with a variety of presentation mechanisms.
    -From 2000 onwards, several language implementations have championed the M:N user-threading model, like Go~\cite{Go}, Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, D~\cite{D}, and \uC~\cite{uC++,uC++book}, including putting green threads back into Java~\cite{Quasar}, and many user-threading libraries have appeared~\cite{Qthreads,MPC,Marcel}.
    +From 2000 onward, several language implementations have championed the M:N user-threading model, like Go~\cite{Go}, Erlang~\cite{Erlang}, Haskell~\cite{Haskell}, D~\cite{D}, and \uC~\cite{uC++,uC++book}, including putting green threads back into Java~\cite{Quasar}, and many user-threading libraries have appeared~\cite{Qthreads,MPC,Marcel}.
     The main argument for user-level threading is that it is lighter weight than kernel threading because locking and context switching do not cross the kernel boundary, so there is less restriction on programming styles that encourages large numbers of threads performing medium-sized work to facilitate load balancing by the runtime~\cite{Verch12}.
     As well, user-threading facilitates a simpler concurrency approach using thread objects that leverage sequential patterns versus events with call-backs~\cite{Adya02,vonBehren03}.
     
    @@ -335 +338 @@
     One solution is low-level qualifiers and functions, \eg @volatile@ and atomics, allowing \emph{programmers} to explicitly write safe, race-free~\cite{Boehm12} programs.
     A safer solution is high-level language constructs so the \emph{compiler} knows the concurrency boundaries, \ie where mutual exclusion and synchronization are acquired and released, and provide implicit safety at and across these boundaries.
    -While the optimization problem is best known with respect to concurrency, it applies to other complex control-flow, like exceptions and coroutines.
    +While the optimization problem is best known with respect to concurrency, it applies to other complex control-flows like exceptions and coroutines.
     As well, language solutions allow matching the language paradigm with the approach, \eg matching the functional paradigm with data-flow programming or the imperative paradigm with thread programming.

     
    @@ -346 +349 @@
     However, spurious wakeup is \emph{not} a foundational concurrency property~\cite[\S~9]{Buhr05a};
     it is a performance design choice.
    -We argue removing spurious wakeup and signals-as-hints make concurrent programming simpler and safer as there is less local non-determinism to manage.
    +We argue removing spurious wakeup and signals-as-hints make concurrent programming simpler and safer as there is less local nondeterminism to manage.
     If barging acquisition is allowed, its specialized performance advantage should be available as an option not the default.

     
    @@ -375 +378 @@

     % \item
    -% a non-blocking I/O library
    +% a nonblocking I/O library

     \item
     
    @@ -404 +407 @@
     \begin{description}[leftmargin=\parindent,topsep=3pt,parsep=0pt]
     \item[\newterm{execution state}:]
    -is the state information needed by a control-flow feature to initialize and manage both compute data and execution location(s), and de-initialize.
    +It is the state information needed by a control-flow feature to initialize and manage both compute data and execution location(s), and de-initialize.
     For example, calling a function initializes a stack frame including contained objects with constructors, manages local data in blocks and return locations during calls, and de-initializes the frame by running any object destructors and management operations.
     State is retained in fixed-sized aggregate structures (objects) and dynamic-sized stack(s), often allocated in the heap(s) managed by the runtime system.
     
    @@ -413 +416 @@

     \item[\newterm{threading}:]
    -is execution of code that occurs independently of other execution, where an individual thread's execution is sequential.
    +It is execution of code that occurs independently of other execution, where an individual thread's execution is sequential.
     Multiple threads provide \emph{concurrent execution};
     concurrent execution becomes parallel when run on multiple processing units, \eg hyper-threading, cores, or sockets.
     
    @@ -419 +422 @@

     \item[\newterm{mutual-exclusion / synchronization (MES)}:]
    -is the concurrency mechanism to perform an action without interruption and establish timing relationships among multiple threads.
    +It is the concurrency mechanism to perform an action without interruption and establish timing relationships among multiple threads.
     We contented these two properties are independent, \ie mutual exclusion cannot provide synchronization and vice versa without introducing additional threads~\cite[\S~4]{Buhr05a}.
    -Limiting MES functionality results in contrived solutions and inefficiency on multi-core von Neumann computers where shared memory is a foundational aspect of its design.
    +Limiting MES functionality results in contrived solutions and inefficiency on multicore von Neumann computers where shared memory is a foundational aspect of its design.
     \end{description}
     These properties are fundamental as they cannot be built from existing language features, \eg a basic programming language like C99~\cite{C99} cannot create new control-flow features, concurrency, or provide MES without (atomic) hardware mechanisms.


    -\subsection{Structuring Execution Properties}
    +\subsection{Structuring execution properties}

     Programming languages seldom present the fundamental execution properties directly to programmers.
     
    @@ -447 +450 @@
     \vspace*{-5pt}
     \begin{tabular}{c|c||l|l}
    -\multicolumn{2}{c||}{execution properties} & \multicolumn{2}{c}{mutual exclusion / synchronization} \\
    +\multicolumn{2}{c||}{Execution properties} & \multicolumn{2}{c}{Mutual exclusion / synchronization} \\
     \hline
     stateful                        & thread        & \multicolumn{1}{c|}{No} & \multicolumn{1}{c}{Yes} \\
     
    @@ -470 +473 @@
     Structures are a foundational mechanism for data organization, and access functions provide interface abstraction and code sharing in all programming languages.
     Case 2 is case 1 with thread safety to a structure's state where access functions provide serialization (mutual exclusion) and scheduling among calling threads (synchronization).
    -A @mutex@ structure, often called a \newterm{monitor}, provides a high-level interface for race-free access of shared data in concurrent programming-languages.
    +A @mutex@ structure, often called a \newterm{monitor}, provides a high-level interface for race-free access of shared data in concurrent programming languages.
     Case 3 is case 1 where the structure can implicitly retain execution state and access functions use this execution state to resume/suspend across \emph{callers}, but resume/suspend does not retain a function's local state.
     A stackless structure, often called a \newterm{generator} or \emph{iterator}, is \newterm{stackless} because it still borrows the caller's stack and thread, but the stack is used only to preserve state across its callees not callers.
    -Generators provide the first step toward directly solving problems like finite-state machines that retain data and execution state between calls, whereas normal functions restart on each call.
    +Generators provide the first step toward directly solving problems like finite-state machines (FSMs) that retain data and execution state between calls, whereas normal functions restart on each call.
     Case 4 is cases 2 and 3 with thread safety during execution of the generator's access functions.
     A @mutex@ generator extends generators into the concurrent domain.
     
    @@ -488 +491 @@
     Given the execution-properties taxonomy, programmers now ask three basic questions: is state necessary across callers and how much, is a separate thread necessary, is thread safety necessary.
     Table~\ref{t:ExecutionPropertyComposition} then suggests the optimal language feature needed for implementing a programming problem.
    -The following sections describe how \CFA fills in \emph{all} the non-rejected table entries with language features, while other programming languages may only provide a subset of the table.
    -
    -
    -\subsection{Design Requirements}
    +The following sections describe how \CFA fills in \emph{all} the nonrejected table entries with language features, while other programming languages may only provide a subset of the table.
    +
    +
    +\subsection{Design requirements}

     The following design requirements largely stem from building \CFA on top of C.
     
    @@ -497 +500 @@
     \item
     All communication must be statically type checkable for early detection of errors and efficient code generation.
    -This requirement is consistent with the fact that C is a statically-typed programming-language.
    +This requirement is consistent with the fact that C is a statically typed programming language.

     \item
     
    @@ -505 +508 @@

     \item
    -All communication is performed using function calls, \ie data is transmitted from argument to parameter and results are returned from function calls.
    +All communication is performed using function calls, \ie data are transmitted from argument to parameter and results are returned from function calls.
     Alternative forms of communication, such as call-backs, message passing, channels, or communication ports, step outside of C's normal form of communication.

     
    @@ -528 +531 @@


    -\subsection{Asynchronous Await / Call}
    +\subsection{Asynchronous await / call}

     Asynchronous await/call is a caller mechanism for structuring programs and/or increasing concurrency, where the caller (client) postpones an action into the future, which is subsequently executed by a callee (server).
     
    @@ -540 +543 @@
     Specifically, control between caller and callee occurs indirectly through the event-engine precluding direct handoff and cycling among events, and requires complex resolution of a control promise and data.
     Note, @async-await@ is just syntactic-sugar over the event engine so it does not solve these deficiencies.
    -For multi-threaded languages like Java, the asynchronous call queues a callee action with an executor (server), which subsequently executes the work by a thread in the executor thread-pool.
    +For multithreaded languages like Java, the asynchronous call queues a callee action with an executor (server), which subsequently executes the work by a thread in the executor thread-pool.
     The problem is when concurrent work-units need to interact and/or block as this effects the executor by stopping threads.
     While it is possible to extend this approach to support the necessary mechanisms, \eg message passing in Actors, we show monitors and threads provide an equally competitive approach that does not deviate from normal call communication and can be used to build asynchronous call, as is done in Java.
     
    @@ -548 +551 @@
     \label{s:StatefulFunction}

    -A \emph{stateful function} has the ability to remember state between calls, where state can be either data or execution, \eg plugin, device driver, finite-state machine (FSM).
    +A \emph{stateful function} has the ability to remember state between calls, where state can be either data or execution, \eg plugin, device driver, FSM.
     A simple technique to retain data state between calls is @static@ declarations within a function, which is often implemented by hoisting the declarations to the global scope but hiding the names within the function using name mangling.
     However, each call starts the function at the top making it difficult to determine the last point of execution in an algorithm, and requiring multiple flag variables and testing to reestablish the continuation point.
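
    For context on the technique in the hunk above (data-state retention via a @static@ declaration), a minimal C sketch, not taken from the paper; the function name is illustrative:

        // data state survives between calls, but execution always restarts at the top
        int serial_number( void ) {
            static int counter = 0;     // effectively a hidden global with a mangled name
            counter += 1;
            return counter;
        }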
     
    @@ -606 +609 @@
     \end{tabular}
     \end{center}
    -\CFA's preferred presentation model for generators/coroutines/threads is a hybrid of functions and classes, giving an object-oriented flavour.
    +\CFA's preferred presentation model for generators/coroutines/threads is a hybrid of functions and classes, giving an object-oriented flavor.
     Essentially, the generator/coroutine/thread function is semantically coupled with a generator/coroutine/thread custom type via the type's name.
     The custom type solves several issues, while accessing the underlying mechanisms used by the custom types is still allowed for flexibility reasons.
     
    @@ -621 +624 @@
     The \CFA \lstinline|with| clause opens an aggregate scope making its fields directly accessible, like Pascal \lstinline|with|, but using parallel semantics;
     multiple aggregates may be opened.
    -\CFA has rebindable references \lstinline|int i, & ip = i, j; `&ip = &j;`| and non-rebindable references \lstinline|int i, & `const` ip = i, j; `&ip = &j;` // disallowed|.
    +\CFA has rebindable references \lstinline|int i, & ip = i, j; `&ip = &j;`| and nonrebindable references \lstinline|int i, & `const` ip = i, j; `&ip = &j;` // disallowed|.
     }%

     
    @@ -803 +806 @@
     called a \emph{generator main} (leveraging the starting semantics for program @main@ in C), which is connected to the generator type via its single reference parameter.
     The generator main contains @suspend@ statements that suspend execution without ending the generator versus @return@.
    -For the Fibonacci generator-main,
    -the top initialization state appears at the start and the middle execution state is denoted by statement @suspend@.
    +For the Fibonacci generator-main, the top initialization state appears at the start and the middle execution state is denoted by statement @suspend@.
     Any local variables in @main@ \emph{are not retained} between calls;
     hence local variables are only for temporary computations \emph{between} suspends.
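
    A hedged \CFA sketch of the generator shape this hunk describes (the paper's own Fibonacci figure is not part of this changeset, so the names and details below are illustrative):

        generator Fib { int fn, fn1; };                     // data state retained between resumes
        void main( Fib & f ) with( f ) {                    // generator main, tied to Fib by its one reference parameter
            fn = 0; fn1 = 1;                                // top initialization state
            for ( ;; ) {
                suspend;                                    // middle execution state: return to resumer here
                int fn2 = fn + fn1; fn1 = fn; fn = fn2;     // fn2 is temporary, used only between suspends
            }
        }
        int next( Fib & f ) { resume( f ); return f.fn; }   // type-safe interface function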
     
    @@ -816 +818 @@
     At the start of the generator main, the @static@ declaration, @states@, is initialized to the N suspend points in the generator, where operator @&&@ dereferences or references a label~\cite{gccValueLabels}.
     Next, the computed @goto@ selects the last suspend point and branches to it.
    -The  cost of setting @restart@ and branching via the computed @goto@ adds very little cost to the suspend and resume calls.
    +The cost of setting @restart@ and branching via the computed @goto@ adds very little cost to the suspend and resume calls.

     An advantage of the \CFA explicit generator type is the ability to allow multiple type-safe interface functions taking and returning arbitrary types.
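
    A hedged C sketch of the restart mechanism described in this hunk, using the GCC labels-as-values extension (@&&@ and computed @goto@); the field and label names are illustrative, not the paper's:

        typedef struct { int restart, fn, fn1; } Fib;       // data state plus suspend-point index
        int fib_resume( Fib * f ) {
            static void * states[] = { &&s0, &&s1 };        // the N suspend points (GCC extension)
            goto *states[f->restart];                       // branch to the last suspend point
          s0:                                               // top initialization state
            f->fn = 0; f->fn1 = 1;
            f->restart = 1;                                 // "suspend": record the resume point ...
            return f->fn;                                   // ... and return to the resumer
          s1:                                               // middle execution state
            for ( ;; ) {
                int fn2 = f->fn + f->fn1; f->fn1 = f->fn; f->fn = fn2;
                return f->fn;                               // restart stays 1, so the loop continues on the next call
            }
        }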
     
    @@ -933 +935 @@
     \ldots\, STX \ldots\, message \ldots\, ESC ETX \ldots\, message \ldots\, ETX 2-byte crc \ldots
     \end{center}
    -where the network message begins with the control character STX, ends with an ETX, and is followed by a 2-byte cyclic-redundancy check.
    +where the network message begins with the control character STX, ends with an ETX, and is followed by a two-byte cyclic-redundancy check.
     Control characters may appear in a message if preceded by an ESC.
     When a message byte arrives, it triggers an interrupt, and the operating system services the interrupt by calling the device driver with the byte read from a hardware register.
     
    @@ -1083 +1085 @@
     Figure~\ref{f:CPingPongSim} shows the C implementation of the \CFA symmetric generator, where there is still only one additional field, @restart@, but @resume@ is more complex because it does a forward rather than backward jump.
     Before the jump, the parameter for the next call @partner@ is placed into the register used for the first parameter, @rdi@, and the remaining registers are reset for a return.
    -The @jmp comain@ restarts the function but with a different parameter, so the new call's behaviour depends on the state of the coroutine type, i.e., branch to restart location with different data state.
    -While the semantics of call forward is a tail-call optimization, which compilers perform, the generator state is different on each call rather a common state for a tail-recursive function (i.e., the parameter to the function never changes during the forward calls.
    +The @jmp comain@ restarts the function but with a different parameter, so the new call's behavior depends on the state of the coroutine type, \ie branch to restart location with different data state.
    +While the semantics of call forward is a tail-call optimization, which compilers perform, the generator state is different on each call rather a common state for a tail-recursive function (\ie the parameter to the function never changes during the forward calls).
     However, this assembler code depends on what entry code is generated, specifically if there are local variables and the level of optimization.
     Hence, internal compiler support is necessary for any forward call or backwards return, \eg LLVM has various coroutine support~\cite{CoroutineTS}, and \CFA can leverage this support should it eventually fork @clang@.
     
    @@ -1157 +1159 @@
     \end{cfa}
     A call to this function is placed at the end of the device driver's coroutine-main.
    -For complex finite-state machines, refactoring is part of normal program abstraction, especially when code is used in multiple places.
    +For complex FSMs, refactoring is part of normal program abstraction, especially when code is used in multiple places.
     Again, this complexity is usually associated with execution state rather than data state.

     
    @@ -1446 +1448 @@


    -\subsection{Generator / Coroutine Implementation}
    +\subsection{Generator / coroutine implementation}

     A significant implementation challenge for generators and coroutines (and threads in Section~\ref{s:threads}) is adding extra fields to the custom types and related functions, \eg inserting code after/before the coroutine constructor/destructor and @main@ to create/initialize/de-initialize/destroy any extra fields, \eg the coroutine stack.
    -There are several solutions to this problem, which follow from the object-oriented flavour of adopting custom types.
    +There are several solutions to this problem, which follow from the object-oriented flavor of adopting custom types.

     For object-oriented languages, inheritance is used to provide extra fields and code via explicit inheritance:
     
    @@ -1670 +1672 @@


    -\subsection{Thread Implementation}
    +\subsection{Thread implementation}

     Threads in \CFA are user level run by runtime kernel threads (see Section~\ref{s:CFARuntimeStructure}), where user threads provide concurrency and kernel threads provide parallelism.
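
    A hedged \CFA sketch of the custom @thread@ type mentioned in this hunk: a thread's main starts implicitly when the object is constructed and is joined implicitly when it is destroyed at block exit. The header names and output statement are assumptions, not taken from the paper:

        #include <thread.hfa>                       // assumed header name
        #include <fstream.hfa>                      // assumed header name, for sout
        thread Worker {};                           // custom thread type
        void main( Worker & w ) {                   // thread main
            sout | "worker running";
        }
        int main() {
            Worker workers[4];                      // four user threads start here
        }                                           // implicit join of all four at block exit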
     
    @@ -1806 +1808 @@
     \CFA designated functions are marked by an explicitly parameter-only pointer/reference qualifier @mutex@ (discussed further in Section\ref{s:MutexAcquisition}).
     Whereas, Java designated members are marked with \lstinline[language=java]|synchronized| that applies to the implicit reference parameter @this@.
    -In the example, the increment and setter operations need mutual exclusion while the read-only getter operation can be non-mutex if reading the implementation is atomic.
    -
    -
    -\subsection{Monitor Implementation}
    +In the example, the increment and setter operations need mutual exclusion while the read-only getter operation can be nonmutex if reading the implementation is atomic.
    +
    +
    +\subsection{Monitor implementation}

     For the same design reasons, \CFA provides a custom @monitor@ type and a @trait@ to enforce and restrict the monitor-interface functions.
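
    A hedged \CFA sketch of the mutex/nonmutex split described in this hunk (the type and function names are illustrative, not the paper's figure):

        monitor Aint { int cnt; };                          // shared-counter monitor
        void set( Aint & mutex a, int val ) { a.cnt = val; }    // mutation: mutex required
        void inc( Aint & mutex a ) { a.cnt += 1; }
        int  get( Aint & a ) { return a.cnt; }              // read-only getter: nonmutex, assuming an int load is atomic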
     
    @@ -1835 +1837 @@


    -\subsection{Mutex Acquisition}
    +\subsection{Mutex acquisition}
     \label{s:MutexAcquisition}

     
    @@ -1850 +1852 @@
     Because of the statically unknown size, \CFA only supports a single reference @mutex@ parameter, @f1@.

    -The \CFA @mutex@ qualifier does allow the ability to support multi-monitor functions,\footnote{
    +The \CFA @mutex@ qualifier does allow the ability to support multimonitor functions,\footnote{
     While object-oriented monitors can be extended with a mutex qualifier for multiple-monitor members, no prior example of this feature could be found.}
     where the number of acquisitions is statically known, called \newterm{bulk acquire}.
     
    @@ -1980 +1982 @@
     % There are many aspects of scheduling in a concurrency system, all related to resource utilization by waiting threads, \ie which thread gets the resource next.
     % Different forms of scheduling include access to processors by threads (see Section~\ref{s:RuntimeStructureCluster}), another is access to a shared resource by a lock or monitor.
    -This section discusses scheduling for waiting threads eligible for monitor entry~\cite{Buhr95b}, \ie which user thread gets the shared resource next. (See Section~\ref{s:RuntimeStructureCluster} for scheduling kernel threads on virtual processors.)
    +This section discusses scheduling for waiting threads eligible for monitor entry~\cite{Buhr95b}, \ie which user thread gets the shared resource next.
    +(See Section~\ref{s:RuntimeStructureCluster} for scheduling kernel threads on virtual processors.)
     While monitor mutual-exclusion provides safe access to its shared data, the data may indicate a thread cannot proceed, \eg a bounded buffer may be full/\-empty so produce/consumer threads must block.
     Leaving the monitor and retrying (busy waiting) is impractical for high-level programming.
     
    @@ -1991 +1994 @@
     For complex scheduling, the approaches can be combined, so there are threads waiting inside and outside.

    -\CFA monitors do not allow calling threads to barge ahead of signalled threads via barging prevention, which simplifies synchronization among threads in the monitor and increases correctness.
    +\CFA monitors do not allow calling threads to barge ahead of signaled threads via barging prevention, which simplifies synchronization among threads in the monitor and increases correctness.
     A direct consequence of this semantics is that unblocked waiting threads are not required to recheck the waiting condition, \ie waits are not in a starvation-prone busy-loop as required by the signals-as-hints style with barging.
     Preventing barging comes directly from Hoare's semantics in the seminal paper on monitors~\cite[p.~550]{Hoare74}.
     % \begin{cquote}
     % However, we decree that a signal operation be followed immediately by resumption of a waiting program, without possibility of an intervening procedure call from yet a third program.
    -% It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signalling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74}
    +% It is only in this way that a waiting program has an absolute guarantee that it can acquire the resource just released by the signaling program without any danger that a third program will interpose a monitor entry and seize the resource instead.~\cite[p.~550]{Hoare74}
     % \end{cquote}
     Furthermore, \CFA concurrency has no spurious wakeup~\cite[\S~9]{Buhr05a}, which eliminates an implicit self barging.

    -Monitor mutual-exclusion means signalling cannot have the signaller and signalled thread in the monitor simultaneously, so only the signaller or signallee can proceed and the other waits on an implicit urgent list~\cite[p.~551]{Hoare74}.
    +Monitor mutual-exclusion means signaling cannot have the signaller and signaled thread in the monitor simultaneously, so only the signaller or signallee can proceed and the other waits on an implicit urgent list~\cite[p.~551]{Hoare74}.
     Figure~\ref{f:MonitorScheduling} shows internal and external scheduling for the bounded-buffer examples in Figure~\ref{f:GenericBoundedBuffer}.
     For internal scheduling in Figure~\ref{f:BBInt}, the @signal@ moves the signallee, front thread of the specified condition queue, to the urgent list (see Figure~\ref{f:MonitorScheduling}) and the signaller continues (solid line).
     Multiple signals move multiple signallees to urgent until the condition queue is empty.
     When the signaller exits or waits, a thread is implicitly unblocked from urgent, if available, before unblocking a calling thread to prevent barging.
    -(Java conceptually moves the signalled thread to the calling queue, and hence, allows barging.)
    -Signal is used when the signaller is providing the cooperation needed by the signallee, \eg creating an empty slot in a buffer for a producer, and the signaller immediately exits the monitor to run concurrently consuming the buffer element, and passes control of the monitor to the signalled thread, which can immediately take advantage of the state change.
    +(Java conceptually moves the signaled thread to the calling queue, and hence, allows barging.)
    +Signal is used when the signaller is providing the cooperation needed by the signallee, \eg creating an empty slot in a buffer for a producer, and the signaller immediately exits the monitor to run concurrently consuming the buffer element, and passes control of the monitor to the signaled thread, which can immediately take advantage of the state change.
     Specifically, the @wait@ function atomically blocks the calling thread and implicitly releases the monitor lock(s) for all monitors in the function's parameter list.
    -Signalling is unconditional because signalling an empty condition queue does nothing.
    +Signalling is unconditional because signaling an empty condition queue does nothing.
     It is common to declare condition queues as monitor fields to prevent shared access, hence no locking is required for access as the queues are protected by the monitor lock.
     In \CFA, a condition queue can be created and stored independently.
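
    A hedged \CFA sketch of the internal-scheduling pattern this hunk describes, in the style of the bounded buffer it references (Figure~\ref{f:GenericBoundedBuffer} itself is not part of this changeset, so the code below is illustrative, not the paper's figure):

        monitor Buffer {
            condition notFull, notEmpty;                    // condition queues stored as monitor fields
            int elems[10], front, back, count;
        };
        void ?{}( Buffer & buf ) { buf.front = 0; buf.back = 0; buf.count = 0; }    // constructor
        void insert( Buffer & mutex buf, int elem ) with( buf ) {
            if ( count == 10 ) wait( notFull );             // no barging, so no re-check loop is needed
            elems[back] = elem; back = ( back + 1 ) % 10; count += 1;
            signal( notEmpty );                             // unconditional: signaling an empty queue is a no-op
        }
        int remove( Buffer & mutex buf ) with( buf ) {
            if ( count == 0 ) wait( notEmpty );
            int elem = elems[front]; front = ( front + 1 ) % 10; count -= 1;
            signal( notFull );
            return elem;
        }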
     
    @@ -2146 +2149 @@
     	rcnt += 1;
     	if ( ! empty(RWers) && `front(RWers) == READER` )
    -		`signal( RWers )`;  // daisy-chain signalling
    +		`signal( RWers )`;  // daisy-chain signaling
     }
     void StartWrite( ReadersWriter & mutex rw ) with(rw) {
     
    @@ -2201 +2204 @@

     Finally, external scheduling requires urgent to be a stack, because the signaller expects to execute immediately after the specified monitor call has exited or waited.
    -Internal schedulling performing multiple signalling results in unblocking from urgent in the reverse order from signalling.
    +Internal scheduling performing multiple signaling results in unblocking from urgent in the reverse order from signaling.
     It is rare for the unblocking order to be important as an unblocked thread can be time-sliced immediately after leaving the monitor.
    -If the unblocking order is important, multiple signalling can be restructured into daisy-chain signalling, where each thread signals the next thread.
    -Hence, \CFA uses a single urgent stack to correctly handle @waitfor@ and adequately support both forms of signalling.
    +If the unblocking order is important, multiple signaling can be restructured into daisy-chain signaling, where each thread signals the next thread.
    +Hence, \CFA uses a single urgent stack to correctly handle @waitfor@ and adequately support both forms of signaling.
     (Advanced @waitfor@ features are discussed in Section~\ref{s:ExtendedWaitfor}.)

     
    @@ -2270 +2273 @@
     \end{figure}

    -Figure~\ref{f:DatingServiceMonitor} shows a dating service demonstrating non-blocking and blocking signalling.
    +Figure~\ref{f:DatingServiceMonitor} shows a dating service demonstrating nonblocking and blocking signaling.
     The dating service matches girl and boy threads with matching compatibility codes so they can exchange phone numbers.
     A thread blocks until an appropriate partner arrives.
     
    @@ -2315 +2318 @@
     \end{cquote}
     For @wait( e )@, the default semantics is to atomically block the signaller and release all acquired mutex parameters, \ie @wait( e, m1, m2 )@.
    -To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@.
    +To override the implicit multimonitor wait, specific mutex parameter(s) can be specified, \eg @wait( e, m1 )@.
     Wait cannot statically verify the released monitors are the acquired mutex-parameters without disallowing separately compiled helper functions calling @wait@.
     While \CC supports bulk locking, @wait@ only accepts a single lock for a condition queue, so bulk locking with condition queues is asymmetric.
     
    @@ -2324 +2327 @@
     }
     \end{cfa}
    -must have acquired at least the same locks as the waiting thread signalled from a condition queue to allow the locks to be passed, and hence, prevent barging.
    +must have acquired at least the same locks as the waiting thread signaled from a condition queue to allow the locks to be passed, and hence, prevent barging.

    @@ -2328 +2331 @@
     Similarly, for @waitfor( rtn )@, the default semantics is to atomically block the acceptor and release all acquired mutex parameters, \ie @waitfor( rtn : m1, m2 )@.
    -To override the implicit multi-monitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn : m1 )@.
    +To override the implicit multimonitor wait, specific mutex parameter(s) can be specified, \eg @waitfor( rtn : m1 )@.
     @waitfor@ does statically verify the monitor types passed are the same as the acquired mutex-parameters of the given function or function pointer, hence the prototype must be accessible.
     % When an overloaded function appears in an @waitfor@ statement, calls to any function with that name are accepted.
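
    A hedged \CFA sketch of @waitfor@ used for external scheduling, as an alternative to the condition-queue Buffer sketch earlier (it replaces, not supplements, that version); prototypes are shown because, as stated in this hunk, the accepted function's prototype must be accessible:

        void insert( Buffer & mutex buf, int elem );        // prototypes visible to waitfor
        int  remove( Buffer & mutex buf );
        void insert( Buffer & mutex buf, int elem ) with( buf ) {
            if ( count == 10 ) waitfor( remove : buf );     // accept a remove call, then continue from urgent
            elems[back] = elem; back = ( back + 1 ) % 10; count += 1;
        }
        int remove( Buffer & mutex buf ) with( buf ) {
            if ( count == 0 ) waitfor( insert : buf );
            int elem = elems[front]; front = ( front + 1 ) % 10; count -= 1;
            return elem;
        }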
     
    @@ -2345 +2348 @@
     	... signal( `e` ); ...
     \end{cfa}
    -The @wait@ only releases @m1@ so the signalling thread cannot acquire @m1@ and @m2@ to enter @bar@ and @signal@ the condition.
    +The @wait@ only releases @m1@ so the signaling thread cannot acquire @m1@ and @m2@ to enter @bar@ and @signal@ the condition.
     While deadlock can occur with multiple/nesting acquisition, this is a consequence of locks, and by extension monitor locking is not perfectly composable.

     
    @@ -2409 +2412 @@


    -\subsection{Bulk Barging Prevention}
    -
    -Figure~\ref{f:BulkBargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signalling semantics.
    +\subsection{Bulk barging prevention}
    +
    +Figure~\ref{f:BulkBargingPrevention} shows \CFA code where bulk acquire adds complexity to the internal-signaling semantics.
     The complexity begins at the end of the inner @mutex@ statement, where the semantics of internal scheduling need to be extended for multiple monitors.
     The problem is that bulk acquire is used in the inner @mutex@ statement where one of the monitors is already acquired.
    -When the signalling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting threads to prevent barging into the outer @mutex@ statement by another thread.
    -However, both the signalling and waiting threads W1 and W2 need some subset of monitors @m1@ and @m2@.
    +When the signaling thread reaches the end of the inner @mutex@ statement, it should transfer ownership of @m1@ and @m2@ to the waiting threads to prevent barging into the outer @mutex@ statement by another thread.
    +However, both the signaling and waiting threads W1 and W2 need some subset of monitors @m1@ and @m2@.
     \begin{cquote}
     condition c: (order 1) W2(@m2@), W1(@m1@,@m2@)\ \ \ or\ \ \ (order 2) W1(@m1@,@m2@), W2(@m2@) \\
     
    @@ -2482 +2485 @@
     \end{figure}

    -One scheduling solution is for the signaller S to keep ownership of all locks until the last lock is ready to be transferred, because this semantics fits most closely to the behaviour of single-monitor scheduling.
    +One scheduling solution is for the signaller S to keep ownership of all locks until the last lock is ready to be transferred, because this semantics fits most closely to the behavior of single-monitor scheduling.
     However, this solution is inefficient if W2 waited first and immediate passed @m2@ when released, while S retains @m1@ until completion of the outer mutex statement.
     If W1 waited first, the signaller must retain @m1@ amd @m2@ until completion of the outer mutex statement and then pass both to W1.
     
    @@ -2495 +2498 @@

     \label{s:waitforImplementation}

    -In a statically-typed object-oriented programming language, a class has an exhaustive list of members, even when members are added via static inheritance (see Figure~\ref{f:uCinheritance}).
    +In a statically typed object-oriented programming language, a class has an exhaustive list of members, even when members are added via static inheritance (see Figure~\ref{f:uCinheritance}).
     Knowing all members at compilation, even separate compilation, allows uniquely numbered them so the accept-statement implementation can use a fast and compact bit mask with $O(1)$ compare.
     
    @@ -2539 +2542 @@
     \hspace{3pt}
     \subfloat[\CFA]{\label{f:CFinheritance}\usebox\myboxB}
    -\caption{Member / Function visibility}
    +\caption{Member / function visibility}
     \label{f:MemberFunctionVisibility}
     \end{figure}

    -However, the @waitfor@ statement in translation unit 2 (see Figure~\ref{f:CFinheritance}) cannot see function @g@ in translation unit 1 precluding a unique numbering for a bit-mask because the monitor type only carries the protected shared-data.
    +However, the @waitfor@ statement in translation unit 2 (see Figure~\ref{f:CFinheritance}) cannot see function @g@ in translation unit 1 precluding a unique numbering for a bit-mask because the monitor type only carries the protected shared data.
     (A possible way to construct a dense mapping is at link or load-time.)
     Hence, function pointers are used to identify the functions listed in the @waitfor@ statement, stored in a variable-sized array.
     
    @@ -2550 +2553 @@


    -\subsection{Multi-Monitor Scheduling}
    +\subsection{Multimonitor scheduling}
     \label{s:Multi-MonitorScheduling}

    -External scheduling, like internal scheduling, becomes significantly more complex for multi-monitor semantics.
    +External scheduling, like internal scheduling, becomes significantly more complex for multimonitor semantics.
     Even in the simplest case, new semantics need to be established.
     \begin{cfa}
     
    @@ -2565 +2568 @@
     \end{cfa}
     Both locks are acquired by function @g@, so when function @f@ is called, the lock for monitor @m2@ is passed from @g@ to @f@, while @g@ still holds lock @m1@.
    -This behaviour can be extended to the multi-monitor @waitfor@ statement.
    +This behavior can be extended to the multimonitor @waitfor@ statement.
     \begin{cfa}
     monitor M { ... };
     
    @@ -2574 +2577 @@
     % Also, the order of the monitors in a @waitfor@ statement must match the order of the mutex parameters.

    -Figure~\ref{f:UnmatchedMutexSets} shows internal and external scheduling with multiple monitors that must match exactly with a signalling or accepting thread, \ie partial matching results in waiting.
    +Figure~\ref{f:UnmatchedMutexSets} shows internal and external scheduling with multiple monitors that must match exactly with a signaling or accepting thread, \ie partial matching results in waiting.
     In both cases, the set of monitors is disjoint so unblocking is impossible.

     
    @@ -2766 +2769 @@


    -\subsection{\texorpdfstring{\protect\lstinline@mutex@ Generators / Coroutines / Threads}{monitor Generators / Coroutines / Threads}}
    +\subsection{\texorpdfstring{\protect\lstinline@mutex@ Generators / coroutines / threads}{monitor Generators / coroutines / threads}}

     \CFA generators, coroutines, and threads can also be @mutex@ (Table~\ref{t:ExecutionPropertyComposition} cases 4, 6, 12) allowing safe \emph{direct communication} with threads, \ie the custom types can have mutex functions that are called by other threads.
     
    @@ -2808 +2811 @@
     %
     %
    -% \subsection{User Threads}
    +% \subsection{User threads}
     %
     % A direct improvement on kernel threads is user threads, \eg Erlang~\cite{Erlang} and \uC~\cite{uC++book}.
     
    @@ -2823 +2826 @@

     \begin{comment}
    -\subsection{Thread Pools}
    +\subsection{Thread pools}

     In contrast to direct threading is indirect \newterm{thread pools}, \eg Java @executor@, where small jobs (work units) are inserted into a work pool for execution.
     
    @@ -2902 +2905 @@
     The purpose of a cluster is to control the amount of parallelism that is possible among threads, plus scheduling and other execution defaults.
     The default cluster-scheduler is single-queue multi-server, which provides automatic load-balancing of threads on processors.
    -However, the design allows changing the scheduler, \eg multi-queue multi-server with work-stealing/sharing across the virtual processors.
    +However, the design allows changing the scheduler, \eg multi-queue multiserver with work-stealing/sharing across the virtual processors.
     If several clusters exist, both threads and virtual processors, can be explicitly migrated from one cluster to another.
     No automatic load balancing among clusters is performed by \CFA.
     
    @@ -2912 +2915 @@


    -\subsection{Virtual Processor}
    +\subsection{Virtual processor}
     \label{s:RuntimeStructureProcessor}

     
    @@ -2935 +2938 @@
     This storage is allocated at the base of a thread's stack before blocking, which means programmers must add a small amount of extra space for stacks.

    -In \CFA, ordering of monitor acquisition relies on memory ordering to prevent deadlock~\cite{Havender68}, because all objects have distinct non-overlapping memory layouts, and mutual-exclusion for a monitor is only defined for its lifetime.
    +In \CFA, ordering of monitor acquisition relies on memory ordering to prevent deadlock~\cite{Havender68}, because all objects have distinct nonoverlapping memory layouts, and mutual-exclusion for a monitor is only defined for its lifetime.
     When a mutex call is made, pointers to the concerned monitors are aggregated into a variable-length array and sorted.
     This array persists for the entire duration of the mutual exclusion and is used extensively for synchronization operations.
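
    The acquisition-ordering scheme in this hunk can be illustrated with a small C sketch; pthread locks stand in for the \CFA runtime's own monitor locks, purely for illustration:

        #include <stdint.h>
        #include <stdlib.h>
        #include <pthread.h>

        static int cmp_addr( const void * a, const void * b ) {
            uintptr_t x = (uintptr_t)*(void * const *)a, y = (uintptr_t)*(void * const *)b;
            return ( x > y ) - ( x < y );                   // total order on object addresses
        }
        void acquire_all( pthread_mutex_t * locks[], size_t n ) {
            qsort( locks, n, sizeof locks[0], cmp_addr );   // sort the variable-length array once
            for ( size_t i = 0; i < n; i += 1 )
                pthread_mutex_lock( locks[i] );             // acquiring in address order prevents deadlock
        }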
     
    29542957
     29552958Nondeterministic preemption provides fairness in the presence of long-running threads, and forces concurrent programmers to write more robust programs, rather than relying on the code between cooperative scheduling points being atomic.
    2956 This atomic reliance can fail on multi-core machines, because execution across cores is nondeterministic.
     2959This atomic reliance can fail on multicore machines, because execution across cores is nondeterministic.
     29572960A different reason for not supporting preemption is that it significantly complicates the runtime system, \eg the Windows runtime does not support interrupts, and on Linux systems interrupts are complex (see below).
    29582961Preemption is normally handled by setting a countdown timer on each virtual processor.
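As a rough illustration (not the actual \CFA kernel code), the timer-based approach can be sketched in C as follows; the function and variable names are hypothetical, and in this simplified form the handler only records that a preemption is due, with the context switch deferred to a safe point.
\begin{cfa}
// Illustrative sketch only (not the Cforall kernel code): arm a periodic
// countdown timer delivering SIGALRM; the handler records that a preemption is
// pending, and the runtime performs the context switch at a safe point.
#include <signal.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t preempt_pending = 0;

static void alarm_handler( int sig ) {
	(void)sig;
	preempt_pending = 1;					// switch deferred to a safe point
}
static void start_preemption( long interval_us ) {
	struct sigaction sa;
	memset( &sa, 0, sizeof(sa) );
	sa.sa_handler = alarm_handler;
	sigaction( SIGALRM, &sa, NULL );
	struct itimerval t = { { 0, interval_us }, { 0, interval_us } };	// period, then first expiry
	setitimer( ITIMER_REAL, &t, NULL );		// periodic countdown timer
}
\end{cfa}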
     
    29622965The only issue with this approach is that signal masks from one kernel thread may be restored on another as part of returning from the signal handler;
    29632966therefore, the same signal mask is required for all virtual processors in a cluster.
    2964 Because preemption interval is usually long (1 millisecond) performance cost is negligible.
      2967Because the preemption interval is usually long (1 ms), the performance cost is negligible.
    29652968
    29662969Linux switched a decade ago from specific to arbitrary virtual-processor signal-delivery for applications with multiple kernel threads.
     
    29732976
    29742977
    2975 \subsection{Debug Kernel}
    2976 
    2977 There are two versions of the \CFA runtime kernel: debug and non-debug.
    2978 The debugging version has many runtime checks and internal assertions, \eg stack non-writable guard page, and checks for stack overflow whenever context switches occur among coroutines and threads, which catches most stack overflows.
    2979 After a program is debugged, the non-debugging version can be used to significantly decrease space and increase performance.
     2978\subsection{Debug kernel}
     2979
     2980There are two versions of the \CFA runtime kernel: debug and nondebug.
      2981The debugging version has many runtime checks and internal assertions, \eg a nonwritable stack guard-page, and checks for stack overflow whenever a context switch occurs among coroutines and threads, which catches most stack overflows.
     2982After a program is debugged, the nondebugging version can be used to significantly decrease space and increase performance.
    29802983
    29812984
     
    29842987
     29852988To test the performance of the \CFA runtime, a series of microbenchmarks is used to compare \CFA with pthreads, Java 11.0.6, Go 1.12.6, Rust 1.37.0, Python 3.7.6, Node.js 12.14.1, and \uC 7.0.0.
    2986 For comparison, the package must be multi-processor (M:N), which excludes libdil and libmil~\cite{libdill} (M:1)), and use a shared-memory programming model, \eg not message passing.
      2989For comparison, the package must be multiprocessor (M:N), which excludes libdill and libmill~\cite{libdill} (M:1), and use a shared-memory programming model, \eg not message passing.
    29872990The benchmark computer is an AMD Opteron\texttrademark\ 6380 NUMA 64-core, 8 socket, 2.5 GHz processor, running Ubuntu 16.04.6 LTS, and pthreads/\CFA/\uC are compiled with gcc 9.2.1.
    29882991
    2989 All benchmarks are run using the following harness. (The Java harness is augmented to circumvent JIT issues.)
     2992All benchmarks are run using the following harness.
     2993(The Java harness is augmented to circumvent JIT issues.)
    29902994\begin{cfa}
    29912995#define BENCH( `run` ) uint64_t start = cputime_ns();  `run;`  double result = (double)(cputime_ns() - start) / N;
    29922996\end{cfa}
    29932997where CPU time in nanoseconds is from the appropriate language clock.
    2994 Each benchmark is performed @N@ times, where @N@ is selected so the benchmark runs in the range of 2--20 seconds for the specific programming language;
     2998Each benchmark is performed @N@ times, where @N@ is selected so the benchmark runs in the range of 2--20 s for the specific programming language;
    29952999each @N@ appears after the experiment name in the following tables.
    29963000The total time is divided by @N@ to obtain the average time for a benchmark.
    29973001Each benchmark experiment is run 13 times and the average appears in the table.
    29983002For languages with a runtime JIT (Java, Node.js, Python), a single half-hour long experiment is run to check stability;
    2999 all long-experiment results are statistically equivalent, \ie median/average/standard-deviation correlate with the short-experiment results, indicating the short experiments reached a steady state.
     3003all long-experiment results are statistically equivalent, \ie median/average/SD correlate with the short-experiment results, indicating the short experiments reached a steady state.
    30003004All omitted tests for other languages are functionally identical to the \CFA tests and available online~\cite{CforallConcurrentBenchmarks}.
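For illustration, a hypothetical driver using this harness might look as follows, where @N@, @cputime_ns@, and the measured operation are placeholders supplied by each benchmark rather than the published benchmark code.
\begin{cfa}
// Hypothetical driver for the BENCH harness; N, cputime_ns, and the measured
// operation are placeholders, not the published benchmark code.
#include <stdint.h>
#include <stdio.h>
#define N 100000000ULL

int main() {
	volatile unsigned long long x = 0;
	BENCH( for ( unsigned long long i = 0; i < N; i += 1 ) x += 1 )	// measured operation
	printf( "%.2f ns per operation\n", result );	// result is declared by BENCH
}
\end{cfa}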
    30013005
    3002 \paragraph{Creation}
     3006\subsection{Creation}
    30033007
    30043008Creation is measured by creating and deleting a specific kind of control-flow object.
     
    30303034
    30313035\begin{tabular}[t]{@{}r*{3}{D{.}{.}{5.2}}@{}}
    3032 \multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     3036\multicolumn{1}{@{}r}{Object(N)\hspace*{10pt}} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
    30333037\CFA generator (1B)                     & 0.6           & 0.6           & 0.0           \\
    30343038\CFA coroutine lazy     (100M)  & 13.4          & 13.1          & 0.5           \\
     
    30493053
    30503054\vspace*{-10pt}
    3051 \paragraph{Internal Scheduling}
    3052 
    3053 Internal scheduling is measured using a cycle of two threads signalling and waiting.
     3055\subsection{Internal scheduling}
     3056
     3057Internal scheduling is measured using a cycle of two threads signaling and waiting.
    30543058Figure~\ref{f:schedint} shows the code for \CFA, with results in Table~\ref{t:schedint}.
    30553059Note, the \CFA incremental cost for bulk acquire is a fixed cost for small numbers of mutex objects.
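For reference, a rough pthreads analogue of this two-thread signal/wait cycle is sketched below; it is illustrative only, and the benchmarked pthreads code may differ in detail.
\begin{cfa}
// Sketch of a pthreads analogue of the two-thread signal/wait cycle
// (illustrative only; the benchmarked pthreads code may differ).
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int turn = 0;						// which thread may proceed

static void * partner( void * arg ) {
	int id = (int)(intptr_t)arg;
	for ( int i = 0; i < 10000000; i += 1 ) {
		pthread_mutex_lock( &m );
		while ( turn != id ) pthread_cond_wait( &c, &m );	// wait for my turn
		turn = 1 - id;						// hand off to the other thread
		pthread_cond_signal( &c );
		pthread_mutex_unlock( &m );
	}
	return NULL;
}
int main() {
	pthread_t t0, t1;
	pthread_create( &t0, NULL, partner, (void *)0 );
	pthread_create( &t1, NULL, partner, (void *)1 );
	pthread_join( t0, NULL );  pthread_join( t1, NULL );
}
\end{cfa}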
     
    30933097
    30943098\begin{tabular}{@{}r*{3}{D{.}{.}{5.2}}@{}}
    3095 \multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     3099\multicolumn{1}{@{}r}{Object(N)\hspace*{10pt}} & \multicolumn{1}{c}{Median} & \multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
    30963100\CFA @signal@, 1 monitor (10M)  & 364.4         & 364.2         & 4.4           \\
    30973101\CFA @signal@, 2 monitor (10M)  & 484.4         & 483.9         & 8.8           \\
     
    31063110
    31073111
    3108 \paragraph{External Scheduling}
     3112\subsection{External scheduling}
    31093113
     31103114External scheduling is measured using a cycle of two threads calling and accepting the call via the @waitfor@ statement.
     
    31403144\label{t:schedext}
    31413145\begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
    3142 \multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     3146\multicolumn{1}{@{}r}{Object(N)\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
    31433147\CFA @waitfor@, 1 monitor (10M) & 367.1 & 365.3 & 5.0   \\
    31443148\CFA @waitfor@, 2 monitor (10M) & 463.0 & 464.6 & 7.1   \\
     
    31493153\end{multicols}
    31503154
    3151 \paragraph{Mutual-Exclusion}
      3155\subsection{Mutual exclusion}
    31523156
     31533157Uncontended mutual exclusion, which frequently occurs, is measured by entering and leaving a critical section.
    3154 For monitors, entering and leaving a mutex function is measured, otherwise the language-appropriate mutex-lock is measured.
    3155 For comparison, a spinning (versus blocking) test-and-test-set lock is presented.
     3158For monitors, entering and leaving a mutex function are measured, otherwise the language-appropriate mutex-lock is measured.
     3159For comparison, a spinning (vs.\ blocking) test-and-test-set lock is presented.
    31563160Figure~\ref{f:mutex} shows the code for \CFA with results in Table~\ref{t:mutex}.
    31573161Note the incremental cost of bulk acquire for \CFA, which is largely a fixed cost for small numbers of mutex objects.
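For context, the spinning test-and-test-set lock used as a baseline can be sketched in C11 as follows; this is a sketch of the general technique, not necessarily the exact benchmarked implementation.
\begin{cfa}
// Sketch of a spinning test-and-test-set lock using C11 atomics (the
// benchmarked lock is functionally equivalent but may differ in detail).
#include <stdatomic.h>

typedef struct { atomic_int locked; } ttas_lock;

static void ttas_acquire( ttas_lock * l ) {
	for ( ;; ) {
		while ( atomic_load_explicit( &l->locked, memory_order_relaxed ) )
			;							// test: spin on plain reads to reduce coherence traffic
		if ( ! atomic_exchange_explicit( &l->locked, 1, memory_order_acquire ) )
			return;						// test-and-set: attempt to take the lock
	}
}
static void ttas_release( ttas_lock * l ) {
	atomic_store_explicit( &l->locked, 0, memory_order_release );
}
\end{cfa}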
     
    31763180\label{t:mutex}
    31773181\begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
    3178 \multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     3182\multicolumn{1}{@{}r}{Object(N)\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
    31793183test-and-test-set lock (50M)            & 19.1  & 18.9  & 0.4   \\
    31803184\CFA @mutex@ function, 1 arg. (50M)     & 48.3  & 47.8  & 0.9   \\
     
    31903194\end{multicols}
    31913195
    3192 \paragraph{Context Switching}
     3196\subsection{Context switching}
    31933197
    31943198In procedural programming, the cost of a function call is important as modularization (refactoring) increases.
     
    32373241\label{t:ctx-switch}
    32383242\begin{tabular}{@{}r*{3}{D{.}{.}{3.2}}@{}}
    3239 \multicolumn{1}{@{}r}{N\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
     3243\multicolumn{1}{@{}r}{Object(N)\hspace*{10pt}} & \multicolumn{1}{c}{Median} &\multicolumn{1}{c}{Average} & \multicolumn{1}{c@{}}{Std Dev} \\
    32403244C function (10B)                        & 1.8           & 1.8           & 0.0   \\
    32413245\CFA generator (5B)                     & 1.8           & 2.0           & 0.3   \\
     
    32903294
    32913295\medskip
    3292 \textbf{Flexible Scheduling:}
     3296\textbf{Flexible scheduling:}
    32933297An important part of concurrency is scheduling.
    32943298Different scheduling algorithms can affect performance, both in terms of average and variation.
     
    32963300One solution is to offer various tuning options, allowing the scheduler to be adjusted to the requirements of the workload.
    32973301However, to be truly flexible, a pluggable scheduler is necessary.
    3298 Currently, the \CFA pluggable scheduler is too simple to handle complex scheduling, \eg quality of service and real-time, where the scheduler must interact with mutex objects to deal with issues like priority inversion~\cite{Buhr00b}.
     3302Currently, the \CFA pluggable scheduler is too simple to handle complex scheduling, \eg quality of service and real time, where the scheduler must interact with mutex objects to deal with issues like priority inversion~\cite{Buhr00b}.
    32993303
    33003304\smallskip
     
     33023306Many modern workloads are not bound by computation but by IO operations, common cases being web servers and XaaS~\cite{XaaS} (anything as a service).
     33033307These types of workloads require significant engineering to amortize the cost of blocking IO operations.
    3304 At its core, non-blocking I/O is an operating-system level feature queuing IO operations, \eg network operations, and registering for notifications instead of waiting for requests to complete.
      3308At its core, nonblocking I/O is an operating-system-level feature that queues IO operations, \eg network operations, and registers for notifications instead of waiting for requests to complete.
    33053309Current trends use asynchronous programming like callbacks, futures, and/or promises, \eg Node.js~\cite{NodeJs} for JavaScript, Spring MVC~\cite{SpringMVC} for Java, and Django~\cite{Django} for Python.
    3306 However, these solutions lead to code that is hard to create, read and maintain.
    3307 A better approach is to tie non-blocking I/O into the concurrency system to provide ease of use with low overhead, \eg thread-per-connection web-services.
    3308 A non-blocking I/O library is currently under development for \CFA.
     3310However, these solutions lead to code that is hard to create, read, and maintain.
     3311A better approach is to tie nonblocking I/O into the concurrency system to provide ease of use with low overhead, \eg thread-per-connection web-services.
     3312A nonblocking I/O library is currently under development for \CFA.
    33093313
    33103314\smallskip
    3311 \textbf{Other Concurrency Tools:}
     3315\textbf{Other concurrency tools:}
    33123316While monitors offer flexible and powerful concurrency for \CFA, other concurrency tools are also necessary for a complete multi-paradigm concurrency package.
     33133317Examples of such tools can include futures and promises~\cite{promises}, executors, and actors.
     
    33163320
    33173321\smallskip
    3318 \textbf{Implicit Threading:}
     3322\textbf{Implicit threading:}
     33193323Basic \emph{embarrassingly parallel} applications can benefit greatly from implicit concurrency, where sequential programs are converted to concurrent ones, with some help from pragmas to guide the conversion.
    33203324This type of concurrency can be achieved both at the language level and at the library level.
  • doc/papers/concurrency/annex/local.bib

    re3282fe r4432b52  
    2929    booktitle   = {Supercomputing, 2005. Proceedings of the ACM/IEEE SC 2005 Conference},
    3030    publisher   = {IEEE},
     31    location    = {Seattle, Washington, U.S.A.},
     32    month       = nov,
    3133    year        = {2005},
    3234    pages       = {35-35},
    33     month       = nov,
    3435}
    3536
     
    5859
    5960@manual{Cpp-Transactions,
    60         keywords        = {C++, Transactional Memory},
    61         title           = {Tech. Spec. for C++ Extensions for Transactional Memory},
    62         organization= {International Standard ISO/IEC TS 19841:2015 },
    63         publisher   = {American National Standards Institute},
    64         address = {http://www.iso.org},
    65         year            = 2015,
     61    keywords    = {C++, Transactional Memory},
     62    title       = {Tech. Spec. for C++ Extensions for Transactional Memory {ISO/IEC} {TS} 19841:2015},
     63    organization= {International Standard Organization},
     64    address     = {Geneva, Switzerland},
     65    year        = 2015,
     66    note        = {\href{https://www.iso.org/standard/66343.html}{https://\-www.iso.org/\-standard/\-66343.html}},
    6667}
    6768
     
    109110@manual{affinityLinux,
    110111        key     = {TBB},
    111         title           = "{Linux man page - sched\_setaffinity(2)}"
     112        title           = "{Linux man page - sched\_setaffinity(2)}",
     113        note    = {\href{https://man7.org/linux/man-pages/man2/sched_setaffinity.2.html}{https://\-man7.org/\-linux/man-pages/\-man2/sched\_setaffinity.2.html}},
    112114}
    113115
    114116@manual{affinityWindows,
    115         title           = "{Windows (vs.85) - SetThreadAffinityMask function}"
     117        title           = "{Windows documentation - SetThreadAffinityMask function}",
     118        note    = {\href{https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setthreadaffinitymask}{https://\-docs.microsoft.com/\-en-us/\-windows/\-win32/api/\-winbase/\-nf-winbase-setthreadaffinitymask}}
    116119}
    117120
  • doc/papers/concurrency/mail

    re3282fe r4432b52  
    1010Dear Dr Buhr,
    1111
    12 Your manuscript entitled "Concurrency in C" has been received by Software:
     12Your manuscript entitled "Concurrency in Cforall" has been received by Software:
    1313Practice and Experience. It will be given full consideration for publication in
    1414the journal.
     
    4141Dear Dr Buhr,
    4242
    43 Many thanks for submitting SPE-18-0205 entitled "Concurrency in C" to Software: Practice and Experience.
     43Many thanks for submitting SPE-18-0205 entitled "Concurrency in Cforall" to Software: Practice and Experience.
    4444
    4545In view of the comments of the referees found at the bottom of this letter, I cannot accept your paper for publication in Software: Practice and Experience. I hope that you find the referees' very detailed comments helpful.
  • doc/papers/concurrency/mail2

    re3282fe r4432b52  
    1 
    21Date: Wed, 26 Jun 2019 20:12:38 +0000
    32From: Aaron Thomas <onbehalfof@manuscriptcentral.com>
     
    12861285
    12871286Wiley Author Services
     1287
     1288
     1289
     1290From: "Pacaanas, Joel -" <jpacaanas@wiley.com>
     1291To: "Peter A. Buhr" <pabuhr@uwaterloo.ca>
     1292CC: Thierry Delisle <tdelisle@uwaterloo.ca>
     1293Subject: RE: Action: Proof of SPE_EV_SPE2925 for Software: Practice And Experience ready for review
     1294Date: Thu, 5 Nov 2020 02:03:27 +0000
     1295
     1296Dear Dr Buhr,
     1297
     1298Thank you for letting me know. We will wait for your corrections then.
     1299
     1300Best regards,
     1301Joel
     1302
     1303Joel Q. Pacaanas
     1304Production Editor
     1305On behalf of Wiley
     1306Manila
     1307We partner with global experts to further innovative research.
     1308
     1309E-mail: jpacaanas@wiley.com
     1310Tel: +632 88558618
     1311Fax: +632 5325 0768
     1312
     1313-----Original Message-----
     1314From: Peter A. Buhr [mailto:pabuhr@uwaterloo.ca]
     1315Sent: Thursday, November 5, 2020 5:57 AM
     1316To: SPE Proofs <speproofs@wiley.com>
     1317Cc: Thierry Delisle <tdelisle@uwaterloo.ca>
     1318Subject: Re: Action: Proof of SPE_EV_SPE2925 for Software: Practice And Experience ready for review
     1319
     1320       This is an external email.
     1321
     1322    We appreciate that the COVID-19 pandemic may create conditions for you that
     1323    make it difficult for you to review your proof within standard time
     1324    frames. If you have any problems keeping to this schedule, please reach out
     1325    to me at (SPEproofs@wiley.com) to discuss alternatives.
     1326
     1327Hi,
     1328
     1329We are in the middle of reading the proofs but it will take a little more
     1330time. I can send the proofs back by Monday Nov 9, but probably earlier.
     1331
     1332
     1333
     1334From: "Pacaanas, Joel -" <jpacaanas@wiley.com>
     1335To: "Peter A. Buhr" <pabuhr@uwaterloo.ca>
     1336CC: "tdelisle@uwaterloo.ca" <tdelisle@uwaterloo.ca>
     1337Subject: RE: Action: Proof of SPE_EV_SPE2925 for Software: Practice And Experience ready for review
     1338Date: Fri, 20 Nov 2020 05:27:18 +0000
     1339
     1340Dear Peter,
     1341
     1342We have now reset the proof back to original stage. Please refer to the below editable link.
     1343
     1344https://wiley.eproofing.in/Proof.aspx?token=ab7739d5678447fbbe5036f3bcba2445081500061
     1345
     1346Since the proof was reset, your added corrections before has also been removed. Please add them back.
     1347
     1348Please return your corrections at your earliest convenience.
     1349
     1350Best regards,
     1351Joel
     1352
     1353Joel Q. Pacaanas
     1354Production Editor
     1355On behalf of Wiley
     1356Manila
     1357We partner with global experts to further innovative research.
     1358
     1359E-mail: jpacaanas@wiley.com
     1360Tel: +632 88558618
     1361Fax: +632 5325 0768
     1362
     1363
     1364 
     1365From: "Wiley Online Proofing" <notifications@eproofing.in>
     1366To: pabuhr@uwaterloo.ca
     1367Cc: SPEproofs@wiley.com
     1368Reply-To: eproofing@wiley.com
     1369Date: 26 Nov 2020 18:57:27 +0000
     1370Subject: Corrections successfully submitted for SPE_EV_SPE2925, Advanced control-flow in Cforall.
     1371
     1372Corrections successfully submitted
     1373
     1374Dear Dr. Peter Buhr,
     1375
     1376Thank you for reviewing the proof of the Software: Practice And Experience article Advanced control-flow in Cforall.
     1377
     1378View Article https://wiley.eproofing.in/Proof.aspx?token=ab7739d5678447fbbe5036f3bcba2445081500061
     1379
     1380This is a read-only version of your article with the corrections you have marked up.
     1381
     1382If you encounter any problems or have questions please contact me, Joel Pacaanas at (SPEproofs@wiley.com). For the quickest response include the journal name and your article ID (found in the subject line) in all correspondence.
     1383
     1384Best regards,
     1385Joel Pacaanas