Changeset aac7197
- Timestamp: Mar 22, 2018, 11:44:36 PM (7 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 84832d87
- Parents: 6ecc079
- Location: doc/papers/concurrency
- Files: 4 edited
doc/papers/concurrency/Makefile
r6ecc079 → raac7197

  4  4    Figures = figures
  5  5    Macros = AMA/AMA-stix/ama
  6       -TeXLIB = .:style:annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography:
     6    +TeXLIB = .:annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography:
  7  7    LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build}
  8  8    BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex
doc/papers/concurrency/Paper.tex
r6ecc079 raac7197 1 % inline code ©...© (copyright symbol) emacs: C-q M-)2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"5 % LaTex escape §...§ (section symbol) emacs: C-q M-'6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^7 % math escape $...$ (dollar symbol)8 9 1 \documentclass[AMA,STIX1COL]{WileyNJD-v2} 10 2 … … 20 12 21 13 % Latex packages used in the document. 22 23 \usepackage[T1]{fontenc} % allow Latin1 (extended ASCII) characters24 \usepackage{textcomp}25 \usepackage[latin1]{inputenc}26 27 14 \usepackage{epic,eepic} 28 15 \usepackage{xspace} … … 34 21 \usepackage{siunitx} 35 22 \sisetup{ binary-units=true } 36 \input{style} % bespoke macros used in the document23 %\input{style} % bespoke macros used in the document 37 24 38 25 \hypersetup{breaklinks=true} … … 51 38 % Names used in the document. 52 39 53 \newcommand{\CS}{C\raisebox{-0.9ex}{\large$^\sharp$}\xspace} 40 \newcommand{\CFAIcon}{\textsf{C}\raisebox{\depth}{\rotatebox{180}{\textsf{A}}}\xspace} % Cforall symbolic name 41 \newcommand{\CFA}{\protect\CFAIcon} % safe for section/caption 42 \newcommand{\CFL}{\textrm{Cforall}\xspace} % Cforall symbolic name 43 \newcommand{\Celeven}{\textrm{C11}\xspace} % C11 symbolic name 44 \newcommand{\CC}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}\xspace} % C++ symbolic name 45 \newcommand{\CCeleven}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}11\xspace} % C++11 symbolic name 46 \newcommand{\CCfourteen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}14\xspace} % C++14 symbolic name 47 \newcommand{\CCseventeen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name 48 \newcommand{\CCtwenty}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name 49 \newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} % C# symbolic name 50 51 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 54 52 55 53 \newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}} … … 62 60 \newcommand{\TODO}{{\Textbf{TODO}}} 63 61 64 65 \newsavebox{\LstBox}66 67 62 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 63 64 % Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore 65 % removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR 66 % AFTER HYPERREF. 67 %\DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}} 68 \renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}} 69 70 \makeatletter 71 % parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for 72 % use rather than use \parident directly. 
73 \newlength{\parindentlnth} 74 \setlength{\parindentlnth}{\parindent} 75 76 \newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{\lst@basicstyle{#1}}}} 77 \newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}} 78 \newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}} 79 80 \newlength{\gcolumnposn} % temporary hack because lstlisting does not handle tabs correctly 81 \newlength{\columnposn} 82 \setlength{\gcolumnposn}{3.5in} 83 \setlength{\columnposn}{\gcolumnposn} 84 \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}} 85 \newcommand{\CRT}{\global\columnposn=\gcolumnposn} 86 87 % Denote newterms in particular font and index them without particular font and in lowercase, e.g., \newterm{abc}. 88 % The option parameter provides an index term different from the new term, e.g., \newterm[\texttt{abc}]{abc} 89 % The star version does not lowercase the index information, e.g., \newterm*{IBM}. 90 \newcommand{\newtermFontInline}{\emph} 91 \newcommand{\newterm}{\@ifstar\@snewterm\@newterm} 92 \newcommand{\@newterm}[2][\@empty]{\lowercase{\def\temp{#2}}{\newtermFontInline{#2}}\ifx#1\@empty\index{\temp}\else\index{#1@{\protect#2}}\fi} 93 \newcommand{\@snewterm}[2][\@empty]{{\newtermFontInline{#2}}\ifx#1\@empty\index{#2}\else\index{#1@{\protect#2}}\fi} 94 95 % Latin abbreviation 96 \newcommand{\abbrevFont}{\textit} % set empty for no italics 97 \newcommand{\EG}{\abbrevFont{e}.\abbrevFont{g}.} 98 \newcommand*{\eg}{% 99 \@ifnextchar{,}{\EG}% 100 {\@ifnextchar{:}{\EG}% 101 {\EG,\xspace}}% 102 }% 103 \newcommand{\IE}{\abbrevFont{i}.\abbrevFont{e}.} 104 \newcommand*{\ie}{% 105 \@ifnextchar{,}{\IE}% 106 {\@ifnextchar{:}{\IE}% 107 {\IE,\xspace}}% 108 }% 109 \newcommand{\ETC}{\abbrevFont{etc}} 110 \newcommand*{\etc}{% 111 \@ifnextchar{.}{\ETC}% 112 {\ETC.\xspace}% 113 }% 114 \newcommand{\ETAL}{\abbrevFont{et}~\abbrevFont{al}} 115 \renewcommand*{\etal}{% 116 \@ifnextchar{.}{\protect\ETAL}% 117 {\protect\ETAL.\xspace}% 118 }% 119 \newcommand{\VIZ}{\abbrevFont{viz}} 120 \newcommand*{\viz}{% 121 \@ifnextchar{.}{\VIZ}% 122 {\VIZ.\xspace}% 123 }% 124 \makeatother 125 126 \newenvironment{cquote}{% 127 \list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}% 128 \item\relax 129 }{% 130 \endlist 131 }% cquote 132 133 % CFA programming language, based on ANSI C (with some gcc additions) 134 \lstdefinelanguage{CFA}[ANSI]{C}{ 135 morekeywords={ 136 _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, _At, __attribute, 137 __attribute__, auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, 138 __const, __const__, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, 139 finally, forall, ftype, _Generic, _Imaginary, inline, __label__, lvalue, _Noreturn, one_t, 140 otype, restrict, _Static_assert, throw, throwResume, trait, try, ttype, typeof, __typeof, 141 __typeof__, virtual, with, zero_t}, 142 morekeywords=[2]{ 143 _Atomic, coroutine, is_coroutine, is_monitor, is_thread, monitor, mutex, nomutex, or, 144 resume, suspend, thread, _Thread_local, waitfor, when, yield}, 145 moredirectives={defined,include_next}% 146 } 147 148 \lstset{ 149 language=CFA, 150 columns=fullflexible, 151 basicstyle=\linespread{0.9}\sf, % reduce line spacing and use sanserif font 152 stringstyle=\tt, % use typewriter font 153 tabsize=5, % N space 
tabbing 154 xleftmargin=\parindentlnth, % indent code to paragraph indentation 155 %mathescape=true, % LaTeX math escape in CFA code $...$ 156 escapechar=\$, % LaTeX escape in CFA code 157 keepspaces=true, % 158 showstringspaces=false, % do not show spaces with cup 159 showlines=true, % show blank lines at end of code 160 aboveskip=4pt, % spacing above/below code block 161 belowskip=3pt, 162 % replace/adjust listing characters that look bad in sanserif 163 literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1 164 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 165 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textgreater}}2, 166 moredelim=**[is][\color{red}]{`}{`}, 167 }% lstset 168 169 % uC++ programming language, based on ANSI C++ 170 \lstdefinelanguage{uC++}[ANSI]{C++}{ 171 morekeywords={ 172 _Accept, _AcceptReturn, _AcceptWait, _Actor, _At, _CatchResume, _Cormonitor, _Coroutine, _Disable, 173 _Else, _Enable, _Event, _Finally, _Monitor, _Mutex, _Nomutex, _PeriodicTask, _RealTimeTask, 174 _Resume, _Select, _SporadicTask, _Task, _Timeout, _When, _With, _Throw}, 175 } 176 \lstdefinelanguage{Golang}{ 177 morekeywords=[1]{package,import,func,type,struct,return,defer,panic,recover,select,var,const,iota,}, 178 morekeywords=[2]{string,uint,uint8,uint16,uint32,uint64,int,int8,int16,int32,int64, 179 bool,float32,float64,complex64,complex128,byte,rune,uintptr, error,interface}, 180 morekeywords=[3]{map,slice,make,new,nil,len,cap,copy,close,true,false,delete,append,real,imag,complex,chan,}, 181 morekeywords=[4]{for,break,continue,range,goto,switch,case,fallthrough,if,else,default,}, 182 morekeywords=[5]{Println,Printf,Error,}, 183 sensitive=true, 184 morecomment=[l]{//}, 185 morecomment=[s]{/*}{*/}, 186 morestring=[b]', 187 morestring=[b]", 188 morestring=[s]{`}{`}, 189 } 190 191 \lstnewenvironment{cfa}[1][] 192 {\lstset{#1}} 193 {} 194 \lstnewenvironment{C++}[1][] % use C++ style 195 {\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}} 196 {} 197 \lstnewenvironment{uC++}[1][] 198 {\lstset{#1}} 199 {} 200 \lstnewenvironment{Go}[1][] 201 {\lstset{#1}} 202 {} 203 204 % inline code @...@ 205 \lstMakeShortInline@% 206 68 207 69 208 \title{\texorpdfstring{Concurrency in \protect\CFA}{Concurrency in Cforall}} … … 81 220 \abstract[Summary]{ 82 221 \CFA is a modern, polymorphic, \emph{non-object-oriented} extension of the C programming language. 83 This paper discusses the design of the concurrency and parallelism features in \CFA, and the concurrent runtime-system that supports them.222 This paper discusses the design of the concurrency and parallelism features in \CFA, and the concurrent runtime-system. 84 223 These features are created from scratch as ISO C lacks concurrency, relying largely on pthreads. 85 224 Coroutines and lightweight (user) threads are introduced into the language. … … 87 226 A unique contribution is allowing multiple monitors to be safely acquired simultaneously. 88 227 All features respect the expectations of C programmers, while being fully integrate with the \CFA polymorphic type-system and other language features. 
89 Finally, experimental results are presented to validate several of the new features withother concurrent programming-languages.228 Finally, experimental results are presented to compare the performance of the new features with similar mechanisms in other concurrent programming-languages. 90 229 }% 91 230 … … 104 243 % ====================================================================== 105 244 106 This paper provides a minimal concurrency \textbf{api} that is simple, efficient and can be reused to build higher-level features. 107 The simplest possible concurrency system is a thread and a lock but this low-level approach is hard to master. 108 An easier approach for users is to support higher-level constructs as the basis of concurrency. 109 Indeed, for highly productive concurrent programming, high-level approaches are much more popular~\cite{HPP:Study}. 110 Examples are task based, message passing and implicit threading. 111 The high-level approach and its minimal \textbf{api} are tested in a dialect of C, called \CFA. 112 Furthermore, the proposed \textbf{api} doubles as an early definition of the \CFA language and library. 113 This paper also provides an implementation of the concurrency library for \CFA as well as all the required language features added to the source-to-source translator. 114 115 There are actually two problems that need to be solved in the design of concurrency for a programming language: which concurrency and which parallelism tools are available to the programmer. 245 This paper provides a minimal concurrency \newterm{API} that is simple, efficient and can be used to build other concurrency features. 246 While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master. 247 An easier approach for programmers is to support higher-level constructs as the basis of concurrency. 248 Indeed, for highly productive concurrent programming, high-level approaches are much more popular~\cite{Hochstein05}. 249 Examples of high-level approaches are task based~\cite{TBB}, message passing~\cite{Erlang,MPI}, and implicit threading~\cite{OpenMP}. 250 251 The terminology used in this paper is as follows. 252 A \newterm{thread} is a fundamental unit of execution that runs a sequence of code and requires a stack to maintain state. 253 Multiple simultaneous threads gives rise to \newterm{concurrency}, which requires locking to ensure safe access to shared data. 254 % Correspondingly, concurrency is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, etc.) concurrent threads are introduced. 255 \newterm{Locking}, and by extension locks, are defined as a mechanism to prevent progress of threads to provide safety. 256 \newterm{Parallelism} is running multiple threads simultaneously. 257 Parallelism implies \emph{actual} simultaneous execution, where concurrency only requires \emph{apparent} simultaneous execution. 258 As such, parallelism is only observable in differences in performance, which is observed through differences in timing. 259 260 Hence, there are two problems to be solved in the design of concurrency for a programming language: concurrency and parallelism. 116 261 While these two concepts are often combined, they are in fact distinct, requiring different tools~\cite{Buhr05a}. 117 Concurrency tools need to handle mutual exclusion and synchronization, while parallelism tools are about performance, cost and resource utilization. 
118 119 In the context of this paper, a \textbf{thread} is a fundamental unit of execution that runs a sequence of code, generally on a program stack. 120 Having multiple simultaneous threads gives rise to concurrency and generally requires some kind of locking mechanism to ensure proper execution. 121 Correspondingly, \textbf{concurrency} is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, etc.) concurrent threads are introduced. 122 Accordingly, \textbf{locking} (and by extension locks) are defined as a mechanism that prevents the progress of certain threads in order to avoid problems due to concurrency. 123 Finally, in this paper \textbf{parallelism} is distinct from concurrency and is defined as running multiple threads simultaneously. 124 More precisely, parallelism implies \emph{actual} simultaneous execution as opposed to concurrency which only requires \emph{apparent} simultaneous execution. 125 As such, parallelism is only observable in the differences in performance or, more generally, differences in timing. 262 Concurrency tools handle mutual exclusion and synchronization, while parallelism tools handle performance, cost and resource utilization. 263 264 The proposed concurrency API is implemented in a dialect of C, called \CFA. 265 The paper discusses how the language features are added to the \CFA translator with respect to parsing, semantic, and type checking, and the corresponding high-perforamnce runtime-library to implement the concurrency features. 126 266 127 267 % ====================================================================== … … 139 279 Interestingly, while \CFA is not an object-oriented language, lacking the concept of a receiver (e.g., {\tt this}), it does have some notion of objects\footnote{C defines the term objects as : ``region of data storage in the execution environment, the contents of which can represent 140 280 values''~\cite[3.15]{C11}}, most importantly construction and destruction of objects. 141 Most of the following code examples can be found on the \CFA website~\cite{ www-cfa}.142 143 % ====================================================================== 281 Most of the following code examples can be found on the \CFA website~\cite{Cforall}. 282 283 144 284 \subsection{References} 145 285 146 286 Like \CC, \CFA introduces rebind-able references providing multiple dereferencing as an alternative to pointers. 
147 287 In regards to concurrency, the semantic difference between pointers and references are not particularly relevant, but since this document uses mostly references, here is a quick overview of the semantics: 148 \begin{cfa code}288 \begin{cfa} 149 289 int x, *p1 = &x, **p2 = &p1, ***p3 = &p2, 150 290 &r1 = x, &&r2 = r1, &&&r3 = r2; 151 ***p3 = 3; //change x152 r3 = 3; //change x, ***r3153 **p3 = ...; //change p1154 *p3 = ...; //change p2155 int y, z, & ar[3] = {x, y, z}; //initialize array of references156 typeof( ar[1]) p; //is int, referenced object type157 typeof(&ar[1]) q; //is int &, reference type158 sizeof( ar[1]) == sizeof(int); //is true, referenced object size159 sizeof(&ar[1]) == sizeof(int *); //is true, reference size160 \end{cfa code}291 ***p3 = 3; $\C{// change x}$ 292 r3 = 3; $\C{// change x, ***r3}$ 293 **p3 = ...; $\C{// change p1}$ 294 *p3 = ...; $\C{// change p2}$ 295 int y, z, & ar[3] = {x, y, z}; $\C{// initialize array of references}$ 296 typeof( ar[1]) p; $\C{// is int, referenced object type}$ 297 typeof(&ar[1]) q; $\C{// is int \&, reference type}$ 298 sizeof( ar[1]) == sizeof(int); $\C{// is true, referenced object size}$ 299 sizeof(&ar[1]) == sizeof(int *); $\C{// is true, reference size}$ 300 \end{cfa} 161 301 The important take away from this code example is that a reference offers a handle to an object, much like a pointer, but which is automatically dereferenced for convenience. 162 302 … … 167 307 As well, \CFA uses the return type as part of the selection criteria, as in Ada~\cite{Ada}. 168 308 For routines with multiple parameters and returns, the selection is complex. 169 \begin{cfa code}170 // selection based on type and number of parameters171 void f(void); //(1)172 void f(char); //(2)173 void f(int, double); //(3)174 f(); //select (1)175 f('a'); //select (2)176 f(3, 5.2); //select (3)177 178 // selection based on type and number of returns179 char f(int); //(1)180 double f(int); //(2)181 char c = f(3); //select (1)182 double d = f(4); //select (2)183 \end{cfa code}309 \begin{cfa} 310 // selection based on type and number of parameters 311 void f(void); $\C{// (1)}$ 312 void f(char); $\C{// (2)}$ 313 void f(int, double); $\C{// (3)}$ 314 f(); $\C{// select (1)}$ 315 f('a'); $\C{// select (2)}$ 316 f(3, 5.2); $\C{// select (3)}$ 317 318 // selection based on type and number of returns 319 char f(int); $\C{// (1)}$ 320 double f(int); $\C{// (2)}$ 321 char c = f(3); $\C{// select (1)}$ 322 double d = f(4); $\C{// select (2)}$ 323 \end{cfa} 184 324 This feature is particularly important for concurrency since the runtime system relies on creating different types to represent concurrency objects. 185 325 Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions that prevent name clashes. 186 As seen in section \ref{basics}, routine \code{main}is an example that benefits from overloading.326 As seen in section \ref{basics}, routine @main@ is an example that benefits from overloading. 187 327 188 328 % ====================================================================== … … 190 330 Overloading also extends to operators. 191 331 The syntax for denoting operator-overloading is to name a routine with the symbol of the operator and question marks where the arguments of the operation appear, e.g.: 192 \begin{cfa code}193 int ++? (int op); //unary prefix increment194 int ?++ (int op); //unary postfix increment195 int ?+? (int op1, int op2); //binary plus196 int ?<=?(int op1, int op2); //binary less than197 int ?=? 
(int & op1, int op2); //binary assignment198 int ?+=?(int & op1, int op2); //binary plus-assignment332 \begin{cfa} 333 int ++? (int op); $\C{// unary prefix increment}$ 334 int ?++ (int op); $\C{// unary postfix increment}$ 335 int ?+? (int op1, int op2); $\C{// binary plus}$ 336 int ?<=?(int op1, int op2); $\C{// binary less than}$ 337 int ?=? (int & op1, int op2); $\C{// binary assignment}$ 338 int ?+=?(int & op1, int op2); $\C{// binary plus-assignment}$ 199 339 200 340 struct S {int i, j;}; 201 S ?+?(S op1, S op2) { //add two structures341 S ?+?(S op1, S op2) { $\C{// add two structures}$ 202 342 return (S){op1.i + op2.i, op1.j + op2.j}; 203 343 } 204 344 S s1 = {1, 2}, s2 = {2, 3}, s3; 205 s3 = s1 + s2; //compute sum: s3 == {2, 5}206 \end{cfa code}345 s3 = s1 + s2; $\C{// compute sum: s3 == {2, 5}}$ 346 \end{cfa} 207 347 While concurrency does not use operator overloading directly, this feature is more important as an introduction for the syntax of constructors. 208 348 … … 211 351 Object lifetime is often a challenge in concurrency. \CFA uses the approach of giving concurrent meaning to object lifetime as a means of synchronization and/or mutual exclusion. 212 352 Since \CFA relies heavily on the lifetime of objects, constructors and destructors is a core feature required for concurrency and parallelism. \CFA uses the following syntax for constructors and destructors: 213 \begin{cfa code}353 \begin{cfa} 214 354 struct S { 215 355 size_t size; 216 356 int * ia; 217 357 }; 218 void ?{}(S & s, int asize) { //constructor operator219 s.size = asize; //initialize fields358 void ?{}(S & s, int asize) { $\C{// constructor operator}$ 359 s.size = asize; $\C{// initialize fields}$ 220 360 s.ia = calloc(size, sizeof(S)); 221 361 } 222 void ^?{}(S & s) { //destructor operator223 free(ia); //de-initialization fields362 void ^?{}(S & s) { $\C{// destructor operator}$ 363 free(ia); $\C{// de-initialization fields}$ 224 364 } 225 365 int main() { 226 S x = {10}, y = {100}; //implicit calls: ?{}(x, 10), ?{}(y, 100)227 ... //use x and y228 ^x{}; ^y{}; //explicit calls to de-initialize229 x{20}; y{200}; //explicit calls to reinitialize230 ... //reuse x and y231 } //implicit calls: ^?{}(y), ^?{}(x)232 \end{cfa code}366 S x = {10}, y = {100}; $\C{// implicit calls: ?\{\}(x, 10), ?\{\}(y, 100)}$ 367 ... $\C{// use x and y}$ 368 ^x{}; ^y{}; $\C{// explicit calls to de-initialize}$ 369 x{20}; y{200}; $\C{// explicit calls to reinitialize}$ 370 ... $\C{// reuse x and y}$ 371 } $\C{// implicit calls: \^?\{\}(y), \^?\{\}(x)}$ 372 \end{cfa} 233 373 The language guarantees that every object and all their fields are constructed. 234 374 Like \CC, construction of an object is automatically done on allocation and destruction of the object is done on deallocation. 235 375 Allocation and deallocation can occur on the stack or on the heap. 236 \begin{cfa code}376 \begin{cfa} 237 377 { 238 struct S s = {10}; //allocation, call constructor378 struct S s = {10}; $\C{// allocation, call constructor}$ 239 379 ... 240 } //deallocation, call destructor241 struct S * s = new(); //allocation, call constructor380 } $\C{// deallocation, call destructor}$ 381 struct S * s = new(); $\C{// allocation, call constructor}$ 242 382 ... 
243 delete(s); //deallocation, call destructor244 \end{cfa code}245 Note that like \CC, \CFA introduces \code{new} and \code{delete}, which behave like \code{malloc} and \code{free} in addition to constructing and destructing objects, after calling \code{malloc} and before calling \code{free}, respectively.383 delete(s); $\C{// deallocation, call destructor}$ 384 \end{cfa} 385 Note that like \CC, \CFA introduces @new@ and @delete@, which behave like @malloc@ and @free@ in addition to constructing and destructing objects, after calling @malloc@ and before calling @free@, respectively. 246 386 247 387 % ====================================================================== … … 249 389 \label{s:ParametricPolymorphism} 250 390 Routines in \CFA can also be reused for multiple types. 251 This capability is done using the \code{forall}clauses, which allow separately compiled routines to support generic usage over multiple types.391 This capability is done using the @forall@ clauses, which allow separately compiled routines to support generic usage over multiple types. 252 392 For example, the following sum function works for any type that supports construction from 0 and addition: 253 \begin{cfa code}254 // constraint type, 0 and +393 \begin{cfa} 394 // constraint type, 0 and + 255 395 forall(otype T | { void ?{}(T *, zero_t); T ?+?(T, T); }) 256 396 T sum(T a[ ], size_t size) { 257 T total = 0; //construct T from 0397 T total = 0; $\C{// construct T from 0}$ 258 398 for(size_t i = 0; i < size; i++) 259 total = total + a[i]; //select appropriate +399 total = total + a[i]; $\C{// select appropriate +}$ 260 400 return total; 261 401 } 262 402 263 403 S sa[5]; 264 int i = sum(sa, 5); //use S's 0 construction and +265 \end{cfa code}404 int i = sum(sa, 5); $\C{// use S's 0 construction and +}$ 405 \end{cfa} 266 406 267 407 Since writing constraints on types can become cumbersome for more constrained functions, \CFA also has the concept of traits. 268 408 Traits are named collection of constraints that can be used both instead and in addition to regular constraints: 269 \begin{cfa code}409 \begin{cfa} 270 410 trait summable( otype T ) { 271 void ?{}(T *, zero_t); //constructor from 0 literal272 T ?+?(T, T); //assortment of additions411 void ?{}(T *, zero_t); $\C{// constructor from 0 literal}$ 412 T ?+?(T, T); $\C{// assortment of additions}$ 273 413 T ?+=?(T *, T); 274 414 T ++?(T *); 275 415 T ?++(T *); 276 416 }; 277 forall( otype T | summable(T) ) //use trait417 forall( otype T | summable(T) ) $\C{// use trait}$ 278 418 T sum(T a[], size_t size); 279 \end{cfa code}280 281 Note that the type use for assertions can be either an \code{otype} or a \code{dtype}.282 Types declared as \code{otype}refer to ``complete'' objects, i.e., objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator.283 Using \code{dtype,}on the other hand, has none of these assumptions but is extremely restrictive, it only guarantees the object is addressable.419 \end{cfa} 420 421 Note that the type use for assertions can be either an @otype@ or a @dtype@. 422 Types declared as @otype@ refer to ``complete'' objects, i.e., objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator. 423 Using @dtype@, on the other hand, has none of these assumptions but is extremely restrictive, it only guarantees the object is addressable. 
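To make the distinction concrete, the following is a minimal sketch (illustrative only; the routine names and the assertion are hypothetical, not taken from the \CFA library) contrasting the two kinds of type parameters:
\begin{cfa}
forall( otype T )	$\C{// complete type: size, copy, assignment are available}$
T identity( T x ) { return x; }	$\C{// T may be declared, passed and returned by value}$

forall( dtype T | { void process( T & ); } )	$\C{// incomplete type: only addressability assumed}$
void apply( T & item ) { process( item ); }	$\C{// T may only be used through \& or *}$
\end{cfa}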
284 424 285 425 % ====================================================================== 286 426 \subsection{with Clause/Statement} 287 427 Since \CFA lacks the concept of a receiver, certain functions end up needing to repeat variable names often. 288 To remove this inconvenience, \CFA provides the \code{with}statement, which opens an aggregate scope making its fields directly accessible (like Pascal).289 \begin{cfa code}428 To remove this inconvenience, \CFA provides the @with@ statement, which opens an aggregate scope making its fields directly accessible (like Pascal). 429 \begin{cfa} 290 430 struct S { int i, j; }; 291 int mem(S & this) with (this) //with clause292 i = 1; //this->i293 j = 2; //this->j431 int mem(S & this) with (this) $\C{// with clause}$ 432 i = 1; $\C{// this->i}$ 433 j = 2; $\C{// this->j}$ 294 434 } 295 435 int foo() { 296 436 struct S1 { ... } s1; 297 437 struct S2 { ... } s2; 298 with (s1) //with statement438 with (s1) $\C{// with statement}$ 299 439 { 300 // access fields of s1 without qualification301 with (s2) //nesting440 // access fields of s1 without qualification 441 with (s2) $\C{// nesting}$ 302 442 { 303 // access fields of s1 and s2 without qualification443 // access fields of s1 and s2 without qualification 304 444 } 305 445 } 306 with (s1, s2) //scopes open in parallel446 with (s1, s2) $\C{// scopes open in parallel}$ 307 447 { 308 // access fields of s1 and s2 without qualification309 } 310 } 311 \end{cfa code}312 313 For more information on \CFA see \cite{cforall-ug, rob-thesis,www-cfa}.448 // access fields of s1 and s2 without qualification 449 } 450 } 451 \end{cfa} 452 453 For more information on \CFA see \cite{cforall-ug,Schluntz17,www-cfa}. 314 454 315 455 % ====================================================================== … … 339 479 Optimal performance in concurrent applications is often obtained by having as much non-determinism as correctness allows. 340 480 481 341 482 \section{\protect\CFA's Thread Building Blocks} 483 342 484 One of the important features that are missing in C is threading\footnote{While the C11 standard defines a ``threads.h'' header, it is minimal and defined as optional. 343 485 As such, library support for threading is far from widespread. … … 347 489 And being a system-level language means programmers expect to choose precisely which features they need and which cost they are willing to pay. 348 490 491 349 492 \section{Coroutines: A Stepping Stone}\label{coroutine} 493 350 494 While the main focus of this proposal is concurrency and parallelism, it is important to address coroutines, which are actually a significant building block of a concurrency system. \textbf{Coroutine}s are generalized routines which have predefined points where execution is suspended and can be resumed at a later time. 351 495 Therefore, they need to deal with context switches and other context-management operations. 352 496 This proposal includes coroutines both as an intermediate step for the implementation of threads, and a first-class feature of \CFA. 353 497 Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant. 354 The core \textbf{api} of coroutines revolves around two features: independent call-stacks and \code{suspend}/\code{resume}.355 356 \begin{ table}498 The core \textbf{api} of coroutines revolves around two features: independent call-stacks and @suspend@/@resume@. 
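In rough outline, a \CFA coroutine couples a type with a coroutine @main@ that can pause itself; the following minimal sketch (illustrative only, the type and routine names are hypothetical, and the full Fibonacci example appears later) shows the shape of this interface:
\begin{cfa}
coroutine C { int result; };	$\C{// communication variable}$
void main( C & c ) {	$\C{// coroutine body, started by the first resume}$
	c.result = 1;
	suspend();	$\C{// pause, return to the last resume}$
	c.result = 2;	$\C{// continue here on the next resume}$
}
int next( C & c ) { resume( c ); return c.result; }	$\C{// restart after the last suspend}$
\end{cfa}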
499 500 \begin{figure} 357 501 \begin{center} 358 \begin{tabular}{c @{\hskip 0.025in}|@{\hskip 0.025in} c @{\hskip 0.025in}|@{\hskip 0.025in}c}359 \begin{c code}[tabsize=2]360 // Using callbacks502 \begin{tabular}{c|c|c} 503 \begin{cfa} 504 // Using callback 361 505 void fibonacci_func( 362 506 int n, 363 void (* callback)(int)507 void (* callback)( int ) 364 508 ) { 365 int first = 0; 366 int second = 1; 367 int next, i; 368 for(i = 0; i < n; i++) 369 { 370 if(i <= 1) 371 next = i; 372 else { 373 next = f1 + f2; 374 f1 = f2; 375 f2 = next; 376 } 377 callback(next); 378 } 379 } 380 509 int fn, f1 = 0, f2 = 1; 510 for ( int i = 0; i < n; i++ ) { 511 callback( f1 ); 512 fn = f1 + f2; 513 f1 = f2; f2 = fn; 514 } 515 } 381 516 int main() { 382 void print_fib(int n) { 383 printf("%d\n", n); 384 } 385 386 fibonacci_func( 387 10, print_fib 388 ); 389 390 391 392 } 393 \end{ccode}&\begin{ccode}[tabsize=2] 394 //Using output array 517 void print_fib( int n ) { 518 printf( "%d\n", n ); 519 } 520 fibonacci_func( 10, print_fib ); 521 } 522 523 \end{cfa} 524 & 525 \begin{cfa} 526 // Using output array 395 527 void fibonacci_array( 396 528 int n, 397 int * array529 int * array 398 530 ) { 399 int f1 = 0; int f2 = 1; 400 int next, i; 401 for(i = 0; i < n; i++) 402 { 403 if(i <= 1) 404 next = i; 405 else { 406 next = f1 + f2; 407 f1 = f2; 408 f2 = next; 409 } 410 array[i] = next; 411 } 412 } 413 414 531 int fn, f1 = 0, f2 = 1; 532 for ( int i = 0; i < n; i++ ) { 533 array[i] = f1; 534 fn = f1 + f2; 535 f1 = f2; f2 = fn; 536 } 537 } 415 538 int main() { 416 539 int a[10]; 417 418 fibonacci_func( 419 10, a 420 ); 421 422 for(int i=0;i<10;i++){ 423 printf("%d\n", a[i]); 424 } 425 426 } 427 \end{ccode}&\begin{ccode}[tabsize=2] 428 //Using external state 540 fibonacci_array( 10, a ); 541 for ( int i = 0; i < 10; i++ ) { 542 printf( "%d\n", a[i] ); 543 } 544 } 545 \end{cfa} 546 & 547 \begin{cfa} 548 // Using external state 429 549 typedef struct { 430 550 int f1, f2; 431 551 } Iterator_t; 432 433 552 int fibonacci_state( 434 Iterator_t * it553 Iterator_t * it 435 554 ) { 436 int f; 437 f = it->f1 + it->f2; 438 it->f2 = it->f1; 439 it->f1 = max(f,1); 440 return f; 441 } 442 443 444 445 446 447 448 555 int ret = it->f1; 556 int fn = it->f1 + it->f2; 557 it->f2 = it->f1; it->f1 = fn; 558 return ret; 559 } 449 560 int main() { 450 Iterator_t it={0,0}; 451 452 for(int i=0;i<10;i++){ 453 printf("%d\n", 454 fibonacci_state( 455 &it 456 ); 457 ); 458 } 459 460 } 461 \end{ccode} 561 Iterator_t it = { 0, 1 }; 562 for ( int i = 0; i < 10; i++ ) { 563 printf( "%d\n", 564 fibonacci_state( &it ) ); 565 } 566 } 567 \end{cfa} 462 568 \end{tabular} 463 569 \end{center} 464 \caption{ Different implementations of a Fibonacci sequence generator in C.}570 \caption{C Fibonacci Implementations} 465 571 \label{lst:fibonacci-c} 466 \end{ table}572 \end{figure} 467 573 468 574 A good example of a problem made easier with coroutines is generators, e.g., generating the Fibonacci sequence. … … 474 580 Listing \ref{lst:fibonacci-cfa} is an example of a solution to the Fibonacci problem using \CFA coroutines, where the coroutine stack holds sufficient state for the next generation. 475 581 This solution has the advantage of having very strong decoupling between how the sequence is generated and how it is used. 
476 Indeed, this version is as easy to use as the \code{fibonacci_state} solution, while the implementation is very similar to the \code{fibonacci_func}example.582 Indeed, this version is as easy to use as the @fibonacci_state@ solution, while the implementation is very similar to the @fibonacci_func@ example. 477 583 478 584 \begin{figure} 479 \begin{cfacode}[caption={Implementation of Fibonacci using coroutines},label={lst:fibonacci-cfa}] 480 coroutine Fibonacci { 481 int fn; //used for communication 482 }; 483 484 void ?{}(Fibonacci& this) { //constructor 485 this.fn = 0; 486 } 487 488 //main automatically called on first resume 489 void main(Fibonacci& this) with (this) { 490 int fn1, fn2; //retained between resumes 491 fn = 0; 492 fn1 = fn; 493 suspend(this); //return to last resume 494 495 fn = 1; 496 fn2 = fn1; 497 fn1 = fn; 498 suspend(this); //return to last resume 499 585 \begin{cfa} 586 coroutine Fibonacci { int fn; }; $\C{// used for communication}$ 587 588 void ?{}( Fibonacci & fib ) with( fib ) { fn = 0; } $\C{// constructor}$ 589 590 void main( Fibonacci & fib ) with( fib ) { $\C{// main called on first resume}$ 591 int fn1, fn2; $\C{// retained between resumes}$ 592 fn = 0; fn1 = fn; $\C{// 1st case}$ 593 suspend(); $\C{// restart last resume}$ 594 fn = 1; fn2 = fn1; fn1 = fn; $\C{// 2nd case}$ 595 suspend(); $\C{// restart last resume}$ 500 596 for ( ;; ) { 501 fn = fn1 + fn2; 502 fn2 = fn1; 503 fn1 = fn; 504 suspend(this); //return to last resume 505 } 506 } 507 508 int next(Fibonacci& this) { 509 resume(this); //transfer to last suspend 510 return this.fn; 511 } 512 513 void main() { //regular program main 597 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; $\C{// general case}$ 598 suspend(); $\C{// restart last resume}$ 599 } 600 } 601 int next( Fibonacci & fib ) with( fib ) { 602 resume( fib ); $\C{// restart last suspend}$ 603 return fn; 604 } 605 int main() { 514 606 Fibonacci f1, f2; 515 607 for ( int i = 1; i <= 10; i += 1 ) { … … 517 609 } 518 610 } 519 \end{cfacode} 611 \end{cfa} 612 \caption{Coroutine Fibonacci } 613 \label{lst:fibonacci-cfa} 520 614 \end{figure} 521 615 522 Listing \ref{lst:fmt-line} shows the \code{Format}coroutine for restructuring text into groups of character blocks of fixed size.616 Listing \ref{lst:fmt-line} shows the @Format@ coroutine for restructuring text into groups of character blocks of fixed size. 523 617 The example takes advantage of resuming coroutines in the constructor to simplify the code and highlights the idea that interesting control flow can occur in the constructor. 
524 618 525 619 \begin{figure} 526 \begin{cfa code}[tabsize=3,caption={Formatting text into lines of 5 blocks of 4 characters.},label={lst:fmt-line}]527 // format characters into blocks of 4 and groups of 5 blocks per line620 \begin{cfa}[tabsize=3,caption={Formatting text into lines of 5 blocks of 4 characters.},label={lst:fmt-line}] 621 // format characters into blocks of 4 and groups of 5 blocks per line 528 622 coroutine Format { 529 char ch; // used for communication530 int g, b; // global because used in destructor623 char ch; // used for communication 624 int g, b; // global because used in destructor 531 625 }; 532 626 533 627 void ?{}(Format& fmt) { 534 resume( fmt ); // prime (start) coroutine628 resume( fmt ); // prime (start) coroutine 535 629 } 536 630 … … 541 635 542 636 void main(Format& fmt) with fmt { 543 for ( ;; ) { // for as many characters544 for(g = 0; g < 5; g++) { // groups of 5 blocks545 for(b = 0; b < 4; fb++) { // blocks of 4 characters637 for ( ;; ) { // for as many characters 638 for(g = 0; g < 5; g++) { // groups of 5 blocks 639 for(b = 0; b < 4; fb++) { // blocks of 4 characters 546 640 suspend(); 547 sout | ch; // print character641 sout | ch; // print character 548 642 } 549 sout | " "; // print block separator643 sout | " "; // print block separator 550 644 } 551 sout | endl; // print group separator645 sout | endl; // print group separator 552 646 } 553 647 } … … 561 655 Format fmt; 562 656 char ch; 563 Eof: for ( ;; ) { // read until end of file564 sin | ch; // read one character565 if(eof(sin)) break Eof; // eof ?566 prt(fmt, ch); // push character for formatting567 } 568 } 569 \end{cfa code}657 Eof: for ( ;; ) { // read until end of file 658 sin | ch; // read one character 659 if(eof(sin)) break Eof; // eof ? 660 prt(fmt, ch); // push character for formatting 661 } 662 } 663 \end{cfa} 570 664 \end{figure} 571 665 … … 582 676 For example, the following code, while looking benign, can run into undefined behaviour because of thunks: 583 677 584 \begin{cfa code}585 // async: Runs function asynchronously on another thread678 \begin{cfa} 679 // async: Runs function asynchronously on another thread 586 680 forall(otype T) 587 681 extern void async(void (*func)(T*), T* obj); … … 592 686 void bar() { 593 687 int a; 594 async(noop, &a); // start thread running noop with argument a595 } 596 \end{cfa code}688 async(noop, &a); // start thread running noop with argument a 689 } 690 \end{cfa} 597 691 598 692 The generated C code\footnote{Code trimmed down for brevity} creates a local thunk to hold type information: 599 693 600 \begin{c code}694 \begin{cfa} 601 695 extern void async(/* omitted */, void (*func)(void*), void* obj); 602 696 … … 612 706 async(/* omitted */, ((void (*)(void*))(&_thunk0)), (&a)); 613 707 } 614 \end{c code}615 The problem in this example is a storage management issue, the function pointer \code{_thunk0}is only valid until the end of the block, which limits the viable solutions because storing the function pointer for too long causes undefined behaviour; i.e., the stack-based thunk being destroyed before it can be used.708 \end{cfa} 709 The problem in this example is a storage management issue, the function pointer @_thunk0@ is only valid until the end of the block, which limits the viable solutions because storing the function pointer for too long causes undefined behaviour; i.e., the stack-based thunk being destroyed before it can be used. 616 710 This challenge is an extension of challenges that come with second-class routines. 
617 711 Indeed, GCC nested routines also have the limitation that nested routine cannot be passed outside of the declaration scope. … … 621 715 One solution to this challenge is to use composition/containment, where coroutine fields are added to manage the coroutine. 622 716 623 \begin{cfa code}717 \begin{cfa} 624 718 struct Fibonacci { 625 int fn; // used for communication626 coroutine c; // composition719 int fn; // used for communication 720 coroutine c; // composition 627 721 }; 628 722 … … 633 727 void ?{}(Fibonacci& this) { 634 728 this.fn = 0; 635 // Call constructor to initialize coroutine729 // Call constructor to initialize coroutine 636 730 (this.c){myMain}; 637 731 } 638 \end{cfa code}732 \end{cfa} 639 733 The downside of this approach is that users need to correctly construct the coroutine handle before using it. 640 734 Like any other objects, the user must carefully choose construction order to prevent usage of objects not yet constructed. … … 645 739 The next alternative is to use language support to annotate coroutines as follows: 646 740 647 \begin{cfa code}741 \begin{cfa} 648 742 coroutine Fibonacci { 649 int fn; // used for communication743 int fn; // used for communication 650 744 }; 651 \end{cfa code}652 The \code{coroutine}keyword means the compiler can find and inject code where needed.745 \end{cfa} 746 The @coroutine@ keyword means the compiler can find and inject code where needed. 653 747 The downside of this approach is that it makes coroutine a special case in the language. 654 748 Users wanting to extend coroutines or build their own for various reasons can only do so in ways offered by the language. … … 661 755 For coroutines as for threads, many implementations are based on routine pointers or function objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}. 662 756 For example, Boost implements coroutines in terms of four functor object types: 663 \begin{cfa code}757 \begin{cfa} 664 758 asymmetric_coroutine<>::pull_type 665 759 asymmetric_coroutine<>::push_type 666 760 symmetric_coroutine<>::call_type 667 761 symmetric_coroutine<>::yield_type 668 \end{cfa code}762 \end{cfa} 669 763 Often, the canonical threading paradigm in languages is based on function pointers, \texttt{pthread} being one of the most well-known examples. 670 764 The main problem of this approach is that the thread usage is limited to a generic handle that must otherwise be wrapped in a custom type. … … 672 766 673 767 A variation of this would be to use a simple function pointer in the same way \texttt{pthread} does for threads: 674 \begin{cfa code}768 \begin{cfa} 675 769 void foo( coroutine_t cid, void* arg ) { 676 770 int* value = (int*)arg; 677 // Coroutine body771 // Coroutine body 678 772 } 679 773 … … 683 777 coroutine_resume( &cid ); 684 778 } 685 \end{cfa code}779 \end{cfa} 686 780 This semantics is more common for thread interfaces but coroutines work equally well. 687 781 As discussed in section \ref{threads}, this approach is superseded by static approaches in terms of expressivity. … … 690 784 691 785 Finally, the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines. 692 This approach defines a coroutine as anything that satisfies the trait \code{is_coroutine}(as defined below) and is used as a coroutine.693 694 \begin{cfa code}786 This approach defines a coroutine as anything that satisfies the trait @is_coroutine@ (as defined below) and is used as a coroutine. 
787 788 \begin{cfa} 695 789 trait is_coroutine(dtype T) { 696 790 void main(T& this); … … 700 794 forall( dtype T | is_coroutine(T) ) void suspend(T&); 701 795 forall( dtype T | is_coroutine(T) ) void resume (T&); 702 \end{cfa code}703 This ensures that an object is not a coroutine until \code{resume}is called on the object.704 Correspondingly, any object that is passed to \code{resume} is a coroutine since it must satisfy the \code{is_coroutine}trait to compile.705 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the \code{get_coroutine}routine.706 The \CFA keyword \code{coroutine}simply has the effect of implementing the getter and forward declarations required for users to implement the main routine.796 \end{cfa} 797 This ensures that an object is not a coroutine until @resume@ is called on the object. 798 Correspondingly, any object that is passed to @resume@ is a coroutine since it must satisfy the @is_coroutine@ trait to compile. 799 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine. 800 The \CFA keyword @coroutine@ simply has the effect of implementing the getter and forward declarations required for users to implement the main routine. 707 801 708 802 \begin{center} 709 803 \begin{tabular}{c c c} 710 \begin{cfa code}[tabsize=3]804 \begin{cfa}[tabsize=3] 711 805 coroutine MyCoroutine { 712 806 int someValue; 713 807 }; 714 \end{cfa code} & == & \begin{cfacode}[tabsize=3]808 \end{cfa} & == & \begin{cfa}[tabsize=3] 715 809 struct MyCoroutine { 716 810 int someValue; … … 726 820 727 821 void main(struct MyCoroutine* this); 728 \end{cfa code}822 \end{cfa} 729 823 \end{tabular} 730 824 \end{center} … … 736 830 Both user and kernel threads are supported, where user threads are the concurrency mechanism and kernel threads are the parallel mechanism. 737 831 User threads offer a flexible and lightweight interface. 738 A thread can be declared using a struct declaration \code{thread}as follows:739 740 \begin{cfa code}832 A thread can be declared using a struct declaration @thread@ as follows: 833 834 \begin{cfa} 741 835 thread foo {}; 742 \end{cfa code}836 \end{cfa} 743 837 744 838 As for coroutines, the keyword is a thin wrapper around a \CFA trait: 745 839 746 \begin{cfa code}840 \begin{cfa} 747 841 trait is_thread(dtype T) { 748 842 void ^?{}(T & mutex this); … … 750 844 thread_desc* get_thread(T & this); 751 845 }; 752 \end{cfa code}846 \end{cfa} 753 847 754 848 Obviously, for this thread implementation to be useful it must run some user code. 755 849 Several other threading interfaces use a function-pointer representation as the interface of threads (for example \Csharp~\cite{Csharp} and Scala~\cite{Scala}). 756 However, this proposal considers that statically tying a \code{main}routine to a thread supersedes this approach.757 Since the \code{main}routine is already a special routine in \CFA (where the program begins), it is a natural extension of the semantics to use overloading to declare mains for different threads (the normal main being the main of the initial thread).758 As such the \code{main}routine of a thread can be defined as759 \begin{cfa code}850 However, this proposal considers that statically tying a @main@ routine to a thread supersedes this approach. 
851 Since the @main@ routine is already a special routine in \CFA (where the program begins), it is a natural extension of the semantics to use overloading to declare mains for different threads (the normal main being the main of the initial thread). 852 As such the @main@ routine of a thread can be defined as 853 \begin{cfa} 760 854 thread foo {}; 761 855 … … 763 857 sout | "Hello World!" | endl; 764 858 } 765 \end{cfa code}766 767 In this example, threads of type \code{foo} start execution in the \code{void main(foo &)} routine, which prints \code{"Hello World!".}While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity.859 \end{cfa} 860 861 In this example, threads of type @foo@ start execution in the @void main(foo &)@ routine, which prints @"Hello World!".@ While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity. 768 862 With the static semantics it is trivial to write a thread type that takes a function pointer as a parameter and executes it on its stack asynchronously. 769 \begin{cfa code}863 \begin{cfa} 770 864 typedef void (*voidFunc)(int); 771 865 … … 781 875 782 876 void main(FuncRunner & this) { 783 // thread starts here and runs the function877 // thread starts here and runs the function 784 878 this.func( this.arg ); 785 879 } … … 793 887 return 0? 794 888 } 795 \end{cfa code}889 \end{cfa} 796 890 797 891 A consequence of the strongly typed approach to main is that memory layout of parameters and return values to/from a thread are now explicitly specified in the \textbf{api}. 798 892 799 893 Of course, for threads to be useful, it must be possible to start and stop threads and wait for them to complete execution. 800 While using an \textbf{api} such as \code{fork} and \code{join}is relatively common in the literature, such an interface is unnecessary.801 Indeed, the simplest approach is to use \textbf{raii} principles and have threads \code{fork} after the constructor has completed and \code{join}before the destructor runs.802 \begin{cfa code}894 While using an \textbf{api} such as @fork@ and @join@ is relatively common in the literature, such an interface is unnecessary. 895 Indeed, the simplest approach is to use \textbf{raii} principles and have threads @fork@ after the constructor has completed and @join@ before the destructor runs. 896 \begin{cfa} 803 897 thread World; 804 898 … … 809 903 void main() { 810 904 World w; 811 // Thread forks here812 813 // Printing "Hello " and "World!" are run concurrently905 // Thread forks here 906 907 // Printing "Hello " and "World!" are run concurrently 814 908 sout | "Hello " | endl; 815 909 816 // Implicit join at end of scope817 } 818 \end{cfa code}910 // Implicit join at end of scope 911 } 912 \end{cfa} 819 913 820 914 This semantic has several advantages over explicit semantics: a thread is always started and stopped exactly once, users cannot make any programming errors, and it naturally scales to multiple threads meaning basic synchronization is very simple. 821 915 822 \begin{cfa code}916 \begin{cfa} 823 917 thread MyThread { 824 918 //... 825 919 }; 826 920 827 // main921 // main 828 922 void main(MyThread& this) { 829 923 //... 
… … 832 926 void foo() { 833 927 MyThread thrds[10]; 834 // Start 10 threads at the beginning of the scope928 // Start 10 threads at the beginning of the scope 835 929 836 930 DoStuff(); 837 931 838 // Wait for the 10 threads to finish839 } 840 \end{cfa code}932 // Wait for the 10 threads to finish 933 } 934 \end{cfa} 841 935 842 936 However, one of the drawbacks of this approach is that threads always form a tree where nodes must always outlive their children, i.e., they are always destroyed in the opposite order of construction because of C scoping rules. 843 937 This restriction is relaxed by using dynamic allocation, so threads can outlive the scope in which they are created, much like dynamically allocating memory lets objects outlive the scope in which they are created. 844 938 845 \begin{cfa code}939 \begin{cfa} 846 940 thread MyThread { 847 941 //... … … 855 949 MyThread* long_lived; 856 950 { 857 // Start a thread at the beginning of the scope951 // Start a thread at the beginning of the scope 858 952 MyThread short_lived; 859 953 860 // create another thread that will outlive the thread in this scope954 // create another thread that will outlive the thread in this scope 861 955 long_lived = new MyThread; 862 956 863 957 DoStuff(); 864 958 865 // Wait for the thread short_lived to finish959 // Wait for the thread short_lived to finish 866 960 } 867 961 DoMoreStuff(); 868 962 869 // Now wait for the long_lived to finish963 // Now wait for the long_lived to finish 870 964 delete long_lived; 871 965 } 872 \end{cfa code}966 \end{cfa} 873 967 874 968 … … 888 982 At the lowest level, concurrent paradigms are implemented as atomic operations and locks. 889 983 Many such mechanisms have been proposed, including semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}. 890 However, for productivity reasons it is desirable to have a higher-level construct be the core concurrency paradigm~\cite{H PP:Study}.984 However, for productivity reasons it is desirable to have a higher-level construct be the core concurrency paradigm~\cite{Hochstein05}. 891 985 892 986 An approach that is worth mentioning because it is gaining in popularity is transactional memory~\cite{Herlihy93}. … … 909 1003 Methods range from low-level locks, which are fast and flexible but require significant attention to be correct, to higher-level concurrency techniques, which sacrifice some performance in order to improve ease of use. 910 1004 Ease of use comes by either guaranteeing some problems cannot occur (e.g., being deadlock free) or by offering a more explicit coupling between data and corresponding critical section. 911 For example, the \CC \code{std::atomic<T>}offers an easy way to express mutual-exclusion on a restricted set of operations (e.g., reading/writing large types atomically).1005 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (e.g., reading/writing large types atomically). 912 1006 Another challenge with low-level locks is composability. 913 1007 Locks have restricted composability because it takes careful organizing for multiple locks to be used while preventing deadlocks. … … 938 1032 This concept is generally associated with object-oriented languages like Java~\cite{Java} or \uC~\cite{uC++book} but does not strictly require OO semantics. 
939 1033 The only requirement is the ability to declare a handle to a shared object and a set of routines that act on it: 940 \begin{cfa code}1034 \begin{cfa} 941 1035 typedef /*some monitor type*/ monitor; 942 1036 int f(monitor & m); 943 1037 944 1038 int main() { 945 monitor m; // Handle m946 f(m); // Routine using handle947 } 948 \end{cfa code}1039 monitor m; // Handle m 1040 f(m); // Routine using handle 1041 } 1042 \end{cfa} 949 1043 950 1044 % ====================================================================== … … 956 1050 First, it is necessary to use pass-by-reference over pass-by-value for monitor routines. 957 1051 This semantics is important, because at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied. 958 Therefore, monitors are non-copy-able objects ( \code{dtype}).1052 Therefore, monitors are non-copy-able objects (@dtype@). 959 1053 960 1054 Another aspect to consider is when a monitor acquires its mutual exclusion. 961 1055 For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry. 962 Passthrough can occur for generic helper routines ( \code{swap}, \code{sort}, etc.) or specific helper routines like the following to implement an atomic counter:963 964 \begin{cfa code}1056 Passthrough can occur for generic helper routines (@swap@, @sort@, etc.) or specific helper routines like the following to implement an atomic counter: 1057 1058 \begin{cfa} 965 1059 monitor counter_t { /*...see section $\ref{data}$...*/ }; 966 1060 967 void ?{}(counter_t & nomutex this); // constructor968 size_t ++?(counter_t & mutex this); // increment969 970 // need for mutex is platform dependent971 void ?{}(size_t * this, counter_t & mutex cnt); // conversion972 \end{cfa code}1061 void ?{}(counter_t & nomutex this); // constructor 1062 size_t ++?(counter_t & mutex this); // increment 1063 1064 // need for mutex is platform dependent 1065 void ?{}(size_t * this, counter_t & mutex cnt); // conversion 1066 \end{cfa} 973 1067 This counter is used as follows: 974 1068 \begin{center} 975 1069 \begin{tabular}{c @{\hskip 0.35in} c @{\hskip 0.35in} c} 976 \begin{cfa code}977 // shared counter1070 \begin{cfa} 1071 // shared counter 978 1072 counter_t cnt1, cnt2; 979 1073 980 // multiple threads access counter1074 // multiple threads access counter 981 1075 thread 1 : cnt1++; cnt2++; 982 1076 thread 2 : cnt1++; cnt2++; … … 984 1078 ... 985 1079 thread N : cnt1++; cnt2++; 986 \end{cfa code}1080 \end{cfa} 987 1081 \end{tabular} 988 1082 \end{center} 989 Notice how the counter is used without any explicit synchronization and yet supports thread-safe semantics for both reading and writing, which is similar in usage to the \CC template \code{std::atomic}.990 991 Here, the constructor ( \code{?\{\}}) uses the \code{nomutex}keyword to signify that it does not acquire the monitor mutual-exclusion when constructing.992 This semantics is because an object not yet con \-structed should never be shared and therefore does not require mutual exclusion.1083 Notice how the counter is used without any explicit synchronization and yet supports thread-safe semantics for both reading and writing, which is similar in usage to the \CC template @std::atomic@. 1084 1085 Here, the constructor (@?{}@) uses the @nomutex@ keyword to signify that it does not acquire the monitor mutual-exclusion when constructing. 
1086 This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion. 993 1087 Furthermore, it allows the implementation greater freedom when it initializes the monitor locking. 994 The prefix increment operator uses \code{mutex}to protect the incrementing process from race conditions.995 Finally, there is a conversion operator from \code{counter_t} to \code{size_t}.996 This conversion may or may not require the \code{mutex} keyword depending on whether or not reading a \code{size_t}is an atomic operation.1088 The prefix increment operator uses @mutex@ to protect the incrementing process from race conditions. 1089 Finally, there is a conversion operator from @counter_t@ to @size_t@. 1090 This conversion may or may not require the @mutex@ keyword depending on whether or not reading a @size_t@ is an atomic operation. 997 1091 998 1092 For maximum usability, monitors use \textbf{multi-acq} semantics, which means a single thread can acquire the same monitor multiple times without deadlock. 999 1093 For example, listing \ref{fig:search} uses recursion and \textbf{multi-acq} to print values inside a binary tree. 1000 1094 \begin{figure} 1001 \begin{cfa code}[caption={Recursive printing algorithm using \textbf{multi-acq}.},label={fig:search}]1095 \begin{cfa}[caption={Recursive printing algorithm using \textbf{multi-acq}.},label={fig:search}] 1002 1096 monitor printer { ... }; 1003 1097 struct tree { … … 1012 1106 print(p, t->right); 1013 1107 } 1014 \end{cfa code}1108 \end{cfa} 1015 1109 \end{figure} 1016 1110 1017 Having both \code{mutex} and \code{nomutex}keywords can be redundant, depending on the meaning of a routine having neither of these keywords.1018 For example, it is reasonable that it should default to the safest option ( \code{mutex}) when given a routine without qualifiers \code{void foo(counter_t & this)}, whereas assuming \code{nomutex}is unsafe and may cause subtle errors.1019 On the other hand, \code{nomutex}is the ``normal'' parameter behaviour, it effectively states explicitly that ``this routine is not special''.1111 Having both @mutex@ and @nomutex@ keywords can be redundant, depending on the meaning of a routine having neither of these keywords. 1112 For example, it is reasonable that it should default to the safest option (@mutex@) when given a routine without qualifiers @void foo(counter_t & this)@, whereas assuming @nomutex@ is unsafe and may cause subtle errors. 1113 On the other hand, @nomutex@ is the ``normal'' parameter behaviour, it effectively states explicitly that ``this routine is not special''. 1020 1114 Another alternative is making exactly one of these keywords mandatory, which provides the same semantics but without the ambiguity of supporting routines with neither keyword. 1021 1115 Mandatory keywords would also have the added benefit of being self-documented but at the cost of extra typing. … … 1023 1117 Mandatory keywords in \CFA would imply that the compiler must know without doubt whether or not a parameter is a monitor or not. 1024 1118 Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred. 
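For instance, consider a hedged sketch of a generic routine constrained only by the @is_monitor@ trait (see section \ref{data}); the routine name is illustrative:
\begin{cfa}
// hypothetical generic helper: T is known to behave like a monitor only
// through the is_monitor trait, not through the monitor keyword, so a
// mandatory or defaulted mutex qualifier is hard for the compiler to validate
forall( dtype T | is_monitor( T ) )
void do_locked( T & t );   // should t implicitly be a mutex parameter?
\end{cfa}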
1025 For this reason, \CFA only has the \code{mutex} keyword and uses no keyword to mean \code{nomutex}.1026 1027 The next semantic decision is to establish when \code{mutex}may be used as a type qualifier.1119 For this reason, \CFA only has the @mutex@ keyword and uses no keyword to mean @nomutex@. 1120 1121 The next semantic decision is to establish when @mutex@ may be used as a type qualifier. 1028 1122 Consider the following declarations: 1029 \begin{cfa code}1123 \begin{cfa} 1030 1124 int f1(monitor & mutex m); 1031 1125 int f2(const monitor & mutex m); … … 1033 1127 int f4(monitor * mutex m []); 1034 1128 int f5(graph(monitor *) & mutex m); 1035 \end{cfa code}1129 \end{cfa} 1036 1130 The problem is to identify which object(s) should be acquired. 1037 1131 Furthermore, each object needs to be acquired only once. 1038 In the case of simple routines like \code{f1} and \code{f2}it is easy to identify an exhaustive list of objects to acquire on entry.1039 Adding indirections ( \code{f3}) still allows the compiler and programmer to identify which object is acquired.1040 However, adding in arrays ( \code{f4}) makes it much harder.1132 In the case of simple routines like @f1@ and @f2@ it is easy to identify an exhaustive list of objects to acquire on entry. 1133 Adding indirections (@f3@) still allows the compiler and programmer to identify which object is acquired. 1134 However, adding in arrays (@f4@) makes it much harder. 1041 1135 Array lengths are not necessarily known in C, and even then, making sure objects are only acquired once becomes none-trivial. 1042 This problem can be extended to absurd limits like \code{f5}, which uses a graph of monitors.1136 This problem can be extended to absurd limits like @f5@, which uses a graph of monitors. 1043 1137 To make the issue tractable, this project imposes the requirement that a routine may only acquire one monitor per parameter and it must be the type of the parameter with at most one level of indirection (ignoring potential qualifiers). 1044 Also note that while routine \code{f3} can be supported, meaning that monitor \code{**m}is acquired, passing an array to this routine would be type-safe and yet result in undefined behaviour because only the first element of the array is acquired.1138 Also note that while routine @f3@ can be supported, meaning that monitor @**m@ is acquired, passing an array to this routine would be type-safe and yet result in undefined behaviour because only the first element of the array is acquired. 1045 1139 However, this ambiguity is part of the C type-system with respects to arrays. 
1046 For this reason, \code{mutex}is disallowed in the context where arrays may be passed:1047 \begin{cfa code}1048 int f1(monitor & mutex m); // Okay : recommended case1049 int f2(monitor * mutex m); // Not Okay : Could be an array1050 int f3(monitor mutex m []); // Not Okay : Array of unknown length1051 int f4(monitor ** mutex m); // Not Okay : Could be an array1052 int f5(monitor * mutex m []); // Not Okay : Array of unknown length1053 \end{cfa code}1140 For this reason, @mutex@ is disallowed in the context where arrays may be passed: 1141 \begin{cfa} 1142 int f1(monitor & mutex m); // Okay : recommended case 1143 int f2(monitor * mutex m); // Not Okay : Could be an array 1144 int f3(monitor mutex m []); // Not Okay : Array of unknown length 1145 int f4(monitor ** mutex m); // Not Okay : Could be an array 1146 int f5(monitor * mutex m []); // Not Okay : Array of unknown length 1147 \end{cfa} 1054 1148 Note that not all array functions are actually distinct in the type system. 1055 1149 However, even if the code generation could tell the difference, the extra information is still not sufficient to extend meaningfully the monitor call semantic. … … 1057 1151 Unlike object-oriented monitors, where calling a mutex member \emph{implicitly} acquires mutual-exclusion of the receiver object, \CFA uses an explicit mechanism to specify the object that acquires mutual-exclusion. 1058 1152 A consequence of this approach is that it extends naturally to multi-monitor calls. 1059 \begin{cfa code}1153 \begin{cfa} 1060 1154 int f(MonitorA & mutex a, MonitorB & mutex b); 1061 1155 … … 1063 1157 MonitorB b; 1064 1158 f(a,b); 1065 \end{cfa code}1159 \end{cfa} 1066 1160 While OO monitors could be extended with a mutex qualifier for multiple-monitor calls, no example of this feature could be found. 1067 1161 The capability to acquire multiple locks before entering a critical section is called \emph{\textbf{bulk-acq}}. … … 1071 1165 This consistent ordering means acquiring multiple monitors is safe from deadlock when using \textbf{bulk-acq}. 1072 1166 However, users can still force the acquiring order. 1073 For example, notice which routines use \code{mutex}/\code{nomutex}and how this affects acquiring order:1074 \begin{cfa code}1075 void foo(A& mutex a, B& mutex b) { // acquire a & b1167 For example, notice which routines use @mutex@/@nomutex@ and how this affects acquiring order: 1168 \begin{cfa} 1169 void foo(A& mutex a, B& mutex b) { // acquire a & b 1076 1170 ... 1077 1171 } 1078 1172 1079 void bar(A& mutex a, B& /*nomutex*/ b) { // acquire a1080 ... foo(a, b); ... // acquire b1081 } 1082 1083 void baz(A& /*nomutex*/ a, B& mutex b) { // acquire b1084 ... foo(a, b); ... // acquire a1085 } 1086 \end{cfa code}1087 The \textbf{multi-acq} monitor lock allows a monitor lock to be acquired by both \code{bar} or \code{baz} and acquired again in \code{foo}.1088 In the calls to \code{bar} and \code{baz}the monitors are acquired in opposite order.1173 void bar(A& mutex a, B& /*nomutex*/ b) { // acquire a 1174 ... foo(a, b); ... // acquire b 1175 } 1176 1177 void baz(A& /*nomutex*/ a, B& mutex b) { // acquire b 1178 ... foo(a, b); ... // acquire a 1179 } 1180 \end{cfa} 1181 The \textbf{multi-acq} monitor lock allows a monitor lock to be acquired by both @bar@ or @baz@ and acquired again in @foo@. 1182 In the calls to @bar@ and @baz@ the monitors are acquired in opposite order. 1089 1183 1090 1184 However, such use leads to lock acquiring order problems. 
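A hedged sketch of one problematic interleaving, reusing @bar@ and @baz@ from above (the thread notation follows the earlier counter example and is illustrative only):
\begin{cfa}
A a;   B b;

thread 1 : bar( a, b );   // acquires a, then acquires b inside foo
thread 2 : baz( a, b );   // acquires b, then acquires a inside foo

// if thread 1 holds a while thread 2 holds b, neither nested call to foo
// can proceed and the two threads deadlock
\end{cfa}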
1091 In the example above, the user uses implicit ordering in the case of function \code{foo} but explicit ordering in the case of \code{bar} and \code{baz}.1185 In the example above, the user uses implicit ordering in the case of function @foo@ but explicit ordering in the case of @bar@ and @baz@. 1092 1186 This subtle difference means that calling these routines concurrently may lead to deadlock and is therefore undefined behaviour. 1093 1187 As shown~\cite{Lister77}, solving this problem requires: … … 1101 1195 1102 1196 For example, \textbf{multi-acq} and \textbf{bulk-acq} can be used together in interesting ways: 1103 \begin{cfa code}1197 \begin{cfa} 1104 1198 monitor bank { ... }; 1105 1199 … … 1110 1204 deposit( yourbank, me2you ); 1111 1205 } 1112 \end{cfa code}1206 \end{cfa} 1113 1207 This example shows a trivial solution to the bank-account transfer problem~\cite{BankTransfer}. 1114 1208 Without \textbf{multi-acq} and \textbf{bulk-acq}, the solution to this problem is much more involved and requires careful engineering. 1115 1209 1116 \subsection{\code{mutex} statement} \label{mutex-stmt} 1117 1118 The call semantics discussed above have one software engineering issue: only a routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the \code{mutex} statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}. 1119 Table \ref{lst:mutex-stmt} shows an example of the \code{mutex} statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired. 1120 Beyond naming, the \code{mutex} statement has no semantic difference from a routine call with \code{mutex} parameters. 1210 1211 \subsection{\protect\lstinline|mutex| statement} \label{mutex-stmt} 1212 1213 The call semantics discussed above have one software engineering issue: only a routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the @mutex@ statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}. 1214 Table \ref{lst:mutex-stmt} shows an example of the @mutex@ statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired. 1215 Beyond naming, the @mutex@ statement has no semantic difference from a routine call with @mutex@ parameters. 1121 1216 1122 1217 \begin{table} 1123 1218 \begin{center} 1124 1219 \begin{tabular}{|c|c|} 1125 function call & \code{mutex}statement \\1220 function call & @mutex@ statement \\ 1126 1221 \hline 1127 \begin{cfa code}[tabsize=3]1222 \begin{cfa}[tabsize=3] 1128 1223 monitor M {}; 1129 1224 void foo( M & mutex m1, M & mutex m2 ) { 1130 // critical section1225 // critical section 1131 1226 } 1132 1227 … … 1134 1229 foo( m1, m2 ); 1135 1230 } 1136 \end{cfa code}&\begin{cfacode}[tabsize=3]1231 \end{cfa}&\begin{cfa}[tabsize=3] 1137 1232 monitor M {}; 1138 1233 void bar( M & m1, M & m2 ) { 1139 1234 mutex(m1, m2) { 1140 // critical section1141 } 1142 } 1143 1144 1145 \end{cfa code}1235 // critical section 1236 } 1237 } 1238 1239 1240 \end{cfa} 1146 1241 \end{tabular} 1147 1242 \end{center} 1148 \caption{Regular call semantics vs. \ code{mutex}statement}1243 \caption{Regular call semantics vs. \protect\lstinline|mutex| statement} 1149 1244 \label{lst:mutex-stmt} 1150 1245 \end{table} … … 1159 1254 This data should be intrinsic to the monitor declaration to prevent any accidental use of data without its appropriate protection. 
1160 1255 For example, here is a complete version of the counter shown in section \ref{call}: 1161 \begin{cfa code}1256 \begin{cfa} 1162 1257 monitor counter_t { 1163 1258 int value; … … 1172 1267 } 1173 1268 1174 // need for mutex is platform dependent here1269 // need for mutex is platform dependent here 1175 1270 void ?{}(int * this, counter_t & mutex cnt) { 1176 1271 *this = (int)cnt; 1177 1272 } 1178 \end{cfa code}1179 1180 Like threads and coroutines, monitors are defined in terms of traits with some additional language support in the form of the \code{monitor}keyword.1273 \end{cfa} 1274 1275 Like threads and coroutines, monitors are defined in terms of traits with some additional language support in the form of the @monitor@ keyword. 1181 1276 The monitor trait is: 1182 \begin{cfa code}1277 \begin{cfa} 1183 1278 trait is_monitor(dtype T) { 1184 1279 monitor_desc * get_monitor( T & ); 1185 1280 void ^?{}( T & mutex ); 1186 1281 }; 1187 \end{cfa code}1188 Note that the destructor of a monitor must be a \code{mutex}routine to prevent deallocation while a thread is accessing the monitor.1189 As with any object, calls to a monitor, using \code{mutex}or otherwise, is undefined behaviour after the destructor has run.1282 \end{cfa} 1283 Note that the destructor of a monitor must be a @mutex@ routine to prevent deallocation while a thread is accessing the monitor. 1284 As with any object, calls to a monitor, using @mutex@ or otherwise, is undefined behaviour after the destructor has run. 1190 1285 1191 1286 % ====================================================================== … … 1202 1297 First, here is a simple example of internal scheduling: 1203 1298 1204 \begin{cfa code}1299 \begin{cfa} 1205 1300 monitor A { 1206 1301 condition e; … … 1209 1304 void foo(A& mutex a1, A& mutex a2) { 1210 1305 ... 1211 // Wait for cooperation from bar()1306 // Wait for cooperation from bar() 1212 1307 wait(a1.e); 1213 1308 ... … … 1215 1310 1216 1311 void bar(A& mutex a1, A& mutex a2) { 1217 // Provide cooperation for foo()1312 // Provide cooperation for foo() 1218 1313 ... 1219 // Unblock foo1314 // Unblock foo 1220 1315 signal(a1.e); 1221 1316 } 1222 \end{cfa code}1317 \end{cfa} 1223 1318 There are two details to note here. 1224 First, \code{signal}is a delayed operation; it only unblocks the waiting thread when it reaches the end of the critical section.1319 First, @signal@ is a delayed operation; it only unblocks the waiting thread when it reaches the end of the critical section. 1225 1320 This semantics is needed to respect mutual-exclusion, i.e., the signaller and signalled thread cannot be in the monitor simultaneously. 1226 The alternative is to return immediately after the call to \code{signal}, which is significantly more restrictive.1227 Second, in \CFA, while it is common to store a \code{condition} as a field of the monitor, a \code{condition}variable can be stored/created independently of a monitor.1228 Here routine \code{foo} waits for the \code{signal} from \code{bar}before making further progress, ensuring a basic ordering.1229 1230 An important aspect of the implementation is that \CFA does not allow barging, which means that once function \code{bar} releases the monitor, \code{foo}is guaranteed to be the next thread to acquire the monitor (unless some other thread waited on the same condition).1321 The alternative is to return immediately after the call to @signal@, which is significantly more restrictive. 
1322 Second, in \CFA, while it is common to store a @condition@ as a field of the monitor, a @condition@ variable can be stored/created independently of a monitor. 1323 Here routine @foo@ waits for the @signal@ from @bar@ before making further progress, ensuring a basic ordering. 1324 1325 An important aspect of the implementation is that \CFA does not allow barging, which means that once function @bar@ releases the monitor, @foo@ is guaranteed to be the next thread to acquire the monitor (unless some other thread waited on the same condition). 1231 1326 This guarantee offers the benefit of not having to loop around waits to recheck that a condition is met. 1232 1327 The main reason \CFA offers this guarantee is that users can easily introduce barging if it becomes a necessity but adding barging prevention or barging avoidance is more involved without language support. … … 1240 1335 It is easy to understand the problem of multi-monitor scheduling using a series of pseudo-code examples. 1241 1336 Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors. 1242 Indeed, \code{wait}statements always use the implicit condition variable as parameters and explicitly name the monitors (A and B) associated with the condition.1337 Indeed, @wait@ statements always use the implicit condition variable as parameters and explicitly name the monitors (A and B) associated with the condition. 1243 1338 Note that in \CFA, condition variables are tied to a \emph{group} of monitors on first use (called branding), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors. 1244 1339 The example below shows the simple case of having two threads (one for each column) and a single monitor A. … … 1246 1341 \begin{multicols}{2} 1247 1342 thread 1 1248 \begin{ pseudo}1343 \begin{cfa} 1249 1344 acquire A 1250 1345 wait A 1251 1346 release A 1252 \end{ pseudo}1347 \end{cfa} 1253 1348 1254 1349 \columnbreak 1255 1350 1256 1351 thread 2 1257 \begin{ pseudo}1352 \begin{cfa} 1258 1353 acquire A 1259 1354 signal A 1260 1355 release A 1261 \end{ pseudo}1356 \end{cfa} 1262 1357 \end{multicols} 1263 1358 One thread acquires before waiting (atomically blocking and releasing A) and the other acquires before signalling. 1264 It is important to note here that both \code{wait} and \code{signal}must be called with the proper monitor(s) already acquired.1359 It is important to note here that both @wait@ and @signal@ must be called with the proper monitor(s) already acquired. 1265 1360 This semantic is a logical requirement for barging prevention. 1266 1361 1267 1362 A direct extension of the previous example is a \textbf{bulk-acq} version: 1268 1363 \begin{multicols}{2} 1269 \begin{ pseudo}1364 \begin{cfa} 1270 1365 acquire A & B 1271 1366 wait A & B 1272 1367 release A & B 1273 \end{ pseudo}1368 \end{cfa} 1274 1369 \columnbreak 1275 \begin{ pseudo}1370 \begin{cfa} 1276 1371 acquire A & B 1277 1372 signal A & B 1278 1373 release A & B 1279 \end{ pseudo}1374 \end{cfa} 1280 1375 \end{multicols} 1281 1376 \noindent This version uses \textbf{bulk-acq} (denoted using the {\sf\&} symbol), but the presence of multiple monitors does not add a particularly new meaning. … … 1285 1380 1286 1381 While deadlock issues can occur when nesting monitors, these issues are only a symptom of the fact that locks, and by extension monitors, are not perfectly composable. 
1287 For monitors, a well-known deadlock problem is the Nested Monitor Problem~\cite{Lister77}, which occurs when a \code{wait}is made by a thread that holds more than one monitor.1288 For example, the following pseudo-code runs into the nested-monitor problem:1382 For monitors, a well-known deadlock problem is the Nested Monitor Problem~\cite{Lister77}, which occurs when a @wait@ is made by a thread that holds more than one monitor. 1383 For example, the following cfa-code runs into the nested-monitor problem: 1289 1384 \begin{multicols}{2} 1290 \begin{ pseudo}1385 \begin{cfa} 1291 1386 acquire A 1292 1387 acquire B … … 1294 1389 release B 1295 1390 release A 1296 \end{ pseudo}1391 \end{cfa} 1297 1392 1298 1393 \columnbreak 1299 1394 1300 \begin{ pseudo}1395 \begin{cfa} 1301 1396 acquire A 1302 1397 acquire B … … 1304 1399 release B 1305 1400 release A 1306 \end{ pseudo}1401 \end{cfa} 1307 1402 \end{multicols} 1308 \noindent The \code{wait} only releases monitor \code{B} so the signalling thread cannot acquire monitor \code{A} to get to the \code{signal}.1309 Attempting release of all acquired monitors at the \code{wait} introduces a different set of problems, such as releasing monitor \code{C}, which has nothing to do with the \code{signal}.1403 \noindent The @wait@ only releases monitor @B@ so the signalling thread cannot acquire monitor @A@ to get to the @signal@. 1404 Attempting release of all acquired monitors at the @wait@ introduces a different set of problems, such as releasing monitor @C@, which has nothing to do with the @signal@. 1310 1405 1311 1406 However, for monitors as for locks, it is possible to write a program using nesting without encountering any problems if nesting is done correctly. 1312 For example, the next pseudo-code snippet acquires monitors {\sf A} then {\sf B} before waiting, while only acquiring {\sf B} when signalling, effectively avoiding the Nested Monitor Problem~\cite{Lister77}.1407 For example, the next cfa-code snippet acquires monitors {\sf A} then {\sf B} before waiting, while only acquiring {\sf B} when signalling, effectively avoiding the Nested Monitor Problem~\cite{Lister77}. 1313 1408 1314 1409 \begin{multicols}{2} 1315 \begin{ pseudo}1410 \begin{cfa} 1316 1411 acquire A 1317 1412 acquire B … … 1319 1414 release B 1320 1415 release A 1321 \end{ pseudo}1416 \end{cfa} 1322 1417 1323 1418 \columnbreak 1324 1419 1325 \begin{ pseudo}1420 \begin{cfa} 1326 1421 1327 1422 acquire B … … 1329 1424 release B 1330 1425 1331 \end{ pseudo}1426 \end{cfa} 1332 1427 \end{multicols} 1333 1428 … … 1341 1436 1342 1437 A larger example is presented to show complex issues for \textbf{bulk-acq} and its implementation options are analyzed. 1343 Listing \ref{lst:int-bulk- pseudo} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{lst:int-bulk-cfa} shows the corresponding \CFA code to implement the pseudo-code in listing \ref{lst:int-bulk-pseudo}.1344 For the purpose of translating the given pseudo-code into \CFA-code, any method of introducing a monitor is acceptable, e.g., \code{mutex} parameters, global variables, pointer parameters, or using locals with the \code{mutex}statement.1438 Listing \ref{lst:int-bulk-cfa} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{lst:int-bulk-cfa} shows the corresponding \CFA code to implement the cfa-code in listing \ref{lst:int-bulk-cfa}. 
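As a preview, the waiting side can, for instance, be transcribed with @mutex@ parameters instead of @mutex@ statements; a hedged sketch with hypothetical routine names @outer@ and @inner@:
\begin{cfa}
monitor A a;   monitor B b;   condition c;

void inner( A & mutex a, B & mutex b ) {   // bulk-acq of a and b
	// Code Section 2
	wait( c );
	// Code Section 3
}
void outer( A & mutex a ) {
	// Code Section 1
	inner( a, b );   // b is the global monitor
	// Code Section 4
}
\end{cfa}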
1439 For the purpose of translating the given cfa-code into \CFA-code, any method of introducing a monitor is acceptable, e.g., @mutex@ parameters, global variables, pointer parameters, or using locals with the @mutex@ statement. 1345 1440 1346 1441 \begin{figure}[!t] 1347 1442 \begin{multicols}{2} 1348 1443 Waiting thread 1349 \begin{ pseudo}[numbers=left]1444 \begin{cfa}[numbers=left] 1350 1445 acquire A 1351 // Code Section 11446 // Code Section 1 1352 1447 acquire A & B 1353 // Code Section 21448 // Code Section 2 1354 1449 wait A & B 1355 // Code Section 31450 // Code Section 3 1356 1451 release A & B 1357 // Code Section 41452 // Code Section 4 1358 1453 release A 1359 \end{ pseudo}1454 \end{cfa} 1360 1455 \columnbreak 1361 1456 Signalling thread 1362 \begin{ pseudo}[numbers=left, firstnumber=10,escapechar=|]1457 \begin{cfa}[numbers=left, firstnumber=10,escapechar=|] 1363 1458 acquire A 1364 // Code Section 51459 // Code Section 5 1365 1460 acquire A & B 1366 // Code Section 61461 // Code Section 6 1367 1462 |\label{line:signal1}|signal A & B 1368 // Code Section 71463 // Code Section 7 1369 1464 |\label{line:releaseFirst}|release A & B 1370 // Code Section 81465 // Code Section 8 1371 1466 |\label{line:lastRelease}|release A 1372 \end{ pseudo}1467 \end{cfa} 1373 1468 \end{multicols} 1374 \begin{cfa code}[caption={Internal scheduling with \textbf{bulk-acq}},label={lst:int-bulk-pseudo}]1375 \end{cfa code}1469 \begin{cfa}[caption={Internal scheduling with \textbf{bulk-acq}},label={lst:int-bulk-cfa}] 1470 \end{cfa} 1376 1471 \begin{center} 1377 \begin{cfa code}[xleftmargin=.4\textwidth]1472 \begin{cfa}[xleftmargin=.4\textwidth] 1378 1473 monitor A a; 1379 1474 monitor B b; 1380 1475 condition c; 1381 \end{cfa code}1476 \end{cfa} 1382 1477 \end{center} 1383 1478 \begin{multicols}{2} 1384 1479 Waiting thread 1385 \begin{cfa code}1480 \begin{cfa} 1386 1481 mutex(a) { 1387 // Code Section 11482 // Code Section 1 1388 1483 mutex(a, b) { 1389 // Code Section 21484 // Code Section 2 1390 1485 wait(c); 1391 // Code Section 31392 } 1393 // Code Section 41394 } 1395 \end{cfa code}1486 // Code Section 3 1487 } 1488 // Code Section 4 1489 } 1490 \end{cfa} 1396 1491 \columnbreak 1397 1492 Signalling thread 1398 \begin{cfa code}1493 \begin{cfa} 1399 1494 mutex(a) { 1400 // Code Section 51495 // Code Section 5 1401 1496 mutex(a, b) { 1402 // Code Section 61497 // Code Section 6 1403 1498 signal(c); 1404 // Code Section 71405 } 1406 // Code Section 81407 } 1408 \end{cfa code}1499 // Code Section 7 1500 } 1501 // Code Section 8 1502 } 1503 \end{cfa} 1409 1504 \end{multicols} 1410 \begin{cfa code}[caption={Equivalent \CFA code for listing \ref{lst:int-bulk-pseudo}},label={lst:int-bulk-cfa}]1411 \end{cfa code}1505 \begin{cfa}[caption={Equivalent \CFA code for listing \ref{lst:int-bulk-cfa}},label={lst:int-bulk-cfa}] 1506 \end{cfa} 1412 1507 \begin{multicols}{2} 1413 1508 Waiter 1414 \begin{ pseudo}[numbers=left]1509 \begin{cfa}[numbers=left] 1415 1510 acquire A 1416 1511 acquire A & B … … 1418 1513 release A & B 1419 1514 release A 1420 \end{ pseudo}1515 \end{cfa} 1421 1516 1422 1517 \columnbreak 1423 1518 1424 1519 Signaller 1425 \begin{ pseudo}[numbers=left, firstnumber=6,escapechar=|]1520 \begin{cfa}[numbers=left, firstnumber=6,escapechar=|] 1426 1521 acquire A 1427 1522 acquire A & B 1428 1523 signal A & B 1429 1524 release A & B 1430 |\label{line:secret}|// Secretly keep B here1525 |\label{line:secret}|// Secretly keep B here 1431 1526 release A 1432 // Wakeup waiter and transfer A & B1433 \end{ 
pseudo}1527 // Wakeup waiter and transfer A & B 1528 \end{cfa} 1434 1529 \end{multicols} 1435 \begin{cfa code}[caption={Listing \ref{lst:int-bulk-pseudo}, with delayed signalling comments},label={lst:int-secret}]1436 \end{cfa code}1530 \begin{cfa}[caption={Listing \ref{lst:int-bulk-cfa}, with delayed signalling comments},label={lst:int-secret}] 1531 \end{cfa} 1437 1532 \end{figure} 1438 1533 1439 The complexity begins at code sections 4 and 8 in listing \ref{lst:int-bulk- pseudo}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors.1440 The root of the problem is that \textbf{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous pseudo-code.1441 When the signaller thread reaches the location where it should ``release \code{A & B}'' (listing \ref{lst:int-bulk-pseudo} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor \code{B}to the waiting thread.1442 This ownership transfer is required in order to prevent barging into \code{B} by another thread, since both the signalling and signalled threads still need monitor \code{A}.1534 The complexity begins at code sections 4 and 8 in listing \ref{lst:int-bulk-cfa}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors. 1535 The root of the problem is that \textbf{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous cfa-code. 1536 When the signaller thread reaches the location where it should ``release @A & B@'' (listing \ref{lst:int-bulk-cfa} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor @B@ to the waiting thread. 1537 This ownership transfer is required in order to prevent barging into @B@ by another thread, since both the signalling and signalled threads still need monitor @A@. 1443 1538 There are three options: 1444 1539 … … 1452 1547 1453 1548 However, listing \ref{lst:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors. 1454 Listing \ref{lst:dependency} shows a slightly different example where a third thread is waiting on monitor \code{A}, using a different condition variable.1455 Because the third thread is signalled when secretly holding \code{B}, the goal becomes unreachable.1549 Listing \ref{lst:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable. 1550 Because the third thread is signalled when secretly holding @B@, the goal becomes unreachable. 
1456 1551 Depending on the order of signals (listing \ref{lst:dependency} line \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen: 1457 1552 1458 \paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor \code{A}needs to be passed to thread $\beta$ when thread $\alpha$ is done with it.1459 \paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor \code{B} needs to be retained and passed to thread $\alpha$ along with monitor \code{A}, which can be done directly or possibly using thread $\beta$ as an intermediate.1553 \paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor @A@ needs to be passed to thread $\beta$ when thread $\alpha$ is done with it. 1554 \paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor @B@ needs to be retained and passed to thread $\alpha$ along with monitor @A@, which can be done directly or possibly using thread $\beta$ as an intermediate. 1460 1555 \\ 1461 1556 … … 1471 1566 \begin{multicols}{3} 1472 1567 Thread $\alpha$ 1473 \begin{ pseudo}[numbers=left, firstnumber=1]1568 \begin{cfa}[numbers=left, firstnumber=1] 1474 1569 acquire A 1475 1570 acquire A & B … … 1477 1572 release A & B 1478 1573 release A 1479 \end{ pseudo}1574 \end{cfa} 1480 1575 \columnbreak 1481 1576 Thread $\gamma$ 1482 \begin{ pseudo}[numbers=left, firstnumber=6, escapechar=|]1577 \begin{cfa}[numbers=left, firstnumber=6, escapechar=|] 1483 1578 acquire A 1484 1579 acquire A & B … … 1487 1582 |\label{line:signal-a}|signal A 1488 1583 |\label{line:release-a}|release A 1489 \end{ pseudo}1584 \end{cfa} 1490 1585 \columnbreak 1491 1586 Thread $\beta$ 1492 \begin{ pseudo}[numbers=left, firstnumber=12, escapechar=|]1587 \begin{cfa}[numbers=left, firstnumber=12, escapechar=|] 1493 1588 acquire A 1494 1589 wait A 1495 1590 |\label{line:release-aa}|release A 1496 \end{ pseudo}1591 \end{cfa} 1497 1592 \end{multicols} 1498 \begin{cfa code}[caption={Pseudo-code for the three thread example.},label={lst:dependency}]1499 \end{cfa code}1593 \begin{cfa}[caption={Pseudo-code for the three thread example.},label={lst:dependency}] 1594 \end{cfa} 1500 1595 \begin{center} 1501 1596 \input{dependency} … … 1505 1600 \end{figure} 1506 1601 1507 In listing \ref{lst:int-bulk- pseudo}, there is a solution that satisfies both barging prevention and mutual exclusion.1508 If ownership of both monitors is transferred to the waiter when the signaller releases \code{A & B} and then the waiter transfers back ownership of \code{A} back to the signaller when it releases it, then the problem is solved (\code{B}is no longer in use at this point).1602 In listing \ref{lst:int-bulk-cfa}, there is a solution that satisfies both barging prevention and mutual exclusion. 1603 If ownership of both monitors is transferred to the waiter when the signaller releases @A & B@ and then the waiter transfers back ownership of @A@ back to the signaller when it releases it, then the problem is solved (@B@ is no longer in use at this point). 1509 1604 Dynamically finding the correct order is therefore the second possible solution. 1510 1605 The problem is effectively resolving a dependency graph of ownership requirements. 
… … 1514 1609 \begin{figure} 1515 1610 \begin{multicols}{2} 1516 \begin{ pseudo}1611 \begin{cfa} 1517 1612 acquire A 1518 1613 acquire B … … 1522 1617 release B 1523 1618 release A 1524 \end{ pseudo}1619 \end{cfa} 1525 1620 1526 1621 \columnbreak 1527 1622 1528 \begin{ pseudo}1623 \begin{cfa} 1529 1624 acquire A 1530 1625 acquire B … … 1534 1629 release B 1535 1630 release A 1536 \end{ pseudo}1631 \end{cfa} 1537 1632 \end{multicols} 1538 \begin{cfa code}[caption={Extension to three monitors of listing \ref{lst:int-bulk-pseudo}},label={lst:explosion}]1539 \end{cfa code}1633 \begin{cfa}[caption={Extension to three monitors of listing \ref{lst:int-bulk-cfa}},label={lst:explosion}] 1634 \end{cfa} 1540 1635 \end{figure} 1541 1636 … … 1546 1641 \subsubsection{Partial Signalling} \label{partial-sig} 1547 1642 Finally, the solution that is chosen for \CFA is to use partial signalling. 1548 Again using listing \ref{lst:int-bulk- pseudo}, the partial signalling solution transfers ownership of monitor \code{B} at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor \code{A}.1643 Again using listing \ref{lst:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@. 1549 1644 Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread. 1550 1645 This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met. … … 1554 1649 Using partial signalling, listing \ref{lst:dependency} can be solved easily: 1555 1650 \begin{itemize} 1556 \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor \code{B} to thread $\alpha$ and continues to hold monitor \code{A}.1557 \item When thread $\gamma$ reaches line \ref{line:release-a} it transfers monitor \code{A}to thread $\beta$ and wakes it up.1558 \item When thread $\beta$ reaches line \ref{line:release-aa} it transfers monitor \code{A}to thread $\alpha$ and wakes it up.1651 \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor @B@ to thread $\alpha$ and continues to hold monitor @A@. 1652 \item When thread $\gamma$ reaches line \ref{line:release-a} it transfers monitor @A@ to thread $\beta$ and wakes it up. 1653 \item When thread $\beta$ reaches line \ref{line:release-aa} it transfers monitor @A@ to thread $\alpha$ and wakes it up. 1559 1654 \end{itemize} 1560 1655 … … 1566 1661 \begin{table} 1567 1662 \begin{tabular}{|c|c|} 1568 \code{signal} & \code{signal_block}\\1663 @signal@ & @signal_block@ \\ 1569 1664 \hline 1570 \begin{cfa code}[tabsize=3]1665 \begin{cfa}[tabsize=3] 1571 1666 monitor DatingService 1572 1667 { 1573 // compatibility codes1668 // compatibility codes 1574 1669 enum{ CCodes = 20 }; 1575 1670 … … 1582 1677 condition exchange; 1583 1678 1584 int girl(int phoneNo, int c code)1679 int girl(int phoneNo, int cfa) 1585 1680 { 1586 // no compatible boy ?1587 if(empty(boys[c code]))1681 // no compatible boy ? 
1682 if(empty(boys[cfa])) 1588 1683 { 1589 // wait for boy1590 wait(girls[c code]);1591 1592 // make phone number available1684 // wait for boy 1685 wait(girls[cfa]); 1686 1687 // make phone number available 1593 1688 girlPhoneNo = phoneNo; 1594 1689 1595 // wake boy from chair1690 // wake boy from chair 1596 1691 signal(exchange); 1597 1692 } 1598 1693 else 1599 1694 { 1600 // make phone number available1695 // make phone number available 1601 1696 girlPhoneNo = phoneNo; 1602 1697 1603 // wake boy1604 signal(boys[c code]);1605 1606 // sit in chair1698 // wake boy 1699 signal(boys[cfa]); 1700 1701 // sit in chair 1607 1702 wait(exchange); 1608 1703 } … … 1610 1705 } 1611 1706 1612 int boy(int phoneNo, int c code)1707 int boy(int phoneNo, int cfa) 1613 1708 { 1614 // same as above1615 // with boy/girl interchanged1616 } 1617 \end{cfa code}&\begin{cfacode}[tabsize=3]1709 // same as above 1710 // with boy/girl interchanged 1711 } 1712 \end{cfa}&\begin{cfa}[tabsize=3] 1618 1713 monitor DatingService 1619 1714 { 1620 // compatibility codes1715 // compatibility codes 1621 1716 enum{ CCodes = 20 }; 1622 1717 … … 1627 1722 condition girls[CCodes]; 1628 1723 condition boys [CCodes]; 1629 // exchange is not needed1630 1631 int girl(int phoneNo, int c code)1724 // exchange is not needed 1725 1726 int girl(int phoneNo, int cfa) 1632 1727 { 1633 // no compatible boy ?1634 if(empty(boys[c code]))1728 // no compatible boy ? 1729 if(empty(boys[cfa])) 1635 1730 { 1636 // wait for boy1637 wait(girls[c code]);1638 1639 // make phone number available1731 // wait for boy 1732 wait(girls[cfa]); 1733 1734 // make phone number available 1640 1735 girlPhoneNo = phoneNo; 1641 1736 1642 // wake boy from chair1737 // wake boy from chair 1643 1738 signal(exchange); 1644 1739 } 1645 1740 else 1646 1741 { 1647 // make phone number available1742 // make phone number available 1648 1743 girlPhoneNo = phoneNo; 1649 1744 1650 // wake boy1651 signal_block(boys[c code]);1652 1653 // second handshake unnecessary1745 // wake boy 1746 signal_block(boys[cfa]); 1747 1748 // second handshake unnecessary 1654 1749 1655 1750 } … … 1657 1752 } 1658 1753 1659 int boy(int phoneNo, int c code)1754 int boy(int phoneNo, int cfa) 1660 1755 { 1661 // same as above1662 // with boy/girl interchanged1663 } 1664 \end{cfa code}1756 // same as above 1757 // with boy/girl interchanged 1758 } 1759 \end{cfa} 1665 1760 \end{tabular} 1666 \caption{Dating service example using \ code{signal} and \code{signal_block}. }1761 \caption{Dating service example using \protect\lstinline|signal| and \protect\lstinline|signal_block|. } 1667 1762 \label{tbl:datingservice} 1668 1763 \end{table} 1669 1764 An important note is that, until now, signalling a monitor was a delayed operation. 1670 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the \code{signal}statement.1671 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the \code{signal_block}routine.1765 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the @signal@ statement. 1766 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the @signal_block@ routine. 
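A minimal hedged sketch of the observable difference, using hypothetical monitor and routine names and assuming one thread is already blocked in @waiter@ when @signaller@ is called:
\begin{cfa}
monitor M { condition c; bool done; };

void waiter( M & mutex m ) {
	wait( m.c );
	m.done = true;        // runs during the waiter's turn inside the monitor
}

void signaller( M & mutex m ) {
	m.done = false;
	signal_block( m.c );  // the waiter runs to completion before this call returns
	// here m.done is true; with a plain signal(), the waiter would only run after
	// signaller released the monitor, so m.done would still be false at this point
}
\end{cfa}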
1672 1767 1673 1768 The example in table \ref{tbl:datingservice} highlights the difference in behaviour. 1674 As mentioned, \code{signal}only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed.1675 To avoid this explicit synchronization, the \code{condition} type offers the \code{signal_block}routine, which handles the two-way handshake as shown in the example.1769 As mentioned, @signal@ only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed. 1770 To avoid this explicit synchronization, the @condition@ type offers the @signal_block@ routine, which handles the two-way handshake as shown in the example. 1676 1771 This feature removes the need for a second condition variables and simplifies programming. 1677 Like every other monitor semantic, \code{signal_block} uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to \code{signal_block}, meaning no other thread can acquire the monitor either before or after the call.1772 Like every other monitor semantic, @signal_block@ uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to @signal_block@, meaning no other thread can acquire the monitor either before or after the call. 1678 1773 1679 1774 % ====================================================================== … … 1687 1782 Internal Scheduling & External Scheduling & Go\\ 1688 1783 \hline 1689 \begin{u cppcode}[tabsize=3]1784 \begin{uC++}[tabsize=3] 1690 1785 _Monitor Semaphore { 1691 1786 condition c; … … 1702 1797 } 1703 1798 } 1704 \end{u cppcode}&\begin{ucppcode}[tabsize=3]1799 \end{uC++}&\begin{uC++}[tabsize=3] 1705 1800 _Monitor Semaphore { 1706 1801 … … 1717 1812 } 1718 1813 } 1719 \end{u cppcode}&\begin{gocode}[tabsize=3]1814 \end{uC++}&\begin{Go}[tabsize=3] 1720 1815 type MySem struct { 1721 1816 inUse bool … … 1737 1832 s.inUse = false 1738 1833 1739 // This actually deadlocks1740 // when single thread1834 // This actually deadlocks 1835 // when single thread 1741 1836 s.c <- false 1742 1837 } 1743 \end{ gocode}1838 \end{Go} 1744 1839 \end{tabular} 1745 1840 \caption{Different forms of scheduling.} … … 1748 1843 This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency. 1749 1844 Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring. 1750 External scheduling can generally be done either in terms of control flow (e.g., Ada with \code{accept}, \uC with \code{_Accept}) or in terms of data (e.g., Go with channels).1845 External scheduling can generally be done either in terms of control flow (e.g., Ada with @accept@, \uC with @_Accept@) or in terms of data (e.g., Go with channels). 1751 1846 Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the languages semantics. 1752 1847 Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines. 
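For orientation before examining those challenges, a hedged sketch of the same semaphore transcribed to \CFA using the @waitfor@ statement introduced below (the transcription is illustrative):
\begin{cfa}
monitor Semaphore { bool inUse; };

void V( Semaphore & mutex s ) {
	s.inUse = false;
}
void P( Semaphore & mutex s ) {
	if( s.inUse ) waitfor( V, s );   // block until some thread calls V on s
	s.inUse = true;
}
\end{cfa}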
1753 The previous example shows a simple use \code{_Accept} versus \code{wait}/\code{signal}and its advantages.1754 Note that while other languages often use \code{accept}/\code{select} as the core external scheduling keyword, \CFA uses \code{waitfor}to prevent name collisions with existing socket \textbf{api}s.1755 1756 For the \code{P} member above using internal scheduling, the call to \code{wait} only guarantees that \code{V} is the last routine to access the monitor, allowing a third routine, say \code{isInUse()}, acquire mutual exclusion several times while routine \code{P}is waiting.1757 On the other hand, external scheduling guarantees that while routine \code{P} is waiting, no other routine than \code{V}can acquire the monitor.1848 The previous example shows a simple use @_Accept@ versus @wait@/@signal@ and its advantages. 1849 Note that while other languages often use @accept@/@select@ as the core external scheduling keyword, \CFA uses @waitfor@ to prevent name collisions with existing socket \textbf{api}s. 1850 1851 For the @P@ member above using internal scheduling, the call to @wait@ only guarantees that @V@ is the last routine to access the monitor, allowing a third routine, say @isInUse()@, acquire mutual exclusion several times while routine @P@ is waiting. 1852 On the other hand, external scheduling guarantees that while routine @P@ is waiting, no other routine than @V@ can acquire the monitor. 1758 1853 1759 1854 % ====================================================================== … … 1765 1860 Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user: 1766 1861 1767 \begin{cfa code}1862 \begin{cfa} 1768 1863 monitor A {}; 1769 1864 1770 1865 void f(A & mutex a); 1771 1866 void g(A & mutex a) { 1772 waitfor(f); // Obvious which f() to wait for1773 } 1774 1775 void f(A & mutex a, int); // New different F added in scope1867 waitfor(f); // Obvious which f() to wait for 1868 } 1869 1870 void f(A & mutex a, int); // New different F added in scope 1776 1871 void h(A & mutex a) { 1777 waitfor(f); // Less obvious which f() to wait for1778 } 1779 \end{cfa code}1872 waitfor(f); // Less obvious which f() to wait for 1873 } 1874 \end{cfa} 1780 1875 1781 1876 Furthermore, external scheduling is an example where implementation constraints become visible from the interface. 1782 Here is the pseudo-code for the entering phase of a monitor:1877 Here is the cfa-code for the entering phase of a monitor: 1783 1878 \begin{center} 1784 1879 \begin{tabular}{l} 1785 \begin{ pseudo}1880 \begin{cfa} 1786 1881 if monitor is free 1787 1882 enter … … 1792 1887 else 1793 1888 block 1794 \end{ pseudo}1889 \end{cfa} 1795 1890 \end{tabular} 1796 1891 \end{center} 1797 1892 For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions. 1798 However, a fast check for \pscode{monitor accepts me}is much harder to implement depending on the constraints put on the monitors.1893 However, a fast check for @monitor accepts me@ is much harder to implement depending on the constraints put on the monitors. 1799 1894 Indeed, monitors are often expressed as an entry queue and some acceptor queue as in Figure~\ref{fig:ClassicalMonitor}. 1800 1895 … … 1822 1917 The alternative is to alter the implementation as in Figure~\ref{fig:BulkMonitor}. 1823 1918 Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separate. 
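A hedged sketch of checking an arriving call against such a list (the @fptr_t@ type and the mask fields are illustrative, not the actual runtime structures):
\begin{cfa}
// is the routine func, called by an arriving thread, currently accepted?
static bool monitor_accepts( monitor_desc * this, fptr_t func ) {
	for( int i = 0; i < this->mask.size; i += 1 ) {
		if( this->mask.functions[ i ] == func ) return true;   // linear search
	}
	return false;
}
\end{cfa}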
1824 Generating a mask dynamically means that the storage for the mask information can vary between calls to \code{waitfor}, allowing for more flexibility and extensions.1919 Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions. 1825 1920 Storing an array of accepted function pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search. 1826 Furthermore, supporting nested external scheduling (e.g., listing \ref{lst:nest-ext}) may now require additional searches for the \code{waitfor}statement to check if a routine is already queued.1921 Furthermore, supporting nested external scheduling (e.g., listing \ref{lst:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued. 1827 1922 1828 1923 \begin{figure} 1829 \begin{cfa code}[caption={Example of nested external scheduling},label={lst:nest-ext}]1924 \begin{cfa}[caption={Example of nested external scheduling},label={lst:nest-ext}] 1830 1925 monitor M {}; 1831 1926 void foo( M & mutex a ) {} 1832 1927 void bar( M & mutex b ) { 1833 // Nested in the waitfor(bar, c) call1928 // Nested in the waitfor(bar, c) call 1834 1929 waitfor(foo, b); 1835 1930 } … … 1838 1933 } 1839 1934 1840 \end{cfa code}1935 \end{cfa} 1841 1936 \end{figure} 1842 1937 … … 1858 1953 External scheduling, like internal scheduling, becomes significantly more complex when introducing multi-monitor syntax. 1859 1954 Even in the simplest possible case, some new semantics needs to be established: 1860 \begin{cfa code}1955 \begin{cfa} 1861 1956 monitor M {}; 1862 1957 … … 1864 1959 1865 1960 void g(M & mutex b, M & mutex c) { 1866 waitfor(f); // two monitors M => unknown which to pass to f(M & mutex)1867 } 1868 \end{cfa code}1961 waitfor(f); // two monitors M => unknown which to pass to f(M & mutex) 1962 } 1963 \end{cfa} 1869 1964 The obvious solution is to specify the correct monitor as follows: 1870 1965 1871 \begin{cfa code}1966 \begin{cfa} 1872 1967 monitor M {}; 1873 1968 … … 1875 1970 1876 1971 void g(M & mutex a, M & mutex b) { 1877 // wait for call to f with argument b1972 // wait for call to f with argument b 1878 1973 waitfor(f, b); 1879 1974 } 1880 \end{cfa code}1975 \end{cfa} 1881 1976 This syntax is unambiguous. 1882 Both locks are acquired and kept by \code{g}.1883 When routine \code{f} is called, the lock for monitor \code{b} is temporarily transferred from \code{g} to \code{f} (while \code{g} still holds lock \code{a}).1884 This behaviour can be extended to the multi-monitor \code{waitfor}statement as follows.1885 1886 \begin{cfa code}1977 Both locks are acquired and kept by @g@. 1978 When routine @f@ is called, the lock for monitor @b@ is temporarily transferred from @g@ to @f@ (while @g@ still holds lock @a@). 1979 This behaviour can be extended to the multi-monitor @waitfor@ statement as follows. 1980 1981 \begin{cfa} 1887 1982 monitor M {}; 1888 1983 … … 1890 1985 1891 1986 void g(M & mutex a, M & mutex b) { 1892 // wait for call to f with arguments a and b1987 // wait for call to f with arguments a and b 1893 1988 waitfor(f, a, b); 1894 1989 } 1895 \end{cfa code}1896 1897 Note that the set of monitors passed to the \code{waitfor} statement must be entirely contained in the set of monitors already acquired in the routine. 
\code{waitfor}used in any other context is undefined behaviour.1990 \end{cfa} 1991 1992 Note that the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired in the routine. @waitfor@ used in any other context is undefined behaviour. 1898 1993 1899 1994 An important behaviour to note is when a set of monitors only match partially: 1900 1995 1901 \begin{cfa code}1996 \begin{cfa} 1902 1997 mutex struct A {}; 1903 1998 … … 1912 2007 1913 2008 void foo() { 1914 g(a1, b); // block on accept2009 g(a1, b); // block on accept 1915 2010 } 1916 2011 1917 2012 void bar() { 1918 f(a2, b); // fulfill cooperation1919 } 1920 \end{cfa code}2013 f(a2, b); // fulfill cooperation 2014 } 2015 \end{cfa} 1921 2016 While the equivalent can happen when using internal scheduling, the fact that conditions are specific to a set of monitors means that users have to use two different condition variables. 1922 2017 In both cases, partially matching monitor sets does not wakeup the waiting thread. 1923 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; \code{waitfor(f,a,b)} and \code{waitfor(f,b,a)}are indistinguishable waiting condition.1924 1925 % ====================================================================== 1926 % ====================================================================== 1927 \subsection{\ code{waitfor}Semantics}1928 % ====================================================================== 1929 % ====================================================================== 1930 1931 Syntactically, the \code{waitfor}statement takes a function identifier and a set of monitors.1932 While the set of monitors can be any list of expressions, the function name is more restricted because the compiler validates at compile time the validity of the function type and the parameters used with the \code{waitfor}statement.2018 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; @waitfor(f,a,b)@ and @waitfor(f,b,a)@ are indistinguishable waiting condition. 2019 2020 % ====================================================================== 2021 % ====================================================================== 2022 \subsection{\protect\lstinline|waitfor| Semantics} 2023 % ====================================================================== 2024 % ====================================================================== 2025 2026 Syntactically, the @waitfor@ statement takes a function identifier and a set of monitors. 2027 While the set of monitors can be any list of expressions, the function name is more restricted because the compiler validates at compile time the validity of the function type and the parameters used with the @waitfor@ statement. 1933 2028 It checks that the set of monitors passed in matches the requirements for a function call. 1934 2029 Listing \ref{lst:waitfor} shows various usages of the waitfor statement and which are acceptable. 1935 The choice of the function type is made ignoring any non- \code{mutex}parameter.2030 The choice of the function type is made ignoring any non-@mutex@ parameter. 1936 2031 One limitation of the current implementation is that it does not handle overloading, but overloading is possible. 
1937 2032 \begin{figure} 1938 \begin{cfa code}[caption={Various correct and incorrect uses of the waitfor statement},label={lst:waitfor}]2033 \begin{cfa}[caption={Various correct and incorrect uses of the waitfor statement},label={lst:waitfor}] 1939 2034 monitor A{}; 1940 2035 monitor B{}; … … 1950 2045 void (*fp)( A & mutex ) = f1; 1951 2046 1952 waitfor(f1, a1); // Correct : 1 monitor case1953 waitfor(f2, a1, b1); // Correct : 2 monitor case1954 waitfor(f3, a1); // Correct : non-mutex arguments are ignored1955 waitfor(f1, *ap); // Correct : expression as argument1956 1957 waitfor(f1, a1, b1); // Incorrect : Too many mutex arguments1958 waitfor(f2, a1); // Incorrect : Too few mutex arguments1959 waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match1960 waitfor(f1, 1); // Incorrect : 1 not a mutex argument1961 waitfor(f9, a1); // Incorrect : f9 function does not exist1962 waitfor(*fp, a1 ); // Incorrect : fp not an identifier1963 waitfor(f4, a1); // Incorrect : f4 ambiguous1964 1965 waitfor(f2, a1, b2); // Undefined behaviour : b2 not mutex1966 } 1967 \end{cfa code}2047 waitfor(f1, a1); // Correct : 1 monitor case 2048 waitfor(f2, a1, b1); // Correct : 2 monitor case 2049 waitfor(f3, a1); // Correct : non-mutex arguments are ignored 2050 waitfor(f1, *ap); // Correct : expression as argument 2051 2052 waitfor(f1, a1, b1); // Incorrect : Too many mutex arguments 2053 waitfor(f2, a1); // Incorrect : Too few mutex arguments 2054 waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match 2055 waitfor(f1, 1); // Incorrect : 1 not a mutex argument 2056 waitfor(f9, a1); // Incorrect : f9 function does not exist 2057 waitfor(*fp, a1 ); // Incorrect : fp not an identifier 2058 waitfor(f4, a1); // Incorrect : f4 ambiguous 2059 2060 waitfor(f2, a1, b2); // Undefined behaviour : b2 not mutex 2061 } 2062 \end{cfa} 1968 2063 \end{figure} 1969 2064 1970 Finally, for added flexibility, \CFA supports constructing a complex \code{waitfor} statement using the \code{or}, \code{timeout} and \code{else}.1971 Indeed, multiple \code{waitfor} clauses can be chained together using \code{or}; this chain forms a single statement that uses baton pass to any function that fits one of the function+monitor set passed in.1972 To enable users to tell which accepted function executed, \code{waitfor}s are followed by a statement (including the null statement \code{;}) or a compound statement, which is executed after the clause is triggered.1973 A \code{waitfor} chain can also be followed by a \code{timeout}, to signify an upper bound on the wait, or an \code{else}, to signify that the call should be non-blocking, which checks for a matching function call already arrived and otherwise continues.1974 Any and all of these clauses can be preceded by a \code{when}condition to dynamically toggle the accept clauses on or off based on some current state.2065 Finally, for added flexibility, \CFA supports constructing a complex @waitfor@ statement using the @or@, @timeout@ and @else@. 2066 Indeed, multiple @waitfor@ clauses can be chained together using @or@; this chain forms a single statement that uses baton pass to any function that fits one of the function+monitor set passed in. 2067 To enable users to tell which accepted function executed, @waitfor@s are followed by a statement (including the null statement @;@) or a compound statement, which is executed after the clause is triggered. 
2068 A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks for a matching function call already arrived and otherwise continues. 2069 Any and all of these clauses can be preceded by a @when@ condition to dynamically toggle the accept clauses on or off based on some current state. 1975 2070 Listing \ref{lst:waitfor2} demonstrates several complex masks and some incorrect ones. 1976 2071 1977 2072 \begin{figure} 1978 \begin{cfa code}[caption={Various correct and incorrect uses of the or, else, and timeout clause around a waitfor statement},label={lst:waitfor2}]2073 \begin{cfa}[caption={Various correct and incorrect uses of the or, else, and timeout clause around a waitfor statement},label={lst:waitfor2}] 1979 2074 monitor A{}; 1980 2075 … … 1983 2078 1984 2079 void foo( A & mutex a, bool b, int t ) { 1985 // Correct : blocking case2080 // Correct : blocking case 1986 2081 waitfor(f1, a); 1987 2082 1988 // Correct : block with statement2083 // Correct : block with statement 1989 2084 waitfor(f1, a) { 1990 2085 sout | "f1" | endl; 1991 2086 } 1992 2087 1993 // Correct : block waiting for f1 or f22088 // Correct : block waiting for f1 or f2 1994 2089 waitfor(f1, a) { 1995 2090 sout | "f1" | endl; … … 1998 2093 } 1999 2094 2000 // Correct : non-blocking case2095 // Correct : non-blocking case 2001 2096 waitfor(f1, a); or else; 2002 2097 2003 // Correct : non-blocking case2098 // Correct : non-blocking case 2004 2099 waitfor(f1, a) { 2005 2100 sout | "blocked" | endl; … … 2008 2103 } 2009 2104 2010 // Correct : block at most 10 seconds2105 // Correct : block at most 10 seconds 2011 2106 waitfor(f1, a) { 2012 2107 sout | "blocked" | endl; … … 2015 2110 } 2016 2111 2017 // Correct : block only if b == true2018 // if b == false, don't even make the call2112 // Correct : block only if b == true 2113 // if b == false, don't even make the call 2019 2114 when(b) waitfor(f1, a); 2020 2115 2021 // Correct : block only if b == true2022 // if b == false, make non-blocking call2116 // Correct : block only if b == true 2117 // if b == false, make non-blocking call 2023 2118 waitfor(f1, a); or when(!b) else; 2024 2119 2025 // Correct : block only of t > 12120 // Correct : block only of t > 1 2026 2121 waitfor(f1, a); or when(t > 1) timeout(t); or else; 2027 2122 2028 // Incorrect : timeout clause is dead code2123 // Incorrect : timeout clause is dead code 2029 2124 waitfor(f1, a); or timeout(t); or else; 2030 2125 2031 // Incorrect : order must be2032 // waitfor [or waitfor... [or timeout] [or else]]2126 // Incorrect : order must be 2127 // waitfor [or waitfor... [or timeout] [or else]] 2033 2128 timeout(t); or waitfor(f1, a); or else; 2034 2129 } 2035 \end{cfa code}2130 \end{cfa} 2036 2131 \end{figure} 2037 2132 … … 2041 2136 % ====================================================================== 2042 2137 % ====================================================================== 2043 An interesting use for the \code{waitfor}statement is destructor semantics.2044 Indeed, the \code{waitfor} statement can accept any \code{mutex}routine, which includes the destructor (see section \ref{data}).2138 An interesting use for the @waitfor@ statement is destructor semantics. 2139 Indeed, the @waitfor@ statement can accept any @mutex@ routine, which includes the destructor (see section \ref{data}). 
2045 2140 However, with the semantics discussed until now, waiting for the destructor does not make any sense, since using an object after its destructor is called is undefined behaviour. 2046 The simplest approach is to disallow \code{waitfor}on a destructor.2047 However, a more expressive approach is to flip ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current \code{mutex}routine, similarly to how a condition is signalled.2141 The simplest approach is to disallow @waitfor@ on a destructor. 2142 However, a more expressive approach is to flip ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current @mutex@ routine, similarly to how a condition is signalled. 2048 2143 \begin{figure} 2049 \begin{cfa code}[caption={Example of an executor which executes action in series until the destructor is called.},label={lst:dtor-order}]2144 \begin{cfa}[caption={Example of an executor which executes action in series until the destructor is called.},label={lst:dtor-order}] 2050 2145 monitor Executer {}; 2051 2146 struct Action; … … 2061 2156 } 2062 2157 } 2063 \end{cfa code}2158 \end{cfa} 2064 2159 \end{figure} 2065 2160 For example, listing \ref{lst:dtor-order} shows an example of an executor with an infinite loop, which waits for the destructor to break out of this loop. … … 2079 2174 In this decade, it is no longer reasonable to create a high-performance application without caring about parallelism. 2080 2175 Indeed, parallelism is an important aspect of performance and more specifically throughput and hardware utilization. 2081 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like \code{fork}, \code{join}, etc.2176 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like @fork@, @join@, etc. 2082 2177 However, since these have significant costs and limitations, \textbf{kthread} are now mostly used as an implementation tool rather than a user oriented one. 2083 2178 There are several alternatives to solve these issues that all have strengths and weaknesses. … … 2158 2253 Conveniently, the call stack fits that description and is easy to use, which is why it is used heavily in the implementation of internal scheduling, particularly variable-length arrays. 2159 2254 Since stack allocation is based on scopes, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable-length array. 2160 The threads and the condition both have a fixed amount of memory, while \code{mutex}routines and blocking calls allow for an unbound amount, within the stack size.2255 The threads and the condition both have a fixed amount of memory, while @mutex@ routines and blocking calls allow for an unbound amount, within the stack size. 2161 2256 2162 2257 Note that since the major contributions of this paper are extending monitor semantics to \textbf{bulk-acq} and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed. … … 2168 2263 % ====================================================================== 2169 2264 2170 The first step towards the monitor implementation is simple \code{mutex}routines.2265 The first step towards the monitor implementation is simple @mutex@ routines. 
2171 2266 In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{lst:entry1}. 2172 2267 The entry/exit procedures do not have to be extended to support multiple monitors. … … 2179 2274 \begin{multicols}{2} 2180 2275 Entry 2181 \begin{ pseudo}2276 \begin{cfa} 2182 2277 if monitor is free 2183 2278 enter … … 2187 2282 block 2188 2283 increment recursions 2189 \end{ pseudo}2284 \end{cfa} 2190 2285 \columnbreak 2191 2286 Exit 2192 \begin{ pseudo}2287 \begin{cfa} 2193 2288 decrement recursion 2194 2289 if recursion == 0 2195 2290 if entry queue not empty 2196 2291 wake-up thread 2197 \end{ pseudo}2292 \end{cfa} 2198 2293 \end{multicols} 2199 \begin{ pseudo}[caption={Initial entry and exit routine for monitors},label={lst:entry1}]2200 \end{ pseudo}2294 \begin{cfa}[caption={Initial entry and exit routine for monitors},label={lst:entry1}] 2295 \end{cfa} 2201 2296 \end{figure} 2202 2297 … … 2205 2300 However, it is shown that entry-point locking solves most of the issues. 2206 2301 2207 First of all, interaction between \code{otype}polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying.2208 Therefore, the main question is how to support \code{dtype}polymorphism.2302 First of all, interaction between @otype@ polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying. 2303 Therefore, the main question is how to support @dtype@ polymorphism. 2209 2304 It is important to present the difference between the two acquiring options: \textbf{callsite-locking} and entry-point locking, i.e., acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call. 2210 2305 For example: … … 2213 2308 \begin{tabular}{|c|c|c|} 2214 2309 Mutex & \textbf{callsite-locking} & \textbf{entry-point-locking} \\ 2215 call & pseudo-code & pseudo-code \\2310 call & cfa-code & cfa-code \\ 2216 2311 \hline 2217 \begin{cfa code}[tabsize=3]2312 \begin{cfa}[tabsize=3] 2218 2313 void foo(monitor& mutex a){ 2219 2314 2220 // Do Work2315 // Do Work 2221 2316 //... 2222 2317 … … 2229 2324 2230 2325 } 2231 \end{cfa code} & \begin{pseudo}[tabsize=3]2326 \end{cfa} & \begin{cfa}[tabsize=3] 2232 2327 foo(& a) { 2233 2328 2234 // Do Work2329 // Do Work 2235 2330 //... 2236 2331 … … 2243 2338 release(a); 2244 2339 } 2245 \end{ pseudo} & \begin{pseudo}[tabsize=3]2340 \end{cfa} & \begin{cfa}[tabsize=3] 2246 2341 foo(& a) { 2247 2342 acquire(a); 2248 // Do Work2343 // Do Work 2249 2344 //... 
2250 2345 release(a); … … 2257 2352 2258 2353 } 2259 \end{ pseudo}2354 \end{cfa} 2260 2355 \end{tabular} 2261 2356 \end{center} … … 2264 2359 \end{table} 2265 2360 2266 Note the \code{mutex}keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, e.g.:2267 \begin{cfa code}2268 // Incorrect: T may not be monitor2361 Note the @mutex@ keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, e.g.: 2362 \begin{cfa} 2363 // Incorrect: T may not be monitor 2269 2364 forall(dtype T) 2270 2365 void foo(T * mutex t); 2271 2366 2272 // Correct: this function only works on monitors (any monitor)2367 // Correct: this function only works on monitors (any monitor) 2273 2368 forall(dtype T | is_monitor(T)) 2274 2369 void bar(T * mutex t)); 2275 \end{cfa code}2370 \end{cfa} 2276 2371 2277 2372 Both entry point and \textbf{callsite-locking} are feasible implementations. … … 2323 2418 Obviously, this doubles the context-switch cost because threads must context-switch to an intermediate stack. 2324 2419 The alternative 1-step context-switch uses the stack of the ``from'' thread to schedule and then context-switches directly to the ``to'' thread. 2325 However, the performance of the 2-step context-switch is still superior to a \code{pthread_yield}(see section \ref{results}).2326 Additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft \code{SwitchToFiber}~\cite{switchToWindows} routine).2420 However, the performance of the 2-step context-switch is still superior to a @pthread_yield@ (see section \ref{results}). 2421 Additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft @SwitchToFiber@~\cite{switchToWindows} routine). 2327 2422 This option is not currently present in \CFA, but the changes required to add it are strictly additive. 2328 2423 … … 2356 2451 This behaviour is only a problem if all kernel threads, among which a user thread can migrate, differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' functions from other functions.}. 2357 2452 However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks. 2358 For this reason, the alarm thread is in a tight loop around a system call to \code{sigwaitinfo}, requiring very little CPU time for preemption.2453 For this reason, the alarm thread is in a tight loop around a system call to @sigwaitinfo@, requiring very little CPU time for preemption. 2359 2454 One final detail about the alarm thread is how to wake it when additional communication is required (e.g., on thread termination). 2360 This unblocking is also done using {\tt SIGALRM}, but sent through the \code{pthread_sigqueue}.2361 Indeed, \code{sigwait} can differentiate signals sent from \code{pthread_sigqueue}from signals sent from alarms or the kernel.2455 This unblocking is also done using {\tt SIGALRM}, but sent through the @pthread_sigqueue@. 
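The preemption mechanism just described can be illustrated with a small plain-C sketch. This is not the \CFA runtime; it assumes a Linux/glibc system (@pthread_sigqueue@ is a GNU extension), uses @setitimer@ for the periodic {\tt SIGALRM}, and the name @alarm_thread@ and the 10 ms period are invented for the example.

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

static void * alarm_thread( void * arg ) {
    (void)arg;
    sigset_t set;
    sigemptyset( &set );
    sigaddset( &set, SIGALRM );
    for ( ;; ) {
        siginfo_t info;
        if ( sigwaitinfo( &set, &info ) == -1 ) continue;   // interrupted, retry
        if ( info.si_code == SI_QUEUE ) {    // queued with pthread_sigqueue
            break;                           // treated here as a shutdown request
        } else {                             // periodic tick from the kernel timer
            // a real runtime would force the running kernel threads to yield here
        }
    }
    return NULL;
}

int main( void ) {
    sigset_t set;                            // block SIGALRM in every thread, so only
    sigemptyset( &set );                     // sigwaitinfo in alarm_thread receives it
    sigaddset( &set, SIGALRM );
    pthread_sigmask( SIG_BLOCK, &set, NULL );

    pthread_t alarm_tid;
    pthread_create( &alarm_tid, NULL, alarm_thread, NULL );

    struct itimerval period = { { 0, 10000 }, { 0, 10000 } };   // 10 ms interval and first expiry
    setitimer( ITIMER_REAL, &period, NULL );

    struct timespec work = { 0, 100000000 }; // stand-in for real work (100 ms)
    nanosleep( &work, NULL );

    struct itimerval off = { { 0, 0 }, { 0, 0 } };
    setitimer( ITIMER_REAL, &off, NULL );    // disarm the timer
    pthread_sigqueue( alarm_tid, SIGALRM, (union sigval){ .sival_int = 0 } );   // wake the alarm thread
    pthread_join( alarm_tid, NULL );
    printf( "done\n" );
    return 0;
}

The sketch relies on the property the text describes: every thread blocks {\tt SIGALRM}, so only the dedicated thread ever receives it, and @si_code@ distinguishes a queued wake-up from a kernel timer tick.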
2456 Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel. 2362 2457 2363 2458 \subsection{Scheduler} … … 2405 2500 \begin{multicols}{2} 2406 2501 Entry 2407 \begin{ pseudo}2502 \begin{cfa} 2408 2503 if monitor is free 2409 2504 enter … … 2414 2509 increment recursion 2415 2510 2416 \end{ pseudo}2511 \end{cfa} 2417 2512 \columnbreak 2418 2513 Exit 2419 \begin{ pseudo}2514 \begin{cfa} 2420 2515 decrement recursion 2421 2516 if recursion == 0 … … 2427 2522 if entry queue not empty 2428 2523 wake-up thread 2429 \end{ pseudo}2524 \end{cfa} 2430 2525 \end{multicols} 2431 \begin{ pseudo}[caption={Entry and exit routine for monitors with internal scheduling},label={lst:entry2}]2432 \end{ pseudo}2526 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling},label={lst:entry2}] 2527 \end{cfa} 2433 2528 \end{figure} 2434 2529 … … 2436 2531 Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has transferred ownership. 2437 2532 This solution is deadlock safe as well as preventing any potential barging. 2438 The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call stack of the \code{wait} and \code{signal_block}routines.2533 The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call stack of the @wait@ and @signal_block@ routines. 2439 2534 2440 2535 \begin{figure}[H] … … 2448 2543 Figure \ref{fig:structs} shows a high-level representation of these data structures. 2449 2544 The main idea behind them is that, a thread cannot contain an arbitrary number of intrusive ``next'' pointers for linking onto monitors. 2450 The \code{condition node} is the data structure that is queued onto a condition variable and, when signalled, the condition queue is popped and each \code{condition criterion}is moved to the AS-stack.2545 The @condition node@ is the data structure that is queued onto a condition variable and, when signalled, the condition queue is popped and each @condition criterion@ is moved to the AS-stack. 2451 2546 Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{lst:entry2}. 2452 2547 … … 2458 2553 Similarly to internal scheduling, external scheduling for multiple monitors relies on the idea that waiting-thread queues are no longer specific to a single monitor, as mentioned in section \ref{extsched}. 2459 2554 For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (i.e., no signal statement uses multiple conditions). 2460 However, in the case of external scheduling, there is no equivalent object which is associated with \code{waitfor}statements.2555 However, in the case of external scheduling, there is no equivalent object which is associated with @waitfor@ statements. 2461 2556 This absence means the queues holding the waiting threads must be stored inside at least one of the monitors that is acquired. 
2462 These monitors being the only objects that have sufficient lifetime and are available on both sides of the \code{waitfor}statement.2557 These monitors being the only objects that have sufficient lifetime and are available on both sides of the @waitfor@ statement. 2463 2558 This requires an algorithm to choose which monitor holds the relevant queue. 2464 2559 It is also important that said algorithm be independent of the order in which users list parameters. … … 2477 2572 \begin{itemize} 2478 2573 \item The threads waiting on the entry queue need to keep track of which routine they are trying to enter, and using which set of monitors. 2479 The \code{mutex}routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information.2574 The @mutex@ routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information. 2480 2575 \item The monitors need to keep a mask of acceptable routines. 2481 2576 This mask contains for each acceptable routine, a routine pointer and an array of monitors to go with it. 2482 2577 It also needs storage to keep track of which routine was accepted. 2483 2578 Since this information is not specific to any monitor, the monitors actually contain a pointer to an integer on the stack of the waiting thread. 2484 Note that if a thread has acquired two monitors but executes a \code{waitfor}with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless.2485 This becomes relevant when \code{when} clauses affect the number of monitors passed to a \code{waitfor}statement.2579 Note that if a thread has acquired two monitors but executes a @waitfor@ with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless. 2580 This becomes relevant when @when@ clauses affect the number of monitors passed to a @waitfor@ statement. 2486 2581 \item The entry/exit routines need to be updated as shown in listing \ref{lst:entry3}. 2487 2582 \end{itemize} … … 2491 2586 This routine is needed because of the storage requirements of the call order inversion. 2492 2587 Indeed, when waiting for the destructors, storage is needed for the waiting context and the lifetime of said storage needs to outlive the waiting operation it is needed for. 2493 For regular \code{waitfor}statements, the call stack of the routine itself matches this requirement but it is no longer the case when waiting for the destructor since it is pushed on to the AS-stack for later.2494 The \code{waitfor}semantics can then be adjusted correspondingly, as seen in listing \ref{lst:entry-dtor}2588 For regular @waitfor@ statements, the call stack of the routine itself matches this requirement but it is no longer the case when waiting for the destructor since it is pushed on to the AS-stack for later. 
2589 The @waitfor@ semantics can then be adjusted correspondingly, as seen in listing \ref{lst:entry-dtor} 2495 2590 2496 2591 \begin{figure} 2497 2592 \begin{multicols}{2} 2498 2593 Entry 2499 \begin{ pseudo}2594 \begin{cfa} 2500 2595 if monitor is free 2501 2596 enter … … 2508 2603 block 2509 2604 increment recursion 2510 \end{ pseudo}2605 \end{cfa} 2511 2606 \columnbreak 2512 2607 Exit 2513 \begin{ pseudo}2608 \begin{cfa} 2514 2609 decrement recursion 2515 2610 if recursion == 0 … … 2524 2619 wake-up thread 2525 2620 endif 2526 \end{ pseudo}2621 \end{cfa} 2527 2622 \end{multicols} 2528 \begin{ pseudo}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={lst:entry3}]2529 \end{ pseudo}2623 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={lst:entry3}] 2624 \end{cfa} 2530 2625 \end{figure} 2531 2626 … … 2533 2628 \begin{multicols}{2} 2534 2629 Destructor Entry 2535 \begin{ pseudo}2630 \begin{cfa} 2536 2631 if monitor is free 2537 2632 enter … … 2547 2642 wait 2548 2643 increment recursion 2549 \end{ pseudo}2644 \end{cfa} 2550 2645 \columnbreak 2551 2646 Waitfor 2552 \begin{ pseudo}2647 \begin{cfa} 2553 2648 if matching thread is already there 2554 2649 if found destructor … … 2570 2665 block 2571 2666 return 2572 \end{ pseudo}2667 \end{cfa} 2573 2668 \end{multicols} 2574 \begin{ pseudo}[caption={Pseudo code for the \code{waitfor} routine and the \code{mutex}entry routine for destructors},label={lst:entry-dtor}]2575 \end{ pseudo}2669 \begin{cfa}[caption={Pseudo code for the \protect\lstinline|waitfor| routine and the \protect\lstinline|mutex| entry routine for destructors},label={lst:entry-dtor}] 2670 \end{cfa} 2576 2671 \end{figure} 2577 2672 … … 2585 2680 2586 2681 \section{Threads As Monitors} 2587 As it was subtly alluded in section \ref{threads}, \code{thread}s in \CFA are in fact monitors, which means that all monitor features are available when using threads.2682 As it was subtly alluded in section \ref{threads}, @thread@s in \CFA are in fact monitors, which means that all monitor features are available when using threads. 2588 2683 For example, here is a very simple two thread pipeline that could be used for a simulator of a game engine: 2589 2684 \begin{figure}[H] 2590 \begin{cfa code}[caption={Toy simulator using \code{thread}s and \code{monitor}s.},label={lst:engine-v1}]2685 \begin{cfa}[caption={Toy simulator using \protect\lstinline|thread|s and \protect\lstinline|monitor|s.},label={lst:engine-v1}] 2591 2686 // Visualization declaration 2592 2687 thread Renderer {} renderer; … … 2615 2710 } 2616 2711 } 2617 \end{cfa code}2712 \end{cfa} 2618 2713 \end{figure} 2619 2714 One of the obvious complaints of the previous code snippet (other than its toy-like simplicity) is that it does not handle exit conditions and just goes on forever. 
2620 2715 Luckily, the monitor semantics can also be used to clearly enforce a shutdown order in a concise manner: 2621 2716 \begin{figure}[H] 2622 \begin{cfa code}[caption={Same toy simulator with proper termination condition.},label={lst:engine-v2}]2717 \begin{cfa}[caption={Same toy simulator with proper termination condition.},label={lst:engine-v2}] 2623 2718 // Visualization declaration 2624 2719 thread Renderer {} renderer; … … 2658 2753 // Call destructor for simulator once simulator finishes 2659 2754 // Call destructor for renderer to signify shutdown 2660 \end{cfa code}2755 \end{cfa} 2661 2756 \end{figure} 2662 2757 … … 2664 2759 As mentioned in section \ref{preemption}, \CFA uses preemptive threads by default but can use fibers on demand. 2665 2760 Currently, using fibers is done by adding the following line of code to the program~: 2666 \begin{cfa code}2761 \begin{cfa} 2667 2762 unsigned int default_preemption() { 2668 2763 return 0; 2669 2764 } 2670 \end{cfa code}2765 \end{cfa} 2671 2766 This function is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, i.e., no preemption. 2672 2767 However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{lst:fiber-uthread} 2673 2768 \begin{figure} 2674 \begin{cfa code}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={lst:fiber-uthread}]2675 // Cluster forward declaration2769 \begin{cfa}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={lst:fiber-uthread}] 2770 // Cluster forward declaration 2676 2771 struct cluster; 2677 2772 2678 // Processor forward declaration2773 // Processor forward declaration 2679 2774 struct processor; 2680 2775 2681 // Construct clusters with a preemption rate2776 // Construct clusters with a preemption rate 2682 2777 void ?{}(cluster& this, unsigned int rate); 2683 // Construct processor and add it to cluster2778 // Construct processor and add it to cluster 2684 2779 void ?{}(processor& this, cluster& cluster); 2685 // Construct thread and schedule it on cluster2780 // Construct thread and schedule it on cluster 2686 2781 void ?{}(thread& this, cluster& cluster); 2687 2782 2688 // Declare two clusters2689 cluster thread_cluster = { 10`ms }; // Preempt every 10 ms2690 cluster fibers_cluster = { 0 }; // Never preempt2691 2692 // Construct 4 processors2783 // Declare two clusters 2784 cluster thread_cluster = { 10`ms }; // Preempt every 10 ms 2785 cluster fibers_cluster = { 0 }; // Never preempt 2786 2787 // Construct 4 processors 2693 2788 processor processors[4] = { 2694 2789 //2 for the thread cluster … … 2700 2795 }; 2701 2796 2702 // Declares thread2797 // Declares thread 2703 2798 thread UThread {}; 2704 2799 void ?{}(UThread& this) { 2705 // Construct underlying thread to automatically2706 // be scheduled on the thread cluster2800 // Construct underlying thread to automatically 2801 // be scheduled on the thread cluster 2707 2802 (this){ thread_cluster } 2708 2803 } … … 2710 2805 void main(UThread & this); 2711 2806 2712 // Declares fibers2807 // Declares fibers 2713 2808 thread Fiber {}; 2714 2809 void ?{}(Fiber& this) { 2715 // Construct underlying thread to automatically2716 // be scheduled on the fiber cluster2810 // Construct underlying thread to automatically 2811 // be scheduled on the fiber cluster 2717 2812 (this.__thread){ fibers_cluster } 2718 2813 } 2719 2814 2720 2815 void main(Fiber & this); 2721 \end{cfa code}2816 
\end{cfa} 2722 2817 \end{figure} 2723 2818 … … 2763 2858 2764 2859 \section{Micro Benchmarks} 2765 All benchmarks are run using the same harness to produce the results, seen as the \code{BENCH()}macro in the following examples.2860 All benchmarks are run using the same harness to produce the results, seen as the @BENCH()@ macro in the following examples. 2766 2861 This macro uses the following logic to benchmark the code: 2767 \begin{ pseudo}2862 \begin{cfa} 2768 2863 #define BENCH(run, result) \ 2769 2864 before = gettime(); \ … … 2771 2866 after = gettime(); \ 2772 2867 result = (after - before) / N; 2773 \end{ pseudo}2774 The method used to get time is \code{clock_gettime(CLOCK_THREAD_CPUTIME_ID);}.2868 \end{cfa} 2869 The method used to get time is @clock_gettime(CLOCK_THREAD_CPUTIME_ID);@. 2775 2870 Each benchmark is using many iterations of a simple call to measure the cost of the call. 2776 2871 The specific number of iterations depends on the specific benchmark. … … 2787 2882 \begin{multicols}{2} 2788 2883 \CFA Coroutines 2789 \begin{cfa code}2884 \begin{cfa} 2790 2885 coroutine GreatSuspender {}; 2791 2886 void main(GreatSuspender& this) { … … 2803 2898 printf("%llu\n", result); 2804 2899 } 2805 \end{cfa code}2900 \end{cfa} 2806 2901 \columnbreak 2807 2902 \CFA Threads 2808 \begin{cfa code}2903 \begin{cfa} 2809 2904 2810 2905 … … 2822 2917 printf("%llu\n", result); 2823 2918 } 2824 \end{cfa code}2919 \end{cfa} 2825 2920 \end{multicols} 2826 \begin{cfa code}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={lst:ctx-switch}]2827 \end{cfa code}2921 \begin{cfa}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={lst:ctx-switch}] 2922 \end{cfa} 2828 2923 \end{figure} 2829 2924 … … 2853 2948 For monitors, the simplest approach is to measure how long it takes to enter and leave a monitor routine. 2854 2949 Listing \ref{lst:mutex} shows the code for \CFA. 2855 To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a \code{pthread_mutex}lock is also measured.2950 To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured. 2856 2951 The results can be shown in table \ref{tab:mutex}. 
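For comparison with that table, the @pthread_mutex@ cost can be approximated in plain C with the same harness idea, that is, a tight loop around a non-inline call timed with @clock_gettime(CLOCK_THREAD_CPUTIME_ID)@. This sketch is illustrative only; the names @thread_ns@, @call@ and the iteration count @N@ are invented, and it is not the code that produced the numbers reported below.

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define N 500000

static long long thread_ns( void ) {                 // same clock as the BENCH harness
    struct timespec ts;
    clock_gettime( CLOCK_THREAD_CPUTIME_ID, &ts );
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void __attribute__((noinline)) call( void ) { // non-inline, as in the \CFA benchmark
    pthread_mutex_lock( &m );
    pthread_mutex_unlock( &m );
}

int main( void ) {
    long long before = thread_ns();
    for ( int i = 0; i < N; i += 1 ) call();
    long long after = thread_ns();
    printf( "%lld\n", ( after - before ) / N );       // average ns per lock/unlock pair
    return 0;
}

The \CFA benchmark proper follows in the next figure.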
2857 2952 2858 2953 \begin{figure} 2859 \begin{cfa code}[caption={\CFA benchmark code used to measure mutex routines.},label={lst:mutex}]2954 \begin{cfa}[caption={\CFA benchmark code used to measure mutex routines.},label={lst:mutex}] 2860 2955 monitor M {}; 2861 2956 void __attribute__((noinline)) call( M & mutex m /*, m2, m3, m4*/ ) {} … … 2871 2966 printf("%llu\n", result); 2872 2967 } 2873 \end{cfa code}2968 \end{cfa} 2874 2969 \end{figure} 2875 2970 … … 2883 2978 FetchAdd + FetchSub & 26 & 26 & 0 \\ 2884 2979 Pthreads Mutex Lock & 31 & 31.86 & 0.99 \\ 2885 \uC \code{monitor}member routine & 30 & 30 & 0 \\2886 \CFA \code{mutex}routine, 1 argument & 41 & 41.57 & 0.9 \\2887 \CFA \code{mutex}routine, 2 argument & 76 & 76.96 & 1.57 \\2888 \CFA \code{mutex}routine, 4 argument & 145 & 146.68 & 3.85 \\2980 \uC @monitor@ member routine & 30 & 30 & 0 \\ 2981 \CFA @mutex@ routine, 1 argument & 41 & 41.57 & 0.9 \\ 2982 \CFA @mutex@ routine, 2 argument & 76 & 76.96 & 1.57 \\ 2983 \CFA @mutex@ routine, 4 argument & 145 & 146.68 & 3.85 \\ 2889 2984 Java synchronized routine & 27 & 28.57 & 2.6 \\ 2890 2985 \hline … … 2902 2997 2903 2998 \begin{figure} 2904 \begin{cfa code}[caption={Benchmark code for internal scheduling},label={lst:int-sched}]2999 \begin{cfa}[caption={Benchmark code for internal scheduling},label={lst:int-sched}] 2905 3000 volatile int go = 0; 2906 3001 condition c; … … 2932 3027 return do_wait(m1); 2933 3028 } 2934 \end{cfa code}3029 \end{cfa} 2935 3030 \end{figure} 2936 3031 … … 2942 3037 \hline 2943 3038 Pthreads Condition Variable & 5902.5 & 6093.29 & 714.78 \\ 2944 \uC \code{signal}& 322 & 323 & 3.36 \\2945 \CFA \code{signal}, 1 \code{monitor}& 352.5 & 353.11 & 3.66 \\2946 \CFA \code{signal}, 2 \code{monitor}& 430 & 430.29 & 8.97 \\2947 \CFA \code{signal}, 4 \code{monitor}& 594.5 & 606.57 & 18.33 \\2948 Java \code{notify}& 13831.5 & 15698.21 & 4782.3 \\3039 \uC @signal@ & 322 & 323 & 3.36 \\ 3040 \CFA @signal@, 1 @monitor@ & 352.5 & 353.11 & 3.66 \\ 3041 \CFA @signal@, 2 @monitor@ & 430 & 430.29 & 8.97 \\ 3042 \CFA @signal@, 4 @monitor@ & 594.5 & 606.57 & 18.33 \\ 3043 Java @notify@ & 13831.5 & 15698.21 & 4782.3 \\ 2949 3044 \hline 2950 3045 \end{tabular} … … 2956 3051 2957 3052 \subsection{External Scheduling} 2958 The Internal scheduling benchmark measures the cost of the \code{waitfor} statement (\code{_Accept}in \uC).3053 The Internal scheduling benchmark measures the cost of the @waitfor@ statement (@_Accept@ in \uC). 2959 3054 Listing \ref{lst:ext-sched} shows the code for \CFA, with results in table \ref{tab:ext-sched}. 2960 3055 As with all other benchmarks, all omitted tests are functionally identical to one of these tests. 
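As a rough plain-C analogue of what this benchmark exercises, a thread can block until a specific routine is invoked on a shared object by pairing a flag with a condition variable. The sketch is hypothetical and much weaker than @waitfor@: it does not transfer mutual exclusion to the accepted call and does not prevent barging, and the names @M@, @f2@ and @wait_for_f2@ are invented.

#include <pthread.h>
#include <stdio.h>

typedef struct {                                     // hypothetical monitor stand-in
    pthread_mutex_t lock;
    pthread_cond_t arrived;
    int f2_called;                                   // has the accepted routine run?
} M;

static M m = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void f2( M * this ) {                         // the routine being "accepted"
    pthread_mutex_lock( &this->lock );
    this->f2_called = 1;
    pthread_cond_signal( &this->arrived );
    pthread_mutex_unlock( &this->lock );
}

static void wait_for_f2( M * this ) {                // rough analogue of waitfor( f2, m )
    pthread_mutex_lock( &this->lock );
    while ( ! this->f2_called )
        pthread_cond_wait( &this->arrived, &this->lock );
    this->f2_called = 0;                             // consume the acceptance
    pthread_mutex_unlock( &this->lock );
}

static void * caller( void * arg ) {
    f2( (M *)arg );
    return NULL;
}

int main( void ) {
    pthread_t t;
    pthread_create( &t, NULL, caller, &m );
    wait_for_f2( &m );                               // block until f2 has been called
    pthread_join( t, NULL );
    printf( "accepted f2\n" );
    return 0;
}

Because acceptance is recorded in a flag, a call to @f2@ that arrives before @wait_for_f2@ is not lost; the waiter simply returns immediately, loosely mirroring a @waitfor@ whose matching call has already arrived.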
2961 3056 2962 3057 \begin{figure} 2963 \begin{cfa code}[caption={Benchmark code for external scheduling},label={lst:ext-sched}]3058 \begin{cfa}[caption={Benchmark code for external scheduling},label={lst:ext-sched}] 2964 3059 volatile int go = 0; 2965 3060 monitor M {}; … … 2990 3085 return do_wait(m1); 2991 3086 } 2992 \end{cfa code}3087 \end{cfa} 2993 3088 \end{figure} 2994 3089 … … 2999 3094 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\ 3000 3095 \hline 3001 \uC \code{Accept}& 350 & 350.61 & 3.11 \\3002 \CFA \code{waitfor}, 1 \code{monitor}& 358.5 & 358.36 & 3.82 \\3003 \CFA \code{waitfor}, 2 \code{monitor}& 422 & 426.79 & 7.95 \\3004 \CFA \code{waitfor}, 4 \code{monitor}& 579.5 & 585.46 & 11.25 \\3096 \uC @Accept@ & 350 & 350.61 & 3.11 \\ 3097 \CFA @waitfor@, 1 @monitor@ & 358.5 & 358.36 & 3.82 \\ 3098 \CFA @waitfor@, 2 @monitor@ & 422 & 426.79 & 7.95 \\ 3099 \CFA @waitfor@, 4 @monitor@ & 579.5 & 585.46 & 11.25 \\ 3005 3100 \hline 3006 3101 \end{tabular} … … 3020 3115 \begin{center} 3021 3116 \texttt{pthread} 3022 \begin{c code}3117 \begin{cfa} 3023 3118 int main() { 3024 3119 BENCH( … … 3039 3134 printf("%llu\n", result); 3040 3135 } 3041 \end{c code}3136 \end{cfa} 3042 3137 3043 3138 3044 3139 3045 3140 \CFA Threads 3046 \begin{cfa code}3141 \begin{cfa} 3047 3142 int main() { 3048 3143 BENCH( … … 3054 3149 printf("%llu\n", result); 3055 3150 } 3056 \end{cfa code}3151 \end{cfa} 3057 3152 \end{center} 3058 \begin{cfa code}[caption={Benchmark code for \texttt{pthread}s and \CFA to measure object creation},label={lst:creation}]3059 \end{cfa code}3153 \begin{cfa}[caption={Benchmark code for \texttt{pthread}s and \CFA to measure object creation},label={lst:creation}] 3154 \end{cfa} 3060 3155 \end{figure} 3061 3156 … … 3141 3236 \begin{tabular}[t]{|c|c|c|} 3142 3237 Sequential & Library Parallel & Language Parallel \\ 3143 \begin{cfa code}[tabsize=3]3238 \begin{cfa}[tabsize=3] 3144 3239 void big_sum( 3145 3240 int* a, int* b, … … 3165 3260 //... fill in a & b 3166 3261 big_sum(a,b,c,10000); 3167 \end{cfa code} &\begin{cfacode}[tabsize=3]3262 \end{cfa} &\begin{cfa}[tabsize=3] 3168 3263 void big_sum( 3169 3264 int* a, int* b, … … 3189 3284 //... fill in a & b 3190 3285 big_sum(a,b,c,10000); 3191 \end{cfa code}&\begin{cfacode}[tabsize=3]3286 \end{cfa}&\begin{cfa}[tabsize=3] 3192 3287 void big_sum( 3193 3288 int* a, int* b, … … 3213 3308 //... fill in a & b 3214 3309 big_sum(a,b,c,10000); 3215 \end{cfa code}3310 \end{cfa} 3216 3311 \end{tabular} 3217 3312 \end{center} -
doc/papers/concurrency/annex/local.bib
r6ecc079 raac7197 21 21 @string{pldi="Programming Language Design and Implementation"} 22 22 23 24 @article{HPP:Study, 25 keywords = {Parallel, Productivity}, 26 author = {Lorin Hochstein and Jeff Carver and Forrest Shull and Sima Asgari and Victor Basili and Jeffrey K. Hollingsworth and Marvin V. Zelkowitz }, 27 title = {Parallel Programmer Productivity: A Case Study of Novice Parallel Programmers}, 23 @inproceedings{Hochstein05, 24 keywords = {Application software; Computer aided software engineering; Concurrent computing; Educational 25 institutions; High performance computing; Humans; Instruments; Productivity; Programming profession; 26 Software engineering}, 27 author = {Lorin Hochstein and Jeff Carver and Forrest Shull and Sima Asgari and Victor Basili and Jeffrey K. Hollingsworth and Marvin V. Zelkowitz}, 28 title = {Parallel Programmer Productivity: A Case Study of Novice Parallel Programmers}, 29 booktitle = {Supercomputing, 2005. Proceedings of the ACM/IEEE SC 2005 Conference}, 30 publisher = {IEEE}, 31 year = {2005}, 32 pages = {35-35}, 33 month = nov, 28 34 } 29 35 … … 35 41 } 36 42 37 @article{TBB, 38 key = {TBB}, 39 keywords = {Intel, TBB}, 40 title = {Intel Thread Building Blocks}, 41 note = "\url{https://www.threadingbuildingblocks.org/}" 43 @misc{TBB, 44 keywords = {Intel, TBB}, 45 key = {TBB}, 46 title = {Thread Building Blocks}, 47 howpublished= {Intel, \url{https://www.threadingbuildingblocks.org}}, 48 note = {Accessed: 2018-3}, 42 49 } 43 50 … … 48 55 title = {C$\forall$ Programmming Language}, 49 56 note = {\url{https://plg.uwaterloo.ca/~cforall}}, 50 }51 52 @mastersthesis{rob-thesis,53 keywords = {Constructors, Destructors, Tuples},54 author = {Rob Schluntz},55 title = {Resource Management and Tuples in Cforall},56 year = 2017,57 school = {University of Waterloo},58 note = {\url{https://uwspace.uwaterloo.ca/handle/10012/11830}},59 57 } 60 58 -
doc/papers/concurrency/style/cfa-format.tex
r6ecc079 raac7197 11 11 % from https://gist.github.com/nikolajquorning/92bbbeef32e1dd80105c9bf2daceb89a 12 12 \lstdefinelanguage{sml} { 13 morekeywords= { 14 EQUAL, GREATER, LESS, NONE, SOME, abstraction, abstype, and, andalso, array, as, before, bool, case, char, datatype, do, else, end, eqtype, exception, exn, false, fn, fun, functor, handle, if, in, include, infix, infixr, int, let, list, local, nil, nonfix, not, o, of, op, open, option, orelse, overload, print, raise, real, rec, ref, sharing, sig, signature, string, struct, structure, substring, then, true, type, unit, val, vector, where, while, with, withtype, word 15 }, 16 morestring=[b]", 17 morecomment=[s]{(*}{*)}, 13 morekeywords= { 14 EQUAL, GREATER, LESS, NONE, SOME, abstraction, abstype, and, andalso, array, as, before, 15 bool, case, char, datatype, do, else, end, eqtype, exception, exn, false, fn, fun, functor, 16 handle, if, in, include, infix, infixr, int, let, list, local, nil, nonfix, not, o, of, op, 17 open, option, orelse, overload, print, raise, real, rec, ref, sharing, sig, signature, 18 string, struct, structure, substring, then, true, type, unit, val, vector, where, while, 19 with, withtype, word 20 }, 21 morestring=[b]", 22 morecomment=[s]{(*}{*)}, 18 23 } 19 24 20 25 \lstdefinelanguage{D}{ 21 % Keywords 22 morekeywords=[1]{ 23 abstract, alias, align, auto, body, break, cast, catch, class, const, 24 continue, debug, delegate, delete, deprecated, do, else, enum, export, 25 false, final, finally, for, foreach, foreach_reverse, function, goto, if, 26 immutable, import, in, inout, interface, invariant, is, lazy, macro, mixin, 27 module, new, nothrow, null, out, override, package, pragma, private, 28 protected, public, pure, ref, return, shared, static, struct, super, 29 switch, synchronized, template, this, throw, true, try, typedef, typeid, 30 typeof, union, unittest, volatile, while, with 31 }, 32 % Special identifiers, common functions 33 morekeywords=[2]{enforce}, 34 % Ugly identifiers 35 morekeywords=[3]{ 36 __DATE__, __EOF__, __FILE__, __LINE__, __TIMESTAMP__, __TIME__, __VENDOR__, 37 __VERSION__, __ctfe, __gshared, __monitor, __thread, __vptr, _argptr, 38 _arguments, _ctor, _dtor 39 }, 40 % Basic types 41 morekeywords=[4]{ 42 byte, ubyte, short, ushort, int, uint, long, ulong, cent, ucent, void, 43 bool, bit, float, double, real, ushort, int, uint, long, ulong, float, 44 char, wchar, dchar, string, wstring, dstring, ireal, ifloat, idouble, 45 creal, cfloat, cdouble, size_t, ptrdiff_t, sizediff_t, equals_t, hash_t 46 }, 47 % Strings 48 morestring=[b]{"}, 49 morestring=[b]{'}, 50 morestring=[b]{`}, 51 % Comments 52 comment=[l]{//}, 53 morecomment=[s]{/*}{*/}, 54 morecomment=[s][\color{blue}]{/**}{*/}, 55 morecomment=[n]{/+}{+/}, 56 morecomment=[n][\color{blue}]{/++}{+/}, 57 % Options 58 sensitive=true 26 % Keywords 27 morekeywords=[1]{ 28 abstract, alias, align, auto, body, break, cast, catch, class, const, continue, debug, 29 delegate, delete, deprecated, do, else, enum, export, false, final, finally, for, foreach, 30 foreach_reverse, function, goto, if, immutable, import, in, inout, interface, invariant, is, 31 lazy, macro, mixin, module, new, nothrow, null, out, override, package, pragma, private, 32 protected, public, pure, ref, return, shared, static, struct, super, switch, synchronized, 33 template, this, throw, true, try, typedef, typeid, typeof, union, unittest, volatile, while, 34 with 35 }, 36 % Special identifiers, common functions 37 morekeywords=[2]{enforce}, 38 % Ugly identifiers 39 morekeywords=[3]{ 
40 __DATE__, __EOF__, __FILE__, __LINE__, __TIMESTAMP__, __TIME__, __VENDOR__, 41 __VERSION__, __ctfe, __gshared, __monitor, __thread, __vptr, _argptr, 42 _arguments, _ctor, _dtor 43 }, 44 % Basic types 45 morekeywords=[4]{ 46 byte, ubyte, short, ushort, int, uint, long, ulong, cent, ucent, void, bool, bit, float, 47 double, real, ushort, int, uint, long, ulong, float, char, wchar, dchar, string, wstring, 48 dstring, ireal, ifloat, idouble, creal, cfloat, cdouble, size_t, ptrdiff_t, sizediff_t, 49 equals_t, hash_t 50 }, 51 % Strings 52 morestring=[b]{"}, 53 morestring=[b]{'}, 54 morestring=[b]{`}, 55 % Comments 56 comment=[l]{//}, 57 morecomment=[s]{/*}{*/}, 58 morecomment=[s][\color{blue}]{/**}{*/}, 59 morecomment=[n]{/+}{+/}, 60 morecomment=[n][\color{blue}]{/++}{+/}, 61 % Options 62 sensitive=true 59 63 } 60 64 61 65 \lstdefinelanguage{rust}{ 62 % Keywords 63 morekeywords=[1]{ 64 abstract, alignof, as, become, box, 65 break, const, continue, crate, do, 66 else, enum, extern, false, final, 67 fn, for, if, impl, in, 68 let, loop, macro, match, mod, 69 move, mut, offsetof, override, priv, 70 proc, pub, pure, ref, return, 71 Self, self, sizeof, static, struct, 72 super, trait, true, type, typeof, 73 unsafe, unsized, use, virtual, where, 74 while, yield 75 }, 76 % Strings 77 morestring=[b]{"}, 78 % Comments 79 comment=[l]{//}, 80 morecomment=[s]{/*}{*/}, 81 % Options 82 sensitive=true 66 % Keywords 67 morekeywords=[1]{ 68 abstract, alignof, as, become, box, break, const, continue, crate, do, else, enum, extern, 69 false, final, fn, for, if, impl, in, let, loop, macro, match, mod, move, mut, offsetof, 70 override, priv, proc, pub, pure, ref, return, Self, self, sizeof, static, struct, super, 71 trait, true, type, typeof, unsafe, unsized, use, virtual, where, while, yield 72 }, 73 % Strings 74 morestring=[b]{"}, 75 % Comments 76 comment=[l]{//}, 77 morecomment=[s]{/*}{*/}, 78 % Options 79 sensitive=true 83 80 } 84 81 85 82 \lstdefinelanguage{pseudo}{ 86 morekeywords={string,uint,int,bool,float}, %87 sensitive=true, %88 morecomment=[l]{//}, %89 morecomment=[s]{/*}{*/}, %90 morestring=[b]', %91 morestring=[b]", %92 morestring=[s]{`}{`}, %93 } %83 morekeywords={string,uint,int,bool,float}, 84 sensitive=true, 85 morecomment=[l]{//}, 86 morecomment=[s]{/*}{*/}, 87 morestring=[b]', 88 morestring=[b]", 89 morestring=[s]{`}{`}, 90 } 94 91 95 92 \newcommand{\KWC}{K-W C\xspace} 96 93 97 94 \lstdefinestyle{pseudoStyle}{ 98 99 100 101 102 103 104 stringstyle=\sf\color{Mahogany},% use sanserif font105 106 107 aboveskip=4pt,% spacing above/below code block108 109 110 111 112 113 showlines=true,% show blank lines at end of code114 115 116 117 xleftmargin=\parindentlnth,% indent code to paragraph indentation118 119 120 95 escapeinside={@@}, 96 basicstyle=\linespread{0.9}\sf\footnotesize, % reduce line spacing and use typewriter font 97 keywordstyle=\bfseries\color{blue}, 98 keywordstyle=[2]\bfseries\color{Plum}, 99 commentstyle=\itshape\color{OliveGreen}, % green and italic comments 100 identifierstyle=\color{identifierCol}, 101 stringstyle=\sf\color{Mahogany}, % use sanserif font 102 mathescape=true, 103 columns=fixed, 104 aboveskip=4pt, % spacing above/below code block 105 belowskip=3pt, 106 keepspaces=true, 107 tabsize=4, 108 % frame=lines, 109 literate=, 110 showlines=true, % show blank lines at end of code 111 showspaces=false, 112 showstringspaces=false, 113 escapechar=\$, 114 xleftmargin=\parindentlnth, % indent code to paragraph indentation 115 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red 
highlighting 116 % moredelim=* detects keywords, comments, strings, and other delimiters and applies their formatting 117 % moredelim=** allows cumulative application 121 118 } 122 119 123 120 \lstdefinestyle{defaultStyle}{ 124 125 126 127 128 129 130 stringstyle=\sf\color{Mahogany},% use sanserif font131 132 133 aboveskip=4pt,% spacing above/below code block134 135 136 137 138 139 showlines=true,% show blank lines at end of code140 141 142 143 xleftmargin=\parindentlnth,% indent code to paragraph indentation144 145 146 121 escapeinside={@@}, 122 basicstyle=\linespread{0.9}\tt\footnotesize, % reduce line spacing and use typewriter font 123 keywordstyle=\bfseries\color{blue}, 124 keywordstyle=[2]\bfseries\color{Plum}, 125 commentstyle=\itshape\color{OliveGreen}, % green and italic comments 126 identifierstyle=\color{identifierCol}, 127 stringstyle=\sf\color{Mahogany}, % use sanserif font 128 mathescape=true, 129 columns=fixed, 130 aboveskip=4pt, % spacing above/below code block 131 belowskip=3pt, 132 keepspaces=true, 133 tabsize=4, 134 % frame=lines, 135 literate=, 136 showlines=true, % show blank lines at end of code 137 showspaces=false, 138 showstringspaces=false, 139 escapechar=\$, 140 xleftmargin=\parindentlnth, % indent code to paragraph indentation 141 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red highlighting 142 % moredelim=* detects keywords, comments, strings, and other delimiters and applies their formatting 143 % moredelim=** allows cumulative application 147 144 } 148 145 149 146 \lstdefinestyle{cfaStyle}{ 150 151 147 escapeinside={@@}, 148 basicstyle=\linespread{0.9}\sf, % reduce line spacing and use typewriter font 152 149 % keywordstyle=\bfseries\color{blue}, 153 150 keywordstyle=[2]\bfseries\color{red}, 154 151 % commentstyle=\sf\itshape\color{OliveGreen}, % green and italic comments 155 156 % stringstyle=\sf\color{Mahogany}, 157 158 159 160 aboveskip=4pt,% spacing above/below code block161 162 163 164 165 166 showlines=true,% show blank lines at end of code167 168 169 170 xleftmargin=\parindentlnth,% indent code to paragraph indentation171 172 152 identifierstyle=\color{identifierCol}, 153 % stringstyle=\sf\color{Mahogany}, % use sanserif font 154 stringstyle=\tt, % use typewriter font 155 mathescape=true, 156 columns=fixed, 157 aboveskip=4pt, % spacing above/below code block 158 belowskip=3pt, 159 keepspaces=true, 160 tabsize=4, 161 % frame=lines, 162 literate=, 163 showlines=true, % show blank lines at end of code 164 showspaces=false, 165 showstringspaces=false, 166 escapechar=\$, 167 xleftmargin=\parindentlnth, % indent code to paragraph indentation 168 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red highlighting 169 morekeywords=[2]{accept, signal, signal_block, wait, waitfor}, 173 170 } 174 171 … … 176 173 177 174 \lstnewenvironment{ccode}[1][]{ 178 179 180 181 182 183 175 \lstset{ 176 language = C, 177 style=defaultStyle, 178 captionpos=b, 179 #1 180 } 184 181 }{} 185 182 186 183 \lstnewenvironment{cfacode}[1][]{ 187 188 189 190 191 192 184 \lstset{ 185 language = CFA, 186 style=cfaStyle, 187 captionpos=b, 188 #1 189 } 193 190 }{} 194 191 195 192 \lstnewenvironment{pseudo}[1][]{ 196 197 198 199 200 201 193 \lstset{ 194 language = pseudo, 195 style=pseudoStyle, 196 captionpos=b, 197 #1 198 } 202 199 }{} 203 200 204 201 \lstnewenvironment{cppcode}[1][]{ 205 206 207 208 209 210 202 \lstset{ 203 language = c++, 204 style=defaultStyle, 205 captionpos=b, 206 #1 207 } 211 208 }{} 212 209 213 210 \lstnewenvironment{ucppcode}[1][]{ 214 215 216 217 218 
219 211 \lstset{ 212 language = c++, 213 style=defaultStyle, 214 captionpos=b, 215 #1 216 } 220 217 }{} 221 218 222 219 \lstnewenvironment{javacode}[1][]{ 223 224 225 226 227 228 220 \lstset{ 221 language = java, 222 style=defaultStyle, 223 captionpos=b, 224 #1 225 } 229 226 }{} 230 227 231 228 \lstnewenvironment{scalacode}[1][]{ 232 233 234 235 236 237 229 \lstset{ 230 language = scala, 231 style=defaultStyle, 232 captionpos=b, 233 #1 234 } 238 235 }{} 239 236 240 237 \lstnewenvironment{smlcode}[1][]{ 241 242 243 244 245 246 238 \lstset{ 239 language = sml, 240 style=defaultStyle, 241 captionpos=b, 242 #1 243 } 247 244 }{} 248 245 249 246 \lstnewenvironment{dcode}[1][]{ 250 251 252 253 254 255 247 \lstset{ 248 language = D, 249 style=defaultStyle, 250 captionpos=b, 251 #1 252 } 256 253 }{} 257 254 258 255 \lstnewenvironment{rustcode}[1][]{ 259 260 261 262 263 264 256 \lstset{ 257 language = rust, 258 style=defaultStyle, 259 captionpos=b, 260 #1 261 } 265 262 }{} 266 263 267 264 \lstnewenvironment{gocode}[1][]{ 268 269 270 271 272 273 265 \lstset{ 266 language = Golang, 267 style=defaultStyle, 268 captionpos=b, 269 #1 270 } 274 271 }{} 275 272 … … 279 276 \newcommand{\code}[1]{\lstinline[language=CFA,style=cfaStyle]{#1}} 280 277 \newcommand{\pscode}[1]{\lstinline[language=pseudo,style=pseudoStyle]{#1}} 278 279 % Local Variables: % 280 % tab-width: 4 % 281 % fill-column: 100 % 282 % End: %