Changeset b5563e1
- Timestamp:
- Mar 27, 2018, 10:21:32 AM
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children:
- 3d2b7bc
- Parents:
- a9b1b0c (diff), af1ed1ad (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
- 1 added
- 11 edited
doc/papers/concurrency/Makefile
ra9b1b0c rb5563e1 4 4 Figures = figures 5 5 Macros = AMA/AMA-stix/ama 6 TeXLIB = .: style:annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography:6 TeXLIB = .:annex:../../LaTeXmacros:${Macros}:${Build}:../../bibliography: 7 7 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && latex -halt-on-error -output-directory=${Build} 8 8 BibTeX = BIBINPUTS=${TeXLIB} && export BIBINPUTS && bibtex -
doc/papers/concurrency/Paper.tex
ra9b1b0c rb5563e1 1 % inline code ©...© (copyright symbol) emacs: C-q M-)2 % red highlighting ®...® (registered trademark symbol) emacs: C-q M-.3 % blue highlighting ß...ß (sharp s symbol) emacs: C-q M-_4 % green highlighting ¢...¢ (cent symbol) emacs: C-q M-"5 % LaTex escape §...§ (section symbol) emacs: C-q M-'6 % keyword escape ¶...¶ (pilcrow symbol) emacs: C-q M-^7 % math escape $...$ (dollar symbol)8 9 1 \documentclass[AMA,STIX1COL]{WileyNJD-v2} 10 2 … … 20 12 21 13 % Latex packages used in the document. 22 23 \usepackage[T1]{fontenc} % allow Latin1 (extended ASCII) characters24 \usepackage{textcomp}25 \usepackage[latin1]{inputenc}26 27 14 \usepackage{epic,eepic} 28 15 \usepackage{xspace} … … 34 21 \usepackage{siunitx} 35 22 \sisetup{ binary-units=true } 36 \input{style} % bespoke macros used in the document23 %\input{style} % bespoke macros used in the document 37 24 38 25 \hypersetup{breaklinks=true} … … 51 38 % Names used in the document. 52 39 53 \newcommand{\CS}{C\raisebox{-0.9ex}{\large$^\sharp$}\xspace} 40 \newcommand{\CFAIcon}{\textsf{C}\raisebox{\depth}{\rotatebox{180}{\textsf{A}}}\xspace} % Cforall symbolic name 41 \newcommand{\CFA}{\protect\CFAIcon} % safe for section/caption 42 \newcommand{\CFL}{\textrm{Cforall}\xspace} % Cforall symbolic name 43 \newcommand{\Celeven}{\textrm{C11}\xspace} % C11 symbolic name 44 \newcommand{\CC}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}\xspace} % C++ symbolic name 45 \newcommand{\CCeleven}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}11\xspace} % C++11 symbolic name 46 \newcommand{\CCfourteen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}14\xspace} % C++14 symbolic name 47 \newcommand{\CCseventeen}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}17\xspace} % C++17 symbolic name 48 \newcommand{\CCtwenty}{\textrm{C}\kern-.1em\hbox{+\kern-.25em+}20\xspace} % C++20 symbolic name 49 \newcommand{\Csharp}{C\raisebox{-0.7ex}{\Large$^\sharp$}\xspace} % C# symbolic name 50 51 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 54 52 55 53 \newcommand{\Textbf}[2][red]{{\color{#1}{\textbf{#2}}}} … … 62 60 \newcommand{\TODO}{{\Textbf{TODO}}} 63 61 64 65 \newsavebox{\LstBox}66 67 62 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 63 64 % Default underscore is too low and wide. Cannot use lstlisting "literate" as replacing underscore 65 % removes it as a variable-name character so keywords in variables are highlighted. MUST APPEAR 66 % AFTER HYPERREF. 67 %\DeclareTextCommandDefault{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.1ex}}} 68 \renewcommand{\textunderscore}{\leavevmode\makebox[1.2ex][c]{\rule{1ex}{0.075ex}}} 69 70 \makeatletter 71 % parindent is relative, i.e., toggled on/off in environments like itemize, so store the value for 72 % use rather than use \parident directly. 
73 \newlength{\parindentlnth} 74 \setlength{\parindentlnth}{\parindent} 75 76 \newcommand{\LstBasicStyle}[1]{{\lst@basicstyle{\lst@basicstyle{#1}}}} 77 \newcommand{\LstKeywordStyle}[1]{{\lst@basicstyle{\lst@keywordstyle{#1}}}} 78 \newcommand{\LstCommentStyle}[1]{{\lst@basicstyle{\lst@commentstyle{#1}}}} 79 80 \newlength{\gcolumnposn} % temporary hack because lstlisting does not handle tabs correctly 81 \newlength{\columnposn} 82 \setlength{\gcolumnposn}{3.5in} 83 \setlength{\columnposn}{\gcolumnposn} 84 \newcommand{\C}[2][\@empty]{\ifx#1\@empty\else\global\setlength{\columnposn}{#1}\global\columnposn=\columnposn\fi\hfill\makebox[\textwidth-\columnposn][l]{\lst@basicstyle{\LstCommentStyle{#2}}}} 85 \newcommand{\CRT}{\global\columnposn=\gcolumnposn} 86 87 % Denote newterms in particular font and index them without particular font and in lowercase, e.g., \newterm{abc}. 88 % The option parameter provides an index term different from the new term, e.g., \newterm[\texttt{abc}]{abc} 89 % The star version does not lowercase the index information, e.g., \newterm*{IBM}. 90 \newcommand{\newtermFontInline}{\emph} 91 \newcommand{\newterm}{\@ifstar\@snewterm\@newterm} 92 \newcommand{\@newterm}[2][\@empty]{\lowercase{\def\temp{#2}}{\newtermFontInline{#2}}\ifx#1\@empty\index{\temp}\else\index{#1@{\protect#2}}\fi} 93 \newcommand{\@snewterm}[2][\@empty]{{\newtermFontInline{#2}}\ifx#1\@empty\index{#2}\else\index{#1@{\protect#2}}\fi} 94 95 % Latin abbreviation 96 \newcommand{\abbrevFont}{\textit} % set empty for no italics 97 \newcommand{\EG}{\abbrevFont{e}.\abbrevFont{g}.} 98 \newcommand*{\eg}{% 99 \@ifnextchar{,}{\EG}% 100 {\@ifnextchar{:}{\EG}% 101 {\EG,\xspace}}% 102 }% 103 \newcommand{\IE}{\abbrevFont{i}.\abbrevFont{e}.} 104 \newcommand*{\ie}{% 105 \@ifnextchar{,}{\IE}% 106 {\@ifnextchar{:}{\IE}% 107 {\IE,\xspace}}% 108 }% 109 \newcommand{\ETC}{\abbrevFont{etc}} 110 \newcommand*{\etc}{% 111 \@ifnextchar{.}{\ETC}% 112 {\ETC.\xspace}% 113 }% 114 \newcommand{\ETAL}{\abbrevFont{et}~\abbrevFont{al}} 115 \renewcommand*{\etal}{% 116 \@ifnextchar{.}{\protect\ETAL}% 117 {\protect\ETAL.\xspace}% 118 }% 119 \newcommand{\VIZ}{\abbrevFont{viz}} 120 \newcommand*{\viz}{% 121 \@ifnextchar{.}{\VIZ}% 122 {\VIZ.\xspace}% 123 }% 124 \makeatother 125 126 \newenvironment{cquote}{% 127 \list{}{\lstset{resetmargins=true,aboveskip=0pt,belowskip=0pt}\topsep=3pt\parsep=0pt\leftmargin=\parindentlnth\rightmargin\leftmargin}% 128 \item\relax 129 }{% 130 \endlist 131 }% cquote 132 133 % CFA programming language, based on ANSI C (with some gcc additions) 134 \lstdefinelanguage{CFA}[ANSI]{C}{ 135 morekeywords={ 136 _Alignas, _Alignof, __alignof, __alignof__, asm, __asm, __asm__, _At, __attribute, 137 __attribute__, auto, _Bool, catch, catchResume, choose, _Complex, __complex, __complex__, 138 __const, __const__, disable, dtype, enable, exception, __extension__, fallthrough, fallthru, 139 finally, forall, ftype, _Generic, _Imaginary, inline, __label__, lvalue, _Noreturn, one_t, 140 otype, restrict, _Static_assert, throw, throwResume, trait, try, ttype, typeof, __typeof, 141 __typeof__, virtual, with, zero_t}, 142 morekeywords=[2]{ 143 _Atomic, coroutine, is_coroutine, is_monitor, is_thread, monitor, mutex, nomutex, or, 144 resume, suspend, thread, _Thread_local, waitfor, when, yield}, 145 moredirectives={defined,include_next}% 146 } 147 148 \lstset{ 149 language=CFA, 150 columns=fullflexible, 151 basicstyle=\linespread{0.9}\sf, % reduce line spacing and use sanserif font 152 stringstyle=\tt, % use typewriter font 153 tabsize=5, % N space 
tabbing 154 xleftmargin=\parindentlnth, % indent code to paragraph indentation 155 %mathescape=true, % LaTeX math escape in CFA code $...$ 156 escapechar=\$, % LaTeX escape in CFA code 157 keepspaces=true, % 158 showstringspaces=false, % do not show spaces with cup 159 showlines=true, % show blank lines at end of code 160 aboveskip=4pt, % spacing above/below code block 161 belowskip=3pt, 162 % replace/adjust listing characters that look bad in sanserif 163 literate={-}{\makebox[1ex][c]{\raisebox{0.4ex}{\rule{0.8ex}{0.1ex}}}}1 {^}{\raisebox{0.6ex}{$\scriptstyle\land\,$}}1 164 {~}{\raisebox{0.3ex}{$\scriptstyle\sim\,$}}1 % {`}{\ttfamily\upshape\hspace*{-0.1ex}`}1 165 {<-}{$\leftarrow$}2 {=>}{$\Rightarrow$}2 {->}{\makebox[1ex][c]{\raisebox{0.5ex}{\rule{0.8ex}{0.075ex}}}\kern-0.2ex{\textgreater}}2, 166 moredelim=**[is][\color{red}]{`}{`}, 167 }% lstset 168 169 % uC++ programming language, based on ANSI C++ 170 \lstdefinelanguage{uC++}[ANSI]{C++}{ 171 morekeywords={ 172 _Accept, _AcceptReturn, _AcceptWait, _Actor, _At, _CatchResume, _Cormonitor, _Coroutine, _Disable, 173 _Else, _Enable, _Event, _Finally, _Monitor, _Mutex, _Nomutex, _PeriodicTask, _RealTimeTask, 174 _Resume, _Select, _SporadicTask, _Task, _Timeout, _When, _With, _Throw}, 175 } 176 \lstdefinelanguage{Golang}{ 177 morekeywords=[1]{package,import,func,type,struct,return,defer,panic,recover,select,var,const,iota,}, 178 morekeywords=[2]{string,uint,uint8,uint16,uint32,uint64,int,int8,int16,int32,int64, 179 bool,float32,float64,complex64,complex128,byte,rune,uintptr, error,interface}, 180 morekeywords=[3]{map,slice,make,new,nil,len,cap,copy,close,true,false,delete,append,real,imag,complex,chan,}, 181 morekeywords=[4]{for,break,continue,range,goto,switch,case,fallthrough,if,else,default,}, 182 morekeywords=[5]{Println,Printf,Error,}, 183 sensitive=true, 184 morecomment=[l]{//}, 185 morecomment=[s]{/*}{*/}, 186 morestring=[b]', 187 morestring=[b]", 188 morestring=[s]{`}{`}, 189 } 190 191 \lstnewenvironment{cfa}[1][] 192 {\lstset{#1}} 193 {} 194 \lstnewenvironment{C++}[1][] % use C++ style 195 {\lstset{language=C++,moredelim=**[is][\protect\color{red}]{`}{`},#1}\lstset{#1}} 196 {} 197 \lstnewenvironment{uC++}[1][] 198 {\lstset{#1}} 199 {} 200 \lstnewenvironment{Go}[1][] 201 {\lstset{#1}} 202 {} 203 204 % inline code @...@ 205 \lstMakeShortInline@% 206 68 207 69 208 \title{\texorpdfstring{Concurrency in \protect\CFA}{Concurrency in Cforall}} … … 81 220 \abstract[Summary]{ 82 221 \CFA is a modern, polymorphic, \emph{non-object-oriented} extension of the C programming language. 83 This paper discusses the design of the concurrency and parallelism features in \CFA, and the concurrent runtime-system that supports them.222 This paper discusses the design of the concurrency and parallelism features in \CFA, and the concurrent runtime-system. 84 223 These features are created from scratch as ISO C lacks concurrency, relying largely on pthreads. 85 224 Coroutines and lightweight (user) threads are introduced into the language. … … 87 226 A unique contribution is allowing multiple monitors to be safely acquired simultaneously. 88 227 All features respect the expectations of C programmers, while being fully integrate with the \CFA polymorphic type-system and other language features. 
89 Finally, experimental results are presented to validate several of the new features withother concurrent programming-languages.228 Finally, experimental results are presented to compare the performance of the new features with similar mechanisms in other concurrent programming-languages. 90 229 }% 91 230 … … 104 243 % ====================================================================== 105 244 106 This paper provides a minimal concurrency \textbf{api} that is simple, efficient and can be reused to build higher-level features. 107 The simplest possible concurrency system is a thread and a lock but this low-level approach is hard to master. 108 An easier approach for users is to support higher-level constructs as the basis of concurrency. 109 Indeed, for highly productive concurrent programming, high-level approaches are much more popular~\cite{HPP:Study}. 110 Examples are task based, message passing and implicit threading. 111 The high-level approach and its minimal \textbf{api} are tested in a dialect of C, called \CFA. 112 Furthermore, the proposed \textbf{api} doubles as an early definition of the \CFA language and library. 113 This paper also provides an implementation of the concurrency library for \CFA as well as all the required language features added to the source-to-source translator. 114 115 There are actually two problems that need to be solved in the design of concurrency for a programming language: which concurrency and which parallelism tools are available to the programmer. 245 This paper provides a minimal concurrency \newterm{API} that is simple, efficient and can be used to build other concurrency features. 246 While the simplest concurrency system is a thread and a lock, this low-level approach is hard to master. 247 An easier approach for programmers is to support higher-level constructs as the basis of concurrency. 248 Indeed, for highly productive concurrent programming, high-level approaches are much more popular~\cite{Hochstein05}. 249 Examples of high-level approaches are task based~\cite{TBB}, message passing~\cite{Erlang,MPI}, and implicit threading~\cite{OpenMP}. 250 251 The terminology used in this paper is as follows. 252 A \newterm{thread} is a fundamental unit of execution that runs a sequence of code and requires a stack to maintain state. 253 Multiple simultaneous threads gives rise to \newterm{concurrency}, which requires locking to ensure safe access to shared data. 254 % Correspondingly, concurrency is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, etc.) concurrent threads are introduced. 255 \newterm{Locking}, and by extension locks, are defined as a mechanism to prevent progress of threads to provide safety. 256 \newterm{Parallelism} is running multiple threads simultaneously. 257 Parallelism implies \emph{actual} simultaneous execution, where concurrency only requires \emph{apparent} simultaneous execution. 258 As such, parallelism is only observable in differences in performance, which is observed through differences in timing. 259 260 Hence, there are two problems to be solved in the design of concurrency for a programming language: concurrency and parallelism. 116 261 While these two concepts are often combined, they are in fact distinct, requiring different tools~\cite{Buhr05a}. 117 Concurrency tools need to handle mutual exclusion and synchronization, while parallelism tools are about performance, cost and resource utilization. 
118 119 In the context of this paper, a \textbf{thread} is a fundamental unit of execution that runs a sequence of code, generally on a program stack. 120 Having multiple simultaneous threads gives rise to concurrency and generally requires some kind of locking mechanism to ensure proper execution. 121 Correspondingly, \textbf{concurrency} is defined as the concepts and challenges that occur when multiple independent (sharing memory, timing dependencies, etc.) concurrent threads are introduced. 122 Accordingly, \textbf{locking} (and by extension locks) are defined as a mechanism that prevents the progress of certain threads in order to avoid problems due to concurrency. 123 Finally, in this paper \textbf{parallelism} is distinct from concurrency and is defined as running multiple threads simultaneously. 124 More precisely, parallelism implies \emph{actual} simultaneous execution as opposed to concurrency which only requires \emph{apparent} simultaneous execution. 125 As such, parallelism is only observable in the differences in performance or, more generally, differences in timing. 262 Concurrency tools handle mutual exclusion and synchronization, while parallelism tools handle performance, cost and resource utilization. 263 264 The proposed concurrency API is implemented in a dialect of C, called \CFA. 265 The paper discusses how the language features are added to the \CFA translator with respect to parsing, semantic, and type checking, and the corresponding high-perforamnce runtime-library to implement the concurrency features. 126 266 127 267 % ====================================================================== … … 139 279 Interestingly, while \CFA is not an object-oriented language, lacking the concept of a receiver (e.g., {\tt this}), it does have some notion of objects\footnote{C defines the term objects as : ``region of data storage in the execution environment, the contents of which can represent 140 280 values''~\cite[3.15]{C11}}, most importantly construction and destruction of objects. 141 Most of the following code examples can be found on the \CFA website~\cite{ www-cfa}.142 143 % ====================================================================== 281 Most of the following code examples can be found on the \CFA website~\cite{Cforall}. 282 283 144 284 \subsection{References} 145 285 146 286 Like \CC, \CFA introduces rebind-able references providing multiple dereferencing as an alternative to pointers. 
147 287 In regards to concurrency, the semantic difference between pointers and references are not particularly relevant, but since this document uses mostly references, here is a quick overview of the semantics: 148 \begin{cfa code}288 \begin{cfa} 149 289 int x, *p1 = &x, **p2 = &p1, ***p3 = &p2, 150 290 &r1 = x, &&r2 = r1, &&&r3 = r2; 151 ***p3 = 3; //change x152 r3 = 3; //change x, ***r3153 **p3 = ...; //change p1154 *p3 = ...; //change p2155 int y, z, & ar[3] = {x, y, z}; //initialize array of references156 typeof( ar[1]) p; //is int, referenced object type157 typeof(&ar[1]) q; //is int &, reference type158 sizeof( ar[1]) == sizeof(int); //is true, referenced object size159 sizeof(&ar[1]) == sizeof(int *); //is true, reference size160 \end{cfa code}291 ***p3 = 3; $\C{// change x}$ 292 r3 = 3; $\C{// change x, ***r3}$ 293 **p3 = ...; $\C{// change p1}$ 294 *p3 = ...; $\C{// change p2}$ 295 int y, z, & ar[3] = {x, y, z}; $\C{// initialize array of references}$ 296 typeof( ar[1]) p; $\C{// is int, referenced object type}$ 297 typeof(&ar[1]) q; $\C{// is int \&, reference type}$ 298 sizeof( ar[1]) == sizeof(int); $\C{// is true, referenced object size}$ 299 sizeof(&ar[1]) == sizeof(int *); $\C{// is true, reference size}$ 300 \end{cfa} 161 301 The important take away from this code example is that a reference offers a handle to an object, much like a pointer, but which is automatically dereferenced for convenience. 162 302 … … 167 307 As well, \CFA uses the return type as part of the selection criteria, as in Ada~\cite{Ada}. 168 308 For routines with multiple parameters and returns, the selection is complex. 169 \begin{cfa code}170 // selection based on type and number of parameters171 void f(void); //(1)172 void f(char); //(2)173 void f(int, double); //(3)174 f(); //select (1)175 f('a'); //select (2)176 f(3, 5.2); //select (3)177 178 // selection based on type and number of returns179 char f(int); //(1)180 double f(int); //(2)181 char c = f(3); //select (1)182 double d = f(4); //select (2)183 \end{cfa code}309 \begin{cfa} 310 // selection based on type and number of parameters 311 void f(void); $\C{// (1)}$ 312 void f(char); $\C{// (2)}$ 313 void f(int, double); $\C{// (3)}$ 314 f(); $\C{// select (1)}$ 315 f('a'); $\C{// select (2)}$ 316 f(3, 5.2); $\C{// select (3)}$ 317 318 // selection based on type and number of returns 319 char f(int); $\C{// (1)}$ 320 double f(int); $\C{// (2)}$ 321 char c = f(3); $\C{// select (1)}$ 322 double d = f(4); $\C{// select (2)}$ 323 \end{cfa} 184 324 This feature is particularly important for concurrency since the runtime system relies on creating different types to represent concurrency objects. 185 325 Therefore, overloading is necessary to prevent the need for long prefixes and other naming conventions that prevent name clashes. 186 As seen in section \ref{basics}, routine \code{main}is an example that benefits from overloading.326 As seen in section \ref{basics}, routine @main@ is an example that benefits from overloading. 187 327 188 328 % ====================================================================== … … 190 330 Overloading also extends to operators. 191 331 The syntax for denoting operator-overloading is to name a routine with the symbol of the operator and question marks where the arguments of the operation appear, e.g.: 192 \begin{cfa code}193 int ++? (int op); //unary prefix increment194 int ?++ (int op); //unary postfix increment195 int ?+? (int op1, int op2); //binary plus196 int ?<=?(int op1, int op2); //binary less than197 int ?=? 
(int & op1, int op2); //binary assignment198 int ?+=?(int & op1, int op2); //binary plus-assignment332 \begin{cfa} 333 int ++? (int op); $\C{// unary prefix increment}$ 334 int ?++ (int op); $\C{// unary postfix increment}$ 335 int ?+? (int op1, int op2); $\C{// binary plus}$ 336 int ?<=?(int op1, int op2); $\C{// binary less than}$ 337 int ?=? (int & op1, int op2); $\C{// binary assignment}$ 338 int ?+=?(int & op1, int op2); $\C{// binary plus-assignment}$ 199 339 200 340 struct S {int i, j;}; 201 S ?+?(S op1, S op2) { //add two structures341 S ?+?(S op1, S op2) { $\C{// add two structures}$ 202 342 return (S){op1.i + op2.i, op1.j + op2.j}; 203 343 } 204 344 S s1 = {1, 2}, s2 = {2, 3}, s3; 205 s3 = s1 + s2; //compute sum: s3 == {2, 5}206 \end{cfa code}345 s3 = s1 + s2; $\C{// compute sum: s3 == {2, 5}}$ 346 \end{cfa} 207 347 While concurrency does not use operator overloading directly, this feature is more important as an introduction for the syntax of constructors. 208 348 … … 211 351 Object lifetime is often a challenge in concurrency. \CFA uses the approach of giving concurrent meaning to object lifetime as a means of synchronization and/or mutual exclusion. 212 352 Since \CFA relies heavily on the lifetime of objects, constructors and destructors is a core feature required for concurrency and parallelism. \CFA uses the following syntax for constructors and destructors: 213 \begin{cfa code}353 \begin{cfa} 214 354 struct S { 215 355 size_t size; 216 356 int * ia; 217 357 }; 218 void ?{}(S & s, int asize) { //constructor operator219 s.size = asize; //initialize fields358 void ?{}(S & s, int asize) { $\C{// constructor operator}$ 359 s.size = asize; $\C{// initialize fields}$ 220 360 s.ia = calloc(size, sizeof(S)); 221 361 } 222 void ^?{}(S & s) { //destructor operator223 free(ia); //de-initialization fields362 void ^?{}(S & s) { $\C{// destructor operator}$ 363 free(ia); $\C{// de-initialization fields}$ 224 364 } 225 365 int main() { 226 S x = {10}, y = {100}; //implicit calls: ?{}(x, 10), ?{}(y, 100)227 ... //use x and y228 ^x{}; ^y{}; //explicit calls to de-initialize229 x{20}; y{200}; //explicit calls to reinitialize230 ... //reuse x and y231 } //implicit calls: ^?{}(y), ^?{}(x)232 \end{cfa code}366 S x = {10}, y = {100}; $\C{// implicit calls: ?\{\}(x, 10), ?\{\}(y, 100)}$ 367 ... $\C{// use x and y}$ 368 ^x{}; ^y{}; $\C{// explicit calls to de-initialize}$ 369 x{20}; y{200}; $\C{// explicit calls to reinitialize}$ 370 ... $\C{// reuse x and y}$ 371 } $\C{// implicit calls: \^?\{\}(y), \^?\{\}(x)}$ 372 \end{cfa} 233 373 The language guarantees that every object and all their fields are constructed. 234 374 Like \CC, construction of an object is automatically done on allocation and destruction of the object is done on deallocation. 235 375 Allocation and deallocation can occur on the stack or on the heap. 236 \begin{cfa code}376 \begin{cfa} 237 377 { 238 struct S s = {10}; //allocation, call constructor378 struct S s = {10}; $\C{// allocation, call constructor}$ 239 379 ... 240 } //deallocation, call destructor241 struct S * s = new(); //allocation, call constructor380 } $\C{// deallocation, call destructor}$ 381 struct S * s = new(); $\C{// allocation, call constructor}$ 242 382 ... 
243 delete(s); //deallocation, call destructor244 \end{cfa code}245 Note that like \CC, \CFA introduces \code{new} and \code{delete}, which behave like \code{malloc} and \code{free} in addition to constructing and destructing objects, after calling \code{malloc} and before calling \code{free}, respectively.383 delete(s); $\C{// deallocation, call destructor}$ 384 \end{cfa} 385 Note that like \CC, \CFA introduces @new@ and @delete@, which behave like @malloc@ and @free@ in addition to constructing and destructing objects, after calling @malloc@ and before calling @free@, respectively. 246 386 247 387 % ====================================================================== … … 249 389 \label{s:ParametricPolymorphism} 250 390 Routines in \CFA can also be reused for multiple types. 251 This capability is done using the \code{forall}clauses, which allow separately compiled routines to support generic usage over multiple types.391 This capability is done using the @forall@ clauses, which allow separately compiled routines to support generic usage over multiple types. 252 392 For example, the following sum function works for any type that supports construction from 0 and addition: 253 \begin{cfa code}254 // constraint type, 0 and +393 \begin{cfa} 394 // constraint type, 0 and + 255 395 forall(otype T | { void ?{}(T *, zero_t); T ?+?(T, T); }) 256 396 T sum(T a[ ], size_t size) { 257 T total = 0; //construct T from 0397 T total = 0; $\C{// construct T from 0}$ 258 398 for(size_t i = 0; i < size; i++) 259 total = total + a[i]; //select appropriate +399 total = total + a[i]; $\C{// select appropriate +}$ 260 400 return total; 261 401 } 262 402 263 403 S sa[5]; 264 int i = sum(sa, 5); //use S's 0 construction and +265 \end{cfa code}404 int i = sum(sa, 5); $\C{// use S's 0 construction and +}$ 405 \end{cfa} 266 406 267 407 Since writing constraints on types can become cumbersome for more constrained functions, \CFA also has the concept of traits. 268 408 Traits are named collection of constraints that can be used both instead and in addition to regular constraints: 269 \begin{cfa code}409 \begin{cfa} 270 410 trait summable( otype T ) { 271 void ?{}(T *, zero_t); //constructor from 0 literal272 T ?+?(T, T); //assortment of additions411 void ?{}(T *, zero_t); $\C{// constructor from 0 literal}$ 412 T ?+?(T, T); $\C{// assortment of additions}$ 273 413 T ?+=?(T *, T); 274 414 T ++?(T *); 275 415 T ?++(T *); 276 416 }; 277 forall( otype T | summable(T) ) //use trait417 forall( otype T | summable(T) ) $\C{// use trait}$ 278 418 T sum(T a[], size_t size); 279 \end{cfa code}280 281 Note that the type use for assertions can be either an \code{otype} or a \code{dtype}.282 Types declared as \code{otype}refer to ``complete'' objects, i.e., objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator.283 Using \code{dtype,}on the other hand, has none of these assumptions but is extremely restrictive, it only guarantees the object is addressable.419 \end{cfa} 420 421 Note that the type use for assertions can be either an @otype@ or a @dtype@. 422 Types declared as @otype@ refer to ``complete'' objects, i.e., objects with a size, a default constructor, a copy constructor, a destructor and an assignment operator. 423 Using @dtype@, on the other hand, has none of these assumptions but is extremely restrictive, it only guarantees the object is addressable. 
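To make the distinction concrete, the following sketch (illustrative only; the routine names @second@ and @select@ are not from the paper) contrasts the two kinds of type parameters: an @otype@ parameter may be copied, assigned and passed by value, while a @dtype@ parameter may only be manipulated through a pointer:
\begin{cfa}
forall( otype T )
T second( T a, T b ) {
	T tmp = b;	$\C{// otype supplies copy construction and assignment}$
	return tmp;	$\C{// and a size, so T can be returned by value}$
}
forall( dtype T )
T * select( T * a, T * b, int first ) {
	return first ? a : b;	$\C{// dtype only guarantees T is addressable}$
}
\end{cfa}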
284 424 285 425 % ====================================================================== 286 426 \subsection{with Clause/Statement} 287 427 Since \CFA lacks the concept of a receiver, certain functions end up needing to repeat variable names often. 288 To remove this inconvenience, \CFA provides the \code{with}statement, which opens an aggregate scope making its fields directly accessible (like Pascal).289 \begin{cfa code}428 To remove this inconvenience, \CFA provides the @with@ statement, which opens an aggregate scope making its fields directly accessible (like Pascal). 429 \begin{cfa} 290 430 struct S { int i, j; }; 291 int mem(S & this) with (this) //with clause292 i = 1; //this->i293 j = 2; //this->j431 int mem(S & this) with (this) $\C{// with clause}$ 432 i = 1; $\C{// this->i}$ 433 j = 2; $\C{// this->j}$ 294 434 } 295 435 int foo() { 296 436 struct S1 { ... } s1; 297 437 struct S2 { ... } s2; 298 with (s1) //with statement438 with (s1) $\C{// with statement}$ 299 439 { 300 // access fields of s1 without qualification301 with (s2) //nesting440 // access fields of s1 without qualification 441 with (s2) $\C{// nesting}$ 302 442 { 303 // access fields of s1 and s2 without qualification443 // access fields of s1 and s2 without qualification 304 444 } 305 445 } 306 with (s1, s2) //scopes open in parallel446 with (s1, s2) $\C{// scopes open in parallel}$ 307 447 { 308 // access fields of s1 and s2 without qualification309 } 310 } 311 \end{cfa code}312 313 For more information on \CFA see \cite{cforall-ug, rob-thesis,www-cfa}.448 // access fields of s1 and s2 without qualification 449 } 450 } 451 \end{cfa} 452 453 For more information on \CFA see \cite{cforall-ug,Schluntz17,www-cfa}. 314 454 315 455 % ====================================================================== … … 339 479 Optimal performance in concurrent applications is often obtained by having as much non-determinism as correctness allows. 340 480 481 341 482 \section{\protect\CFA's Thread Building Blocks} 483 342 484 One of the important features that are missing in C is threading\footnote{While the C11 standard defines a ``threads.h'' header, it is minimal and defined as optional. 343 485 As such, library support for threading is far from widespread. 344 At the time of writing the paper, neither \ texttt{gcc} nor \texttt{clang} support ``threads.h'' in their respectivestandard libraries.}.486 At the time of writing the paper, neither \protect\lstinline|gcc| nor \protect\lstinline|clang| support ``threads.h'' in their standard libraries.}. 345 487 On modern architectures, a lack of threading is unacceptable~\cite{Sutter05, Sutter05b}, and therefore modern programming languages must have the proper tools to allow users to write efficient concurrent programs to take advantage of parallelism. 346 488 As an extension of C, \CFA needs to express these concepts in a way that is as natural as possible to programmers familiar with imperative languages. 347 489 And being a system-level language means programmers expect to choose precisely which features they need and which cost they are willing to pay. 348 490 491 349 492 \section{Coroutines: A Stepping Stone}\label{coroutine} 493 350 494 While the main focus of this proposal is concurrency and parallelism, it is important to address coroutines, which are actually a significant building block of a concurrency system. \textbf{Coroutine}s are generalized routines which have predefined points where execution is suspended and can be resumed at a later time. 
351 495 Therefore, they need to deal with context switches and other context-management operations. 352 496 This proposal includes coroutines both as an intermediate step for the implementation of threads, and a first-class feature of \CFA. 353 497 Furthermore, many design challenges of threads are at least partially present in designing coroutines, which makes the design effort that much more relevant. 354 The core \textbf{api} of coroutines revolves around two features: independent call-stacks and \code{suspend}/\code{resume}.355 356 \begin{ table}498 The core \textbf{api} of coroutines revolves around two features: independent call-stacks and @suspend@/@resume@. 499 500 \begin{figure} 357 501 \begin{center} 358 \begin{tabular}{c @{\hskip 0.025in}|@{\hskip 0.025in} c @{\hskip 0.025in}|@{\hskip 0.025in} c} 359 \begin{ccode}[tabsize=2] 360 //Using callbacks 361 void fibonacci_func( 362 int n, 363 void (*callback)(int) 502 \begin{tabular}{@{}lll@{}} 503 \multicolumn{1}{c}{\textbf{callback}} & \multicolumn{1}{c}{\textbf{output array}} & \multicolumn{1}{c}{\textbf{external state}} \\ 504 \begin{cfa} 505 void fib_func( 506 int n, void (* callback)( int ) 364 507 ) { 365 int first = 0; 366 int second = 1; 367 int next, i; 368 for(i = 0; i < n; i++) 369 { 370 if(i <= 1) 371 next = i; 372 else { 373 next = f1 + f2; 374 f1 = f2; 375 f2 = next; 376 } 377 callback(next); 378 } 379 } 380 508 int fn, f1 = 0, f2 = 1; 509 for ( int i = 0; i < n; i++ ) { 510 callback( f1 ); 511 fn = f1 + f2; 512 f1 = f2; f2 = fn; 513 } 514 } 381 515 int main() { 382 void print_fib(int n) { 383 printf("%d\n", n); 384 } 385 386 fibonacci_func( 387 10, print_fib 388 ); 389 390 391 392 } 393 \end{ccode}&\begin{ccode}[tabsize=2] 394 //Using output array 395 void fibonacci_array( 396 int n, 397 int* array 516 void print_fib( int n ) { 517 printf( "%d\n", n ); 518 } 519 fib_func( 10, print_fib ); 520 } 521 522 \end{cfa} 523 & 524 \begin{cfa} 525 void fib_array( 526 int n, int * array 398 527 ) { 399 int f1 = 0; int f2 = 1; 400 int next, i; 401 for(i = 0; i < n; i++) 402 { 403 if(i <= 1) 404 next = i; 405 else { 406 next = f1 + f2; 407 f1 = f2; 408 f2 = next; 409 } 410 array[i] = next; 411 } 412 } 413 414 528 int fn, f1 = 0, f2 = 1; 529 for ( int i = 0; i < n; i++ ) { 530 array[i] = f1; 531 fn = f1 + f2; 532 f1 = f2; f2 = fn; 533 } 534 } 415 535 int main() { 416 536 int a[10]; 417 418 fibonacci_func( 419 10, a 420 ); 421 422 for(int i=0;i<10;i++){ 423 printf("%d\n", a[i]); 424 } 425 426 } 427 \end{ccode}&\begin{ccode}[tabsize=2] 428 //Using external state 429 typedef struct { 430 int f1, f2; 431 } Iterator_t; 432 433 int fibonacci_state( 434 Iterator_t* it 537 fib_array( 10, a ); 538 for ( int i = 0; i < 10; i++ ) { 539 printf( "%d\n", a[i] ); 540 } 541 } 542 \end{cfa} 543 & 544 \begin{cfa} 545 546 typedef struct { int f1, f2; } Fib; 547 int fib_state( 548 Fib * fib 435 549 ) { 436 int f; 437 f = it->f1 + it->f2; 438 it->f2 = it->f1; 439 it->f1 = max(f,1); 440 return f; 441 } 442 443 444 445 446 447 448 550 int ret = fib->f1; 551 int fn = fib->f1 + fib->f2; 552 fib->f2 = fib->f1; fib->f1 = fn; 553 return ret; 554 } 449 555 int main() { 450 Iterator_t it={0,0}; 451 452 for(int i=0;i<10;i++){ 453 printf("%d\n", 454 fibonacci_state( 455 &it 456 ); 457 ); 458 } 459 460 } 461 \end{ccode} 556 Fib fib = { 0, 1 }; 557 558 for ( int i = 0; i < 10; i++ ) { 559 printf( "%d\n", fib_state( &fib ) ); 560 } 561 } 562 \end{cfa} 462 563 \end{tabular} 463 564 \end{center} 464 \caption{ Different implementations of a Fibonacci sequence generator 
in C.}465 \label{lst:fib onacci-c}466 \end{ table}565 \caption{Fibonacci Implementations in C} 566 \label{lst:fib-c} 567 \end{figure} 467 568 468 569 A good example of a problem made easier with coroutines is generators, e.g., generating the Fibonacci sequence. … … 474 575 Listing \ref{lst:fibonacci-cfa} is an example of a solution to the Fibonacci problem using \CFA coroutines, where the coroutine stack holds sufficient state for the next generation. 475 576 This solution has the advantage of having very strong decoupling between how the sequence is generated and how it is used. 476 Indeed, this version is as easy to use as the \code{fibonacci_state} solution, while the implementation is very similar to the \code{fibonacci_func}example.577 Indeed, this version is as easy to use as the @fibonacci_state@ solution, while the implementation is very similar to the @fibonacci_func@ example. 477 578 478 579 \begin{figure} 479 \begin{cfacode}[caption={Implementation of Fibonacci using coroutines},label={lst:fibonacci-cfa}] 480 coroutine Fibonacci { 481 int fn; //used for communication 580 \begin{cfa} 581 coroutine Fibonacci { int fn; }; $\C{// used for communication}$ 582 583 void ?{}( Fibonacci & fib ) with( fib ) { fn = 0; } $\C{// constructor}$ 584 585 void main( Fibonacci & fib ) with( fib ) { $\C{// main called on first resume}$ 586 int fn1, fn2; $\C{// retained between resumes}$ 587 fn = 0; fn1 = fn; $\C{// 1st case}$ 588 suspend(); $\C{// restart last resume}$ 589 fn = 1; fn2 = fn1; fn1 = fn; $\C{// 2nd case}$ 590 suspend(); $\C{// restart last resume}$ 591 for ( ;; ) { 592 fn = fn1 + fn2; fn2 = fn1; fn1 = fn; $\C{// general case}$ 593 suspend(); $\C{// restart last resume}$ 594 } 595 } 596 int next( Fibonacci & fib ) with( fib ) { 597 resume( fib ); $\C{// restart last suspend}$ 598 return fn; 599 } 600 int main() { 601 Fibonacci f1, f2; 602 for ( int i = 1; i <= 10; i++ ) { 603 sout | next( f1 ) | next( f2 ) | endl; 604 } 605 } 606 \end{cfa} 607 \caption{Coroutine Fibonacci } 608 \label{lst:fibonacci-cfa} 609 \end{figure} 610 611 Listing \ref{lst:fmt-line} shows the @Format@ coroutine for restructuring text into groups of character blocks of fixed size. 612 The example takes advantage of resuming coroutines in the constructor to simplify the code and highlights the idea that interesting control flow can occur in the constructor. 
613 614 \begin{figure} 615 \begin{cfa}[tabsize=3,caption={Formatting text into lines of 5 blocks of 4 characters.},label={lst:fmt-line}] 616 // format characters into blocks of 4 and groups of 5 blocks per line 617 coroutine Format { 618 char ch; // used for communication 619 int g, b; // global because used in destructor 482 620 }; 483 621 484 void ?{}(Fibonacci& this) { //constructor485 this.fn = 0;486 }487 488 //main automatically called on first resume489 void main(Fibonacci& this) with (this) {490 int fn1, fn2; //retained between resumes491 fn = 0;492 fn1 = fn;493 suspend(this); //return to last resume494 495 fn = 1;496 fn2 = fn1;497 fn1 = fn;498 suspend(this); //return to last resume499 500 for ( ;; ) {501 fn = fn1 + fn2;502 fn2 = fn1;503 fn1 = fn;504 suspend(this); //return to last resume505 }506 }507 508 int next(Fibonacci& this) {509 resume(this); //transfer to last suspend510 return this.fn;511 }512 513 void main() { //regular program main514 Fibonacci f1, f2;515 for ( int i = 1; i <= 10; i += 1 ) {516 sout | next( f1 ) | next( f2 ) | endl;517 }518 }519 \end{cfacode}520 \end{figure}521 522 Listing \ref{lst:fmt-line} shows the \code{Format} coroutine for restructuring text into groups of character blocks of fixed size.523 The example takes advantage of resuming coroutines in the constructor to simplify the code and highlights the idea that interesting control flow can occur in the constructor.524 525 \begin{figure}526 \begin{cfacode}[tabsize=3,caption={Formatting text into lines of 5 blocks of 4 characters.},label={lst:fmt-line}]527 //format characters into blocks of 4 and groups of 5 blocks per line528 coroutine Format {529 char ch; //used for communication530 int g, b; //global because used in destructor531 };532 533 622 void ?{}(Format& fmt) { 534 resume( fmt ); // prime (start) coroutine623 resume( fmt ); // prime (start) coroutine 535 624 } 536 625 … … 541 630 542 631 void main(Format& fmt) with fmt { 543 for ( ;; ) { // for as many characters544 for(g = 0; g < 5; g++) { // groups of 5 blocks545 for(b = 0; b < 4; fb++) { // blocks of 4 characters632 for ( ;; ) { // for as many characters 633 for(g = 0; g < 5; g++) { // groups of 5 blocks 634 for(b = 0; b < 4; fb++) { // blocks of 4 characters 546 635 suspend(); 547 sout | ch; // print character636 sout | ch; // print character 548 637 } 549 sout | " "; // print block separator638 sout | " "; // print block separator 550 639 } 551 sout | endl; // print group separator640 sout | endl; // print group separator 552 641 } 553 642 } … … 561 650 Format fmt; 562 651 char ch; 563 Eof: for ( ;; ) { // read until end of file564 sin | ch; // read one character565 if(eof(sin)) break Eof; // eof ?566 prt(fmt, ch); // push character for formatting567 } 568 } 569 \end{cfa code}652 Eof: for ( ;; ) { // read until end of file 653 sin | ch; // read one character 654 if(eof(sin)) break Eof; // eof ? 
655 prt(fmt, ch); // push character for formatting 656 } 657 } 658 \end{cfa} 570 659 \end{figure} 571 660 … … 582 671 For example, the following code, while looking benign, can run into undefined behaviour because of thunks: 583 672 584 \begin{cfa code}585 // async: Runs function asynchronously on another thread673 \begin{cfa} 674 // async: Runs function asynchronously on another thread 586 675 forall(otype T) 587 676 extern void async(void (*func)(T*), T* obj); … … 592 681 void bar() { 593 682 int a; 594 async(noop, &a); // start thread running noop with argument a595 } 596 \end{cfa code}683 async(noop, &a); // start thread running noop with argument a 684 } 685 \end{cfa} 597 686 598 687 The generated C code\footnote{Code trimmed down for brevity} creates a local thunk to hold type information: 599 688 600 \begin{c code}689 \begin{cfa} 601 690 extern void async(/* omitted */, void (*func)(void*), void* obj); 602 691 … … 612 701 async(/* omitted */, ((void (*)(void*))(&_thunk0)), (&a)); 613 702 } 614 \end{c code}615 The problem in this example is a storage management issue, the function pointer \code{_thunk0}is only valid until the end of the block, which limits the viable solutions because storing the function pointer for too long causes undefined behaviour; i.e., the stack-based thunk being destroyed before it can be used.703 \end{cfa} 704 The problem in this example is a storage management issue, the function pointer @_thunk0@ is only valid until the end of the block, which limits the viable solutions because storing the function pointer for too long causes undefined behaviour; i.e., the stack-based thunk being destroyed before it can be used. 616 705 This challenge is an extension of challenges that come with second-class routines. 617 706 Indeed, GCC nested routines also have the limitation that nested routine cannot be passed outside of the declaration scope. … … 621 710 One solution to this challenge is to use composition/containment, where coroutine fields are added to manage the coroutine. 622 711 623 \begin{cfa code}712 \begin{cfa} 624 713 struct Fibonacci { 625 int fn; // used for communication626 coroutine c; // composition714 int fn; // used for communication 715 coroutine c; // composition 627 716 }; 628 717 … … 633 722 void ?{}(Fibonacci& this) { 634 723 this.fn = 0; 635 // Call constructor to initialize coroutine724 // Call constructor to initialize coroutine 636 725 (this.c){myMain}; 637 726 } 638 \end{cfa code}727 \end{cfa} 639 728 The downside of this approach is that users need to correctly construct the coroutine handle before using it. 640 729 Like any other objects, the user must carefully choose construction order to prevent usage of objects not yet constructed. … … 645 734 The next alternative is to use language support to annotate coroutines as follows: 646 735 647 \begin{cfa code}736 \begin{cfa} 648 737 coroutine Fibonacci { 649 int fn; // used for communication738 int fn; // used for communication 650 739 }; 651 \end{cfa code}652 The \code{coroutine}keyword means the compiler can find and inject code where needed.740 \end{cfa} 741 The @coroutine@ keyword means the compiler can find and inject code where needed. 653 742 The downside of this approach is that it makes coroutine a special case in the language. 654 743 Users wanting to extend coroutines or build their own for various reasons can only do so in ways offered by the language. 
… … 661 750 For coroutines as for threads, many implementations are based on routine pointers or function objects~\cite{Butenhof97, C++14, MS:VisualC++, BoostCoroutines15}. 662 751 For example, Boost implements coroutines in terms of four functor object types: 663 \begin{cfa code}752 \begin{cfa} 664 753 asymmetric_coroutine<>::pull_type 665 754 asymmetric_coroutine<>::push_type 666 755 symmetric_coroutine<>::call_type 667 756 symmetric_coroutine<>::yield_type 668 \end{cfa code}669 Often, the canonical threading paradigm in languages is based on function pointers, \texttt{pthread}being one of the most well-known examples.757 \end{cfa} 758 Often, the canonical threading paradigm in languages is based on function pointers, @pthread@ being one of the most well-known examples. 670 759 The main problem of this approach is that the thread usage is limited to a generic handle that must otherwise be wrapped in a custom type. 671 760 Since the custom type is simple to write in \CFA and solves several issues, added support for routine/lambda based coroutines adds very little. 672 761 673 A variation of this would be to use a simple function pointer in the same way \texttt{pthread}does for threads:674 \begin{cfa code}762 A variation of this would be to use a simple function pointer in the same way @pthread@ does for threads: 763 \begin{cfa} 675 764 void foo( coroutine_t cid, void* arg ) { 676 765 int* value = (int*)arg; 677 // Coroutine body766 // Coroutine body 678 767 } 679 768 … … 683 772 coroutine_resume( &cid ); 684 773 } 685 \end{cfa code}774 \end{cfa} 686 775 This semantics is more common for thread interfaces but coroutines work equally well. 687 776 As discussed in section \ref{threads}, this approach is superseded by static approaches in terms of expressivity. … … 690 779 691 780 Finally, the underlying approach, which is the one closest to \CFA idioms, is to use trait-based lazy coroutines. 692 This approach defines a coroutine as anything that satisfies the trait \code{is_coroutine}(as defined below) and is used as a coroutine.693 694 \begin{cfa code}781 This approach defines a coroutine as anything that satisfies the trait @is_coroutine@ (as defined below) and is used as a coroutine. 782 783 \begin{cfa} 695 784 trait is_coroutine(dtype T) { 696 785 void main(T& this); … … 700 789 forall( dtype T | is_coroutine(T) ) void suspend(T&); 701 790 forall( dtype T | is_coroutine(T) ) void resume (T&); 702 \end{cfa code}703 This ensures that an object is not a coroutine until \code{resume}is called on the object.704 Correspondingly, any object that is passed to \code{resume} is a coroutine since it must satisfy the \code{is_coroutine}trait to compile.705 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the \code{get_coroutine}routine.706 The \CFA keyword \code{coroutine}simply has the effect of implementing the getter and forward declarations required for users to implement the main routine.791 \end{cfa} 792 This ensures that an object is not a coroutine until @resume@ is called on the object. 793 Correspondingly, any object that is passed to @resume@ is a coroutine since it must satisfy the @is_coroutine@ trait to compile. 794 The advantage of this approach is that users can easily create different types of coroutines, for example, changing the memory layout of a coroutine is trivial when implementing the @get_coroutine@ routine. 
795 The \CFA keyword @coroutine@ simply has the effect of implementing the getter and forward declarations required for users to implement the main routine. 707 796 708 797 \begin{center} 709 798 \begin{tabular}{c c c} 710 \begin{cfa code}[tabsize=3]799 \begin{cfa}[tabsize=3] 711 800 coroutine MyCoroutine { 712 801 int someValue; 713 802 }; 714 \end{cfa code} & == & \begin{cfacode}[tabsize=3]803 \end{cfa} & == & \begin{cfa}[tabsize=3] 715 804 struct MyCoroutine { 716 805 int someValue; … … 726 815 727 816 void main(struct MyCoroutine* this); 728 \end{cfa code}817 \end{cfa} 729 818 \end{tabular} 730 819 \end{center} … … 736 825 Both user and kernel threads are supported, where user threads are the concurrency mechanism and kernel threads are the parallel mechanism. 737 826 User threads offer a flexible and lightweight interface. 738 A thread can be declared using a struct declaration \code{thread}as follows:739 740 \begin{cfa code}827 A thread can be declared using a struct declaration @thread@ as follows: 828 829 \begin{cfa} 741 830 thread foo {}; 742 \end{cfa code}831 \end{cfa} 743 832 744 833 As for coroutines, the keyword is a thin wrapper around a \CFA trait: 745 834 746 \begin{cfa code}835 \begin{cfa} 747 836 trait is_thread(dtype T) { 748 837 void ^?{}(T & mutex this); … … 750 839 thread_desc* get_thread(T & this); 751 840 }; 752 \end{cfa code}841 \end{cfa} 753 842 754 843 Obviously, for this thread implementation to be useful it must run some user code. 755 844 Several other threading interfaces use a function-pointer representation as the interface of threads (for example \Csharp~\cite{Csharp} and Scala~\cite{Scala}). 756 However, this proposal considers that statically tying a \code{main}routine to a thread supersedes this approach.757 Since the \code{main}routine is already a special routine in \CFA (where the program begins), it is a natural extension of the semantics to use overloading to declare mains for different threads (the normal main being the main of the initial thread).758 As such the \code{main}routine of a thread can be defined as759 \begin{cfa code}845 However, this proposal considers that statically tying a @main@ routine to a thread supersedes this approach. 846 Since the @main@ routine is already a special routine in \CFA (where the program begins), it is a natural extension of the semantics to use overloading to declare mains for different threads (the normal main being the main of the initial thread). 847 As such the @main@ routine of a thread can be defined as 848 \begin{cfa} 760 849 thread foo {}; 761 850 … … 763 852 sout | "Hello World!" | endl; 764 853 } 765 \end{cfa code}766 767 In this example, threads of type \code{foo} start execution in the \code{void main(foo &)} routine, which prints \code{"Hello World!".}While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity.854 \end{cfa} 855 856 In this example, threads of type @foo@ start execution in the @void main(foo &)@ routine, which prints @"Hello World!".@ While this paper encourages this approach to enforce strongly typed programming, users may prefer to use the routine-based thread semantics for the sake of simplicity. 768 857 With the static semantics it is trivial to write a thread type that takes a function pointer as a parameter and executes it on its stack asynchronously. 
769 \begin{cfa code}858 \begin{cfa} 770 859 typedef void (*voidFunc)(int); 771 860 … … 781 870 782 871 void main(FuncRunner & this) { 783 // thread starts here and runs the function872 // thread starts here and runs the function 784 873 this.func( this.arg ); 785 874 } … … 793 882 return 0? 794 883 } 795 \end{cfa code}884 \end{cfa} 796 885 797 886 A consequence of the strongly typed approach to main is that memory layout of parameters and return values to/from a thread are now explicitly specified in the \textbf{api}. 798 887 799 888 Of course, for threads to be useful, it must be possible to start and stop threads and wait for them to complete execution. 800 While using an \textbf{api} such as \code{fork} and \code{join}is relatively common in the literature, such an interface is unnecessary.801 Indeed, the simplest approach is to use \textbf{raii} principles and have threads \code{fork} after the constructor has completed and \code{join}before the destructor runs.802 \begin{cfa code}889 While using an \textbf{api} such as @fork@ and @join@ is relatively common in the literature, such an interface is unnecessary. 890 Indeed, the simplest approach is to use \textbf{raii} principles and have threads @fork@ after the constructor has completed and @join@ before the destructor runs. 891 \begin{cfa} 803 892 thread World; 804 893 … … 809 898 void main() { 810 899 World w; 811 // Thread forks here812 813 // Printing "Hello " and "World!" are run concurrently900 // Thread forks here 901 902 // Printing "Hello " and "World!" are run concurrently 814 903 sout | "Hello " | endl; 815 904 816 // Implicit join at end of scope817 } 818 \end{cfa code}905 // Implicit join at end of scope 906 } 907 \end{cfa} 819 908 820 909 This semantic has several advantages over explicit semantics: a thread is always started and stopped exactly once, users cannot make any programming errors, and it naturally scales to multiple threads meaning basic synchronization is very simple. 821 910 822 \begin{cfa code}911 \begin{cfa} 823 912 thread MyThread { 824 913 //... 825 914 }; 826 915 827 // main916 // main 828 917 void main(MyThread& this) { 829 918 //... … … 832 921 void foo() { 833 922 MyThread thrds[10]; 834 // Start 10 threads at the beginning of the scope923 // Start 10 threads at the beginning of the scope 835 924 836 925 DoStuff(); 837 926 838 // Wait for the 10 threads to finish839 } 840 \end{cfa code}927 // Wait for the 10 threads to finish 928 } 929 \end{cfa} 841 930 842 931 However, one of the drawbacks of this approach is that threads always form a tree where nodes must always outlive their children, i.e., they are always destroyed in the opposite order of construction because of C scoping rules. 843 932 This restriction is relaxed by using dynamic allocation, so threads can outlive the scope in which they are created, much like dynamically allocating memory lets objects outlive the scope in which they are created. 844 933 845 \begin{cfa code}934 \begin{cfa} 846 935 thread MyThread { 847 936 //... 
… … 855 944 MyThread* long_lived; 856 945 { 857 // Start a thread at the beginning of the scope946 // Start a thread at the beginning of the scope 858 947 MyThread short_lived; 859 948 860 // create another thread that will outlive the thread in this scope949 // create another thread that will outlive the thread in this scope 861 950 long_lived = new MyThread; 862 951 863 952 DoStuff(); 864 953 865 // Wait for the thread short_lived to finish954 // Wait for the thread short_lived to finish 866 955 } 867 956 DoMoreStuff(); 868 957 869 // Now wait for the long_lived to finish958 // Now wait for the long_lived to finish 870 959 delete long_lived; 871 960 } 872 \end{cfa code}961 \end{cfa} 873 962 874 963 … … 888 977 At the lowest level, concurrent paradigms are implemented as atomic operations and locks. 889 978 Many such mechanisms have been proposed, including semaphores~\cite{Dijkstra68b} and path expressions~\cite{Campbell74}. 890 However, for productivity reasons it is desirable to have a higher-level construct be the core concurrency paradigm~\cite{H PP:Study}.979 However, for productivity reasons it is desirable to have a higher-level construct be the core concurrency paradigm~\cite{Hochstein05}. 891 980 892 981 An approach that is worth mentioning because it is gaining in popularity is transactional memory~\cite{Herlihy93}. … … 909 998 Methods range from low-level locks, which are fast and flexible but require significant attention to be correct, to higher-level concurrency techniques, which sacrifice some performance in order to improve ease of use. 910 999 Ease of use comes by either guaranteeing some problems cannot occur (e.g., being deadlock free) or by offering a more explicit coupling between data and corresponding critical section. 911 For example, the \CC \code{std::atomic<T>}offers an easy way to express mutual-exclusion on a restricted set of operations (e.g., reading/writing large types atomically).1000 For example, the \CC @std::atomic<T>@ offers an easy way to express mutual-exclusion on a restricted set of operations (e.g., reading/writing large types atomically). 912 1001 Another challenge with low-level locks is composability. 913 1002 Locks have restricted composability because it takes careful organizing for multiple locks to be used while preventing deadlocks. … … 938 1027 This concept is generally associated with object-oriented languages like Java~\cite{Java} or \uC~\cite{uC++book} but does not strictly require OO semantics. 939 1028 The only requirement is the ability to declare a handle to a shared object and a set of routines that act on it: 940 \begin{cfa code}1029 \begin{cfa} 941 1030 typedef /*some monitor type*/ monitor; 942 1031 int f(monitor & m); 943 1032 944 1033 int main() { 945 monitor m; // Handle m946 f(m); // Routine using handle947 } 948 \end{cfa code}1034 monitor m; // Handle m 1035 f(m); // Routine using handle 1036 } 1037 \end{cfa} 949 1038 950 1039 % ====================================================================== … … 956 1045 First, it is necessary to use pass-by-reference over pass-by-value for monitor routines. 957 1046 This semantics is important, because at their core, monitors are implicit mutual-exclusion objects (locks), and these objects cannot be copied. 958 Therefore, monitors are non-copy-able objects ( \code{dtype}).1047 Therefore, monitors are non-copy-able objects (@dtype@). 959 1048 960 1049 Another aspect to consider is when a monitor acquires its mutual exclusion. 
961 1050 For example, a monitor may need to be passed through multiple helper routines that do not acquire the monitor mutual-exclusion on entry. 962 Passthrough can occur for generic helper routines ( \code{swap}, \code{sort}, etc.) or specific helper routines like the following to implement an atomic counter:963 964 \begin{cfa code}1051 Passthrough can occur for generic helper routines (@swap@, @sort@, etc.) or specific helper routines like the following to implement an atomic counter: 1052 1053 \begin{cfa} 965 1054 monitor counter_t { /*...see section $\ref{data}$...*/ }; 966 1055 967 void ?{}(counter_t & nomutex this); // constructor968 size_t ++?(counter_t & mutex this); // increment969 970 // need for mutex is platform dependent971 void ?{}(size_t * this, counter_t & mutex cnt); // conversion972 \end{cfa code}1056 void ?{}(counter_t & nomutex this); // constructor 1057 size_t ++?(counter_t & mutex this); // increment 1058 1059 // need for mutex is platform dependent 1060 void ?{}(size_t * this, counter_t & mutex cnt); // conversion 1061 \end{cfa} 973 1062 This counter is used as follows: 974 1063 \begin{center} 975 1064 \begin{tabular}{c @{\hskip 0.35in} c @{\hskip 0.35in} c} 976 \begin{cfa code}977 // shared counter1065 \begin{cfa} 1066 // shared counter 978 1067 counter_t cnt1, cnt2; 979 1068 980 // multiple threads access counter1069 // multiple threads access counter 981 1070 thread 1 : cnt1++; cnt2++; 982 1071 thread 2 : cnt1++; cnt2++; … … 984 1073 ... 985 1074 thread N : cnt1++; cnt2++; 986 \end{cfa code}1075 \end{cfa} 987 1076 \end{tabular} 988 1077 \end{center} 989 Notice how the counter is used without any explicit synchronization and yet supports thread-safe semantics for both reading and writing, which is similar in usage to the \CC template \code{std::atomic}.990 991 Here, the constructor ( \code{?\{\}}) uses the \code{nomutex}keyword to signify that it does not acquire the monitor mutual-exclusion when constructing.992 This semantics is because an object not yet con \-structed should never be shared and therefore does not require mutual exclusion.1078 Notice how the counter is used without any explicit synchronization and yet supports thread-safe semantics for both reading and writing, which is similar in usage to the \CC template @std::atomic@. 1079 1080 Here, the constructor (@?{}@) uses the @nomutex@ keyword to signify that it does not acquire the monitor mutual-exclusion when constructing. 1081 This semantics is because an object not yet constructed should never be shared and therefore does not require mutual exclusion. 993 1082 Furthermore, it allows the implementation greater freedom when it initializes the monitor locking. 994 The prefix increment operator uses \code{mutex}to protect the incrementing process from race conditions.995 Finally, there is a conversion operator from \code{counter_t} to \code{size_t}.996 This conversion may or may not require the \code{mutex} keyword depending on whether or not reading a \code{size_t}is an atomic operation.1083 The prefix increment operator uses @mutex@ to protect the incrementing process from race conditions. 1084 Finally, there is a conversion operator from @counter_t@ to @size_t@. 1085 This conversion may or may not require the @mutex@ keyword depending on whether or not reading a @size_t@ is an atomic operation. 997 1086 998 1087 For maximum usability, monitors use \textbf{multi-acq} semantics, which means a single thread can acquire the same monitor multiple times without deadlock. 
999 1088 For example, listing \ref{fig:search} uses recursion and \textbf{multi-acq} to print values inside a binary tree. 1000 1089 \begin{figure} 1001 \begin{cfa code}[caption={Recursive printing algorithm using \textbf{multi-acq}.},label={fig:search}]1090 \begin{cfa}[caption={Recursive printing algorithm using \textbf{multi-acq}.},label={fig:search}] 1002 1091 monitor printer { ... }; 1003 1092 struct tree { … … 1012 1101 print(p, t->right); 1013 1102 } 1014 \end{cfa code}1103 \end{cfa} 1015 1104 \end{figure} 1016 1105 1017 Having both \code{mutex} and \code{nomutex}keywords can be redundant, depending on the meaning of a routine having neither of these keywords.1018 For example, it is reasonable that it should default to the safest option ( \code{mutex}) when given a routine without qualifiers \code{void foo(counter_t & this)}, whereas assuming \code{nomutex}is unsafe and may cause subtle errors.1019 On the other hand, \code{nomutex}is the ``normal'' parameter behaviour, it effectively states explicitly that ``this routine is not special''.1106 Having both @mutex@ and @nomutex@ keywords can be redundant, depending on the meaning of a routine having neither of these keywords. 1107 For example, it is reasonable that it should default to the safest option (@mutex@) when given a routine without qualifiers @void foo(counter_t & this)@, whereas assuming @nomutex@ is unsafe and may cause subtle errors. 1108 On the other hand, @nomutex@ is the ``normal'' parameter behaviour, it effectively states explicitly that ``this routine is not special''. 1020 1109 Another alternative is making exactly one of these keywords mandatory, which provides the same semantics but without the ambiguity of supporting routines with neither keyword. 1021 1110 Mandatory keywords would also have the added benefit of being self-documented but at the cost of extra typing. … … 1023 1112 Mandatory keywords in \CFA would imply that the compiler must know without doubt whether or not a parameter is a monitor or not. 1024 1113 Since \CFA relies heavily on traits as an abstraction mechanism, the distinction between a type that is a monitor and a type that looks like a monitor can become blurred. 1025 For this reason, \CFA only has the \code{mutex} keyword and uses no keyword to mean \code{nomutex}.1026 1027 The next semantic decision is to establish when \code{mutex}may be used as a type qualifier.1114 For this reason, \CFA only has the @mutex@ keyword and uses no keyword to mean @nomutex@. 1115 1116 The next semantic decision is to establish when @mutex@ may be used as a type qualifier. 1028 1117 Consider the following declarations: 1029 \begin{cfa code}1118 \begin{cfa} 1030 1119 int f1(monitor & mutex m); 1031 1120 int f2(const monitor & mutex m); … … 1033 1122 int f4(monitor * mutex m []); 1034 1123 int f5(graph(monitor *) & mutex m); 1035 \end{cfa code}1124 \end{cfa} 1036 1125 The problem is to identify which object(s) should be acquired. 1037 1126 Furthermore, each object needs to be acquired only once. 1038 In the case of simple routines like \code{f1} and \code{f2}it is easy to identify an exhaustive list of objects to acquire on entry.1039 Adding indirections ( \code{f3}) still allows the compiler and programmer to identify which object is acquired.1040 However, adding in arrays ( \code{f4}) makes it much harder.1127 In the case of simple routines like @f1@ and @f2@ it is easy to identify an exhaustive list of objects to acquire on entry. 
1128 Adding indirections (@f3@) still allows the compiler and programmer to identify which object is acquired. 1129 However, adding in arrays (@f4@) makes it much harder. 1041 1130 Array lengths are not necessarily known in C, and even then, making sure objects are only acquired once becomes none-trivial. 1042 This problem can be extended to absurd limits like \code{f5}, which uses a graph of monitors.1131 This problem can be extended to absurd limits like @f5@, which uses a graph of monitors. 1043 1132 To make the issue tractable, this project imposes the requirement that a routine may only acquire one monitor per parameter and it must be the type of the parameter with at most one level of indirection (ignoring potential qualifiers). 1044 Also note that while routine \code{f3} can be supported, meaning that monitor \code{**m}is acquired, passing an array to this routine would be type-safe and yet result in undefined behaviour because only the first element of the array is acquired.1133 Also note that while routine @f3@ can be supported, meaning that monitor @**m@ is acquired, passing an array to this routine would be type-safe and yet result in undefined behaviour because only the first element of the array is acquired. 1045 1134 However, this ambiguity is part of the C type-system with respects to arrays. 1046 For this reason, \code{mutex}is disallowed in the context where arrays may be passed:1047 \begin{cfa code}1048 int f1(monitor & mutex m); // Okay : recommended case1049 int f2(monitor * mutex m); // Not Okay : Could be an array1050 int f3(monitor mutex m []); // Not Okay : Array of unknown length1051 int f4(monitor ** mutex m); // Not Okay : Could be an array1052 int f5(monitor * mutex m []); // Not Okay : Array of unknown length1053 \end{cfa code}1135 For this reason, @mutex@ is disallowed in the context where arrays may be passed: 1136 \begin{cfa} 1137 int f1(monitor & mutex m); // Okay : recommended case 1138 int f2(monitor * mutex m); // Not Okay : Could be an array 1139 int f3(monitor mutex m []); // Not Okay : Array of unknown length 1140 int f4(monitor ** mutex m); // Not Okay : Could be an array 1141 int f5(monitor * mutex m []); // Not Okay : Array of unknown length 1142 \end{cfa} 1054 1143 Note that not all array functions are actually distinct in the type system. 1055 1144 However, even if the code generation could tell the difference, the extra information is still not sufficient to extend meaningfully the monitor call semantic. … … 1057 1146 Unlike object-oriented monitors, where calling a mutex member \emph{implicitly} acquires mutual-exclusion of the receiver object, \CFA uses an explicit mechanism to specify the object that acquires mutual-exclusion. 1058 1147 A consequence of this approach is that it extends naturally to multi-monitor calls. 1059 \begin{cfa code}1148 \begin{cfa} 1060 1149 int f(MonitorA & mutex a, MonitorB & mutex b); 1061 1150 … … 1063 1152 MonitorB b; 1064 1153 f(a,b); 1065 \end{cfa code}1154 \end{cfa} 1066 1155 While OO monitors could be extended with a mutex qualifier for multiple-monitor calls, no example of this feature could be found. 1067 1156 The capability to acquire multiple locks before entering a critical section is called \emph{\textbf{bulk-acq}}. … … 1071 1160 This consistent ordering means acquiring multiple monitors is safe from deadlock when using \textbf{bulk-acq}. 1072 1161 However, users can still force the acquiring order. 
1073 For example, notice which routines use \code{mutex}/\code{nomutex}and how this affects acquiring order:1074 \begin{cfa code}1075 void foo(A& mutex a, B& mutex b) { // acquire a & b1162 For example, notice which routines use @mutex@/@nomutex@ and how this affects acquiring order: 1163 \begin{cfa} 1164 void foo(A& mutex a, B& mutex b) { // acquire a & b 1076 1165 ... 1077 1166 } 1078 1167 1079 void bar(A& mutex a, B& /*nomutex*/ b) { // acquire a1080 ... foo(a, b); ... // acquire b1081 } 1082 1083 void baz(A& /*nomutex*/ a, B& mutex b) { // acquire b1084 ... foo(a, b); ... // acquire a1085 } 1086 \end{cfa code}1087 The \textbf{multi-acq} monitor lock allows a monitor lock to be acquired by both \code{bar} or \code{baz} and acquired again in \code{foo}.1088 In the calls to \code{bar} and \code{baz}the monitors are acquired in opposite order.1168 void bar(A& mutex a, B& /*nomutex*/ b) { // acquire a 1169 ... foo(a, b); ... // acquire b 1170 } 1171 1172 void baz(A& /*nomutex*/ a, B& mutex b) { // acquire b 1173 ... foo(a, b); ... // acquire a 1174 } 1175 \end{cfa} 1176 The \textbf{multi-acq} monitor lock allows a monitor lock to be acquired by both @bar@ or @baz@ and acquired again in @foo@. 1177 In the calls to @bar@ and @baz@ the monitors are acquired in opposite order. 1089 1178 1090 1179 However, such use leads to lock acquiring order problems. 1091 In the example above, the user uses implicit ordering in the case of function \code{foo} but explicit ordering in the case of \code{bar} and \code{baz}.1180 In the example above, the user uses implicit ordering in the case of function @foo@ but explicit ordering in the case of @bar@ and @baz@. 1092 1181 This subtle difference means that calling these routines concurrently may lead to deadlock and is therefore undefined behaviour. 1093 1182 As shown~\cite{Lister77}, solving this problem requires: … … 1101 1190 1102 1191 For example, \textbf{multi-acq} and \textbf{bulk-acq} can be used together in interesting ways: 1103 \begin{cfa code}1192 \begin{cfa} 1104 1193 monitor bank { ... }; 1105 1194 … … 1110 1199 deposit( yourbank, me2you ); 1111 1200 } 1112 \end{cfa code}1201 \end{cfa} 1113 1202 This example shows a trivial solution to the bank-account transfer problem~\cite{BankTransfer}. 1114 1203 Without \textbf{multi-acq} and \textbf{bulk-acq}, the solution to this problem is much more involved and requires careful engineering. 1115 1204 1116 \subsection{\code{mutex} statement} \label{mutex-stmt} 1117 1118 The call semantics discussed above have one software engineering issue: only a routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the \code{mutex} statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}. 1119 Table \ref{lst:mutex-stmt} shows an example of the \code{mutex} statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired. 1120 Beyond naming, the \code{mutex} statement has no semantic difference from a routine call with \code{mutex} parameters. 1205 1206 \subsection{\protect\lstinline|mutex| statement} \label{mutex-stmt} 1207 1208 The call semantics discussed above have one software engineering issue: only a routine can acquire the mutual-exclusion of a set of monitor. \CFA offers the @mutex@ statement to work around the need for unnecessary names, avoiding a major software engineering problem~\cite{2FTwoHardThings}. 
1209 Table \ref{lst:mutex-stmt} shows an example of the @mutex@ statement, which introduces a new scope in which the mutual-exclusion of a set of monitor is acquired. 1210 Beyond naming, the @mutex@ statement has no semantic difference from a routine call with @mutex@ parameters. 1121 1211 1122 1212 \begin{table} 1123 1213 \begin{center} 1124 1214 \begin{tabular}{|c|c|} 1125 function call & \code{mutex}statement \\1215 function call & @mutex@ statement \\ 1126 1216 \hline 1127 \begin{cfa code}[tabsize=3]1217 \begin{cfa}[tabsize=3] 1128 1218 monitor M {}; 1129 1219 void foo( M & mutex m1, M & mutex m2 ) { 1130 // critical section1220 // critical section 1131 1221 } 1132 1222 … … 1134 1224 foo( m1, m2 ); 1135 1225 } 1136 \end{cfa code}&\begin{cfacode}[tabsize=3]1226 \end{cfa}&\begin{cfa}[tabsize=3] 1137 1227 monitor M {}; 1138 1228 void bar( M & m1, M & m2 ) { 1139 1229 mutex(m1, m2) { 1140 // critical section1141 } 1142 } 1143 1144 1145 \end{cfa code}1230 // critical section 1231 } 1232 } 1233 1234 1235 \end{cfa} 1146 1236 \end{tabular} 1147 1237 \end{center} 1148 \caption{Regular call semantics vs. \ code{mutex}statement}1238 \caption{Regular call semantics vs. \protect\lstinline|mutex| statement} 1149 1239 \label{lst:mutex-stmt} 1150 1240 \end{table} … … 1159 1249 This data should be intrinsic to the monitor declaration to prevent any accidental use of data without its appropriate protection. 1160 1250 For example, here is a complete version of the counter shown in section \ref{call}: 1161 \begin{cfa code}1251 \begin{cfa} 1162 1252 monitor counter_t { 1163 1253 int value; … … 1172 1262 } 1173 1263 1174 // need for mutex is platform dependent here1264 // need for mutex is platform dependent here 1175 1265 void ?{}(int * this, counter_t & mutex cnt) { 1176 1266 *this = (int)cnt; 1177 1267 } 1178 \end{cfa code}1179 1180 Like threads and coroutines, monitors are defined in terms of traits with some additional language support in the form of the \code{monitor}keyword.1268 \end{cfa} 1269 1270 Like threads and coroutines, monitors are defined in terms of traits with some additional language support in the form of the @monitor@ keyword. 1181 1271 The monitor trait is: 1182 \begin{cfa code}1272 \begin{cfa} 1183 1273 trait is_monitor(dtype T) { 1184 1274 monitor_desc * get_monitor( T & ); 1185 1275 void ^?{}( T & mutex ); 1186 1276 }; 1187 \end{cfa code}1188 Note that the destructor of a monitor must be a \code{mutex}routine to prevent deallocation while a thread is accessing the monitor.1189 As with any object, calls to a monitor, using \code{mutex}or otherwise, is undefined behaviour after the destructor has run.1277 \end{cfa} 1278 Note that the destructor of a monitor must be a @mutex@ routine to prevent deallocation while a thread is accessing the monitor. 1279 As with any object, calls to a monitor, using @mutex@ or otherwise, is undefined behaviour after the destructor has run. 1190 1280 1191 1281 % ====================================================================== … … 1202 1292 First, here is a simple example of internal scheduling: 1203 1293 1204 \begin{cfa code}1294 \begin{cfa} 1205 1295 monitor A { 1206 1296 condition e; … … 1209 1299 void foo(A& mutex a1, A& mutex a2) { 1210 1300 ... 1211 // Wait for cooperation from bar()1301 // Wait for cooperation from bar() 1212 1302 wait(a1.e); 1213 1303 ... … … 1215 1305 1216 1306 void bar(A& mutex a1, A& mutex a2) { 1217 // Provide cooperation for foo()1307 // Provide cooperation for foo() 1218 1308 ... 
1219 // Unblock foo1309 // Unblock foo 1220 1310 signal(a1.e); 1221 1311 } 1222 \end{cfa code}1312 \end{cfa} 1223 1313 There are two details to note here. 1224 First, \code{signal}is a delayed operation; it only unblocks the waiting thread when it reaches the end of the critical section.1314 First, @signal@ is a delayed operation; it only unblocks the waiting thread when it reaches the end of the critical section. 1225 1315 This semantics is needed to respect mutual-exclusion, i.e., the signaller and signalled thread cannot be in the monitor simultaneously. 1226 The alternative is to return immediately after the call to \code{signal}, which is significantly more restrictive.1227 Second, in \CFA, while it is common to store a \code{condition} as a field of the monitor, a \code{condition}variable can be stored/created independently of a monitor.1228 Here routine \code{foo} waits for the \code{signal} from \code{bar}before making further progress, ensuring a basic ordering.1229 1230 An important aspect of the implementation is that \CFA does not allow barging, which means that once function \code{bar} releases the monitor, \code{foo}is guaranteed to be the next thread to acquire the monitor (unless some other thread waited on the same condition).1316 The alternative is to return immediately after the call to @signal@, which is significantly more restrictive. 1317 Second, in \CFA, while it is common to store a @condition@ as a field of the monitor, a @condition@ variable can be stored/created independently of a monitor. 1318 Here routine @foo@ waits for the @signal@ from @bar@ before making further progress, ensuring a basic ordering. 1319 1320 An important aspect of the implementation is that \CFA does not allow barging, which means that once function @bar@ releases the monitor, @foo@ is guaranteed to be the next thread to acquire the monitor (unless some other thread waited on the same condition). 1231 1321 This guarantee offers the benefit of not having to loop around waits to recheck that a condition is met. 1232 1322 The main reason \CFA offers this guarantee is that users can easily introduce barging if it becomes a necessity but adding barging prevention or barging avoidance is more involved without language support. … … 1240 1330 It is easy to understand the problem of multi-monitor scheduling using a series of pseudo-code examples. 1241 1331 Note that for simplicity in the following snippets of pseudo-code, waiting and signalling is done using an implicit condition variable, like Java built-in monitors. 1242 Indeed, \code{wait}statements always use the implicit condition variable as parameters and explicitly name the monitors (A and B) associated with the condition.1332 Indeed, @wait@ statements always use the implicit condition variable as parameters and explicitly name the monitors (A and B) associated with the condition. 1243 1333 Note that in \CFA, condition variables are tied to a \emph{group} of monitors on first use (called branding), which means that using internal scheduling with distinct sets of monitors requires one condition variable per set of monitors. 1244 1334 The example below shows the simple case of having two threads (one for each column) and a single monitor A. 
… … 1246 1336 \begin{multicols}{2} 1247 1337 thread 1 1248 \begin{ pseudo}1338 \begin{cfa} 1249 1339 acquire A 1250 1340 wait A 1251 1341 release A 1252 \end{ pseudo}1342 \end{cfa} 1253 1343 1254 1344 \columnbreak 1255 1345 1256 1346 thread 2 1257 \begin{ pseudo}1347 \begin{cfa} 1258 1348 acquire A 1259 1349 signal A 1260 1350 release A 1261 \end{ pseudo}1351 \end{cfa} 1262 1352 \end{multicols} 1263 1353 One thread acquires before waiting (atomically blocking and releasing A) and the other acquires before signalling. 1264 It is important to note here that both \code{wait} and \code{signal}must be called with the proper monitor(s) already acquired.1354 It is important to note here that both @wait@ and @signal@ must be called with the proper monitor(s) already acquired. 1265 1355 This semantic is a logical requirement for barging prevention. 1266 1356 1267 1357 A direct extension of the previous example is a \textbf{bulk-acq} version: 1268 1358 \begin{multicols}{2} 1269 \begin{ pseudo}1359 \begin{cfa} 1270 1360 acquire A & B 1271 1361 wait A & B 1272 1362 release A & B 1273 \end{ pseudo}1363 \end{cfa} 1274 1364 \columnbreak 1275 \begin{ pseudo}1365 \begin{cfa} 1276 1366 acquire A & B 1277 1367 signal A & B 1278 1368 release A & B 1279 \end{ pseudo}1369 \end{cfa} 1280 1370 \end{multicols} 1281 1371 \noindent This version uses \textbf{bulk-acq} (denoted using the {\sf\&} symbol), but the presence of multiple monitors does not add a particularly new meaning. … … 1285 1375 1286 1376 While deadlock issues can occur when nesting monitors, these issues are only a symptom of the fact that locks, and by extension monitors, are not perfectly composable. 1287 For monitors, a well-known deadlock problem is the Nested Monitor Problem~\cite{Lister77}, which occurs when a \code{wait}is made by a thread that holds more than one monitor.1288 For example, the following pseudo-code runs into the nested-monitor problem:1377 For monitors, a well-known deadlock problem is the Nested Monitor Problem~\cite{Lister77}, which occurs when a @wait@ is made by a thread that holds more than one monitor. 1378 For example, the following cfa-code runs into the nested-monitor problem: 1289 1379 \begin{multicols}{2} 1290 \begin{ pseudo}1380 \begin{cfa} 1291 1381 acquire A 1292 1382 acquire B … … 1294 1384 release B 1295 1385 release A 1296 \end{ pseudo}1386 \end{cfa} 1297 1387 1298 1388 \columnbreak 1299 1389 1300 \begin{ pseudo}1390 \begin{cfa} 1301 1391 acquire A 1302 1392 acquire B … … 1304 1394 release B 1305 1395 release A 1306 \end{ pseudo}1396 \end{cfa} 1307 1397 \end{multicols} 1308 \noindent The \code{wait} only releases monitor \code{B} so the signalling thread cannot acquire monitor \code{A} to get to the \code{signal}.1309 Attempting release of all acquired monitors at the \code{wait} introduces a different set of problems, such as releasing monitor \code{C}, which has nothing to do with the \code{signal}.1398 \noindent The @wait@ only releases monitor @B@ so the signalling thread cannot acquire monitor @A@ to get to the @signal@. 1399 Attempting release of all acquired monitors at the @wait@ introduces a different set of problems, such as releasing monitor @C@, which has nothing to do with the @signal@. 1310 1400 1311 1401 However, for monitors as for locks, it is possible to write a program using nesting without encountering any problems if nesting is done correctly. 
1312 For example, the next pseudo-code snippet acquires monitors {\sf A} then {\sf B} before waiting, while only acquiring {\sf B} when signalling, effectively avoiding the Nested Monitor Problem~\cite{Lister77}.1402 For example, the next cfa-code snippet acquires monitors {\sf A} then {\sf B} before waiting, while only acquiring {\sf B} when signalling, effectively avoiding the Nested Monitor Problem~\cite{Lister77}. 1313 1403 1314 1404 \begin{multicols}{2} 1315 \begin{ pseudo}1405 \begin{cfa} 1316 1406 acquire A 1317 1407 acquire B … … 1319 1409 release B 1320 1410 release A 1321 \end{ pseudo}1411 \end{cfa} 1322 1412 1323 1413 \columnbreak 1324 1414 1325 \begin{ pseudo}1415 \begin{cfa} 1326 1416 1327 1417 acquire B … … 1329 1419 release B 1330 1420 1331 \end{ pseudo}1421 \end{cfa} 1332 1422 \end{multicols} 1333 1423 … … 1341 1431 1342 1432 A larger example is presented to show complex issues for \textbf{bulk-acq} and its implementation options are analyzed. 1343 Listing \ref{lst:int-bulk- pseudo} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{lst:int-bulk-cfa} shows the corresponding \CFA code to implement the pseudo-code in listing \ref{lst:int-bulk-pseudo}.1344 For the purpose of translating the given pseudo-code into \CFA-code, any method of introducing a monitor is acceptable, e.g., \code{mutex} parameters, global variables, pointer parameters, or using locals with the \code{mutex}statement.1345 1346 \begin{figure} [!t]1433 Listing \ref{lst:int-bulk-cfa} shows an example where \textbf{bulk-acq} adds a significant layer of complexity to the internal signalling semantics, and listing \ref{lst:int-bulk-cfa} shows the corresponding \CFA code to implement the cfa-code in listing \ref{lst:int-bulk-cfa}. 1434 For the purpose of translating the given cfa-code into \CFA-code, any method of introducing a monitor is acceptable, e.g., @mutex@ parameters, global variables, pointer parameters, or using locals with the @mutex@ statement. 
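As a minimal sketch of the @mutex@-parameter alternative (the routine names @outer_wait@, @inner_wait@, @outer_signal@, and @inner_signal@ are illustrative only, not part of the listings), the same waiter/signaller pattern can be written with helper routines instead of the @mutex@ statement:
\begin{cfa}
monitor A {};
monitor B {};
condition c;

void inner_wait( A & mutex a, B & mutex b ) {
	// Code Section 2
	wait( c );		// atomically block and release A & B
	// Code Section 3
}
void outer_wait( A & mutex a, B & b ) {	// acquire A only
	// Code Section 1
	inner_wait( a, b );	// bulk-acq of A & B (A again via multi-acq)
	// Code Section 4
}

void inner_signal( A & mutex a, B & mutex b ) {
	// Code Section 6
	signal( c );		// delayed until A & B are released
	// Code Section 7
}
void outer_signal( A & mutex a, B & b ) {	// acquire A only
	// Code Section 5
	inner_signal( a, b );
	// Code Section 8
}
\end{cfa}
Either phrasing exercises the same entry/exit machinery; the @mutex@ statement used in listing \ref{lst:int-bulk-cfa} merely avoids naming the helper routines.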
1435 1436 \begin{figure} 1347 1437 \begin{multicols}{2} 1348 1438 Waiting thread 1349 \begin{ pseudo}[numbers=left]1439 \begin{cfa}[numbers=left] 1350 1440 acquire A 1351 // Code Section 11441 // Code Section 1 1352 1442 acquire A & B 1353 // Code Section 21443 // Code Section 2 1354 1444 wait A & B 1355 // Code Section 31445 // Code Section 3 1356 1446 release A & B 1357 // Code Section 41447 // Code Section 4 1358 1448 release A 1359 \end{ pseudo}1449 \end{cfa} 1360 1450 \columnbreak 1361 1451 Signalling thread 1362 \begin{ pseudo}[numbers=left, firstnumber=10,escapechar=|]1452 \begin{cfa}[numbers=left, firstnumber=10,escapechar=|] 1363 1453 acquire A 1364 // Code Section 51454 // Code Section 5 1365 1455 acquire A & B 1366 // Code Section 61456 // Code Section 6 1367 1457 |\label{line:signal1}|signal A & B 1368 // Code Section 71458 // Code Section 7 1369 1459 |\label{line:releaseFirst}|release A & B 1370 // Code Section 81460 // Code Section 8 1371 1461 |\label{line:lastRelease}|release A 1372 \end{ pseudo}1462 \end{cfa} 1373 1463 \end{multicols} 1374 \begin{cfa code}[caption={Internal scheduling with \textbf{bulk-acq}},label={lst:int-bulk-pseudo}]1375 \end{cfa code}1464 \begin{cfa}[caption={Internal scheduling with \textbf{bulk-acq}},label={lst:int-bulk-cfa}] 1465 \end{cfa} 1376 1466 \begin{center} 1377 \begin{cfa code}[xleftmargin=.4\textwidth]1467 \begin{cfa}[xleftmargin=.4\textwidth] 1378 1468 monitor A a; 1379 1469 monitor B b; 1380 1470 condition c; 1381 \end{cfa code}1471 \end{cfa} 1382 1472 \end{center} 1383 1473 \begin{multicols}{2} 1384 1474 Waiting thread 1385 \begin{cfa code}1475 \begin{cfa} 1386 1476 mutex(a) { 1387 // Code Section 11477 // Code Section 1 1388 1478 mutex(a, b) { 1389 // Code Section 21479 // Code Section 2 1390 1480 wait(c); 1391 // Code Section 31392 } 1393 // Code Section 41394 } 1395 \end{cfa code}1481 // Code Section 3 1482 } 1483 // Code Section 4 1484 } 1485 \end{cfa} 1396 1486 \columnbreak 1397 1487 Signalling thread 1398 \begin{cfa code}1488 \begin{cfa} 1399 1489 mutex(a) { 1400 // Code Section 51490 // Code Section 5 1401 1491 mutex(a, b) { 1402 // Code Section 61492 // Code Section 6 1403 1493 signal(c); 1404 // Code Section 71405 } 1406 // Code Section 81407 } 1408 \end{cfa code}1494 // Code Section 7 1495 } 1496 // Code Section 8 1497 } 1498 \end{cfa} 1409 1499 \end{multicols} 1410 \begin{cfa code}[caption={Equivalent \CFA code for listing \ref{lst:int-bulk-pseudo}},label={lst:int-bulk-cfa}]1411 \end{cfa code}1500 \begin{cfa}[caption={Equivalent \CFA code for listing \ref{lst:int-bulk-cfa}},label={lst:int-bulk-cfa}] 1501 \end{cfa} 1412 1502 \begin{multicols}{2} 1413 1503 Waiter 1414 \begin{ pseudo}[numbers=left]1504 \begin{cfa}[numbers=left] 1415 1505 acquire A 1416 1506 acquire A & B … … 1418 1508 release A & B 1419 1509 release A 1420 \end{ pseudo}1510 \end{cfa} 1421 1511 1422 1512 \columnbreak 1423 1513 1424 1514 Signaller 1425 \begin{ pseudo}[numbers=left, firstnumber=6,escapechar=|]1515 \begin{cfa}[numbers=left, firstnumber=6,escapechar=|] 1426 1516 acquire A 1427 1517 acquire A & B 1428 1518 signal A & B 1429 1519 release A & B 1430 |\label{line:secret}|// Secretly keep B here1520 |\label{line:secret}|// Secretly keep B here 1431 1521 release A 1432 // Wakeup waiter and transfer A & B1433 \end{ pseudo}1522 // Wakeup waiter and transfer A & B 1523 \end{cfa} 1434 1524 \end{multicols} 1435 \begin{cfa code}[caption={Listing \ref{lst:int-bulk-pseudo}, with delayed signalling comments},label={lst:int-secret}]1436 \end{cfa code}1525 
\begin{cfa}[caption={Listing \ref{lst:int-bulk-cfa}, with delayed signalling comments},label={lst:int-secret}] 1526 \end{cfa} 1437 1527 \end{figure} 1438 1528 1439 The complexity begins at code sections 4 and 8 in listing \ref{lst:int-bulk- pseudo}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors.1440 The root of the problem is that \textbf{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous pseudo-code.1441 When the signaller thread reaches the location where it should ``release \code{A & B}'' (listing \ref{lst:int-bulk-pseudo} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor \code{B}to the waiting thread.1442 This ownership transfer is required in order to prevent barging into \code{B} by another thread, since both the signalling and signalled threads still need monitor \code{A}.1529 The complexity begins at code sections 4 and 8 in listing \ref{lst:int-bulk-cfa}, which are where the existing semantics of internal scheduling needs to be extended for multiple monitors. 1530 The root of the problem is that \textbf{bulk-acq} is used in a context where one of the monitors is already acquired, which is why it is important to define the behaviour of the previous cfa-code. 1531 When the signaller thread reaches the location where it should ``release @A & B@'' (listing \ref{lst:int-bulk-cfa} line \ref{line:releaseFirst}), it must actually transfer ownership of monitor @B@ to the waiting thread. 1532 This ownership transfer is required in order to prevent barging into @B@ by another thread, since both the signalling and signalled threads still need monitor @A@. 1443 1533 There are three options: 1444 1534 … … 1452 1542 1453 1543 However, listing \ref{lst:int-secret} shows this solution can become much more complicated depending on what is executed while secretly holding B at line \ref{line:secret}, while avoiding the need to transfer ownership of a subset of the condition monitors. 1454 Listing \ref{lst:dependency} shows a slightly different example where a third thread is waiting on monitor \code{A}, using a different condition variable.1455 Because the third thread is signalled when secretly holding \code{B}, the goal becomes unreachable.1544 Listing \ref{lst:dependency} shows a slightly different example where a third thread is waiting on monitor @A@, using a different condition variable. 1545 Because the third thread is signalled when secretly holding @B@, the goal becomes unreachable. 1456 1546 Depending on the order of signals (listing \ref{lst:dependency} line \ref{line:signal-ab} and \ref{line:signal-a}) two cases can happen: 1457 1547 1458 \paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor \code{A}needs to be passed to thread $\beta$ when thread $\alpha$ is done with it.1459 \paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor \code{B} needs to be retained and passed to thread $\alpha$ along with monitor \code{A}, which can be done directly or possibly using thread $\beta$ as an intermediate.1548 \paragraph{Case 1: thread $\alpha$ goes first.} In this case, the problem is that monitor @A@ needs to be passed to thread $\beta$ when thread $\alpha$ is done with it. 
1549 \paragraph{Case 2: thread $\beta$ goes first.} In this case, the problem is that monitor @B@ needs to be retained and passed to thread $\alpha$ along with monitor @A@, which can be done directly or possibly using thread $\beta$ as an intermediate. 1460 1550 \\ 1461 1551 … … 1471 1561 \begin{multicols}{3} 1472 1562 Thread $\alpha$ 1473 \begin{ pseudo}[numbers=left, firstnumber=1]1563 \begin{cfa}[numbers=left, firstnumber=1] 1474 1564 acquire A 1475 1565 acquire A & B … … 1477 1567 release A & B 1478 1568 release A 1479 \end{ pseudo}1569 \end{cfa} 1480 1570 \columnbreak 1481 1571 Thread $\gamma$ 1482 \begin{ pseudo}[numbers=left, firstnumber=6, escapechar=|]1572 \begin{cfa}[numbers=left, firstnumber=6, escapechar=|] 1483 1573 acquire A 1484 1574 acquire A & B … … 1487 1577 |\label{line:signal-a}|signal A 1488 1578 |\label{line:release-a}|release A 1489 \end{ pseudo}1579 \end{cfa} 1490 1580 \columnbreak 1491 1581 Thread $\beta$ 1492 \begin{ pseudo}[numbers=left, firstnumber=12, escapechar=|]1582 \begin{cfa}[numbers=left, firstnumber=12, escapechar=|] 1493 1583 acquire A 1494 1584 wait A 1495 1585 |\label{line:release-aa}|release A 1496 \end{ pseudo}1586 \end{cfa} 1497 1587 \end{multicols} 1498 \begin{cfa code}[caption={Pseudo-code for the three thread example.},label={lst:dependency}]1499 \end{cfa code}1588 \begin{cfa}[caption={Pseudo-code for the three thread example.},label={lst:dependency}] 1589 \end{cfa} 1500 1590 \begin{center} 1501 1591 \input{dependency} … … 1505 1595 \end{figure} 1506 1596 1507 In listing \ref{lst:int-bulk- pseudo}, there is a solution that satisfies both barging prevention and mutual exclusion.1508 If ownership of both monitors is transferred to the waiter when the signaller releases \code{A & B} and then the waiter transfers back ownership of \code{A} back to the signaller when it releases it, then the problem is solved (\code{B}is no longer in use at this point).1597 In listing \ref{lst:int-bulk-cfa}, there is a solution that satisfies both barging prevention and mutual exclusion. 1598 If ownership of both monitors is transferred to the waiter when the signaller releases @A & B@ and then the waiter transfers back ownership of @A@ back to the signaller when it releases it, then the problem is solved (@B@ is no longer in use at this point). 1509 1599 Dynamically finding the correct order is therefore the second possible solution. 1510 1600 The problem is effectively resolving a dependency graph of ownership requirements. … … 1514 1604 \begin{figure} 1515 1605 \begin{multicols}{2} 1516 \begin{ pseudo}1606 \begin{cfa} 1517 1607 acquire A 1518 1608 acquire B … … 1522 1612 release B 1523 1613 release A 1524 \end{ pseudo}1614 \end{cfa} 1525 1615 1526 1616 \columnbreak 1527 1617 1528 \begin{ pseudo}1618 \begin{cfa} 1529 1619 acquire A 1530 1620 acquire B … … 1534 1624 release B 1535 1625 release A 1536 \end{ pseudo}1626 \end{cfa} 1537 1627 \end{multicols} 1538 \begin{cfa code}[caption={Extension to three monitors of listing \ref{lst:int-bulk-pseudo}},label={lst:explosion}]1539 \end{cfa code}1628 \begin{cfa}[caption={Extension to three monitors of listing \ref{lst:int-bulk-cfa}},label={lst:explosion}] 1629 \end{cfa} 1540 1630 \end{figure} 1541 1631 … … 1546 1636 \subsubsection{Partial Signalling} \label{partial-sig} 1547 1637 Finally, the solution that is chosen for \CFA is to use partial signalling. 
1548 Again using listing \ref{lst:int-bulk- pseudo}, the partial signalling solution transfers ownership of monitor \code{B} at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor \code{A}.1638 Again using listing \ref{lst:int-bulk-cfa}, the partial signalling solution transfers ownership of monitor @B@ at lines \ref{line:signal1} to the waiter but does not wake the waiting thread since it is still using monitor @A@. 1549 1639 Only when it reaches line \ref{line:lastRelease} does it actually wake up the waiting thread. 1550 1640 This solution has the benefit that complexity is encapsulated into only two actions: passing monitors to the next owner when they should be released and conditionally waking threads if all conditions are met. … … 1554 1644 Using partial signalling, listing \ref{lst:dependency} can be solved easily: 1555 1645 \begin{itemize} 1556 \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor \code{B} to thread $\alpha$ and continues to hold monitor \code{A}.1557 \item When thread $\gamma$ reaches line \ref{line:release-a} it transfers monitor \code{A}to thread $\beta$ and wakes it up.1558 \item When thread $\beta$ reaches line \ref{line:release-aa} it transfers monitor \code{A}to thread $\alpha$ and wakes it up.1646 \item When thread $\gamma$ reaches line \ref{line:release-ab} it transfers monitor @B@ to thread $\alpha$ and continues to hold monitor @A@. 1647 \item When thread $\gamma$ reaches line \ref{line:release-a} it transfers monitor @A@ to thread $\beta$ and wakes it up. 1648 \item When thread $\beta$ reaches line \ref{line:release-aa} it transfers monitor @A@ to thread $\alpha$ and wakes it up. 1559 1649 \end{itemize} 1560 1650 … … 1566 1656 \begin{table} 1567 1657 \begin{tabular}{|c|c|} 1568 \code{signal} & \code{signal_block}\\1658 @signal@ & @signal_block@ \\ 1569 1659 \hline 1570 \begin{cfacode}[tabsize=3] 1571 monitor DatingService 1572 { 1573 //compatibility codes 1660 \begin{cfa}[tabsize=3] 1661 monitor DatingService { 1662 // compatibility codes 1574 1663 enum{ CCodes = 20 }; 1575 1664 … … 1582 1671 condition exchange; 1583 1672 1584 int girl(int phoneNo, int ccode) 1585 { 1586 //no compatible boy ? 1587 if(empty(boys[ccode])) 1588 { 1589 //wait for boy 1590 wait(girls[ccode]); 1591 1592 //make phone number available 1593 girlPhoneNo = phoneNo; 1594 1595 //wake boy from chair 1596 signal(exchange); 1597 } 1598 else 1599 { 1600 //make phone number available 1601 girlPhoneNo = phoneNo; 1602 1603 //wake boy 1604 signal(boys[ccode]); 1605 1606 //sit in chair 1607 wait(exchange); 1673 int girl(int phoneNo, int cfa) { 1674 // no compatible boy ? 
1675 if(empty(boys[cfa])) { 1676 wait(girls[cfa]); // wait for boy 1677 girlPhoneNo = phoneNo; // make phone number available 1678 signal(exchange); // wake boy from chair 1679 } else { 1680 girlPhoneNo = phoneNo; // make phone number available 1681 signal(boys[cfa]); // wake boy 1682 wait(exchange); // sit in chair 1608 1683 } 1609 1684 return boyPhoneNo; 1610 1685 } 1611 1612 int boy(int phoneNo, int ccode) 1613 { 1614 //same as above 1615 //with boy/girl interchanged 1616 } 1617 \end{cfacode}&\begin{cfacode}[tabsize=3] 1618 monitor DatingService 1619 { 1620 //compatibility codes 1621 enum{ CCodes = 20 }; 1686 int boy(int phoneNo, int cfa) { 1687 // same as above 1688 // with boy/girl interchanged 1689 } 1690 \end{cfa}&\begin{cfa}[tabsize=3] 1691 monitor DatingService { 1692 1693 enum{ CCodes = 20 }; // compatibility codes 1622 1694 1623 1695 int girlPhoneNo; … … 1627 1699 condition girls[CCodes]; 1628 1700 condition boys [CCodes]; 1629 //exchange is not needed 1630 1631 int girl(int phoneNo, int ccode) 1632 { 1633 //no compatible boy ? 1634 if(empty(boys[ccode])) 1635 { 1636 //wait for boy 1637 wait(girls[ccode]); 1638 1639 //make phone number available 1640 girlPhoneNo = phoneNo; 1641 1642 //wake boy from chair 1643 signal(exchange); 1644 } 1645 else 1646 { 1647 //make phone number available 1648 girlPhoneNo = phoneNo; 1649 1650 //wake boy 1651 signal_block(boys[ccode]); 1652 1653 //second handshake unnecessary 1701 // exchange is not needed 1702 1703 int girl(int phoneNo, int cfa) { 1704 // no compatible boy ? 1705 if(empty(boys[cfa])) { 1706 wait(girls[cfa]); // wait for boy 1707 girlPhoneNo = phoneNo; // make phone number available 1708 signal(exchange); // wake boy from chair 1709 } else { 1710 girlPhoneNo = phoneNo; // make phone number available 1711 signal_block(boys[cfa]); // wake boy 1712 1713 // second handshake unnecessary 1654 1714 1655 1715 } … … 1657 1717 } 1658 1718 1659 int boy(int phoneNo, int ccode) 1660 { 1661 //same as above 1662 //with boy/girl interchanged 1663 } 1664 \end{cfacode} 1719 int boy(int phoneNo, int cfa) { 1720 // same as above 1721 // with boy/girl interchanged 1722 } 1723 \end{cfa} 1665 1724 \end{tabular} 1666 \caption{Dating service example using \ code{signal} and \code{signal_block}. }1725 \caption{Dating service example using \protect\lstinline|signal| and \protect\lstinline|signal_block|. } 1667 1726 \label{tbl:datingservice} 1668 1727 \end{table} 1669 1728 An important note is that, until now, signalling a monitor was a delayed operation. 1670 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the \code{signal}statement.1671 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the \code{signal_block}routine.1729 The ownership of the monitor is transferred only when the monitor would have otherwise been released, not at the point of the @signal@ statement. 1730 However, in some cases, it may be more convenient for users to immediately transfer ownership to the thread that is waiting for cooperation, which is achieved using the @signal_block@ routine. 1672 1731 1673 1732 The example in table \ref{tbl:datingservice} highlights the difference in behaviour. 
1674 As mentioned, \code{signal}only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed.1675 To avoid this explicit synchronization, the \code{condition} type offers the \code{signal_block}routine, which handles the two-way handshake as shown in the example.1733 As mentioned, @signal@ only transfers ownership once the current critical section exits; this behaviour requires additional synchronization when a two-way handshake is needed. 1734 To avoid this explicit synchronization, the @condition@ type offers the @signal_block@ routine, which handles the two-way handshake as shown in the example. 1676 1735 This feature removes the need for a second condition variables and simplifies programming. 1677 Like every other monitor semantic, \code{signal_block} uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to \code{signal_block}, meaning no other thread can acquire the monitor either before or after the call.1736 Like every other monitor semantic, @signal_block@ uses barging prevention, which means mutual-exclusion is baton-passed both on the front end and the back end of the call to @signal_block@, meaning no other thread can acquire the monitor either before or after the call. 1678 1737 1679 1738 % ====================================================================== … … 1687 1746 Internal Scheduling & External Scheduling & Go\\ 1688 1747 \hline 1689 \begin{u cppcode}[tabsize=3]1748 \begin{uC++}[tabsize=3] 1690 1749 _Monitor Semaphore { 1691 1750 condition c; … … 1702 1761 } 1703 1762 } 1704 \end{u cppcode}&\begin{ucppcode}[tabsize=3]1763 \end{uC++}&\begin{uC++}[tabsize=3] 1705 1764 _Monitor Semaphore { 1706 1765 … … 1717 1776 } 1718 1777 } 1719 \end{u cppcode}&\begin{gocode}[tabsize=3]1778 \end{uC++}&\begin{Go}[tabsize=3] 1720 1779 type MySem struct { 1721 1780 inUse bool … … 1737 1796 s.inUse = false 1738 1797 1739 // This actually deadlocks1740 // when single thread1798 // This actually deadlocks 1799 // when single thread 1741 1800 s.c <- false 1742 1801 } 1743 \end{ gocode}1802 \end{Go} 1744 1803 \end{tabular} 1745 1804 \caption{Different forms of scheduling.} … … 1748 1807 This method is more constrained and explicit, which helps users reduce the non-deterministic nature of concurrency. 1749 1808 Indeed, as the following examples demonstrate, external scheduling allows users to wait for events from other threads without the concern of unrelated events occurring. 1750 External scheduling can generally be done either in terms of control flow (e.g., Ada with \code{accept}, \uC with \code{_Accept}) or in terms of data (e.g., Go with channels).1809 External scheduling can generally be done either in terms of control flow (e.g., Ada with @accept@, \uC with @_Accept@) or in terms of data (e.g., Go with channels). 1751 1810 Of course, both of these paradigms have their own strengths and weaknesses, but for this project, control-flow semantics was chosen to stay consistent with the rest of the languages semantics. 1752 1811 Two challenges specific to \CFA arise when trying to add external scheduling with loose object definitions and multiple-monitor routines. 
1753 The previous example shows a simple use \code{_Accept} versus \code{wait}/\code{signal}and its advantages.1754 Note that while other languages often use \code{accept}/\code{select} as the core external scheduling keyword, \CFA uses \code{waitfor}to prevent name collisions with existing socket \textbf{api}s.1755 1756 For the \code{P} member above using internal scheduling, the call to \code{wait} only guarantees that \code{V} is the last routine to access the monitor, allowing a third routine, say \code{isInUse()}, acquire mutual exclusion several times while routine \code{P}is waiting.1757 On the other hand, external scheduling guarantees that while routine \code{P} is waiting, no other routine than \code{V}can acquire the monitor.1812 The previous example shows a simple use @_Accept@ versus @wait@/@signal@ and its advantages. 1813 Note that while other languages often use @accept@/@select@ as the core external scheduling keyword, \CFA uses @waitfor@ to prevent name collisions with existing socket \textbf{api}s. 1814 1815 For the @P@ member above using internal scheduling, the call to @wait@ only guarantees that @V@ is the last routine to access the monitor, allowing a third routine, say @isInUse()@, acquire mutual exclusion several times while routine @P@ is waiting. 1816 On the other hand, external scheduling guarantees that while routine @P@ is waiting, no other routine than @V@ can acquire the monitor. 1758 1817 1759 1818 % ====================================================================== … … 1765 1824 Since \CFA is not object oriented, monitors become both more difficult to implement and less clear for a user: 1766 1825 1767 \begin{cfa code}1826 \begin{cfa} 1768 1827 monitor A {}; 1769 1828 1770 1829 void f(A & mutex a); 1771 1830 void g(A & mutex a) { 1772 waitfor(f); // Obvious which f() to wait for1773 } 1774 1775 void f(A & mutex a, int); // New different F added in scope1831 waitfor(f); // Obvious which f() to wait for 1832 } 1833 1834 void f(A & mutex a, int); // New different F added in scope 1776 1835 void h(A & mutex a) { 1777 waitfor(f); // Less obvious which f() to wait for1778 } 1779 \end{cfa code}1836 waitfor(f); // Less obvious which f() to wait for 1837 } 1838 \end{cfa} 1780 1839 1781 1840 Furthermore, external scheduling is an example where implementation constraints become visible from the interface. 1782 Here is the pseudo-code for the entering phase of a monitor:1841 Here is the cfa-code for the entering phase of a monitor: 1783 1842 \begin{center} 1784 1843 \begin{tabular}{l} 1785 \begin{ pseudo}1844 \begin{cfa} 1786 1845 if monitor is free 1787 1846 enter … … 1792 1851 else 1793 1852 block 1794 \end{ pseudo}1853 \end{cfa} 1795 1854 \end{tabular} 1796 1855 \end{center} 1797 1856 For the first two conditions, it is easy to implement a check that can evaluate the condition in a few instructions. 1798 However, a fast check for \pscode{monitor accepts me}is much harder to implement depending on the constraints put on the monitors.1857 However, a fast check for @monitor accepts me@ is much harder to implement depending on the constraints put on the monitors. 1799 1858 Indeed, monitors are often expressed as an entry queue and some acceptor queue as in Figure~\ref{fig:ClassicalMonitor}. 1800 1859 … … 1822 1881 The alternative is to alter the implementation as in Figure~\ref{fig:BulkMonitor}. 1823 1882 Here, the mutex routine called is associated with a thread on the entry queue while a list of acceptable routines is kept separate. 
1824 Generating a mask dynamically means that the storage for the mask information can vary between calls to \code{waitfor}, allowing for more flexibility and extensions.1883 Generating a mask dynamically means that the storage for the mask information can vary between calls to @waitfor@, allowing for more flexibility and extensions. 1825 1884 Storing an array of accepted function pointers replaces the single instruction bitmask comparison with dereferencing a pointer followed by a linear search. 1826 Furthermore, supporting nested external scheduling (e.g., listing \ref{lst:nest-ext}) may now require additional searches for the \code{waitfor}statement to check if a routine is already queued.1885 Furthermore, supporting nested external scheduling (e.g., listing \ref{lst:nest-ext}) may now require additional searches for the @waitfor@ statement to check if a routine is already queued. 1827 1886 1828 1887 \begin{figure} 1829 \begin{cfa code}[caption={Example of nested external scheduling},label={lst:nest-ext}]1888 \begin{cfa}[caption={Example of nested external scheduling},label={lst:nest-ext}] 1830 1889 monitor M {}; 1831 1890 void foo( M & mutex a ) {} 1832 1891 void bar( M & mutex b ) { 1833 // Nested in the waitfor(bar, c) call1892 // Nested in the waitfor(bar, c) call 1834 1893 waitfor(foo, b); 1835 1894 } … … 1838 1897 } 1839 1898 1840 \end{cfa code}1899 \end{cfa} 1841 1900 \end{figure} 1842 1901 … … 1858 1917 External scheduling, like internal scheduling, becomes significantly more complex when introducing multi-monitor syntax. 1859 1918 Even in the simplest possible case, some new semantics needs to be established: 1860 \begin{cfa code}1919 \begin{cfa} 1861 1920 monitor M {}; 1862 1921 … … 1864 1923 1865 1924 void g(M & mutex b, M & mutex c) { 1866 waitfor(f); // two monitors M => unknown which to pass to f(M & mutex)1867 } 1868 \end{cfa code}1925 waitfor(f); // two monitors M => unknown which to pass to f(M & mutex) 1926 } 1927 \end{cfa} 1869 1928 The obvious solution is to specify the correct monitor as follows: 1870 1929 1871 \begin{cfa code}1930 \begin{cfa} 1872 1931 monitor M {}; 1873 1932 … … 1875 1934 1876 1935 void g(M & mutex a, M & mutex b) { 1877 // wait for call to f with argument b1936 // wait for call to f with argument b 1878 1937 waitfor(f, b); 1879 1938 } 1880 \end{cfa code}1939 \end{cfa} 1881 1940 This syntax is unambiguous. 1882 Both locks are acquired and kept by \code{g}.1883 When routine \code{f} is called, the lock for monitor \code{b} is temporarily transferred from \code{g} to \code{f} (while \code{g} still holds lock \code{a}).1884 This behaviour can be extended to the multi-monitor \code{waitfor}statement as follows.1885 1886 \begin{cfa code}1941 Both locks are acquired and kept by @g@. 1942 When routine @f@ is called, the lock for monitor @b@ is temporarily transferred from @g@ to @f@ (while @g@ still holds lock @a@). 1943 This behaviour can be extended to the multi-monitor @waitfor@ statement as follows. 1944 1945 \begin{cfa} 1887 1946 monitor M {}; 1888 1947 … … 1890 1949 1891 1950 void g(M & mutex a, M & mutex b) { 1892 // wait for call to f with arguments a and b1951 // wait for call to f with arguments a and b 1893 1952 waitfor(f, a, b); 1894 1953 } 1895 \end{cfa code}1896 1897 Note that the set of monitors passed to the \code{waitfor} statement must be entirely contained in the set of monitors already acquired in the routine. 
\code{waitfor}used in any other context is undefined behaviour.1954 \end{cfa} 1955 1956 Note that the set of monitors passed to the @waitfor@ statement must be entirely contained in the set of monitors already acquired in the routine. @waitfor@ used in any other context is undefined behaviour. 1898 1957 1899 1958 An important behaviour to note is when a set of monitors only match partially: 1900 1959 1901 \begin{cfa code}1960 \begin{cfa} 1902 1961 mutex struct A {}; 1903 1962 … … 1912 1971 1913 1972 void foo() { 1914 g(a1, b); // block on accept1973 g(a1, b); // block on accept 1915 1974 } 1916 1975 1917 1976 void bar() { 1918 f(a2, b); // fulfill cooperation1919 } 1920 \end{cfa code}1977 f(a2, b); // fulfill cooperation 1978 } 1979 \end{cfa} 1921 1980 While the equivalent can happen when using internal scheduling, the fact that conditions are specific to a set of monitors means that users have to use two different condition variables. 1922 1981 In both cases, partially matching monitor sets does not wakeup the waiting thread. 1923 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; \code{waitfor(f,a,b)} and \code{waitfor(f,b,a)}are indistinguishable waiting condition.1924 1925 % ====================================================================== 1926 % ====================================================================== 1927 \subsection{\ code{waitfor}Semantics}1928 % ====================================================================== 1929 % ====================================================================== 1930 1931 Syntactically, the \code{waitfor}statement takes a function identifier and a set of monitors.1932 While the set of monitors can be any list of expressions, the function name is more restricted because the compiler validates at compile time the validity of the function type and the parameters used with the \code{waitfor}statement.1982 It is also important to note that in the case of external scheduling the order of parameters is irrelevant; @waitfor(f,a,b)@ and @waitfor(f,b,a)@ are indistinguishable waiting condition. 1983 1984 % ====================================================================== 1985 % ====================================================================== 1986 \subsection{\protect\lstinline|waitfor| Semantics} 1987 % ====================================================================== 1988 % ====================================================================== 1989 1990 Syntactically, the @waitfor@ statement takes a function identifier and a set of monitors. 1991 While the set of monitors can be any list of expressions, the function name is more restricted because the compiler validates at compile time the validity of the function type and the parameters used with the @waitfor@ statement. 1933 1992 It checks that the set of monitors passed in matches the requirements for a function call. 1934 1993 Listing \ref{lst:waitfor} shows various usages of the waitfor statement and which are acceptable. 1935 The choice of the function type is made ignoring any non- \code{mutex}parameter.1994 The choice of the function type is made ignoring any non-@mutex@ parameter. 1936 1995 One limitation of the current implementation is that it does not handle overloading, but overloading is possible. 
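As a sketch of typical usage, assuming a hypothetical bounded buffer (the type @Buffer@ and routines @insert@/@remove@ are illustrative, not taken from the listings), @waitfor@ lets a full buffer accept only @remove@ calls and an empty buffer only @insert@ calls:
\begin{cfa}
monitor Buffer {
	int elems[10];
	int front, back, count;
};
void ?{}( Buffer & buf ) {
	buf.front = 0; buf.back = 0; buf.count = 0;
}

void insert( Buffer & mutex buf, int elem );	// forward declarations for waitfor
int  remove( Buffer & mutex buf );

void insert( Buffer & mutex buf, int elem ) {
	if ( buf.count == 10 ) waitfor( remove, buf );	// full : accept only remove
	buf.elems[ buf.back ] = elem;
	buf.back = ( buf.back + 1 ) % 10;
	buf.count += 1;
}
int remove( Buffer & mutex buf ) {
	if ( buf.count == 0 ) waitfor( insert, buf );	// empty : accept only insert
	int elem = buf.elems[ buf.front ];
	buf.front = ( buf.front + 1 ) % 10;
	buf.count -= 1;
	return elem;
}
\end{cfa}
Given the barging-prevention guarantee discussed earlier, a simple @if@, rather than a loop, suffices here: once the accepted call completes and exits the monitor, the blocked routine continues next.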
1937 1996 \begin{figure} 1938 \begin{cfa code}[caption={Various correct and incorrect uses of the waitfor statement},label={lst:waitfor}]1997 \begin{cfa}[caption={Various correct and incorrect uses of the waitfor statement},label={lst:waitfor}] 1939 1998 monitor A{}; 1940 1999 monitor B{}; … … 1950 2009 void (*fp)( A & mutex ) = f1; 1951 2010 1952 waitfor(f1, a1); // Correct : 1 monitor case1953 waitfor(f2, a1, b1); // Correct : 2 monitor case1954 waitfor(f3, a1); // Correct : non-mutex arguments are ignored1955 waitfor(f1, *ap); // Correct : expression as argument1956 1957 waitfor(f1, a1, b1); // Incorrect : Too many mutex arguments1958 waitfor(f2, a1); // Incorrect : Too few mutex arguments1959 waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match1960 waitfor(f1, 1); // Incorrect : 1 not a mutex argument1961 waitfor(f9, a1); // Incorrect : f9 function does not exist1962 waitfor(*fp, a1 ); // Incorrect : fp not an identifier1963 waitfor(f4, a1); // Incorrect : f4 ambiguous1964 1965 waitfor(f2, a1, b2); // Undefined behaviour : b2 not mutex1966 } 1967 \end{cfa code}2011 waitfor(f1, a1); // Correct : 1 monitor case 2012 waitfor(f2, a1, b1); // Correct : 2 monitor case 2013 waitfor(f3, a1); // Correct : non-mutex arguments are ignored 2014 waitfor(f1, *ap); // Correct : expression as argument 2015 2016 waitfor(f1, a1, b1); // Incorrect : Too many mutex arguments 2017 waitfor(f2, a1); // Incorrect : Too few mutex arguments 2018 waitfor(f2, a1, a2); // Incorrect : Mutex arguments don't match 2019 waitfor(f1, 1); // Incorrect : 1 not a mutex argument 2020 waitfor(f9, a1); // Incorrect : f9 function does not exist 2021 waitfor(*fp, a1 ); // Incorrect : fp not an identifier 2022 waitfor(f4, a1); // Incorrect : f4 ambiguous 2023 2024 waitfor(f2, a1, b2); // Undefined behaviour : b2 not mutex 2025 } 2026 \end{cfa} 1968 2027 \end{figure} 1969 2028 1970 Finally, for added flexibility, \CFA supports constructing a complex \code{waitfor} statement using the \code{or}, \code{timeout} and \code{else}.1971 Indeed, multiple \code{waitfor} clauses can be chained together using \code{or}; this chain forms a single statement that uses baton pass to any function that fits one of the function+monitor set passed in.1972 To enable users to tell which accepted function executed, \code{waitfor}s are followed by a statement (including the null statement \code{;}) or a compound statement, which is executed after the clause is triggered.1973 A \code{waitfor} chain can also be followed by a \code{timeout}, to signify an upper bound on the wait, or an \code{else}, to signify that the call should be non-blocking, which checks for a matching function call already arrived and otherwise continues.1974 Any and all of these clauses can be preceded by a \code{when}condition to dynamically toggle the accept clauses on or off based on some current state.2029 Finally, for added flexibility, \CFA supports constructing a complex @waitfor@ statement using the @or@, @timeout@ and @else@. 2030 Indeed, multiple @waitfor@ clauses can be chained together using @or@; this chain forms a single statement that uses baton pass to any function that fits one of the function+monitor set passed in. 2031 To enable users to tell which accepted function executed, @waitfor@s are followed by a statement (including the null statement @;@) or a compound statement, which is executed after the clause is triggered. 
2032 A @waitfor@ chain can also be followed by a @timeout@, to signify an upper bound on the wait, or an @else@, to signify that the call should be non-blocking, which checks for a matching function call already arrived and otherwise continues. 2033 Any and all of these clauses can be preceded by a @when@ condition to dynamically toggle the accept clauses on or off based on some current state. 1975 2034 Listing \ref{lst:waitfor2} demonstrates several complex masks and some incorrect ones. 1976 2035 1977 2036 \begin{figure} 1978 \begin{cfacode}[caption={Various correct and incorrect uses of the or, else, and timeout clause around a waitfor statement},label={lst:waitfor2}] 2037 \lstset{language=CFA,deletedelim=**[is][]{`}{`}} 2038 \begin{cfa} 1979 2039 monitor A{}; 1980 2040 … … 1983 2043 1984 2044 void foo( A & mutex a, bool b, int t ) { 1985 //Correct : blocking case 1986 waitfor(f1, a); 1987 1988 //Correct : block with statement 1989 waitfor(f1, a) { 2045 waitfor(f1, a); $\C{// Correct : blocking case}$ 2046 2047 waitfor(f1, a) { $\C{// Correct : block with statement}$ 1990 2048 sout | "f1" | endl; 1991 2049 } 1992 1993 //Correct : block waiting for f1 or f2 1994 waitfor(f1, a) { 2050 waitfor(f1, a) { $\C{// Correct : block waiting for f1 or f2}$ 1995 2051 sout | "f1" | endl; 1996 2052 } or waitfor(f2, a) { 1997 2053 sout | "f2" | endl; 1998 2054 } 1999 2000 //Correct : non-blocking case 2001 waitfor(f1, a); or else; 2002 2003 //Correct : non-blocking case 2004 waitfor(f1, a) { 2055 waitfor(f1, a); or else; $\C{// Correct : non-blocking case}$ 2056 2057 waitfor(f1, a) { $\C{// Correct : non-blocking case}$ 2005 2058 sout | "blocked" | endl; 2006 2059 } or else { 2007 2060 sout | "didn't block" | endl; 2008 2061 } 2009 2010 //Correct : block at most 10 seconds 2011 waitfor(f1, a) { 2062 waitfor(f1, a) { $\C{// Correct : block at most 10 seconds}$ 2012 2063 sout | "blocked" | endl; 2013 2064 } or timeout( 10`s) { 2014 2065 sout | "didn't block" | endl; 2015 2066 } 2016 2017 //Correct : block only if b == true 2018 //if b == false, don't even make the call 2067 // Correct : block only if b == true if b == false, don't even make the call 2019 2068 when(b) waitfor(f1, a); 2020 2069 2021 //Correct : block only if b == true 2022 //if b == false, make non-blocking call 2070 // Correct : block only if b == true if b == false, make non-blocking call 2023 2071 waitfor(f1, a); or when(!b) else; 2024 2072 2025 // Correct : block only of t > 12073 // Correct : block only of t > 1 2026 2074 waitfor(f1, a); or when(t > 1) timeout(t); or else; 2027 2075 2028 // Incorrect : timeout clause is dead code2076 // Incorrect : timeout clause is dead code 2029 2077 waitfor(f1, a); or timeout(t); or else; 2030 2078 2031 //Incorrect : order must be 2032 //waitfor [or waitfor... [or timeout] [or else]] 2079 // Incorrect : order must be waitfor [or waitfor... 
[or timeout] [or else]] 2033 2080 timeout(t); or waitfor(f1, a); or else; 2034 2081 } 2035 \end{cfacode} 2082 \end{cfa} 2083 \caption{Correct and incorrect uses of the or, else, and timeout clause around a waitfor statement} 2084 \label{lst:waitfor2} 2036 2085 \end{figure} 2037 2086 … … 2041 2090 % ====================================================================== 2042 2091 % ====================================================================== 2043 An interesting use for the \code{waitfor}statement is destructor semantics.2044 Indeed, the \code{waitfor} statement can accept any \code{mutex}routine, which includes the destructor (see section \ref{data}).2092 An interesting use for the @waitfor@ statement is destructor semantics. 2093 Indeed, the @waitfor@ statement can accept any @mutex@ routine, which includes the destructor (see section \ref{data}). 2045 2094 However, with the semantics discussed until now, waiting for the destructor does not make any sense, since using an object after its destructor is called is undefined behaviour. 2046 The simplest approach is to disallow \code{waitfor}on a destructor.2047 However, a more expressive approach is to flip ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current \code{mutex}routine, similarly to how a condition is signalled.2095 The simplest approach is to disallow @waitfor@ on a destructor. 2096 However, a more expressive approach is to flip ordering of execution when waiting for the destructor, meaning that waiting for the destructor allows the destructor to run after the current @mutex@ routine, similarly to how a condition is signalled. 2048 2097 \begin{figure} 2049 \begin{cfa code}[caption={Example of an executor which executes action in series until the destructor is called.},label={lst:dtor-order}]2098 \begin{cfa}[caption={Example of an executor which executes action in series until the destructor is called.},label={lst:dtor-order}] 2050 2099 monitor Executer {}; 2051 2100 struct Action; … … 2061 2110 } 2062 2111 } 2063 \end{cfa code}2112 \end{cfa} 2064 2113 \end{figure} 2065 2114 For example, listing \ref{lst:dtor-order} shows an example of an executor with an infinite loop, which waits for the destructor to break out of this loop. … … 2079 2128 In this decade, it is no longer reasonable to create a high-performance application without caring about parallelism. 2080 2129 Indeed, parallelism is an important aspect of performance and more specifically throughput and hardware utilization. 2081 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like \code{fork}, \code{join}, etc.2130 The lowest-level approach of parallelism is to use \textbf{kthread} in combination with semantics like @fork@, @join@, etc. 2082 2131 However, since these have significant costs and limitations, \textbf{kthread} are now mostly used as an implementation tool rather than a user oriented one. 2083 2132 There are several alternatives to solve these issues that all have strengths and weaknesses. … … 2158 2207 Conveniently, the call stack fits that description and is easy to use, which is why it is used heavily in the implementation of internal scheduling, particularly variable-length arrays. 2159 2208 Since stack allocation is based on scopes, the first step of the implementation is to identify the scopes that are available to store the information, and which of these can have a variable-length array. 
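To make the stack-allocation idea concrete, the following plain C99 sketch shows per-call bookkeeping whose size depends on the number of monitors involved, allocated as a variable-length array in the blocking call's stack frame. The struct and routine names are illustrative only, not the actual runtime types.

#include <pthread.h>

struct criterion {                              // one entry per monitor involved in the wait
    pthread_mutex_t * monitor;                  // monitor this criterion refers to
    int satisfied;                              // has ownership been transferred back yet?
};

// Hypothetical blocking call: the bookkeeping lives in this stack frame,
// so its size can depend on the run-time number of monitors.
void wait_on( int nmon, pthread_mutex_t * monitors[] ) {
    struct criterion crit[nmon];                // C99 VLA: storage scales with nmon, no heap allocation
    for ( int i = 0; i < nmon; i += 1 ) {
        crit[i].monitor = monitors[i];
        crit[i].satisfied = 0;
    }
    // ... enqueue crit on the condition, release the monitors, block, reacquire ...
}                                               // crit is reclaimed automatically on return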
2160 The threads and the condition both have a fixed amount of memory, while \code{mutex}routines and blocking calls allow for an unbound amount, within the stack size.2209 The threads and the condition both have a fixed amount of memory, while @mutex@ routines and blocking calls allow for an unbound amount, within the stack size. 2161 2210 2162 2211 Note that since the major contributions of this paper are extending monitor semantics to \textbf{bulk-acq} and loose object definitions, any challenges that are not resulting of these characteristics of \CFA are considered as solved problems and therefore not discussed. … … 2168 2217 % ====================================================================== 2169 2218 2170 The first step towards the monitor implementation is simple \code{mutex}routines.2219 The first step towards the monitor implementation is simple @mutex@ routines. 2171 2220 In the single monitor case, mutual-exclusion is done using the entry/exit procedure in listing \ref{lst:entry1}. 2172 2221 The entry/exit procedures do not have to be extended to support multiple monitors. … … 2179 2228 \begin{multicols}{2} 2180 2229 Entry 2181 \begin{ pseudo}2230 \begin{cfa} 2182 2231 if monitor is free 2183 2232 enter … … 2187 2236 block 2188 2237 increment recursions 2189 \end{ pseudo}2238 \end{cfa} 2190 2239 \columnbreak 2191 2240 Exit 2192 \begin{ pseudo}2241 \begin{cfa} 2193 2242 decrement recursion 2194 2243 if recursion == 0 2195 2244 if entry queue not empty 2196 2245 wake-up thread 2197 \end{ pseudo}2246 \end{cfa} 2198 2247 \end{multicols} 2199 \begin{ pseudo}[caption={Initial entry and exit routine for monitors},label={lst:entry1}]2200 \end{ pseudo}2248 \begin{cfa}[caption={Initial entry and exit routine for monitors},label={lst:entry1}] 2249 \end{cfa} 2201 2250 \end{figure} 2202 2251 … … 2205 2254 However, it is shown that entry-point locking solves most of the issues. 2206 2255 2207 First of all, interaction between \code{otype}polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying.2208 Therefore, the main question is how to support \code{dtype}polymorphism.2256 First of all, interaction between @otype@ polymorphism (see Section~\ref{s:ParametricPolymorphism}) and monitors is impossible since monitors do not support copying. 2257 Therefore, the main question is how to support @dtype@ polymorphism. 2209 2258 It is important to present the difference between the two acquiring options: \textbf{callsite-locking} and entry-point locking, i.e., acquiring the monitors before making a mutex routine-call or as the first operation of the mutex routine-call. 2210 2259 For example: 2211 \begin{table} [H]2260 \begin{table} 2212 2261 \begin{center} 2213 2262 \begin{tabular}{|c|c|c|} 2214 2263 Mutex & \textbf{callsite-locking} & \textbf{entry-point-locking} \\ 2215 call & pseudo-code & pseudo-code \\2264 call & cfa-code & cfa-code \\ 2216 2265 \hline 2217 \begin{cfa code}[tabsize=3]2266 \begin{cfa}[tabsize=3] 2218 2267 void foo(monitor& mutex a){ 2219 2268 2220 // Do Work2269 // Do Work 2221 2270 //... 2222 2271 … … 2229 2278 2230 2279 } 2231 \end{cfa code} & \begin{pseudo}[tabsize=3]2280 \end{cfa} & \begin{cfa}[tabsize=3] 2232 2281 foo(& a) { 2233 2282 2234 // Do Work2283 // Do Work 2235 2284 //... 2236 2285 … … 2243 2292 release(a); 2244 2293 } 2245 \end{ pseudo} & \begin{pseudo}[tabsize=3]2294 \end{cfa} & \begin{cfa}[tabsize=3] 2246 2295 foo(& a) { 2247 2296 acquire(a); 2248 // Do Work2297 // Do Work 2249 2298 //... 
2250 2299 release(a); … … 2257 2306 2258 2307 } 2259 \end{ pseudo}2308 \end{cfa} 2260 2309 \end{tabular} 2261 2310 \end{center} … … 2264 2313 \end{table} 2265 2314 2266 Note the \code{mutex}keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, e.g.:2267 \begin{cfa code}2268 // Incorrect: T may not be monitor2315 Note the @mutex@ keyword relies on the type system, which means that in cases where a generic monitor-routine is desired, writing the mutex routine is possible with the proper trait, e.g.: 2316 \begin{cfa} 2317 // Incorrect: T may not be monitor 2269 2318 forall(dtype T) 2270 2319 void foo(T * mutex t); 2271 2320 2272 // Correct: this function only works on monitors (any monitor)2321 // Correct: this function only works on monitors (any monitor) 2273 2322 forall(dtype T | is_monitor(T)) 2274 2323 void bar(T * mutex t)); 2275 \end{cfa code}2324 \end{cfa} 2276 2325 2277 2326 Both entry point and \textbf{callsite-locking} are feasible implementations. … … 2299 2348 2300 2349 \subsection{Processors} 2301 Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically \texttt{pthread}s in the current implementation of \CFA.2350 Parallelism in \CFA is built around using processors to specify how much parallelism is desired. \CFA processors are object wrappers around kernel threads, specifically @pthread@s in the current implementation of \CFA. 2302 2351 Indeed, any parallelism must go through operating-system libraries. 2303 2352 However, \textbf{uthread} are still the main source of concurrency, processors are simply the underlying source of parallelism. … … 2308 2357 \subsection{Stack Management} 2309 2358 One of the challenges of this system is to reduce the footprint as much as possible. 2310 Specifically, all \texttt{pthread}s created also have a stack created with them, which should be used as much as possible.2359 Specifically, all @pthread@s created also have a stack created with them, which should be used as much as possible. 2311 2360 Normally, coroutines also create their own stack to run on, however, in the case of the coroutines used for processors, these coroutines run directly on the \textbf{kthread} stack, effectively stealing the processor stack. 2312 2361 The exception to this rule is the Main Processor, i.e., the initial \textbf{kthread} that is given to any program. … … 2323 2372 Obviously, this doubles the context-switch cost because threads must context-switch to an intermediate stack. 2324 2373 The alternative 1-step context-switch uses the stack of the ``from'' thread to schedule and then context-switches directly to the ``to'' thread. 2325 However, the performance of the 2-step context-switch is still superior to a \code{pthread_yield}(see section \ref{results}).2326 Additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft \code{SwitchToFiber}~\cite{switchToWindows} routine).2374 However, the performance of the 2-step context-switch is still superior to a @pthread_yield@ (see section \ref{results}). 
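For readers unfamiliar with user-level context switching, the following self-contained C program illustrates the mechanism with the POSIX ucontext facility: control transfers between the main context and a coroutine-like context without involving the kernel scheduler. It is only an illustration of the technique, not the hand-written context switch used by the CFA runtime.

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, coro_ctx;

static void coroutine( void ) {
    printf( "coroutine: first resume\n" );
    swapcontext( &coro_ctx, &main_ctx );        // suspend back to main
    printf( "coroutine: second resume\n" );
}                                               // returning follows uc_link back to main_ctx

int main( void ) {
    char * stack = malloc( 64 * 1024 );         // stack for the coroutine context
    getcontext( &coro_ctx );
    coro_ctx.uc_stack.ss_sp = stack;
    coro_ctx.uc_stack.ss_size = 64 * 1024;
    coro_ctx.uc_link = &main_ctx;               // where to continue when the coroutine returns
    makecontext( &coro_ctx, coroutine, 0 );

    swapcontext( &main_ctx, &coro_ctx );        // first switch into the coroutine
    printf( "main: back in main\n" );
    swapcontext( &main_ctx, &coro_ctx );        // second switch; coroutine finishes
    printf( "main: done\n" );
    free( stack );
    return 0;
}

A 2-step switch, as described above, simply routes through one extra swapcontext to an intermediate scheduler context before reaching the target thread.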
2375 Additionally, for users in need for optimal performance, it is important to note that having a 2-step context-switch as the default does not prevent \CFA from offering a 1-step context-switch (akin to the Microsoft @SwitchToFiber@~\cite{switchToWindows} routine). 2327 2376 This option is not currently present in \CFA, but the changes required to add it are strictly additive. 2328 2377 … … 2356 2405 This behaviour is only a problem if all kernel threads, among which a user thread can migrate, differ in terms of signal masks\footnote{Sadly, official POSIX documentation is silent on what distinguishes ``async-signal-safe'' functions from other functions.}. 2357 2406 However, since the kernel thread handling preemption requires a different signal mask, executing user threads on the kernel-alarm thread can cause deadlocks. 2358 For this reason, the alarm thread is in a tight loop around a system call to \code{sigwaitinfo}, requiring very little CPU time for preemption.2407 For this reason, the alarm thread is in a tight loop around a system call to @sigwaitinfo@, requiring very little CPU time for preemption. 2359 2408 One final detail about the alarm thread is how to wake it when additional communication is required (e.g., on thread termination). 2360 This unblocking is also done using {\tt SIGALRM}, but sent through the \code{pthread_sigqueue}.2361 Indeed, \code{sigwait} can differentiate signals sent from \code{pthread_sigqueue}from signals sent from alarms or the kernel.2409 This unblocking is also done using {\tt SIGALRM}, but sent through the @pthread_sigqueue@. 2410 Indeed, @sigwait@ can differentiate signals sent from @pthread_sigqueue@ from signals sent from alarms or the kernel. 2362 2411 2363 2412 \subsection{Scheduler} … … 2373 2422 The following figure is the traditional illustration of a monitor (repeated from page~\pageref{fig:ClassicalMonitor} for convenience): 2374 2423 2375 \begin{figure} [H]2424 \begin{figure} 2376 2425 \begin{center} 2377 2426 {\resizebox{0.4\textwidth}{!}{\input{monitor}}} … … 2388 2437 Secondly, the thread waiting on the condition has to be separated across multiple monitors, seen in figure \ref{fig:monitor_cfa}. 2389 2438 2390 \begin{figure} [H]2439 \begin{figure} 2391 2440 \begin{center} 2392 2441 {\resizebox{0.8\textwidth}{!}{\input{int_monitor}}} … … 2402 2451 For a specific signalling operation every monitor needs a piece of thread on its AS-stack. 2403 2452 2404 \begin{figure} [b]2453 \begin{figure} 2405 2454 \begin{multicols}{2} 2406 2455 Entry 2407 \begin{ pseudo}2456 \begin{cfa} 2408 2457 if monitor is free 2409 2458 enter … … 2414 2463 increment recursion 2415 2464 2416 \end{ pseudo}2465 \end{cfa} 2417 2466 \columnbreak 2418 2467 Exit 2419 \begin{ pseudo}2468 \begin{cfa} 2420 2469 decrement recursion 2421 2470 if recursion == 0 … … 2427 2476 if entry queue not empty 2428 2477 wake-up thread 2429 \end{ pseudo}2478 \end{cfa} 2430 2479 \end{multicols} 2431 \begin{ pseudo}[caption={Entry and exit routine for monitors with internal scheduling},label={lst:entry2}]2432 \end{ pseudo}2480 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling},label={lst:entry2}] 2481 \end{cfa} 2433 2482 \end{figure} 2434 2483 … … 2436 2485 Basically, the solution boils down to having a separate data structure for the condition queue and the AS-stack, and unconditionally transferring ownership of the monitors but only unblocking the thread when the last monitor has transferred ownership. 
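For comparison, the closest plain-pthreads approximation of waiting while holding two locks is sketched below; the second mutex has to be released and reacquired by hand, which opens exactly the barging window that the ownership-transfer (AS-stack) design avoids. The names are illustrative.

#include <pthread.h>
#include <stdbool.h>

pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  c  = PTHREAD_COND_INITIALIZER;
bool ready = false;                             // set under m1 by the signalling thread

// Called with BOTH m1 and m2 held: wait until 'ready' is signalled.
void wait_two( void ) {
    pthread_mutex_unlock( &m2 );                // manually drop the second monitor
    // barging window: another thread may acquire m2 here
    while ( ! ready )
        pthread_cond_wait( &c, &m1 );           // atomically releases/reacquires only m1
    pthread_mutex_lock( &m2 );                  // manually reacquire the second monitor
    // barging window: state protected by m2 may have changed since the signal
}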
2437 2486 This solution is deadlock safe as well as preventing any potential barging. 2438 The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call stack of the \code{wait} and \code{signal_block}routines.2439 2440 \begin{figure} [H]2487 The data structures used for the AS-stack are reused extensively for external scheduling, but in the case of internal scheduling, the data is allocated using variable-length arrays on the call stack of the @wait@ and @signal_block@ routines. 2488 2489 \begin{figure} 2441 2490 \begin{center} 2442 2491 {\resizebox{0.8\textwidth}{!}{\input{monitor_structs.pstex_t}}} … … 2448 2497 Figure \ref{fig:structs} shows a high-level representation of these data structures. 2449 2498 The main idea behind them is that, a thread cannot contain an arbitrary number of intrusive ``next'' pointers for linking onto monitors. 2450 The \code{condition node} is the data structure that is queued onto a condition variable and, when signalled, the condition queue is popped and each \code{condition criterion}is moved to the AS-stack.2499 The @condition node@ is the data structure that is queued onto a condition variable and, when signalled, the condition queue is popped and each @condition criterion@ is moved to the AS-stack. 2451 2500 Once all the criteria have been popped from their respective AS-stacks, the thread is woken up, which is what is shown in listing \ref{lst:entry2}. 2452 2501 … … 2458 2507 Similarly to internal scheduling, external scheduling for multiple monitors relies on the idea that waiting-thread queues are no longer specific to a single monitor, as mentioned in section \ref{extsched}. 2459 2508 For internal scheduling, these queues are part of condition variables, which are still unique for a given scheduling operation (i.e., no signal statement uses multiple conditions). 2460 However, in the case of external scheduling, there is no equivalent object which is associated with \code{waitfor}statements.2509 However, in the case of external scheduling, there is no equivalent object which is associated with @waitfor@ statements. 2461 2510 This absence means the queues holding the waiting threads must be stored inside at least one of the monitors that is acquired. 2462 These monitors being the only objects that have sufficient lifetime and are available on both sides of the \code{waitfor}statement.2511 These monitors being the only objects that have sufficient lifetime and are available on both sides of the @waitfor@ statement. 2463 2512 This requires an algorithm to choose which monitor holds the relevant queue. 2464 2513 It is also important that said algorithm be independent of the order in which users list parameters. … … 2477 2526 \begin{itemize} 2478 2527 \item The threads waiting on the entry queue need to keep track of which routine they are trying to enter, and using which set of monitors. 2479 The \code{mutex}routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information.2528 The @mutex@ routine already has all the required information on its stack, so the thread only needs to keep a pointer to that information. 2480 2529 \item The monitors need to keep a mask of acceptable routines. 2481 2530 This mask contains for each acceptable routine, a routine pointer and an array of monitors to go with it. 
2482 2531 It also needs storage to keep track of which routine was accepted. 2483 2532 Since this information is not specific to any monitor, the monitors actually contain a pointer to an integer on the stack of the waiting thread. 2484 Note that if a thread has acquired two monitors but executes a \code{waitfor}with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless.2485 This becomes relevant when \code{when} clauses affect the number of monitors passed to a \code{waitfor}statement.2533 Note that if a thread has acquired two monitors but executes a @waitfor@ with only one monitor as a parameter, setting the mask of acceptable routines to both monitors will not cause any problems since the extra monitor will not change ownership regardless. 2534 This becomes relevant when @when@ clauses affect the number of monitors passed to a @waitfor@ statement. 2486 2535 \item The entry/exit routines need to be updated as shown in listing \ref{lst:entry3}. 2487 2536 \end{itemize} … … 2491 2540 This routine is needed because of the storage requirements of the call order inversion. 2492 2541 Indeed, when waiting for the destructors, storage is needed for the waiting context and the lifetime of said storage needs to outlive the waiting operation it is needed for. 2493 For regular \code{waitfor}statements, the call stack of the routine itself matches this requirement but it is no longer the case when waiting for the destructor since it is pushed on to the AS-stack for later.2494 The \code{waitfor}semantics can then be adjusted correspondingly, as seen in listing \ref{lst:entry-dtor}2542 For regular @waitfor@ statements, the call stack of the routine itself matches this requirement but it is no longer the case when waiting for the destructor since it is pushed on to the AS-stack for later. 
2543 The @waitfor@ semantics can then be adjusted correspondingly, as seen in listing \ref{lst:entry-dtor} 2495 2544 2496 2545 \begin{figure} 2497 2546 \begin{multicols}{2} 2498 2547 Entry 2499 \begin{ pseudo}2548 \begin{cfa} 2500 2549 if monitor is free 2501 2550 enter … … 2508 2557 block 2509 2558 increment recursion 2510 \end{ pseudo}2559 \end{cfa} 2511 2560 \columnbreak 2512 2561 Exit 2513 \begin{ pseudo}2562 \begin{cfa} 2514 2563 decrement recursion 2515 2564 if recursion == 0 … … 2524 2573 wake-up thread 2525 2574 endif 2526 \end{ pseudo}2575 \end{cfa} 2527 2576 \end{multicols} 2528 \begin{ pseudo}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={lst:entry3}]2529 \end{ pseudo}2577 \begin{cfa}[caption={Entry and exit routine for monitors with internal scheduling and external scheduling},label={lst:entry3}] 2578 \end{cfa} 2530 2579 \end{figure} 2531 2580 … … 2533 2582 \begin{multicols}{2} 2534 2583 Destructor Entry 2535 \begin{ pseudo}2584 \begin{cfa} 2536 2585 if monitor is free 2537 2586 enter … … 2547 2596 wait 2548 2597 increment recursion 2549 \end{ pseudo}2598 \end{cfa} 2550 2599 \columnbreak 2551 2600 Waitfor 2552 \begin{ pseudo}2601 \begin{cfa} 2553 2602 if matching thread is already there 2554 2603 if found destructor … … 2570 2619 block 2571 2620 return 2572 \end{ pseudo}2621 \end{cfa} 2573 2622 \end{multicols} 2574 \begin{ pseudo}[caption={Pseudo code for the \code{waitfor} routine and the \code{mutex}entry routine for destructors},label={lst:entry-dtor}]2575 \end{ pseudo}2623 \begin{cfa}[caption={Pseudo code for the \protect\lstinline|waitfor| routine and the \protect\lstinline|mutex| entry routine for destructors},label={lst:entry-dtor}] 2624 \end{cfa} 2576 2625 \end{figure} 2577 2626 … … 2585 2634 2586 2635 \section{Threads As Monitors} 2587 As it was subtly alluded in section \ref{threads}, \code{thread}s in \CFA are in fact monitors, which means that all monitor features are available when using threads.2636 As it was subtly alluded in section \ref{threads}, @thread@s in \CFA are in fact monitors, which means that all monitor features are available when using threads. 2588 2637 For example, here is a very simple two thread pipeline that could be used for a simulator of a game engine: 2589 \begin{figure} [H]2590 \begin{cfa code}[caption={Toy simulator using \code{thread}s and \code{monitor}s.},label={lst:engine-v1}]2638 \begin{figure} 2639 \begin{cfa}[caption={Toy simulator using \protect\lstinline|thread|s and \protect\lstinline|monitor|s.},label={lst:engine-v1}] 2591 2640 // Visualization declaration 2592 2641 thread Renderer {} renderer; … … 2615 2664 } 2616 2665 } 2617 \end{cfa code}2666 \end{cfa} 2618 2667 \end{figure} 2619 2668 One of the obvious complaints of the previous code snippet (other than its toy-like simplicity) is that it does not handle exit conditions and just goes on forever. 
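For contrast, coordinating such a shutdown in plain C typically means threading a termination flag through a mutex/condition pair by hand, along the lines of the following sketch (all names are illustrative):

#include <pthread.h>
#include <stdbool.h>

pthread_mutex_t frame_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  frame_ready = PTHREAD_COND_INITIALIZER;
bool have_frame = false, shutdown_req = false;

void * renderer( void * arg ) {                 // consumer thread
    (void)arg;
    pthread_mutex_lock( &frame_lock );
    for ( ;; ) {
        while ( ! have_frame && ! shutdown_req )
            pthread_cond_wait( &frame_ready, &frame_lock );
        if ( shutdown_req && ! have_frame ) break;  // drain any pending frame before exiting
        have_frame = false;                     // ... render the frame ...
    }
    pthread_mutex_unlock( &frame_lock );
    return NULL;
}

void post_frame( void ) {                       // called by the simulator thread
    pthread_mutex_lock( &frame_lock );
    have_frame = true;                          // ... fill in frame data ...
    pthread_cond_signal( &frame_ready );
    pthread_mutex_unlock( &frame_lock );
}

void request_shutdown( void ) {                 // called once the simulation ends
    pthread_mutex_lock( &frame_lock );
    shutdown_req = true;
    pthread_cond_signal( &frame_ready );
    pthread_mutex_unlock( &frame_lock );
}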
2620 2669 Luckily, the monitor semantics can also be used to clearly enforce a shutdown order in a concise manner: 2621 \begin{figure} [H]2622 \begin{cfa code}[caption={Same toy simulator with proper termination condition.},label={lst:engine-v2}]2670 \begin{figure} 2671 \begin{cfa}[caption={Same toy simulator with proper termination condition.},label={lst:engine-v2}] 2623 2672 // Visualization declaration 2624 2673 thread Renderer {} renderer; … … 2658 2707 // Call destructor for simulator once simulator finishes 2659 2708 // Call destructor for renderer to signify shutdown 2660 \end{cfa code}2709 \end{cfa} 2661 2710 \end{figure} 2662 2711 … … 2664 2713 As mentioned in section \ref{preemption}, \CFA uses preemptive threads by default but can use fibers on demand. 2665 2714 Currently, using fibers is done by adding the following line of code to the program~: 2666 \begin{cfa code}2715 \begin{cfa} 2667 2716 unsigned int default_preemption() { 2668 2717 return 0; 2669 2718 } 2670 \end{cfa code}2719 \end{cfa} 2671 2720 This function is called by the kernel to fetch the default preemption rate, where 0 signifies an infinite time-slice, i.e., no preemption. 2672 2721 However, once clusters are fully implemented, it will be possible to create fibers and \textbf{uthread} in the same system, as in listing \ref{lst:fiber-uthread} 2673 2722 \begin{figure} 2674 \begin{cfacode}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={lst:fiber-uthread}] 2675 //Cluster forward declaration 2723 \lstset{language=CFA,deletedelim=**[is][]{`}{`}} 2724 \begin{cfa}[caption={Using fibers and \textbf{uthread} side-by-side in \CFA},label={lst:fiber-uthread}] 2725 // Cluster forward declaration 2676 2726 struct cluster; 2677 2727 2678 // Processor forward declaration2728 // Processor forward declaration 2679 2729 struct processor; 2680 2730 2681 // Construct clusters with a preemption rate2731 // Construct clusters with a preemption rate 2682 2732 void ?{}(cluster& this, unsigned int rate); 2683 // Construct processor and add it to cluster2733 // Construct processor and add it to cluster 2684 2734 void ?{}(processor& this, cluster& cluster); 2685 // Construct thread and schedule it on cluster2735 // Construct thread and schedule it on cluster 2686 2736 void ?{}(thread& this, cluster& cluster); 2687 2737 2688 // Declare two clusters2689 cluster thread_cluster = { 10`ms }; // Preempt every 10 ms2690 cluster fibers_cluster = { 0 }; // Never preempt2691 2692 // Construct 4 processors2738 // Declare two clusters 2739 cluster thread_cluster = { 10`ms }; // Preempt every 10 ms 2740 cluster fibers_cluster = { 0 }; // Never preempt 2741 2742 // Construct 4 processors 2693 2743 processor processors[4] = { 2694 2744 //2 for the thread cluster … … 2700 2750 }; 2701 2751 2702 // Declares thread2752 // Declares thread 2703 2753 thread UThread {}; 2704 2754 void ?{}(UThread& this) { 2705 // Construct underlying thread to automatically2706 // be scheduled on the thread cluster2755 // Construct underlying thread to automatically 2756 // be scheduled on the thread cluster 2707 2757 (this){ thread_cluster } 2708 2758 } … … 2710 2760 void main(UThread & this); 2711 2761 2712 // Declares fibers2762 // Declares fibers 2713 2763 thread Fiber {}; 2714 2764 void ?{}(Fiber& this) { 2715 // Construct underlying thread to automatically2716 // be scheduled on the fiber cluster2765 // Construct underlying thread to automatically 2766 // be scheduled on the fiber cluster 2717 2767 (this.__thread){ fibers_cluster } 2718 2768 } 2719 
2769 2720 2770 void main(Fiber & this); 2721 \end{cfa code}2771 \end{cfa} 2722 2772 \end{figure} 2723 2773 … … 2731 2781 Table \ref{tab:machine} shows the characteristics of the machine used to run the benchmarks. 2732 2782 All tests were made on this machine. 2733 \begin{table} [H]2783 \begin{table} 2734 2784 \begin{center} 2735 2785 \begin{tabular}{| l | r | l | r |} … … 2763 2813 2764 2814 \section{Micro Benchmarks} 2765 All benchmarks are run using the same harness to produce the results, seen as the \code{BENCH()}macro in the following examples.2815 All benchmarks are run using the same harness to produce the results, seen as the @BENCH()@ macro in the following examples. 2766 2816 This macro uses the following logic to benchmark the code: 2767 \begin{ pseudo}2817 \begin{cfa} 2768 2818 #define BENCH(run, result) \ 2769 2819 before = gettime(); \ … … 2771 2821 after = gettime(); \ 2772 2822 result = (after - before) / N; 2773 \end{ pseudo}2774 The method used to get time is \code{clock_gettime(CLOCK_THREAD_CPUTIME_ID);}.2823 \end{cfa} 2824 The method used to get time is @clock_gettime(CLOCK_THREAD_CPUTIME_ID);@. 2775 2825 Each benchmark is using many iterations of a simple call to measure the cost of the call. 2776 2826 The specific number of iterations depends on the specific benchmark. … … 2787 2837 \begin{multicols}{2} 2788 2838 \CFA Coroutines 2789 \begin{cfa code}2839 \begin{cfa} 2790 2840 coroutine GreatSuspender {}; 2791 2841 void main(GreatSuspender& this) { … … 2803 2853 printf("%llu\n", result); 2804 2854 } 2805 \end{cfa code}2855 \end{cfa} 2806 2856 \columnbreak 2807 2857 \CFA Threads 2808 \begin{cfa code}2858 \begin{cfa} 2809 2859 2810 2860 … … 2822 2872 printf("%llu\n", result); 2823 2873 } 2824 \end{cfa code}2874 \end{cfa} 2825 2875 \end{multicols} 2826 \begin{cfa code}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={lst:ctx-switch}]2827 \end{cfa code}2876 \begin{cfa}[caption={\CFA benchmark code used to measure context-switches for coroutines and threads.},label={lst:ctx-switch}] 2877 \end{cfa} 2828 2878 \end{figure} 2829 2879 … … 2853 2903 For monitors, the simplest approach is to measure how long it takes to enter and leave a monitor routine. 2854 2904 Listing \ref{lst:mutex} shows the code for \CFA. 2855 To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a \code{pthread_mutex}lock is also measured.2905 To put the results in context, the cost of entering a non-inline function and the cost of acquiring and releasing a @pthread_mutex@ lock is also measured. 2856 2906 The results can be shown in table \ref{tab:mutex}. 
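The pthreads baseline in that table can be approximated with a harness of the following shape, using the same thread-CPU clock and per-iteration averaging as the BENCH macro described above; this is an independent sketch, not the benchmark's actual source.

#include <stdio.h>
#include <time.h>
#include <pthread.h>

#define N 10000000ull

static unsigned long long now_ns( void ) {      // thread CPU time, as in the BENCH macro
    struct timespec ts;
    clock_gettime( CLOCK_THREAD_CPUTIME_ID, &ts );
    return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main( void ) {
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    unsigned long long before = now_ns();
    for ( unsigned long long i = 0; i < N; i += 1 ) {   // measured body: acquire/release
        pthread_mutex_lock( &lock );
        pthread_mutex_unlock( &lock );
    }
    unsigned long long after = now_ns();
    printf( "%llu ns per lock/unlock\n", ( after - before ) / N );
    return 0;
}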
2857 2907 2858 2908 \begin{figure} 2859 \begin{cfa code}[caption={\CFA benchmark code used to measure mutex routines.},label={lst:mutex}]2909 \begin{cfa}[caption={\CFA benchmark code used to measure mutex routines.},label={lst:mutex}] 2860 2910 monitor M {}; 2861 2911 void __attribute__((noinline)) call( M & mutex m /*, m2, m3, m4*/ ) {} … … 2871 2921 printf("%llu\n", result); 2872 2922 } 2873 \end{cfa code}2923 \end{cfa} 2874 2924 \end{figure} 2875 2925 … … 2883 2933 FetchAdd + FetchSub & 26 & 26 & 0 \\ 2884 2934 Pthreads Mutex Lock & 31 & 31.86 & 0.99 \\ 2885 \uC \code{monitor}member routine & 30 & 30 & 0 \\2886 \CFA \code{mutex}routine, 1 argument & 41 & 41.57 & 0.9 \\2887 \CFA \code{mutex}routine, 2 argument & 76 & 76.96 & 1.57 \\2888 \CFA \code{mutex}routine, 4 argument & 145 & 146.68 & 3.85 \\2935 \uC @monitor@ member routine & 30 & 30 & 0 \\ 2936 \CFA @mutex@ routine, 1 argument & 41 & 41.57 & 0.9 \\ 2937 \CFA @mutex@ routine, 2 argument & 76 & 76.96 & 1.57 \\ 2938 \CFA @mutex@ routine, 4 argument & 145 & 146.68 & 3.85 \\ 2889 2939 Java synchronized routine & 27 & 28.57 & 2.6 \\ 2890 2940 \hline … … 2902 2952 2903 2953 \begin{figure} 2904 \begin{cfa code}[caption={Benchmark code for internal scheduling},label={lst:int-sched}]2954 \begin{cfa}[caption={Benchmark code for internal scheduling},label={lst:int-sched}] 2905 2955 volatile int go = 0; 2906 2956 condition c; … … 2932 2982 return do_wait(m1); 2933 2983 } 2934 \end{cfa code}2984 \end{cfa} 2935 2985 \end{figure} 2936 2986 … … 2942 2992 \hline 2943 2993 Pthreads Condition Variable & 5902.5 & 6093.29 & 714.78 \\ 2944 \uC \code{signal}& 322 & 323 & 3.36 \\2945 \CFA \code{signal}, 1 \code{monitor}& 352.5 & 353.11 & 3.66 \\2946 \CFA \code{signal}, 2 \code{monitor}& 430 & 430.29 & 8.97 \\2947 \CFA \code{signal}, 4 \code{monitor}& 594.5 & 606.57 & 18.33 \\2948 Java \code{notify}& 13831.5 & 15698.21 & 4782.3 \\2994 \uC @signal@ & 322 & 323 & 3.36 \\ 2995 \CFA @signal@, 1 @monitor@ & 352.5 & 353.11 & 3.66 \\ 2996 \CFA @signal@, 2 @monitor@ & 430 & 430.29 & 8.97 \\ 2997 \CFA @signal@, 4 @monitor@ & 594.5 & 606.57 & 18.33 \\ 2998 Java @notify@ & 13831.5 & 15698.21 & 4782.3 \\ 2949 2999 \hline 2950 3000 \end{tabular} … … 2956 3006 2957 3007 \subsection{External Scheduling} 2958 The Internal scheduling benchmark measures the cost of the \code{waitfor} statement (\code{_Accept}in \uC).3008 The Internal scheduling benchmark measures the cost of the @waitfor@ statement (@_Accept@ in \uC). 2959 3009 Listing \ref{lst:ext-sched} shows the code for \CFA, with results in table \ref{tab:ext-sched}. 2960 3010 As with all other benchmarks, all omitted tests are functionally identical to one of these tests. 
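The pthreads condition-variable row in the internal-scheduling table above corresponds to a handshake of the following shape, where the cost of interest is the signal that wakes an already-blocked thread; the benchmark repeats the handshake many times, whereas this sketch shows a single round.

#include <pthread.h>
#include <stdbool.h>

pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
bool posted = false;

void * waiter( void * arg ) {                   // stands in for the thread blocked in wait()
    (void)arg;
    pthread_mutex_lock( &m );
    while ( ! posted )
        pthread_cond_wait( &c, &m );
    pthread_mutex_unlock( &m );
    return NULL;
}

int main( void ) {
    pthread_t t;
    pthread_create( &t, NULL, waiter, NULL );
    // ... wait until the waiter has blocked, then start timing ...
    pthread_mutex_lock( &m );
    posted = true;
    pthread_cond_signal( &c );                  // the operation being timed
    pthread_mutex_unlock( &m );
    pthread_join( t, NULL );
    return 0;
}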
2961 3011 2962 3012 \begin{figure} 2963 \begin{cfa code}[caption={Benchmark code for external scheduling},label={lst:ext-sched}]3013 \begin{cfa}[caption={Benchmark code for external scheduling},label={lst:ext-sched}] 2964 3014 volatile int go = 0; 2965 3015 monitor M {}; … … 2990 3040 return do_wait(m1); 2991 3041 } 2992 \end{cfa code}3042 \end{cfa} 2993 3043 \end{figure} 2994 3044 … … 2999 3049 \multicolumn{1}{c |}{} & \multicolumn{1}{c |}{ Median } &\multicolumn{1}{c |}{ Average } & \multicolumn{1}{c |}{ Standard Deviation} \\ 3000 3050 \hline 3001 \uC \code{Accept}& 350 & 350.61 & 3.11 \\3002 \CFA \code{waitfor}, 1 \code{monitor}& 358.5 & 358.36 & 3.82 \\3003 \CFA \code{waitfor}, 2 \code{monitor}& 422 & 426.79 & 7.95 \\3004 \CFA \code{waitfor}, 4 \code{monitor}& 579.5 & 585.46 & 11.25 \\3051 \uC @Accept@ & 350 & 350.61 & 3.11 \\ 3052 \CFA @waitfor@, 1 @monitor@ & 358.5 & 358.36 & 3.82 \\ 3053 \CFA @waitfor@, 2 @monitor@ & 422 & 426.79 & 7.95 \\ 3054 \CFA @waitfor@, 4 @monitor@ & 579.5 & 585.46 & 11.25 \\ 3005 3055 \hline 3006 3056 \end{tabular} … … 3013 3063 \subsection{Object Creation} 3014 3064 Finally, the last benchmark measures the cost of creation for concurrent objects. 3015 Listing \ref{lst:creation} shows the code for \texttt{pthread}s and \CFA threads, with results shown in table \ref{tab:creation}.3065 Listing \ref{lst:creation} shows the code for @pthread@s and \CFA threads, with results shown in table \ref{tab:creation}. 3016 3066 As with all other benchmarks, all omitted tests are functionally identical to one of these tests. 3017 3067 The only note here is that the call stacks of \CFA coroutines are lazily created, therefore without priming the coroutine, the creation cost is very low. … … 3019 3069 \begin{figure} 3020 3070 \begin{center} 3021 \texttt{pthread} 3022 \begin{c code}3071 @pthread@ 3072 \begin{cfa} 3023 3073 int main() { 3024 3074 BENCH( … … 3039 3089 printf("%llu\n", result); 3040 3090 } 3041 \end{c code}3091 \end{cfa} 3042 3092 3043 3093 3044 3094 3045 3095 \CFA Threads 3046 \begin{cfa code}3096 \begin{cfa} 3047 3097 int main() { 3048 3098 BENCH( … … 3054 3104 printf("%llu\n", result); 3055 3105 } 3056 \end{cfa code}3106 \end{cfa} 3057 3107 \end{center} 3058 \ begin{cfacode}[caption={Benchmark code for \texttt{pthread}s and \CFA to measure object creation},label={lst:creation}]3059 \ end{cfacode}3108 \caption{Benchmark code for \protect\lstinline|pthread|s and \CFA to measure object creation} 3109 \label{lst:creation} 3060 3110 \end{figure} 3061 3111 … … 3141 3191 \begin{tabular}[t]{|c|c|c|} 3142 3192 Sequential & Library Parallel & Language Parallel \\ 3143 \begin{cfa code}[tabsize=3]3193 \begin{cfa}[tabsize=3] 3144 3194 void big_sum( 3145 3195 int* a, int* b, … … 3165 3215 //... fill in a & b 3166 3216 big_sum(a,b,c,10000); 3167 \end{cfa code} &\begin{cfacode}[tabsize=3]3217 \end{cfa} &\begin{cfa}[tabsize=3] 3168 3218 void big_sum( 3169 3219 int* a, int* b, … … 3189 3239 //... fill in a & b 3190 3240 big_sum(a,b,c,10000); 3191 \end{cfa code}&\begin{cfacode}[tabsize=3]3241 \end{cfa}&\begin{cfa}[tabsize=3] 3192 3242 void big_sum( 3193 3243 int* a, int* b, … … 3213 3263 //... fill in a & b 3214 3264 big_sum(a,b,c,10000); 3215 \end{cfa code}3265 \end{cfa} 3216 3266 \end{tabular} 3217 3267 \end{center} -
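As a rough plain-C counterpart to the library-parallel column of the big_sum example, the array can be sliced across worker threads by hand, as in the sketch below; the thread count, chunking, and names are arbitrary choices made only for illustration.

#include <pthread.h>
#include <stdlib.h>

#define NTHREADS 4

struct slice { const int * a, * b; int * c; size_t begin, end; };

static void * sum_slice( void * arg ) {         // each worker sums its own sub-range
    struct slice * s = arg;
    for ( size_t i = s->begin; i < s->end; i += 1 )
        s->c[i] = s->a[i] + s->b[i];
    return NULL;
}

void big_sum_parallel( const int * a, const int * b, int * c, size_t n ) {
    pthread_t workers[NTHREADS];
    struct slice slices[NTHREADS];
    size_t chunk = n / NTHREADS;
    for ( int t = 0; t < NTHREADS; t += 1 ) {
        slices[t] = (struct slice){ a, b, c, t * chunk,
                                    t == NTHREADS - 1 ? n : ( t + 1 ) * chunk };
        pthread_create( &workers[t], NULL, sum_slice, &slices[t] );
    }
    for ( int t = 0; t < NTHREADS; t += 1 )
        pthread_join( workers[t], NULL );       // barrier: wait for all slices to finish
}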
doc/papers/concurrency/annex/local.bib
ra9b1b0c rb5563e1 21 21 @string{pldi="Programming Language Design and Implementation"} 22 22 23 24 @article{HPP:Study, 25 keywords = {Parallel, Productivity}, 26 author = {Lorin Hochstein and Jeff Carver and Forrest Shull and Sima Asgari and Victor Basili and Jeffrey K. Hollingsworth and Marvin V. Zelkowitz }, 27 title = {Parallel Programmer Productivity: A Case Study of Novice Parallel Programmers}, 23 @inproceedings{Hochstein05, 24 keywords = {Application software; Computer aided software engineering; Concurrent computing; Educational 25 institutions; High performance computing; Humans; Instruments; Productivity; Programming profession; 26 Software engineering}, 27 author = {Lorin Hochstein and Jeff Carver and Forrest Shull and Sima Asgari and Victor Basili and Jeffrey K. Hollingsworth and Marvin V. Zelkowitz}, 28 title = {Parallel Programmer Productivity: A Case Study of Novice Parallel Programmers}, 29 booktitle = {Supercomputing, 2005. Proceedings of the ACM/IEEE SC 2005 Conference}, 30 publisher = {IEEE}, 31 year = {2005}, 32 pages = {35-35}, 33 month = nov, 28 34 } 29 35 … … 35 41 } 36 42 37 @article{TBB, 38 key = {TBB}, 39 keywords = {Intel, TBB}, 40 title = {Intel Thread Building Blocks}, 41 note = "\url{https://www.threadingbuildingblocks.org/}" 43 @misc{TBB, 44 keywords = {Intel, TBB}, 45 key = {TBB}, 46 title = {Thread Building Blocks}, 47 howpublished= {Intel, \url{https://www.threadingbuildingblocks.org}}, 48 note = {Accessed: 2018-3}, 42 49 } 43 50 … … 48 55 title = {C$\forall$ Programmming Language}, 49 56 note = {\url{https://plg.uwaterloo.ca/~cforall}}, 50 }51 52 @mastersthesis{rob-thesis,53 keywords = {Constructors, Destructors, Tuples},54 author = {Rob Schluntz},55 title = {Resource Management and Tuples in Cforall},56 year = 2017,57 school = {University of Waterloo},58 note = {\url{https://uwspace.uwaterloo.ca/handle/10012/11830}},59 57 } 60 58 -
doc/papers/concurrency/style/cfa-format.tex
ra9b1b0c rb5563e1 11 11 % from https://gist.github.com/nikolajquorning/92bbbeef32e1dd80105c9bf2daceb89a 12 12 \lstdefinelanguage{sml} { 13 morekeywords= { 14 EQUAL, GREATER, LESS, NONE, SOME, abstraction, abstype, and, andalso, array, as, before, bool, case, char, datatype, do, else, end, eqtype, exception, exn, false, fn, fun, functor, handle, if, in, include, infix, infixr, int, let, list, local, nil, nonfix, not, o, of, op, open, option, orelse, overload, print, raise, real, rec, ref, sharing, sig, signature, string, struct, structure, substring, then, true, type, unit, val, vector, where, while, with, withtype, word 15 }, 16 morestring=[b]", 17 morecomment=[s]{(*}{*)}, 13 morekeywords= { 14 EQUAL, GREATER, LESS, NONE, SOME, abstraction, abstype, and, andalso, array, as, before, 15 bool, case, char, datatype, do, else, end, eqtype, exception, exn, false, fn, fun, functor, 16 handle, if, in, include, infix, infixr, int, let, list, local, nil, nonfix, not, o, of, op, 17 open, option, orelse, overload, print, raise, real, rec, ref, sharing, sig, signature, 18 string, struct, structure, substring, then, true, type, unit, val, vector, where, while, 19 with, withtype, word 20 }, 21 morestring=[b]", 22 morecomment=[s]{(*}{*)}, 18 23 } 19 24 20 25 \lstdefinelanguage{D}{ 21 % Keywords 22 morekeywords=[1]{ 23 abstract, alias, align, auto, body, break, cast, catch, class, const, 24 continue, debug, delegate, delete, deprecated, do, else, enum, export, 25 false, final, finally, for, foreach, foreach_reverse, function, goto, if, 26 immutable, import, in, inout, interface, invariant, is, lazy, macro, mixin, 27 module, new, nothrow, null, out, override, package, pragma, private, 28 protected, public, pure, ref, return, shared, static, struct, super, 29 switch, synchronized, template, this, throw, true, try, typedef, typeid, 30 typeof, union, unittest, volatile, while, with 31 }, 32 % Special identifiers, common functions 33 morekeywords=[2]{enforce}, 34 % Ugly identifiers 35 morekeywords=[3]{ 36 __DATE__, __EOF__, __FILE__, __LINE__, __TIMESTAMP__, __TIME__, __VENDOR__, 37 __VERSION__, __ctfe, __gshared, __monitor, __thread, __vptr, _argptr, 38 _arguments, _ctor, _dtor 39 }, 40 % Basic types 41 morekeywords=[4]{ 42 byte, ubyte, short, ushort, int, uint, long, ulong, cent, ucent, void, 43 bool, bit, float, double, real, ushort, int, uint, long, ulong, float, 44 char, wchar, dchar, string, wstring, dstring, ireal, ifloat, idouble, 45 creal, cfloat, cdouble, size_t, ptrdiff_t, sizediff_t, equals_t, hash_t 46 }, 47 % Strings 48 morestring=[b]{"}, 49 morestring=[b]{'}, 50 morestring=[b]{`}, 51 % Comments 52 comment=[l]{//}, 53 morecomment=[s]{/*}{*/}, 54 morecomment=[s][\color{blue}]{/**}{*/}, 55 morecomment=[n]{/+}{+/}, 56 morecomment=[n][\color{blue}]{/++}{+/}, 57 % Options 58 sensitive=true 26 % Keywords 27 morekeywords=[1]{ 28 abstract, alias, align, auto, body, break, cast, catch, class, const, continue, debug, 29 delegate, delete, deprecated, do, else, enum, export, false, final, finally, for, foreach, 30 foreach_reverse, function, goto, if, immutable, import, in, inout, interface, invariant, is, 31 lazy, macro, mixin, module, new, nothrow, null, out, override, package, pragma, private, 32 protected, public, pure, ref, return, shared, static, struct, super, switch, synchronized, 33 template, this, throw, true, try, typedef, typeid, typeof, union, unittest, volatile, while, 34 with 35 }, 36 % Special identifiers, common functions 37 morekeywords=[2]{enforce}, 38 % Ugly identifiers 39 morekeywords=[3]{ 
40 __DATE__, __EOF__, __FILE__, __LINE__, __TIMESTAMP__, __TIME__, __VENDOR__, 41 __VERSION__, __ctfe, __gshared, __monitor, __thread, __vptr, _argptr, 42 _arguments, _ctor, _dtor 43 }, 44 % Basic types 45 morekeywords=[4]{ 46 byte, ubyte, short, ushort, int, uint, long, ulong, cent, ucent, void, bool, bit, float, 47 double, real, ushort, int, uint, long, ulong, float, char, wchar, dchar, string, wstring, 48 dstring, ireal, ifloat, idouble, creal, cfloat, cdouble, size_t, ptrdiff_t, sizediff_t, 49 equals_t, hash_t 50 }, 51 % Strings 52 morestring=[b]{"}, 53 morestring=[b]{'}, 54 morestring=[b]{`}, 55 % Comments 56 comment=[l]{//}, 57 morecomment=[s]{/*}{*/}, 58 morecomment=[s][\color{blue}]{/**}{*/}, 59 morecomment=[n]{/+}{+/}, 60 morecomment=[n][\color{blue}]{/++}{+/}, 61 % Options 62 sensitive=true 59 63 } 60 64 61 65 \lstdefinelanguage{rust}{ 62 % Keywords 63 morekeywords=[1]{ 64 abstract, alignof, as, become, box, 65 break, const, continue, crate, do, 66 else, enum, extern, false, final, 67 fn, for, if, impl, in, 68 let, loop, macro, match, mod, 69 move, mut, offsetof, override, priv, 70 proc, pub, pure, ref, return, 71 Self, self, sizeof, static, struct, 72 super, trait, true, type, typeof, 73 unsafe, unsized, use, virtual, where, 74 while, yield 75 }, 76 % Strings 77 morestring=[b]{"}, 78 % Comments 79 comment=[l]{//}, 80 morecomment=[s]{/*}{*/}, 81 % Options 82 sensitive=true 66 % Keywords 67 morekeywords=[1]{ 68 abstract, alignof, as, become, box, break, const, continue, crate, do, else, enum, extern, 69 false, final, fn, for, if, impl, in, let, loop, macro, match, mod, move, mut, offsetof, 70 override, priv, proc, pub, pure, ref, return, Self, self, sizeof, static, struct, super, 71 trait, true, type, typeof, unsafe, unsized, use, virtual, where, while, yield 72 }, 73 % Strings 74 morestring=[b]{"}, 75 % Comments 76 comment=[l]{//}, 77 morecomment=[s]{/*}{*/}, 78 % Options 79 sensitive=true 83 80 } 84 81 85 82 \lstdefinelanguage{pseudo}{ 86 morekeywords={string,uint,int,bool,float}, %87 sensitive=true, %88 morecomment=[l]{//}, %89 morecomment=[s]{/*}{*/}, %90 morestring=[b]', %91 morestring=[b]", %92 morestring=[s]{`}{`}, %93 } %83 morekeywords={string,uint,int,bool,float}, 84 sensitive=true, 85 morecomment=[l]{//}, 86 morecomment=[s]{/*}{*/}, 87 morestring=[b]', 88 morestring=[b]", 89 morestring=[s]{`}{`}, 90 } 94 91 95 92 \newcommand{\KWC}{K-W C\xspace} 96 93 97 94 \lstdefinestyle{pseudoStyle}{ 98 99 100 101 102 103 104 stringstyle=\sf\color{Mahogany},% use sanserif font105 106 107 aboveskip=4pt,% spacing above/below code block108 109 110 111 112 113 showlines=true,% show blank lines at end of code114 115 116 117 xleftmargin=\parindentlnth,% indent code to paragraph indentation118 119 120 95 escapeinside={@@}, 96 basicstyle=\linespread{0.9}\sf\footnotesize, % reduce line spacing and use typewriter font 97 keywordstyle=\bfseries\color{blue}, 98 keywordstyle=[2]\bfseries\color{Plum}, 99 commentstyle=\itshape\color{OliveGreen}, % green and italic comments 100 identifierstyle=\color{identifierCol}, 101 stringstyle=\sf\color{Mahogany}, % use sanserif font 102 mathescape=true, 103 columns=fixed, 104 aboveskip=4pt, % spacing above/below code block 105 belowskip=3pt, 106 keepspaces=true, 107 tabsize=4, 108 % frame=lines, 109 literate=, 110 showlines=true, % show blank lines at end of code 111 showspaces=false, 112 showstringspaces=false, 113 escapechar=\$, 114 xleftmargin=\parindentlnth, % indent code to paragraph indentation 115 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red 
highlighting 116 % moredelim=* detects keywords, comments, strings, and other delimiters and applies their formatting 117 % moredelim=** allows cumulative application 121 118 } 122 119 123 120 \lstdefinestyle{defaultStyle}{ 124 125 126 127 128 129 130 stringstyle=\sf\color{Mahogany},% use sanserif font131 132 133 aboveskip=4pt,% spacing above/below code block134 135 136 137 138 139 showlines=true,% show blank lines at end of code140 141 142 143 xleftmargin=\parindentlnth,% indent code to paragraph indentation144 145 146 121 escapeinside={@@}, 122 basicstyle=\linespread{0.9}\tt\footnotesize, % reduce line spacing and use typewriter font 123 keywordstyle=\bfseries\color{blue}, 124 keywordstyle=[2]\bfseries\color{Plum}, 125 commentstyle=\itshape\color{OliveGreen}, % green and italic comments 126 identifierstyle=\color{identifierCol}, 127 stringstyle=\sf\color{Mahogany}, % use sanserif font 128 mathescape=true, 129 columns=fixed, 130 aboveskip=4pt, % spacing above/below code block 131 belowskip=3pt, 132 keepspaces=true, 133 tabsize=4, 134 % frame=lines, 135 literate=, 136 showlines=true, % show blank lines at end of code 137 showspaces=false, 138 showstringspaces=false, 139 escapechar=\$, 140 xleftmargin=\parindentlnth, % indent code to paragraph indentation 141 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red highlighting 142 % moredelim=* detects keywords, comments, strings, and other delimiters and applies their formatting 143 % moredelim=** allows cumulative application 147 144 } 148 145 149 146 \lstdefinestyle{cfaStyle}{ 150 151 147 escapeinside={@@}, 148 basicstyle=\linespread{0.9}\sf, % reduce line spacing and use typewriter font 152 149 % keywordstyle=\bfseries\color{blue}, 153 150 keywordstyle=[2]\bfseries\color{red}, 154 151 % commentstyle=\sf\itshape\color{OliveGreen}, % green and italic comments 155 156 % stringstyle=\sf\color{Mahogany}, 157 158 159 160 aboveskip=4pt,% spacing above/below code block161 162 163 164 165 166 showlines=true,% show blank lines at end of code167 168 169 170 xleftmargin=\parindentlnth,% indent code to paragraph indentation171 172 152 identifierstyle=\color{identifierCol}, 153 % stringstyle=\sf\color{Mahogany}, % use sanserif font 154 stringstyle=\tt, % use typewriter font 155 mathescape=true, 156 columns=fixed, 157 aboveskip=4pt, % spacing above/below code block 158 belowskip=3pt, 159 keepspaces=true, 160 tabsize=4, 161 % frame=lines, 162 literate=, 163 showlines=true, % show blank lines at end of code 164 showspaces=false, 165 showstringspaces=false, 166 escapechar=\$, 167 xleftmargin=\parindentlnth, % indent code to paragraph indentation 168 moredelim=[is][\color{red}\bfseries]{**R**}{**R**}, % red highlighting 169 morekeywords=[2]{accept, signal, signal_block, wait, waitfor}, 173 170 } 174 171 … … 176 173 177 174 \lstnewenvironment{ccode}[1][]{ 178 179 180 181 182 183 175 \lstset{ 176 language = C, 177 style=defaultStyle, 178 captionpos=b, 179 #1 180 } 184 181 }{} 185 182 186 183 \lstnewenvironment{cfacode}[1][]{ 187 188 189 190 191 192 184 \lstset{ 185 language = CFA, 186 style=cfaStyle, 187 captionpos=b, 188 #1 189 } 193 190 }{} 194 191 195 192 \lstnewenvironment{pseudo}[1][]{ 196 197 198 199 200 201 193 \lstset{ 194 language = pseudo, 195 style=pseudoStyle, 196 captionpos=b, 197 #1 198 } 202 199 }{} 203 200 204 201 \lstnewenvironment{cppcode}[1][]{ 205 206 207 208 209 210 202 \lstset{ 203 language = c++, 204 style=defaultStyle, 205 captionpos=b, 206 #1 207 } 211 208 }{} 212 209 213 210 \lstnewenvironment{ucppcode}[1][]{ 214 215 216 217 218 
219 211 \lstset{ 212 language = c++, 213 style=defaultStyle, 214 captionpos=b, 215 #1 216 } 220 217 }{} 221 218 222 219 \lstnewenvironment{javacode}[1][]{ 223 224 225 226 227 228 220 \lstset{ 221 language = java, 222 style=defaultStyle, 223 captionpos=b, 224 #1 225 } 229 226 }{} 230 227 231 228 \lstnewenvironment{scalacode}[1][]{ 232 233 234 235 236 237 229 \lstset{ 230 language = scala, 231 style=defaultStyle, 232 captionpos=b, 233 #1 234 } 238 235 }{} 239 236 240 237 \lstnewenvironment{smlcode}[1][]{ 241 242 243 244 245 246 238 \lstset{ 239 language = sml, 240 style=defaultStyle, 241 captionpos=b, 242 #1 243 } 247 244 }{} 248 245 249 246 \lstnewenvironment{dcode}[1][]{ 250 251 252 253 254 255 247 \lstset{ 248 language = D, 249 style=defaultStyle, 250 captionpos=b, 251 #1 252 } 256 253 }{} 257 254 258 255 \lstnewenvironment{rustcode}[1][]{ 259 260 261 262 263 264 256 \lstset{ 257 language = rust, 258 style=defaultStyle, 259 captionpos=b, 260 #1 261 } 265 262 }{} 266 263 267 264 \lstnewenvironment{gocode}[1][]{ 268 269 270 271 272 273 265 \lstset{ 266 language = Golang, 267 style=defaultStyle, 268 captionpos=b, 269 #1 270 } 274 271 }{} 275 272 … … 279 276 \newcommand{\code}[1]{\lstinline[language=CFA,style=cfaStyle]{#1}} 280 277 \newcommand{\pscode}[1]{\lstinline[language=pseudo,style=pseudoStyle]{#1}} 278 279 % Local Variables: % 280 % tab-width: 4 % 281 % fill-column: 100 % 282 % End: % -
doc/papers/general/Paper.tex
ra9b1b0c rb5563e1 671 671 \begin{cfa} 672 672 int f( int, int ); 673 intg( [int, int] );674 inth( int, [int, int] );673 [int] g( [int, int] ); 674 [int] h( int, [int, int] ); 675 675 [int, int] x; 676 676 int y; … … 706 706 This example shows mass, multiple, and cascading assignment used in one expression: 707 707 \begin{cfa} 708 voidf( [int, int] );708 [void] f( [int, int] ); 709 709 f( [x, y] = z = 1.5 ); $\C{// assignments in parameter list}$ 710 710 \end{cfa} … … 814 814 Flattening and restructuring conversions are also applied to tuple types in polymorphic type assertions. 815 815 \begin{cfa} 816 intf( [int, double], double );816 [int] f( [int, double], double ); 817 817 forall( otype T, otype U | { T f( T, U, U ); } ) void g( T, U ); 818 818 g( 5, 10.21 ); … … 1152 1152 case 4: 1153 1153 ... `fallthrough common;` 1154 common: // below fallthrough at same level as case clauses1154 `common`: // below fallthrough at same level as case clauses 1155 1155 ... // common code for cases 3 and 4 1156 1156 // implicit break … … 1857 1857 \begin{cfa} 1858 1858 struct S { double x, y; }; 1859 int i, j;1859 int x, y; 1860 1860 void f( int & i, int & j, S & s, int v[] ); 1861 f( 3, i + j, (S){ 1.0, 7.0 }, (int [3]){ 1, 2, 3 } );$\C{// pass rvalue to lvalue \(\Rightarrow\) implicit temporary}$1861 f( 3, x + y, (S){ 1.0, 7.0 }, (int [3]){ 1, 2, 3 } ); $\C{// pass rvalue to lvalue \(\Rightarrow\) implicit temporary}$ 1862 1862 \end{cfa} 1863 1863 This allows complex values to be succinctly and efficiently passed to functions, without the syntactic overhead of explicit definition of a temporary variable or the runtime cost of pass-by-value. … … 1946 1946 1947 1947 One of the strengths (and weaknesses) of C is memory-management control, allowing resource release to be precisely specified versus unknown release with garbage-collected memory-management. 1948 However, this manual approach is oftenverbose, and it is useful to manage resources other than memory (\eg file handles) using the same mechanism as memory.1948 However, this manual approach is verbose, and it is useful to manage resources other than memory (\eg file handles) using the same mechanism as memory. 1949 1949 \CC addresses these issues using Resource Aquisition Is Initialization (RAII), implemented by means of \newterm{constructor} and \newterm{destructor} functions; 1950 1950 \CFA adopts constructors and destructors (and @finally@) to facilitate RAII. … … 1998 1998 { 1999 1999 VLA x, y = { 20, 0x01 }, z = y; $\C{// z points to y}$ 2000 // ?{}( x ); ?{}( y, 20, 0x01 );?{}( z, y );2000 // ?{}( x ); ?{}( y, 20, 0x01 ); ?{}( z, y ); 2001 2001 ^x{}; $\C{// deallocate x}$ 2002 2002 x{}; $\C{// reallocate x}$ … … 2099 2099 2100 2100 For readability, it is useful to associate units to scale literals, \eg weight (stone, pound, kilogram) or time (seconds, minutes, hours). 2101 The left of Figure~\ref{f:UserLiteral} shows the \CFA alternative call-syntax ( literal argument before function name), using the backquote, to convert basic literals into user literals.2101 The left of Figure~\ref{f:UserLiteral} shows the \CFA alternative call-syntax (postfix: literal argument before function name), using the backquote, to convert basic literals into user literals. 2102 2102 The backquote is a small character, making the unit (function name) predominate. 
2103 2103 For examples, the multi-precision integer-type in Section~\ref{s:MultiPrecisionIntegers} has user literals: … … 2107 2107 y = "12345678901234567890123456789"|`mp| + "12345678901234567890123456789"|`mp|; 2108 2108 \end{cfa} 2109 Because \CFA uses a standard function, all types and literals are applicable, as well as overloading and conversions .2109 Because \CFA uses a standard function, all types and literals are applicable, as well as overloading and conversions, where @?`@ denotes a postfix-function name and @`@ denotes a postfix-function call. 2110 2110 }% 2111 \begin{cquote} 2112 \lstset{language=CFA,moredelim=**[is][\color{red}]{|}{|},deletedelim=**[is][]{`}{`}} 2113 \lstDeleteShortInline@% 2114 \begin{tabular}{@{}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{\hspace{2\parindentlnth}}l@{}} 2115 \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{postfix function}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{constant}} & \multicolumn{1}{c@{\hspace{2\parindentlnth}}}{\textbf{variable/expression}} & \multicolumn{1}{c}{\textbf{postfix pointer}} \\ 2116 \begin{cfa} 2117 int ?`h( int s ); 2118 int ?`h( double s ); 2119 int ?`m( char c ); 2120 int ?`m( const char * s ); 2121 int ?`t( int a, int b, int c ); 2122 \end{cfa} 2123 & 2124 \begin{cfa} 2125 0 `h; 2126 3.5`h; 2127 '1'`m; 2128 "123" "456"`m; 2129 [1,2,3]`t; 2130 \end{cfa} 2131 & 2132 \begin{cfa} 2133 int i = 7; 2134 i`h; 2135 (i + 3)`h; 2136 (i + 3.5)`h; 2137 2138 \end{cfa} 2139 & 2140 \begin{cfa} 2141 int (* ?`p)( int i ); 2142 ?`p = ?`h; 2143 3`p; 2144 i`p; 2145 (i + 3)`p; 2146 \end{cfa} 2147 \end{tabular} 2148 \lstMakeShortInline@% 2149 \end{cquote} 2111 2150 2112 2151 The right of Figure~\ref{f:UserLiteral} shows the equivalent \CC version using the underscore for the call-syntax. -
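Because the backquote form is ordinary function-call syntax, every postfix use corresponds to a plain prefix call of the ?`-named function. In standard C, which has neither overloading nor the postfix spelling, the same idea collapses to explicitly named conversion functions; the function bodies below are placeholders chosen only to make the example runnable.

#include <stdio.h>

// Prefix equivalents of the postfix unit functions; separate names are needed
// because C cannot overload on parameter type.
int hours_i( int h )    { return h * 3600; }                // 0`h   ->  hours_i( 0 )
int hours_d( double h ) { return (int)( h * 3600.0 ); }     // 3.5`h ->  hours_d( 3.5 )
int metres_c( char c )  { return c - '0'; }                 // '1'`m ->  metres_c( '1' )

int main( void ) {
    printf( "%d %d %d\n", hours_i( 0 ), hours_d( 3.5 ), metres_c( '1' ) );
    return 0;
}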
src/Parser/ExpressionNode.cc
ra9b1b0c rb5563e1 10 10 // Created On : Sat May 16 13:17:07 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Mar 3 18:22:33201813 // Update Count : 79612 // Last Modified On : Thu Mar 22 11:57:39 2018 13 // Update Count : 801 14 14 // 15 15 … … 94 94 } // checkLNInt 95 95 96 static void sepNumeric( string & str, string & units ) {97 string::size_type posn = str.find_first_of( "`" );98 if ( posn != string::npos ) {99 units = "?" + str.substr( posn ); // extract units100 str.erase( posn ); // remove units101 } // if102 } // sepNumeric103 104 96 Expression * build_constantInteger( string & str ) { 105 97 static const BasicType::Kind kind[2][6] = { … … 108 100 { BasicType::ShortUnsignedInt, BasicType::UnsignedChar, BasicType::UnsignedInt, BasicType::LongUnsignedInt, BasicType::LongLongUnsignedInt, BasicType::UnsignedInt128, }, 109 101 }; 110 111 string units;112 sepNumeric( str, units ); // separate constant from units113 102 114 103 bool dec = true, Unsigned = false; // decimal, unsigned constant … … 232 221 } // if 233 222 CLEANUP: 234 if ( units.length() != 0 ) {235 ret = new UntypedExpr( new NameExpr( units ), { ret } );236 } // if237 223 238 224 delete &str; // created by lex … … 268 254 }; 269 255 270 string units;271 sepNumeric( str, units ); // separate constant from units272 273 256 bool complx = false; // real, complex 274 257 int size = 1; // 0 => float, 1 => double, 2 => long double … … 303 286 if ( lnth != -1 ) { // explicit length ? 304 287 ret = new CastExpr( ret, new BasicType( Type::Qualifiers(), kind[complx][size] ) ); 305 } // if306 if ( units.length() != 0 ) {307 ret = new UntypedExpr( new NameExpr( units ), { ret } );308 288 } // if 309 289 -
src/Parser/lex.ll
ra9b1b0c rb5563e1 10 10 * Created On : Sat Sep 22 08:58:10 2001 11 11 * Last Modified By : Peter A. Buhr 12 * Last Modified On : Sat Mar 3 18:38:16 201813 * Update Count : 6 4012 * Last Modified On : Thu Mar 22 16:47:06 2018 13 * Update Count : 668 14 14 */ 15 15 … … 54 54 55 55 void rm_underscore() { 56 // Remove underscores in numeric constant by copying the non-underscore characters to the front of the string.56 // SKULLDUGGERY: remove underscores (ok to shorten?) 57 57 yyleng = 0; 58 for ( int i = 0; yytext[i] != '\0'; i += 1 ) { 59 if ( yytext[i] == '`' ) { 60 // copy user suffix 61 for ( ; yytext[i] != '\0'; i += 1 ) { 62 yytext[yyleng] = yytext[i]; 63 yyleng += 1; 64 } // for 65 break; 66 } // if 58 for ( int i = 0; yytext[i] != '\0'; i += 1 ) { // copying non-underscore characters to front of string 67 59 if ( yytext[i] != '_' ) { 68 60 yytext[yyleng] = yytext[i]; … … 71 63 } // for 72 64 yytext[yyleng] = '\0'; 73 } 65 } // rm_underscore 74 66 75 67 // Stop warning due to incorrectly generated flex code. … … 90 82 attr_identifier "@"{identifier} 91 83 92 user_suffix_opt ("`"{identifier})?93 94 84 // numeric constants, CFA: '_' in constant 95 85 hex_quad {hex}("_"?{hex}){3} 96 86 size_opt (8|16|32|64|128)? 97 87 length ("ll"|"LL"|[lL]{size_opt})|("hh"|"HH"|[hH]) 98 integer_suffix_opt ("_"?(([uU]({length}?[iI]?)|([iI]{length}))|([iI]({length}?[uU]?)|([uU]{length}))|({length}([iI]?[uU]?)|([uU][iI]))|[zZ]))? {user_suffix_opt}88 integer_suffix_opt ("_"?(([uU]({length}?[iI]?)|([iI]{length}))|([iI]({length}?[uU]?)|([uU]{length}))|({length}([iI]?[uU]?)|([uU][iI]))|[zZ]))? 99 89 100 90 octal_digits ({octal})|({octal}({octal}|"_")*{octal}) … … 118 108 floating_length ([fFdDlL]|[lL]{floating_size}) 119 109 floating_suffix ({floating_length}?[iI]?)|([iI]{floating_length}) 120 floating_suffix_opt ("_"?({floating_suffix}|"DL"))? {user_suffix_opt}110 floating_suffix_opt ("_"?({floating_suffix}|"DL"))? 121 111 decimal_digits ({decimal})|({decimal}({decimal}|"_")*{decimal}) 122 112 floating_decimal {decimal_digits}"."{exponent}?{floating_suffix_opt} … … 125 115 126 116 binary_exponent "_"?[pP]"_"?[+-]?{decimal_digits} 127 hex_floating_suffix_opt ("_"?({floating_suffix}))? {user_suffix_opt}117 hex_floating_suffix_opt ("_"?({floating_suffix}))? 128 118 hex_floating_fraction ({hex_digits}?"."{hex_digits})|({hex_digits}".") 129 119 hex_floating_constant {hex_prefix}(({hex_floating_fraction}{binary_exponent})|({hex_digits}{binary_exponent})){hex_floating_suffix_opt} … … 314 304 /* identifier */ 315 305 {identifier} { IDENTIFIER_RETURN(); } 306 "`"{identifier}"`" { // CFA 307 yytext[yyleng - 1] = '\0'; yytext += 1; // SKULLDUGGERY: remove backquotes (ok to shorten?) 
308     IDENTIFIER_RETURN();
309 }
316 310 {attr_identifier} { ATTRIBUTE_RETURN(); }
317 "`" { BEGIN BKQUOTE; }
318 <BKQUOTE>{identifier} { IDENTIFIER_RETURN(); }
319 <BKQUOTE>"`" { BEGIN 0; }
320 311
321 312     /* numeric constants */
… …
332 323 ({cwide_prefix}[_]?)?['] { BEGIN QUOTE; rm_underscore(); strtext = new string( yytext, yyleng ); }
333 324 <QUOTE>[^'\\\n]* { strtext->append( yytext, yyleng ); }
334 <QUOTE>['\n]{user_suffix_opt} { BEGIN 0; strtext->append( yytext, yyleng ); RETURN_STR(CHARACTERconstant); }
325 <QUOTE>['\n] { BEGIN 0; strtext->append( yytext, yyleng ); RETURN_STR(CHARACTERconstant); }
335 326     /* ' stop editor highlighting */
336 327
… …
338 329 ({swide_prefix}[_]?)?["] { BEGIN STRING; rm_underscore(); strtext = new string( yytext, yyleng ); }
339 330 <STRING>[^"\\\n]* { strtext->append( yytext, yyleng ); }
340 <STRING>["\n]{user_suffix_opt} { BEGIN 0; strtext->append( yytext, yyleng ); RETURN_STR(STRINGliteral); }
331 <STRING>["\n] { BEGIN 0; strtext->append( yytext, yyleng ); RETURN_STR(STRINGliteral); }
341 332     /* " stop editor highlighting */
342 333
… …
348 339     /* punctuation */
349 340 "@" { ASCIIOP_RETURN(); }
341 "`" { ASCIIOP_RETURN(); }
350 342 "[" { ASCIIOP_RETURN(); }
351 343 "]" { ASCIIOP_RETURN(); }
… …
412 404 "?"({op_unary_pre_post}|"()"|"[?]"|"{}") { IDENTIFIER_RETURN(); }
413 405 "^?{}" { IDENTIFIER_RETURN(); }
414 "?`"{identifier} { IDENTIFIER_RETURN(); }  // unit operator
406 "?`"{identifier} { IDENTIFIER_RETURN(); }  // postfix operator
415 407 "?"{op_binary_over}"?" { IDENTIFIER_RETURN(); }  // binary
416 408     /*
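Lexically, the changeset replaces the old BKQUOTE start condition with two simpler cases: a fully backquoted name such as `otype` is returned as a single IDENTIFIER with the backquotes stripped, while a lone backquote becomes ordinary punctuation, so the parser can see 3`km as constant, backquote, identifier. A small CFA sketch of the escaped form, assuming the usual meaning of backquoting in CFA, namely using a CFA keyword as a plain identifier (the variable names are illustrative):

int main() {
        int `otype` = 1;            // the CFA keyword otype used as a variable name; lexes as one IDENTIFIER
        int `forall` = 2;           // same escape for the forall keyword
        return `otype` + `forall`;  // a lone ` would instead start a postfix call, handled in parser.yy below
}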
-
src/Parser/parser.yy
ra9b1b0c rb5563e1
10 10 // Created On : Sat Sep 1 20:22:55 2001
11 11 // Last Modified By : Peter A. Buhr
12 // Last Modified On : Fri Mar 16 17:24:44 2018
13 // Update Count : 3081
12 // Last Modified On : Thu Mar 22 16:56:21 2018
13 // Update Count : 3125
14 14 //
15 15
… …
126 126     } // if
127 127 } // rebindForall
128
129 NameExpr * build_postfix_name( const string * name ) {
130     NameExpr * new_name = build_varref( new string( "?`" + *name ) );
131     delete name;
132     return new_name;
133 } // build_postfix_name
128 134
129 135 bool forall = false;  // aggregate have one or more forall qualifiers ?
… …
480 486     | '(' compound_statement ')'  // GCC, lambda expression
481 487         { $$ = new ExpressionNode( new StmtExpr( dynamic_cast< CompoundStmt * >(maybeMoveBuild< Statement >($2) ) ) ); }
488     | constant '`' IDENTIFIER  // CFA, postfix call
489         { $$ = new ExpressionNode( build_func( new ExpressionNode( build_postfix_name( $3 ) ), $1 ) ); }
490     | string_literal '`' IDENTIFIER  // CFA, postfix call
491         { $$ = new ExpressionNode( build_func( new ExpressionNode( build_postfix_name( $3 ) ), new ExpressionNode( $1 ) ) ); }
492     | IDENTIFIER '`' IDENTIFIER  // CFA, postfix call
493         { $$ = new ExpressionNode( build_func( new ExpressionNode( build_postfix_name( $3 ) ), new ExpressionNode( build_varref( $1 ) ) ) ); }
494     | tuple '`' IDENTIFIER  // CFA, postfix call
495         { $$ = new ExpressionNode( build_func( new ExpressionNode( build_postfix_name( $3 ) ), $1 ) ); }
496     | '(' comma_expression ')' '`' IDENTIFIER  // CFA, postfix call
497         { $$ = new ExpressionNode( build_func( new ExpressionNode( build_postfix_name( $5 ) ), $2 ) ); }
482 498     | type_name '.' no_attr_identifier  // CFA, nested type
483 499         { SemanticError( yylloc, "Qualified names are currently unimplemented." ); $$ = nullptr; } // FIX ME
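With build_postfix_name, each new alternative rewrites e`f into a call to a function named ?`f with e as its only argument, covering constants, string literals, identifiers, tuples, and parenthesized expressions uniformly. A CFA sketch of the accepted forms, reusing the illustrative ?`km from above and adding a hypothetical ?`len for the string case (neither is part of this changeset):

double ?`km( double d ) { return d * 1000.0; }  // illustrative unit function
int ?`len( const char * s ) {                   // hypothetical postfix string length
        int n = 0;
        while ( s[n] != '\0' ) n += 1;
        return n;
}

int main() {
        double x = 2.5;
        double a = 3`km;              // constant ` IDENTIFIER            => ?`km( 3 )
        double b = x`km;              // IDENTIFIER ` IDENTIFIER          => ?`km( x )
        double c = (x + 1)`km;        // '(' comma_expression ')' ` IDENT => ?`km( x + 1 )
        int n = "hello"`len;          // string_literal ` IDENTIFIER      => ?`len( "hello" )
        return (int)(a + b + c) + n;  // the tuple form [x, x]`f follows the same pattern
}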
-
src/libcfa/Makefile.am
ra9b1b0c rb5563e1
11 11 ## Created On : Sun May 31 08:54:01 2015
12 12 ## Last Modified By : Peter A. Buhr
13 ## Last Modified On : Fri Feb 9 15:51:24 2018
14 ## Update Count : 223
13 ## Last Modified On : Thu Mar 22 17:14:15 2018
14 ## Update Count : 224
15 15 ###############################################################################
16 16
… …
99 99     ${stdhdr} \
100 100     math \
101     time \
101 102     gmp \
102 103     bits/align.h \
-
src/libcfa/Makefile.in
ra9b1b0c rb5563e1
263 263     containers/result containers/vector concurrency/coroutine \
264 264     concurrency/thread concurrency/kernel concurrency/monitor \
265     ${shell find stdhdr -type f -printf "%p "} math gmp \
265     ${shell find stdhdr -type f -printf "%p "} math time gmp \
266 266     bits/align.h bits/cfatime.h bits/containers.h bits/defs.h \
267 267     bits/debug.h bits/locks.h concurrency/invoke.h
… …
435 435     ${stdhdr} \
436 436     math \
437     time \
437 438     gmp \
438 439     bits/align.h \
-
src/tests/coroutine/fibonacci.c
ra9b1b0c rb5563e1
10 10 // Created On : Thu Jun 8 07:29:37 2017
11 11 // Last Modified By : Peter A. Buhr
12 // Last Modified On : Tue Dec 5 22:27:54 2017
13 // Update Count : 14
12 // Last Modified On : Thu Mar 22 22:45:44 2018
13 // Update Count : 15
14 14 //
15 15
… …
21 21 void ?{}( Fibonacci & fib ) with( fib ) { fn = 0; }
22 22
23 // main automatically called on first resume
23 24 void main( Fibonacci & fib ) with( fib ) {
24 25     int fn1, fn2;  // retained between resumes
25
26     fn = 0; fn1 = fn;  // 1st case
26     fn = 0; fn1 = fn;  // 1st case
27 27     suspend();  // restart last resume
28
29     fn = 1; fn2 = fn1; fn1 = fn;  // 2nd case
28     fn = 1; fn2 = fn1; fn1 = fn;  // 2nd case
30 29     suspend();  // restart last resume
31
32 30     for ( ;; ) {
33 31         fn = fn1 + fn2; fn2 = fn1; fn1 = fn;  // general case
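Beyond the comment noting that the coroutine main is started automatically by the first resume, this hunk only tightens whitespace. For context, a sketch of a complete version of the test, combining the constructor and coroutine main shown above with a typical driver; the coroutine declaration is reconstructed from the constructor, and the header names, loop count, and 2018-era stream syntax (sout | ... | endl) are stated here as assumptions:

#include <fstream>                              // sout, endl
#include <coroutine>                            // coroutine, resume, suspend (assumed header name at this revision)

coroutine Fibonacci { int fn; };                // current Fibonacci value

void ?{}( Fibonacci & fib ) with( fib ) { fn = 0; }

void main( Fibonacci & fib ) with( fib ) {      // started automatically on first resume
        int fn1, fn2;                           // retained between resumes
        fn = 0; fn1 = fn;                       // 1st case
        suspend();                              // restart last resume
        fn = 1; fn2 = fn1; fn1 = fn;            // 2nd case
        suspend();                              // restart last resume
        for ( ;; ) {
                fn = fn1 + fn2; fn2 = fn1; fn1 = fn;  // general case
                suspend();                      // restart last resume
        } // for
}

int main() {                                    // driver: two independent coroutine instances
        Fibonacci f1, f2;
        for ( int i = 1; i <= 10; i += 1 ) {
                resume( f1 ); resume( f2 );     // each resume runs a coroutine to its next suspend
                sout | f1.fn | f2.fn | endl;
        } // for
}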