Changeset d800676
- Timestamp:
- Mar 23, 2023, 12:18:39 PM (19 months ago)
- Branches:
- ADT, ast-experimental, master
- Children:
- c94b1f0
- Parents:
- 1afd9ccb (diff), 18ea270 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the(diff)
links above to see all the changes relative to each parent. - Files:
-
- 48 added
- 1 deleted
- 30 edited
- 3 moved
Legend:
- Unmodified
- Added
- Removed
-
doc/bibliography/pl.bib
r1afd9ccb rd800676 2136 2136 address = {Eindhoven, Neth.}, 2137 2137 year = 1965, 2138 optnote = {Reprinted in \cite{Genuys68} pp. 43--112.} 2138 optnote = {Reprinted in \cite{Genuys68} pp. 43--112.}, 2139 2139 note = {\url{https://pure.tue.nl/ws/files/4279816/344354178746665.pdf}}, 2140 2140 } -
doc/theses/colby_parsons_MMAth/Makefile
r1afd9ccb rd800676 14 14 15 15 SOURCES = ${addsuffix .tex, \ 16 text/CFA_intro \ 17 text/actors \ 18 thesis \ 16 text/CFA_intro \ 17 text/actors \ 18 text/frontpgs \ 19 text/CFA_concurrency \ 20 thesis \ 21 text/mutex_stmt \ 19 22 } 20 23 21 FIGURES = ${addsuffix .tikz, \ 22 figures/standard_actor \ 23 figures/inverted_actor \ 24 figures/gulp \ 24 FIGURES = ${addsuffix .pgf, \ 25 figures/pykeExecutor \ 26 figures/pykeCFAExecutor \ 27 figures/nasusExecutor \ 28 figures/nasusCFAExecutor \ 29 figures/pykeMatrix \ 30 figures/pykeCFAMatrix \ 31 figures/nasusMatrix \ 32 figures/nasusCFAMatrix \ 33 figures/pykeRepeat \ 34 figures/pykeCFARepeat \ 35 figures/nasusRepeat \ 36 figures/nasusCFARepeat \ 37 figures/pykeCFABalance-One \ 38 figures/nasusCFABalance-One \ 39 figures/pykeCFABalance-Multi \ 40 figures/nasusCFABalance-Multi \ 25 41 } 26 42 27 PICTURES = ${addsuffix .pstex, \ 43 DATA = data/pykeSendStatic \ 44 data/pykeSendDynamic \ 45 data/pykeExecutorMem \ 46 data/nasusSendStatic \ 47 data/nasusSendDynamic \ 48 data/pykeExecutorMem \ 49 50 PICTURES = ${addsuffix .tikz, \ 51 diagrams/standard_actor \ 52 diagrams/inverted_actor \ 53 diagrams/gulp \ 54 diagrams/chain_swap \ 55 diagrams/M_to_one_swap \ 56 diagrams/acyclic_swap \ 57 diagrams/cyclic_swap \ 28 58 } 29 59 … … 56 86 dvips ${Build}/$< -o $@ 57 87 58 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} \88 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${DATA} \ 59 89 ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build} 60 90 # Must have *.aux file containing citations for bibtex … … 109 139 "\end{document}" > $@ 110 140 141 data/%: ; 142 %.tikz: ; 143 %.pgf: ; 144 111 145 # Local Variables: # 112 146 # compile-command: "make" # -
doc/theses/colby_parsons_MMAth/data/nasusExecutorMem
r1afd9ccb rd800676 1 441 .864MB & 139.912MB & 7714.548MB & 116.476MB & 549.812MB1 441MB & 139MB & 7714MB & 116MB & 549MB -
doc/theses/colby_parsons_MMAth/data/nasusSendDynamic
r1afd9ccb rd800676 1 43 & 9501 & 7483 & 72 & 55471 43ns & 9501ns & 7483ns & 72ns & 5547ns -
doc/theses/colby_parsons_MMAth/data/nasusSendStatic
r1afd9ccb rd800676 1 22 & 2779 & 59 & 40 & 681 22ns & 2779ns & 59ns & 40ns & 68ns -
doc/theses/colby_parsons_MMAth/data/pykeExecutorMem
r1afd9ccb rd800676 1 677 .536MB & 165.024MB & 8046.684MB & 185.552MB & 563.512MB1 677MB & 165MB & 8046MB & 185MB & 563MB -
doc/theses/colby_parsons_MMAth/data/pykeSendDynamic
r1afd9ccb rd800676 1 56 & 8476 & 5972 & 81 & 41791 56ns & 8476ns & 5972ns & 81ns & 4179ns -
doc/theses/colby_parsons_MMAth/data/pykeSendStatic
r1afd9ccb rd800676 1 23 & 1712 & 74 & 40 & 901 23ns & 1712ns & 74ns & 40ns & 90ns -
doc/theses/colby_parsons_MMAth/diagrams/gulp.tikz
r1afd9ccb rd800676 93 93 \draw (8,197) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize Sharded message queues}}; 94 94 % Text Node 95 \draw (21,18 1) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 0}};95 \draw (21,184) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 0}}; 96 96 % Text Node 97 \draw (52,18 1) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 1}};97 \draw (52,184) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 1}}; 98 98 % Text Node 99 \draw (85,18 1) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 2}};99 \draw (85,184) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 2}}; 100 100 % Text Node 101 101 \draw (70,71) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize Gulps queue 0}}; … … 107 107 \draw (297,198) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize Sharded message queues}}; 108 108 % Text Node 109 \draw (310,18 1) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 0}};109 \draw (310,184) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 0}}; 110 110 % Text Node 111 \draw (341,18 1) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 1}};111 \draw (341,184) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 1}}; 112 112 % Text Node 113 \draw (374,18 1) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 2}};113 \draw (374,184) node [anchor=north west][inner sep=0.75pt] [align=left] {{\footnotesize 2}}; 114 114 % Text Node 115 115 \draw (190,86) node [anchor=north west][inner sep=0.75pt] [font=\footnotesize] [align=left] {After gulp}; -
doc/theses/colby_parsons_MMAth/local.bib
r1afd9ccb rd800676 21 21 @string{pldi="Programming Language Design and Implementation"} 22 22 23 % actor work stealing papers24 23 @inproceedings{barghi18, 25 24 title={Work-stealing, locality-aware actor scheduling}, … … 38 37 year={2017} 39 38 } 39 40 @mastersthesis{Delisle18, 41 author={{Delisle, Thierry}}, 42 title={Concurrency in C∀}, 43 year={2018}, 44 publisher="UWSpace", 45 url={http://hdl.handle.net/10012/12888} 46 } 47 48 @phdthesis{Delisle22, 49 author={{Delisle, Thierry}}, 50 title={The C∀ Scheduler}, 51 year={2022}, 52 publisher="UWSpace", 53 url={http://hdl.handle.net/10012/18941} 54 } -
doc/theses/colby_parsons_MMAth/text/CFA_intro.tex
r1afd9ccb rd800676 5 5 % ====================================================================== 6 6 7 % potentially discuss rebindable references, with stmt, and operators8 9 7 \section{Overview} 8 The following serves as an introduction to \CFA. \CFA is a layer over C, is transpiled to C and is largely considered to be an extension of C. Beyond C, it adds productivity features, libraries, a type system, and many other language constructions. However, \CFA stays true to C as a language, with most code revolving around \code{struct}'s and routines, and respects the same rules as C. \CFA is not object oriented as it has no notion of \code{this} and no classes or methods, but supports some object oriented adjacent ideas including costructors, destructors, and limited inheritance. \CFA is rich with interesting features, but a subset that is pertinent to this work will be discussed. 9 10 \section{References} 11 References in \CFA are similar to references in \CC, however in \CFA references are rebindable, and support multi-level referencing. References in \CFA are a layer of syntactic sugar over pointers to reduce the number of ref/deref operations needed with pointer usage. Some examples of references in \CFA are shown in Listing~\ref{l:cfa_ref}. 12 13 14 \begin{cfacode}[tabsize=3,caption={Example of \CFA references},label={l:cfa_ref}] 15 int i = 2; 16 int & ref_i = i; // declare ref to i 17 int * ptr_i = &i; // ptr to i 18 19 // address of ref_i is the same as address of i 20 assert( &ref_i == ptr_i ); 21 22 int && ref_ref_i = ref_i; // can have a ref to a ref 23 ref_i = 3; // set i to 3 24 int new_i = 4; 25 26 // syntax to rebind ref_i (must cancel implicit deref) 27 &ref_i = &new_i; // (&*)ref_i = &new_i; (sets underlying ptr) 28 \end{cfacode} 29 30 31 \section{Overloading} 32 In \CFA routines can be overloaded on parameter type, number of parameters, and return type. 
Variables can also be overloaded on type, meaning that two variables can have the same name so long as they have different types. The variables will be disambiguated via type, sometimes requiring a cast. The code snippet in Listing~\ref{l:cfa_overload} contains examples of overloading. 33 34 35 \begin{cfacode}[tabsize=3,caption={Example of \CFA function overloading},label={l:cfa_overload}] 36 int foo() { printf("A\n"); return 0;} 37 int foo( int bar ) { printf("B\n"); return 1; } 38 int foo( double bar ) { printf("C\n"); return 2; } 39 double foo( double bar ) { printf("D\n"); return 3;} 40 void foo( double bar ) { printf("%.0f\n", bar); } 41 42 int main() { 43 foo(); // prints A 44 foo( 0 ); // prints B 45 int a = foo( 0.0 ); // prints C 46 double a = foo( 0.0 ); // prints D 47 foo( a ); // prints 3 48 } 49 \end{cfacode} 50 51 52 \section{With Statement} 53 The with statement is a tool for exposing members of aggregate types within a scope in \CFA. It allows users to use fields of aggregate types without using their fully qualified name. This feature is also implemented in Pascal. It can exist as a stand-alone statement or it can be used on routines to expose fields in the body of the routine. An example is shown in Listing~\ref{l:cfa_with}. 54 55 56 \begin{cfacode}[tabsize=3,caption={Usage of \CFA with statement},label={l:cfa_with}] 57 struct obj { 58 int a, b, c; 59 }; 60 struct pair { 61 double x, y; 62 }; 63 64 // Stand-alone with stmt: 65 pair p; 66 with( p ) { 67 x = 6.28; 68 y = 1.73; 69 } 70 71 // Can be used on routines: 72 void foo( obj o, pair p ) with( o, p ) { 73 a = 1; 74 b = 2; 75 c = 3; 76 x = 3.14; 77 y = 2.71; 78 } 79 80 // routine foo is equivalent to routine bar: 81 void bar( obj o, pair p ) { 82 o.a = 1; 83 o.b = 2; 84 o.c = 3; 85 p.x = 3.14; 86 p.y = 2.71; 87 } 88 \end{cfacode} 89 90 91 \section{Operators} 92 Operators can be overloaded in \CFA with operator routines. Operators in \CFA are named using the operator symbol and '?' 
to respresent operands. 93 An example is shown in Listing~\ref{l:cfa_operate}. 94 95 96 \begin{cfacode}[tabsize=3,caption={Example of \CFA operators},label={l:cfa_operate}] 97 struct coord { 98 double x; 99 double y; 100 double z; 101 }; 102 coord ++?( coord & c ) with(c) { 103 x++; 104 y++; 105 z++; 106 return c; 107 } 108 coord ?<=?( coord op1, coord op2 ) with( op1 ) { 109 return (x*x + y*y + z*z) <= 110 (op2.x*op2.x + op2.y*op2.y + op2.z*op2.z); 111 } 112 \end{cfacode} 113 114 115 \section{Constructors and Destructors} 116 Constructors and destructors in \CFA are two special operator routines that are used for creation and destruction of objects. The default constructor and destructor for a type are called implicitly upon creation and deletion respectively if they are defined. An example is shown in Listing~\ref{l:cfa_ctor}. 117 118 119 \begin{cfacode}[tabsize=3,caption={Example of \CFA constructors and destructors},label={l:cfa_ctor}] 120 struct discrete_point { 121 int x; 122 int y; 123 }; 124 void ?{}( discrete_point & this ) with(this) { // ctor 125 x = 0; 126 y = 0; 127 } 128 void ?{}( discrete_point & this, int x, int y ) { // ctor 129 this.x = x; 130 this.y = y; 131 } 132 void ^?{}( discrete_point & this ) with(this) { // dtor 133 x = 0; 134 y = 0; 135 } 136 137 int main() { 138 discrete_point d; // implicit call to ?{} 139 discrete_point p{}; // same call as line above 140 discrete_point dp{ 2, -4 }; // specialized ctor 141 } // ^d{}, ^p{}, ^dp{} all called as they go out of scope 142 \end{cfacode} 143 10 144 11 145 \section{Polymorphism}\label{s:poly} 12 13 \section{Overloading} 14 15 \section{Constructors and Destructors} 16 17 \section{With Statement} 18 19 \section{Threading Model}\label{s:threading} 146 C does not natively support polymorphism, and requires users to implement polymorphism themselves if they want to use it. \CFA extends C with two styles of polymorphism that it supports, parametric polymorphism and nominal inheritance. 
147 148 \subsection{Parametric Polymorphism} 149 \CFA provides parametric polymorphism in the form of \code{forall}, and \code{trait}s. A \code{forall} takes in a set of types and a list of constraints. The declarations that follow the \code{forall} are parameterized over the types listed that satisfy the constraints. Sometimes the list of constraints can be long, which is where a \code{trait} can be used. A \code{trait} is a collection of constraints that is given a name and can be reused in foralls. An example of the usage of parametric polymorphism in \CFA is shown in Listing~\ref{l:cfa_poly}. 150 151 \begin{cfacode}[tabsize=3,caption={Example of \CFA polymorphism},label={l:cfa_poly}] 152 // sized() is a trait that means the type has a size 153 forall( V & | sized(V) ) // type params for trait 154 trait vector_space { 155 V add( V, V ); // vector addition 156 V scalar_mult( int, V ); // scalar multiplication 157 158 // dtor and copy ctor needed in constraints to pass by copy 159 void ?{}( V &, V & ); // copy ctor for return 160 void ^?{}( V & ); // dtor 161 }; 162 163 forall( V & | vector_space( V )) { 164 V get_inverse( V v1 ) { 165 return scalar_mult( -1, v1 ); // can use ?*? routine defined in trait 166 } 167 V add_and_invert( V v1, V v2 ) { 168 return get_inverse( add( v1, v2 ) ); // can use ?*? 
routine defined in trait 169 } 170 } 171 struct Vec1 { int x; }; 172 void ?{}( Vec1 & this, Vec1 & other ) { this.x = other.x; } 173 void ?{}( Vec1 & this, int x ) { this.x = x; } 174 void ^?{}( Vec1 & this ) {} 175 Vec1 add( Vec1 v1, Vec1 v2 ) { v1.x += v2.x; return v1; } 176 Vec1 scalar_mult( int c, Vec1 v1 ) { v1.x = v1.x * c; return v1; } 177 178 struct Vec2 { int x; int y; }; 179 void ?{}( Vec2 & this, Vec2 & other ) { this.x = other.x; this.y = other.y; } 180 void ?{}( Vec2 & this, int x ) { this.x = x; this.y = x; } 181 void ^?{}( Vec2 & this ) {} 182 Vec2 add( Vec2 v1, Vec2 v2 ) { v1.x += v2.x; v1.y += v2.y; return v1; } 183 Vec2 scalar_mult( int c, Vec2 v1 ) { v1.x = v1.x * c; v1.y = v1.y * c; return v1; } 184 185 int main() { 186 Vec1 v1{ 1 }; // create Vec1 and call ctor 187 Vec2 v2{ 2 }; // create Vec2 and call ctor 188 189 // can use forall defined routines since types satisfy trait 190 add_and_invert( get_inverse( v1 ), v1 ); 191 add_and_invert( get_inverse( v2 ), v2 ); 192 } 193 194 \end{cfacode} 195 196 \subsection{Inheritance} 197 Inheritance in \CFA copies its style from Plan-9 C nominal inheritance. In \CFA structs can \code{inline} another struct type to gain its fields and to be able to be passed to routines that require a parameter of the inlined type. An example of \CFA inheritance is shown in Listing~\ref{l:cfa_inherit}. 
198 199 \begin{cfacode}[tabsize=3,caption={Example of \CFA inheritance},label={l:cfa_inherit}] 200 struct one_d { double x; }; 201 struct two_d { 202 inline one_d; 203 double y; 204 }; 205 struct three_d { 206 inline two_d; 207 double z; 208 }; 209 double get_x( one_d & d ){ return d.x; } 210 211 struct dog {}; 212 struct dog_food { 213 int count; 214 }; 215 struct pet { 216 inline dog; 217 inline dog_food; 218 }; 219 void pet_dog( dog & d ){printf("woof\n");} 220 void print_food( dog_food & f ){printf("%d\n", f.count);} 221 222 int main() { 223 one_d x; 224 two_d y; 225 three_d z; 226 x.x = 1; 227 y.x = 2; 228 z.x = 3; 229 get_x( x ); // returns 1; 230 get_x( y ); // returns 2; 231 get_x( z ); // returns 3; 232 pet p; 233 p.count = 5; 234 pet_dog( p ); // prints woof 235 print_food( p ); // prints 5 236 } 237 \end{cfacode} 238 239 -
doc/theses/colby_parsons_MMAth/text/actors.tex
r1afd9ccb rd800676 1 1 % ====================================================================== 2 2 % ====================================================================== 3 \chapter{ ConcurrentActors}\label{s:actors}3 \chapter{Actors}\label{s:actors} 4 4 % ====================================================================== 5 5 % ====================================================================== 6 6 7 % C_TODO: add citations 8 Before discussing \CFA's actor system in detail, it is important to first describe the actor model, and the classic approach to implementing an actor system.9 10 \section{The Actor Model} % \cite{Hewitt73}11 The actor model is a paradigm of concurrent computation, where tasks are broken into units of work that are distributed to actors in the form of messages \cite{ }. Actors execute asynchronously upon receiving a message and can modify their own state, make decisions, spawn more actors, and send messages to other actors. The actor model is an implicit model of concurrency. As such, one strength of the actor model is that it abstracts away many considerations that are needed in other paradigms of concurrent computation. Mutual exclusion, and locking are rarely relevant concepts to users of an actor model, as actors typically operate on local state.7 % C_TODO: add citations throughout chapter 8 Actors are a concurrent feature that abstracts threading away from a user, and instead provides \newterm{actors} and \newterm{messages} as building blocks for concurrency. This is a form of what is called \newterm{implicit concurrency}, where programmers write concurrent code without having to worry about explicit thread synchronization and mutual exclusion. The study of actors can be broken into two concepts, the \newterm{actor model}, which describes the model of computation and the \newterm{actor system}, which refers to the implementation of the model in practice. 
Before discussing \CFA's actor system in detail, it is important to first describe the actor model, and the classic approach to implementing an actor system. 9 10 \section{The Actor Model} 11 The actor model is a paradigm of concurrent computation, where tasks are broken into units of work that are distributed to actors in the form of messages \cite{Hewitt73}. Actors execute asynchronously upon receiving a message and can modify their own state, make decisions, spawn more actors, and send messages to other actors. The actor model is an implicit model of concurrency. As such, one strength of the actor model is that it abstracts away many considerations that are needed in other paradigms of concurrent computation. Mutual exclusion, and locking are rarely relevant concepts to users of an actor model, as actors typically operate on local state. 12 12 13 13 \section{Classic Actor System} … … 16 16 \begin{figure} 17 17 \begin{tabular}{l|l} 18 \subfloat[Actor-centric system]{\label{f:standard_actor}\input{ figures/standard_actor.tikz}} &19 \subfloat[Message-centric system]{\label{f:inverted_actor}\raisebox{.1\height}{\input{ figures/inverted_actor.tikz}}}18 \subfloat[Actor-centric system]{\label{f:standard_actor}\input{diagrams/standard_actor.tikz}} & 19 \subfloat[Message-centric system]{\label{f:inverted_actor}\raisebox{.1\height}{\input{diagrams/inverted_actor.tikz}}} 20 20 \end{tabular} 21 21 \caption{Classic and inverted actor implementation approaches with sharded queues.} … … 44 44 45 45 \item 46 Creates a static-typed actor system that catches actor-message mismatches at compile time. 47 % C_TODO: create and/or mention other safety features (allocation/deadlock/etc) 46 Provides a suite of safety and productivity features including static-typing, detection of erroneous message sends, statistics tracking, and more. 
48 47 \end{enumerate} 49 48 50 \s ubsection{\CFA Actor Syntax}49 \section{\CFA Actor Syntax} 51 50 \CFA is not an object oriented language and it does not have run-time type information (RTTI). As such all message sends and receives between actors occur using exact type matching. To use actors in \CFA you must \code{\#include <actors.hfa>}. To create an actor type one must define a struct which inherits from the base \code{actor} struct via the \code{inline} keyword. This is the Plan-9 C-style nominal inheritance discussed in Section \ref{s:poly}. Similarly to create a message type a user must define a struct which \code{inline}'s the base \code{message} struct. 52 51 … … 70 69 Allocation receive( derived_actor & receiver, derived_msg & msg ) { 71 70 printf("The message contained the string: %s\n", msg.word); 72 return Finished; // Return inished since actor is done71 return Finished; // Return finished since actor is done 73 72 } 74 73 … … 77 76 derived_actor my_actor; 78 77 derived_msg my_msg{ "Hello World" }; // Constructor call 79 my_actor | my_msg; // Send message via baroperator78 my_actor << my_msg; // Send message via left shift operator 80 79 stop_actor_system(); // Waits until actors are finished 81 80 return 0; … … 109 108 110 109 \item[\LstBasicStyle{\textbf{Finished}}] 111 tells the executor to mark the respective actor as being finished executing. In this case the executor will not call any destructors or deallocate any objects. This status is used when the actors are stack allocated or if the user wants to manage deallocation of actors themselves. In the case of messages, \code{Finished} is no different than \code{Nodelete} since the executor does not need to track if messages are done work. 110 tells the executor to mark the respective actor as being finished executing. In this case the executor will not call any destructors or deallocate any objects. 
This status is used when the actors are stack allocated or if the user wants to manage deallocation of actors themselves. In the case of messages, \code{Finished} is no different than \code{Nodelete} since the executor does not need to track if messages are done work. As such \code{Finished} is not supported for messages and is used internally by the executor to track if messages have been used for debugging purposes. 112 111 \end{description} 113 112 … … 124 123 All actors must be created after calling \code{start_actor_system} so that the executor can keep track of the number of actors that have been allocated but have not yet terminated. 125 124 126 All message sends are done using the bar operater, \ie |. The signature of the baroperator is:125 All message sends are done using the left shift operater, \ie <<, similar to the syntax of \CC's output. The signature of the left shift operator is: 127 126 \begin{cfacode} 128 Allocation ? |?( derived_actor & receiver, derived_msg & msg )127 Allocation ?<<?( derived_actor & receiver, derived_msg & msg ) 129 128 \end{cfacode} 130 129 131 An astute eye will notice that this is the same signature as the \code{receive} routine which is no coincidence. The \CFA compiler generates a bar operator routine definition and forward declaration of the bar operator for each \code{receive} routine that has the appropriate signature. The generated routine packages the message and actor in an \hyperref[s:envelope]{envelope} and adds it to the executor's queues via an executor routine. As part of packaging the envelope, the bar operatorroutine sets a function pointer in the envelope to point to the appropriate receive routine for given actor and message types.132 133 \s ubsection{\CFA Executor}\label{s:executor}130 An astute eye will notice that this is the same signature as the \code{receive} routine which is no coincidence. 
The \CFA compiler generates a \code{?<<?} routine definition and forward declaration for each \code{receive} routine that has the appropriate signature. The generated routine packages the message and actor in an \hyperref[s:envelope]{envelope} and adds it to the executor's queues via an executor routine. As part of packaging the envelope, the \code{?<<?} routine sets a function pointer in the envelope to point to the appropriate receive routine for given actor and message types. 131 132 \section{\CFA Executor}\label{s:executor} 134 133 This section will describe the basic architecture of the \CFA executor. Any discussion of work stealing is omitted until Section \ref{s:steal}. The executor of an actor system is the scheduler that organizes where actors behaviours are run and how messages are sent and delivered. In \CFA the executor lays out actors across the sharded message queues in round robin order as they are created. The message queues are split across worker threads in equal chunks, or as equal as possible if the number of message queues is not divisible by the number of workers threads. 135 134 136 135 \begin{figure} 137 136 \begin{center} 138 \input{ figures/gulp.tikz}137 \input{diagrams/gulp.tikz} 139 138 \end{center} 140 139 \caption{Diagram of the queue gulping mechanism.} … … 146 145 To process its local queue, a worker thread takes each unit of work off the queue and executes it. Since all messages to a given actor will be in the same queue, this guarantees atomicity across behaviours of that actor since it can only execute on one thread at a time. After running behaviour, the worker thread looks at the returned allocation status and takes the corresponding action. Once all actors have marked themselves as being finished the executor initiates shutdown by inserting a sentinel value into the message queues. Once a worker thread sees a sentinel it stops running. After all workers stop running the actor system shutdown is complete. 
147 146 148 \s ubsection{Envelopes}\label{s:envelope}147 \section{Envelopes}\label{s:envelope} 149 148 In actor systems messages are sent and received by actors. When a actor receives a message it executes its behaviour that is associated with that message type. However the unit of work that stores the message, the receiving actor's address, and other pertinent information needs to persist between send and the receive. Furthermore the unit of work needs to be able to be stored in some fashion, usually in a queue, until it is executed by an actor. All these requirements are fulfilled by a construct called an envelope. The envelope wraps up the unit of work and also stores any information needed by data structures such as link fields. To meet the persistence requirement the envelope is dynamically allocated and cleaned up after it has been delivered and its payload has run. 150 149 … … 153 152 This frequent allocation of envelopes with each message send between actors results in heavy contention on the memory allocator. As such, a way to alleviate contention on the memory allocator could result in performance improvement. Contention was reduced using a novel data structure that called a \newterm{copy queue}. 154 153 155 \subs ubsection{The CopyQueue}154 \subsection{The Copy Queue}\label{s:copyQueue} 156 155 The copy queue is a thin layer over a dynamically sized array that is designed with the envelope use case in mind. A copy queue supports the typical queue operations of push/pop but in a different way than a typical array based queue. The copy queue is designed to take advantage of the \hyperref[s:executor]{gulping} pattern. As such the amortized rutime cost of each push/pop operation for the copy queue is $O(1)$. In contrast, a naive array based queue often has either push or pop cost $O(n)$ and the other cost $O(1)$ since for at least one of the operations all the elements of the queue have to be shifted over. 
Since the worker threads gulp each queue to operate on locally, this creates a usage pattern of the queue where all elements are popped from the copy queue without any interleaved pushes. As such, during pop operations there is no need to shift array elements. An index is stored in the copy queue data structure which keeps track of which element to pop next allowing pop to be $O(1)$. Push operations are amortized $O(1)$ since pushes may cause doubling reallocations of the underlying dynamic sized array. 157 156 … … 162 161 To mitigate this a memory reclamation scheme was introduced. Initially the memory reclamation naively reclaimed one index of the array per gulp if the array size was above a low fixed threshold. This approach had a problem. The high memory usage watermark nearly doubled with this change! The issue with this approach can easily be highlighted with an example. Say that there is a fixed throughput workload where a queue never has more than 19 messages at a time. If the copy queue starts with a size of 10, it will end up doubling at some point to size 20 to accomodate 19 messages. However, after 2 gulps and subsequent reclamations the array would be size 18. The next time 19 messages are enqueued, the array size is doubled to 36! To avoid this issue a second check was added to reclamation. Each copy queue started tracking the utilization of their array size. Reclamation would only occur if less than half of the array was being utilized. In doing this the reclamation scheme was able to achieve a lower high watermark and a lower overall memory utilization when compared to the non-reclamation copy queues. However, the use of copy queues still incurs a higher memory cost than the list based queueing. With the inclusion of a memory reclamation scheme the increase in memory usage is reasonable considering the performance gains and will be discussed further in Section \ref{s:actor_perf}. 
163 162 164 \s ubsection{Work Stealing}\label{s:steal}163 \section{Work Stealing}\label{s:steal} 165 164 Work stealing is a scheduling strategy that attempts to load balance, and increase resource untilization by having idle threads steal work. There are many parts that make up a work stealing actor scheduler, but the two that will be highlighted in this work are the stealing mechanism and victim selection. 166 165 167 166 % C_TODO enter citation for langs 168 \subs ubsection{Stealing Mechanism}167 \subsection{Stealing Mechanism} 169 168 In this discussion of work stealing the worker being stolen from will be referred to as the \newterm{victim} and the worker stealing work will be called the \newterm{thief}. The stealing mechanism presented here differs from existing work stealing actor systems due the inverted actor system. Other actor systems such as Akka \cite{} and CAF \cite{} have work stealing, but since they use an classic actor system that is actor-centric, stealing work is the act of stealing an actor from a dequeue. As an example, in CAF, the sharded actor queue is a set of double ended queues (dequeues). Whenever an actor is moved to a ready queue, it is inserted into a worker's dequeue. Workers then consume actors from the dequeue and execute their behaviours. To steal work, thieves take one or more actors from a victim's dequeue. This action creates contention on the dequeue, which can slow down the throughput of the victim. The notion of which end of the dequeue is used for stealing, consuming, and inserting is not discussed since it isn't relevant. By the pigeon hole principle there are three dequeue operations (push/victim pop/thief pop) that can occur concurrently and only two ends to a dequeue, so work stealing being present in a dequeue based system will always result in a potential increase in contention on the dequeues. 
170 169 171 % C_TODO: insert stealing diagram172 173 In \CFA, the work stealing is unique, since existing work stealing systems do not use the inverted actor system. While other systems are concerned with stealing actors, the \CFA actor system steals queues.The goal of the \CFA actor work stealing mechanism is to have a zero-victim-cost stealing mechanism. This does not means that stealing has no cost. This goal is to ensure that stealing work does not impact the performance of victim workers. This means that thieves can not contend with victims, and that victims should perform no stealing related work unless they become a thief. In theory this goal is not achieved, but results will be presented that show the goal is achieved in practice. In \CFA's actor system workers own a set of sharded queues which they iterate over and gulp. If a worker has iterated over the queues they own twice without finding any work, they try to steal a queue from another worker. Stealing a queue is done wait-free with a few atomic instructions that can only create contention with other stealing workers. To steal a queue a worker does the following:170 % C_TODO: maybe insert stealing diagram 171 172 In \CFA, the actor work stealing implementation is unique. While other systems are concerned with stealing actors, the \CFA actor system steals queues. This is a result of \CFA's use of the inverted actor system. The goal of the \CFA actor work stealing mechanism is to have a zero-victim-cost stealing mechanism. This does not means that stealing has no cost. This goal is to ensure that stealing work does not impact the performance of victim workers. This means that thieves can not contend with victims, and that victims should perform no stealing related work unless they become a thief. In theory this goal is not achieved, but results will be presented that show the goal is achieved in practice. In \CFA's actor system workers own a set of sharded queues which they iterate over and gulp. 
If a worker has iterated over the queues they own twice without finding any work, they try to steal a queue from another worker. Stealing a queue is done wait-free with a few atomic instructions that can only create contention with other stealing workers. To steal a queue a worker does the following: 174 173 \begin{enumerate}[topsep=5pt,itemsep=3pt,parsep=0pt] 175 174 \item … … 183 182 \end{enumerate} 184 183 185 % C_TODO insert array of queues diagram186 184 Once a thief fails or succeeds in stealing a queue, it goes back to its own set of queues and iterates over them again. It will only try to steal again once it has completed two consecutive iterations over its owned queues without finding any work. The key to the stealing mechnism is that the queues can still be operated on while they are being swapped. This elimates any contention between thieves and victims. The first key to this is that actors and workers maintain two distinct arrays of references to queues. Actors will always receive messages via the same queues. Workers, on the other hand will swap the pointers to queues in their shared array and operate on queues in the range of that array that they own. Swapping queues is a matter of atomically swapping two pointers in the worker array. As such pushes to the queues can happen concurrently during the swap since pushes happen via the actor queue references. 187 185 … … 189 187 190 188 191 \subs ubsection{Queue Swap Correctness}189 \subsection{Queue Swap Correctness} 192 190 Given the wait-free swap used is novel, it is important to show that it is correct. Firstly, it is clear to show that the swap is wait-free since all workers will fail or succeed in swapping the queues in a finite number of steps since there are no locks or looping. There is no retry mechanism in the case of a failed swap, since a failed swap either means the work was already stolen, or that work was stolen from the thief. 
In both cases it is apropos for a thief to give up on stealing. \CFA-style pseudocode for the queue swap is presented below. The swap uses compare-and-swap (\code{CAS}) which is just pseudocode for C's \code{__atomic_compare_exchange_n}. A pseudocode implementation of \code{CAS} is also shown below. The correctness of the wait-free swap will now be discussed in detail. To first verify sequential correctness, consider the equivalent sequential swap below: 193 191 

… 

275 273 In the failed case the outcome is correct in steps 1 and 2 since no writes have occurred so the program state is unchanged. In the failed case of step 3 the program state is safely restored to the state it had prior to the \code{0p} write in step 2, thanks to the invariant that makes the write back to the \code{0p} pointer safe. 276 274 

277 \subs ubsection{Stealing Guarantees}275 \subsection{Stealing Guarantees} 278 276 

279 277 % C_TODO insert graphs for each proof 280 278 Given that the stealing operation can potentially fail, it is important to discuss the guarantees provided by the stealing implementation. Given a set of $N$ swaps, a set of connected directed graphs can be constructed where each vertex is a queue and each edge is a swap directed from a thief queue to a victim queue. Since each thief can only steal from one victim at a time, each vertex can only have at most one outgoing edge. A corollary that can be drawn from this is that there are at most $V$ edges in this constructed set of connected directed graphs, where $V$ is the total number of vertices. 279 

280 281 \begin{figure} 282 \begin{center} 283 \input{diagrams/M_to_one_swap.tikz} 284 \end{center} 285 \caption{Graph of $M$ thieves swapping with one victim.} 286 \label{f:M_one_swap} 287 \end{figure} 281 287 

282 288 \begin{theorem} 283 289 Given $M$ thief queues all attempting to swap with one victim queue, and no other swaps occurring that involve these queues, at least one swap is guaranteed to succeed. 
284 290 \end{theorem}\label{t:one_vic} 291 A graph of the $M$ thieves swapping with one victim discussed in this theorem is presented in Figure~\ref{f:M_one_swap}. 292 \\ 285 293 First it is important to state that a thief will not attempt to steal from themselves. As such, the victim here is not also a thief. Stepping through the code in \ref{c:swap}, for all thieves steps 0-1 succeed since the victim is not stealing and will have no queue pointers set to be \code{0p}. Similarly for all thieves step 2 will succeed since no one is stealing from any of the thieves. In step 3 the first thief to \code{CAS} will win the race and successfully swap the queue pointer. Since it is the first one to \code{CAS} and \code{CAS} is atomic, there is no way for the \code{CAS} to fail since no other thief could have written to the victim's queue pointer and the victim did not write to the pointer since they aren't stealing. Hence at least one swap is guaranteed to succeed in this case. 294 295 \begin{figure} 296 \begin{center} 297 \input{diagrams/chain_swap.tikz} 298 \end{center} 299 \caption{Graph of a chain of swaps.} 300 \label{f:chain_swap} 301 \end{figure} 286 302 287 303 \begin{theorem} 288 304 Given $M$ > 1, ordered queues pointers all attempting to swap with the queue in front of them in the ordering, except the first queue, and no other swaps occuring that involve these queues, at least one swap is guaranteed to succeed. 289 305 \end{theorem}\label{t:vic_chain} 306 A graph of the chain of swaps discussed in this theorem is presented in Figure~\ref{f:chain_swap}. 307 \\ 290 308 This is a proof by contradiction. Assume no swaps occur. Then all thieves must have failed at step 1, step 2 or step 3. For a given thief $b$ to fail at step 1, thief $b + 1$ must have succeded at step 2 before $b$ executes step 0. Hence, not all thieves can fail at step 1. 
Furthermore if a thief $b$ fails at step 1 it logically splits the chain into two subchains $0 <- b$ and $b + 1 <- M - 1$, where $b$ has become solely a victim since its swap has failed and it did not modify any state. There must exist at least one chain containing two or more queues after the split, since it is impossible for a split to occur both before and after a thief, as that requires failing at step 1 and succeeding at step 2. Hence, without loss of generality, whether thieves succeed or fail at step 1, this proof can proceed inductively. 291 309 

292 310 For a given thief $i$ to fail at step 2, it means that another thief $j$ had to have written to $i$'s queue pointer between $i$'s step 0 and step 2. The only way for $j$ to write to $i$'s queue pointer would be if $j$ was stealing from $i$ and had successfully finished step 3. If $j$ finished step 3 then at least one swap was successful. Therefore all thieves did not fail at step 2. Hence all thieves must successfully complete step 2 and fail at step 3. However, since the first worker, thief $0$, is solely a victim and not a thief, it does not change the state of any of its queue pointers. Hence, in this case thief $1$ will always succeed in step 3 if all thieves succeed in step 2. Thus, by contradiction with the earlier assumption that no swaps occur, at least one swap must succeed. 311 

312 % \raisebox{.1\height}{} 313 \begin{figure} 314 \centering 315 \begin{tabular}{l|l} 316 \subfloat[Cyclic Swap Graph]{\label{f:cyclic_swap}\input{diagrams/cyclic_swap.tikz}} & 317 \subfloat[Acyclic Swap Graph]{\label{f:acyclic_swap}\input{diagrams/acyclic_swap.tikz}} 318 \end{tabular} 319 \caption{Illustrations of cyclic and acyclic swap graphs.} 320 \end{figure} 293 321 

294 322 \begin{theorem} 295 323 Given a set of $M > 1$ swaps occurring that form a single directed connected graph, at least one swap is guaranteed to succeed if and only if the graph does not contain a cycle. 
296 324 \end{theorem}\label{t:vic_cycle} 325 Representations of cyclic and acyclic swap graphs discussed in this theorem are presented in Figures~\ref{f:cyclic_swap} and \ref{f:acyclic_swap}. 326 \\ 297 327 First the reverse direction is proven. If the graph does not contain a cycle, then there must be at least one successful swap. Since the graph contains no cycles and is finite in size, there must be a vertex $A$ with no outgoing edges. The graph can then be formulated as a tree with $A$ at the top since each node only has at most one outgoing edge and there are no cycles. The forward direction is proven by contradiction in a similar fashion to \ref{t:vic_chain}. Assume no swaps occur. Similar to \ref{t:vic_chain}, this graph can be inductively split into subgraphs of the same type by failure at step 1, so the proof proceeds without loss of generality. Similar to \ref{t:vic_chain} the conclusion is drawn that all thieves must successfully complete step 2 for no swaps to occur, since for step 2 to fail, a different thief has to successfully complete step 3, which would imply a successful swap. Hence, the only way forward is to assume all thieves successfully complete step 2. Hence for there to be no swaps all thieves must fail step 3. However, since $A$ has no outgoing edges and the graph is connected, there must be some $K$ such that $K < M - 1$ thieves are attempting to swap with $A$. Since all $K$ thieves have passed step 2, similar to \ref{t:one_vic} the first one of the $K$ thieves to attempt step 3 is guaranteed to succeed. Thus, by contradiction with the earlier assumption that no swaps occur, if the graph does not contain a cycle, at least one swap must succeed. 
298 328 

… 

300 330 % C_TODO: go through and use \paragraph to format to make it look nicer 302 \subs ubsection{Victim Selection}332 \subsection{Victim Selection}\label{s:victimSelect} 303 333 In any work stealing algorithm thieves have some heuristic to determine which victim to choose from. Choosing this algorithm is difficult and can have implications on performance. There is no one selection heuristic that is known to be the best on all workloads. Recent work focuses on locality aware scheduling in actor systems\cite{barghi18}\cite{wolke17}. However, while locality aware scheduling provides good performance on some workloads, something as simple as randomized selection performs better on other workloads\cite{barghi18}. Since locality aware scheduling has been explored recently, this work introduces a heuristic called \newterm{longest victim} and compares it to randomized work stealing. The longest victim heuristic maintains a timestamp per worker thread that is updated every time a worker attempts to steal work. Thieves then attempt to steal from the thread with the oldest timestamp. This means that if two thieves look to steal at the same time, they likely will attempt to steal from the same victim. This does increase the chance of contention between thieves; however, given that workers have multiple queues under them, often in the tens or hundreds of queues per worker, it is rare for two thieves to attempt to steal the same queue. Furthermore, in the case they attempt to steal the same queue, at least one of them is guaranteed to successfully steal the queue as shown in Theorem \ref{t:one_vic}. Additionally, the longest victim heuristic makes it very improbable that the no swap scenario presented in Theorem \ref{t:vic_cycle} manifests. Given the longest victim heuristic, for a cycle to manifest it would require all workers to attempt to steal in a short timeframe. 
This is the only way that more than one thief could choose another thief as a victim, since timestamps are only updated upon attempts to steal. In this case, the probability of lack of any successful swaps is a non issue, since it is likely that these steals were not important if all workers are trying to steal. 304 334 305 \subsection{Safety} 306 307 \subsection{Performance}\label{s:actor_perf} 335 \section{Safety and Productivity} 336 \CFA's actor system comes with a suite of safety and productivity features. Most of these features are present in \CFA's debug mode, but are removed when code is compiled in nodebug mode. Some of the features include: 337 338 \begin{itemize} 339 \item Static-typed message sends. If an actor does not support receiving a given message type, the actor program is rejected at compile time, allowing unsupported messages to never be sent to actors. 340 \item Detection of message sends to Finished/Destroyed/Deleted actors. All actors have a ticket that assigns them to a respective queue. The maximum integer value of the ticket is reserved to indicate that an actor is dead, and subsequent message sends result in an error. 341 \item Actors made before the executor can result in undefined behaviour since an executor needs to be created beforehand so it can give out the tickets to actors. As such, this is detected and an error is printed. 342 \item When an executor is created, the queues are handed out to worker threads in round robin order. If there are fewer queues than worker threads, then some workers will spin and never do any work. There is no reasonable use case for this behaviour so an error is printed if the number of queues is fewer than the number of worker threads. 343 \item A warning is printed when messages are deallocated without being sent. Since the \code{Finished} allocation status is unused for messages, it is used internally to detect if a message has been sent. 
Deallocating a message without sending it could indicate to a user that they are touching freed memory later, or it could point out extra allocations that could be removed. 344 \end{itemize} 345 346 In addition to these features, \CFA's actor system comes with a suite of statistics that can be toggled on and off. These statistics have minimal impact on the actor system's performance since they are counted on a per worker thread basis. During shutdown of the actor system they are aggregated, ensuring that the only atomic instructions used by the statistics counting happen at shutdown. The statistics measured are as follows. 347 348 \begin{description} 349 \item[\LstBasicStyle{\textbf{Actors Created}}] 350 Actors created. Includes both actors made by the main and ones made by other actors. 351 \item[\LstBasicStyle{\textbf{Messages Sent}}] 352 Messages sent and received. Includes termination messages send to the worker threads. 353 \item[\LstBasicStyle{\textbf{Gulps}}] 354 Gulps that occured across the worker threads. 355 \item[\LstBasicStyle{\textbf{Average Gulp Size}}] 356 Average number of messages in a gulped queue. 357 \item[\LstBasicStyle{\textbf{Missed gulps}}] 358 Occurences where a worker missed a gulp due to the concurrent queue processing by another worker. 359 \item[\LstBasicStyle{\textbf{Steal attempts}}] 360 Worker threads attempts to steal work. 361 \item[\LstBasicStyle{\textbf{Steal failures (no candidates)}}] 362 Work stealing failures due to selected victim not having any non empty or non-being-processed queues. 363 \item[\LstBasicStyle{\textbf{Steal failures (failed swaps)}}] 364 Work stealing failures due to the two stage atomic swap failing. 365 \item[\LstBasicStyle{\textbf{Messages stolen}}] 366 Aggregate of the number of messages in queues as they were stolen. 367 \item[\LstBasicStyle{\textbf{Average steal size}}] 368 Average number of messages in a stolen queue. 
369 \end{description} 370 371 These statistics enable a user of \CFA's actor system to make informed choices about how to configure their executor, or how to structure their actor program. For example, if there is a lot of messages being stolen relative to the number of messages sent, it could indicate to a user that their workload is heavily imbalanced across worker threads. In another example, if the average gulp size is very high, it could indicate that the executor could use more queue sharding. 372 373 % C_TODO cite poison pill messages and add languages 374 Another productivity feature that is included is a group of poison-pill messages. Poison-pill messages are common across actor systems, including Akka and ProtoActor \cite{}. Poison-pill messages inform an actor to terminate. In \CFA, due to the allocation of actors and lack of garbage collection, there needs to be a suite of poison-pills. The messages that \CFA provides are \code{DeleteMsg}, \code{DestroyMsg}, and \code{FinishedMsg}. These messages are supported on all actor types via inheritance and when sent to an actor, the actor takes the corresponding allocation action after receiving the message. Note that any pending messages to the actor will still be sent. It is still the user's responsibility to ensure that an actor does not receive any messages after termination. 375 376 \section{Performance}\label{s:actor_perf} 377 The performance of \CFA's actor system is tested using a suite of microbenchmarks, and compared with other actor systems. 378 Most of the benchmarks are the same as those presented in \ref{}, with a few additions. % C_TODO cite actor paper 379 At the time of this work the versions of the actor systems are as follows. \CFA 1.0, \uC 7.0.0, Akka Typed 2.7.0, CAF 0.18.6, and ProtoActor-Go v0.0.0-20220528090104-f567b547ea07. Akka Classic is omitted as Akka Typed is their newest version and seems to be the direction they are headed in. 
380 The experiments are run on 381 \begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=5pt\parsep=5pt\itemsep=0pt} 382 \item 383 Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets (48 process\-ing units) 2.2GHz, running Linux v5.8.0--59--generic 384 \item 385 Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units) 2.0 GHz, running Linux v5.8.0--55--generic 386 \end{list} 387 388 The benchmarks are run on up to 48 cores. On the Intel, when going beyond 24 cores there is the choice to either hop sockets or to use hyperthreads. Either choice will cause a blip in performance trends, which can be seen in the following performance figures. On the Intel the choice was made to hyperthread instead of hopping sockets for experiments with more than 24 cores. 389 390 All benchmarks presented are run 5 times and the median is taken. Error bars showing the 95\% confidence intervals are drawn on each point on the graphs. If the confidence bars are small enough, they may be obscured by the point. In this section \uC will be compared to \CFA frequently, as the actor system in \CFA was heavily based on \uC's actor system. As such the performance differences that arise are largely due to the contributions of this work. 
391 392 \begin{table}[t] 393 \centering 394 \setlength{\extrarowheight}{2pt} 395 \setlength{\tabcolsep}{5pt} 396 397 \caption{Static Actor/Message Performance: message send, program memory} 398 \label{t:StaticActorMessagePerformance} 399 \begin{tabular}{*{5}{r|}r} 400 & \multicolumn{1}{c|}{\CFA (100M)} & \multicolumn{1}{c|}{CAF (10M)} & \multicolumn{1}{c|}{Akka (100M)} & \multicolumn{1}{c|}{\uC (100M)} & \multicolumn{1}{c@{}}{ProtoActor (100M)} \\ 401 \hline 402 AMD & \input{data/pykeSendStatic} \\ 403 \hline 404 Intel & \input{data/nasusSendStatic} 405 \end{tabular} 406 407 \bigskip 408 409 \caption{Dynamic Actor/Message Performance: message send, program memory} 410 \label{t:DynamicActorMessagePerformance} 411 412 \begin{tabular}{*{5}{r|}r} 413 & \multicolumn{1}{c|}{\CFA (20M)} & \multicolumn{1}{c|}{CAF (2M)} & \multicolumn{1}{c|}{Akka (2M)} & \multicolumn{1}{c|}{\uC (20M)} & \multicolumn{1}{c@{}}{ProtoActor (2M)} \\ 414 \hline 415 AMD & \input{data/pykeSendDynamic} \\ 416 \hline 417 Intel & \input{data/nasusSendDynamic} 418 \end{tabular} 419 \end{table} 420 421 \subsection{Message Sends} 422 Message sending is the key component of actor communication. As such latency of a single message send is the fundamental unit of fast-path performance for an actor system. The following two microbenchmarks evaluate the average latency for a static actor/message send and a dynamic actor/message send. Static and dynamic refer to the allocation of the message and actor. In the static send benchmark a message and actor are allocated once and then the message is sent to the same actor repeatedly until it has been sent 100 million (100M) times. The average latency per message send is then calculated by dividing the duration by the number of sends. This benchmark evaluates the cost of message sends in the actor use case where all actors and messages are allocated ahead of time and do not need to be created dynamically during execution. 
The CAF static send benchmark only sends a message 10M times to avoid extensively long run times. 423 424 In the dynamic send benchmark the same experiment is performed, with the change that with each send a new actor and message is allocated. This evaluates the cost of message sends in the other common actor pattern where actors and messages are created on the fly as the actor program tackles a workload of variable or unknown size. Since dynamic sends are more expensive, this benchmark repeats the actor/message creation and send 20M times (\uC, \CFA), or 2M times (Akka, CAF, ProtoActor), to give an appropriate benchmark duration. 425 426 The results from the static/dynamic send benchmarks are shown in Tables~\ref{t:StaticActorMessagePerformance} and \ref{t:DynamicActorMessagePerformance} respectively. \CFA leads the charts in both benchmarks, largely due to the copy queue removing the majority of the envelope allocations. In the static send benchmark all systems except CAF have static send costs that are in the same ballpark, only varying by ~70ns. In the dynamic send benchmark all systems experience slower message sends, as expected due to the extra allocations. However, Akka and ProtoActor slow down by a more significant margin than \uC and \CFA. This is likely a result of Akka and ProtoActor's garbage collection, which can suffer from hits in performance for allocation heavy workloads, whereas \uC and \CFA have explicit allocation/deallocation. 427 428 \subsection{Work Stealing} 429 \CFA's actor system has a work stealing mechanism which uses the longest victim heuristic, introduced in Section~\ref{s:victimSelect}. In this performance section, \CFA with the longest victim heuristic is compared with other actor systems on the benchmark suite, and is separately compared with vanilla non-stealing \CFA and \CFA with randomized work stealing. 
430 431 \begin{figure} 432 \centering 433 \begin{subfigure}{0.5\textwidth} 434 \centering 435 \scalebox{0.5}{\input{figures/nasusCFABalance-One.pgf}} 436 \subcaption{AMD \CFA Balance-One Benchmark} 437 \label{f:BalanceOneAMD} 438 \end{subfigure}\hfill 439 \begin{subfigure}{0.5\textwidth} 440 \centering 441 \scalebox{0.5}{\input{figures/pykeCFABalance-One.pgf}} 442 \subcaption{Intel \CFA Balance-One Benchmark} 443 \label{f:BalanceOneIntel} 444 \end{subfigure} 445 \caption{The balance-one benchmark comparing stealing heuristics (lower is better).} 446 \end{figure} 447 448 \begin{figure} 449 \centering 450 \begin{subfigure}{0.5\textwidth} 451 \centering 452 \scalebox{0.5}{\input{figures/nasusCFABalance-Multi.pgf}} 453 \subcaption{AMD \CFA Balance-Multi Benchmark} 454 \label{f:BalanceMultiAMD} 455 \end{subfigure}\hfill 456 \begin{subfigure}{0.5\textwidth} 457 \centering 458 \scalebox{0.5}{\input{figures/pykeCFABalance-Multi.pgf}} 459 \subcaption{Intel \CFA Balance-Multi Benchmark} 460 \label{f:BalanceMultiIntel} 461 \end{subfigure} 462 \caption{The balance-multi benchmark comparing stealing heuristics (lower is better).} 463 \end{figure} 464 465 There are two benchmarks in which \CFA's work stealing is solely evaluated. The main goal of introducing work stealing to \CFA's actor system is to eliminate the pathological unbalanced cases that can present themselves in a system without some form of load balancing. The following two microbenchmarks construct two such pathological cases, and compare the work stealing variations of \CFA. The balance benchmarks adversarily takes advantage of the round robin assignment of actors to load all actors that will do work on specific cores and create 'dummy' actors that terminate after a single message send on all other cores. The workload on the loaded cores is the same as the executor benchmark described in \ref{s:executorPerf}, but with fewer rounds. 
The balance-one benchmark loads all the work on a single core, whereas the balance-multi loads all the work on half the cores (every other core). Given this layout, one expects the ideal speedup of work stealing in the balance-one case to be $N / N - 1$ where $N$ is the number of threads. In the balance-multi case the ideal speedup is 0.5. Note that in the balance-one benchmark the workload is fixed so decreasing runtime is expected. In the balance-multi experiment, the workload increases with the number of cores so an increasing or constant runtime is expected. 466 467 On both balance microbenchmarks slightly less than ideal speedup compared to the non stealing variation is achieved by both the random and longest victim stealing heuristics. On the balance-multi benchmark \ref{f:BalanceMultiAMD},\ref{f:BalanceMultiIntel} the random heuristic outperforms the longest victim. This is likely a result of the longest victim heuristic having a higher stealing cost as it needs to maintain timestamps and look at all timestamps before stealing. Additionally, a performance cost can be observed when hyperthreading kicks in in Figure~\ref{f:BalanceMultiIntel}. 468 469 In the balance-one benchmark on AMD \ref{f:BalanceOneAMD}, the performance bottoms out at 32 cores onwards likely due to the amount of work becoming less than the cost to steal it and move it across cores and cache. On Intel \ref{f:BalanceOneIntel}, above 32 cores the performance gets worse for all variants due to hyperthreading. Note that the non stealing variation of balance-one will slow down marginally as the cores increase due to having to create more dummy actors on the inactive cores during startup. 470 471 \subsection{Executor}\label{s:executorPerf} 472 The microbenchmarks in this section are designed to stress the executor. The executor is the scheduler of an actor system and is responsible for organizing the interaction of worker threads to service the needs of a workload. 
In the executor benchmark, 40'000 actors are created and assigned a group. Each group of actors is a group of 100 actors who send and receive 100 messages from all other actors in their group. Each time an actor completes all their sends and receives, they have completed a round. After all groups have completed 400 rounds the system terminates. This microbenchmark is designed to flood the executor with a large number of messages flowing between actors. Given there is no work associated with each message, other than sending more messages, the intended bottleneck of this experiment is the executor message send process. 473 474 \begin{figure} 475 \centering 476 \begin{subfigure}{0.5\textwidth} 477 \centering 478 \scalebox{0.5}{\input{figures/nasusExecutor.pgf}} 479 \subcaption{AMD Executor Benchmark} 480 \label{f:ExecutorAMD} 481 \end{subfigure}\hfill 482 \begin{subfigure}{0.5\textwidth} 483 \centering 484 \scalebox{0.5}{\input{figures/pykeExecutor.pgf}} 485 \subcaption{Intel Executor Benchmark} 486 \label{f:ExecutorIntel} 487 \end{subfigure} 488 \caption{The executor benchmark comparing actor systems (lower is better).} 489 \end{figure} 490 491 The results of the executor benchmark in Figures~\ref{f:ExecutorIntel} and \ref{f:ExecutorAMD} show \CFA with the lowest runtime relative to its peers. The difference in runtime between \uC and \CFA is largely due to the usage of the copy queue described in Section~\ref{s:copyQueue}. The copy queue both reduces and consolidates allocations, heavily reducing contention on the memory allocator. Additionally, due to the static typing in \CFA's actor system, it is able to get rid of expensive dynamic casts that occur in \uC to discriminate messages by type. Note that dynamic casts are usually not very expensive, but relative to the high performance of the rest of the implementation of the \uC actor system, the cost is significant. 
492 493 \begin{figure} 494 \centering 495 \begin{subfigure}{0.5\textwidth} 496 \centering 497 \scalebox{0.5}{\input{figures/nasusCFAExecutor.pgf}} 498 \subcaption{AMD \CFA Executor Benchmark}\label{f:cfaExecutorAMD} 499 \end{subfigure}\hfill 500 \begin{subfigure}{0.5\textwidth} 501 \centering 502 \scalebox{0.5}{\input{figures/pykeCFAExecutor.pgf}} 503 \subcaption{Intel \CFA Executor Benchmark}\label{f:cfaExecutorIntel} 504 \end{subfigure} 505 \caption{Executor benchmark comparing \CFA stealing heuristics (lower is better).} 506 \end{figure} 507 508 When comparing the \CFA stealing heuristics in Figure~\ref{f:cfaExecutorAMD} it can be seen that the random heuristic falls slightly behind the other two, but in Figure~\ref{f:cfaExecutorIntel} the runtime of all heuristics are nearly identical to eachother. 509 510 \begin{figure} 511 \centering 512 \begin{subfigure}{0.5\textwidth} 513 \centering 514 \scalebox{0.5}{\input{figures/nasusRepeat.pgf}} 515 \subcaption{AMD Repeat Benchmark}\label{f:RepeatAMD} 516 \end{subfigure}\hfill 517 \begin{subfigure}{0.5\textwidth} 518 \centering 519 \scalebox{0.5}{\input{figures/pykeRepeat.pgf}} 520 \subcaption{Intel Repeat Benchmark}\label{f:RepeatIntel} 521 \end{subfigure} 522 \caption{The repeat benchmark comparing actor systems (lower is better).} 523 \end{figure} 524 525 The repeat microbenchmark also evaluates the executor. It stresses the executor's ability to withstand contention on queues, as it repeatedly fans out messages from a single client to 100000 servers who then all respond to the client. After this scatter and gather repeats 200 times the benchmark terminates. The messages from the servers to the client will likely all come in on the same queue, resulting in high contention. As such this benchmark will not scale with the number of processors, since more processors will result in higher contention. 
In Figure~\ref{f:RepeatAMD} we can see that \CFA performs well compared to \uC, however by less of a margin than the executor benchmark. One factor in this result is that the contention on the queues poses a significant bottleneck. As such the gains from using the copy queue are much less apparent. 526 527 \begin{figure} 528 \centering 529 \begin{subfigure}{0.5\textwidth} 530 \centering 531 \scalebox{0.5}{\input{figures/nasusCFARepeat.pgf}} 532 \subcaption{AMD \CFA Repeat Benchmark}\label{f:cfaRepeatAMD} 533 \end{subfigure}\hfill 534 \begin{subfigure}{0.5\textwidth} 535 \centering 536 \scalebox{0.5}{\input{figures/pykeCFARepeat.pgf}} 537 \subcaption{Intel \CFA Repeat Benchmark}\label{f:cfaRepeatIntel} 538 \end{subfigure} 539 \caption{The repeat benchmark comparing \CFA stealing heuristics (lower is better).} 540 \end{figure} 541 542 In Figure~\ref{f:RepeatIntel} \uC and \CFA are very comparable. 543 In comparison with the other systems \uC does well on the repeat benchmark since it does not have work stealing. The client of this experiment is long running and maintains a lot of state, as it needs to know the handles of all the servers. When stealing the client or its respective queue (in \CFA's inverted model), moving the client incurs a high cost due to cache invalidation. As such stealing the client can result in a hit in performance. 544 This result is shown in Figure~\ref{f:cfaRepeatAMD} and \ref{f:cfaRepeatIntel} where the no-stealing version of \CFA performs better than both stealing variations. 545 In particular on the Intel machine in Figure~\ref{f:cfaRepeatIntel}, the cost of stealing is higher, which can be seen in the vertical shift of Akka, CAF and CFA results in Figure~\ref{f:RepeatIntel} (\uC and ProtoActor do not have work stealing). The shift for CAF is particularly large, which further supports the hypothesis that CAF's work stealing is particularly eager. 546 In both the executor and the repeat benchmark CAF performs poorly. 
It is hypothesized that CAF has an aggressive work stealing algorithm that eagerly attempts to steal. This results in poor performance in benchmarks with small messages containing little work per message. On the other hand, in \ref{f:MatrixAMD} CAF performs much better since each message has a large amount of work, and few messages are sent, so the eager work stealing allows for the clean up of loose ends to occur faster. This hypothesis stems from experimentation with \CFA. CAF uses a randomized work stealing heuristic. In \CFA, if the system is tuned so that it steals work much more eagerly with a randomized heuristic, it was able to replicate the results that CAF achieves in the matrix benchmark, but this tuning performed much worse on all other microbenchmarks that we present, since they all perform a small amount of work per message. 547 548 \begin{table}[t] 549 \centering 550 \setlength{\extrarowheight}{2pt} 551 \setlength{\tabcolsep}{5pt} 552 553 \caption{Executor Program Memory High Watermark} 554 \label{t:ExecutorMemory} 555 \begin{tabular}{*{5}{r|}r} 556 & \multicolumn{1}{c|}{\CFA} & \multicolumn{1}{c|}{CAF} & \multicolumn{1}{c|}{Akka} & \multicolumn{1}{c|}{\uC} & \multicolumn{1}{c@{}}{ProtoActor} \\ 557 \hline 558 AMD & \input{data/pykeExecutorMem} \\ 559 \hline 560 Intel & \input{data/nasusExecutorMem} 561 \end{tabular} 562 \end{table} 563 564 Table~\ref{t:ExecutorMemory} shows the high memory watermark of the actor systems when running the executor benchmark on 48 cores. \CFA has a high watermark relative to the other non-garbage collected systems, \uC and CAF. This is a result of the copy queue data structure, as it will overallocate storage and not clean up eagerly, whereas the per envelope allocations will always allocate exactly the amount of storage needed. 565 566 \subsection{Matrix Multiply} 567 The matrix benchmark evaluates the actor systems in a practical application, where actors concurrently multiply two matrices. 
The majority of the computation in this benchmark involves computing the final matrix, so this benchmark stresses the actor systems' ability to have actors run work, rather than stressing the executor or message sending system. 568 569 Given $Z_{m,r} = X_{m,n} \cdot Y_{n,r}$, the matrix multiply is defined as: 570 \begin{displaymath} 571 X_{i,j} \cdot Y_{j,k} = \left( \sum_{c=1}^{j} X_{row,c}Y_{c,column} \right)_{i,k} 572 \end{displaymath} 573 574 The benchmark uses input matrices $X$ and $Y$ that are both $3072$ by $3072$ in size. An actor is made for each row of $X$ and is passed via message the information needed to calculate a row of the result matrix $Z$. 575 576 Given that the bottleneck of the benchmark is the computation of the result matrix, it follows that the results in Figures~\ref{f:MatrixAMD} and \ref{f:MatrixIntel} are clustered closer than other experiments. In Figure~\ref{f:MatrixAMD} \uC and \CFA have identical performance and in Figure~\ref{f:MatrixIntel} \uC pulls ahead of \CFA after 24 cores likely due to costs associated with work stealing while hyperthreading. As mentioned in Section~\ref{s:executorPerf}, it is hypothesized that CAF performs better in this benchmark compared to others due to its eager work stealing implementation. In Figures~\ref{f:cfaMatrixAMD} and \ref{f:cfaMatrixIntel} there is negligible performance difference across \CFA stealing heuristics. 
577 578 \begin{figure} 579 \centering 580 \begin{subfigure}{0.5\textwidth} 581 \centering 582 \scalebox{0.5}{\input{figures/nasusMatrix.pgf}} 583 \subcaption{AMD Matrix Benchmark}\label{f:MatrixAMD} 584 \end{subfigure}\hfill 585 \begin{subfigure}{0.5\textwidth} 586 \centering 587 \scalebox{0.5}{\input{figures/pykeMatrix.pgf}} 588 \subcaption{Intel Matrix Benchmark}\label{f:MatrixIntel} 589 \end{subfigure} 590 \caption{The matrix benchmark comparing actor systems (lower is better).} 591 \end{figure} 592 593 \begin{figure} 594 \centering 595 \begin{subfigure}{0.5\textwidth} 596 \centering 597 \scalebox{0.5}{\input{figures/nasusCFAMatrix.pgf}} 598 \subcaption{AMD \CFA Matrix Benchmark}\label{f:cfaMatrixAMD} 599 \end{subfigure}\hfill 600 \begin{subfigure}{0.5\textwidth} 601 \centering 602 \scalebox{0.5}{\input{figures/pykeCFAMatrix.pgf}} 603 \subcaption{Intel \CFA Matrix Benchmark}\label{f:cfaMatrixIntel} 604 \end{subfigure} 605 \caption{The matrix benchmark comparing \CFA stealing heuristics (lower is better).} 606 \end{figure} -
doc/theses/colby_parsons_MMAth/thesis.tex
r1afd9ccb rd800676 23 23 \usepackage{calc} 24 24 \usepackage{xspace} 25 \usepackage[labelformat=simple]{subfig} 26 \renewcommand{\thesubfigure}{(\alph{subfigure})} 25 % \usepackage[labelformat=simple]{subfig} 26 % \renewcommand{\thesubfigure}{(\alph{subfigure})} 27 \usepackage{subcaption} 28 % \usepackage{subfigure} 27 29 \usepackage{graphicx} 28 30 \usepackage{tabularx} … … 102 104 % FRONT MATERIAL 103 105 %---------------------------------------------------------------------- 104 %\input{frontpgs}106 \input{frontpgs} 105 107 106 108 %---------------------------------------------------------------------- … … 111 113 112 114 \input{CFA_intro} 115 116 \input{CFA_concurrency} 117 118 \input{mutex_stmt} 113 119 114 120 \input{actors} -
doc/theses/mike_brooks_MMath/Makefile
r1afd9ccb rd800676 8 8 PicSRC = ${notdir ${wildcard ${Pictures}/*.png}} 9 9 DemoSRC = ${notdir ${wildcard ${Programs}/*-demo.cfa}} 10 PgmSRC = ${notdir ${wildcard ${Programs}/*.cfa}} 10 PgmSRC = ${notdir ${wildcard ${Programs}/*}} 11 RunPgmSRC = ${notdir ${wildcard ${Programs}/*.run.*}} 11 12 BibSRC = ${wildcard *.bib} 12 13 … … 14 15 BibLIB = .:../../bibliography # common citation repository 15 16 16 MAKEFLAGS = --no-print-directory # --silent17 #MAKEFLAGS = --no-print-directory # --silent 17 18 VPATH = ${Build} ${Pictures} ${Programs} # extra search path for file names used in document 18 19 … … 20 21 BASE = ${basename ${DOCUMENT}} # remove suffix 21 22 23 DemoTex = ${DemoSRC:%.cfa=${Build}/%.tex} 24 RunPgmExe = ${addprefix ${Build}/,${basename ${basename ${RunPgmSRC}}}} 25 RunPgmOut = ${RunPgmExe:%=%.out} 26 22 27 # Commands 23 28 24 29 LaTeX = TEXINPUTS=${TeXLIB} && export TEXINPUTS && pdflatex -halt-on-error -output-directory=${Build} 25 30 BibTeX = BIBINPUTS=${BibLIB} && export BIBINPUTS && bibtex 26 CFA = cfa 31 CFA = cfa -O0 -g 32 CC = gcc -O0 -g 33 CXX = g++-11 --std=c++20 -O0 -g 27 34 28 35 # Rules and Recipes 29 36 30 .PHONY : all clean # not file names 37 .PHONY : all fragments_ran clean # not file names 38 .PRECIOUS : ${Build}/% ${Build}/%-demo # don't delete intermediates 31 39 .ONESHELL : 32 40 33 all : ${DOCUMENT} 41 all : fragments_ran ${DOCUMENT} 42 43 fragments_ran : $(RunPgmOut) 34 44 35 45 clean : … … 38 48 # File Dependencies 39 49 40 %.pdf : ${TeXSRC} ${Demo SRC:%.cfa=%.tex} ${PicSRC} ${PgmSRC} ${BibSRC} Makefile | ${Build}50 %.pdf : ${TeXSRC} ${DemoTex} ${PicSRC} ${PgmSRC} ${BibSRC} Makefile | ${Build} 41 51 ${LaTeX} ${BASE} 42 52 ${BibTeX} ${Build}/${BASE} … … 52 62 53 63 %-demo.tex: %-demo | ${Build} 54 $ {Build}/$< > ${Build}/$@64 $< > $@ 55 65 56 %-demo: %-demo.cfa 57 ${CFA} $< -o $ {Build}/$@66 ${Build}/%-demo: ${Programs}/%-demo.cfa | ${Build} 67 ${CFA} $< -o $@ 58 68 69 ${Build}/%: ${Programs}/%.run.cfa | ${Build} 70 ${CFA} $< -o $@ 
71 72 ${Build}/%: ${Programs}/%.run.c | ${Build} 73 ${CC} $< -o $@ 74 75 ${Build}/%: ${Programs}/%.run.cpp | ${Build} 76 ${CXX} -MMD $< -o $@ 77 78 ${Build}/%.out: ${Build}/% | ${Build} 79 $< > $@ 80 81 -include ${Build}/*.d -
doc/theses/mike_brooks_MMath/uw-ethesis.bib
r1afd9ccb rd800676 65 65 bibsource = {dblp computer science bibliography, https://dblp.org} 66 66 } 67 68 % -------------------------------------------------- 69 % Linked-list prior work 70 71 @misc{CFAStackEvaluation, 72 contributer = {a3moss@plg}, 73 author = {Aaron Moss}, 74 title = {\textsf{C}$\mathbf{\forall}$ Stack Evaluation Programs}, 75 year = 2018, 76 howpublished= {\href{https://cforall.uwaterloo.ca/CFAStackEvaluation.zip}{https://cforall.uwaterloo.ca/\-CFAStackEvaluation.zip}}, 77 } 78 79 @misc{lst:linuxq, 80 title = {queue(7) — Linux manual page}, 81 howpublished= {\href{https://man7.org/linux/man-pages/man3/queue.3.html}{https://man7.org/linux/man-pages/man3/queue.3.html}}, 82 } 83 % see also https://man7.org/linux/man-pages/man7/queue.7.license.html 84 % https://man7.org/tlpi/ 85 % https://www.kernel.org/doc/man-pages/ 86 87 @misc{lst:stl, 88 title = {std::list}, 89 howpublished= {\href{https://en.cppreference.com/w/cpp/container/list}{https://en.cppreference.com/w/cpp/container/list}}, 90 } 91 -
doc/theses/mike_brooks_MMath/uw-ethesis.tex
r1afd9ccb rd800676 60 60 % For hyperlinked PDF, suitable for viewing on a computer, use this: 61 61 \documentclass[letterpaper,12pt,titlepage,oneside,final]{book} 62 \usepackage{times} 62 63 \usepackage[T1]{fontenc} % Latin-1 => 256-bit characters, => | not dash, <> not Spanish question marks 63 64 … … 87 88 \usepackage{comment} % Removes large sections of the document. 88 89 \usepackage{tabularx} 89 \usepackage{subfigure} 90 \usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt,font=normalsize]{subfig} 91 \renewcommand\thesubfigure{(\alph{subfigure})} 90 92 91 93 \usepackage{algorithm} … … 115 117 citecolor=blue, % color of links to bibliography 116 118 filecolor=magenta, % color of file links 117 urlcolor=blue % color of external links 119 urlcolor=blue, % color of external links 120 breaklinks=true 118 121 } 119 122 \ifthenelse{\boolean{PrintVersion}}{ % for improved print quality, change some hyperref options … … 129 132 % although it's supposed to be in both the TeX Live and MikTeX distributions. There are also documentation and 130 133 % installation instructions there. 134 135 % Customizing tabularx 136 \newcolumntype{Y}{>{\centering\arraybackslash}X} 131 137 132 138 % Setting up the page margins... … … 175 181 \CFAStyle % CFA code-style 176 182 \lstset{language=CFA} % default language 177 \lstset{basicstyle=\linespread{0.9}\ tt} % CFA typewriter font183 \lstset{basicstyle=\linespread{0.9}\sf} % CFA typewriter font 178 184 \lstset{inputpath={programs}} 179 185 \newcommand{\PAB}[1]{{\color{red}PAB: #1}} 186 187 \newcommand{\uCpp}{$\mu$\CC} 180 188 181 189 %====================================================================== … … 201 209 %---------------------------------------------------------------------- 202 210 \begin{sloppypar} 203 204 211 \input{intro} 205 212 \input{background} 213 \input{list} 206 214 \input{array} 207 215 \input{string} -
libcfa/src/Makefile.am
r1afd9ccb rd800676 48 48 math.hfa \ 49 49 time_t.hfa \ 50 50 virtual_dtor.hfa \ 51 51 bits/algorithm.hfa \ 52 52 bits/align.hfa \ … … 69 69 vec/vec2.hfa \ 70 70 vec/vec3.hfa \ 71 vec/vec4.hfa 71 vec/vec4.hfa 72 72 73 73 inst_headers_src = \ -
libcfa/src/bits/random.hfa
r1afd9ccb rd800676 10 10 // Created On : Fri Jan 14 07:18:11 2022 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Dec 22 20:54:22 202213 // Update Count : 1 7812 // Last Modified On : Mon Mar 20 21:45:24 2023 13 // Update Count : 186 14 14 // 15 15 … … 28 28 #define XOSHIRO256PP 29 29 //#define KISS_64 30 // #define SPLITMIX_64 30 31 31 32 // 32-bit generators 32 33 //#define XORSHIFT_6_21_7 33 34 #define XOSHIRO128PP 35 // #define SPLITMIX_32 34 36 #else // 32-bit architecture 35 37 // 64-bit generators 36 38 //#define XORSHIFT_13_7_17 37 39 #define XOSHIRO256PP 40 // #define SPLITMIX_64 38 41 39 42 // 32-bit generators 40 43 //#define XORSHIFT_6_21_7 41 44 #define XOSHIRO128PP 45 // #define SPLITMIX_32 42 46 #endif // __x86_64__ 43 47 44 48 // Define C/CFA PRNG name and random-state. 45 46 // SKULLDUGGERY: typedefs name struct and typedef with the same name to deal with CFA typedef numbering problem.47 49 48 50 #ifdef XOSHIRO256PP 49 51 #define PRNG_NAME_64 xoshiro256pp 50 52 #define PRNG_STATE_64_T GLUE(PRNG_NAME_64,_t) 51 typedef struct PRNG_STATE_64_T{ uint64_t s0, s1, s2, s3; } PRNG_STATE_64_T;53 typedef struct { uint64_t s0, s1, s2, s3; } PRNG_STATE_64_T; 52 54 #endif // XOSHIRO256PP 53 55 … … 55 57 #define PRNG_NAME_32 xoshiro128pp 56 58 #define PRNG_STATE_32_T GLUE(PRNG_NAME_32,_t) 57 typedef struct PRNG_STATE_32_T{ uint32_t s0, s1, s2, s3; } PRNG_STATE_32_T;59 typedef struct { uint32_t s0, s1, s2, s3; } PRNG_STATE_32_T; 58 60 #endif // XOSHIRO128PP 59 61 … … 83 85 #endif // XORSHIFT_12_25_27 84 86 87 #ifdef SPLITMIX_64 88 #define PRNG_NAME_64 splitmix64 89 #define PRNG_STATE_64_T uint64_t 90 #endif // SPLITMIX32 91 92 #ifdef SPLITMIX_32 93 #define PRNG_NAME_32 splitmix32 94 #define PRNG_STATE_32_T uint32_t 95 #endif // SPLITMIX32 96 85 97 #ifdef KISS_64 86 98 #define PRNG_NAME_64 kiss_64 87 99 #define PRNG_STATE_64_T GLUE(PRNG_NAME_64,_t) 88 typedef struct PRNG_STATE_64_T{ uint64_t z, w, jsr, jcong; } PRNG_STATE_64_T;100 typedef 
struct { uint64_t z, w, jsr, jcong; } PRNG_STATE_64_T; 89 101 #endif // KISS_^64 90 102 … … 92 104 #define PRNG_NAME_32 xorwow 93 105 #define PRNG_STATE_32_T GLUE(PRNG_NAME_32,_t) 94 typedef struct PRNG_STATE_32_T{ uint32_t a, b, c, d, counter; } PRNG_STATE_32_T;106 typedef struct { uint32_t a, b, c, d, counter; } PRNG_STATE_32_T; 95 107 #endif // XOSHIRO128PP 96 108 … … 119 131 #ifdef __cforall // don't include in C code (invoke.h) 120 132 133 // https://rosettacode.org/wiki/Pseudo-random_numbers/Splitmix64 134 // 135 // Splitmix64 is not recommended for demanding random number requirements, but is often used to calculate initial states 136 // for other more complex pseudo-random number generators (see https://prng.di.unimi.it). 137 // Also https://rosettacode.org/wiki/Pseudo-random_numbers/Splitmix64. 138 static inline uint64_t splitmix64( uint64_t & state ) { 139 state += 0x9e3779b97f4a7c15; 140 uint64_t z = state; 141 z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9; 142 z = (z ^ (z >> 27)) * 0x94d049bb133111eb; 143 return z ^ (z >> 31); 144 } // splitmix64 145 146 static inline void splitmix64_set_seed( uint64_t & state , uint64_t seed ) { 147 state = seed; 148 splitmix64( state ); // prime 149 } // splitmix64_set_seed 150 151 // https://github.com/bryc/code/blob/master/jshash/PRNGs.md#splitmix32 152 // 153 // Splitmix32 is not recommended for demanding random number requirements, but is often used to calculate initial states 154 // for other more complex pseudo-random number generators (see https://prng.di.unimi.it). 
155 156 static inline uint32_t splitmix32( uint32_t & state ) { 157 state += 0x9e3779b9; 158 uint64_t z = state; 159 z = (z ^ (z >> 15)) * 0x85ebca6b; 160 z = (z ^ (z >> 13)) * 0xc2b2ae35; 161 return z ^ (z >> 16); 162 } // splitmix32 163 164 static inline void splitmix32_set_seed( uint32_t & state, uint64_t seed ) { 165 state = seed; 166 splitmix32( state ); // prime 167 } // splitmix32_set_seed 168 169 #ifdef __SIZEOF_INT128__ 170 //-------------------------------------------------- 171 static inline uint64_t lehmer64( __uint128_t & state ) { 172 __uint128_t ret = state; 173 state *= 0x_da94_2042_e4dd_58b5; 174 return ret >> 64; 175 } // lehmer64 176 177 static inline void lehmer64_set_seed( __uint128_t & state, uint64_t seed ) { 178 // The seed needs to be coprime with the 2^64 modulus to get the largest period, so no factors of 2 in the seed. 179 state = splitmix64( seed ); // prime 180 } // lehmer64_set_seed 181 182 //-------------------------------------------------- 183 static inline uint64_t wyhash64( uint64_t & state ) { 184 uint64_t ret = state; 185 state += 0x_60be_e2be_e120_fc15; 186 __uint128_t tmp; 187 tmp = (__uint128_t) ret * 0x_a3b1_9535_4a39_b70d; 188 uint64_t m1 = (tmp >> 64) ^ tmp; 189 tmp = (__uint128_t)m1 * 0x_1b03_7387_12fa_d5c9; 190 uint64_t m2 = (tmp >> 64) ^ tmp; 191 return m2; 192 } // wyhash64 193 194 static inline void wyhash64_set_seed( uint64_t & state, uint64_t seed ) { 195 state = splitmix64( seed ); // prime 196 } // wyhash64_set_seed 197 #endif // __SIZEOF_INT128__ 198 121 199 // https://prng.di.unimi.it/xoshiro256starstar.c 122 200 // … … 130 208 131 209 #ifndef XOSHIRO256PP 132 typedef struct xoshiro256pp_t{ uint64_t s0, s1, s2, s3; } xoshiro256pp_t;210 typedef struct { uint64_t s0, s1, s2, s3; } xoshiro256pp_t; 133 211 #endif // ! 
XOSHIRO256PP 134 212 … … 151 229 152 230 static inline void xoshiro256pp_set_seed( xoshiro256pp_t & state, uint64_t seed ) { 153 state = (xoshiro256pp_t){ seed, seed, seed, seed }; 154 xoshiro256pp( state ); 231 // To attain repeatable seeding, compute seeds separately because the order of argument evaluation is undefined. 232 uint64_t seed1 = splitmix64( seed ); // prime 233 uint64_t seed2 = splitmix64( seed ); 234 uint64_t seed3 = splitmix64( seed ); 235 uint64_t seed4 = splitmix64( seed ); 236 state = (xoshiro256pp_t){ seed1, seed2, seed3, seed4 }; 155 237 } // xoshiro256pp_set_seed 156 238 … … 165 247 166 248 #ifndef XOSHIRO128PP 167 typedef struct xoshiro128pp_t{ uint32_t s0, s1, s2, s3; } xoshiro128pp_t;249 typedef struct { uint32_t s0, s1, s2, s3; } xoshiro128pp_t; 168 250 #endif // ! XOSHIRO128PP 169 251 … … 186 268 187 269 static inline void xoshiro128pp_set_seed( xoshiro128pp_t & state, uint32_t seed ) { 188 state = (xoshiro128pp_t){ seed, seed, seed, seed }; 189 xoshiro128pp( state ); // prime 270 // To attain repeatable seeding, compute seeds separately because the order of argument evaluation is undefined. 
271 uint32_t seed1 = splitmix32( seed ); // prime 272 uint32_t seed2 = splitmix32( seed ); 273 uint32_t seed3 = splitmix32( seed ); 274 uint32_t seed4 = splitmix32( seed ); 275 state = (xoshiro128pp_t){ seed1, seed2, seed3, seed4 }; 190 276 } // xoshiro128pp_set_seed 191 192 #ifdef __SIZEOF_INT128__193 //--------------------------------------------------194 static inline uint64_t lehmer64( __uint128_t & state ) {195 __uint128_t ret = state;196 state *= 0x_da94_2042_e4dd_58b5;197 return ret >> 64;198 } // lehmer64199 200 static inline void lehmer64_set_seed( __uint128_t & state, uint64_t seed ) {201 // The seed needs to be coprime with the 2^64 modulus to get the largest period, so no factors of 2 in the seed.202 state = seed;203 lehmer64( state ); // prime204 } // lehmer64_set_seed205 206 //--------------------------------------------------207 static inline uint64_t wyhash64( uint64_t & state ) {208 uint64_t ret = state;209 state += 0x_60be_e2be_e120_fc15;210 __uint128_t tmp;211 tmp = (__uint128_t) ret * 0x_a3b1_9535_4a39_b70d;212 uint64_t m1 = (tmp >> 64) ^ tmp;213 tmp = (__uint128_t)m1 * 0x_1b03_7387_12fa_d5c9;214 uint64_t m2 = (tmp >> 64) ^ tmp;215 return m2;216 } // wyhash64217 218 static inline void wyhash64_set_seed( uint64_t & state, uint64_t seed ) {219 state = seed;220 wyhash64( state ); // prime221 } // wyhash64_set_seed222 #endif // __SIZEOF_INT128__223 277 224 278 //-------------------------------------------------- … … 232 286 233 287 static inline void xorshift_13_7_17_set_seed( uint64_t & state, uint64_t seed ) { 234 state = seed; 235 xorshift_13_7_17( state ); // prime 288 state = splitmix64( seed ); // prime 236 289 } // xorshift_13_7_17_set_seed 237 290 … … 250 303 251 304 static inline void xorshift_6_21_7_set_seed( uint32_t & state, uint32_t seed ) { 252 state = seed; 253 xorshift_6_21_7( state ); // prime 305 state = splitmix32( seed ); // prime 254 306 } // xorshift_6_21_7_set_seed 255 307 … … 265 317 266 318 static inline void 
xorshift_12_25_27_set_seed( uint64_t & state, uint64_t seed ) { 267 state = seed; 268 xorshift_12_25_27( state ); // prime 319 state = splitmix64( seed ); // prime 269 320 } // xorshift_12_25_27_set_seed 270 321 … … 272 323 // The state must be seeded with a nonzero value. 273 324 #ifndef KISS_64 274 typedef struct kiss_64_t{ uint64_t z, w, jsr, jcong; } kiss_64_t;325 typedef struct { uint64_t z, w, jsr, jcong; } kiss_64_t; 275 326 #endif // ! KISS_64 276 327 … … 287 338 288 339 static inline void kiss_64_set_seed( kiss_64_t & rs, uint64_t seed ) with(rs) { 289 z = 1; w = 1; jsr = 4; jcong = seed; 290 kiss_64( rs ); // prime 340 z = 1; w = 1; jsr = 4; jcong = splitmix64( seed ); // prime 291 341 } // kiss_64_set_seed 292 342 … … 294 344 // The state array must be initialized to non-zero in the first four words. 295 345 #ifndef XORWOW 296 typedef struct xorwow_t{ uint32_t a, b, c, d, counter; } xorwow_t;346 typedef struct { uint32_t a, b, c, d, counter; } xorwow_t; 297 347 #endif // ! XORWOW 298 348 … … 316 366 317 367 static inline void xorwow_set_seed( xorwow_t & rs, uint32_t seed ) { 318 rs = (xorwow_t){ seed, seed, seed, seed, 0 }; 319 xorwow( rs ); // prime 368 // To attain repeatable seeding, compute seeds separately because the order of argument evaluation is undefined. 369 uint32_t seed1 = splitmix32( seed ); // prime 370 uint32_t seed2 = splitmix32( seed ); 371 uint32_t seed3 = splitmix32( seed ); 372 uint32_t seed4 = splitmix32( seed ); 373 rs = (xorwow_t){ seed1, seed2, seed3, seed4, 0 }; 320 374 } // xorwow_set_seed 321 375 … … 323 377 // Used in __tls_rand_fwd 324 378 #define M (1_l64u << 48_l64u) 325 #define A (25 214903917_l64u)326 #define AI (18 446708753438544741_l64u)379 #define A (25_214_903_917_l64u) 380 #define AI (18_446_708_753_438_544_741_l64u) 327 381 #define C (11_l64u) 328 382 #define D (16_l64u) -
libcfa/src/concurrency/channel.hfa
r1afd9ccb rd800676 28 28 exp_backoff_then_block_lock c_lock, p_lock; 29 29 __spinlock_t mutex_lock; 30 char __padding[64]; // avoid false sharing in arrays 30 31 }; 31 32 -
libcfa/src/concurrency/mutex_stmt.hfa
r1afd9ccb rd800676 27 27 // Sort locks based on address 28 28 __libcfa_small_sort(this.lockarr, count); 29 30 // acquire locks in order31 // for ( size_t i = 0; i < count; i++ ) {32 // lock(*this.lockarr[i]);33 // }34 }35 36 static inline void ^?{}( __mutex_stmt_lock_guard & this ) with(this) {37 // for ( size_t i = count; i > 0; i-- ) {38 // unlock(*lockarr[i - 1]);39 // }40 29 } 41 30 -
libcfa/src/containers/list.hfa
r1afd9ccb rd800676 32 32 static inline tytagref(void, T) ?`inner ( T & this ) { tytagref( void, T ) ret = {this}; return ret; } 33 33 34 // use this on every case of plan-9 inheritance, to make embedded a closure of plan-9 inheritance 35 #define P9_EMBEDDED( derived, immedBase ) \ 36 forall( Tbase &, TdiscardPath & | { tytagref( TdiscardPath, Tbase ) ?`inner( immedBase & ); } ) \ 37 static inline tytagref(immedBase, Tbase) ?`inner( derived & this ) { \ 34 35 // 36 // P9_EMBEDDED: Use on every case of plan-9 inheritance, to make "implements embedded" be a closure of plan-9 inheritance. 37 // 38 // struct foo { 39 // int a, b, c; 40 // inline (bar); 41 // }; 42 // P9_EMBEDDED( foo, bar ) 43 // 44 45 // usual version, for structs that are top-level declarations 46 #define P9_EMBEDDED( derived, immedBase ) P9_EMBEDDED_DECL_( derived, immedBase, static ) P9_EMBEDDED_BDY_( immedBase ) 47 48 // special version, for structs that are declared in functions 49 #define P9_EMBEDDED_INFUNC( derived, immedBase ) P9_EMBEDDED_DECL_( derived, immedBase, ) P9_EMBEDDED_BDY_( immedBase ) 50 51 // forward declarations of both the above; generally not needed 52 // may help you control where the P9_EMBEEDED cruft goes, in case "right after the stuct" isn't where you want it 53 #define P9_EMBEDDED_FWD( derived, immedBase ) P9_EMBEDDED_DECL_( derived, immedBase, static ) ; 54 #define P9_EMBEDDED_FWD_INFUNC( derived, immedBase ) auto P9_EMBEDDED_DECL_( derived, immedBase, ) ; 55 56 // private helpers 57 #define P9_EMBEDDED_DECL_( derived, immedBase, STORAGE ) \ 58 forall( Tbase &, TdiscardPath & | { tytagref( TdiscardPath, Tbase ) ?`inner( immedBase & ); } ) \ 59 STORAGE inline tytagref(immedBase, Tbase) ?`inner( derived & this ) 60 61 #define P9_EMBEDDED_BDY_( immedBase ) { \ 38 62 immedBase & ib = this; \ 39 63 Tbase & b = ib`inner; \ -
src/ControlStruct/ExceptTranslateNew.cpp
r1afd9ccb rd800676 314 314 nullptr, 315 315 ast::Storage::Classes{}, 316 ast::Linkage::Cforall 316 ast::Linkage::Cforall, 317 {}, 318 { ast::Function::Inline } 317 319 ); 318 320 } -
src/Parser/ExpressionNode.cc
r1afd9ccb rd800676 164 164 } else { 165 165 // At least one digit in integer constant, so safe to backup while looking for suffix. 166 // This declaration and the comma expressions in the conditions mimic 167 // the declare and check pattern allowed in later compiler versions. 168 // (Only some early compilers/C++ standards do not support it.) 166 169 string::size_type posn; 167 170 // pointer value 168 if ( posn = str.find_last_of( "pP" ) ;posn != string::npos ) {171 if ( posn = str.find_last_of( "pP" ), posn != string::npos ) { 169 172 ltype = 5; str.erase( posn, 1 ); 170 173 // size_t 171 } else if ( posn = str.find_last_of( "zZ" ) ;posn != string::npos ) {174 } else if ( posn = str.find_last_of( "zZ" ), posn != string::npos ) { 172 175 Unsigned = true; type = 2; ltype = 4; str.erase( posn, 1 ); 173 176 // signed char 174 } else if ( posn = str.rfind( "hh" ) ;posn != string::npos ) {177 } else if ( posn = str.rfind( "hh" ), posn != string::npos ) { 175 178 type = 1; str.erase( posn, 2 ); 176 179 // signed char 177 } else if ( posn = str.rfind( "HH" ) ;posn != string::npos ) {180 } else if ( posn = str.rfind( "HH" ), posn != string::npos ) { 178 181 type = 1; str.erase( posn, 2 ); 179 182 // short 180 } else if ( posn = str.find_last_of( "hH" ) ;posn != string::npos ) {183 } else if ( posn = str.find_last_of( "hH" ), posn != string::npos ) { 181 184 type = 0; str.erase( posn, 1 ); 182 185 // int (natural number) 183 } else if ( posn = str.find_last_of( "nN" ) ;posn != string::npos ) {186 } else if ( posn = str.find_last_of( "nN" ), posn != string::npos ) { 184 187 type = 2; str.erase( posn, 1 ); 185 188 } else if ( str.rfind( "ll" ) != string::npos || str.rfind( "LL" ) != string::npos ) { -
src/Parser/parser.yy
r1afd9ccb rd800676 10 10 // Created On : Sat Sep 1 20:22:55 2001 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Mar 14 09:37:58202313 // Update Count : 599012 // Last Modified On : Wed Mar 22 21:26:01 2023 13 // Update Count : 6002 14 14 // 15 15 … … 270 270 SemanticError( yylloc, ::toString( "Identifier \"", identifier, "\" cannot appear before a ", kind, ".\n" 271 271 "Possible cause is misspelled storage/CV qualifier, misspelled typename, or missing generic parameter." ) ); 272 } // IdentifierBeforeType 273 274 static bool TypedefForall( DeclarationNode * decl ) { 275 if ( decl->type->forall || (decl->type->kind == TypeData::Aggregate && decl->type->aggregate.params) ) { 276 SemanticError( yylloc, "forall qualifier in typedef is currently unimplemented." ); 277 return true; 278 } // if 279 return false; 272 280 } // IdentifierBeforeType 273 281 … … 496 504 %type<decl> typedef_name typedef_declaration typedef_expression 497 505 498 %type<decl> variable_type_redeclarator type_ptr type_array type_function 506 %type<decl> variable_type_redeclarator variable_type_ptr variable_type_array variable_type_function 507 %type<decl> general_function_declarator function_type_redeclarator function_type_array function_type_no_ptr function_type_ptr 499 508 500 509 %type<decl> type_parameter_redeclarator type_parameter_ptr type_parameter_array type_parameter_function … … 1957 1966 TYPEDEF type_specifier declarator 1958 1967 { 1959 // if type_specifier is an anon aggregate => name1960 1968 typedefTable.addToEnclosingScope( *$3->name, TYPEDEFname, "4" ); 1961 $$ = $3->addType( $2 )->addTypedef(); // watchout frees $2 and $3 1969 if ( TypedefForall( $2 ) ) $$ = nullptr; 1970 else $$ = $3->addType( $2 )->addTypedef(); // watchout frees $2 and $3 1962 1971 } 1963 1972 | typedef_declaration pop ',' push declarator … … 1969 1978 { 1970 1979 typedefTable.addToEnclosingScope( *$4->name, TYPEDEFname, "6" ); 1971 $$ = $4->addQualifiers( $1 )->addType( $3 
)->addTypedef(); 1980 if ( TypedefForall( $1 ) ) $$ = nullptr; 1981 else $$ = $4->addQualifiers( $1 )->addType( $3 )->addTypedef(); 1972 1982 } 1973 1983 | type_specifier TYPEDEF declarator 1974 1984 { 1975 1985 typedefTable.addToEnclosingScope( *$3->name, TYPEDEFname, "7" ); 1976 $$ = $3->addType( $1 )->addTypedef(); 1986 if ( TypedefForall( $1 ) ) $$ = nullptr; 1987 else $$ = $3->addType( $1 )->addTypedef(); 1977 1988 } 1978 1989 | type_specifier TYPEDEF type_qualifier_list declarator 1979 1990 { 1980 1991 typedefTable.addToEnclosingScope( *$4->name, TYPEDEFname, "8" ); 1981 $$ = $4->addQualifiers( $1 )->addType( $1 )->addTypedef(); 1992 if ( TypedefForall( $3 ) ) $$ = nullptr; 1993 else $$ = $4->addQualifiers( $1 )->addType( $1 )->addTypedef(); 1982 1994 } 1983 1995 ; … … 2016 2028 // A semantic check is required to ensure asm_name only appears on declarations with implicit or explicit static 2017 2029 // storage-class 2018 declarator asm_name_opt initializer_opt2030 variable_declarator asm_name_opt initializer_opt 2019 2031 { $$ = $1->addAsmName( $2 )->addInitializer( $3 ); } 2032 | variable_type_redeclarator asm_name_opt initializer_opt 2033 { $$ = $1->addAsmName( $2 )->addInitializer( $3 ); } 2034 2035 | general_function_declarator asm_name_opt 2036 { $$ = $1->addAsmName( $2 )->addInitializer( nullptr ); } 2037 | general_function_declarator asm_name_opt '=' VOID 2038 { $$ = $1->addAsmName( $2 )->addInitializer( new InitializerNode( true ) ); } 2039 2020 2040 | declaring_list ',' attribute_list_opt declarator asm_name_opt initializer_opt 2021 2041 { $$ = $1->appendList( $4->addQualifiers( $3 )->addAsmName( $5 )->addInitializer( $6 ) ); } 2042 ; 2043 2044 general_function_declarator: 2045 function_type_redeclarator 2046 | function_declarator 2022 2047 ; 2023 2048 … … 2543 2568 // A semantic check is required to ensure bit_subrange only appears on integral types. 
2544 2569 { $$ = $1->addBitfield( $2 ); } 2570 | function_type_redeclarator bit_subrange_size_opt 2571 // A semantic check is required to ensure bit_subrange only appears on integral types. 2572 { $$ = $1->addBitfield( $2 ); } 2545 2573 ; 2546 2574 … … 3195 3223 $$ = $2->addFunctionBody( $4, $3 )->addType( $1 ); 3196 3224 } 3197 | declaration_specifier variable_type_redeclarator with_clause_opt compound_statement3225 | declaration_specifier function_type_redeclarator with_clause_opt compound_statement 3198 3226 { 3199 3227 rebindForall( $1, $2 ); … … 3231 3259 | variable_type_redeclarator 3232 3260 | function_declarator 3261 | function_type_redeclarator 3233 3262 ; 3234 3263 … … 3481 3510 ; 3482 3511 3483 // This pattern parses a declaration for a variable or function prototypethat redefines a type name, e.g.:3512 // This pattern parses a declaration for a variable that redefines a type name, e.g.: 3484 3513 // 3485 3514 // typedef int foo; … … 3487 3516 // int foo; // redefine typedef name in new scope 3488 3517 // } 3489 //3490 // The pattern precludes declaring an array of functions versus a pointer to an array of functions, and returning arrays3491 // and functions versus pointers to arrays and functions.3492 3518 3493 3519 paren_type: … … 3504 3530 paren_type attribute_list_opt 3505 3531 { $$ = $1->addQualifiers( $2 ); } 3506 | type_ptr3507 | type_array attribute_list_opt3532 | variable_type_ptr 3533 | variable_type_array attribute_list_opt 3508 3534 { $$ = $1->addQualifiers( $2 ); } 3509 | type_function attribute_list_opt3535 | variable_type_function attribute_list_opt 3510 3536 { $$ = $1->addQualifiers( $2 ); } 3511 3537 ; 3512 3538 3513 type_ptr:3539 variable_type_ptr: 3514 3540 ptrref_operator variable_type_redeclarator 3515 3541 { $$ = $2->addPointer( DeclarationNode::newPointer( nullptr, $1 ) ); } 3516 3542 | ptrref_operator type_qualifier_list variable_type_redeclarator 3517 3543 { $$ = $3->addPointer( DeclarationNode::newPointer( $2, $1 ) ); } 3518 | 
'(' type_ptr ')' attribute_list_opt// redundant parenthesis3544 | '(' variable_type_ptr ')' attribute_list_opt // redundant parenthesis 3519 3545 { $$ = $2->addQualifiers( $4 ); } 3520 | '(' attribute_list type_ptr ')' attribute_list_opt // redundant parenthesis3546 | '(' attribute_list variable_type_ptr ')' attribute_list_opt // redundant parenthesis 3521 3547 { $$ = $3->addQualifiers( $2 )->addQualifiers( $5 ); } 3522 3548 ; 3523 3549 3524 type_array:3550 variable_type_array: 3525 3551 paren_type array_dimension 3526 3552 { $$ = $1->addArray( $2 ); } 3527 | '(' type_ptr ')' array_dimension3553 | '(' variable_type_ptr ')' array_dimension 3528 3554 { $$ = $2->addArray( $4 ); } 3529 | '(' attribute_list type_ptr ')' array_dimension3555 | '(' attribute_list variable_type_ptr ')' array_dimension 3530 3556 { $$ = $3->addQualifiers( $2 )->addArray( $5 ); } 3531 | '(' type_array ')' multi_array_dimension// redundant parenthesis3557 | '(' variable_type_array ')' multi_array_dimension // redundant parenthesis 3532 3558 { $$ = $2->addArray( $4 ); } 3533 | '(' attribute_list type_array ')' multi_array_dimension // redundant parenthesis3559 | '(' attribute_list variable_type_array ')' multi_array_dimension // redundant parenthesis 3534 3560 { $$ = $3->addQualifiers( $2 )->addArray( $5 ); } 3535 | '(' type_array ')'// redundant parenthesis3561 | '(' variable_type_array ')' // redundant parenthesis 3536 3562 { $$ = $2; } 3537 | '(' attribute_list type_array ')'// redundant parenthesis3563 | '(' attribute_list variable_type_array ')' // redundant parenthesis 3538 3564 { $$ = $3->addQualifiers( $2 ); } 3539 3565 ; 3540 3566 3541 type_function: 3567 variable_type_function: 3568 '(' variable_type_ptr ')' '(' push parameter_type_list_opt pop ')' // empty parameter list OBSOLESCENT (see 3) 3569 { $$ = $2->addParamList( $6 ); } 3570 | '(' attribute_list variable_type_ptr ')' '(' push parameter_type_list_opt pop ')' // empty parameter list OBSOLESCENT (see 3) 3571 { $$ = 
$3->addQualifiers( $2 )->addParamList( $7 ); } 3572 | '(' variable_type_function ')' // redundant parenthesis 3573 { $$ = $2; } 3574 | '(' attribute_list variable_type_function ')' // redundant parenthesis 3575 { $$ = $3->addQualifiers( $2 ); } 3576 ; 3577 3578 // This pattern parses a declaration for a function prototype that redefines a type name. It precludes declaring an 3579 // array of functions versus a pointer to an array of functions, and returning arrays and functions versus pointers to 3580 // arrays and functions. 3581 3582 function_type_redeclarator: 3583 function_type_no_ptr attribute_list_opt 3584 { $$ = $1->addQualifiers( $2 ); } 3585 | function_type_ptr 3586 | function_type_array attribute_list_opt 3587 { $$ = $1->addQualifiers( $2 ); } 3588 ; 3589 3590 function_type_no_ptr: 3542 3591 paren_type '(' push parameter_type_list_opt pop ')' // empty parameter list OBSOLESCENT (see 3) 3543 3592 { $$ = $1->addParamList( $4 ); } 3544 | '(' type_ptr ')' '(' push parameter_type_list_opt pop ')' // empty parameter list OBSOLESCENT (see 3)3593 | '(' function_type_ptr ')' '(' push parameter_type_list_opt pop ')' 3545 3594 { $$ = $2->addParamList( $6 ); } 3546 | '(' attribute_list type_ptr ')' '(' push parameter_type_list_opt pop ')' // empty parameter list OBSOLESCENT (see 3)3595 | '(' attribute_list function_type_ptr ')' '(' push parameter_type_list_opt pop ')' 3547 3596 { $$ = $3->addQualifiers( $2 )->addParamList( $7 ); } 3548 | '(' type_function ')'// redundant parenthesis3597 | '(' function_type_no_ptr ')' // redundant parenthesis 3549 3598 { $$ = $2; } 3550 | '(' attribute_list type_function ')' // redundant parenthesis 3599 | '(' attribute_list function_type_no_ptr ')' // redundant parenthesis 3600 { $$ = $3->addQualifiers( $2 ); } 3601 ; 3602 3603 function_type_ptr: 3604 ptrref_operator function_type_redeclarator 3605 { $$ = $2->addPointer( DeclarationNode::newPointer( nullptr, $1 ) ); } 3606 | ptrref_operator type_qualifier_list 
function_type_redeclarator 3607 { $$ = $3->addPointer( DeclarationNode::newPointer( $2, $1 ) ); } 3608 | '(' function_type_ptr ')' attribute_list_opt 3609 { $$ = $2->addQualifiers( $4 ); } 3610 | '(' attribute_list function_type_ptr ')' attribute_list_opt 3611 { $$ = $3->addQualifiers( $2 )->addQualifiers( $5 ); } 3612 ; 3613 3614 function_type_array: 3615 '(' function_type_ptr ')' array_dimension 3616 { $$ = $2->addArray( $4 ); } 3617 | '(' attribute_list function_type_ptr ')' array_dimension 3618 { $$ = $3->addQualifiers( $2 )->addArray( $5 ); } 3619 | '(' function_type_array ')' multi_array_dimension // redundant parenthesis 3620 { $$ = $2->addArray( $4 ); } 3621 | '(' attribute_list function_type_array ')' multi_array_dimension // redundant parenthesis 3622 { $$ = $3->addQualifiers( $2 )->addArray( $5 ); } 3623 | '(' function_type_array ')' // redundant parenthesis 3624 { $$ = $2; } 3625 | '(' attribute_list function_type_array ')' // redundant parenthesis 3551 3626 { $$ = $3->addQualifiers( $2 ); } 3552 3627 ; -
tests/.expect/PRNG.x64.txt
r1afd9ccb rd800676 1 1 2 2 PRNG() PRNG(5) PRNG(0,5) 3 8464106481 4 44 5215204710507639537 1 25 1880401021892145483 0 46 12503840966285181348 2 57 801971300205459356 0 28 6123812066052045228 3 19 7691074772031490538 4 310 4793575011534070065 0011 1 0647551928893428440 1 312 1 0865128702974868079 0 313 530720947131684825 3014 10520125295812061287 1515 7539957561855178679 4 416 1 3739826796006269835 0 217 4289714351582916365 3 218 1 6911914987551424434 2119 5327155553462670435 4 020 1 6251986870929071204 4 421 1 3394433706240223001 0 322 4814982023332666924 4 03 13944458589275087071 3 2 4 129977468648444256 0 4 5 2357727400298891021 2 2 6 8855179187835660146 3 3 7 9957620185645882382 4 1 8 13396406983727409795 0 5 9 3342782395220265920 0 5 10 1707651271867677937 1 0 11 16402561450140881681 0 1 12 17838519215740313729 4 2 13 7425936020594490136 4 0 14 4174865704721714670 3 5 15 16055269689200152092 0 2 16 15091270195803594018 1 5 17 11807315541476180798 1 1 18 10697186588988060306 4 1 19 14665526411527044929 3 2 20 11289342279096164771 2 5 21 16126980828050300615 1 4 22 7821578301767524260 4 1 23 23 seed 1009 24 24 25 25 Sequential 26 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%26 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 27 27 28 28 Concurrent 29 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%30 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%31 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%32 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%29 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 30 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 31 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 32 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 33 33 34 34 
prng() prng(5) prng(0,5) 35 8464106481 4 436 5215204710507639537 1 237 1880401021892145483 0 438 12503840966285181348 2 539 801971300205459356 0 240 6123812066052045228 3 141 7691074772031490538 4 342 4793575011534070065 0043 1 0647551928893428440 1 344 1 0865128702974868079 0 345 530720947131684825 3046 10520125295812061287 1547 7539957561855178679 4 448 1 3739826796006269835 0 249 4289714351582916365 3 250 1 6911914987551424434 2151 5327155553462670435 4 052 1 6251986870929071204 4 453 1 3394433706240223001 0 354 4814982023332666924 4 035 13944458589275087071 3 2 36 129977468648444256 0 4 37 2357727400298891021 2 2 38 8855179187835660146 3 3 39 9957620185645882382 4 1 40 13396406983727409795 0 5 41 3342782395220265920 0 5 42 1707651271867677937 1 0 43 16402561450140881681 0 1 44 17838519215740313729 4 2 45 7425936020594490136 4 0 46 4174865704721714670 3 5 47 16055269689200152092 0 2 48 15091270195803594018 1 5 49 11807315541476180798 1 1 50 10697186588988060306 4 1 51 14665526411527044929 3 2 52 11289342279096164771 2 5 53 16126980828050300615 1 4 54 7821578301767524260 4 1 55 55 seed 1009 56 56 57 57 Sequential 58 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%58 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 59 59 60 60 Concurrent 61 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%62 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%63 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%64 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%61 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 62 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 63 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 64 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 65 65 66 66 prng(t) 
prng(t,5) prng(t,0,5) 67 8464106481 4 468 5215204710507639537 1 269 1880401021892145483 0 470 12503840966285181348 2 571 801971300205459356 0 272 6123812066052045228 3 173 7691074772031490538 4 374 4793575011534070065 0075 1 0647551928893428440 1 376 1 0865128702974868079 0 377 530720947131684825 3078 10520125295812061287 1579 7539957561855178679 4 480 1 3739826796006269835 0 281 4289714351582916365 3 282 1 6911914987551424434 2183 5327155553462670435 4 084 1 6251986870929071204 4 485 1 3394433706240223001 0 386 4814982023332666924 4 067 13944458589275087071 3 2 68 129977468648444256 0 4 69 2357727400298891021 2 2 70 8855179187835660146 3 3 71 9957620185645882382 4 1 72 13396406983727409795 0 5 73 3342782395220265920 0 5 74 1707651271867677937 1 0 75 16402561450140881681 0 1 76 17838519215740313729 4 2 77 7425936020594490136 4 0 78 4174865704721714670 3 5 79 16055269689200152092 0 2 80 15091270195803594018 1 5 81 11807315541476180798 1 1 82 10697186588988060306 4 1 83 14665526411527044929 3 2 84 11289342279096164771 2 5 85 16126980828050300615 1 4 86 7821578301767524260 4 1 87 87 seed 1009 88 88 89 89 Sequential 90 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%90 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 91 91 92 92 Concurrent 93 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%94 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%95 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%96 trials 100000000 buckets 100000 min 87 1 max 1144avg 1000.0 std 31.6 rstd 3.2%93 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 94 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 95 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% 96 trials 100000000 buckets 100000 min 875 max 1146 avg 1000.0 std 31.6 rstd 3.2% -
tests/.expect/PRNG.x86.txt
r1afd9ccb rd800676 1 1 2 2 PRNG() PRNG(5) PRNG(0,5) 3 130161 1 14 4074541490 0 05 927506267 0 36 1991273445 1 37 669918146 2 38 519546860 119 1136699882 4310 2130185384 3 111 992239050 0 512 2250903111 0 113 1544429724 3 214 1591091660 3 315 2511657707 2 416 1065770984 2417 2412763405 4418 18 34447239 4 219 360289337 0 420 2449452027 1 121 3370425396 2 122 3109103043 0 33 2884683541 0 0 4 3465286746 2 4 5 3268922916 0 1 6 2396374907 3 0 7 2135076892 4 1 8 944377718 3 1 9 2204845346 3 3 10 3736609533 0 4 11 4063231336 0 2 12 1075394776 0 2 13 712844808 4 0 14 4246343110 3 1 15 3793873837 2 1 16 3690340337 1 4 17 319207944 1 4 18 1815791072 3 5 19 2581617261 1 5 20 3873329448 1 3 21 832631329 4 0 22 651551615 3 5 23 23 seed 1009 24 24 25 25 Sequential 26 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%26 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 27 27 28 28 Concurrent 29 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%30 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%31 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%32 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%29 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 30 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 31 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 32 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 33 33 34 34 prng() prng(5) prng(0,5) 35 130161 1 136 4074541490 0 037 927506267 0 338 1991273445 1 339 669918146 2 340 519546860 1141 1136699882 4342 2130185384 3 143 992239050 0 544 2250903111 0 145 1544429724 3 246 1591091660 3 347 2511657707 2 448 1065770984 2449 2412763405 4450 18 34447239 4 251 360289337 0 452 2449452027 1 153 3370425396 2 154 3109103043 0 335 2884683541 0 0 36 3465286746 2 4 
37 3268922916 0 1 38 2396374907 3 0 39 2135076892 4 1 40 944377718 3 1 41 2204845346 3 3 42 3736609533 0 4 43 4063231336 0 2 44 1075394776 0 2 45 712844808 4 0 46 4246343110 3 1 47 3793873837 2 1 48 3690340337 1 4 49 319207944 1 4 50 1815791072 3 5 51 2581617261 1 5 52 3873329448 1 3 53 832631329 4 0 54 651551615 3 5 55 55 seed 1009 56 56 57 57 Sequential 58 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%58 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 59 59 60 60 Concurrent 61 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%62 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%63 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%64 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%61 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 62 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 63 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 64 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 65 65 66 66 prng(t) prng(t,5) prng(t,0,5) 67 130161 1 168 4074541490 0 069 927506267 0 370 1991273445 1 371 669918146 2 372 519546860 1173 1136699882 4374 2130185384 3 175 992239050 0 576 2250903111 0 177 1544429724 3 278 1591091660 3 379 2511657707 2 480 1065770984 2481 2412763405 4482 18 34447239 4 283 360289337 0 484 2449452027 1 185 3370425396 2 186 3109103043 0 367 2884683541 0 0 68 3465286746 2 4 69 3268922916 0 1 70 2396374907 3 0 71 2135076892 4 1 72 944377718 3 1 73 2204845346 3 3 74 3736609533 0 4 75 4063231336 0 2 76 1075394776 0 2 77 712844808 4 0 78 4246343110 3 1 79 3793873837 2 1 80 3690340337 1 4 81 319207944 1 4 82 1815791072 3 5 83 2581617261 1 5 84 3873329448 1 3 85 832631329 4 0 86 651551615 3 5 87 87 seed 1009 88 88 89 89 Sequential 90 trials 100000000 buckets 100000 min 
8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%90 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 91 91 92 92 Concurrent 93 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%94 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%95 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%96 trials 100000000 buckets 100000 min 8 67 max 1135 avg 1000.0 std 31.7rstd 3.2%93 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 94 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 95 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% 96 trials 100000000 buckets 100000 min 858 max 1147 avg 1000.0 std 31.5 rstd 3.2% -
tests/.expect/nested_function.x64.txt
r1afd9ccb rd800676 1 total 1 551 total 145 -
tests/.expect/nested_function.x86.txt
r1afd9ccb rd800676 1 total 1051 total 245 -
tests/concurrent/channels/parallel_harness.hfa
r1afd9ccb rd800676 100 100 101 101 int test( size_t Processors, size_t Channels, size_t Producers, size_t Consumers, size_t ChannelSize ) { 102 size_t Clusters = 1;102 size_t Clusters = Processors; 103 103 // create a cluster 104 104 cluster clus[Clusters]; … … 108 108 } 109 109 110 channels = a new( Channels );110 channels = aalloc( Channels ); 111 111 112 112 // sout | "Processors: " | Processors | " ProdsPerChan: " | Producers | " ConsPerChan: " | Consumers | "Channels: " | Channels | " Channel Size: " | ChannelSize; … … 150 150 151 151 } 152 // for ( i; Channels ) { 153 // // sout | get_count( channels[i] ); 154 // if ( get_count( channels[i] ) < Consumers ){ 155 // #ifdef BIG 156 // bigObject b{0}; 157 // #endif 158 // for ( j; Consumers ) { 159 // #ifdef BIG 160 // insert( channels[i], b ); 161 // #else 162 // insert( channels[i], 0 ); 163 // #endif 164 // } 165 // } 166 // } 152 167 153 sout | "cons"; 168 154 for ( i; Consumers * Channels ) { -
tests/concurrent/pthread/.expect/bounded_buffer.x64.txt
r1afd9ccb rd800676 1 producer total value is 442802 consumer total value is 442801 producer total value is 39780 2 consumer total value is 39780 -
tests/concurrent/pthread/.expect/bounded_buffer.x86.txt
r1afd9ccb rd800676 1 producer total value is 450602 consumer total value is 450601 producer total value is 1770 2 consumer total value is 1770
Note: See TracChangeset
for help on using the changeset viewer.