Changeset deda7e6
- Timestamp:
- Sep 21, 2023, 10:15:58 PM (3 months ago)
- Branches:
- master
- Children:
- 62c6cfa
- Parents:
- c1e66d9 (diff), 5a1ae14 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the(diff)
links above to see all the changes relative to each parent. - Files:
-
- 11 added
- 2 deleted
- 54 edited
- 2 moved
Legend:
- Unmodified
- Added
- Removed
-
doc/theses/colby_parsons_MMAth/local.bib
rc1e66d9 rdeda7e6 212 212 year = 2023, 213 213 } 214 215 @inproceedings{Harris02, 216 title={A practical multi-word compare-and-swap operation}, 217 author={Harris, Timothy L and Fraser, Keir and Pratt, Ian A}, 218 booktitle={Distributed Computing: 16th International Conference, DISC 2002 Toulouse, France, October 28--30, 2002 Proceedings 16}, 219 pages={265--279}, 220 year={2002}, 221 organization={Springer} 222 } 223 224 @misc{kotlin:channel, 225 author = "Kotlin Documentation", 226 title = "Channel", 227 howpublished = {\url{https://kotlinlang.org/api/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines.channels/-channel/}}, 228 note = "[Online; accessed 11-September-2023]" 229 } -
doc/theses/colby_parsons_MMAth/text/CFA_concurrency.tex
rc1e66d9 rdeda7e6 1 1 \chapter{Concurrency in \CFA}\label{s:cfa_concurrency} 2 2 3 The groundwork for concurrency in \CFA was laid by Thierry Delisle in his Master's Thesis~\cite{Delisle18}. 4 In that work, he introduced generators, coroutines, monitors, and user-level threading. 5 Not listed in that work were basic concurrency features needed as building blocks, such as locks, futures, and condition variables , which he also added to \CFA.3 The groundwork for concurrency in \CFA was laid by Thierry Delisle in his Master's Thesis~\cite{Delisle18}. 4 In that work, he introduced generators, coroutines, monitors, and user-level threading. 5 Not listed in that work were basic concurrency features needed as building blocks, such as locks, futures, and condition variables. 6 6 7 7 \section{Threading Model}\label{s:threading} 8 \CFA provides user-level threading and supports an $M$:$N$ threading model where $M$ user threads are scheduled on $N$ kernel threads, where both $M$ and $N$ can be explicitly set by the user.9 Kernel threads are created by declaring a @processor@ structure.10 User-thread types are defined by creating a @thread@ aggregate-type, \ie replace @struct@ with @thread@.11 For each thread type a corresponding @main@ routine must be defined, which is where the thread starts running once it is created.12 Examples of \CFA user thread and processor creation are shown in \VRef[Listing]{l:cfa_thd_init}.13 8 9 \CFA provides user-level threading and supports an $M$:$N$ threading model where $M$ user threads are scheduled on $N$ kernel threads and both $M$ and $N$ can be explicitly set by the programmer. 10 Kernel threads are created by declaring processor objects; 11 user threads are created by declaring thread objects. 12 \VRef[Listing]{l:cfa_thd_init} shows a typical example of creating a \CFA user-thread type, and then declaring processor ($N$) and thread objects ($M$). 
14 13 \begin{cfa}[caption={Example of \CFA user thread and processor creation},label={l:cfa_thd_init}] 15 @thread@ my_thread {...}; $\C{// user thread type}$ 16 void @main@( my_thread & this ) { $\C{// thread start routine}$ 14 @thread@ my_thread { $\C{// user thread type (like structure)}$ 15 ... // arbitrary number of field declarations 16 }; 17 void @main@( @my_thread@ & this ) { $\C{// thread start routine}$ 17 18 sout | "Hello threading world"; 18 19 } 19 20 int main() { 20 int main() { $\C{// program starts with a processor (kernel thread)}$ 21 21 @processor@ p[2]; $\C{// add 2 processors = 3 total with starting processor}$ 22 22 { 23 my_thread t[2], * t3 = new(); $\C{// create 3 user threads, running in main routine}$23 @my_thread@ t[2], * t3 = new(); $\C{// create 2 stack allocated, 1 dynamic allocated user threads}$ 24 24 ... // execute concurrently 25 delete( t3 ); $\C{// wait for t hreadto end and deallocate}$26 } // wait for threadsto end and deallocate27 } 25 delete( t3 ); $\C{// wait for t3 to end and deallocate}$ 26 } // wait for threads t[0] and t[1] to end and deallocate 27 } // deallocate additional kernel threads 28 28 \end{cfa} 29 30 When processors are added, they are added alongside the existing processor given to each program. 31 Thus, for $N$ processors, allocate $N-1$ processors. 32 A thread is implicitly joined at deallocation, either implicitly at block exit for stack allocation or explicitly at @delete@ for heap allocation. 33 The thread performing the deallocation must wait for the thread to terminate before the deallocation can occur. 29 A thread type is defined using the aggregate kind @thread@. 30 For each thread type, a corresponding @main@ routine must be defined, which is where the thread starts running when a thread object is created. 31 The @processor@ declaration adds additional kernel threads alongside the existing processor given to each program. 32 Thus, for $N$ processors, allocate $N-1$ processors. 
33 A thread is implicitly joined at deallocation, either implicitly at block exit for stack allocation or explicitly at @delete@ for heap allocation. 34 35 The thread performing the deallocation must wait for the thread to terminate before the deallocation can occur. 34 36 A thread terminates by returning from the main routine where it starts. 35 37 36 \section{Existing Concurrency Features} 37 \section{Existing and New Concurrency Features} 38 37 39 \CFA currently provides a suite of concurrency features including futures, locks, condition variables, generators, coroutines, monitors. 38 40 Examples of these features are omitted as most of them are the same as their counterparts in other languages. 39 41 It is worthwhile to note that all concurrency features added to \CFA are made to be compatible with each other. 40 The laundry list of features above and the ones introduced in this thesis can be used in the same program without issue. 42 The laundry list of features above and the ones introduced in this thesis can be used in the same program without issue, and the features are designed to interact in meaningful ways. 43 For example, a thread can interact with a monitor, which can interact with a coroutine, which can interact with a generator. 44 42 45 Solving concurrent problems requires a diverse toolkit. -
doc/theses/colby_parsons_MMAth/text/CFA_intro.tex
rc1e66d9 rdeda7e6 9 9 \CFA is a layer over C, is transpiled\footnote{Source to source translator.} to C, and is largely considered to be an extension of C. 10 10 Beyond C, it adds productivity features, extended libraries, an advanced type-system, and many control-flow/concurrency constructions. 11 However, \CFA stays true to the C programming style, with most code revolving around @struct@ 's and routines, and respects the same rules as C.11 However, \CFA stays true to the C programming style, with most code revolving around @struct@s and routines, and respects the same rules as C. 12 12 \CFA is not object oriented as it has no notion of @this@ (receiver) and no structures with methods, but supports some object oriented ideas including constructors, destructors, and limited nominal inheritance. 13 13 While \CFA is rich with interesting features, only the subset pertinent to this work is discussed here. … … 17 17 References in \CFA are a layer of syntactic sugar over pointers to reduce the number of syntactic ref/deref operations needed with pointer usage. 18 18 Pointers in \CFA differ from C and \CC in their use of @0p@ instead of C's @NULL@ or \CC's @nullptr@. 19 References can contain 0p in \CFA, which is the equivalent of a null reference. 19 20 Examples of references are shown in \VRef[Listing]{l:cfa_ref}. 20 21 … … 64 65 This feature is also implemented in Pascal~\cite{Pascal}. 65 66 It can exist as a stand-alone statement or wrap a routine body to expose aggregate fields. 67 If exposed fields share a name, the type system will attempt to disambiguate them based on type. 68 If the type system is unable to disambiguate the fields then the user must qualify those names to avoid a compilation error. 66 69 Examples of the @with@ statement are shown in \VRef[Listing]{l:cfa_with}. 67 70 -
doc/theses/colby_parsons_MMAth/text/actors.tex
rc1e66d9 rdeda7e6 7 7 Actors are an indirect concurrent feature that abstracts threading away from a programmer, and instead provides \gls{actor}s and messages as building blocks for concurrency. 8 8 Hence, actors are in the realm of \gls{impl_concurrency}, where programmers write concurrent code without dealing with explicit thread creation or interaction. 9 Actor message-passing is similar to channels, but with more abstraction, so there is no shared data to protect, making actors amenable ina distributed environment.9 Actor message-passing is similar to channels, but with more abstraction, so there is no shared data to protect, making actors amenable to a distributed environment. 10 10 Actors are often used for high-performance computing and other data-centric problems, where the ease of use and scalability of an actor system provides an advantage over channels. 11 11 … … 14 14 15 15 \section{Actor Model} 16 The \Newterm{actor model} is a concurrent paradigm where computation is broken into units of work called actors, and the data for computation is distributed to actors in the form of messages~\cite{Hewitt73}.16 The \Newterm{actor model} is a concurrent paradigm where an actor is used as the fundamental building-block for computation, and the data for computation is distributed to actors in the form of messages~\cite{Hewitt73}. 17 17 An actor is composed of a \Newterm{mailbox} (message queue) and a set of \Newterm{behaviours} that receive from the mailbox to perform work. 18 18 Actors execute asynchronously upon receiving a message and can modify their own state, make decisions, spawn more actors, and send messages to other actors. … … 22 22 For example, mutual exclusion and locking are rarely relevant concepts in an actor model, as actors typically only operate on local state. 23 23 24 An actor does not have a thread. 
24 \subsection{Classic Actor System} 25 An implementation of the actor model with a theatre (group) of actors is called an \Newterm{actor system}. 26 Actor systems largely follow the actor model, but can differ in some ways. 27 28 In an actor system, an actor does not have a thread. 25 29 An actor is executed by an underlying \Newterm{executor} (kernel thread-pool) that fairly invokes each actor, where an actor invocation processes one or more messages from its mailbox. 26 30 The default number of executor threads is often proportional to the number of computer cores to achieve good performance. 27 31 An executor is often tunable with respect to the number of kernel threads and its scheduling algorithm, which optimize for specific actor applications and workloads \see{Section~\ref{s:ActorSystem}}. 28 32 29 \subsection{Classic Actor System}30 An implementation of the actor model with a community of actors is called an \Newterm{actor system}.31 Actor systems largely follow the actor model, but can differ in some ways.32 33 While the semantics of message \emph{send} is asynchronous, the implementation may be synchronous or a combination. 33 The default semantics for message \emph{receive} is \gls{fifo}, so an actor receives messages from its mailbox in temporal (arrival) order ;34 however, messages sent among actors arrive in any order.34 The default semantics for message \emph{receive} is \gls{fifo}, so an actor receives messages from its mailbox in temporal (arrival) order. 35 % however, messages sent among actors arrive in any order. 35 36 Some actor systems provide priority-based mailboxes and/or priority-based message-selection within a mailbox, where custom message dispatchers search among or within a mailbox(es) with a predicate for specific kinds of actors and/or messages. 
36 Some actor systems provide a shared mailbox where multiple actors receive from a common mailbox~\cite{Akka}, which is contrary to the no-sharing design of the basic actor-model (and requiresadditional locking).37 For non-\gls{fifo} service, some notion of fairness (eventual progress) must exist, otherwise messages have a high latency or starve, \ie never received.38 Finally, some actor systems provide multiple typed-mailboxes, which then lose the actor-\lstinline{become} mechanism \see{Section~\ref{s:SafetyProductivity}}.37 Some actor systems provide a shared mailbox where multiple actors receive from a common mailbox~\cite{Akka}, which is contrary to the no-sharing design of the basic actor-model (and may require additional locking). 38 For non-\gls{fifo} service, some notion of fairness (eventual progress) should exist, otherwise messages have a high latency or starve, \ie are never received. 39 % Finally, some actor systems provide multiple typed-mailboxes, which then lose the actor-\lstinline{become} mechanism \see{Section~\ref{s:SafetyProductivity}}. 39 40 %While the definition of the actor model provides no restrictions on message ordering, actor systems tend to guarantee that messages sent from a given actor $i$ to actor $j$ arrive at actor $j$ in the order they were sent. 40 41 Another way an actor system varies from the model is allowing access to shared global-state. … … 60 61 Figure \ref{f:inverted_actor} shows an actor system designed as \Newterm{message-centric}, where a set of messages are scheduled and run on underlying executor threads~\cite{uC++,Nigro21}. 61 62 This design is \Newterm{inverted} because actors belong to a message queue, whereas in the classic approach a message queue belongs to each actor. 62 Now a message send must quer ies the actor to know which message queue to post the message.63 Now a message send must query the actor to know which message queue to post the message to. 
63 64 Again, the simplest design has a single global queue of messages accessed by the executor threads, but this approach has the same contention problem by the executor threads. 64 65 Therefore, the messages (mailboxes) are sharded and executor threads schedule each message, which points to its corresponding actor. … … 176 177 @actor | str_msg | int_msg;@ $\C{// cascade sends}$ 177 178 @actor | int_msg;@ $\C{// send}$ 178 @actor | finished_msg;@ $\C{// send => terminate actor ( deallocation deferred)}$179 @actor | finished_msg;@ $\C{// send => terminate actor (builtin Poison-Pill)}$ 179 180 stop_actor_system(); $\C{// waits until actors finish}\CRT$ 180 181 } // deallocate actor, int_msg, str_msg … … 492 493 Each executor thread iterates over its own message queues until it finds one with messages. 493 494 At this point, the executor thread atomically \gls{gulp}s the queue, meaning it moves the contents of message queue to a local queue of the executor thread. 495 Gulping moves the contents of the message queue as a batch rather than removing individual elements. 494 496 An example of the queue gulping operation is shown in the right side of Figure \ref{f:gulp}, where an executor thread gulps queue 0 and begins to process it locally. 495 497 This step allows the executor thread to process the local queue without any atomics until the next gulp. … … 523 525 524 526 Since the copy queue is an array, envelopes are allocated first on the stack and then copied into the copy queue to persist until they are no longer needed. 525 For many workloads, the copy queues grow in size to facilitate the average number of messages in flight and there are no further dynamic allocations.527 For many workloads, the copy queues reallocate and grow in size to facilitate the average number of messages in flight and there are no further dynamic allocations. 526 528 The downside of this approach is that more storage is allocated than needed, \ie each copy queue is only partially full. 
527 529 Comparatively, the individual envelope allocations of a list-based queue mean that the actor system always uses the minimum amount of heap space and cleans up eagerly. … … 562 564 To ensure sequential actor execution and \gls{fifo} message delivery in a message-centric system, stealing requires finding and removing \emph{all} of an actor's messages, and inserting them consecutively in another message queue. 563 565 This operation is $O(N)$ with a non-trivial constant. 564 The only way for work stealing to become practical is to shard each worker's message queue , which also reduces contention, and to steal queues to eliminate queue searching.566 The only way for work stealing to become practical is to shard each worker's message queue \see{Section~\ref{s:executor}}, which also reduces contention, and to steal queues to eliminate queue searching. 565 567 566 568 Given queue stealing, the goal of the presented stealing implementation is to have an essentially zero-contention-cost stealing mechanism. … … 576 578 577 579 The outline for lazy-stealing by a thief is: select a victim, scan its queues once, and return immediately if a queue is stolen. 578 The thief then assumes it normal operation of scanning over its own queues looking for work, where stolen work is placed at the end of the scan.580 The thief then assumes its normal operation of scanning over its own queues looking for work, where stolen work is placed at the end of the scan. 579 581 Hence, only one victim is affected and there is a reasonable delay between stealing events as the thief scans its ready queue looking for its own work before potentially stealing again. 580 582 This lazy examination by the thief has a low perturbation cost for victims, while still finding work in a moderately loaded system. … … 636 638 % Note that a thief never exceeds its $M$/$N$ worker range because it is always exchanging queues with other workers. 
637 639 If no appropriate victim mailbox is found, no swap is attempted. 640 Note that since the mailbox checks happen non-atomically, the thieves effectively guess which mailbox is ripe for stealing. 641 The thief may read stale data and can end up stealing an ineligible or empty mailbox. 642 This is not a correctness issue and is addressed in Section~\ref{s:steal_prob}, but the steal will likely not be productive. 643 These unproductive steals are uncommon, but do occur with some frequency, and are a tradeoff that is made to achieve minimal victim contention. 638 644 639 645 \item … … 644 650 \end{enumerate} 645 651 646 \subsection{Stealing Problem} 652 \subsection{Stealing Problem}\label{s:steal_prob} 647 653 Each queue access (send or gulp) involving any worker (thief or victim) is protected using spinlock @mutex_lock@. 648 654 However, to achieve the goal of almost zero contention for the victim, it is necessary that the thief does not acquire any queue spinlocks in the stealing protocol. … … 703 709 None of the work-stealing actor-systems examined in this work perform well on the repeat benchmark. 704 710 Hence, for all non-pathological cases, the claim is made that this stealing mechanism has a (probabilistically) zero-victim-cost in practice. 711 Future work on the work stealing system could include a backoff mechanism after failed steals to further address the pathological cases. 705 712 706 713 \subsection{Queue Pointer Swap}\label{s:swap} … … 709 716 The \gls{cas} is a read-modify-write instruction available on most modern architectures. 710 717 It atomically compares two memory locations, and if the values are equal, it writes a new value into the first memory location. 
711 A s oftware implementation of \gls{cas} is:718 A sequential specification of \gls{cas} is: 712 719 \begin{cfa} 713 720 // assume this routine executes atomically … … 755 762 756 763 Either a true memory/memory swap instruction or a \gls{dcas} would provide the ability to atomically swap two memory locations, but unfortunately neither of these instructions are supported on the architectures used in this work. 764 There are lock-free implementations of DCAS, or more generally K-word CAS (also known as MCAS or CASN)~\cite{Harris02} and LLX/SCX~\cite{Brown14} that can be used to provide the desired atomic swap capability. 765 However, these lock-free implementations were not used as it is advantageous in the work stealing case to let an attempted atomic swap fail instead of retrying. 757 766 Hence, a novel atomic swap specific to the actor use case is simulated, called \gls{qpcas}. 767 Note that this swap is \emph{not} lock-free. 758 768 The \gls{qpcas} is effectively a \gls{dcas} special cased in a few ways: 759 769 \begin{enumerate} … … 766 776 \end{cfa} 767 777 \item 768 The values swapped are never null pointers, so a null pointer can be used as an intermediate value during the swap. 778 The values swapped are never null pointers, so a null pointer can be used as an intermediate value during the swap. As such, null is effectively used as a lock for the swap. 769 779 \end{enumerate} 770 780 Figure~\ref{f:qpcasImpl} shows the \CFA pseudocode for the \gls{qpcas}. … … 862 872 The concurrent proof of correctness is shown through the existence of an invariant. 863 873 The invariant states when a queue pointer is set to @0p@ by a thief, then the next write to the pointer can only be performed by the same thief. 874 This is effectively a mutual exclusion condition for the later write. 864 875 To show that this invariant holds, it is shown that it is true at each step of the swap. 
865 876 \begin{itemize} … … 1011 1022 The intuition behind this heuristic is that the slowest worker receives help via work stealing until it becomes a thief, which indicates that it has caught up to the pace of the rest of the workers. 1012 1023 This heuristic should ideally result in lowered latency for message sends to victim workers that are overloaded with work. 1024 It must be acknowledged that this linear search could cause a lot of cache coherence traffic. 1025 Future work on this heuristic could include introducing a search that has less impact on caching. 1013 1026 A negative side-effect of this heuristic is that if multiple thieves steal at the same time, they likely steal from the same victim, which increases the chance of contention. 1014 1027 However, given that workers have multiple queues, often in the tens or hundreds of queues, it is rare for two thieves to attempt stealing from the same queue. … … 1028 1041 \CFA's actor system comes with a suite of safety and productivity features. 1029 1042 Most of these features are only present in \CFA's debug mode, and hence, have zero-cost in no-debug mode. 1030 The suit of features include the following.1043 The suite of features include the following. 1031 1044 \begin{itemize} 1032 1045 \item Static-typed message sends: 1033 If an actor does not support receiving a given message type, the receive call is rejected at compile time, allowing unsupported messages to never besent to an actor.1046 If an actor does not support receiving a given message type, the receive call is rejected at compile time, preventing unsupported messages from being sent to an actor. 1034 1047 1035 1048 \item Detection of message sends to Finished/Destroyed/Deleted actors: … … 1042 1055 1043 1056 \item When an executor is configured, $M >= N$. 
1044 That is, each worker must receive at least one mailbox queue, otherwise the worker spins and never does any work.1057 That is, each worker must receive at least one mailbox queue, since otherwise a worker cannot receive any work without a queue to pull messages from. 1046 1059 1047 1060 \item Detection of unsent messages: … … 1100 1113 \begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=5pt\parsep=5pt\itemsep=0pt} 1101 1114 \item 1102 Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets ( 48 process\-ing units) 2.2GHz, running Linux v5.8.0--59--generic1103 \item 1104 Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units) 2.0 GHz, running Linux v5.8.0--55--generic1115 Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets (96 process\-ing units), running Linux v5.8.0--59--generic 1116 \item 1117 Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units), running Linux v5.8.0--55--generic 1105 1118 \end{list} 1106 1119 … … 1112 1125 All benchmarks are run 5 times and the median is taken. 1113 1126 Error bars showing the 95\% confidence intervals appear on each point in the graphs. 1127 The confidence intervals are calculated using bootstrapping to avoid normality assumptions. 1114 1128 If the confidence bars are small enough, they may be obscured by the data point. 1115 1129 In this section, \uC is compared to \CFA frequently, as the actor system in \CFA is heavily based off of \uC's actor system. -
doc/theses/colby_parsons_MMAth/text/channels.tex
rc1e66d9 rdeda7e6 20 20 Neither Go nor \CFA channels have the restrictions of the early channel-based concurrent systems. 21 21 22 Other popular languages and libraries that provide channels include C++ Boost~\cite{boost:channel}, Rust~\cite{rust:channel}, Haskell~\cite{haskell:channel}, and OCaml~\cite{ocaml:channel}.22 Other popular languages and libraries that provide channels include C++ Boost~\cite{boost:channel}, Rust~\cite{rust:channel}, Haskell~\cite{haskell:channel}, OCaml~\cite{ocaml:channel}, and Kotlin~\cite{kotlin:channel}. 23 23 Boost channels only support asynchronous (non-blocking) operations, and Rust channels are limited to only having one consumer per channel. 24 24 Haskell channels are unbounded in size, and OCaml channels are zero-size. 25 25 These restrictions in Haskell and OCaml are likely due to their functional approach, which results in them both using a list as the underlying data structure for their channel. 26 26 These languages and libraries are not discussed further, as their channel implementation is not comparable to the bounded-buffer style channels present in Go and \CFA. 27 Kotlin channels are comparable to Go and \CFA, but unfortunately they were not identified as a comparator until after presentation of this thesis and are omitted due to time constraints. 27 28 28 29 \section{Producer-Consumer Problem} … … 31 32 In the problem, threads interact with a buffer in two ways: producing threads insert values into the buffer and consuming threads remove values from the buffer. 32 33 In general, a buffer needs protection to ensure a producer only inserts into a non-full buffer and a consumer only removes from a non-empty buffer (synchronization). 
33 As well, a buffer needs protection from concurrent access by multiple producers or consumers attempting to insert or remove simultaneously (MX).34 As well, a buffer needs protection from concurrent access by multiple producers or consumers attempting to insert or remove simultaneously, which is often provided by MX. 34 35 35 36 \section{Channel Size}\label{s:ChannelSize} … … 41 42 Fixed sized (bounded) implies the communication is mostly asynchronous, \ie the producer can proceed up to the buffer size and vice versa for the consumer with respect to removal, at which point the producer/consumer would wait. 42 43 \item 43 Infinite sized (unbounded) implies the communication is asynchronous, \ie the producer never waits but the consumer waits when the buffer is empty. 44 Since memory is finite, all unbounded buffers are ultimately bounded; 45 this restriction must be part of its implementation. 44 Infinite sized (unbounded) implies the communication is asymmetrically asynchronous, \ie the producer never waits but the consumer waits when the buffer is empty. 46 45 \end{enumerate} 47 46 … … 50 49 However, like MX, a buffer should ensure every value is eventually removed after some reasonable bounded time (no long-term starvation). 51 50 The simplest way to prevent starvation is to implement the buffer as a queue, either with a cyclic array or linked nodes. 51 While \gls{fifo} is not required for producer-consumer problem correctness, it is a desired property in channels as it provides predictable and often relied upon channel ordering behaviour to users. 52 52 53 53 \section{First-Come First-Served} 54 As pointed out, a bounded buffer requires MX among multiple producers or consumers.54 As pointed out, a bounded buffer implementation often provides MX among multiple producers or consumers. 55 55 This MX should be fair among threads, independent of the \gls{fifo} buffer being fair among values. 
56 56 Fairness among threads is called \gls{fcfs} and was defined by Lamport~\cite[p.~454]{Lamport74}. … … 66 66 67 67 \section{Channel Implementation}\label{s:chan_impl} 68 Currently, only the Go and Erlang programming languagesprovide user-level threading where the primary communication mechanism is channels.69 Both Go and Erlanghave user-level threading and preemptive scheduling, and both use channels for communication.70 Go providesmultiple homogeneous channels; each have a single associated type.68 The programming languages Go, Kotlin, and Erlang provide user-level threading where the primary communication mechanism is channels. 69 These languages have user-level threading and preemptive scheduling, and all use channels for communication. 70 Go and Kotlin provide multiple homogeneous channels; each have a single associated type. 71 71 Erlang, which is closely related to actor systems, provides one heterogeneous channel per thread (mailbox) with a typed receive pattern. 72 Go encouragesusers to communicate via channels, but provides them as an optional language feature.72 Go and Kotlin encourage users to communicate via channels, but provide them as an optional language feature. 73 73 On the other hand, Erlang's single heterogeneous channel is a fundamental part of the threading system design; using it is unavoidable. 74 Similar to Go , \CFA's channels are offered as an optional language feature.74 Similar to Go and Kotlin, \CFA's channels are offered as an optional language feature. 75 75 76 76 While iterating on channel implementation, experiments were conducted that varied the producer-consumer algorithm and lock type used inside the channel. … … 83 83 The Go channel implementation utilizes cooperation among threads to achieve good performance~\cite{go:chan}. 84 84 This cooperation only occurs when producers or consumers need to block due to the buffer being full or empty. 85 After a producer blocks it must wait for a consumer to signal it and vice versa. 
86 The consumer or producer that signals a blocked thread is called the signalling thread. 85 87 In these cases, a blocking thread stores their relevant data in a shared location and the signalling thread completes the blocking thread's operation before waking them; 86 88 \ie the blocking thread has no work to perform after it unblocks because the signalling thread has done this work. … 88 90 First, each thread interacting with the channel only acquires and releases the internal channel lock once. 89 91 As a result, contention on the internal lock is decreased; only entering threads compete for the lock since unblocking threads do not reacquire the lock. 90 The other advantage of Go's wait-morphing approach is that it eliminates the bottleneck of waitingfor signalled threads to run.92 The other advantage of Go's wait-morphing approach is that it eliminates the need to wait for signalled threads to run. 91 93 Note that the property of acquiring/releasing the lock only once can also be achieved with a different form of cooperation, called \Newterm{baton passing}. 92 94 Baton passing occurs when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section, conceptually ``passing'' the mutual exclusion from the signalling thread to the signalled thread. … 94 96 the wait-morphing approach has threads cooperate by completing the signalled thread's operation, thus removing a signalled thread's need for mutual exclusion after unblocking. 95 97 While baton passing is useful in some algorithms, it results in worse channel performance than the Go approach. 
96 In the baton-passing approach, all threads need to wait for the signalled thread to reach the front of the ready queue, context switch,and run before other operations on the channel can proceed, since the signalled thread holds mutual exclusion;98 In the baton-passing approach, all threads need to wait for the signalled thread to unblock and run before other operations on the channel can proceed, since the signalled thread holds mutual exclusion; 97 99 in the wait-morphing approach, since the operation is completed before the signal, other threads can continue to operate on the channel without waiting for the signalled thread to run. 98 100 … … 154 156 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads performing the termination may end up unexpectedly blocking in their attempt to help other threads exit the system. 155 157 158 \subsubsection{Go Channel Close} 156 159 Go channels provide a set of tools to help with concurrent shutdown~\cite{go:chan} using a @close@ operation in conjunction with the \Go{select} statement. 157 160 The \Go{select} statement is discussed in \ref{s:waituntil}, where \CFA's @waituntil@ statement is compared with the Go \Go{select} statement. … … 175 178 Hence, due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown. 176 179 177 \paragraph{\CFA channels} have access to an extensive exception handling mechanism~\cite{Beach21}. 180 \subsubsection{\CFA Channel Close} 181 \CFA channels have access to an extensive exception handling mechanism~\cite{Beach21}. 178 182 As such \CFA uses an exception-based approach to channel shutdown that is symmetric for both producers and consumers, and supports graceful shutdown. 179 183 -
doc/theses/colby_parsons_MMAth/text/conclusion.tex
rc1e66d9 rdeda7e6 5 5 % ====================================================================== 6 6 7 The goal of this thesis was to expand the concurrent support that \CFA offers to fill in gaps and support language users' ability to write safe and efficient concurrent programs. 8 The presented features achieves this goal, and provides users with the means to write scalable programs in \CFA through multiple avenues. 9 Additionally, the tools presented include safety and productivity features from deadlock detection, to detection of common programming errors, easy concurrent shutdown, and toggleable performance statistics. 10 Programmers often have preferences between computing paradigms and concurrency is no exception. 11 If users prefer the message passing paradigm of concurrency, \CFA now provides message passing utilities in the form of an actor system and channels. 12 For shared memory concurrency, the mutex statement provides a safe and easy-to-use interface for mutual exclusion. 13 The @waituntil@ statement aids in writing concurrent programs in both the message passing and shared memory paradigms of concurrency. 14 Furthermore, no other language provides a synchronous multiplexing tool polymorphic over resources like \CFA's @waituntil@. 15 This work successfully provides users with familiar concurrent language support, but with additional value added over similar utilities in other popular languages. 7 The goal of this thesis is to expand concurrent support in \CFA to fill in gaps and increase support for writing safe and efficient concurrent programs. 8 The presented features achieve this goal and provide users with the means to write scalable concurrent programs in \CFA through multiple avenues. 9 Additionally, the tools presented provide safety and productivity features including: detection of deadlock and other common concurrency errors, easy concurrent shutdown, and toggleable performance statistics. 
16 10 17 On overview of the contributions in this thesis include the following: 11 For locking, the mutex statement provides a safe and easy-to-use interface for mutual exclusion. 12 If programmers prefer the message-passing paradigm, \CFA now supports it in the form of channels and actors. 13 The @waituntil@ statement simplifies writing concurrent programs in both the message-passing and shared-memory paradigms of concurrency. 14 Finally, no other programming language provides a synchronous multiplexing tool that is polymorphic over resources like \CFA's @waituntil@. 15 This work successfully provides users with familiar concurrent-language support, but with additional value added over similar utilities in other popular languages. 16 17 An overview of the contributions made in this thesis includes the following: 18 18 \begin{enumerate} 19 \item The mutex statement, which provides performant and deadlock-free multiple lock acquisition. 20 \item Channels with comparable performance to Go, that have safety and productivity features including deadlock detection, and an easy-to-use exception-based channel @close@ routine. 21 \item An in-memory actor system that achieved the lowest latency message send of systems tested due to the novel copy-queue data structure. The actor system presented has built-in detection of six common actor errors, and it has good performance compared to other systems on all benchmarks. 22 \item A @waituntil@ statement which tackles the hard problem of allowing a thread to safely synch\-ronously wait for some set of concurrent resources. 19 \item The mutex statement, which provides performant and deadlock-free multi-lock acquisition. 20 \item Channels with comparable performance to Go, which have safety and productivity features including deadlock detection and an easy-to-use exception-based channel @close@ routine. 
21 \item An in-memory actor system, which achieves the lowest latency message send of systems tested due to the novel copy-queue data structure. 22 \item As well, the actor system has built-in detection of six common actor errors, with excellent performance compared to other systems across all benchmarks presented in this thesis. 23 \item A @waituntil@ statement, which tackles the hard problem of allowing a thread to wait synchronously for an arbitrary set of concurrent resources. 23 24 \end{enumerate} 24 25 25 The features presented are commonly used in conjunction to solve concurrent problems. 26 The @waituntil@ statement, the @mutex@ statement, and channels will all likely see use in a program where a thread operates as an administrator or server which accepts and distributes work among channels based on some shared state. 27 The @mutex@ statement sees use across almost all concurrent code in \CFA, since it is used with the stream operator @sout@ to provide thread-safe output. 28 While not yet implemented, the polymorphic support of the @waituntil@ statement could see use in conjunction with the actor system to enable user threads outside the actor system to wait for work to be done, or for actors to finish. 29 A user of \CFA does not have to solely subscribe to the message passing or shared memory concurrent paradigm. 30 As such, channels in \CFA are often used to pass pointers to shared memory that may still need mutual exclusion, requiring the @mutex@ statement to also be used. 26 The added features are now commonly used to solve concurrent problems in \CFA. 27 The @mutex@ statement sees use across almost all concurrent code in \CFA, as it is the simplest mechanism for providing thread-safe input and output. 28 The channels and the @waituntil@ statement see use in programs where a thread operates as a server or administrator, which accepts and distributes work among channels based on some shared state. 
29 When implemented, the polymorphic support of the @waituntil@ statement will see use with the actor system to enable user threads outside the actor system to wait for work to be done or for actors to finish. 30 Finally, the new features are often combined, \eg channels pass pointers to shared memory that may still need mutual exclusion, requiring the @mutex@ statement to be used. 31 31 32 32 From the novel copy-queue data structure in the actor system and the plethora of user-supporting safety features, all these utilities build upon existing concurrent tooling with value added. 33 33 Performance results verify that each new feature is comparable or better than similar features in other programming languages. 34 All in all, this suite of concurrent tools expands users' ability to easily write safe and performant multi-threaded programs in \CFA.34 All in all, this suite of concurrent tools expands a \CFA programmer's ability to easily write safe and performant multi-threaded programs. 35 35 36 36 \section{Future Work} … … 40 40 This thesis only scratches the surface of implicit concurrency by providing an actor system. 41 41 There is room for more implicit concurrency tools in \CFA. 42 User-defined implicit concurrency in the form of annotated loops or recursive concurrent functions exists in manyother languages and libraries~\cite{uC++,OpenMP}.42 User-defined implicit concurrency in the form of annotated loops or recursive concurrent functions exists in other languages and libraries~\cite{uC++,OpenMP}. 43 43 Similar implicit concurrency mechanisms could be implemented and expanded on in \CFA. 44 44 Additionally, the problem of automatic parallelism of sequential programs via the compiler is an interesting research space that other languages have approached~\cite{wilson94,haskell:parallel} and could be explored in \CFA. 
… … 46 46 \subsection{Advanced Actor Stealing Heuristics} 47 47 48 In this thesis, two basic victim-selection heuristics are chosen when implementing the work stealing actorsystem.49 Good victim selection is an active area of work 48 In this thesis, two basic victim-selection heuristics are chosen when implementing the work-stealing actor-system. 49 Good victim selection is an active area of work-stealing research, especially when taking into account NUMA effects and cache locality~\cite{barghi18,wolke17}. 50 50 The actor system in \CFA is modular and exploration of other victim-selection heuristics for queue stealing in \CFA could be provided by pluggable modules. 51 51 Another question in work stealing is: when should a worker thread steal? 52 Work 52 Work-stealing systems can often be too aggressive when stealing, causing the cost of the steal to have a negative rather than positive effect on performance. 53 53 Given that thief threads often have cycles to spare, there is room for more nuanced approaches when stealing. 54 54 Finally, there is the very difficult problem of blocking and unblocking idle threads for workloads with extreme oscillations in CPU needs. … … 56 56 \subsection{Synchronously Multiplexing System Calls} 57 57 58 There are many tools that try to synchronously wait for or asynchronously check I/O, since improvements in this area pay dividends in many areas of computer science~\cite{linux:select,linux:poll,linux:epoll,linux:iouring}. 58 There are many tools that try to synchronously wait for or asynchronously check I/O. 59 Improvements in this area pay dividends in many areas of I/O based programming~\cite{linux:select,linux:poll,linux:epoll,linux:iouring}. 59 60 Research on improving user-space tools to synchronize over I/O and other system calls is an interesting area to explore in the world of concurrent tooling. 
60 61 Specifically, incorporating I/O into the @waituntil@ to allow a network server to work with multiple kinds of asynchronous I/O interconnects without using traditional event loops. … … 69 70 The semantics and safety of these builtins require careful navigation since they require the user to have a deep understanding of concurrent memory-ordering models. 70 71 Furthermore, these atomics also often require a user to understand how to fence appropriately to ensure correctness. 71 All these problems and more could benefit from language support in \CFA. 72 All these problems and more would benefit from language support in \CFA. 72 73 Adding good language support for atomics is a difficult problem, which if solved well, would allow for easier and safer writing of low-level concurrent code. 73 74 -
doc/theses/colby_parsons_MMAth/text/intro.tex
rc1e66d9 rdeda7e6 5 5 % ====================================================================== 6 6 7 Concurrent programs are the wild west of programming because determinism and simple ordering of program operations go out the window. 8 To seize the reins and write performant and safe concurrent code, high-level concurrent-language features are needed. 9 Like any other craftsmen, programmers are only as good as their tools, and concurrent tooling and features are no exception. 7 Concurrent programs are the wild west of programming because determinism and simple ordering of program operations go out the window. 8 To seize the reins and write performant and safe concurrent code, high-level concurrent-language features are needed. 9 Like any other craftsmen, programmers are only as good as their tools, and concurrent tooling and features are no exception. 10 10 11 This thesis presents a suite of high-level concurrent-language features implemented in the new programming-language \CFA. 12 These features aim to improve the performance of concurrent programs, aid in writing safe programs, and assist user productivity by improving the ease of concurrent programming. 13 The groundwork for concurrent features in \CFA was implemented by Thierry Delisle~\cite{Delisle18}, who contributed the threading system, coroutines, monitors and other tools. 14 This thesis builds on top of that foundation by providing a suite of high-level concurrent features. 15 The features include a @mutex@ statement, channels, a @waituntil@ statement, and an actor system. 16 All of these features exist in other programming languages in some shape or form, however this thesis extends the original ideas by improving performance, productivity, and safety. 11 This thesis presents a suite of high-level concurrent-language features implemented in the new programming-language \CFA. 
12 These features aim to improve the performance of concurrent programs, aid in writing safe programs, and assist user productivity by improving the ease of concurrent programming. 13 The groundwork for concurrent features in \CFA was designed and implemented by Thierry Delisle~\cite{Delisle18}, who contributed the threading system, coroutines, monitors and other basic concurrency tools. 14 This thesis builds on top of that foundation by providing a suite of high-level concurrent features. 15 The features include a @mutex@ statement, channels, a @waituntil@ statement, and an actor system. 16 All of these features exist in other programming languages in some shape or form; 17 however, this thesis extends the original ideas by improving performance, productivity, and safety. 17 18 18 19 \section{The Need For Concurrent Features} 19 Asking a programmer to write a complex concurrent program without any concurrent language features is asking them to undertake a very difficult task.20 They would only be able to rely on the atomicity that their hardware provides and would have to build up from there.21 This would be like asking a programmer to write a complex sequential program only in assembly.22 Both are doable, but would often be easier and less error prone with higher level tooling.23 20 24 Concurrent programming has many pitfalls that are unique and do not show up in sequential code: 21 % Asking a programmer to write a complex concurrent program without any concurrent language features is asking them to undertake a very difficult task. 22 % They would only be able to rely on the atomicity that their hardware provides and would have to build up from there. 23 % This would be like asking a programmer to write a complex sequential program only in assembly. 24 % Both are doable, but would often be easier and less error prone with higher level tooling. 
25 26 Concurrent programming has many unique pitfalls that do not appear in sequential programming: 25 27 \begin{enumerate} 26 \item Deadlock, where threads cyclically wait on resources, blocking them indefinitely.28 \item Race conditions, where thread orderings can result in arbitrary behaviours, resulting in correctness problems. 27 29 \item Livelock, where threads constantly attempt a concurrent operation unsuccessfully, resulting in no progress being made. 28 \item Race conditions, where thread orderings can result in differing behaviours and correctness of a program execution.29 \item Starvation, where threads may be deprived of access to some shared resource due to unfairness and never make progress.30 \item Starvation, where \emph{some} threads constantly attempt a concurrent operation unsuccessfully, resulting in partial progress being made. 31 \item Deadlock, where some threads wait for an event that cannot occur, blocking them indefinitely, resulting in no progress being made. 30 32 \end{enumerate} 31 Even with the guiding hand of concurrent tools these pitfalls can still catch unwary programmers, but good language support canprevent, detect, and mitigate these problems.33 Even with the guiding hand of concurrent tools these pitfalls still catch unwary programmers, but good language support helps significantly to prevent, detect, and mitigate these problems. 32 34 33 \section{ A BriefOverview}35 \section{Thesis Overview} 34 36 35 The first chapter of this thesis aims to familiarize the reader with the language \CFA. 36 In this chapter, syntax and features of the \CFA language that appear in this work are discussed The next chapter briefly discusses prior concurrency work in \CFA and how this work builds on top of existing features. 37 The remaining chapters each introduce a concurrent language feature, discuss prior related work, and present contributions which are then benchmarked against other languages and systems. 
38 The first of these chapters discusses the @mutex@ statement, a language feature that improves ease of use and safety of lock usage. 39 The @mutex@ statement is compared both in terms of safety and performance with similar tools in \CC and Java. 40 The following chapter discusses channels, a message passing concurrency primitive that provides an avenue for safe synchronous and asynchronous communication across threads. 41 Channels in \CFA are compared to Go, which popularized the use of channels in modern concurrent programs. 42 The following chapter discusses the \CFA actor system. 43 The \CFA actor system is a close cousin of channels, as it also belongs to the message passing paradigm of concurrency. 44 However, the actor system provides a great degree of abstraction and ease of scalability, making it useful for a different range of problems than channels. 45 The actor system in \CFA is compared with a variety of other systems on a suite of benchmarks, where it achieves significant performance gains over other systems due to its design. 46 The final chapter discusses the \CFA @waituntil@ statement which provides the ability to synchronize while waiting for a resource, such as acquiring a lock, accessing a future, or writing to a channel. 47 The @waituntil@ statement presented provides greater flexibility and expressibility than similar features in other languages. 48 All in all, the features presented aim to fill in gaps in the current \CFA concurrent language support, and enable users to write a wider range of complex concurrent programs with ease. 37 Chapter~\ref{s:cfa} of this thesis aims to familiarize the reader with the language \CFA. 38 In this chapter, syntax and features of the \CFA language that appear in this work are discussed. 39 Chapter~\ref{s:cfa_concurrency} briefly discusses prior concurrency work in \CFA, and how the work in this thesis builds on top of the existing framework. 
40 Each remaining chapter introduces an additional \CFA concurrent-language feature, which includes discussing prior related work for the feature, extensions over prior features, and uses benchmarks to compare the performance of the feature with corresponding or similar features in other languages and systems. 41 42 Chapter~\ref{s:mutexstmt} discusses the @mutex@ statement, a language feature that provides safe and simple lock usage. 43 The @mutex@ statement is compared both in terms of safety and performance with similar mechanisms in \CC and Java. 44 Chapter~\ref{s:channels} discusses channels, a message passing concurrency primitive that provides for safe synchronous and asynchronous communication among threads. 45 Channels in \CFA are compared to Go's channels, which popularized the use of channels in modern concurrent programs. 46 Chapter~\ref{s:actors} discusses the \CFA actor system. 47 An actor system is a close cousin of channels, as it also belongs to the message passing paradigm of concurrency. 48 However, an actor system provides a greater degree of abstraction and ease of scalability, making it useful for a different range of problems than channels. 49 The actor system in \CFA is compared with a variety of other systems on a suite of benchmarks. 50 Chapter~\ref{s:waituntil} discusses the \CFA @waituntil@ statement, which provides the ability to synchronize while waiting for a resource, such as acquiring a lock, accessing a future, or writing to a channel. 51 The \CFA @waituntil@ statement provides greater flexibility and expressibility than similar features in other languages. 52 All in all, the features presented aim to fill in gaps in the current \CFA concurrent-language support, enabling users to write a wider range of complex concurrent programs with ease. 
49 53 50 54 \section{Contributions} 51 This work presents the following contributions :52 \begin{enumerate} 53 \item The @mutex@ statement which:54 \begin{itemize}[itemsep=0pt]55 \item56 provides deadlock-free multiple lock acquisition,57 \item58 clearly denotes lock acquisition and release,59 \item60 and has good performance irrespective of lock ordering.61 \end{itemize}62 \item Channels which:55 This work presents the following contributions within each of the additional language features: 56 \begin{enumerate}[leftmargin=*] 57 \item The @mutex@ statement that: 58 \begin{itemize}[itemsep=0pt] 59 \item 60 provides deadlock-free multiple lock acquisition, 61 \item 62 clearly denotes lock acquisition and release, 63 \item 64 and has good performance irrespective of lock ordering. 65 \end{itemize} 66 \item The channel that: 63 67 \begin{itemize}[itemsep=0pt] 64 68 \item … … 71 75 and provides toggle-able statistics for performance tuning. 72 76 \end{itemize} 73 \item Anin-memory actor system that:77 \item The in-memory actor system that: 74 78 \begin{itemize}[itemsep=0pt] 75 79 \item … … 82 86 gains performance through static-typed message sends, eliminating the need for dynamic dispatch, 83 87 \item 84 introduces the copy queue, an array based queue specialized for the actor usecase to minimize calls to the memory allocator,88 introduces the copy queue, an array-based queue specialized for the actor use-case to minimize calls to the memory allocator, 85 89 \item 86 90 has robust detection of six tricky, but common actor programming errors, … … 90 94 and provides toggle-able statistics for performance tuning. 
91 95 \end{itemize} 92 93 \item A @waituntil@ statement which: 96 \item The @waituntil@ statement that: 94 97 \begin{itemize}[itemsep=0pt] 95 98 \item 96 99 is the only known polymorphic synchronous multiplexing language feature, 97 100 \item 98 provides greater expressibility ofwaiting conditions than other languages,101 provides greater expressibility for waiting conditions than other languages, 99 102 \item 100 and achieves comparable performance to similar features in two other languages ,103 and achieves comparable performance to similar features in two other languages. 101 104 \end{itemize} 102 105 \end{enumerate} -
doc/theses/colby_parsons_MMAth/text/mutex_stmt.tex
rc1e66d9 rdeda7e6 83 83 \end{figure} 84 84 85 Like Java, \CFA monitors have \Newterm{multi-acquire} semantics so the thread in the monitor may acquire it multiple times without deadlock, allowing recursion and calling of other MX functions.85 Like Java, \CFA monitors have \Newterm{multi-acquire} (reentrant locking) semantics so the thread in the monitor may acquire it multiple times without deadlock, allowing recursion and calling of other MX functions. 86 86 For robustness, \CFA monitors ensure the monitor lock is released regardless of how an acquiring function ends, normal or exceptional, and returning a shared variable is safe via copying before the lock is released. 87 87 Monitor objects can be passed through multiple helper functions without acquiring mutual exclusion, until a designated function associated with the object is called. … … 104 104 } 105 105 \end{cfa} 106 The \CFA monitor implementation ensures multi-lock acquisition is done in a deadlock-free manner regardless of the number of MX parameters and monitor arguments. It it important to note that \CFA monitors do not attempt to solve the nested monitor problem~\cite{Lister77}. 106 107 108 The \CFA monitor implementation ensures multi-lock acquisition is done in a deadlock-free manner regardless of the number of MX parameters and monitor arguments via resource ordering. 107 109 It is important to note that \CFA monitors do not attempt to solve the nested monitor problem~\cite{Lister77}. 108 110 109 111 \section{\lstinline{mutex} statement} … … 165 166 In detail, the mutex statement has a clause and statement block, similar to a conditional or loop statement. 166 167 The clause accepts any number of lockable objects (like a \CFA MX function prototype), and locks them for the duration of the statement. 167 The locks are acquired in a deadlock free manner and released regardless of how control-flow exits the statement. 
168 The locks are acquired in a deadlock-free manner and released regardless of how control-flow exits the statement. 169 Note that this deadlock-freedom has some limitations \see{\VRef{s:DeadlockAvoidance}}. 168 170 The mutex statement provides easy lock usage in the common case of lexically wrapping a CS. 169 171 Examples of \CFA mutex statement are shown in \VRef[Listing]{l:cfa_mutex_ex}. … … 210 212 Like Java, \CFA introduces a new statement rather than building from existing language features, although \CFA has sufficient language features to mimic \CC RAII locking. 211 213 This syntactic choice makes MX explicit rather than implicit via object declarations. 212 Hence, it is eas ier for programmers and language tools to identify MX points in a program, \eg scan for all @mutex@ parameters and statements in a body of code.214 Hence, it is easy for programmers and language tools to identify MX points in a program, \eg scan for all @mutex@ parameters and statements in a body of code; similar scanning can be done with Java's @synchronized@. 213 215 Furthermore, concurrent safety is provided across an entire program for the complex operation of acquiring multiple locks in a deadlock-free manner. 214 216 Unlike Java, \CFA's mutex statement and \CC's @scoped_lock@ both use parametric polymorphism to allow user defined types to work with this feature. 
… … 231 233 thread$\(_2\)$ : sout | "uvw" | "xyz"; 232 234 \end{cfa} 233 any of the outputs can appear , included a segment fault due to I/O buffer corruption:235 any of the outputs can appear: 234 236 \begin{cquote} 235 237 \small\tt … … 260 262 mutex( sout ) { // acquire stream lock for sout for block duration 261 263 sout | "abc"; 262 mutex( sout ) sout | "uvw" | "xyz"; // OK because sout lock is recursive264 sout | "uvw" | "xyz"; 263 265 sout | "def"; 264 266 } // implicitly release sout lock 265 267 \end{cfa} 266 The inner lock acquire is likely to occur through a function call that does a thread-safe print.267 268 268 269 \section{Deadlock Avoidance}\label{s:DeadlockAvoidance} … … 309 310 For fewer than 7 locks ($2^3-1$), the sort is unrolled performing the minimum number of compare and swaps for the given number of locks; 310 311 for 7 or more locks, insertion sort is used. 311 Since it is extremely rare to hold more than 6 locks at a time, the algorithm is fast and executes in $O(1)$ time. 312 Furthermore, lock addresses are unique across program execution, even for dynamically allocated locks, so the algorithm is safe across the entire program execution. 312 It is assumed to be rare to hold more than 6 locks at a time. 313 For 6 or fewer locks the algorithm is fast and executes in $O(1)$ time. 314 Furthermore, lock addresses are unique across program execution, even for dynamically allocated locks, so the algorithm is safe across the entire program execution, as long as lifetimes of objects are appropriately managed. 315 For example, deleting a lock and allocating another one could give the new lock the same address as the deleted one, however deleting a lock in use by another thread is a programming error irrespective of the usage of the @mutex@ statement. 
313 316 314 317 The downside to the sorting approach is that it is not fully compatible with manual usages of the same locks outside the @mutex@ statement, \ie the locks are acquired without using the @mutex@ statement. … … 338 341 \end{cquote} 339 342 Comparatively, if the @scoped_lock@ is used and the same locks are acquired elsewhere, there is no concern of the @scoped_lock@ deadlocking, due to its avoidance scheme, but it may livelock. 340 The convenience and safety of the @mutex@ statement, \ie guaranteed lock release with exceptions, should encourage programmers to always use it for locking, mitigating any deadlock scenarioversus combining manual locking with the mutex statement. 343 The convenience and safety of the @mutex@ statement, \ie guaranteed lock release with exceptions, should encourage programmers to always use it for locking, mitigating most deadlock scenarios versus combining manual locking with the mutex statement. 341 344 Both \CC and the \CFA do not provide any deadlock guarantees for nested @scoped_lock@s or @mutex@ statements. 342 345 To do so would require solving the nested monitor problem~\cite{Lister77}, which currently does not have any practical solutions. … … 344 347 \section{Performance} 345 348 Given the two multi-acquisition algorithms in \CC and \CFA, each with differing advantages and disadvantages, it is interesting to compare their performance. 346 Comparison with Java is not possible, since it only takes a single lock. 349 Comparison with Java was not conducted, since the synchronized statement only takes a single object and does not provide deadlock avoidance or prevention. 347 350 348 351 The comparison starts with a baseline that acquires the locks directly without a mutex statement or @scoped_lock@ in a fixed ordering and then releases them. … … 356 359 Each variation is run 11 times on 2, 4, 8, 16, 24, 32 cores and with 2, 4, and 8 locks being acquired. 
357 360 The median is calculated and is plotted alongside the 95\% confidence intervals for each point. 361 The confidence intervals are calculated using bootstrapping to avoid normality assumptions. 358 362 359 363 \begin{figure} … … 388 392 } 389 393 \end{cfa} 390 \caption{Deadlock avoidance benchmark pseudocode}394 \caption{Deadlock avoidance benchmark \CFA pseudocode} 391 395 \label{l:deadlock_avoid_pseudo} 392 396 \end{figure} … … 396 400 % sudo dmidecode -t system 397 401 \item 398 Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units) 2.0 GHz, TSO memory model, running Linux v5.8.0--55--generic, gcc--10 compiler402 Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units), TSO memory model, running Linux v5.8.0--55--generic, gcc--10 compiler 399 403 \item 400 Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets ( 48 processing units) 2.2GHz, TSO memory model, running Linux v5.8.0--59--generic, gcc--10 compiler404 Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets (96 processing units), TSO memory model, running Linux v5.8.0--59--generic, gcc--10 compiler 401 405 \end{list} 402 406 %The hardware architectures are different in threading (multithreading vs hyper), cache structure (MESI or MESIF), NUMA layout (QPI vs HyperTransport), memory model (TSO vs WO), and energy/thermal mechanisms (turbo-boost). … … 411 415 For example, on the AMD machine with 32 threads and 8 locks, the benchmarks would occasionally livelock indefinitely, with no threads making any progress for 3 hours before the experiment was terminated manually. 412 416 It is likely that shorter bouts of livelock occurred in many of the experiments, which would explain large confidence intervals for some of the data points in the \CC data. 
413 In Figures~\ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel} there is the counter-intuitive result of the mutexstatement performing better than the baseline.417 In Figures~\ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel} there is the counter-intuitive result of the @mutex@ statement performing better than the baseline. 414 418 At 7 locks and above the mutex statement switches from a hard coded sort to insertion sort, which should decrease performance. 415 419 The hard coded sort is branch-free and constant-time and was verified to be faster than insertion sort for 6 or fewer locks. 416 It is likely the increase in throughput compared to baseline is due to the delay spent in the insertion sort, which decreases contention on the locks. 417 420 Part of the difference in throughput compared to baseline is due to the delay spent in the insertion sort, which decreases contention on the locks. 421 This was verified to be part of the difference in throughput by experimenting with varying NCS delays in the baseline; however it only comprises a small portion of difference. 422 It is possible that the baseline is slowed down or the @mutex@ is sped up by other factors that are not easily identifiable. 418 423 419 424 \begin{figure} -
doc/theses/colby_parsons_MMAth/text/waituntil.tex
rc1e66d9 rdeda7e6 168 168 Go's @select@ has the same exclusive-or semantics as the ALT primitive from Occam and associated code blocks for each clause like ALT and Ada. 169 169 However, unlike Ada and ALT, Go does not provide guards for the \lstinline[language=go]{case} clauses of the \lstinline[language=go]{select}. 170 As such, the exponential blowup can be seen comparing Go and \uC in Figure~\ label{f:AdaMultiplexing}.170 As such, the exponential blowup can be seen comparing Go and \uC in Figure~\ref{f:AdaMultiplexing}. 171 171 Go also provides a timeout via a channel and a @default@ clause like Ada @else@ for asynchronous multiplexing. 172 172 … … 519 519 In following example, either channel @C1@ or @C2@ must be satisfied but nothing can be done for at least 1 or 3 seconds after the channel read, respectively. 520 520 \begin{cfa}[deletekeywords={timeout}] 521 waituntil( i << C1 ) ; and waituntil( timeout( 1`s ) );522 or waituntil( i << C2 ) ; and waituntil( timeout( 3`s ) );521 waituntil( i << C1 ){} and waituntil( timeout( 1`s ) ){} 522 or waituntil( i << C2 ){} and waituntil( timeout( 3`s ) ){} 523 523 \end{cfa} 524 524 If only @C2@ is satisfied, \emph{both} timeout code-blocks trigger because 1 second occurs before 3 seconds. … … 542 542 Now the unblocked WUT is guaranteed to have a satisfied resource and its code block can safely executed. 543 543 The insertion circumvents the channel buffer via the wait-morphing in the \CFA channel implementation \see{Section~\ref{s:chan_impl}}, allowing @waituntil@ channel unblocking to not be special-cased. 544 Note that all channel operations are fair and no preference is given between @waituntil@ and direct channel operations when unblocking. 544 545 545 546 Furthermore, if both @and@ and @or@ operators are used, the @or@ operations stop behaving like exclusive-or due to the race among channel operations, \eg: -
libcfa/prelude/extras.c
rc1e66d9 rdeda7e6 3 3 #include <uchar.h> // char16_t, char32_t 4 4 #include <wchar.h> // wchar_t 5 #include <stdlib.h> // malloc, free, exit, atexit, abort5 #include <stdlib.h> // malloc, free, getenv, exit, atexit, abort, printf 6 6 #include <stdio.h> // printf 7 #include <string.h> // strlen, strcmp, strncmp -
libcfa/prelude/extras.regx2
rc1e66d9 rdeda7e6 1 1 extern void \*malloc[^;]*; 2 2 extern void free[^;]*; 3 extern char \*getenv[^;]*; 3 4 extern void exit[^;]*; 4 5 extern int atexit[^;]*; 5 6 extern void abort[^;]*; 6 7 extern int printf[^;]*; 8 int strcmp[^;]*; 9 int strncmp[^;]*; 10 size_t strlen[^;]*; -
libcfa/src/Makefile.am
rc1e66d9 rdeda7e6 11 11 ## Created On : Sun May 31 08:54:01 2015 12 12 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Wed Aug 30 21:22:45202314 ## Update Count : 26 313 ## Last Modified On : Mon Sep 18 17:06:56 2023 14 ## Update Count : 264 15 15 ############################################################################### 16 16 … … 118 118 concurrency/mutex_stmt.hfa \ 119 119 concurrency/channel.hfa \ 120 concurrency/actor.hfa 120 concurrency/actor.hfa 121 121 122 122 inst_thread_headers_src = \ … … 130 130 concurrency/mutex.hfa \ 131 131 concurrency/select.hfa \ 132 concurrency/thread.hfa 132 concurrency/thread.hfa \ 133 concurrency/cofor.hfa 133 134 134 135 thread_libsrc = ${inst_thread_headers_src} ${inst_thread_headers_src:.hfa=.cfa} \ -
libcfa/src/clock.hfa
rc1e66d9 rdeda7e6 10 10 // Created On : Thu Apr 12 14:36:06 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : S un Apr 18 08:12:16 202113 // Update Count : 2812 // Last Modified On : Sat Sep 9 14:07:17 2023 13 // Update Count : 30 14 14 // 15 15 … … 91 91 // discontinuous jumps when the OS is not running the kernal thread. A duration is returned because the value is 92 92 // relative and cannot be converted to real-time (wall-clock) time. 93 Duration processor () {// non-monotonic duration of kernel thread93 Duration processor_cpu() { // non-monotonic duration of kernel thread 94 94 timespec ts; 95 95 clock_gettime( CLOCK_THREAD_CPUTIME_ID, &ts ); 96 96 return (Duration){ ts }; 97 } // processor 97 } // processor_cpu 98 98 99 99 // Program CPU-time watch measures CPU time consumed by all processors (kernel threads) in the UNIX process. This 100 100 // watch is affected by discontinuous jumps when the OS is not running the kernel threads. A duration is returned 101 101 // because the value is relative and cannot be converted to real-time (wall-clock) time. 102 Duration program () {// non-monotonic duration of program CPU102 Duration program_cpu() { // non-monotonic duration of program CPU 103 103 timespec ts; 104 104 clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts ); 105 105 return (Duration){ ts }; 106 } // program 106 } // program_cpu 107 107 108 108 // Monotonic duration from machine boot and including system suspension. This watch is unaffected by discontinuous -
libcfa/src/collections/string.cfa
rc1e66d9 rdeda7e6 157 157 // Comparison 158 158 159 bool ?==?(const string & s, const string & other) { 160 return *s.inner == *other.inner; 161 } 162 163 bool ?!=?(const string & s, const string & other) { 164 return *s.inner != *other.inner; 165 } 166 167 bool ?==?(const string & s, const char * other) { 168 return *s.inner == other; 169 } 170 171 bool ?!=?(const string & s, const char * other) { 172 return *s.inner != other; 173 } 159 int cmp (const string &s1, const string &s2) { return cmp(*s1.inner , *s2.inner); } 160 bool ?==?(const string &s1, const string &s2) { return *s1.inner == *s2.inner ; } 161 bool ?!=?(const string &s1, const string &s2) { return *s1.inner != *s2.inner ; } 162 bool ?>? (const string &s1, const string &s2) { return *s1.inner > *s2.inner ; } 163 bool ?>=?(const string &s1, const string &s2) { return *s1.inner >= *s2.inner ; } 164 bool ?<=?(const string &s1, const string &s2) { return *s1.inner <= *s2.inner ; } 165 bool ?<? (const string &s1, const string &s2) { return *s1.inner < *s2.inner ; } 166 167 int cmp (const string &s1, const char* s2) { return cmp(*s1.inner , s2 ); } 168 bool ?==?(const string &s1, const char* s2) { return *s1.inner == s2 ; } 169 bool ?!=?(const string &s1, const char* s2) { return *s1.inner != s2 ; } 170 bool ?>? (const string &s1, const char* s2) { return *s1.inner > s2 ; } 171 bool ?>=?(const string &s1, const char* s2) { return *s1.inner >= s2 ; } 172 bool ?<=?(const string &s1, const char* s2) { return *s1.inner <= s2 ; } 173 bool ?<? (const string &s1, const char* s2) { return *s1.inner < s2 ; } 174 175 int cmp (const char* s1, const string &s2) { return cmp( s1 , *s2.inner); } 176 bool ?==?(const char* s1, const string &s2) { return s1 == *s2.inner ; } 177 bool ?!=?(const char* s1, const string &s2) { return s1 != *s2.inner ; } 178 bool ?>? 
(const char* s1, const string &s2) { return s1 > *s2.inner ; } 179 bool ?>=?(const char* s1, const string &s2) { return s1 >= *s2.inner ; } 180 bool ?<=?(const char* s1, const string &s2) { return s1 <= *s2.inner ; } 181 bool ?<? (const char* s1, const string &s2) { return s1 < *s2.inner ; } 182 174 183 175 184 //////////////////////////////////////////////////////// -
libcfa/src/collections/string.hfa
rc1e66d9 rdeda7e6 116 116 117 117 // Comparisons 118 bool ?==?(const string & s, const string & other); 119 bool ?!=?(const string & s, const string & other); 120 bool ?==?(const string & s, const char * other); 121 bool ?!=?(const string & s, const char * other); 118 int cmp (const string &, const string &); 119 bool ?==?(const string &, const string &); 120 bool ?!=?(const string &, const string &); 121 bool ?>? (const string &, const string &); 122 bool ?>=?(const string &, const string &); 123 bool ?<=?(const string &, const string &); 124 bool ?<? (const string &, const string &); 125 126 int cmp (const string &, const char*); 127 bool ?==?(const string &, const char*); 128 bool ?!=?(const string &, const char*); 129 bool ?>? (const string &, const char*); 130 bool ?>=?(const string &, const char*); 131 bool ?<=?(const string &, const char*); 132 bool ?<? (const string &, const char*); 133 134 int cmp (const char*, const string &); 135 bool ?==?(const char*, const string &); 136 bool ?!=?(const char*, const string &); 137 bool ?>? (const char*, const string &); 138 bool ?>=?(const char*, const string &); 139 bool ?<=?(const char*, const string &); 140 bool ?<? (const char*, const string &); 141 122 142 123 143 // Slicing -
libcfa/src/collections/string_res.cfa
rc1e66d9 rdeda7e6 637 637 // Comparisons 638 638 639 640 bool ?==?(const string_res &s1, const string_res &s2) { 641 return ByteCmp( s1.Handle.s, 0, s1.Handle.lnth, s2.Handle.s, 0, s2.Handle.lnth) == 0; 642 } 643 644 bool ?!=?(const string_res &s1, const string_res &s2) { 645 return !(s1 == s2); 646 } 647 bool ?==?(const string_res &s, const char* other) { 648 string_res sother = other; 649 return s == sother; 650 } 651 bool ?!=?(const string_res &s, const char* other) { 652 return !(s == other); 653 } 639 int cmp(const string_res &s1, const string_res &s2) { 640 // return 0; 641 int ans1 = memcmp(s1.Handle.s, s2.Handle.s, min(s1.Handle.lnth, s2.Handle.lnth)); 642 if (ans1 != 0) return ans1; 643 return s1.Handle.lnth - s2.Handle.lnth; 644 } 645 646 bool ?==?(const string_res &s1, const string_res &s2) { return cmp(s1, s2) == 0; } 647 bool ?!=?(const string_res &s1, const string_res &s2) { return cmp(s1, s2) != 0; } 648 bool ?>? (const string_res &s1, const string_res &s2) { return cmp(s1, s2) > 0; } 649 bool ?>=?(const string_res &s1, const string_res &s2) { return cmp(s1, s2) >= 0; } 650 bool ?<=?(const string_res &s1, const string_res &s2) { return cmp(s1, s2) <= 0; } 651 bool ?<? (const string_res &s1, const string_res &s2) { return cmp(s1, s2) < 0; } 652 653 int cmp (const string_res &s1, const char* s2) { 654 string_res s2x = s2; 655 return cmp(s1, s2x); 656 } 657 658 bool ?==?(const string_res &s1, const char* s2) { return cmp(s1, s2) == 0; } 659 bool ?!=?(const string_res &s1, const char* s2) { return cmp(s1, s2) != 0; } 660 bool ?>? (const string_res &s1, const char* s2) { return cmp(s1, s2) > 0; } 661 bool ?>=?(const string_res &s1, const char* s2) { return cmp(s1, s2) >= 0; } 662 bool ?<=?(const string_res &s1, const char* s2) { return cmp(s1, s2) <= 0; } 663 bool ?<? 
(const string_res &s1, const char* s2) { return cmp(s1, s2) < 0; } 664 665 int cmp (const char* s1, const string_res & s2) { 666 string_res s1x = s1; 667 return cmp(s1x, s2); 668 } 669 670 bool ?==?(const char* s1, const string_res &s2) { return cmp(s1, s2) == 0; } 671 bool ?!=?(const char* s1, const string_res &s2) { return cmp(s1, s2) != 0; } 672 bool ?>? (const char* s1, const string_res &s2) { return cmp(s1, s2) > 0; } 673 bool ?>=?(const char* s1, const string_res &s2) { return cmp(s1, s2) >= 0; } 674 bool ?<=?(const char* s1, const string_res &s2) { return cmp(s1, s2) <= 0; } 675 bool ?<? (const char* s1, const string_res &s2) { return cmp(s1, s2) < 0; } 676 654 677 655 678 -
libcfa/src/collections/string_res.hfa
rc1e66d9 rdeda7e6 142 142 143 143 // Comparisons 144 bool ?==?(const string_res &s, const string_res &other); 145 bool ?!=?(const string_res &s, const string_res &other); 146 bool ?==?(const string_res &s, const char* other); 147 bool ?!=?(const string_res &s, const char* other); 144 int cmp (const string_res &, const string_res &); 145 bool ?==?(const string_res &, const string_res &); 146 bool ?!=?(const string_res &, const string_res &); 147 bool ?>? (const string_res &, const string_res &); 148 bool ?>=?(const string_res &, const string_res &); 149 bool ?<=?(const string_res &, const string_res &); 150 bool ?<? (const string_res &, const string_res &); 151 152 int cmp (const string_res &, const char*); 153 bool ?==?(const string_res &, const char*); 154 bool ?!=?(const string_res &, const char*); 155 bool ?>? (const string_res &, const char*); 156 bool ?>=?(const string_res &, const char*); 157 bool ?<=?(const string_res &, const char*); 158 bool ?<? (const string_res &, const char*); 159 160 int cmp (const char*, const string_res &); 161 bool ?==?(const char*, const string_res &); 162 bool ?!=?(const char*, const string_res &); 163 bool ?>? (const char*, const string_res &); 164 bool ?>=?(const char*, const string_res &); 165 bool ?<=?(const char*, const string_res &); 166 bool ?<? (const char*, const string_res &); 148 167 149 168 // String search -
libcfa/src/common.hfa
rc1e66d9 rdeda7e6 69 69 T min( T v1, T v2 ) { return v1 < v2 ? v1 : v2; } 70 70 71 forall( T, Ts... | { T min( T, T ); T min( T, T s ); } )72 T min( T v1, T v2, T s vs ) { return min( min( v1, v2 ), vs ); }71 forall( T, Ts... | { T min( T, T ); T min( T, T, Ts ); } ) 72 T min( T v1, T v2, T v3, Ts vs ) { return min( min( v1, v2 ), v3, vs ); } 73 73 74 74 forall( T | { int ?>?( T, T ); } ) 75 75 T max( T v1, T v2 ) { return v1 > v2 ? v1 : v2; } 76 76 77 forall( T, Ts... | { T max( T, T ); T max( T, T s ); } )78 T max( T v1, T v2, T s vs ) { return max( max( v1, v2 ), vs ); }77 forall( T, Ts... | { T max( T, T ); T max( T, T, Ts ); } ) 78 T max( T v1, T v2, T v3, Ts vs ) { return max( max( v1, v2 ), v3, vs ); } 79 79 80 80 forall( T | { T min( T, T ); T max( T, T ); } ) -
libcfa/src/concurrency/coroutine.cfa
rc1e66d9 rdeda7e6 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 16 15:34:46202313 // Update Count : 2 412 // Last Modified On : Mon Sep 18 21:47:12 2023 13 // Update Count : 25 14 14 // 15 15 … … 364 364 // resume non local exception at receiver (i.e. enqueue in ehm buffer) 365 365 forall(exceptT *, T & | ehm_resume_at( exceptT, T )) 366 void resumeAt( T & receiver, exceptT & ex ) 366 void resumeAt( T & receiver, exceptT & ex ) libcfa_public { 367 367 coroutine$ * cor = get_coroutine( receiver ); 368 368 nonlocal_exception * nl_ex = alloc(); -
libcfa/src/concurrency/kernel/cluster.hfa
rc1e66d9 rdeda7e6 31 31 32 32 // warn normally all ints 33 #define warn_large_before warnf( !strict || old_avg < 33_000_000_000, "Suspiciously large previous average: %'llu (%llx), %'" PRId64 "ms \n", old_avg, old_avg, program ()`ms )34 #define warn_large_after warnf( !strict || ret < 33_000_000_000, "Suspiciously large new average after %'" PRId64 "ms cputime: %'llu (%llx) from %'llu-%'llu (%'llu, %'llu) and %'llu\n", program ()`ms, ret, ret, currtsc, intsc, new_val, new_val / 1000000, old_avg )33 #define warn_large_before warnf( !strict || old_avg < 33_000_000_000, "Suspiciously large previous average: %'llu (%llx), %'" PRId64 "ms \n", old_avg, old_avg, program_cpu()`ms ) 34 #define warn_large_after warnf( !strict || ret < 33_000_000_000, "Suspiciously large new average after %'" PRId64 "ms cputime: %'llu (%llx) from %'llu-%'llu (%'llu, %'llu) and %'llu\n", program_cpu()`ms, ret, ret, currtsc, intsc, new_val, new_val / 1000000, old_avg ) 35 35 36 36 // 8X linear factor is just 8 * x … … 42 42 static inline __readyQ_avg_t __to_readyQ_avg(unsigned long long intsc) { if(unlikely(0 == intsc)) return 0.0; else return log2((__readyQ_avg_t)intsc); } 43 43 44 #define warn_large_before warnf( !strict || old_avg < 35.0, "Suspiciously large previous average: %'lf, %'" PRId64 "ms \n", old_avg, program ()`ms )45 #define warn_large_after warnf( !strict || ret < 35.3, "Suspiciously large new average after %'" PRId64 "ms cputime: %'lf from %'llu-%'llu (%'llu, %'llu) and %'lf\n", program ()`ms, ret, currtsc, intsc, new_val, new_val / 1000000, old_avg ); \44 #define warn_large_before warnf( !strict || old_avg < 35.0, "Suspiciously large previous average: %'lf, %'" PRId64 "ms \n", old_avg, program_cpu()`ms ) 45 #define warn_large_after warnf( !strict || ret < 35.3, "Suspiciously large new average after %'" PRId64 "ms cputime: %'lf from %'llu-%'llu (%'llu, %'llu) and %'lf\n", program_cpu()`ms, ret, currtsc, intsc, new_val, new_val / 1000000, old_avg ); \ 46 46 verify(ret >= 0) 47 47 
-
libcfa/src/heap.cfa
rc1e66d9 rdeda7e6 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Aug 2 18:48:30 202313 // Update Count : 161 412 // Last Modified On : Mon Sep 11 11:21:10 2023 13 // Update Count : 1615 14 14 // 15 15 … … 691 691 return stats; 692 692 } // collectStats 693 694 static inline void clearStats() { 695 lock( mgrLock ); 696 697 // Zero the heap master and all active thread heaps. 698 HeapStatisticsCtor( heapMaster.stats ); 699 for ( Heap * heap = heapMaster.heapManagersList; heap; heap = heap->nextHeapManager ) { 700 HeapStatisticsCtor( heap->stats ); 701 } // for 702 703 unlock( mgrLock ); 704 } // clearStats 693 705 #endif // __STATISTICS__ 694 706 … … 1556 1568 1557 1569 1570 // Zero the heap master and all active thread heaps. 1571 void malloc_stats_clear() { 1572 #ifdef __STATISTICS__ 1573 clearStats(); 1574 #else 1575 #define MALLOC_STATS_MSG "malloc_stats statistics disabled.\n" 1576 if ( write( STDERR_FILENO, MALLOC_STATS_MSG, sizeof( MALLOC_STATS_MSG ) - 1 /* size includes '\0' */ ) == -1 ) { 1577 abort( "**** Error **** write failed in malloc_stats" ); 1578 } // if 1579 #endif // __STATISTICS__ 1580 } // malloc_stats_clear 1581 1582 1558 1583 // Changes the file descriptor where malloc_stats() writes statistics. 1559 1584 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public { -
libcfa/src/heap.hfa
rc1e66d9 rdeda7e6 10 10 // Created On : Tue May 26 11:23:55 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Oct 4 19:08:55 202213 // Update Count : 2 312 // Last Modified On : Mon Sep 11 11:18:18 2023 13 // Update Count : 24 14 14 // 15 15 … … 43 43 size_t malloc_mmap_start(); // crossover allocation size from sbrk to mmap 44 44 size_t malloc_unfreed(); // heap unfreed size (bytes) 45 void malloc_stats_clear(); // clear heap statistics 45 46 } // extern "C" 46 47 -
libcfa/src/iostream.cfa
rc1e66d9 rdeda7e6 1 1 2 // 2 3 // Cforall Version 1.0.0 Copyright (C) 2015 University of Waterloo … … 976 977 if ( f.flags.ignore ) { fmtstr[1] = '*'; start += 1; } 977 978 // no maximum width necessary because text ignored => width is read width 978 if ( f.wd != -1 ) { start += sprintf( &fmtstr[start], "%d", f.wd ); } 979 if ( f.wd != -1 ) { 980 // wd is buffer bytes available (for input chars + null terminator) 981 // rwd is count of input chars 982 int rwd = f.flags.rwd ? f.wd : (f.wd - 1); 983 start += sprintf( &fmtstr[start], "%d", rwd ); 984 } 979 985 980 986 if ( ! scanset ) { … … 993 999 } // if 994 1000 995 int check = f.wd - 1;1001 int check = f.wd - 2; 996 1002 if ( ! f.flags.rwd ) f.s[check] = '\0'; // insert sentinel 997 1003 len = fmt( is, fmtstr, f.s ); -
src/AST/Util.cpp
rc1e66d9 rdeda7e6 104 104 } 105 105 assertf( false, "Member not found." ); 106 } 107 108 template<typename node_t> 109 void oneOfExprOrType( const node_t * node ) { 110 if ( node->expr ) { 111 assertf( node->expr && !node->type, "Exactly one of expr or type should be set." ); 112 } else { 113 assertf( !node->expr && node->type, "Exactly one of expr or type should be set." ); 114 } 106 115 } 107 116 … … 152 161 } 153 162 163 void previsit( const SizeofExpr * node ) { 164 previsit( (const ParseNode *)node ); 165 oneOfExprOrType( node ); 166 } 167 168 void previsit( const AlignofExpr * node ) { 169 previsit( (const ParseNode *)node ); 170 oneOfExprOrType( node ); 171 } 172 154 173 void previsit( const StructInstType * node ) { 155 174 previsit( (const Node *)node ); … … 181 200 /// referring to is in scope by the structural rules of code. 182 201 // Any escapes marked with a bug should be removed once the bug is fixed. 202 // This is a separate pass because of it changes the visit pattern and 203 // must always be run on the entire translation unit. 183 204 struct InScopeCore : public ast::WithShortCircuiting { 184 205 ScopedSet<DeclWithType const *> typedDecls; -
src/ControlStruct/MultiLevelExit.cpp
rc1e66d9 rdeda7e6 10 10 // Created On : Mon Nov 1 13:48:00 2021 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Wed Sep 6 12:00:00 202313 // Update Count : 3 512 // Last Modified On : Fri Sep 8 17:04:00 2023 13 // Update Count : 36 14 14 // 15 15 … … 27 27 28 28 namespace { 29 30 /// The return context is used to remember if returns are allowed and if 31 /// not, why not. It is the nearest local control flow blocking construct. 32 enum ReturnContext { 33 MayReturn, 34 InTryWithHandler, 35 InResumeHandler, 36 InTerminateHandler, 37 InFinally, 38 }; 29 39 30 40 class Entry { … … 126 136 void previsit( const TryStmt * ); 127 137 void postvisit( const TryStmt * ); 138 void previsit( const CatchClause * ); 128 139 void previsit( const FinallyClause * ); 129 140 … … 134 145 vector<Entry> enclosing_control_structures; 135 146 Label break_label; 136 bool inFinally;147 ReturnContext ret_context; 137 148 138 149 template<typename LoopNode> … … 144 155 const list<ptr<Stmt>> & kids, bool caseClause ); 145 156 157 void enterSealedContext( ReturnContext ); 158 146 159 template<typename UnaryPredicate> 147 160 auto findEnclosingControlStructure( UnaryPredicate pred ) { … … 157 170 MultiLevelExitCore::MultiLevelExitCore( const LabelToStmt & lt ) : 158 171 target_table( lt ), break_label( CodeLocation(), "" ), 159 inFinally( false)172 ret_context( ReturnContext::MayReturn ) 160 173 {} 161 174 … … 488 501 489 502 void MultiLevelExitCore::previsit( const ReturnStmt * stmt ) { 490 if ( inFinally ) { 491 SemanticError( stmt->location, "'return' may not appear in a finally clause" ); 492 } 503 char const * context; 504 switch ( ret_context ) { 505 case ReturnContext::MayReturn: 506 return; 507 case ReturnContext::InTryWithHandler: 508 context = "try statement with a catch clause"; 509 break; 510 case ReturnContext::InResumeHandler: 511 context = "catchResume clause"; 512 break; 513 case ReturnContext::InTerminateHandler: 514 context = "catch clause"; 515 break; 516 
case ReturnContext::InFinally: 517 context = "finally clause"; 518 break; 519 default: 520 assert(0); 521 } 522 SemanticError( stmt->location, toString( "'return' may not appear in a ", context ) ); 493 523 } 494 524 … … 500 530 GuardAction([this](){ enclosing_control_structures.pop_back(); } ); 501 531 } 532 533 // Try statements/try blocks are only sealed with a termination handler. 534 for ( auto clause : stmt->handlers ) { 535 if ( ast::Terminate == clause->kind ) { 536 return enterSealedContext( ReturnContext::InTryWithHandler ); 537 } 538 } 502 539 } 503 540 … … 512 549 } 513 550 551 void MultiLevelExitCore::previsit( const CatchClause * clause ) { 552 ReturnContext context = ( ast::Terminate == clause->kind ) 553 ? ReturnContext::InTerminateHandler : ReturnContext::InResumeHandler; 554 enterSealedContext( context ); 555 } 556 514 557 void MultiLevelExitCore::previsit( const FinallyClause * ) { 515 GuardAction([this, old = std::move( enclosing_control_structures)](){ enclosing_control_structures = std::move(old); }); 516 enclosing_control_structures = vector<Entry>(); 517 GuardValue( inFinally ) = true; 558 enterSealedContext( ReturnContext::InFinally ); 518 559 } 519 560 … … 617 658 } 618 659 660 void MultiLevelExitCore::enterSealedContext( ReturnContext enter_context ) { 661 GuardAction([this, old = std::move(enclosing_control_structures)](){ enclosing_control_structures = std::move(old); }); 662 enclosing_control_structures = vector<Entry>(); 663 GuardValue( ret_context ) = enter_context; 664 } 665 619 666 } // namespace 620 667 -
src/GenPoly/GenPoly.cc
rc1e66d9 rdeda7e6 48 48 } 49 49 50 bool hasPolyParams( const std::vector<ast::ptr<ast::Expr>> & params, const ast::TypeSubstitution * env) { 51 for (auto ¶m : params) { 52 auto paramType = param.strict_as<ast::TypeExpr>(); 53 if (isPolyType(paramType->type, env)) return true; 50 bool hasPolyParams( const std::vector<ast::ptr<ast::Expr>> & params, const ast::TypeSubstitution * env ) { 51 for ( auto ¶m : params ) { 52 auto paramType = param.as<ast::TypeExpr>(); 53 assertf( paramType, "Aggregate parameters should be type expressions" ); 54 if ( isPolyType( paramType->type, env ) ) return true; 54 55 } 55 56 return false; … … 62 63 assertf(paramType, "Aggregate parameters should be type expressions"); 63 64 if ( isPolyType( paramType->get_type(), tyVars, env ) ) return true; 65 } 66 return false; 67 } 68 69 bool hasPolyParams( const std::vector<ast::ptr<ast::Expr>> & params, const TypeVarMap & typeVars, const ast::TypeSubstitution * env ) { 70 for ( auto & param : params ) { 71 auto paramType = param.as<ast::TypeExpr>(); 72 assertf( paramType, "Aggregate parameters should be type expressions" ); 73 if ( isPolyType( paramType->type, typeVars, env ) ) return true; 64 74 } 65 75 return false; … … 185 195 } 186 196 187 const ast::Type * isPolyType(const ast::Type * type, const TyVarMap & tyVars, const ast::TypeSubstitution * env) {188 type = replaceTypeInst( type, env );189 190 if ( auto typeInst = dynamic_cast< const ast::TypeInstType * >( type ) ) {191 if ( tyVars.contains( typeInst->typeString() ) ) return type;192 } else if ( auto arrayType = dynamic_cast< const ast::ArrayType * >( type ) ) {193 return isPolyType( arrayType->base, env );194 } else if ( auto structType = dynamic_cast< const ast::StructInstType* >( type ) ) {195 if ( hasPolyParams( structType->params, env ) ) return type;196 } else if ( auto unionType = dynamic_cast< const ast::UnionInstType* >( type ) ) {197 if ( hasPolyParams( unionType->params, env ) ) return type;198 }199 return nullptr;200 }201 202 
197 const ast::Type * isPolyType( const ast::Type * type, 203 198 const TypeVarMap & typeVars, const ast::TypeSubstitution * subst ) { … … 207 202 if ( typeVars.contains( *inst ) ) return type; 208 203 } else if ( auto array = dynamic_cast< const ast::ArrayType * >( type ) ) { 209 return isPolyType( array->base, subst );204 return isPolyType( array->base, typeVars, subst ); 210 205 } else if ( auto sue = dynamic_cast< const ast::StructInstType * >( type ) ) { 211 if ( hasPolyParams( sue->params, subst ) ) return type;206 if ( hasPolyParams( sue->params, typeVars, subst ) ) return type; 212 207 } else if ( auto sue = dynamic_cast< const ast::UnionInstType * >( type ) ) { 213 if ( hasPolyParams( sue->params, subst ) ) return type;208 if ( hasPolyParams( sue->params, typeVars, subst ) ) return type; 214 209 } 215 210 return nullptr; -
tests/.expect/minmax.txt
rc1e66d9 rdeda7e6 20 20 double 4. 3.1 max 4. 21 21 long double 4. 3.1 max 4. 22 23 3 arguments 24 2 3 4 min 2 max 4 25 4 2 3 min 2 max 4 26 3 4 2 min 2 max 4 27 4 arguments 28 3 2 5 4 min 2 max 5 29 5 3 4 2 min 2 max 5 -
tests/concurrency/actors/dynamic.cfa
rc1e66d9 rdeda7e6 9 9 struct derived_actor { inline actor; }; 10 10 struct derived_msg { 11 12 11 inline message; 12 int cnt; 13 13 }; 14 14 15 15 void ?{}( derived_msg & this, int cnt ) { 16 ((message &) this){ Delete };17 16 set_allocation( this, Delete ); 17 this.cnt = cnt; 18 18 } 19 19 void ?{}( derived_msg & this ) { ((derived_msg &)this){ 0 }; } 20 20 21 21 allocation receive( derived_actor & receiver, derived_msg & msg ) { 22 23 24 25 26 27 28 29 30 31 22 if ( msg.cnt >= Times ) { 23 sout | "Done"; 24 return Delete; 25 } 26 derived_msg * d_msg = alloc(); 27 (*d_msg){ msg.cnt + 1 }; 28 derived_actor * d_actor = alloc(); 29 (*d_actor){}; 30 *d_actor | *d_msg; 31 return Delete; 32 32 } 33 33 34 34 int main( int argc, char * argv[] ) { 35 35 switch ( argc ) { 36 36 case 2: 37 37 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 38 Times = ato i( argv[1] );39 if ( Times < 1 ) goto Usage;38 Times = ato( argv[1] ); 39 if ( Times < 1 ) fallthru default; 40 40 } // if 41 41 case 1: // use defaults 42 42 break; 43 43 default: 44 Usage: 45 sout | "Usage: " | argv[0] | " [ times (> 0) ]"; 46 exit( EXIT_FAILURE ); 44 exit | "Usage: " | argv[0] | " [ times (> 0) ]"; 47 45 } // switch 48 46 49 printf("starting\n");47 sout | "starting"; 50 48 51 52 49 executor e{ 0, 1, 1, false }; 50 start_actor_system( e ); 53 51 54 printf("started\n");52 sout | "started"; 55 53 56 57 58 59 60 54 derived_msg * d_msg = alloc(); 55 (*d_msg){}; 56 derived_actor * d_actor = alloc(); 57 (*d_actor){}; 58 *d_actor | *d_msg; 61 59 62 printf("stopping\n");60 sout | "stopping"; 63 61 64 62 stop_actor_system(); 65 63 66 printf("stopped\n"); 67 68 return 0; 64 sout | "stopped"; 69 65 } -
tests/concurrency/actors/executor.cfa
rc1e66d9 rdeda7e6 10 10 static int ids = 0; 11 11 struct d_actor { 12 13 14 12 inline actor; 13 d_actor * gstart; 14 int id, rounds, recs, sends; 15 15 }; 16 16 void ?{}( d_actor & this ) with(this) { 17 18 19 20 21 17 id = ids++; 18 gstart = (&this + (id / Set * Set - id)); // remember group-start array-element 19 rounds = Set * Rounds; // send at least one message to each group member 20 recs = 0; 21 sends = 0; 22 22 } 23 23 … … 25 25 26 26 allocation receive( d_actor & this, d_msg & msg ) with( this ) { 27 28 29 30 31 32 33 34 35 27 if ( recs == rounds ) return Finished; 28 if ( recs % Batch == 0 ) { 29 for ( i; Batch ) { 30 gstart[sends % Set] | shared_msg; 31 sends += 1; 32 } 33 } 34 recs += 1; 35 return Nodelete; 36 36 } 37 37 38 38 int main( int argc, char * argv[] ) { 39 39 switch ( argc ) { 40 40 case 7: 41 41 if ( strcmp( argv[6], "d" ) != 0 ) { // default ? 42 BufSize = ato i( argv[6] );43 if ( BufSize < 0 ) goto Usage;42 BufSize = ato( argv[6] ); 43 if ( BufSize < 0 ) fallthru default; 44 44 } // if 45 45 case 6: 46 46 if ( strcmp( argv[5], "d" ) != 0 ) { // default ? 47 Batch = ato i( argv[5] );48 if ( Batch < 1 ) goto Usage;47 Batch = ato( argv[5] ); 48 if ( Batch < 1 ) fallthru default; 49 49 } // if 50 50 case 5: 51 51 if ( strcmp( argv[4], "d" ) != 0 ) { // default ? 52 Processors = ato i( argv[4] );53 if ( Processors < 1 ) goto Usage;52 Processors = ato( argv[4] ); 53 if ( Processors < 1 ) fallthru default; 54 54 } // if 55 55 case 4: 56 56 if ( strcmp( argv[3], "d" ) != 0 ) { // default ? 57 Rounds = ato i( argv[3] );58 if ( Rounds < 1 ) goto Usage;57 Rounds = ato( argv[3] ); 58 if ( Rounds < 1 ) fallthru default; 59 59 } // if 60 60 case 3: 61 61 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 62 Set = ato i( argv[2] );63 if ( Set < 1 ) goto Usage;62 Set = ato( argv[2] ); 63 if ( Set < 1 ) fallthru default; 64 64 } // if 65 65 case 2: 66 66 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 
67 Actors = ato i( argv[1] );68 if ( Actors < 1 || Actors <= Set || Actors % Set != 0 ) goto Usage;67 Actors = ato( argv[1] ); 68 if ( Actors < 1 || Actors <= Set || Actors % Set != 0 ) fallthru default; 69 69 } // if 70 70 case 1: // use defaults 71 71 break; 72 72 default: 73 Usage: 74 sout | "Usage: " | argv[0] 75 | " [ actors (> 0 && > set && actors % set == 0 ) | 'd' (default " | Actors 73 exit | "Usage: " | argv[0] 74 | " [ actors (> 0 && > set && actors % set == 0 ) | 'd' (default " | Actors 76 75 | ") ] [ set (> 0) | 'd' (default " | Set 77 76 | ") ] [ rounds (> 0) | 'd' (default " | Rounds … … 80 79 | ") ] [ buffer size (>= 0) | 'd' (default " | BufSize 81 80 | ") ]" ; 82 exit( EXIT_FAILURE );83 81 } // switch 84 82 85 83 executor e{ Processors, Processors, Processors == 1 ? 1 : Processors * 512, true }; 86 84 87 printf("starting\n");85 sout | "starting"; 88 86 89 87 start_actor_system( e ); 90 88 91 printf("started\n");89 sout | "started"; 92 90 93 91 d_actor actors[ Actors ]; 94 92 95 93 for ( i; Actors ) { … … 97 95 } // for 98 96 99 printf("stopping\n");97 sout | "stopping"; 100 98 101 99 stop_actor_system(); 102 100 103 printf("stopped\n"); 104 105 return 0; 101 sout | "stopped"; 106 102 } -
tests/concurrency/actors/inherit.cfa
rc1e66d9 rdeda7e6 18 18 19 19 allocation handle() { 20 20 return Finished; 21 21 } 22 22 … … 27 27 28 28 int main() { 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 29 sout | "Start"; 30 { 31 start_actor_system(); 32 D_msg * dm = alloc(); 33 (*dm){}; 34 D_msg2 * dm2 = alloc(); 35 (*dm2){}; 36 Server2 * s = alloc(); 37 (*s){}; 38 Server2 * s2 = alloc(); 39 (*s2){}; 40 *s | *dm; 41 *s2 | *dm2; 42 stop_actor_system(); 43 } 44 { 45 start_actor_system(); 46 Server s[2]; 47 D_msg * dm = alloc(); 48 (*dm){}; 49 D_msg2 * dm2 = alloc(); 50 (*dm2){}; 51 s[0] | *dm; 52 s[1] | *dm2; 53 stop_actor_system(); 54 } 55 sout | "Finished"; 56 56 } -
tests/concurrency/actors/inline.cfa
rc1e66d9 rdeda7e6 3 3 4 4 struct d_actor { 5 5 inline actor; 6 6 }; 7 7 struct msg_wrapper { 8 9 8 int b; 9 inline message; 10 10 }; 11 11 void ^?{}( msg_wrapper & this ) { sout | "msg_wrapper dtor"; } 12 12 13 13 struct d_msg { 14 15 14 int m; 15 inline msg_wrapper; 16 16 }; 17 17 void ?{}( d_msg & this, int m, int b ) { this.m = m; this.b = b; set_allocation( this, Delete ); } … … 19 19 20 20 allocation receive( d_actor &, d_msg & msg ) { 21 22 23 21 sout | msg.m; 22 sout | msg.b; 23 return Finished; 24 24 } 25 25 26 26 struct d_msg2 { 27 28 27 int m; 28 inline msg_wrapper; 29 29 }; 30 30 void ^?{}( d_msg2 & this ) { sout | "d_msg2 dtor";} 31 31 32 32 allocation receive( d_actor &, d_msg2 & msg ) { 33 34 33 sout | msg.m; 34 return Finished; 35 35 } 36 36 37 37 int main() { 38 39 40 start_actor_system();// sets up executor41 42 43 44 45 stop_actor_system();// waits until actors finish46 47 48 start_actor_system();// sets up executor49 50 51 52 53 54 55 56 stop_actor_system();// waits until actors finish57 38 processor p; 39 { 40 start_actor_system(); // sets up executor 41 d_actor da; 42 d_msg * dm = alloc(); 43 (*dm){ 42, 2423 }; 44 da | *dm; 45 stop_actor_system(); // waits until actors finish 46 } 47 { 48 start_actor_system(); // sets up executor 49 d_actor da; 50 d_msg2 dm{ 29079 }; 51 set_allocation( dm, Nodelete ); 52 msg_wrapper * mw = &dm; 53 message * mg = &dm; 54 virtual_dtor * v = &dm; 55 da | dm; 56 stop_actor_system(); // waits until actors finish 57 } 58 58 } -
tests/concurrency/actors/pingpong.cfa
rc1e66d9 rdeda7e6 10 10 11 11 struct p_msg { 12 13 12 inline message; 13 size_t count; 14 14 }; 15 static inline void ?{}( p_msg & this ) { ((message &)this){}; this.count = 0; } 15 //static inline void ?{}( p_msg & this ) { ((message &)this){}; this.count = 0; } 16 static inline void ?{}( p_msg & this ) { this.count = 0; } 16 17 17 18 ping * pi; … … 20 21 21 22 allocation receive( ping & receiver, p_msg & msg ) { 22 23 23 msg.count++; 24 if ( msg.count > times ) return Finished; 24 25 25 26 27 28 26 allocation retval = Nodelete; 27 if ( msg.count == times ) retval = Finished; 28 *po | msg; 29 return retval; 29 30 } 30 31 31 32 allocation receive( pong & receiver, p_msg & msg ) { 32 33 34 35 36 37 38 33 msg.count++; 34 if ( msg.count > times ) return Finished; 35 36 allocation retval = Nodelete; 37 if ( msg.count == times ) retval = Finished; 38 *pi | msg; 39 return retval; 39 40 } 40 41 … … 42 43 43 44 int main( int argc, char * argv[] ) { 44 printf("start\n");45 sout | "start"; 45 46 46 47 processor p[Processors - 1]; 47 48 48 start_actor_system( Processors ); // test passing number of processors 49 start_actor_system( Processors ); // test passing number of processors 50 ping pi_actor; 51 pong po_actor; 52 po = &po_actor; 53 pi = &pi_actor; 54 p_msg m; 55 pi_actor | m; 56 stop_actor_system(); 49 57 50 ping pi_actor; 51 pong po_actor; 52 po = &po_actor; 53 pi = &pi_actor; 54 p_msg m; 55 pi_actor | m; 56 stop_actor_system(); 57 58 printf("end\n"); 59 return 0; 58 sout | "end"; 60 59 } -
tests/concurrency/actors/poison.cfa
rc1e66d9 rdeda7e6 11 11 12 12 int main() { 13 13 sout | "Start"; 14 14 15 16 17 18 19 20 21 22 23 15 sout | "Finished"; 16 { 17 start_actor_system(); 18 Server s[10]; 19 for ( i; 10 ) { 20 s[i] | finished_msg; 21 } 22 stop_actor_system(); 23 } 24 24 25 26 27 28 29 30 31 32 33 34 25 sout | "Delete"; 26 { 27 start_actor_system(); 28 for ( i; 10 ) { 29 Server * s = alloc(); 30 (*s){}; 31 (*s) | delete_msg; 32 } 33 stop_actor_system(); 34 } 35 35 36 37 38 39 40 41 42 43 44 45 46 36 sout | "Destroy"; 37 { 38 start_actor_system(); 39 Server s[10]; 40 for ( i; 10 ) 41 s[i] | destroy_msg; 42 stop_actor_system(); 43 for ( i; 10 ) 44 if (s[i].val != 777) 45 sout | "Error: dtor not called correctly."; 46 } 47 47 48 sout | "Done"; 49 return 0; 48 sout | "Done"; 50 49 } -
tests/concurrency/actors/static.cfa
rc1e66d9 rdeda7e6 9 9 struct derived_actor { inline actor; }; 10 10 struct derived_msg { 11 12 11 inline message; 12 int cnt; 13 13 }; 14 14 15 15 void ?{}( derived_msg & this, int cnt ) { 16 ((message &) this){ Nodelete };17 16 set_allocation( this, Nodelete ); 17 this.cnt = cnt; 18 18 } 19 19 void ?{}( derived_msg & this ) { ((derived_msg &)this){ 0 }; } 20 20 21 21 allocation receive( derived_actor & receiver, derived_msg & msg ) { 22 23 24 25 26 27 28 22 if ( msg.cnt >= Times ) { 23 sout | "Done"; 24 return Finished; 25 } 26 msg.cnt++; 27 receiver | msg; 28 return Nodelete; 29 29 } 30 30 31 31 int main( int argc, char * argv[] ) { 32 32 switch ( argc ) { 33 33 case 2: 34 34 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 35 Times = ato i( argv[1] );36 if ( Times < 1 ) goto Usage;35 Times = ato( argv[1] ); 36 if ( Times < 1 ) fallthru default; 37 37 } // if 38 38 case 1: // use defaults 39 39 break; 40 40 default: 41 Usage: 42 sout | "Usage: " | argv[0] | " [ times (> 0) ]"; 43 exit( EXIT_FAILURE ); 41 exit | "Usage: " | argv[0] | " [ times (> 0) ]"; 44 42 } // switch 45 43 46 printf("starting\n");44 sout | "starting"; 47 45 48 49 46 executor e{ 0, 1, 1, false }; 47 start_actor_system( e ); 50 48 51 printf("started\n");49 sout | "started"; 52 50 53 51 derived_msg msg; 54 52 55 53 derived_actor actor; 56 54 57 55 actor | msg; 58 56 59 printf("stopping\n");57 sout | "stopping"; 60 58 61 59 stop_actor_system(); 62 60 63 printf("stopped\n"); 64 65 return 0; 61 sout | "stopped"; 66 62 } -
tests/concurrency/actors/types.cfa
rc1e66d9 rdeda7e6 9 9 10 10 struct derived_actor { 11 12 11 inline actor; 12 int counter; 13 13 }; 14 14 static inline void ?{}( derived_actor & this ) { ((actor &)this){}; this.counter = 0; } 15 15 16 16 struct d_msg { 17 18 17 inline message; 18 int num; 19 19 }; 20 20 21 21 // this isn't a valid receive routine since int is not a message type 22 22 allocation receive( derived_actor & receiver, int i ) with( receiver ) { 23 24 25 26 23 mutex(sout) sout | i; 24 counter++; 25 if ( counter == 2 ) return Finished; 26 return Nodelete; 27 27 } 28 28 29 29 allocation receive( derived_actor & receiver, d_msg & msg ) { 30 30 return receive( receiver, msg.num ); 31 31 } 32 32 33 33 struct derived_actor2 { 34 35 34 struct nested { int i; }; // testing nested before inline 35 inline actor; 36 36 }; 37 37 38 38 allocation receive( derived_actor2 & receiver, d_msg & msg ) { 39 40 39 mutex(sout) sout | msg.num; 40 return Finished; 41 41 } 42 42 … … 44 44 struct derived_actor4 { inline derived_actor3; }; 45 45 struct d_msg2 { 46 47 46 inline message; 47 int num; 48 48 }; 49 49 50 50 allocation receive( derived_actor3 & receiver, d_msg & msg ) { 51 52 53 51 mutex(sout) sout | msg.num; 52 if ( msg.num == -1 ) return Nodelete; 53 return Finished; 54 54 } 55 55 56 56 allocation receive( derived_actor3 & receiver, d_msg2 & msg ) { 57 58 57 mutex(sout) sout | msg.num; 58 return Finished; 59 59 } 60 60 … … 62 62 63 63 int main( int argc, char * argv[] ) { 64 printf("start\n");64 sout | "start"; 65 65 66 66 processor p[Processors - 1]; 67 67 68 printf("basic test\n");69 70 71 72 73 74 75 68 sout | "basic test"; 69 start_actor_system( Processors ); // test passing number of processors 70 derived_actor a; 71 d_msg b, c; 72 b.num = 1; 73 c.num = 2; 74 a | b | c; 75 stop_actor_system(); 76 76 77 printf("same message and different actors test\n");78 79 80 81 82 83 84 77 sout | "same message and different actors test"; 78 start_actor_system(); // let system detect # of processors 79 
derived_actor2 d_ac2_0, d_ac2_1; 80 d_msg d_ac2_msg; 81 d_ac2_msg.num = 3; 82 d_ac2_0 | d_ac2_msg; 83 d_ac2_1 | d_ac2_msg; 84 stop_actor_system(); 85 85 86 87 88 printf("same message and different actor types test\n");89 90 91 92 93 94 95 96 97 98 86 87 { 88 sout | "same message and different actor types test"; 89 executor e{ 0, Processors, Processors == 1 ? 1 : Processors * 4, false }; 90 start_actor_system( e ); // pass an explicit executor 91 derived_actor2 d_ac2_2; 92 derived_actor3 d_ac3_0; 93 d_msg d_ac23_msg; 94 d_ac23_msg.num = 4; 95 d_ac3_0 | d_ac23_msg; 96 d_ac2_2 | d_ac23_msg; 97 stop_actor_system(); 98 } // RAII to clean up executor 99 99 100 101 printf("different message types, one actor test\n");102 103 104 105 106 107 108 109 110 111 100 { 101 sout | "different message types, one actor test"; 102 executor e{ 1, Processors, Processors == 1 ? 1 : Processors * 4, true }; 103 start_actor_system( Processors ); 104 derived_actor3 a3; 105 d_msg b1; 106 d_msg2 c2; 107 b1.num = -1; 108 c2.num = 5; 109 a3 | b1 | c2; 110 stop_actor_system(); 111 } // RAII to clean up executor 112 112 113 114 printf("nested inheritance actor test\n");115 116 117 118 119 120 121 122 123 124 113 { 114 sout | "nested inheritance actor test"; 115 executor e{ 1, Processors, Processors == 1 ? 1 : Processors * 4, true }; 116 start_actor_system( Processors ); 117 derived_actor4 a4; 118 d_msg b1; 119 d_msg2 c2; 120 b1.num = -1; 121 c2.num = 5; 122 a4 | b1 | c2; 123 stop_actor_system(); 124 } // RAII to clean up executor 125 125 126 printf("end\n"); 127 return 0; 126 sout | "end"; 128 127 } -
tests/concurrency/channels/barrier.cfa
rc1e66d9 rdeda7e6 8 8 9 9 size_t total_operations = 0; 10 int Processors = 1, Tasks = 5, BarrierSize = 2; 10 ssize_t Processors = 1, Tasks = 5, BarrierSize = 2; // must be signed 11 11 12 12 typedef channel( int ) Channel; … … 65 65 case 3: 66 66 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 67 BarrierSize = ato i( argv[2] );68 if ( Processors < 1 ) goto Usage;67 BarrierSize = ato( argv[2] ); 68 if ( Processors < 1 ) fallthru default; 69 69 } // if 70 70 case 2: 71 71 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 72 Processors = ato i( argv[1] );73 if ( Processors < 1 ) goto Usage;72 Processors = ato( argv[1] ); 73 if ( Processors < 1 ) fallthru default; 74 74 } // if 75 75 case 1: // use defaults 76 76 break; 77 77 default: 78 Usage: 79 sout | "Usage: " | argv[0] 78 exit | "Usage: " | argv[0] 80 79 | " [ processors (> 0) | 'd' (default " | Processors 81 80 | ") ] [ BarrierSize (> 0) | 'd' (default " | BarrierSize 82 81 | ") ]" ; 83 exit( EXIT_FAILURE );84 82 } // switch 85 83 if ( Tasks < BarrierSize ) -
tests/concurrency/channels/big_elems.cfa
rc1e66d9 rdeda7e6 2 2 #include "parallel_harness.hfa" 3 3 4 s ize_t Processors = 10, Channels = 10, Producers = 40, Consumers = 40, ChannelSize = 128;4 ssize_t Processors = 10, Channels = 10, Producers = 40, Consumers = 40, ChannelSize = 128; 5 5 6 6 int main() { -
tests/concurrency/channels/churn.cfa
rc1e66d9 rdeda7e6 7 7 #include <time.hfa> 8 8 9 s ize_t Processors = 1, Channels = 4, Producers = 2, Consumers = 2, ChannelSize = 128;9 ssize_t Processors = 1, Channels = 4, Producers = 2, Consumers = 2, ChannelSize = 128; 10 10 11 11 owner_lock o; … … 90 90 case 4: 91 91 if ( strcmp( argv[3], "d" ) != 0 ) { // default ? 92 if ( atoi( argv[3] ) < 1 ) goto Usage;93 ChannelSize = atoi( argv[3] );92 ChannelSize = ato( argv[3] ); 93 if ( ChannelSize < 1 ) fallthru default; 94 94 } // if 95 95 case 3: 96 96 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 97 if ( atoi( argv[2] ) < 1 ) goto Usage;98 Channels = atoi( argv[2] );97 Channels = ato( argv[2] ); 98 if ( Channels < 1 ) fallthru default; 99 99 } // if 100 100 case 2: 101 101 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 102 if ( atoi( argv[1] ) < 1 ) goto Usage;103 Processors = atoi( argv[1] );102 Processors = ato( argv[1] ); 103 if ( Processors < 1 ) fallthru default; 104 104 } // if 105 105 case 1: // use defaults 106 106 break; 107 107 default: 108 Usage: 109 sout | "Usage: " | argv[0] 108 exit | "Usage: " | argv[0] 110 109 | " [ processors > 0 | d ]" 111 110 | " [ producers > 0 | d ]" 112 111 | " [ consumers > 0 | d ]" 113 112 | " [ channels > 0 | d ]"; 114 exit( EXIT_FAILURE );115 113 } 116 114 processor p[Processors - 1]; -
tests/concurrency/channels/contend.cfa
rc1e66d9 rdeda7e6 127 127 case 3: 128 128 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 129 ChannelSize = atoi( argv[2] ); 129 ChannelSize = ato( argv[2] ); 130 if ( ChannelSize < 1 ) fallthru default; 130 131 } // if 131 132 case 2: 132 133 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 133 Processors = ato i( argv[1] );134 if ( Processors < 1 ) goto Usage;134 Processors = ato( argv[1] ); 135 if ( Processors < 1 ) fallthru default; 135 136 } // if 136 137 case 1: // use defaults 137 138 break; 138 139 default: 139 Usage: 140 sout | "Usage: " | argv[0] 140 exit | "Usage: " | argv[0] 141 141 | " [ processors (> 0) | 'd' (default " | Processors 142 142 | ") ] [ channel size (>= 0) | 'd' (default " | ChannelSize 143 143 | ") ]" ; 144 exit( EXIT_FAILURE );145 144 } // switch 145 146 146 test(Processors, Channels, Producers, Consumers, ChannelSize); 147 147 } -
tests/concurrency/channels/daisy_chain.cfa
rc1e66d9 rdeda7e6 8 8 9 9 size_t total_operations = 0; 10 s ize_t Processors = 1, Tasks = 4;10 ssize_t Processors = 1, Tasks = 4; // must be signed 11 11 12 12 owner_lock o; … … 37 37 case 3: 38 38 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 39 Tasks = ato i( argv[2] );40 if ( Tasks < 1 ) goto Usage;39 Tasks = ato( argv[2] ); 40 if ( Tasks < 1 ) fallthru default; 41 41 } // if 42 42 case 2: 43 43 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 44 Processors = ato i( argv[1] );45 if ( Processors < 1 ) goto Usage;44 Processors = ato( argv[1] ); 45 if ( Processors < 1 ) fallthru default; 46 46 } // if 47 47 case 1: // use defaults 48 48 break; 49 49 default: 50 Usage: 51 sout | "Usage: " | argv[0] 50 exit | "Usage: " | argv[0] 52 51 | " [ processors (> 0) | 'd' (default " | Processors 53 52 | ") ] [ channel size (>= 0) | 'd' (default " | Tasks 54 53 | ") ]" ; 55 exit( EXIT_FAILURE );56 54 } // switch 57 55 processor proc[Processors - 1]; … … 71 69 // sout | total_operations; 72 70 sout | "done"; 73 74 return 0;75 71 } -
tests/concurrency/channels/hot_potato.cfa
rc1e66d9 rdeda7e6 8 8 9 9 size_t total_operations = 0; 10 s ize_t Processors = 1, Tasks = 4;10 ssize_t Processors = 1, Tasks = 4; // must be signed 11 11 12 12 owner_lock o; … … 38 38 } 39 39 40 41 40 int main( int argc, char * argv[] ) { 42 41 switch ( argc ) { 43 42 case 3: 44 43 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 45 Tasks = ato i( argv[2] );46 if ( Tasks < 1 ) goto Usage;44 Tasks = ato( argv[2] ); 45 if ( Tasks < 1 ) fallthru default; 47 46 } // if 48 47 case 2: 49 48 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 50 Processors = ato i( argv[1] );51 if ( Processors < 1 ) goto Usage;49 Processors = ato( argv[1] ); 50 if ( Processors < 1 ) fallthru default; 52 51 } // if 53 52 case 1: // use defaults 54 53 break; 55 54 default: 56 Usage: 57 sout | "Usage: " | argv[0] 55 exit | "Usage: " | argv[0] 58 56 | " [ processors (> 0) | 'd' (default " | Processors 59 57 | ") ] [ channel size (>= 0) | 'd' (default " | Tasks 60 58 | ") ]" ; 61 exit( EXIT_FAILURE );62 59 } // switch 60 63 61 processor proc[Processors - 1]; 64 62 -
tests/concurrency/channels/pub_sub.cfa
rc1e66d9 rdeda7e6 87 87 case 3: 88 88 if ( strcmp( argv[2], "d" ) != 0 ) { // default ? 89 Tasks = ato i( argv[2] );90 if ( Tasks < 1 ) goto Usage;89 Tasks = ato( argv[2] ); 90 if ( Tasks < 1 ) fallthru default; 91 91 } // if 92 92 case 2: 93 93 if ( strcmp( argv[1], "d" ) != 0 ) { // default ? 94 Processors = ato i( argv[1] );95 if ( Processors < 1 ) goto Usage;94 Processors = ato( argv[1] ); 95 if ( Processors < 1 ) fallthru default; 96 96 } // if 97 97 case 1: // use defaults 98 98 break; 99 99 default: 100 Usage: 101 sout | "Usage: " | argv[0] 100 exit | "Usage: " | argv[0] 102 101 | " [ processors (> 0) | 'd' (default " | Processors 103 102 | ") ] [ Tasks (> 0) | 'd' (default " | Tasks 104 103 | ") ]" ; 105 exit( EXIT_FAILURE );106 104 } // switch 107 105 BarrierSize = Tasks; -
tests/concurrency/examples/matrixSum.cfa
rc1e66d9 rdeda7e6 10 10 // Created On : Mon Oct 9 08:29:28 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Feb 20 08:37:53 201913 // Update Count : 1 612 // Last Modified On : Fri Sep 8 19:05:34 2023 13 // Update Count : 19 14 14 // 15 15 16 16 #include <fstream.hfa> 17 #include <kernel.hfa>18 17 #include <thread.hfa> 19 18 … … 35 34 36 35 int main() { 37 /* const */int rows = 10, cols = 1000;36 const int rows = 10, cols = 1000; 38 37 int matrix[rows][cols], subtotals[rows], total = 0; 39 38 processor p; // add kernel thread 40 39 41 for ( r; rows ) { 40 for ( r; rows ) { // initialize 42 41 for ( c; cols ) { 43 42 matrix[r][c] = 1; 44 43 } // for 45 44 } // for 45 46 46 Adder * adders[rows]; 47 47 for ( r; rows ) { // start threads to sum rows 48 48 adders[r] = &(*malloc()){ matrix[r], cols, subtotals[r] }; 49 // adders[r] = new( matrix[r], cols, &subtotals[r] );49 // adders[r] = new( matrix[r], cols, subtotals[r] ); 50 50 } // for 51 51 52 for ( r; rows ) { // wait for threads to finish 52 53 delete( adders[r] ); … … 57 58 58 59 // Local Variables: // 59 // tab-width: 4 //60 60 // compile-command: "cfa matrixSum.cfa" // 61 61 // End: // -
tests/concurrency/unified_locking/locks.cfa
rc1e66d9 rdeda7e6 1 1 #include <stdio.h> 2 #include "locks.hfa"2 #include <locks.hfa> 3 3 #include <stdlib.hfa> 4 4 #include <thread.hfa> -
tests/concurrency/unified_locking/pthread_locks.cfa
rc1e66d9 rdeda7e6 1 1 #include <stdio.h> 2 #include "locks.hfa"2 #include <locks.hfa> 3 3 #include <stdlib.hfa> 4 4 #include <thread.hfa> -
tests/concurrency/unified_locking/test_debug.cfa
rc1e66d9 rdeda7e6 1 #include "locks.hfa"1 #include <locks.hfa> 2 2 3 3 fast_block_lock f; -
tests/concurrency/unified_locking/thread_test.cfa
rc1e66d9 rdeda7e6 1 1 #include <stdio.h> 2 #include "locks.hfa"2 #include <locks.hfa> 3 3 #include <stdlib.hfa> 4 4 #include <thread.hfa> -
tests/concurrency/waituntil/locks.cfa
rc1e66d9 rdeda7e6 73 73 printf("done\n"); 74 74 } 75 -
tests/exceptions/try-ctrl-flow.cfa
rc1e66d9 rdeda7e6 1 // All of these should be caught as long as the check remains in the same2 // pass. (Although not even all of the checks are in place yet.)1 // Check all the local control flow structures that are "sealed" by some the 2 // try statement clauses; where structured programming is stricter. 3 3 4 4 void break_in_finally() { … … 151 151 } 152 152 153 // Now just use return to test the other try control flow interactions. 154 155 exception nil_exception {}; 156 157 void return_in_try_with_catch() { 158 try { 159 return; 160 } catch (nil_exception *) { 161 ; 162 } 163 } 164 165 // Allowed. 166 void return_in_try_with_catchReturn() { 167 try { 168 return; 169 } catchResume (nil_exception *) { 170 ; 171 } 172 } 173 174 // Allowed. 175 void return_in_try_with_finally() { 176 try { 177 return; 178 } finally { 179 ; 180 } 181 } 182 183 void return_in_catch() { 184 try { 185 ; 186 } catch (nil_exception *) { 187 return; 188 } 189 } 190 191 void return_in_catchResume() { 192 try { 193 ; 194 } catchResume (nil_exception *) { 195 return; 196 } 197 } 198 153 199 void main() { 154 200 // Should not compile. -
tests/io/.expect/manipulatorsInput.arm64.txt
rc1e66d9 rdeda7e6 1 pre1 "123456", canary ok 2 pre2a "1234567", exception occurred, canary ok 3 pre2b "89", canary ok 1 4 1 yyyyyyyyyyyyyyyyyyyy 2 5 2 abcxxx -
tests/io/.expect/manipulatorsInput.x64.txt
rc1e66d9 rdeda7e6 1 pre1 "123456", canary ok 2 pre2a "1234567", exception occurred, canary ok 3 pre2b "89", canary ok 1 4 1 yyyyyyyyyyyyyyyyyyyy 2 5 2 abcxxx -
tests/io/.expect/manipulatorsInput.x86.txt
rc1e66d9 rdeda7e6 1 pre1 "123456", canary ok 2 pre2a "1234567", exception occurred, canary ok 3 pre2b "89", canary ok 1 4 1 yyyyyyyyyyyyyyyyyyyy 2 5 2 abcxxx -
tests/io/.in/manipulatorsInput.txt
rc1e66d9 rdeda7e6 1 123456 2 123456789 1 3 abc 2 4 abc -
tests/io/manipulatorsInput.cfa
rc1e66d9 rdeda7e6 15 15 16 16 int main() { 17 { 18 // Upfront checks to ensure buffer safety. Once these pass, the simpler `wdi(sizeof(s),s)` 19 // usage, as in the scanf alignment cases below, is justified. 20 struct { 21 char buf[8]; 22 char canary; 23 } data; 24 static_assert( sizeof(data.buf) == 8 ); 25 static_assert( &data.buf[8] == &data.canary ); // canary comes right after buf 26 27 void rep(const char* casename) { 28 data.canary = 42; 29 bool caught = false; 30 try { 31 sin | wdi( sizeof(data.buf), data.buf ); 32 } catch (cstring_length*) { 33 caught = true; 34 } 35 printf( "%s \"%s\"", casename, data.buf ); 36 if ( caught ) { 37 printf(", exception occurred"); 38 } 39 if ( data.canary == 42 ) { 40 printf(", canary ok"); 41 } else { 42 printf(", canary overwritten to %d", data.canary); 43 } 44 printf("\n"); 45 } 46 47 rep("pre1"); 48 rep("pre2a"); 49 rep("pre2b"); 50 scanf("\n"); // next test does not start with %s so does not tolerate leading whitespace 51 } 17 52 { 18 53 char s[] = "yyyyyyyyyyyyyyyyyyyy"; -
tests/minmax.cfa
rc1e66d9 rdeda7e6 45 45 sout | "double\t\t\t" | 4.0 | 3.1 | "\tmax" | max( 4.0, 3.1 ); 46 46 sout | "long double\t\t" | 4.0l | 3.1l | "\tmax" | max( 4.0l, 3.1l ); 47 48 sout | nl; 49 50 sout | "3 arguments"; 51 sout | 2 | 3 | 4 | "\tmin" | min(2, 3, 4) | "\tmax" | max(2, 3, 4); 52 sout | 4 | 2 | 3 | "\tmin" | min(4, 2, 3) | "\tmax" | max(4, 2, 3); 53 sout | 3 | 4 | 2 | "\tmin" | min(3, 4, 2) | "\tmax" | max(3, 4, 2); 54 55 sout | "4 arguments"; 56 sout | 3 | 2 | 5 | 4 | "\tmin" | min(3, 2, 5, 4) | "\tmax" | max(3, 2, 5, 4); 57 sout | 5 | 3 | 4 | 2 | "\tmin" | min(5, 3, 4, 2) | "\tmax" | max(5, 3, 4, 2); 47 58 } // main 48 59
Note: See TracChangeset
for help on using the changeset viewer.