Changeset 8421d3f


Timestamp:
Jun 1, 2023, 11:59:02 AM (18 months ago)
Author:
caparson <caparson@…>
Branches:
ast-experimental, master
Children:
0aef549
Parents:
6f774be (diff), 5d81edb (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
8 edited

  • doc/bibliography/pl.bib

    r6f774be r8421d3f  
    12091209    year        = 2018,
    12101210    pages       = {2111-2146},
    1211     note        = {\href{http://dx.doi.org/10.1002/spe.2624}{http://\-dx.doi.org/\-10.1002/\-spe.2624}},
     1211    optnote     = {\href{http://dx.doi.org/10.1002/spe.2624}{http://\-dx.doi.org/\-10.1002/\-spe.2624}},
    12121212}
    12131213
     
    18701870    month       = sep,
    18711871    year        = 2020,
    1872     note        = {\href{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uC++.pdf}{https://\-plg.uwaterloo.ca/\-$\sim$usystem/\-pub/\-uSystem/uC++.pdf}},
     1872    note        = {\url{https://plg.uwaterloo.ca/~usystem/pub/uSystem/uC++.pdf}},
    18731873}
    18741874
     
    20042004    number      = 5,
    20052005    pages       = {1005-1042},
    2006     note        = {\href{https://onlinelibrary.wiley.com/doi/10.1002/spe.2925}{https://\-onlinelibrary.wiley.com/\-doi/\-10.1002/\-spe.2925}},
     2006    optnote     = {\href{https://onlinelibrary.wiley.com/doi/10.1002/spe.2925}{https://\-onlinelibrary.wiley.com/\-doi/\-10.1002/\-spe.2925}},
    20072007}
    20082008
     
    42234223    title       = {Implementing Lock-Free Queues},
    42244224    booktitle   = {Seventh International Conference on Parallel and Distributed Computing Systems},
     4225    organization= {International Society for Computers and Their Applications},
    42254226    address     = {Las Vegas, Nevada, U.S.A.},
    42264227    year        = {1994},
     
    50865087}
    50875088
    5088 @manual{MMTk,
     5089@misc{MMTk,
    50895090    keywords    = {Java memory management},
    50905091    contributer = {pabuhr@plg},
     
    50935094    month       = sep,
    50945095    year        = 2006,
    5095     note        = {\href{http://cs.anu.edu.au/~Robin.Garner/mmtk-guide.pdf}
    5096                   {http://cs.anu.edu.au/\-$\sim$Robin.Garner/\-mmtk-guide.pdf}},
     5096    howpublished= {\url{http://cs.anu.edu.au/~Robin.Garner/mmtk-guide.pdf}},
    50975097}
    50985098
     
    74027402}
    74037403
     7404@misc{rpmalloc,
     7405    author      = {Mattias Jansson},
     7406    title       = {rpmalloc version 1.4.1},
     7407    month       = apr,
     7408    year        = 2022,
     7409    howpublished= {\href{https://github.com/mjansson/rpmalloc}{https://\-github.com/\-mjansson/\-rpmalloc}},
     7410}
     7411
    74047412@manual{Rust,
    74057413    keywords    = {Rust programming language},
     
    74567464    booktitle   = {PLDI '04: Proceedings of the ACM SIGPLAN 2004 Conference on Programming Language Design and Implementation},
    74577465    location    = {Washington DC, USA},
    7458     publisher   = {ACM},
     7466    organization= {ACM},
    74597467    address     = {New York, NY, USA},
    74607468    volume      = 39,
  • doc/theses/colby_parsons_MMAth/Makefile

    r6f774be r8421d3f  
    9898
    9999${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${DATA} \
    100                 style/style.tex ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build}
     100                glossary.tex style/style.tex ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build}
    101101        # Must have *.aux file containing citations for bibtex
    102102        if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi
  • doc/theses/colby_parsons_MMAth/glossary.tex

    r6f774be r8421d3f  
    3232% Examples from template above
    3333
    34 \newabbreviation{raii}{RAII}{Resource Acquisition Is Initialization}
    35 \newabbreviation{rtti}{RTTI}{Run-Time Type Information}
    36 \newabbreviation{fcfs}{FCFS}{First Come First Served}
    37 \newabbreviation{toctou}{TOCTOU}{time-of-check to time-of-use}
     34\newabbreviation{raii}{RAII}{\Newterm{resource acquisition is initialization}}
     35\newabbreviation{rtti}{RTTI}{\Newterm{run-time type information}}
     36\newabbreviation{fcfs}{FCFS}{\Newterm{first-come first-served}}
     37\newabbreviation{toctou}{TOCTOU}{\Newterm{time-of-check to time-of-use}}
    3838
    3939\newglossaryentry{actor}
  • doc/theses/colby_parsons_MMAth/style/style.tex

    r6f774be r8421d3f  
    1515\newsavebox{\myboxB}
    1616
     17\lstnewenvironment{Golang}[1][]
     18{\lstset{language=Go,literate={<-}{\makebox[2ex][c]{\textless\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}}2,
     19        moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
     20{}
     21
    1722\lstnewenvironment{java}[1][]
    1823{\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
  • doc/theses/colby_parsons_MMAth/text/channels.tex

    r6f774be r8421d3f  
    1717Additionally all channel operations in CSP are synchronous (no buffering).
    1818Advanced channels as a programming language feature have been popularized in recent years by the language Go~\cite{Go}, which encourages the use of channels as its fundamental concurrent feature.
    19 It was the popularity of Go channels that lead to their implemention in \CFA.
     19It was the popularity of Go channels that lead to their implementation in \CFA.
    2020Neither Go nor \CFA channels have the restrictions of the early channel-based concurrent systems.
    2121
     
    6161\section{Channel Implementation}
    6262Currently, only the Go programming language provides user-level threading where the primary communication mechanism is channels.
    63 Experiments were conducted that varied the producer-consumer problem algorithm and lock type used inside the channel.
     63Experiments were conducted that varied the producer-consumer algorithm and lock type used inside the channel.
    6464With the exception of non-\gls{fcfs} or non-FIFO algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant than Go's choice of algorithm and lock implementation.
    6565Performance of channels can be improved by sharding the underlying buffer \cite{Dice11}.
    66 In doing so the FIFO property is lost, which is undesireable for user-facing channels.
     66However, the FIFO property is lost, which is undesirable for user-facing channels.
    6767Therefore, the low-level channel implementation in \CFA is largely copied from the Go implementation, but adapted to the \CFA type and runtime systems.
    6868As such the research contributions added by \CFA's channel implementation lie in the realm of safety and productivity features.
    6969
    70 The Go channel implementation utilitizes cooperation between threads to achieve good performance~\cite{go:chan}.
    71 The cooperation between threads only occurs when producers or consumers need to block due to the buffer being full or empty.
    72 In these cases the blocking thread stores their relevant data in a shared location and the signalling thread will complete their operation before waking them.
    73 This helps improve performance in a few ways.
    74 First, each thread interacting with the channel with only acquire and release the internal channel lock exactly once.
    75 This decreases contention on the internal lock, as only entering threads will compete for the lock since signalled threads never reacquire the lock.
    76 The other advantage of the cooperation approach is that it eliminates the potential bottleneck of waiting for signalled threads.
    77 The property of acquiring/releasing the lock only once can be achieved without cooperation by \Newterm{baton passing} the lock.
    78 Baton passing is when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section conceptually "passing" the mutual exclusion to the signalled thread.
    79 While baton passing is useful in some algorithms, it results in worse performance than the cooperation approach in channel implementations since all entering threads then need to wait for the blocked thread to reach the front of the ready queue and run before other operations on the channel can proceed.
     70The Go channel implementation utilizes cooperation among threads to achieve good performance~\cite{go:chan}.
     71This cooperation only occurs when producers or consumers need to block due to the buffer being full or empty.
     72In these cases, a blocking thread stores their relevant data in a shared location and the signalling thread completes the blocking thread's operation before waking them;
     73\ie the blocking thread has no work to perform after it unblocks because the signalling thread has done this work.
     74This approach is similar to wait morphing for locks~\cite[p.~82]{Butenhof97} and improves performance in a few ways.
     75First, each thread interacting with the channel only acquires and releases the internal channel lock once.
     76As a result, contention on the internal lock is decreased, since only entering threads compete for the lock; unblocking threads never reacquire it.
     77The other advantage of Go's wait-morphing approach is that it eliminates the bottleneck of waiting for signalled threads to run.
     78Note, the property of acquiring/releasing the lock only once can also be achieved with a different form of cooperation, called \Newterm{baton passing}.
     79Baton passing occurs when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section, conceptually ``passing'' the mutual exclusion from the signalling thread to the signalled thread.
     80The baton-passing approach has threads cooperate to pass mutual exclusion without additional lock acquires or releases;
     81the wait-morphing approach has threads cooperate by completing the signalled thread's operation, thus removing a signalled thread's need for mutual exclusion after unblocking.
     82While baton passing is useful in some algorithms, it results in worse channel performance than the Go approach.
     83In the baton-passing approach, all threads need to wait for the signalled thread to reach the front of the ready queue, context switch, and run before other operations on the channel can proceed, since the signalled thread holds mutual exclusion;
     84in the wait-morphing approach, since the operation is completed before the signal, other threads can continue to operate on the channel without waiting for the signalled thread to run.
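To make the wait-morphing idea concrete, the following Go sketch shows the receive path of a simplified channel;
it is illustrative only: the names @intChan@, @waiter@, @send@, and @recv@ are invented and this is not Go's runtime code.
A receiver that finds the buffer empty publishes a slot and parks, and a later sender copies its value directly into that slot before waking it, so each side acquires and releases the channel lock at most once per operation.
\begin{Golang}[aboveskip=0pt,belowskip=0pt]
// Illustrative wait-morphing sketch, not Go's runtime implementation.
package main

import (
	"fmt"
	"sync"
)

type waiter struct {
	slot  *int          // where the sender deposits the value
	ready chan struct{} // closed once the receive is completed for the waiter
}

type intChan struct {
	lock    sync.Mutex
	buf     []int     // unbounded here, for brevity
	waiters []*waiter // receivers blocked on an empty buffer
}

func (c *intChan) send(v int) {
	c.lock.Lock()
	if len(c.waiters) > 0 { // a receiver is blocked: morph its wait
		w := c.waiters[0]
		c.waiters = c.waiters[1:]
		c.lock.Unlock()
		*w.slot = v    // complete the receive on the receiver's behalf
		close(w.ready) // wake it; it has no work left to do
		return
	}
	c.buf = append(c.buf, v)
	c.lock.Unlock()
}

func (c *intChan) recv() int {
	c.lock.Lock()
	if len(c.buf) > 0 { // fast path: value available
		v := c.buf[0]
		c.buf = c.buf[1:]
		c.lock.Unlock()
		return v
	}
	var v int
	w := &waiter{slot: &v, ready: make(chan struct{})}
	c.waiters = append(c.waiters, w)
	c.lock.Unlock() // the lock is acquired and released exactly once
	<-w.ready       // park; on wakeup the value is already in v
	return v
}

func main() {
	c := new(intChan)
	go func() { c.send(42) }()
	fmt.Println(c.recv()) // prints 42, possibly via the morphed path
}
\end{Golang}
A baton-passing variant would instead hand the locked critical section to the woken receiver, forcing other channel operations to wait until that receiver is scheduled.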
    8085
    8186In this work, all channel sizes \see{Sections~\ref{s:ChannelSize}} are implemented with bounded buffers.
     
    100105\subsection{Toggle-able Statistics}
    101106As discussed, a channel is a concurrent layer over a bounded buffer.
    102 To achieve efficient buffering users should aim for as few blocking operations on a channel as possible.
    103 Often to achieve this users may change the buffer size, shard a channel into multiple channels, or tweak the number of producer and consumer threads.
    104 Fo users to be able to make informed decisions when tuning channel usage, toggle-able channel statistics are provided.
    105 The statistics are toggled at compile time via the @CHAN_STATS@ macro to ensure that they are entirely elided when not used.
    106 When statistics are turned on, four counters are maintained per channel, two for producers and two for consumers.
     107To achieve efficient buffering, users should aim for as few blocking operations on a channel as possible.
     108Mechanisms to reduce blocking are: changing the buffer size, sharding a channel into multiple channels, or tweaking the number of producer and consumer threads.
     109For users to be able to make informed decisions when tuning channel usage, toggle-able channel statistics are provided.
     110The statistics are toggled on during the \CFA build by defining the @CHAN_STATS@ macro, which guarantees zero cost when not using this feature.
     111When statistics are turned on, four counters are maintained per channel, two for inserting (producers) and two for removing (consumers).
    107112The two counters per type of operation track the number of blocking operations and total operations.
    108 In the channel destructor the counters are printed out aggregated and also per type of operation.
    109 An example use case of the counters follows.
    110 A user is buffering information between producer and consumer threads and wants to analyze channel performance.
    111 Via the statistics they see that producers block for a large percentage of their operations while consumers do not block often.
    112 They then can use this information to adjust their number of producers/consumers or channel size to achieve a larger percentage of non-blocking producer operations, thus increasing their channel throughput.
     113In the channel destructor, the counters are printed, both aggregated and per type of operation.
     114An example use case is noting that producer inserts are blocking often while consumer removes do not block often.
     115This information can be used to increase the number of consumers to decrease the blocking producer operations, thus increasing the channel throughput.
     116In contrast, increasing the channel size in this scenario is unlikely to produce a benefit because the consumers can never keep up with the producers.
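As a rough illustration of such counters, the following Go sketch counts total and would-block operations per side and prints them at teardown;
it is a sketch only: Go has no preprocessor, so a boolean constant stands in for the @CHAN_STATS@ macro, and the names @statChan@, @insert@, @remove@, and @report@ are invented rather than taken from the \CFA implementation.
\begin{Golang}[aboveskip=0pt,belowskip=0pt]
package main

import (
	"fmt"
	"sync/atomic"
)

const chanStats = true // stand-in for the CHAN_STATS compile-time toggle

type statChan struct {
	ch         chan int
	prodOps    atomic.Uint64 // total inserts
	prodBlocks atomic.Uint64 // inserts that would have blocked
	consOps    atomic.Uint64 // total removes
	consBlocks atomic.Uint64 // removes that would have blocked
}

func (c *statChan) insert(v int) {
	if chanStats {
		c.prodOps.Add(1)
		select {
		case c.ch <- v: // buffer had room: non-blocking insert
			return
		default:
			c.prodBlocks.Add(1) // this insert is about to block
		}
	}
	c.ch <- v
}

func (c *statChan) remove() int {
	if chanStats {
		c.consOps.Add(1)
		select {
		case v := <-c.ch: // buffer had an element: non-blocking remove
			return v
		default:
			c.consBlocks.Add(1) // this remove is about to block
		}
	}
	return <-c.ch
}

func (c *statChan) report() { // analogue of printing in the channel destructor
	fmt.Printf("inserts: %d of %d blocked; removes: %d of %d blocked\n",
		c.prodBlocks.Load(), c.prodOps.Load(),
		c.consBlocks.Load(), c.consOps.Load())
}

func main() {
	c := &statChan{ch: make(chan int, 2)}
	done := make(chan struct{})
	go func() {
		for i := 0; i < 8; i++ {
			c.insert(i)
		}
		close(done)
	}()
	for i := 0; i < 8; i++ {
		c.remove()
	}
	<-done
	c.report()
}
\end{Golang}
A high ratio of blocked inserts relative to blocked removes suggests adding consumers (or, in the opposite case, producers) before reaching for a larger buffer.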
    113117
    114118\subsection{Deadlock Detection}
    115 The deadlock detection in the \CFA channels is fairly basic.
    116 It only detects the case where threads are blocked on the channel during deallocation.
    117 This case is guaranteed to deadlock since the list holding the blocked thread is internal to the channel and will be deallocated.
    118 If a user maintained a separate reference to a thread and unparked it outside the channel they could avoid the deadlock, but would run into other runtime errors since the thread would access channel data after waking that is now deallocated.
    119 More robust deadlock detection surrounding channel usage would have to be implemented separate from the channel implementation since it would require knowledge about the threading system and other channel/thread state.
     119The deadlock detection in the \CFA channels is fairly basic but detects a very common channel mistake during termination.
     120That is, it detects the case where threads are blocked on the channel during channel deallocation.
     121This case is guaranteed to deadlock since there are no producer threads to supply values needed by the waiting consumer threads.
     122Only if a user maintains a separate reference to the consumer threads and manually unblocks them outside the channel can the deadlock be avoided.
     123However, without special consumer semantics, this unblocking would generate other runtime errors where the consumer attempts to access non-existing channel data or even a deallocated channel.
     124More robust deadlock detection needs to be implemented separately from channels since it requires knowledge about the threading system and other channel/thread state.
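The check itself amounts to inspecting the channel's internal wait list at deallocation, as in the following Go-style sketch;
the names @chanState@ and @destroy@ are invented, and \CFA performs the equivalent check inside the channel destructor.
\begin{Golang}[aboveskip=0pt,belowskip=0pt]
package main

import (
	"fmt"
	"os"
	"sync"
)

type chanState struct {
	lock    sync.Mutex
	waiting int // threads currently blocked inside this channel
}

// destroy stands in for the channel destructor: a thread still blocked here
// can never be woken once the internal wait list is freed, so report the
// guaranteed deadlock instead of silently deallocating.
func (c *chanState) destroy() {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.waiting > 0 {
		fmt.Fprintf(os.Stderr,
			"deadlock: channel deallocated with %d thread(s) blocked on it\n",
			c.waiting)
		os.Exit(1)
	}
}

func main() {
	c := &chanState{waiting: 2} // pretend two consumers are still blocked
	c.destroy()                 // reports the deadlock
}
\end{Golang}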
    120125
    121126\subsection{Program Shutdown}
    122127Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed.
    123 The difficulty of graceful termination often arises from the usage of synchronization primitives that need to be handled carefully during shutdown.
     128The difficulty for graceful termination often arises from the usage of synchronization primitives that need to be handled carefully during shutdown.
    124129It is easy to deadlock during termination if threads are left behind on synchronization primitives.
    125130Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is a race between one thread checking the state of a concurrent object and another thread changing the state.
    126131\gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it.
    127132Channels are a particularly hard synchronization primitive to terminate since both sending and receiving to/from a channel can block.
    128 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads trying to perform the termination may end up unexpectedly blocking in their attempt to help other threads exit the system.
    129 
    130 \paragraph{Go channels} provide a set of tools to help with concurrent shutdown~\cite{go:chan}.
    131 Channels in Go have a @close@ operation and a \Go{select} statement that both can be used to help threads terminate.
     133Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads performing the termination may end up unexpectedly blocking in their attempt to help other threads exit the system.
     134
     135\paragraph{Go channels} provide a set of tools to help with concurrent shutdown~\cite{go:chan} using a @close@ operation in conjunction with the \Go{select} statement.
    132136The \Go{select} statement is discussed in \ref{s:waituntil}, where \CFA's @waituntil@ statement is compared with the Go \Go{select} statement.
    133137
     
    143147Note, panics in Go can be caught, but it is not the idiomatic way to write Go programs.
    144148
    145 While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired.
     149While Go's channel-closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired.
    146150Since both closing and sending panic once a channel is closed, a user often has to synchronize the senders (producers) before the channel can be closed to avoid panics.
    147151However, in doing so it renders the @close@ operation nearly useless, as the only utilities it provides are the ability to ensure receivers no longer block on the channel and receive zero-valued elements.
    148152This functionality is only useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is necessary, then @close@ only provides the non-blocking feature.
    149153To avoid \gls{toctou} issues during shutdown, a busy wait with a \Go{select} statement is often used to add or remove elements from a channel.
    150 Due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown.
     154Hence, due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown.
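The busy-wait idiom mentioned above typically looks like the following Go sketch;
it is illustrative only: the flag @prodDone@ and function @produce@ are invented, and where the thesis figures use a plain boolean flag, an atomic flag is used here only to keep the sketch free of data races.
The producer polls a shutdown flag and uses a non-blocking send, so it never commits to blocking on a channel that is about to be closed.
\begin{Golang}[aboveskip=0pt,belowskip=0pt]
package main

import (
	"sync/atomic"
	"time"
)

var prodDone atomic.Bool // shutdown flag set by the main thread

func produce(ch chan<- int, v int) {
	for !prodDone.Load() {
		select {
		case ch <- v: // sent without ever committing to block
			return
		default: // buffer full right now: back off and re-check the flag
			time.Sleep(time.Millisecond)
		}
	}
}

func main() {
	ch := make(chan int, 1)
	ch <- 0 // fill the buffer so further sends would block
	go func() {
		time.Sleep(10 * time.Millisecond)
		prodDone.Store(true) // request shutdown
	}()
	produce(ch, 42) // spins until the flag is set, then gives up
	close(ch)       // safe: the producer has stopped sending
}
\end{Golang}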
    151155
    152156\paragraph{\CFA channels} have access to an extensive exception handling mechanism~\cite{Beach21}.
     
    161165When a channel in \CFA is closed, all subsequent calls to the channel raise a resumption exception at the caller.
    162166If the resumption is handled, the caller attempts to complete the channel operation.
    163 However, if channel operation would block, a termination exception is thrown.
     167However, if the channel operation would block, a termination exception is thrown.
    164168If the resumption is not handled, the exception is rethrown as a termination.
    165169These termination exceptions allow for non-local transfer that is used to great effect to eagerly and gracefully shut down a thread.
    166170When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them.
    167 The resumption exception, @channel_closed@, has a couple fields to aid in handling the exception.
    168 The exception contains a pointer to the channel it was thrown from, and a pointer to an element.
    169 In exceptions thrown from remove the element pointer will be null.
    170 In the case of insert the element pointer points to the element that the thread attempted to insert.
     171The resumption exception, @channel_closed@, has internal fields to aid in handling the exception.
     172The exception contains a pointer to the channel it is thrown from and a pointer to a buffer element.
     173For exceptions thrown from @remove@, the buffer element pointer is null.
     174For exceptions thrown from @insert@, the element pointer points to the buffer element that the thread attempted to insert.
    171175This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler.
    172 Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based which channel and operation failed.
    173 Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception.
    174 It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination that the approach used in Go.
    175 This should not be an issue, since termination is rarely an fast-path of an application and ensuring that termination can be implemented correctly with ease is the aim of the exception approach.
     176Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based on which channel and operation failed.
     177For example, exception handlers in \CFA have an optional predicate which can be used to trigger or skip handlers based on the content of the matching exception.
     178It is worth mentioning that using exceptions for termination may incur a larger performance cost than the Go approach.
     179However, this should not be an issue, since termination is rarely on the fast-path of an application.
     180In contrast, ensuring termination can be easily implemented correctly is the aim of the exception approach.
    176181
    177182\section{\CFA / Go channel Examples}
    178 To highlight the differences between \CFA's and Go's close semantics, three examples will be presented.
     183To highlight the differences between \CFA's and Go's close semantics, three examples are presented.
    179184The first example is a simple shutdown case, where there are producer threads and consumer threads operating on a channel for a fixed duration.
    180 Once the duration ends, producers and consumers terminate without worrying about any leftover values in the channel.
    181 The second example extends the first example by requiring the channel to be empty upon shutdown.
     185Once the duration ends, producers and consumers terminate immediately leaving unprocessed elements in the channel.
     186The second example extends the first by requiring the channel to be empty after shutdown.
    182187Both the first and second example are shown in Figure~\ref{f:ChannelTermination}.
    183 
    184 
    185 First the Go solutions to these examples shown in Figure~\ref{l:go_chan_term} are discussed.
    186 Since some of the elements being passed through the channel are zero-valued, closing the channel in Go does not aid in communicating shutdown.
    187 Instead, a different mechanism to communicate with the consumers and producers needs to be used.
    188 This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs.
    189 In this example, a flag is used to communicate with producers and another flag is used for consumers.
    190 Producers and consumers need separate avenues of communication both so that producers terminate before the channel is closed to avoid panicking, and to avoid the case where all the consumers terminate first, which can result in a deadlock for producers if the channel is full.
    191 The producer flag is set first, then after producers terminate the consumer flag is set and the channel is closed.
    192 In the second example where all values need to be consumed, the main thread iterates over the closed channel to process any remaining values.
    193 
    194 
    195 In the \CFA solutions in Figure~\ref{l:cfa_chan_term}, shutdown is communicated directly to both producers and consumers via the @close@ call.
    196 In the first example where all values do not need to be consumed, both producers and consumers do not handle the resumption and finish once they receive the termination exception.
    197 The second \CFA example where all values must be consumed highlights how resumption is used with channel shutdown.
    198 The @Producer@ thread-main knows to stop producing when the @insert@ call on a closed channel raises exception @channel_closed@.
    199 The @Consumer@ thread-main knows to stop consuming after all elements of a closed channel are removed and the call to @remove@ would block.
    200 Hence, the consumer knows the moment the channel closes because a resumption exception is raised, caught, and ignored, and then control returns to @remove@ to return another item from the buffer.
    201 Only when the buffer is drained and the call to @remove@ would block, a termination exception is raised to stop consuming.
    202 The \CFA semantics allow users to communicate channel shutdown directly through the channel, without having to share extra state between threads.
    203 Additionally, when the channel needs to be drained, \CFA provides users with easy options for processing the leftover channel values in the main thread or in the consumer threads.
    204 If one wishes to consume the leftover values in the consumer threads in Go, extra synchronization between the main thread and the consumer threads is needed.
    205188
    206189\begin{figure}
     
    208191
    209192\begin{lrbox}{\myboxA}
     193\begin{Golang}[aboveskip=0pt,belowskip=0pt]
     194var channel chan int = make( chan int, 128 )
     195var prodJoin chan int = make( chan int, 4 )
     196var consJoin chan int = make( chan int, 4 )
     197var cons_done, prod_done bool = false, false;
     198func producer() {
     199        for {
     200                if prod_done { break }
     201                channel <- 5
     202        }
     203        prodJoin <- 0 // synch with main thd
     204}
     205
     206func consumer() {
     207        for {
     208                if cons_done { break }
     209                <- channel
     210        }
     211        consJoin <- 0 // synch with main thd
     212}
     213
     214
     215func main() {
     216        for j := 0; j < 4; j++ { go consumer() }
     217        for j := 0; j < 4; j++ { go producer() }
     218        time.Sleep( time.Second * 10 )
     219        prod_done = true
     220        for j := 0; j < 4 ; j++ { <- prodJoin }
     221        cons_done = true
     222        close(channel) // ensure no cons deadlock
     223        @for elem := range channel {@
     224                // process leftover values
     225        @}@
     226        for j := 0; j < 4; j++ { <- consJoin }
     227}
     228\end{Golang}
     229\end{lrbox}
     230
     231\begin{lrbox}{\myboxB}
    210232\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    211 channel( size_t ) Channel{ ChannelSize };
    212 
     233channel( size_t ) chan{ 128 };
    213234thread Consumer {};
     235thread Producer {};
     236
     237void main( Producer & this ) {
     238        try {
     239                for ()
     240                        insert( chan, 5 );
     241        } catch( channel_closed * ) {
     242                // unhandled resume or full
     243        }
     244}
    214245void main( Consumer & this ) {
    215     try {
    216         for ( ;; )
    217             remove( Channel );
    218     @} catchResume( channel_closed * ) { @
    219     // handled resume => consume from chan
    220     } catch( channel_closed * ) {
    221         // empty or unhandled resume
    222     }
    223 }
    224 
    225 thread Producer {};
    226 void main( Producer & this ) {
    227     size_t count = 0;
    228     try {
    229         for ( ;; )
    230             insert( Channel, count++ );
    231     } catch ( channel_closed * ) {
    232         // unhandled resume or full
    233     }
    234 }
    235 
    236 int main( int argc, char * argv[] ) {
    237     Consumer c[Consumers];
    238     Producer p[Producers];
    239     sleep(Duration`s);
    240     close( Channel );
    241     return 0;
    242 }
     246        try {
     247                for () { int i = remove( chan ); }
     248        @} catchResume( channel_closed * ) {@
     249                // handled resume => consume from chan
     250        } catch( channel_closed * ) {
     251                // empty or unhandled resume
     252        }
     253}
     254int main() {
     255        Consumer c[4];
     256        Producer p[4];
     257        sleep( 10`s );
     258        close( chan );
     259}
     260
     261
     262
     263
     264
     265
     266
    243267\end{cfa}
    244268\end{lrbox}
    245269
    246 \begin{lrbox}{\myboxB}
    247 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
    248 var cons_done, prod_done bool = false, false;
    249 var prodJoin chan int = make(chan int, Producers)
    250 var consJoin chan int = make(chan int, Consumers)
    251 
    252 func consumer( channel chan uint64 ) {
    253     for {
    254         if cons_done { break }
    255         <-channel
    256     }
    257     consJoin <- 0 // synch with main thd
    258 }
    259 
    260 func producer( channel chan uint64 ) {
    261     var count uint64 = 0
    262     for {
    263         if prod_done { break }
    264         channel <- count++
    265     }
    266     prodJoin <- 0 // synch with main thd
    267 }
    268 
    269 func main() {
    270     channel = make(chan uint64, ChannelSize)
    271     for j := 0; j < Consumers; j++ {
    272         go consumer( channel )
    273     }
    274     for j := 0; j < Producers; j++ {
    275         go producer( channel )
    276     }
    277     time.Sleep(time.Second * Duration)
    278     prod_done = true
    279     for j := 0; j < Producers ; j++ {
    280         <-prodJoin // wait for prods
    281     }
    282     cons_done = true
    283     close(channel) // ensure no cons deadlock
    284     @for elem := range channel { @
    285         // process leftover values
    286     @}@
    287     for j := 0; j < Consumers; j++{
    288         <-consJoin // wait for cons
    289     }
    290 }
    291 \end{cfa}
    292 \end{lrbox}
    293 
    294 \subfloat[\CFA style]{\label{l:cfa_chan_term}\usebox\myboxA}
     270\subfloat[Go style]{\label{l:go_chan_term}\usebox\myboxA}
    295271\hspace*{3pt}
    296272\vrule
    297273\hspace*{3pt}
    298 \subfloat[Go style]{\label{l:go_chan_term}\usebox\myboxB}
     274\subfloat[\CFA style]{\label{l:cfa_chan_term}\usebox\myboxB}
    299275\caption{Channel Termination Examples 1 and 2. Code specific to example 2 is highlighted.}
    300276\label{f:ChannelTermination}
    301277\end{figure}
    302278
    303 The final shutdown example uses channels to implement a barrier.
    304 It is shown in Figure~\ref{f:ChannelBarrierTermination}.
    305 The problem of implementing a barrier is chosen since threads are both producers and consumers on the barrier-internal channels, which removes the ability to easily synchronize producers before consumers during shutdown.
    306 As such, while the shutdown details will be discussed with this problem in mind, they are also applicable to other problems taht have individual threads both producing and consuming from channels.
    307 Both of these examples are implemented using \CFA syntax so that they can be easily compared.
    308 Figure~\ref{l:cfa_chan_bar} uses \CFA-style channel close semantics and Figure~\ref{l:go_chan_bar} uses Go-style close semantics.
    309 In this example it is infeasible to use the Go @close@ call since all threads are both potentially producers and consumers, causing panics on close to be unavoidable without complex synchronization.
    310 As such in Figure~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of @-1@ has to be used to indicate to threads that they need to leave the barrier.
    311 This sentinel value has to be checked at two points.
     279Figure~\ref{l:go_chan_term} shows the Go solution.
     280Since some of the elements being passed through the channel are zero-valued, closing the channel in Go does not aid in communicating shutdown.
     281Instead, a different mechanism to communicate with the consumers and producers needs to be used.
     282Flag variables are common in Go channel-shutdown code because, to avoid panics on a channel, the shutdown has to be communicated to threads before it occurs.
     283Hence, the two flags @cons_done@ and @prod_done@ are used to communicate with the producers and consumers, respectively.
     284Furthermore, producers and consumers need separate shutdown channels so producers terminate before the channel is closed to avoid panicking, and to avoid the case where all the consumers terminate first, which can result in a deadlock for producers if the channel is full.
     285The producer flag is set first;
     286then after all producers terminate, the consumer flag is set and the channel is closed leaving elements in the buffer.
     287To purge the buffer, a loop is added (red) that iterates over the closed channel to process any remaining values.
     288
     289Figure~\ref{l:cfa_chan_term} shows the \CFA solution.
     290Here, shutdown is communicated directly to both producers and consumers via the @close@ call.
     291A @Producer@ thread knows to stop producing when the @insert@ call on a closed channel raises exception @channel_closed@.
     292If a @Consumer@ thread ignores the first resumption exception from the @close@, the exception is reraised as a termination exception and elements are left in the buffer.
     293If a @Consumer@ thread handles the resumption exception (red), control returns to complete the @remove@.
     294A @Consumer@ thread knows to stop consuming after all elements of a closed channel are removed and the consumer would block, which causes a termination raise of @channel_closed@.
     295The \CFA semantics allow users to communicate channel shutdown directly through the channel, without having to share extra state between threads.
     296Additionally, when the channel needs to be drained, \CFA provides users with easy options for processing the leftover channel values in the main thread or in the consumer threads.
     297
     298Figure~\ref{f:ChannelBarrierTermination} shows a final shutdown example using channels to implement a barrier.
     299A Go and \CFA style solution are presented but both are implemented using \CFA syntax so they can be easily compared.
     300Implementing a barrier is interesting because threads are both producers and consumers on the barrier-internal channels, @entryWait@ and @barWait@.
     301The outline for the barrier implementation starts by filling the @entryWait@ channel with $N$ tickets in the barrier constructor, allowing $N$ arriving threads to remove these values and enter the barrier.
     302After @entryWait@ is empty, arriving threads block when removing.
     303However, the arriving threads that entered the barrier cannot leave the barrier until $N$ threads have arrived.
     304Hence, the entering threads block on the empty @barWait@ channel until the $N$th arriving thread inserts $N-1$ elements into @barWait@ to unblock the $N-1$ threads calling @remove@.
     305The race between these arriving threads blocking on @barWait@ and the $N$th thread inserting values into @barWait@ does not affect correctness;
     306\ie an arriving thread may or may not block on channel @barWait@ to get its value.
     307Finally, the last thread to remove from @barWait@ with ticket $N-2$ refills channel @entryWait@ with $N$ values to start the next group into the barrier.
     308
     309Now, the two channels make termination synchronization between producers and consumers difficult.
     310Interestingly, the shutdown details for this problem are also applicable to other problems with threads producing and consuming from the same channel.
     311The Go-style solution cannot use the Go @close@ call since all threads are both potentially producers and consumers, causing panics on close to be unavoidable without complex synchronization.
     312As such, in Figure~\ref{l:go_chan_bar}, a flush routine is needed to insert a sentinel value, @-1@, to inform threads waiting in the buffer that they need to leave the barrier.
     313This sentinel value has to be checked at two points along the fast path, and sentinel values are daisy-chained into the buffers.
    312314Furthermore, an additional flag @done@ is needed to communicate to threads once they have left the barrier that they are done.
    313 
    314 In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate.
     315Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier.
     316For the \CFA solution~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, to inform waiting threads they must leave the barrier.
    315317This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation.
    316 Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier.
    317318
    318319\begin{figure}
     
    320321
    321322\begin{lrbox}{\myboxA}
     323\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     324struct barrier {
     325        channel( int ) barWait, entryWait;
     326        int size;
     327};
     328void ?{}( barrier & this, int size ) with(this) {
     329        barWait{size + 1};   entryWait{size + 1};
     330        this.size = size;
     331        for ( i; size )
     332                insert( entryWait, i );
     333}
     334void wait( barrier & this ) with(this) {
     335        int ticket = remove( entryWait );
     336        @if ( ticket == -1 ) { insert( entryWait, -1 ); return; }@
     337        if ( ticket == size - 1 ) {
     338                for ( i; size - 1 )
     339                        insert( barWait, i );
     340                return;
     341        }
     342        ticket = remove( barWait );
     343        @if ( ticket == -1 ) { insert( barWait, -1 ); return; }@
     344        if ( size == 1 || ticket == size - 2 ) { // last ?
     345                for ( i; size )
     346                        insert( entryWait, i );
     347        }
     348}
     349void flush(barrier & this) with(this) {
     350        @insert( entryWait, -1 );   insert( barWait, -1 );@
     351}
     352enum { Threads = 4 };
     353barrier b{Threads};
     354@bool done = false;@
     355thread Thread {};
     356void main( Thread & this ) {
     357        for () {
     358          @if ( done ) break;@
     359                wait( b );
     360        }
     361}
     362int main() {
     363        Thread t[Threads];
     364        sleep(10`s);
     365        done = true;
     366        flush( b );
     367} // wait for threads to terminate
     368\end{cfa}
     369\end{lrbox}
     370
     371\begin{lrbox}{\myboxB}
    322372\begin{cfa}[aboveskip=0pt,belowskip=0pt]
    323373struct barrier {
     
    368418\end{lrbox}
    369419
    370 \begin{lrbox}{\myboxB}
    371 \begin{cfa}[aboveskip=0pt,belowskip=0pt]
    372 struct barrier {
    373         channel( int ) barWait, entryWait;
    374         int size;
    375 };
    376 void ?{}( barrier & this, int size ) with(this) {
    377         barWait{size + 1};   entryWait{size + 1};
    378         this.size = size;
    379         for ( i; size )
    380                 insert( entryWait, i );
    381 }
    382 void wait( barrier & this ) with(this) {
    383         int ticket = remove( entryWait );
    384         @if ( ticket == -1 ) { insert( entryWait, -1 ); return; }@
    385         if ( ticket == size - 1 ) {
    386                 for ( i; size - 1 )
    387                         insert( barWait, i );
    388                 return;
    389         }
    390         ticket = remove( barWait );
    391         @if ( ticket == -1 ) { insert( barWait, -1 ); return; }@
    392         if ( size == 1 || ticket == size - 2 ) { // last ?
    393                 for ( i; size )
    394                         insert( entryWait, i );
    395         }
    396 }
    397 void flush(barrier & this) with(this) {
    398         @insert( entryWait, -1 );   insert( barWait, -1 );@
    399 }
    400 enum { Threads = 4 };
    401 barrier b{Threads};
    402 @bool done = false;@
    403 thread Thread {};
    404 void main( Thread & this ) {
    405         for () {
    406           @if ( done ) break;@
    407                 wait( b );
    408         }
    409 }
    410 int main() {
    411         Thread t[Threads];
    412         sleep(10`s);
    413         done = true;
    414         flush( b );
    415 } // wait for threads to terminate
    416 \end{cfa}
    417 \end{lrbox}
    418 
    419 \subfloat[\CFA style]{\label{l:cfa_chan_bar}\usebox\myboxA}
     420\subfloat[Go style]{\label{l:go_chan_bar}\usebox\myboxA}
    420421\hspace*{3pt}
    421422\vrule
    422423\hspace*{3pt}
    423 \subfloat[Go style]{\label{l:go_chan_bar}\usebox\myboxB}
     424\subfloat[\CFA style]{\label{l:cfa_chan_bar}\usebox\myboxB}
    424425\caption{Channel Barrier Termination}
    425426\label{f:ChannelBarrierTermination}
  • doc/theses/colby_parsons_MMAth/thesis.tex

    r6f774be r8421d3f  
    111111    colorlinks=true,        % false: boxed links; true: colored links
    112112    linkcolor=blue,         % color of internal links
    113     citecolor=blue,        % color of links to bibliography
     113    citecolor=blue,         % color of links to bibliography
    114114    filecolor=magenta,      % color of file links
    115     urlcolor=cyan           % color of external links
     115    urlcolor=cyan,          % color of external links
     116    breaklinks=true
    116117}
    117118\ifthenelse{\boolean{PrintVersion}}{   % for improved print quality, change some hyperref options
     
    126127% \usepackage[acronym]{glossaries}
    127128\usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package
     129\renewcommand*{\glstextformat}[1]{\textcolor{black}{#1}}
    128130% If glossaries-extra is not in your LaTeX distribution, get it from CTAN (http://ctan.org/pkg/glossaries-extra),
    129131% although it's supposed to be in both the TeX Live and MikTeX distributions. There are also documentation and
  • driver/cfa.cc

    r6f774be r8421d3f  
    1010// Created On       : Tue Aug 20 13:44:49 2002
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue May 23 16:22:47 2023
    13 // Update Count     : 477
     12// Last Modified On : Tue May 30 10:47:52 2023
     13// Update Count     : 478
    1414//
    1515
     
    329329        #endif // __x86_64__
    330330
     331        // ARM -mno-outline-atomics => use LL/SC instead of calls to atomic routines: __aarch64_swp_acq_rel, __aarch64_cas8_acq_rel
     332        // ARM -march=armv8.2-a+lse => generate Arm LSE extension instructions SWAP and CAS
     333        // https://community.arm.com/developer/tools-software/tools/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10
    331334        #ifdef __ARM_ARCH
    332335        args[nargs++] = "-mno-outline-atomics";                         // use ARM LL/SC instructions for atomics
  • tests/concurrency/lockfree_stack.cfa

    r6f774be r8421d3f  
    1010// Created On       : Thu May 25 15:36:50 2023
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri May 26 08:08:47 2023
    13 // Update Count     : 4
     12// Last Modified On : Tue May 30 19:02:32 2023
     13// Update Count     : 18
    1414//
    1515
     
    2020struct Node;                                                                                    // forward declaration
    2121union Link {
    22         struct {                                                                                        // 64-bit x 2
     22        struct {                                                                                        // 32/64-bit x 2
    2323                Node * volatile top;                                                    // pointer to stack top
    2424                uintptr_t count;                                                                // count each push
     
    5757Stack stack;                                                                                    // global stack
    5858
     59enum { Times =
     60        #if defined( __ARM_ARCH )                                                       // ARM CASV is very slow
     61        10_000
     62        #else
     63        1_000_000
     64        #endif // __arm_64__
     65};
     66
    5967thread Worker {};
    6068void main( Worker & w ) {
    61         for ( i; 100000 ) {
    62                 Node & n = *pop( stack );
     69        for ( i; Times ) {
     70                Node & n = *pop( stack );                                               // pop any node
    6371                assert( &n != NULL );
    64                 n.next.top = 0p;                                                                // shrub fields
     72                n.next.top = 0p;                                                                // scrub fields
    6573                n.next.count = 0;
    6674                //yield( rand() % 3 );
    67                 push( stack, n );
     75                push( stack, n );                                                               // push it back
    6876        }
    6977}
    7078
    71 
    7279int main() {
    73         enum { N = 10 };
    74         processor p[N - 1];                                                                     // kernel threads
     80        enum { N = 8 };                                                                         // kernel threads
     81        processor p[N - 1];                                                                     // add kernel threads
    7582
    7683        for ( i; N ) {                                                                          // push N values on stack