Changeset 8a930c03 for doc/theses
- Timestamp:
- Jun 12, 2023, 12:05:58 PM (3 years ago)
- Branches:
- master, stuck-waitfor-destruct
- Children:
- fec8bd1
- Parents:
- 2b78949 (diff), 38e266ca (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- doc/theses/colby_parsons_MMAth
- Files:
-
- 24 added
- 17 edited
-
Makefile (modified) (1 diff)
-
benchmarks/actors/cfa/balance.cfa (modified) (3 diffs)
-
benchmarks/actors/cfa/dynamic.cfa (modified) (1 diff)
-
benchmarks/actors/cfa/executor.cfa (modified) (1 diff)
-
benchmarks/actors/cfa/matrix.cfa (modified) (1 diff)
-
benchmarks/actors/cfa/repeat.cfa (modified) (4 diffs)
-
benchmarks/actors/cfa/static.cfa (modified) (1 diff)
-
benchmarks/actors/plotData.py (modified) (1 diff)
-
benchmarks/channels/plotData.py (modified) (1 diff)
-
benchmarks/mutex_stmt/plotData.py (modified) (1 diff)
-
benchmarks/waituntil/cfa/contend.cfa (added)
-
benchmarks/waituntil/cfa/future.cfa (added)
-
benchmarks/waituntil/cfa/sidechan.cfa (added)
-
benchmarks/waituntil/cfa/spin.cfa (added)
-
benchmarks/waituntil/go/contend/contend.go (added)
-
benchmarks/waituntil/go/contend/go.mod (added)
-
benchmarks/waituntil/go/contend2/contend.go (added)
-
benchmarks/waituntil/go/contend2/go.mod (added)
-
benchmarks/waituntil/go/contend4/contend.go (added)
-
benchmarks/waituntil/go/contend4/go.mod (added)
-
benchmarks/waituntil/go/contend8/contend.go (added)
-
benchmarks/waituntil/go/contend8/go.mod (added)
-
benchmarks/waituntil/go/sidechan/go.mod (added)
-
benchmarks/waituntil/go/sidechan/sidechan.go (added)
-
benchmarks/waituntil/go/spin/go.mod (added)
-
benchmarks/waituntil/go/spin/spin.go (added)
-
benchmarks/waituntil/go/spin2/go.mod (added)
-
benchmarks/waituntil/go/spin2/spin.go (added)
-
benchmarks/waituntil/go/spin4/go.mod (added)
-
benchmarks/waituntil/go/spin4/spin.go (added)
-
benchmarks/waituntil/go/spin8/go.mod (added)
-
benchmarks/waituntil/go/spin8/spin.go (added)
-
benchmarks/waituntil/run (added)
-
benchmarks/waituntil/ucpp/future.cc (added)
-
code/basic_actor_example.cfa (modified) (1 diff)
-
glossary.tex (modified) (1 diff)
-
local.bib (modified) (1 diff)
-
style/style.tex (modified) (1 diff)
-
text/channels.tex (modified) (8 diffs)
-
text/waituntil.tex (modified) (5 diffs)
-
thesis.tex (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
-
doc/theses/colby_parsons_MMAth/Makefile
r2b78949 r8a930c03 98 98 99 99 ${BASE}.dvi : Makefile ${GRAPHS} ${PROGRAMS} ${PICTURES} ${FIGURES} ${SOURCES} ${DATA} \ 100 style/style.tex ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build}100 glossary.tex style/style.tex ${Macros}/common.tex ${Macros}/indexstyle local.bib ../../bibliography/pl.bib | ${Build} 101 101 # Must have *.aux file containing citations for bibtex 102 102 if [ ! -r ${basename $@}.aux ] ; then ${LaTeX} ${basename $@}.tex ; fi -
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/balance.cfa
r2b78949 r8a930c03 31 31 32 32 d_actor ** actor_arr; 33 Allocation receive( d_actor & this, start_msg & msg ) with( this ) {33 allocation receive( d_actor & this, start_msg & msg ) with( this ) { 34 34 for ( i; Set ) { 35 35 *actor_arr[i + gstart] << shared_msg; … … 38 38 } 39 39 40 Allocation receive( d_actor & this, d_msg & msg ) with( this ) {40 allocation receive( d_actor & this, d_msg & msg ) with( this ) { 41 41 if ( recs == rounds ) return Delete; 42 42 if ( recs % Batch == 0 ) { … … 50 50 } 51 51 52 Allocation receive( filler & this, d_msg & msg ) { return Delete; }52 allocation receive( filler & this, d_msg & msg ) { return Delete; } 53 53 54 54 int main( int argc, char * argv[] ) { -
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/dynamic.cfa
r2b78949 r8a930c03 24 24 25 25 uint64_t start_time; 26 Allocation receive( derived_actor & receiver, derived_msg & msg ) {26 allocation receive( derived_actor & receiver, derived_msg & msg ) { 27 27 if ( msg.cnt >= Times ) { 28 28 printf("%.2f\n", ((double)(bench_time() - start_time)) / ((double)Times) ); // ns -
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/executor.cfa
r2b78949 r8a930c03 25 25 struct d_msg { inline message; } shared_msg; 26 26 27 Allocation receive( d_actor & this, d_msg & msg ) with( this ) {27 allocation receive( d_actor & this, d_msg & msg ) with( this ) { 28 28 if ( recs == rounds ) return Finished; 29 29 if ( recs % Batch == 0 ) { -
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/matrix.cfa
r2b78949 r8a930c03 24 24 } 25 25 26 Allocation receive( derived_actor & receiver, derived_msg & msg ) {26 allocation receive( derived_actor & receiver, derived_msg & msg ) { 27 27 for ( unsigned int i = 0; i < yc; i += 1 ) { // multiply X_row by Y_col and sum products 28 28 msg.Z[i] = 0; -
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/repeat.cfa
r2b78949 r8a930c03 46 46 47 47 Client * cl; 48 Allocation receive( Server & this, IntMsg & msg ) { msg.val = 7; *cl << msg; return Nodelete; }49 Allocation receive( Server & this, CharMsg & msg ) { msg.val = 'x'; *cl << msg; return Nodelete; }50 Allocation receive( Server & this, StateMsg & msg ) { return Finished; }48 allocation receive( Server & this, IntMsg & msg ) { msg.val = 7; *cl << msg; return Nodelete; } 49 allocation receive( Server & this, CharMsg & msg ) { msg.val = 'x'; *cl << msg; return Nodelete; } 50 allocation receive( Server & this, StateMsg & msg ) { return Finished; } 51 51 52 52 void terminateServers( Client & this ) with(this) { … … 56 56 } 57 57 58 Allocation reset( Client & this ) with(this) {58 allocation reset( Client & this ) with(this) { 59 59 times += 1; 60 60 if ( times == Times ) { terminateServers( this ); return Finished; } … … 64 64 } 65 65 66 Allocation process( Client & this ) with(this) {66 allocation process( Client & this ) with(this) { 67 67 this.results++; 68 68 if ( results == 2 * Messages ) { return reset( this ); } … … 70 70 } 71 71 72 Allocation receive( Client & this, IntMsg & msg ) { return process( this ); }73 Allocation receive( Client & this, CharMsg & msg ) { return process( this ); }74 Allocation receive( Client & this, StateMsg & msg ) with(this) {72 allocation receive( Client & this, IntMsg & msg ) { return process( this ); } 73 allocation receive( Client & this, CharMsg & msg ) { return process( this ); } 74 allocation receive( Client & this, StateMsg & msg ) with(this) { 75 75 for ( i; Messages ) { 76 76 servers[i] << intmsg[i]; -
doc/theses/colby_parsons_MMAth/benchmarks/actors/cfa/static.cfa
r2b78949 r8a930c03 23 23 24 24 uint64_t start_time; 25 Allocation receive( derived_actor & receiver, derived_msg & msg ) {25 allocation receive( derived_actor & receiver, derived_msg & msg ) { 26 26 if ( msg.cnt >= Times ) { 27 27 printf("%.2f\n", ((double)(bench_time() - start_time)) / ((double)Times) ); // ns -
doc/theses/colby_parsons_MMAth/benchmarks/actors/plotData.py
r2b78949 r8a930c03 160 160 161 161 if currVariant == numVariants: 162 fig, ax = plt.subplots( )162 fig, ax = plt.subplots(layout='constrained') 163 163 plt.title(name + " Benchmark") 164 164 plt.ylabel("Runtime (seconds)") -
doc/theses/colby_parsons_MMAth/benchmarks/channels/plotData.py
r2b78949 r8a930c03 124 124 125 125 if currVariant == numVariants: 126 fig, ax = plt.subplots( )126 fig, ax = plt.subplots(layout='constrained') 127 127 plt.title(name + " Benchmark") 128 128 plt.ylabel("Throughput (channel operations)") -
doc/theses/colby_parsons_MMAth/benchmarks/mutex_stmt/plotData.py
r2b78949 r8a930c03 97 97 98 98 if currVariant == numVariants: 99 fig, ax = plt.subplots( )99 fig, ax = plt.subplots(layout='constrained') 100 100 plt.title(name + " Benchmark: " + str(currLocks) + " Locks") 101 101 plt.ylabel("Throughput (entries)") -
doc/theses/colby_parsons_MMAth/code/basic_actor_example.cfa
r2b78949 r8a930c03 19 19 } 20 20 21 Allocation receive( derived_actor & receiver, derived_msg & msg ) {21 allocation receive( derived_actor & receiver, derived_msg & msg ) { 22 22 printf("The message contained the string: %s\n", msg.word); 23 23 return Finished; // Return allocation status of Finished now that the actor is done work -
doc/theses/colby_parsons_MMAth/glossary.tex
r2b78949 r8a930c03 32 32 % Examples from template above 33 33 34 \newabbreviation{raii}{RAII}{ Resource Acquisition Is Initialization}35 \newabbreviation{rtti}{RTTI}{ Run-Time Type Information}36 \newabbreviation{fcfs}{FCFS}{ First Come First Served}37 \newabbreviation{toctou}{TOCTOU}{ time-of-check to time-of-use}34 \newabbreviation{raii}{RAII}{\Newterm{resource acquisition is initialization}} 35 \newabbreviation{rtti}{RTTI}{\Newterm{run-time type information}} 36 \newabbreviation{fcfs}{FCFS}{\Newterm{first-come first-served}} 37 \newabbreviation{toctou}{TOCTOU}{\Newterm{time-of-check to time-of-use}} 38 38 39 39 \newglossaryentry{actor} -
doc/theses/colby_parsons_MMAth/local.bib
r2b78949 r8a930c03 95 95 @misc{go:select, 96 96 author = "The Go Programming Language", 97 title = "src/runtime/ chan.go",97 title = "src/runtime/select.go", 98 98 howpublished = {\href{https://go.dev/src/runtime/select.go}}, 99 99 note = "[Online; accessed 23-May-2023]" 100 100 } 101 101 102 @misc{go:selectref, 103 author = "The Go Programming Language Specification", 104 title = "Select statements", 105 howpublished = {\href{https://go.dev/ref/spec#Select\_statements}}, 106 note = "[Online; accessed 23-May-2023]" 107 } 108 109 @misc{boost:channel, 110 author = "Boost C++ Libraries", 111 title = "experimental::basic\_concurrent\_channel", 112 howpublished = {\href{https://www.boost.org/doc/libs/master/doc/html/boost\_asio/reference/experimental\__basic\_concurrent\_channel.html}}, 113 note = "[Online; accessed 23-May-2023]" 114 } 115 116 @misc{rust:channel, 117 author = "The Rust Standard Library", 118 title = "std::sync::mpsc::sync\_channel", 119 howpublished = {\href{https://doc.rust-lang.org/std/sync/mpsc/fn.sync\_channel.html}}, 120 note = "[Online; accessed 23-May-2023]" 121 } 122 123 @misc{rust:select, 124 author = "The Rust Standard Library", 125 title = "Macro futures::select", 126 howpublished = {\href{https://docs.rs/futures/latest/futures/macro.select.html}}, 127 note = "[Online; accessed 23-May-2023]" 128 } 129 130 @misc{ocaml:channel, 131 author = "The OCaml Manual", 132 title = "OCaml library : Event", 133 howpublished = {\href{https://v2.ocaml.org/api/Event.html}}, 134 note = "[Online; accessed 23-May-2023]" 135 } 136 137 @misc{haskell:channel, 138 author = "The Haskell Package Repository", 139 title = "Control.Concurrent.Chan", 140 howpublished = {\href{https://hackage.haskell.org/package/base-4.18.0.0/docs/Control-Concurrent-Chan.html}}, 141 note = "[Online; accessed 23-May-2023]" 142 } 143 144 @misc{linux:select, 145 author = "Linux man pages", 146 title = "select(2) — Linux manual page", 147 howpublished = 
{\href{https://man7.org/linux/man-pages/man2/select.2.html}}, 148 note = "[Online; accessed 23-May-2023]" 149 } 150 151 @misc{linux:poll, 152 author = "Linux man pages", 153 title = "poll(2) — Linux manual page", 154 howpublished = {\href{https://man7.org/linux/man-pages/man2/poll.2.html}}, 155 note = "[Online; accessed 23-May-2023]" 156 } 157 158 @misc{linux:epoll, 159 author = "Linux man pages", 160 title = "epoll(7) — Linux manual page", 161 howpublished = {\href{https://man7.org/linux/man-pages/man7/epoll.7.html}}, 162 note = "[Online; accessed 23-May-2023]" 163 } 164 165 @article{Ichbiah79, 166 title={Preliminary Ada reference manual}, 167 author={Ichbiah, Jean D}, 168 journal={ACM Sigplan Notices}, 169 volume={14}, 170 number={6a}, 171 pages={1--145}, 172 year={1979}, 173 publisher={ACM New York, NY, USA} 174 } 175 176 @misc{cpp:whenany, 177 author = "C++ reference", 178 title = "std::experimental::when\_any", 179 howpublished = {\href{https://en.cppreference.com/w/cpp/experimental/when\_any}}, 180 note = "[Online; accessed 23-May-2023]" 181 } 182 183 184 -
doc/theses/colby_parsons_MMAth/style/style.tex
r2b78949 r8a930c03 15 15 \newsavebox{\myboxB} 16 16 17 \lstnewenvironment{Golang}[1][] 18 {\lstset{language=Go,literate={<-}{\makebox[2ex][c]{\textless\raisebox{0.4ex}{\rule{0.8ex}{0.075ex}}}}2, 19 moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}} 20 {} 21 17 22 \lstnewenvironment{java}[1][] 18 23 {\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}} -
doc/theses/colby_parsons_MMAth/text/channels.tex
r2b78949 r8a930c03 17 17 Additionally all channel operations in CSP are synchronous (no buffering). 18 18 Advanced channels as a programming language feature has been popularized in recent years by the language Go~\cite{Go}, which encourages the use of channels as its fundamental concurrent feature. 19 It was the popularity of Go channels that lead to their implement ion in \CFA.19 It was the popularity of Go channels that lead to their implementation in \CFA. 20 20 Neither Go nor \CFA channels have the restrictions of the early channel-based concurrent systems. 21 22 Other popular languages and libraries that provide channels include C++ Boost~\cite{boost:channel}, Rust~\cite{rust:channel}, Haskell~\cite{haskell:channel}, and OCaml~\cite{ocaml:channel}. 23 Boost channels only support asynchronous (non-blocking) operations, and Rust channels are limited to only having one consumer per channel. 24 Haskell channels are unbounded in size, and OCaml channels are zero-size. 25 These restrictions in Haskell and OCaml are likely due to their functional approach, which results in them both using a list as the underlying data structure for their channel. 26 These languages and libraries are not discussed further, as their channel implementation is not comparable to the bounded-buffer style channels present in Go and \CFA. 21 27 22 28 \section{Producer-Consumer Problem} … … 61 67 \section{Channel Implementation} 62 68 Currently, only the Go programming language provides user-level threading where the primary communication mechanism is channels. 63 Experiments were conducted that varied the producer-consumer problemalgorithm and lock type used inside the channel.69 Experiments were conducted that varied the producer-consumer algorithm and lock type used inside the channel. 
64 70 With the exception of non-\gls{fcfs} or non-FIFO algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant that Go's choice of algorithm and lock implementation. 65 71 Performance of channels can be improved by sharding the underlying buffer \cite{Dice11}. 66 In doing so the FIFO property is lost, which is undesireable for user-facing channels.72 However, the FIFO property is lost, which is undesirable for user-facing channels. 67 73 Therefore, the low-level channel implementation in \CFA is largely copied from the Go implementation, but adapted to the \CFA type and runtime systems. 68 74 As such the research contributions added by \CFA's channel implementation lie in the realm of safety and productivity features. 69 75 70 The Go channel implementation utilitizes cooperation between threads to achieve good performance~\cite{go:chan}. 71 The cooperation between threads only occurs when producers or consumers need to block due to the buffer being full or empty. 72 In these cases the blocking thread stores their relevant data in a shared location and the signalling thread will complete their operation before waking them. 73 This helps improve performance in a few ways. 74 First, each thread interacting with the channel with only acquire and release the internal channel lock exactly once. 75 This decreases contention on the internal lock, as only entering threads will compete for the lock since signalled threads never reacquire the lock. 76 The other advantage of the cooperation approach is that it eliminates the potential bottleneck of waiting for signalled threads. 77 The property of acquiring/releasing the lock only once can be achieved without cooperation by \Newterm{baton passing} the lock. 78 Baton passing is when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section conceptually "passing" the mutual exclusion to the signalled thread. 
79 While baton passing is useful in some algorithms, it results in worse performance than the cooperation approach in channel implementations since all entering threads then need to wait for the blocked thread to reach the front of the ready queue and run before other operations on the channel can proceed. 76 The Go channel implementation utilizes cooperation among threads to achieve good performance~\cite{go:chan}. 77 This cooperation only occurs when producers or consumers need to block due to the buffer being full or empty. 78 In these cases, a blocking thread stores their relevant data in a shared location and the signalling thread completes the blocking thread's operation before waking them; 79 \ie the blocking thread has no work to perform after it unblocks because the signalling threads has done this work. 80 This approach is similar to wait morphing for locks~\cite[p.~82]{Butenhof97} and improves performance in a few ways. 81 First, each thread interacting with the channel only acquires and releases the internal channel lock once. 82 As a result, contention on the internal lock is decreased, as only entering threads compete for the lock as unblocking threads do not reacquire the lock. 83 The other advantage of Go's wait-morphing approach is that it eliminates the bottleneck of waiting for signalled threads to run. 84 Note, the property of acquiring/releasing the lock only once can also be achieved with a different form of cooperation, called \Newterm{baton passing}. 85 Baton passing occurs when one thread acquires a lock but does not release it, and instead signals a thread inside the critical section, conceptually ``passing'' the mutual exclusion from the signalling thread to the signalled thread. 
86 The baton-passing approach has threads cooperate to pass mutual exclusion without additional lock acquires or releases; 87 the wait-morphing approach has threads cooperate by completing the signalled thread's operation, thus removing a signalled thread's need for mutual exclusion after unblocking. 88 While baton passing is useful in some algorithms, it results in worse channel performance than the Go approach. 89 In the baton-passing approach, all threads need to wait for the signalled thread to reach the front of the ready queue, context switch, and run before other operations on the channel can proceed, since the signalled thread holds mutual exclusion; 90 in the wait-morphing approach, since the operation is completed before the signal, other threads can continue to operate on the channel without waiting for the signalled thread to run. 80 91 81 92 In this work, all channel sizes \see{Sections~\ref{s:ChannelSize}} are implemented with bounded buffers. … … 100 111 \subsection{Toggle-able Statistics} 101 112 As discussed, a channel is a concurrent layer over a bounded buffer. 102 To achieve efficient buffering users should aim for as few blocking operations on a channel as possible.103 Often to achieve this users maychange the buffer size, shard a channel into multiple channels, or tweak the number of producer and consumer threads.104 Fo users to be able to make informed decisions when tuning channel usage, toggle-able channel statistics are provided.105 The statistics are toggled at compile time via the @CHAN_STATS@ macro to ensure that they are entirely elided when not used.106 When statistics are turned on, four counters are maintained per channel, two for producers and two for consumers.113 To achieve efficient buffering, users should aim for as few blocking operations on a channel as possible. 114 Mechanisms to reduce blocking are: change the buffer size, shard a channel into multiple channels, or tweak the number of producer and consumer threads. 
115 For users to be able to make informed decisions when tuning channel usage, toggle-able channel statistics are provided. 116 The statistics are toggled on during the \CFA build by defining the @CHAN_STATS@ macro, which guarantees zero cost when not using this feature. 117 When statistics are turned on, four counters are maintained per channel, two for inserting (producers) and two for removing (consumers). 107 118 The two counters per type of operation track the number of blocking operations and total operations. 108 In the channel destructor the counters are printed out aggregated and also per type of operation. 109 An example use case of the counters follows. 110 A user is buffering information between producer and consumer threads and wants to analyze channel performance. 111 Via the statistics they see that producers block for a large percentage of their operations while consumers do not block often. 112 They then can use this information to adjust their number of producers/consumers or channel size to achieve a larger percentage of non-blocking producer operations, thus increasing their channel throughput. 119 In the channel destructor, the counters are printed out aggregated and also per type of operation. 120 An example use case is noting that producer inserts are blocking often while consumer removes do not block often. 121 This information can be used to increase the number of consumers to decrease the blocking producer operations, thus increasing the channel throughput. 122 Whereas, increasing the channel size in this scenario is unlikely to produce a benefit because the consumers can never keep up with the producers. 113 123 114 124 \subsection{Deadlock Detection} 115 The deadlock detection in the \CFA channels is fairly basic. 116 It only detects the case where threads are blocked on the channel during deallocation. 117 This case is guaranteed to deadlock since the list holding the blocked thread is internal to the channel and will be deallocated. 
118 If a user maintained a separate reference to a thread and unparked it outside the channel they could avoid the deadlock, but would run into other runtime errors since the thread would access channel data after waking that is now deallocated. 119 More robust deadlock detection surrounding channel usage would have to be implemented separate from the channel implementation since it would require knowledge about the threading system and other channel/thread state. 125 The deadlock detection in the \CFA channels is fairly basic but detects a very common channel mistake during termination. 126 That is, it detects the case where threads are blocked on the channel during channel deallocation. 127 This case is guaranteed to deadlock since there are no other threads to supply or consume values needed by the waiting threads. 128 Only if a user maintained a separate reference to the blocked threads and manually unblocks them outside the channel could the deadlock be avoid. 129 However, without special semantics, this unblocking would generate other runtime errors where the unblocked thread attempts to access non-existing channel data or even a deallocated channel. 130 More robust deadlock detection needs to be implemented separate from channels since it requires knowledge about the threading system and other channel/thread state. 120 131 121 132 \subsection{Program Shutdown} 122 133 Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed. 123 The difficulty of graceful termination often arises from the usage ofsynchronization primitives that need to be handled carefully during shutdown.134 Graceful termination can be difficult to achieve with synchronization primitives that need to be handled carefully during shutdown. 124 135 It is easy to deadlock during termination if threads are left behind on synchronization primitives. 
125 136 Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is race between one thread checking the state of a concurrent object and another thread changing the state. 126 137 \gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it. 127 138 Channels are a particularly hard synchronization primitive to terminate since both sending and receiving to/from a channel can block. 128 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads trying to perform the termination may end up unexpectedly blocking in their attempt to help other threads exit the system. 129 130 \paragraph{Go channels} provide a set of tools to help with concurrent shutdown~\cite{go:chan}. 131 Channels in Go have a @close@ operation and a \Go{select} statement that both can be used to help threads terminate. 139 Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads performing the termination may end up unexpectedly blocking in their attempt to help other threads exit the system. 140 141 \paragraph{Go channels} provide a set of tools to help with concurrent shutdown~\cite{go:chan} using a @close@ operation in conjunction with the \Go{select} statement. 132 142 The \Go{select} statement is discussed in \ref{s:waituntil}, where \CFA's @waituntil@ statement is compared with the Go \Go{select} statement. 133 143 … … 143 153 Note, panics in Go can be caught, but it is not the idiomatic way to write Go programs. 144 154 145 While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired.155 While Go's channel-closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired. 
146 156 Since both closing and sending panic once a channel is closed, a user often has to synchronize the senders (producers) before the channel can be closed to avoid panics. 147 157 However, in doing so it renders the @close@ operation nearly useless, as the only utilities it provides are the ability to ensure receivers no longer block on the channel and receive zero-valued elements. 148 158 This functionality is only useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is necessary, then @close@ only provides the non-blocking feature. 149 159 To avoid \gls{toctou} issues during shutdown, a busy wait with a \Go{select} statement is often used to add or remove elements from a channel. 150 Due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown.160 Hence, due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown. 151 161 152 162 \paragraph{\CFA channels} have access to an extensive exception handling mechanism~\cite{Beach21}. … … 161 171 When a channel in \CFA is closed, all subsequent calls to the channel raise a resumption exception at the caller. 162 172 If the resumption is handled, the caller attempts to complete the channel operation. 163 However, if channel operation would block, a termination exception is thrown.173 However, if the channel operation would block, a termination exception is thrown. 164 174 If the resumption is not handled, the exception is rethrown as a termination. 165 175 These termination exceptions allow for non-local transfer that is used to great effect to eagerly and gracefully shut down a thread. 166 176 When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them. 
167 The resumption exception, @channel_closed@, has a couple fields to aid in handling the exception. 168 The exception contains a pointer to the channel it was thrown from, and a pointer to an element. 169 In exceptions thrown from remove the element pointer will be null. 170 In the case of insert the element pointer points to the element that the thread attempted to insert. 177 The resumption exception, @channel_closed@, has internal fields to aid in handling the exception. 178 The exception contains a pointer to the channel it is thrown from and a pointer to a buffer element. 179 For exceptions thrown from @remove@, the buffer element pointer is null. 180 For exceptions thrown from @insert@, the element pointer points to the buffer element that the thread attempted to insert. 181 Utility routines @bool is_insert( channel_closed & e );@ and @bool is_remove( channel_closed & e );@ are provided for convenient checking of the element pointer. 171 182 This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler. 172 Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based which channel and operation failed. 173 Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception. 174 It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination that the approach used in Go. 175 This should not be an issue, since termination is rarely an fast-path of an application and ensuring that termination can be implemented correctly with ease is the aim of the exception approach. 183 Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based on which channel and operation failed. 
184 For example, exception handlers in \CFA have an optional predicate which can be used to trigger or skip handlers based on the content of the matching exception. 185 It is worth mentioning that using exceptions for termination may incur a larger performance cost than the Go approach. 186 However, this should not be an issue, since termination is rarely on the fast-path of an application. 187 In contrast, ensuring termination can be easily implemented correctly is the aim of the exception approach. 176 188 177 189 \section{\CFA / Go channel Examples} 178 To highlight the differences between \CFA's and Go's close semantics, three examples will be presented.190 To highlight the differences between \CFA's and Go's close semantics, three examples are presented. 179 191 The first example is a simple shutdown case, where there are producer threads and consumer threads operating on a channel for a fixed duration. 180 Once the duration ends, producers and consumers terminate without worrying about any leftover values in the channel.181 The second example extends the first example by requiring the channel to be empty uponshutdown.192 Once the duration ends, producers and consumers terminate immediately leaving unprocessed elements in the channel. 193 The second example extends the first by requiring the channel to be empty after shutdown. 182 194 Both the first and second example are shown in Figure~\ref{f:ChannelTermination}. 
183 184 185 First the Go solutions to these examples shown in Figure~\ref{l:go_chan_term} are discussed.186 Since some of the elements being passed through the channel are zero-valued, closing the channel in Go does not aid in communicating shutdown.187 Instead, a different mechanism to communicate with the consumers and producers needs to be used.188 This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs.189 In this example, a flag is used to communicate with producers and another flag is used for consumers.190 Producers and consumers need separate avenues of communication both so that producers terminate before the channel is closed to avoid panicking, and to avoid the case where all the consumers terminate first, which can result in a deadlock for producers if the channel is full.191 The producer flag is set first, then after producers terminate the consumer flag is set and the channel is closed.192 In the second example where all values need to be consumed, the main thread iterates over the closed channel to process any remaining values.193 194 195 In the \CFA solutions in Figure~\ref{l:cfa_chan_term}, shutdown is communicated directly to both producers and consumers via the @close@ call.196 In the first example where all values do not need to be consumed, both producers and consumers do not handle the resumption and finish once they receive the termination exception.197 The second \CFA example where all values must be consumed highlights how resumption is used with channel shutdown.198 The @Producer@ thread-main knows to stop producing when the @insert@ call on a closed channel raises exception @channel_closed@.199 The @Consumer@ thread-main knows to stop consuming after all elements of a closed channel are removed and the call to @remove@ would block.200 Hence, the consumer knows the moment the channel 
closes because a resumption exception is raised, caught, and ignored, and then control returns to @remove@ to return another item from the buffer.201 Only when the buffer is drained and the call to @remove@ would block, a termination exception is raised to stop consuming.202 The \CFA semantics allow users to communicate channel shutdown directly through the channel, without having to share extra state between threads.203 Additionally, when the channel needs to be drained, \CFA provides users with easy options for processing the leftover channel values in the main thread or in the consumer threads.204 If one wishes to consume the leftover values in the consumer threads in Go, extra synchronization between the main thread and the consumer threads is needed.205 195 206 196 \begin{figure} … … 208 198 209 199 \begin{lrbox}{\myboxA} 200 \begin{Golang}[aboveskip=0pt,belowskip=0pt] 201 var channel chan int = make( chan int, 128 ) 202 var prodJoin chan int = make( chan int, 4 ) 203 var consJoin chan int = make( chan int, 4 ) 204 var cons_done, prod_done bool = false, false; 205 func producer() { 206 for { 207 if prod_done { break } 208 channel <- 5 209 } 210 prodJoin <- 0 // synch with main thd 211 } 212 213 func consumer() { 214 for { 215 if cons_done { break } 216 <- channel 217 } 218 consJoin <- 0 // synch with main thd 219 } 220 221 222 func main() { 223 for j := 0; j < 4; j++ { go consumer() } 224 for j := 0; j < 4; j++ { go producer() } 225 time.Sleep( time.Second * 10 ) 226 prod_done = true 227 for j := 0; j < 4 ; j++ { <- prodJoin } 228 cons_done = true 229 close(channel) // ensure no cons deadlock 230 @for elem := range channel {@ 231 // process leftover values 232 @}@ 233 for j := 0; j < 4; j++ { <- consJoin } 234 } 235 \end{Golang} 236 \end{lrbox} 237 238 \begin{lrbox}{\myboxB} 210 239 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 211 channel( size_t ) Channel{ ChannelSize }; 212 240 channel( size_t ) chan{ 128 }; 213 241 thread Consumer {}; 242 thread Producer {}; 
243 244 void main( Producer & this ) { 245 try { 246 for () 247 insert( chan, 5 ); 248 } catch( channel_closed * ) { 249 // unhandled resume or full 250 } 251 } 214 252 void main( Consumer & this ) { 215 try { 216 for ( ;; ) 217 remove( Channel ); 218 @} catchResume( channel_closed * ) { @ 219 // handled resume => consume from chan 220 } catch( channel_closed * ) { 221 // empty or unhandled resume 222 } 223 } 224 225 thread Producer {}; 226 void main( Producer & this ) { 227 size_t count = 0; 228 try { 229 for ( ;; ) 230 insert( Channel, count++ ); 231 } catch ( channel_closed * ) { 232 // unhandled resume or full 233 } 234 } 235 236 int main( int argc, char * argv[] ) { 237 Consumer c[Consumers]; 238 Producer p[Producers]; 239 sleep(Duration`s); 240 close( Channel ); 241 return 0; 242 } 253 try { 254 for () { int i = remove( chan ); } 255 @} catchResume( channel_closed * ) {@ 256 // handled resume => consume from chan 257 } catch( channel_closed * ) { 258 // empty or unhandled resume 259 } 260 } 261 int main() { 262 Consumer c[4]; 263 Producer p[4]; 264 sleep( 10`s ); 265 close( chan ); 266 } 267 268 269 270 271 272 273 243 274 \end{cfa} 244 275 \end{lrbox} 245 276 246 \begin{lrbox}{\myboxB} 247 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 248 var cons_done, prod_done bool = false, false; 249 var prodJoin chan int = make(chan int, Producers) 250 var consJoin chan int = make(chan int, Consumers) 251 252 func consumer( channel chan uint64 ) { 253 for { 254 if cons_done { break } 255 <-channel 256 } 257 consJoin <- 0 // synch with main thd 258 } 259 260 func producer( channel chan uint64 ) { 261 var count uint64 = 0 262 for { 263 if prod_done { break } 264 channel <- count++ 265 } 266 prodJoin <- 0 // synch with main thd 267 } 268 269 func main() { 270 channel = make(chan uint64, ChannelSize) 271 for j := 0; j < Consumers; j++ { 272 go consumer( channel ) 273 } 274 for j := 0; j < Producers; j++ { 275 go producer( channel ) 276 } 277 time.Sleep(time.Second * Duration) 
278 prod_done = true 279 for j := 0; j < Producers ; j++ { 280 <-prodJoin // wait for prods 281 } 282 cons_done = true 283 close(channel) // ensure no cons deadlock 284 @for elem := range channel { @ 285 // process leftover values 286 @}@ 287 for j := 0; j < Consumers; j++{ 288 <-consJoin // wait for cons 289 } 290 } 291 \end{cfa} 292 \end{lrbox} 293 294 \subfloat[\CFA style]{\label{l:cfa_chan_term}\usebox\myboxA} 277 \subfloat[Go style]{\label{l:go_chan_term}\usebox\myboxA} 295 278 \hspace*{3pt} 296 279 \vrule 297 280 \hspace*{3pt} 298 \subfloat[ Go style]{\label{l:go_chan_term}\usebox\myboxB}281 \subfloat[\CFA style]{\label{l:cfa_chan_term}\usebox\myboxB} 299 282 \caption{Channel Termination Examples 1 and 2. Code specific to example 2 is highlighted.} 300 283 \label{f:ChannelTermination} 301 284 \end{figure} 302 285 303 The final shutdown example uses channels to implement a barrier. 304 It is shown in Figure~\ref{f:ChannelBarrierTermination}. 305 The problem of implementing a barrier is chosen since threads are both producers and consumers on the barrier-internal channels, which removes the ability to easily synchronize producers before consumers during shutdown. 306 As such, while the shutdown details will be discussed with this problem in mind, they are also applicable to other problems taht have individual threads both producing and consuming from channels. 307 Both of these examples are implemented using \CFA syntax so that they can be easily compared. 308 Figure~\ref{l:cfa_chan_bar} uses \CFA-style channel close semantics and Figure~\ref{l:go_chan_bar} uses Go-style close semantics. 309 In this example it is infeasible to use the Go @close@ call since all threads are both potentially producers and consumers, causing panics on close to be unavoidable without complex synchronization. 
310 As such in Figure~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of @-1@ has to be used to indicate to threads that they need to leave the barrier. 311 This sentinel value has to be checked at two points. 286 Figure~\ref{l:go_chan_term} shows the Go solution. 287 Since some of the elements being passed through the channel are zero-valued, closing the channel in Go does not aid in communicating shutdown. 288 Instead, a different mechanism to communicate with the consumers and producers needs to be used. 289 Flag variables are common in Go-channel shutdown-code to avoid panics on a channel, meaning the channel shutdown has to be communicated with threads before it occurs. 290 Hence, the two flags @cons_done@ and @prod_done@ are used to communicate with the consumers and producers, respectively. 291 Furthermore, producers and consumers need to shut down separately to ensure that producers terminate before the channel is closed to avoid panicking, and to avoid the case where all the consumers terminate first, which can result in a deadlock for producers if the channel is full. 292 The producer flag is set first; 293 then after all producers terminate, the consumer flag is set and the channel is closed, leaving elements in the buffer. 294 To purge the buffer, a loop is added (red) that iterates over the closed channel to process any remaining values. 295 296 Figure~\ref{l:cfa_chan_term} shows the \CFA solution. 297 Here, shutdown is communicated directly to both producers and consumers via the @close@ call. 298 A @Producer@ thread knows to stop producing when the @insert@ call on a closed channel raises exception @channel_closed@. 299 If a @Consumer@ thread ignores the first resumption exception from the @close@, the exception is reraised as a termination exception and elements are left in the buffer. 300 If a @Consumer@ thread handles the resumption exceptions (red), control returns to complete the remove.
301 A @Consumer@ thread knows to stop consuming after all elements of a closed channel are removed and the consumer would block, which causes a termination raise of @channel_closed@. 302 The \CFA semantics allow users to communicate channel shutdown directly through the channel, without having to share extra state between threads. 303 Additionally, when the channel needs to be drained, \CFA provides users with easy options for processing the leftover channel values in the main thread or in the consumer threads. 304 305 Figure~\ref{f:ChannelBarrierTermination} shows a final shutdown example using channels to implement a barrier. 306 A Go and \CFA style solution are presented but both are implemented using \CFA syntax so they can be easily compared. 307 Implementing a barrier is interesting because threads are both producers and consumers on the barrier-internal channels, @entryWait@ and @barWait@. 308 The outline for the barrier implementation starts by initially filling the @entryWait@ channel with $N$ tickets in the barrier constructor, allowing $N$ arriving threads to remove these values and enter the barrier. 309 After @entryWait@ is empty, arriving threads block when removing. 310 However, the arriving threads that entered the barrier cannot leave the barrier until $N$ threads have arrived. 311 Hence, the entering threads block on the empty @barWait@ channel until the $N$th arriving thread inserts $N-1$ elements into @barWait@ to unblock the $N-1$ threads calling @remove@. 312 The race between these arriving threads blocking on @barWait@ and the $N$th thread inserting values into @barWait@ does not affect correctness; 313 \ie an arriving thread may or may not block on channel @barWait@ to get its value. 314 Finally, the last thread to remove from @barWait@ with ticket $N-2$, refills channel @entryWait@ with $N$ values to start the next group into the barrier. 
315 316 Now, the two channels make termination synchronization between producers and consumers difficult. 317 Interestingly, the shutdown details for this problem are also applicable to other problems with threads producing and consuming from the same channel. 318 The Go-style solution cannot use the Go @close@ call since all threads are both potentially producers and consumers, causing panics on close to be unavoidable without complex synchronization. 319 As such in Figure~\ref{l:go_chan_bar}, a flush routine is needed to insert a sentinel value, @-1@, to inform threads waiting in the buffer they need to leave the barrier. 320 This sentinel value has to be checked at two points along the fast-path and sentinel values are daisy-chained into the buffers. 312 321 Furthermore, an additional flag @done@ is needed to communicate to threads once they have left the barrier that they are done. 313 314 In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate. 322 Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier. 323 For the \CFA solution~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, to inform waiting threads they must leave the barrier. 324 This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation.
316 Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier.317 325 318 326 \begin{figure} … … 320 328 321 329 \begin{lrbox}{\myboxA} 330 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 331 struct barrier { 332 channel( int ) barWait, entryWait; 333 int size; 334 }; 335 void ?{}( barrier & this, int size ) with(this) { 336 barWait{size + 1}; entryWait{size + 1}; 337 this.size = size; 338 for ( i; size ) 339 insert( entryWait, i ); 340 } 341 void wait( barrier & this ) with(this) { 342 int ticket = remove( entryWait ); 343 @if ( ticket == -1 ) { insert( entryWait, -1 ); return; }@ 344 if ( ticket == size - 1 ) { 345 for ( i; size - 1 ) 346 insert( barWait, i ); 347 return; 348 } 349 ticket = remove( barWait ); 350 @if ( ticket == -1 ) { insert( barWait, -1 ); return; }@ 351 if ( size == 1 || ticket == size - 2 ) { // last ? 352 for ( i; size ) 353 insert( entryWait, i ); 354 } 355 } 356 void flush(barrier & this) with(this) { 357 @insert( entryWait, -1 ); insert( barWait, -1 );@ 358 } 359 enum { Threads = 4 }; 360 barrier b{Threads}; 361 @bool done = false;@ 362 thread Thread {}; 363 void main( Thread & this ) { 364 for () { 365 @if ( done ) break;@ 366 wait( b ); 367 } 368 } 369 int main() { 370 Thread t[Threads]; 371 sleep(10`s); 372 done = true; 373 flush( b ); 374 } // wait for threads to terminate 375 \end{cfa} 376 \end{lrbox} 377 378 \begin{lrbox}{\myboxB} 322 379 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 323 380 struct barrier { … … 368 425 \end{lrbox} 369 426 370 \begin{lrbox}{\myboxB} 371 \begin{cfa}[aboveskip=0pt,belowskip=0pt] 372 struct barrier { 373 channel( int ) barWait, entryWait; 374 int size; 375 }; 376 void ?{}( barrier & this, int size ) with(this) { 377 barWait{size + 1}; entryWait{size + 1}; 378 this.size = size; 379 for ( i; size ) 380 insert( entryWait, i ); 381 } 382 void wait( barrier & this ) 
with(this) { 383 int ticket = remove( entryWait ); 384 @if ( ticket == -1 ) { insert( entryWait, -1 ); return; }@ 385 if ( ticket == size - 1 ) { 386 for ( i; size - 1 ) 387 insert( barWait, i ); 388 return; 389 } 390 ticket = remove( barWait ); 391 @if ( ticket == -1 ) { insert( barWait, -1 ); return; }@ 392 if ( size == 1 || ticket == size - 2 ) { // last ? 393 for ( i; size ) 394 insert( entryWait, i ); 395 } 396 } 397 void flush(barrier & this) with(this) { 398 @insert( entryWait, -1 ); insert( barWait, -1 );@ 399 } 400 enum { Threads = 4 }; 401 barrier b{Threads}; 402 @bool done = false;@ 403 thread Thread {}; 404 void main( Thread & this ) { 405 for () { 406 @if ( done ) break;@ 407 wait( b ); 408 } 409 } 410 int main() { 411 Thread t[Threads]; 412 sleep(10`s); 413 done = true; 414 flush( b ); 415 } // wait for threads to terminate 416 \end{cfa} 417 \end{lrbox} 418 419 \subfloat[\CFA style]{\label{l:cfa_chan_bar}\usebox\myboxA} 427 \subfloat[Go style]{\label{l:go_chan_bar}\usebox\myboxA} 420 428 \hspace*{3pt} 421 429 \vrule 422 430 \hspace*{3pt} 423 \subfloat[ Go style]{\label{l:go_chan_bar}\usebox\myboxB}431 \subfloat[\CFA style]{\label{l:cfa_chan_bar}\usebox\myboxB} 424 432 \caption{Channel Barrier Termination} 425 433 \label{f:ChannelBarrierTermination} -
doc/theses/colby_parsons_MMAth/text/waituntil.tex
r2b78949 r8a930c03 14 14 The ability to wait for the first stall available without spinning can be done with concurrent tools that provide \gls{synch_multiplex}, the ability to wait synchronously for a resource or set of resources. 15 15 16 % C_TODO: fill in citations in following section17 16 \section{History of Synchronous Multiplexing} 18 17 There is a history of tools that provide \gls{synch_multiplex}. 19 Some of the most well known include the set o r unix system utilities signal(2)\cite{}, poll(2)\cite{}, and epoll(7)\cite{}, and the select statement provided by Go\cite{}.18 Some of the most well known include the set of unix system utilities: select(2)\cite{linux:select}, poll(2)\cite{linux:poll}, and epoll(7)\cite{linux:epoll}, and the select statement provided by Go\cite{go:selectref}. 20 19 21 20 Before one can examine the history of \gls{synch_multiplex} implementations in detail, the preceding theory must be discussed. … … 27 26 If a guard is false then the resource it guards is considered to not be in the set of resources being waited on. 28 27 Guards can be simulated using if statements, but to do so requires \[2^N\] if cases, where @N@ is the number of guards. 29 This transformation from guards to if statements will be discussed further in Section~\ref{}. % C_TODO: fill ref when writing semantics section later 28 The equivalence between guards and exponential if statements comes from an Occam ALT statement rule~\cite{Roscoe88}, which is presented in \CFA syntax in Figure~\ref{f:wu_if}. 29 Providing guards allows for easy toggling of waituntil clauses without introducing repeated code. 
30 31 \begin{figure} 32 \begin{cfa} 33 when( predicate ) waituntil( A ) {} 34 or waituntil( B ) {} 35 // === 36 if ( predicate ) { 37 waituntil( A ) {} 38 or waituntil( B ) {} 39 } else { 40 waituntil( B ) {} 41 } 42 \end{cfa} 43 \caption{Occam's guard to if statement equivalence shown in \CFA syntax.} 44 \label{f:wu_if} 45 \end{figure} 30 46 31 47 Switching to implementations, it is important to discuss the resources being multiplexed. … … 44 60 It is worth noting these \gls{synch_multiplex} tools mentioned so far interact directly with the operating system and are often used to communicate between processes. 45 61 Later \gls{synch_multiplex} started to appear in user-space to support fast multiplexed concurrent communication between threads. 46 An early example of \gls{synch_multiplex} is the select statement in Ada .62 An early example of \gls{synch_multiplex} is the select statement in Ada~\cite[\S~9.7]{Ichbiah79}. 47 63 The select statement in Ada allows a task to multiplex over some subset of its own methods that it would like to @accept@ calls to. 48 64 Tasks in Ada can be thought of as threads which are an object of a specific class, and as such have methods, fields, etc. … … 53 69 The @else@ changes the synchronous multiplexing to asynchronous multiplexing. 54 70 If an @else@ clause is in a select statement and no calls to the @accept@ed methods are immediately available the code block associated with the @else@ is run and the task does not block. 55 The most popular example of user-space \gls{synch_multiplex} is Go with their select statement. 71 72 A popular example of user-space \gls{synch_multiplex} is Go with their select statement~\cite{go:selectref}. 56 73 Go's select statement operates on channels and has the same exclusive-or semantics as the ALT primitive from Occam, and has associated code blocks for each clause like ALT and Ada. 57 74 However, unlike Ada and ALT, Go does not provide any guards for their select statement cases. 
58 75 Go provides a timeout utility and also provides a @default@ clause which has the same semantics as Ada's @else@ clause. 76 77 \uC provides \gls{synch_multiplex} over futures with their @_Select@ statement and Ada-style \gls{synch_multiplex} over monitor methods with their @_Accept@ statement~\cite{uC++}. 78 Their @_Accept@ statement builds upon the select statement offered by Ada, by offering both @and@ and @or@ semantics, which can be used together in the same statement. 79 These semantics are also supported for \uC's @_Select@ statement. 80 This enables fully expressive \gls{synch_multiplex} predicates. 81 82 There are many other languages that provide \gls{synch_multiplex}, including Rust's @select!@ over futures~\cite{rust:select}, OCaml's @select@ over channels~\cite{ocaml:channe}, and C++14's @when_any@ over futures~\cite{cpp:whenany}. 83 Note that while C++14 and Rust provide \gls{synch_multiplex}, their implemetations leave much to be desired as they both rely on busy-waiting polling to wait on multiple resources. 59 84 60 85 \section{Other Approaches to Synchronous Multiplexing} … … 69 94 If the requests for the other resources need to be retracted, the burden falls on the programmer to determine how to synchronize appropriately to ensure that only one resource is delivered. 70 95 71 72 96 \section{\CFA's Waituntil Statement} 73 74 75 97 The new \CFA \gls{synch_multiplex} utility introduced in this work is the @waituntil@ statement. 98 There is a @waitfor@ statement in \CFA that supports Ada-style \gls{synch_multiplex} over monitor methods, so this @waituntil@ focuses on synchronizing over other resources. 99 All of the \gls{synch_multiplex} features mentioned so far are monomorphic, only supporting one resource to wait on, select(2) supports file descriptors, Go's select supports channel operations, \uC's select supports futures, and Ada's select supports monitor method calls. 
100 The waituntil statement in \CFA is polymorphic and provides \gls{synch_multiplex} over any objects that satisfy the trait in Figure~\ref{f:wu_trait}. 101 102 \begin{figure} 103 \begin{cfa} 104 forall(T & | sized(T)) 105 trait is_selectable { 106 // For registering a waituntil stmt on a selectable type 107 bool register_select( T &, select_node & ); 108 109 // For unregistering a waituntil stmt from a selectable type 110 bool unregister_select( T &, select_node & ); 111 112 // on_selected is run on the selecting thread prior to executing the statement associated with the select_node 113 void on_selected( T &, select_node & ); 114 }; 115 \end{cfa} 116 \caption{Trait for types that can be passed into \CFA's waituntil statement.} 117 \label{f:wu_trait} 118 \end{figure} 119 120 Currently locks, channels, futures and timeouts are supported by the waituntil statement, but this will be expanded as other use cases arise. 121 The waituntil statement supports guarded clauses, like Ada and Occam, supports both @or@ and @and@ semantics, like \uC, and provides an @else@ for asynchronous multiplexing. An example of \CFA waituntil usage is shown in Figure~\ref{f:wu_example}. In Figure~\ref{f:wu_example} the waituntil statement is waiting for either @Lock@ to be available or for a value to be read from @Channel@ into @i@ and for @Future@ to be fulfilled. The semantics of the waituntil statement will be discussed in detail in the next section. 122 123 \begin{figure} 124 \begin{cfa} 125 future(int) Future; 126 channel(int) Channel; 127 owner_lock Lock; 128 int i = 0; 129 130 waituntil( Lock ) { ... } 131 or when( i == 0 ) waituntil( i << Channel ) { ... } 132 and waituntil( Future ) { ...
} 133 \end{cfa} 134 \caption{Example of \CFA's waituntil statement} 135 \label{f:wu_example} 136 \end{figure} 137 138 \section{Waituntil Semantics} 139 There are two parts of the waituntil semantics to discuss, the semantics of the statement itself, \ie @and@, @or@, @when@ guards, and @else@ semantics, and the semantics of how the waituntil interacts with types like channels, locks and futures. 140 To start, the semantics of the statement itself will be discussed. 141 142 \subsection{Waituntil Statement Semantics} 143 The @or@ semantics are the most straightforward and nearly match those laid out in the ALT statement from Occam, the clauses have an exclusive-or relationship where the first one to be available will be run and only one clause is run. 144 \CFA's @or@ semantics differ from ALT semantics in one respect, instead of randomly picking a clause when multiple are available, the clause that appears first in the order of clauses will be picked. 145 \eg in the following example, if @foo@ and @bar@ are both available, @foo@ will always be selected since it comes first in the order of waituntil clauses. 146 \begin{cfa} 147 future(int) bar; 148 future(int) foo; 149 waituntil( foo ) { ... } 150 or waituntil( bar ) { ... } 151 \end{cfa} 152 153 The @and@ semantics match the @and@ semantics used by \uC. 154 When multiple clauses are joined by @and@, the waituntil will make a thread wait for all to be available, but will run the corresponding code blocks \emph{as they become available}. 155 As @and@ clauses are made available, the thread will be woken to run those clauses' code blocks and then the thread will wait again until all clauses have been run. 156 This allows work to be done in parallel while synchronizing over a set of resources, and furthermore gives a good reason to use the @and@ operator. 
157 If the @and@ operator waited for all clauses to be available before running, it would not provide much more use than just acquiring those resources one by one in subsequent lines of code. 158 The @and@ operator binds more tightly than the @or@ operator. 159 To give an @or@ operator higher precedence, brackets can be used. 160 \eg the following waituntil unconditionally waits for @C@ and one of either @A@ or @B@, since the @or@ is given higher precedence via brackets. 161 \begin{cfa} 162 (waituntil( A ) { ... } 163 or waituntil( B ) { ... } ) 164 and waituntil( C ) { ... } 165 \end{cfa} 166 167 The guards in the waituntil statement are called @when@ clauses. 168 The @when@ clause is passed a boolean expression. 169 All the @when@ boolean expressions are evaluated before the waituntil statement is run. 170 The guards in Occam's ALT effectively toggle clauses on and off, where a clause will only be evaluated and waited on if the corresponding guard is @true@. 171 The guards in the waituntil statement operate the same way, but require some nuance since both @and@ and @or@ operators are supported. 172 When a guard is false and a clause is removed, it can be thought of as removing that clause and its preceding operator from the statement. 173 \eg in the following example the two waituntil statements are semantically the same. 174 \begin{cfa} 175 when(true) waituntil( A ) { ... } 176 or when(false) waituntil( B ) { ... } 177 and waituntil( C ) { ... } 178 // === 179 waituntil( A ) { ... } 180 and waituntil( C ) { ... } 181 \end{cfa} 182 183 The @else@ clause on the waituntil has identical semantics to the @else@ clause in Ada. 184 If all resources are not immediately available and there is an @else@ clause, the @else@ clause is run and the thread will not block. 185 186 \subsection{Waituntil Type Semantics} 187 As described earlier, to support interaction with the waituntil statement a type must support the trait shown in Figure~\ref{f:wu_trait}.
188 The waituntil statement expects types to register and unregister themselves via calls to @register_select@ and @unregister_select@ respectively. 189 When a resource becomes available, @on_selected@ is run. 190 Many types may not need @on_selected@, but it is provided since some types may need to check and set things before the resource can be accessed in the code block. 191 The register/unregister routines in the trait return booleans. 192 The return value of @register_select@ is @true@ if the resource is immediately available, and @false@ otherwise. 193 The return value of @unregister_select@ is @true@ if the corresponding code block should be run after unregistration and @false@ otherwise. 194 The routine @on_selected@, and the return value of @unregister_select@ were needed to support channels as a resource. 195 More detail on channels and their interaction with waituntil will be discussed in Section~\ref{s:wu_chans}. 196 197 \section{Waituntil Implementation} 198 The waituntil statement is not inherently complex, and can be described in a few steps. 199 The complexity of the statement comes from the consideration of race conditions and synchronization needed when supporting various primitives. 200 The basic steps that the waituntil statement follows are the following. 201 202 First the waituntil statement creates a @select_node@ per resource that is being waited on. 203 The @select_node@ is an object that stores the waituntil data pertaining to one of the resources. 204 Then, each @select_node@ is registered with the corresponding resource. 205 The thread executing the waituntil then enters a loop that will loop until the entire waituntil statement is satisfied. 206 In each iteration of the loop the thread attempts to block. 207 If any clauses are satisfied the block will fail and the thread will proceed, otherwise the block succeeds.
208 After proceeding past the block all clauses are checked for completion and the completed clauses have their code blocks run. 209 Once the thread escapes the loop, the @select_nodes@ are unregistered from the resources. 210 In the case where the block succeeds, the thread will be woken by the thread that marks one of the resources as available. 211 Pseudocode detailing these steps is presented in the following code block. 212 213 \begin{cfa} 214 select_nodes s[N]; // N select nodes 215 for ( node in s ) 216 register_select( resource, node ); 217 while( statement not satisfied ) { 218 // try to block 219 for ( resource in waituntil statement ) 220 if ( resource is avail ) run code block 221 } 222 for ( node in s ) 223 unregister_select( resource, node ); 224 \end{cfa} 225 226 These steps give a basic, but mildly inaccurate overview of how the statement works. 227 Digging into some parts of the implementation will shed light on more of the specifics and provide some accuracy. 228 229 \subsection{Locks} 230 Locks are one of the resources supported in the waituntil statement. 231 When a thread waits on multiple locks via a waituntil, it enqueues a @select_node@ in each of the locks' waiting queues. 232 When a @select_node@ reaches the front of the queue and gains ownership of a lock, the blocked thread is notified. 233 The lock will be held until the node is unregistered. 234 To prevent the waiting thread from holding many locks at once and potentially introducing a deadlock, the node is unregistered right after the corresponding code block is executed. 235 This prevents deadlocks since the waiting thread will never hold a lock while waiting on another resource. 236 As such the only nodes unregistered at the end are the ones that have not run. 237 238 \subsection{Timeouts} 239 Timeouts in the waituntil take the form of a duration being passed to a @sleep@ or @timeout@ call. 240 An example is shown in the following code.
241 242 \begin{cfa} 243 waituntil( sleep( 1`ms ) ) {} 244 waituntil( timeout( 1`s ) ) {} or waituntil( timeout( 2`s ) ) {} 245 waituntil( timeout( 1`ns ) ) {} and waituntil( timeout( 2`s ) ) {} 246 \end{cfa} 247 248 The timeout implementation highlights a key part of the waituntil semantics: the expression is evaluated before the waituntil runs. 249 As such calls to @sleep@ and @timeout@ do not block, but instead return a type that supports the @is_selectable@ trait. 250 This mechanism is needed for types that want to support multiple operations such as channels that support reading and writing. 251 252 \subsection{Channels}\label{s:wu_chans} 253 To support waiting on both reading and writing to channels, the operators @?<<?@ and @?>>?@ are used to show reading and writing to a channel respectively, where the lefthand operand is the value and the righthand operand is the channel. 254 Channels require significant complexity to wait on for a few reasons. 255 The first reason is that reading or writing to a channel is a mutating operation. 256 What this means is that if a read or write to a channel occurs, the state of the channel has changed. 257 In comparison, for standard locks and futures, if a lock is acquired then released or a future is ready but not accessed, the states of the lock and the future are not modified. 258 In this way if a waituntil over locks or futures has some resources available that were not consumed, it is not an issue. 259 However, if a thread modifies a channel on behalf of a thread blocked on a waituntil statement, it is important that the corresponding waituntil code block is run, otherwise there is a potentially erroneous mismatch between the channel state and associated side effects. 260 As such, the @unregister_select@ routine has a boolean return that is used by channels to indicate when the operation was completed but the block was not run yet. 261 As such some channel code blocks may be run as part of the unregister.
262 Furthermore if there are both @and@ and @or@ operators, the @or@ operators stop behaving like exclusive-or semantics since this race between operations and unregisters exists. 263 264 It was deemed important that exclusive-or semantics were maintained when only @or@ operators were used, so this situation has been special-cased, and is handled by having all clauses race to set a value \emph{before} operating on the channel. 265 This approach is infeasible in the case where @and@ and @or@ operators are used. 266 To show this consider the following waituntil statement. 267 268 \begin{cfa} 269 waituntil( i >> A ) {} and waituntil( i >> B ) {} 270 or waituntil( i >> C ) {} and waituntil( i >> D ) {} 271 \end{cfa} 272 273 If exclusive-or semantics were followed, this waituntil would only run the code blocks for @A@ and @B@, or the code blocks for @C@ and @D@. 274 However, to race before operation completion in this case introduces a race whose complexity increases with the size of the waituntil statement. 275 In the example above, for @i@ to be inserted into @C@, to ensure the exclusive-or it must be ensured that @i@ can also be inserted into @D@. 276 Furthermore, the race for the @or@ would also need to be won. 277 However, due to TOCTOU issues, one cannot know that all resources are available without acquiring all the internal locks of channels in the subtree. 278 This is not a good solution for two reasons. 279 It is possible that once all the locks are acquired that the subtree is not satisfied and they must all be released. 280 This would incur high cost for signalling threads and also heavily increase contention on internal channel locks. 281 Furthermore, the waituntil statement is polymorphic and can support resources that do not have internal locks, which also makes this approach infeasible. 
282 As such, the exclusive-or semantics are lost when using both @and@ and @or@ operators since they cannot be supported without significant complexity and hits to waituntil statement performance. 283 284 The mechanism by which the predicate of the waituntil is checked is discussed in more detail in Section~\ref{s:wu_guards}. 285 286 Another consideration introduced by channels is that supporting both reading and writing to a channel in a waituntil means that one waituntil clause may be the notifier for another waituntil clause. 287 This becomes a problem when dealing with the special-cased @or@ where the clauses need to win a race to operate on a channel. 288 When a special-cased @or@ is inserting on one thread and another special-cased @or@ is blocked consuming on another thread, there are not one but two races that need to be consolidated by the inserting thread. 289 (The race can occur in the opposite case with a blocked producer and signalling consumer too.) 290 For the inserting thread to know that the insert succeeded, it needs to win the race for its own waituntil and win the race for the other waituntil. 291 Go solves this problem in its select statement by acquiring the internal locks of all channels before registering the select on the channels. 292 This eliminates the race since no other threads can operate on the blocked channel while its lock is held. 293 294 This approach is not used in \CFA since the waituntil is polymorphic. 295 Not all types in a waituntil have an internal lock, and when using non-channel types acquiring all the locks incurs extra unneeded overhead. 296 Instead, this race is consolidated in \CFA in two phases by having an intermediate pending status value for the race. 297 This case is detectable, and if detected the thread attempting to signal will first race to set the race flag to be pending. 298 If it succeeds, it then attempts to set the consumer's race flag to its success value. 
299 If the producer successfully sets the consumer race flag, then the operation can proceed; if not, the signalling thread will set its own race flag back to the initial value. 300 If any other threads attempt to set the producer's flag and see a pending value, they will wait until the value changes before proceeding to ensure that in the case that the producer fails, the signal will not be lost. 301 This protocol ensures that signals will not be lost and that the two races can be resolved in a safe manner. 302 303 Channels in \CFA have exception-based shutdown mechanisms that the waituntil statement needs to support. 304 These exception mechanisms were what brought in the @on_selected@ routine. 305 This routine is needed by channels to detect if they are closed upon waking from a waituntil statement, to ensure that the appropriate behaviour is taken. 306 307 \subsection{Guards and Statement Predicate}\label{s:wu_guards} 308 Checking for when a synchronous multiplexing utility is done is trivial when it has an or/xor relationship, since any resource becoming available means that the blocked thread can proceed. 309 In \uC and \CFA, their \gls{synch_multiplex} utilities involve both an @and@ and @or@ operator, which makes the problem of checking for completion of the statement more difficult. 310 311 In the \uC @_Select@ statement, this problem is solved by constructing a tree of the resources, where the internal nodes are operators and the leaves are the resources. 312 The internal nodes also store the status of each of the subtrees beneath them. 313 When resources become available, their status is modified and the statuses of the leaf nodes percolate up into the internal nodes, updating the state of the statement. 314 Once the root of the tree has both subtrees marked as @true@, the statement is complete. 315 As an optimization, when the internal nodes are updated, their subtrees marked as @true@ are effectively pruned and are not touched again. 
316 To support \uC's @_Select@ statement guards, the tree prunes the branch if the guard is false. 317 318 The \CFA waituntil statement blocks a thread until a set of resources have become available that satisfy the underlying predicate. 319 The waiting condition of the waituntil statement can be represented as a predicate over the resources, joined by the waituntil operators, where a resource is @true@ if it is available, and @false@ otherwise. 320 In \CFA, this representation is used as the mechanism to check if a thread is done waiting on the waituntil. 321 Leveraging the compiler, a routine is generated per waituntil that is passed the statuses of the resources and returns a boolean that is @true@ when the waituntil is done, and @false@ otherwise. 322 To support guards on the \CFA waituntil statement, the status of a resource disabled by a guard is set to ensure that the predicate function behaves as if that resource is no longer part of the predicate. 323 324 \uC's @_Select@ supports operators both inside and outside the clauses of the statement. 325 \eg in the following example the code blocks will run once their corresponding predicate inside the round braces is satisfied. 326 327 % C_TODO put this in uC++ code style not cfa-style 328 \begin{cfa} 329 Future_ISM<int> A, B, C, D, E; 330 _Select( A || B && C ) { ... } 331 and _Select( D && E ) { ... } 332 \end{cfa} 333 334 This is more expressive than the waituntil statement in \CFA. 335 In \CFA, since the waituntil statement supports more resources than just futures, implementing operators inside clauses was avoided for a few reasons. 336 As an example, suppose \CFA supported operators inside clauses and consider the code snippet in Figure~\ref{f:wu_inside_op}. 337 338 \begin{figure} 339 \begin{cfa} 340 owner_lock A, B, C, D; 341 waituntil( A && B ) { ... } 342 or waituntil( C && D ) { ... 
} 343 \end{cfa} 344 \caption{Example of unsupported operators inside clauses in \CFA.} 345 \label{f:wu_inside_op} 346 \end{figure} 347 348 If the waituntil in Figure~\ref{f:wu_inside_op} works with the same semantics as described and acquires each lock as it becomes available, it opens itself up to possible deadlocks since it is now holding locks and waiting on other resources. 349 As such, other semantics would be needed to ensure that this operation is safe. 350 One possibility is to use \CC's @scoped_lock@ approach that was described in Section~\ref{s:DeadlockAvoidance}; however, the potential for livelock leaves much to be desired. 351 Another possibility would be to use resource ordering similar to \CFA's @mutex@ statement, but that alone is not sufficient if the resource ordering is not used everywhere. 352 Additionally, using resource ordering could conflict with other semantics of the waituntil statement. 353 To show this conflict, consider if the locks in Figure~\ref{f:wu_inside_op} were ordered @D@, @B@, @C@, @A@. 354 If all the locks are available, it becomes complex to both respect the ordering of the waituntil in Figure~\ref{f:wu_inside_op} when choosing which code block to run and also respect the lock ordering of @D@, @B@, @C@, @A@ at the same time. 355 One other way this could be implemented is to wait until all resources for a given clause are available before proceeding to acquire them, but this also quickly becomes a poor approach. 356 This approach will not work due to TOCTOU issues, as it is not possible to ensure that the full set of resources is available without holding them all first. 357 Operators inside clauses in \CFA could potentially be implemented with careful circumvention of the problems involved, but it was not deemed an important feature when taking into account the runtime cost that would need to be paid to handle these situations. 
358 The problem of operators inside clauses also becomes a difficult issue to handle when supporting channels. 359 If internal operators were supported, it would require some way to ensure that channels with internal operators are modified if and only if the corresponding code block is run, but that is not feasible due to reasons described in the exclusive-or portion of Section~\ref{s:wu_chans}. 360 361 \section{Waituntil Performance} 362 The two \gls{synch_multiplex} utilities that are in the realm of comparability with the \CFA waituntil statement are the Go @select@ statement and the \uC @_Select@ statement. 363 As such, two microbenchmarks are presented, one for Go and one for \uC to contrast the systems. 364 The similar utilities discussed at the start of this chapter in C, Ada, Rust, \CC, and OCaml are either not meaningful or not feasible to benchmark against. 365 The select(2) and related utilities in C are not comparable since they are system calls that go into the kernel and operate on file descriptors, whereas the waituntil exists solely in userspace. 366 Ada's @select@ only operates on methods, which is done in \CFA via the @waitfor@ utility, so it is not feasible to benchmark against the @waituntil@, which cannot wait on the same resource. 367 Rust and \CC only offer a busy-wait based approach, which is not meaningfully comparable to a blocking approach. 368 OCaml's @select@ waits on channels that are not comparable with \CFA and Go channels, which makes the OCaml @select@ infeasible to compare with Go's @select@ and \CFA's @waituntil@. 369 Given the differences in features, polymorphism, and expressibility between the waituntil, @select@, and @_Select@ statements, the aim of the microbenchmarking in this chapter is to show that these implementations lie in the same realm of performance, not to pick a winner. 
370 371 \subsection{Channel Benchmark} 372 The channel microbenchmark compares \CFA's waituntil and Go's select, where the resource being waited on is a set of channels. 373 374 %C_TODO explain benchmark 375 376 %C_TODO show results 377 378 %C_TODO discuss results 379 380 \subsection{Future Benchmark} 381 The future benchmark compares \CFA's waituntil with \uC's @_Select@, with both utilities waiting on futures. 382 383 %C_TODO explain benchmark 384 385 %C_TODO show results 386 387 %C_TODO discuss results -
doc/theses/colby_parsons_MMAth/thesis.tex
r2b78949 r8a930c03 111 111 colorlinks=true, % false: boxed links; true: colored links 112 112 linkcolor=blue, % color of internal links 113 citecolor=blue, % color of links to bibliography113 citecolor=blue, % color of links to bibliography 114 114 filecolor=magenta, % color of file links 115 urlcolor=cyan % color of external links 115 urlcolor=cyan, % color of external links 116 breaklinks=true 116 117 } 117 118 \ifthenelse{\boolean{PrintVersion}}{ % for improved print quality, change some hyperref options … … 126 127 % \usepackage[acronym]{glossaries} 127 128 \usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package 129 \renewcommand*{\glstextformat}[1]{\textcolor{black}{#1}} 128 130 % If glossaries-extra is not in your LaTeX distribution, get it from CTAN (http://ctan.org/pkg/glossaries-extra), 129 131 % although it's supposed to be in both the TeX Live and MikTeX distributions. There are also documentation and
Note:
See TracChangeset
for help on using the changeset viewer.