Changeset a085470 for doc


Ignore:
Timestamp:
Apr 10, 2023, 12:03:31 PM (14 months ago)
Author:
Mike Brooks <mlbrooks@…>
Branches:
ADT, ast-experimental, master
Children:
6adeb5f
Parents:
2b01f8e (diff), ea2759b (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
doc/theses/colby_parsons_MMAth
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • doc/theses/colby_parsons_MMAth/style/style.tex

    r2b01f8e ra085470  
    33\lstset{language=CFA}                                   % default language
    44
     5\newcommand{\newtermFont}{\emph}
     6\newcommand{\Newterm}[1]{\newtermFont{#1}}
     7
    58\newcommand{\code}[1]{\lstinline[language=CFA]{#1}}
    69\newcommand{\uC}{$\mu$\CC}
     10\newcommand{\PAB}[1]{{\color{red}PAB: #1}}
    711
     12\newsavebox{\myboxA}                                    % used with subfigure
     13\newsavebox{\myboxB}
     14
     15\lstnewenvironment{java}[1][]
     16{\lstset{language=java,moredelim=**[is][\protect\color{red}]{@}{@}}\lstset{#1}}
     17{}
  • doc/theses/colby_parsons_MMAth/text/actors.tex

    r2b01f8e ra085470  
    9090\begin{cfa}
    9191struct derived_actor {
    92     inline actor;       // Plan-9 C inheritance
     92        inline actor;      // Plan-9 C inheritance
    9393};
    9494void ?{}( derived_actor & this ) { // Default ctor
    95     ((actor &)this){};  // Call to actor ctor
     95        ((actor &)this){};  // Call to actor ctor
    9696}
    9797
    9898struct derived_msg {
    99     inline message;    // Plan-9 C nominal inheritance
    100     char word[12];
     99        inline message; // Plan-9 C nominal inheritance
     100        char word[12];
    101101};
    102102void ?{}( derived_msg & this, char * new_word ) { // Overloaded ctor
    103     ((message &) this){ Nodelete }; // Passing allocation to ctor
    104     strcpy(this.word, new_word);
     103        ((message &) this){ Nodelete }; // Passing allocation to ctor
     104        strcpy(this.word, new_word);
    105105}
    106106
    107107Allocation receive( derived_actor & receiver, derived_msg & msg ) {
    108     printf("The message contained the string: %s\n", msg.word);
    109     return Finished; // Return finished since actor is done
     108        printf("The message contained the string: %s\n", msg.word);
     109        return Finished; // Return finished since actor is done
    110110}
    111111
    112112int main() {
    113     start_actor_system(); // Sets up executor
    114     derived_actor my_actor;       
    115     derived_msg my_msg{ "Hello World" }; // Constructor call
    116     my_actor << my_msg;   // Send message via left shift operator
    117     stop_actor_system(); // Waits until actors are finished
    118     return 0;
     113        start_actor_system(); // Sets up executor
     114        derived_actor my_actor;         
     115        derived_msg my_msg{ "Hello World" }; // Constructor call
     116        my_actor << my_msg;   // Send message via left shift operator
     117        stop_actor_system(); // Waits until actors are finished
     118        return 0;
    119119}
    120120\end{cfa}
     
    229229\section{Envelopes}\label{s:envelope}
    230230In actor systems messages are sent and received by actors.
    231 When a actor receives a message it  executes its behaviour that is associated with that message type.
     231When an actor receives a message it executes its behaviour that is associated with that message type.
    232232However the unit of work that stores the message, the receiving actor's address, and other pertinent information needs to persist between send and the receive.
    233233Furthermore the unit of work needs to be able to be stored in some fashion, usually in a queue, until it is executed by an actor.
     
    301301While other systems are concerned with stealing actors, the \CFA actor system steals queues.
    302302This is a result of \CFA's use of the inverted actor system.
    303  The goal of the \CFA actor work stealing mechanism is to have a zero-victim-cost stealing mechanism.
     303The goal of the \CFA actor work stealing mechanism is to have a zero-victim-cost stealing mechanism.
    304304This does not mean that stealing has no cost.
    305305The goal is to ensure that stealing work does not impact the performance of victim workers.
     
    369369
    370370\begin{cfa}
    371 void swap( uint victim_idx, uint my_idx  ) {
    372     // Step 0:
    373     work_queue * my_queue = request_queues[my_idx];
    374     work_queue * vic_queue = request_queues[victim_idx];
    375     // Step 2:
    376     request_queues[my_idx] = 0p;
    377     // Step 3:
    378     request_queues[victim_idx] = my_queue;
    379     // Step 4:
    380     request_queues[my_idx] = vic_queue;
     371void swap( uint victim_idx, uint my_idx ) {
     372        // Step 0:
     373        work_queue * my_queue = request_queues[my_idx];
     374        work_queue * vic_queue = request_queues[victim_idx];
     375        // Step 2:
     376        request_queues[my_idx] = 0p;
     377        // Step 3:
     378        request_queues[victim_idx] = my_queue;
     379        // Step 4:
     380        request_queues[my_idx] = vic_queue;
    381381}
    382382\end{cfa}
     
    389389// This routine is atomic
    390390bool CAS( work_queue ** ptr, work_queue ** old, work_queue * new ) {
    391     if ( *ptr != *old )
    392         return false;
    393     *ptr = new;
    394     return true;
     391        if ( *ptr != *old )
     392                return false;
     393        *ptr = new;
     394        return true;
    395395}
    396396
    397397bool try_swap_queues( worker & this, uint victim_idx, uint my_idx ) with(this) {
    398     // Step 0:
    399     // request_queues is the shared array of all sharded queues
    400     work_queue * my_queue = request_queues[my_idx];
    401     work_queue * vic_queue = request_queues[victim_idx];
    402 
    403     // Step 1:
    404     // If either queue is 0p then they are in the process of being stolen
    405     // 0p is CForAll's equivalent of C++'s nullptr
    406     if ( vic_queue == 0p ) return false;
    407 
    408     // Step 2:
    409     // Try to set thief's queue ptr to be 0p.
    410     // If this CAS fails someone stole thief's queue so return false
    411     if ( !CAS( &request_queues[my_idx], &my_queue, 0p ) )
    412         return false;
    413    
    414     // Step 3:
    415     // Try to set victim queue ptr to be thief's queue ptr.
    416     // If it fails someone stole the other queue, so fix up then return false
    417     if ( !CAS( &request_queues[victim_idx], &vic_queue, my_queue ) ) {
    418         request_queues[my_idx] = my_queue; // reset queue ptr back to prev val
    419         return false;
    420     }
    421 
    422     // Step 4:
    423     // Successfully swapped.
    424     // Thief's ptr is 0p so no one will touch it
    425     // Write back without CAS is safe
    426     request_queues[my_idx] = vic_queue;
    427     return true;
     398        // Step 0:
     399        // request_queues is the shared array of all sharded queues
     400        work_queue * my_queue = request_queues[my_idx];
     401        work_queue * vic_queue = request_queues[victim_idx];
     402
     403        // Step 1:
     404        // If either queue is 0p then they are in the process of being stolen
     405        // 0p is CForAll's equivalent of C++'s nullptr
     406        if ( vic_queue == 0p ) return false;
     407
     408        // Step 2:
     409        // Try to set thief's queue ptr to be 0p.
     410        // If this CAS fails someone stole thief's queue so return false
     411        if ( !CAS( &request_queues[my_idx], &my_queue, 0p ) )
     412                return false;
     413       
     414        // Step 3:
     415        // Try to set victim queue ptr to be thief's queue ptr.
     416        // If it fails someone stole the other queue, so fix up then return false
     417        if ( !CAS( &request_queues[victim_idx], &vic_queue, my_queue ) ) {
     418                request_queues[my_idx] = my_queue; // reset queue ptr back to prev val
     419                return false;
     420        }
     421
     422        // Step 4:
     423        // Successfully swapped.
     424        // Thief's ptr is 0p so no one will touch it
     425        // Write back without CAS is safe
     426        request_queues[my_idx] = vic_queue;
     427        return true;
    428428}
    429429\end{cfa}\label{c:swap}
     
    706706\label{t:StaticActorMessagePerformance}
    707707\begin{tabular}{*{5}{r|}r}
    708     & \multicolumn{1}{c|}{\CFA (100M)} & \multicolumn{1}{c|}{CAF (10M)} & \multicolumn{1}{c|}{Akka (100M)} & \multicolumn{1}{c|}{\uC (100M)} & \multicolumn{1}{c@{}}{ProtoActor (100M)} \\
    709     \hline                                                                                                                                         
    710     AMD         & \input{data/pykeSendStatic} \\
    711     \hline                                                                                                                                         
    712     Intel       & \input{data/nasusSendStatic}
     708        & \multicolumn{1}{c|}{\CFA (100M)} & \multicolumn{1}{c|}{CAF (10M)} & \multicolumn{1}{c|}{Akka (100M)} & \multicolumn{1}{c|}{\uC (100M)} & \multicolumn{1}{c@{}}{ProtoActor (100M)} \\
     709        \hline                                                                                                                                                 
     710        AMD             & \input{data/pykeSendStatic} \\
     711        \hline                                                                                                                                                 
     712        Intel   & \input{data/nasusSendStatic}
    713713\end{tabular}
    714714
     
    719719
    720720\begin{tabular}{*{5}{r|}r}
    721     & \multicolumn{1}{c|}{\CFA (20M)} & \multicolumn{1}{c|}{CAF (2M)} & \multicolumn{1}{c|}{Akka (2M)} & \multicolumn{1}{c|}{\uC (20M)} & \multicolumn{1}{c@{}}{ProtoActor (2M)} \\
    722     \hline                                                                                                                                         
    723     AMD         & \input{data/pykeSendDynamic} \\
    724     \hline                                                                                                                                         
    725     Intel       & \input{data/nasusSendDynamic}
     721        & \multicolumn{1}{c|}{\CFA (20M)} & \multicolumn{1}{c|}{CAF (2M)} & \multicolumn{1}{c|}{Akka (2M)} & \multicolumn{1}{c|}{\uC (20M)} & \multicolumn{1}{c@{}}{ProtoActor (2M)} \\
     722        \hline                                                                                                                                                 
     723        AMD             & \input{data/pykeSendDynamic} \\
     724        \hline                                                                                                                                                 
     725        Intel   & \input{data/nasusSendDynamic}
    726726\end{tabular}
    727727\end{table}
     
    745745In the static send benchmark all systems except CAF have static send costs that are in the same ballpark, only varying by ~70ns.
    746746In the dynamic send benchmark all systems experience slower message sends, as expected due to the extra allocations.
    747 However,  Akka and ProtoActor, slow down by a more significant margin than the \uC and \CFA.
     747However, Akka and ProtoActor slow down by a more significant margin than \uC and \CFA.
    748748This is likely a result of Akka and ProtoActor's garbage collection, which can suffer from hits in performance for allocation heavy workloads, whereas \uC and \CFA have explicit allocation/deallocation.
    749749
     
    753753
    754754\begin{figure}
    755     \centering
    756     \begin{subfigure}{0.5\textwidth}
    757         \centering
    758         \scalebox{0.5}{\input{figures/nasusCFABalance-One.pgf}}
    759         \subcaption{AMD \CFA Balance-One Benchmark}
    760         \label{f:BalanceOneAMD}
    761     \end{subfigure}\hfill
    762     \begin{subfigure}{0.5\textwidth}
    763         \centering
    764         \scalebox{0.5}{\input{figures/pykeCFABalance-One.pgf}}
    765         \subcaption{Intel \CFA Balance-One Benchmark}
    766         \label{f:BalanceOneIntel}
    767     \end{subfigure}
    768     \caption{The balance-one benchmark comparing stealing heuristics (lower is better).}
    769 \end{figure}
    770 
    771 \begin{figure}
    772     \centering
    773     \begin{subfigure}{0.5\textwidth}
    774         \centering
    775         \scalebox{0.5}{\input{figures/nasusCFABalance-Multi.pgf}}
    776         \subcaption{AMD \CFA Balance-Multi Benchmark}
    777         \label{f:BalanceMultiAMD}
    778     \end{subfigure}\hfill
    779     \begin{subfigure}{0.5\textwidth}
    780         \centering
    781         \scalebox{0.5}{\input{figures/pykeCFABalance-Multi.pgf}}
    782         \subcaption{Intel \CFA Balance-Multi Benchmark}
    783         \label{f:BalanceMultiIntel}
    784     \end{subfigure}
    785     \caption{The balance-multi benchmark comparing stealing heuristics (lower is better).}
     755        \centering
     756        \subfloat[AMD \CFA Balance-One Benchmark]{
     757                \resizebox{0.5\textwidth}{!}{\input{figures/nasusCFABalance-One.pgf}}
     758                \label{f:BalanceOneAMD}
     759        }
     760        \subfloat[Intel \CFA Balance-One Benchmark]{
     761                \resizebox{0.5\textwidth}{!}{\input{figures/pykeCFABalance-One.pgf}}
     762                \label{f:BalanceOneIntel}
     763        }
     764        \caption{The balance-one benchmark comparing stealing heuristics (lower is better).}
     765\end{figure}
     766
     767\begin{figure}
     768        \centering
     769        \subfloat[AMD \CFA Balance-Multi Benchmark]{
     770                \resizebox{0.5\textwidth}{!}{\input{figures/nasusCFABalance-Multi.pgf}}
     771                \label{f:BalanceMultiAMD}
     772        }
     773        \subfloat[Intel \CFA Balance-Multi Benchmark]{
     774                \resizebox{0.5\textwidth}{!}{\input{figures/pykeCFABalance-Multi.pgf}}
     775                \label{f:BalanceMultiIntel}
     776        }
     777        \caption{The balance-multi benchmark comparing stealing heuristics (lower is better).}
    786778\end{figure}
    787779
     
    817809
    818810\begin{figure}
    819     \centering
    820     \begin{subfigure}{0.5\textwidth}
    821         \centering
    822         \scalebox{0.5}{\input{figures/nasusExecutor.pgf}}
    823         \subcaption{AMD Executor Benchmark}
    824         \label{f:ExecutorAMD}
    825     \end{subfigure}\hfill
    826     \begin{subfigure}{0.5\textwidth}
    827         \centering
    828         \scalebox{0.5}{\input{figures/pykeExecutor.pgf}}
    829         \subcaption{Intel Executor Benchmark}
    830         \label{f:ExecutorIntel}
    831     \end{subfigure}
    832     \caption{The executor benchmark comparing actor systems (lower is better).}
     811        \centering
     812        \subfloat[AMD Executor Benchmark]{
     813                \resizebox{0.5\textwidth}{!}{\input{figures/nasusExecutor.pgf}}
     814                \label{f:ExecutorAMD}
     815        }
     816        \subfloat[Intel Executor Benchmark]{
     817                \resizebox{0.5\textwidth}{!}{\input{figures/pykeExecutor.pgf}}
     818                \label{f:ExecutorIntel}
     819        }
     820        \caption{The executor benchmark comparing actor systems (lower is better).}
    833821\end{figure}
    834822
     
    840828
    841829\begin{figure}
    842     \centering
    843     \begin{subfigure}{0.5\textwidth}
    844         \centering
    845         \scalebox{0.5}{\input{figures/nasusCFAExecutor.pgf}}
    846         \subcaption{AMD \CFA Executor Benchmark}\label{f:cfaExecutorAMD}
    847     \end{subfigure}\hfill
    848     \begin{subfigure}{0.5\textwidth}
    849         \centering
    850         \scalebox{0.5}{\input{figures/pykeCFAExecutor.pgf}}
    851         \subcaption{Intel \CFA Executor Benchmark}\label{f:cfaExecutorIntel}
    852     \end{subfigure}
    853     \caption{Executor benchmark comparing \CFA stealing heuristics (lower is better).}
     830        \centering
     831        \subfloat[AMD \CFA Executor Benchmark]{
     832                \resizebox{0.5\textwidth}{!}{\input{figures/nasusCFAExecutor.pgf}}
     833                \label{f:cfaExecutorAMD}
     834        }
     835        \subfloat[Intel \CFA Executor Benchmark]{
     836                \resizebox{0.5\textwidth}{!}{\input{figures/pykeCFAExecutor.pgf}}
     837                \label{f:cfaExecutorIntel}
     838        }
     839        \caption{Executor benchmark comparing \CFA stealing heuristics (lower is better).}
    854840\end{figure}
    855841
     
    857843
    858844\begin{figure}
    859     \centering
    860     \begin{subfigure}{0.5\textwidth}
    861         \centering
    862         \scalebox{0.5}{\input{figures/nasusRepeat.pgf}}
    863         \subcaption{AMD Repeat Benchmark}\label{f:RepeatAMD}
    864     \end{subfigure}\hfill
    865     \begin{subfigure}{0.5\textwidth}
    866         \centering
    867         \scalebox{0.5}{\input{figures/pykeRepeat.pgf}}
    868         \subcaption{Intel Repeat Benchmark}\label{f:RepeatIntel}
    869     \end{subfigure}
    870     \caption{The repeat benchmark comparing actor systems (lower is better).}
     845        \centering
     846        \subfloat[AMD Repeat Benchmark]{
     847                \resizebox{0.5\textwidth}{!}{\input{figures/nasusRepeat.pgf}}
     848                \label{f:RepeatAMD}
     849        }
     850        \subfloat[Intel Repeat Benchmark]{
     851                \resizebox{0.5\textwidth}{!}{\input{figures/pykeRepeat.pgf}}
     852                \label{f:RepeatIntel}
     853        }
     854        \caption{The repeat benchmark comparing actor systems (lower is better).}
    871855\end{figure}
    872856
     
    881865
    882866\begin{figure}
    883     \centering
    884     \begin{subfigure}{0.5\textwidth}
    885         \centering
    886         \scalebox{0.5}{\input{figures/nasusCFARepeat.pgf}}
    887         \subcaption{AMD \CFA Repeat Benchmark}\label{f:cfaRepeatAMD}
    888     \end{subfigure}\hfill
    889     \begin{subfigure}{0.5\textwidth}
    890         \centering
    891         \scalebox{0.5}{\input{figures/pykeCFARepeat.pgf}}
    892         \subcaption{Intel \CFA Repeat Benchmark}\label{f:cfaRepeatIntel}
    893     \end{subfigure}
    894     \caption{The repeat benchmark comparing \CFA stealing heuristics (lower is better).}
     867        \centering
     868        \subfloat[AMD \CFA Repeat Benchmark]{
     869                \resizebox{0.5\textwidth}{!}{\input{figures/nasusCFARepeat.pgf}}
     870                \label{f:cfaRepeatAMD}
     871        }
     872        \subfloat[Intel \CFA Repeat Benchmark]{
     873                \resizebox{0.5\textwidth}{!}{\input{figures/pykeCFARepeat.pgf}}
     874                \label{f:cfaRepeatIntel}
     875        }
     876        \caption{The repeat benchmark comparing \CFA stealing heuristics (lower is better).}
    895877\end{figure}
    896878
     
    913895
    914896\begin{table}[t]
    915     \centering
    916     \setlength{\extrarowheight}{2pt}
    917     \setlength{\tabcolsep}{5pt}
    918    
    919     \caption{Executor Program Memory High Watermark}
    920     \label{t:ExecutorMemory}
    921     \begin{tabular}{*{5}{r|}r}
    922         & \multicolumn{1}{c|}{\CFA} & \multicolumn{1}{c|}{CAF} & \multicolumn{1}{c|}{Akka} & \multicolumn{1}{c|}{\uC} & \multicolumn{1}{c@{}}{ProtoActor} \\
    923         \hline                                                                                                                                     
    924         AMD             & \input{data/pykeExecutorMem} \\
    925         \hline                                                                                                                                     
    926         Intel   & \input{data/nasusExecutorMem}
    927     \end{tabular}
     897        \centering
     898        \setlength{\extrarowheight}{2pt}
     899        \setlength{\tabcolsep}{5pt}
     900       
     901        \caption{Executor Program Memory High Watermark}
     902        \label{t:ExecutorMemory}
     903        \begin{tabular}{*{5}{r|}r}
     904                & \multicolumn{1}{c|}{\CFA} & \multicolumn{1}{c|}{CAF} & \multicolumn{1}{c|}{Akka} & \multicolumn{1}{c|}{\uC} & \multicolumn{1}{c@{}}{ProtoActor} \\
     905                \hline                                                                                                                                                 
     906                AMD             & \input{data/pykeExecutorMem} \\
     907                \hline                                                                                                                                                 
     908                Intel   & \input{data/nasusExecutorMem}
     909        \end{tabular}
    928910\end{table}
    929911
     
    951933
    952934\begin{figure}
    953     \centering
    954     \begin{subfigure}{0.5\textwidth}
    955         \centering
    956         \scalebox{0.5}{\input{figures/nasusMatrix.pgf}}
    957         \subcaption{AMD Matrix Benchmark}\label{f:MatrixAMD}
    958     \end{subfigure}\hfill
    959     \begin{subfigure}{0.5\textwidth}
    960         \centering
    961         \scalebox{0.5}{\input{figures/pykeMatrix.pgf}}
    962         \subcaption{Intel Matrix Benchmark}\label{f:MatrixIntel}
    963     \end{subfigure}
    964     \caption{The matrix benchmark comparing actor systems (lower is better).}
    965 \end{figure}
    966 
    967 \begin{figure}
    968     \centering
    969     \begin{subfigure}{0.5\textwidth}
    970         \centering
    971         \scalebox{0.5}{\input{figures/nasusCFAMatrix.pgf}}
    972         \subcaption{AMD \CFA Matrix Benchmark}\label{f:cfaMatrixAMD}
    973     \end{subfigure}\hfill
    974     \begin{subfigure}{0.5\textwidth}
    975         \centering
    976         \scalebox{0.5}{\input{figures/pykeCFAMatrix.pgf}}
    977         \subcaption{Intel \CFA Matrix Benchmark}\label{f:cfaMatrixIntel}
    978     \end{subfigure}
    979     \caption{The matrix benchmark comparing \CFA stealing heuristics (lower is better).}
    980 \end{figure}
     935        \centering
     936        \subfloat[AMD Matrix Benchmark]{
     937                \resizebox{0.5\textwidth}{!}{\input{figures/nasusMatrix.pgf}}
     938                \label{f:MatrixAMD}
     939        }
     940        \subfloat[Intel Matrix Benchmark]{
     941                \resizebox{0.5\textwidth}{!}{\input{figures/pykeMatrix.pgf}}
     942                \label{f:MatrixIntel}
     943        }
     944        \caption{The matrix benchmark comparing actor systems (lower is better).}
     945\end{figure}
     946
     947\begin{figure}
     948        \centering
     949        \subfloat[AMD \CFA Matrix Benchmark]{
     950                \resizebox{0.5\textwidth}{!}{\input{figures/nasusCFAMatrix.pgf}}
     951                \label{f:cfaMatrixAMD}
     952        }
     953        \subfloat[Intel \CFA Matrix Benchmark]{
     954                \resizebox{0.5\textwidth}{!}{\input{figures/pykeCFAMatrix.pgf}}
     955                \label{f:cfaMatrixIntel}
     956        }
     957        \caption{The matrix benchmark comparing \CFA stealing heuristics (lower is better).}
     958\end{figure}
     959
     960% Local Variables: %
     961% tab-width: 4 %
     962% End: %
  • doc/theses/colby_parsons_MMAth/text/channels.tex

    r2b01f8e ra085470  
    55% ======================================================================
    66
    7 Channels were first introduced by Hoare in his paper Communicating Sequentual Processes~\cite{Hoare78}, where he proposes a concurrent language that communicates across processes using input/output channels to send data. 
    8 Channels are a concurrent language feature used to perform message passing concurrency, a model of concurrency where threads communicate by sending data as messages, and synchronizing via the message passing mechanism. 
    9 This is an alternative to shared memory concurrency, where threads can communicate directly by changing shared memory state. 
    10 Most modern concurrent programming languages do not subscribe to just one style of communication between threads, and provide features that support both. 
     7Channels were first introduced by Hoare in his paper Communicating Sequential Processes~\cite{Hoare78}, where he proposes a concurrent language that communicates across processes using input/output channels to send data.
     8Channels are a concurrent language feature used to perform message passing concurrency, a model of concurrency where threads communicate by sending data as messages, and synchronizing via the message passing mechanism.
     9This is an alternative to shared memory concurrency, where threads can communicate directly by changing shared memory state.
     10Most modern concurrent programming languages do not subscribe to just one style of communication between threads, and provide features that support both.
    1111Channels as a programming language feature has been popularized in recent years due to the language Go, which encourages the use of channels as its fundamental concurrent feature.
    1212
    1313\section{Producer-Consumer Problem}
    14 Most channels in modern programming languages are built on top of a shared memory buffer. 
    15 While it is possible to create a channel that contains an unbounded buffer, most implementations opt to only support a fixed size channel, where the size is given at the time of channel creation. 
    16 This turns the implementation of a channel into the producer-consumer problem. 
    17 The producer-consumer problem, also known as the bounded buffer problem, was introduced by Dijkstra in his book Cooperating Sequential Processes\cite{Dijkstra65}. 
    18 In the problem threads interact with the buffer in two ways, either consuming values by removing them from the buffer, or producing values and inserting them in the buffer. 
    19 The buffer needs to be protected from concurrent access since each item in the buffer should only be produced and consumed once. 
     14Most channels in modern programming languages are built on top of a shared memory buffer.
     15While it is possible to create a channel that contains an unbounded buffer, most implementations opt to only support a fixed size channel, where the size is given at the time of channel creation.
     16This turns the implementation of a channel into the producer-consumer problem.
     17The producer-consumer problem, also known as the bounded buffer problem, was introduced by Dijkstra in his book Cooperating Sequential Processes\cite{Dijkstra65}.
     18In the problem threads interact with the buffer in two ways, either consuming values by removing them from the buffer, or producing values and inserting them in the buffer.
     19The buffer needs to be protected from concurrent access since each item in the buffer should only be produced and consumed once.
    2020Additionally, a consumer can only remove from a non-empty buffer and a producer can only insert into a non-full buffer.
    2121
    2222\section{First-Come First-Served}
    23 The channel implementations that will be discussed are \gls{fcfs}. 
    24 This term was defined by Lamport~\cite{Lamport74}. 
    25 \gls{fcfs} is defined in relation to a doorway~\cite[p.~330]{Lamport86II}, which is the point at which an ordering among threads can be established. 
    26 Given this doorway, a critical section is said to be \gls{fcfs}, if threads access the shared resource in the order they proceed through the doorway. 
    27 \gls{fcfs} is a fairness property which prevents unequal access to the shared resource and prevents starvation, however it can come at a cost. 
    28 Implementing an algorithm with \gls{fcfs} can lead to double blocking, where entering threads may need to block to allow other threads to proceed first, resulting in blocking both inside and outside the doorway. 
     23The channel implementations that will be discussed are \gls{fcfs}.
     24This term was defined by Lamport~\cite{Lamport74}.
     25\gls{fcfs} is defined in relation to a doorway~\cite[p.~330]{Lamport86II}, which is the point at which an ordering among threads can be established.
     26Given this doorway, a critical section is said to be \gls{fcfs}, if threads access the shared resource in the order they proceed through the doorway.
     27\gls{fcfs} is a fairness property which prevents unequal access to the shared resource and prevents starvation, however it can come at a cost.
     28Implementing an algorithm with \gls{fcfs} can lead to double blocking, where entering threads may need to block to allow other threads to proceed first, resulting in blocking both inside and outside the doorway.
    2929As such, algorithms that are not \gls{fcfs} may be more performant but that performance comes with the downside of likely introducing starvation and unfairness.
    3030
    3131\section{Channel Implementation}
    32 The channel implementation in \CFA is a near carbon copy of the Go implementation. 
    33 Experimentation was conducted that varied the producer-consumer problem algorithm and lock type used inside the channel. 
    34 With the exception of non-\gls{fcfs} algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant that Go's choice of algorithm and lock implementation. 
     32The channel implementation in \CFA is a near carbon copy of the Go implementation.
     33Experimentation was conducted that varied the producer-consumer problem algorithm and lock type used inside the channel.
     34With the exception of non-\gls{fcfs} algorithms, no algorithm or lock usage in the channel implementation was found to be consistently more performant than Go's choice of algorithm and lock implementation.
    3535As such the research contributions added by \CFA's channel implementation lie in the realm of safety and productivity features.
    3636
    3737\section{Safety and Productivity}
    38 Channels in \CFA come with safety and productivity features to aid users. 
     38Channels in \CFA come with safety and productivity features to aid users.
    3939The features include the following.
    4040
    4141\begin{itemize}
    42 \item Toggle-able statistic collection on channel behvaiour that counts channel operations, and the number of the operations that block. 
     42\item Toggle-able statistic collection on channel behaviour that counts channel operations, and the number of the operations that block.
    4343Tracking blocking operations helps users tune their channel size or channel usage when the channel is used for buffering, where the aim is to have as few blocking operations as possible.
    44 \item Deadlock detection on deallocation of the channel. 
     44\item Deadlock detection on deallocation of the channel.
    4545If any threads are blocked inside the channel when it terminates, this is detected and the user is informed, as this would cause a deadlock.
    46 \item A \code{flush} routine that delivers copies of an element to all waiting consumers, flushing the buffer. 
     47 Programmers can use this to easily broadcast data to multiple consumers. 
     46\item A \code{flush} routine that delivers copies of an element to all waiting consumers, flushing the buffer.
      47Programmers can use this to easily broadcast data to multiple consumers.
     4848Additionally, the \code{flush} routine is more performant than looping around the \code{insert} operation since it can deliver the elements without having to reacquire mutual exclusion for each element sent.
    4949\end{itemize}
    5050
    51 The other safety and productivity feature of \CFA channels deals with concurrent termination. 
    52 Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed. 
    53 The difficulty of graceful termination often arises from the usage of synchronization primitives which need to be handled carefully during shutdown. 
    54 It is easy to deadlock during termination if threads are left behind on synchronization primitives. 
     55 Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is a race between one thread checking the state of a concurrent object and another thread changing the state. 
    56 \gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it. 
    57 Channels are a particularly hard synchronization primitive to terminate since both sending and receiving off a channel can block. 
     51The other safety and productivity feature of \CFA channels deals with concurrent termination.
     52Terminating concurrent programs is often one of the most difficult parts of writing concurrent code, particularly if graceful termination is needed.
     53The difficulty of graceful termination often arises from the usage of synchronization primitives which need to be handled carefully during shutdown.
     54It is easy to deadlock during termination if threads are left behind on synchronization primitives.
      55Additionally, most synchronization primitives are prone to \gls{toctou} issues where there is a race between one thread checking the state of a concurrent object and another thread changing the state.
     56\gls{toctou} issues with synchronization primitives often involve a race between one thread checking the primitive for blocked threads and another thread blocking on it.
     57Channels are a particularly hard synchronization primitive to terminate since both sending and receiving off a channel can block.
    5858Thus, improperly handled \gls{toctou} issues with channels often result in deadlocks as threads trying to perform the termination may end up unexpectedly blocking in their attempt to help other threads exit the system.
    5959
    6060% C_TODO: add reference to select chapter, add citation to go channels info
    61 Go channels provide a set of tools to help with concurrent shutdown. 
    62 Channels in Go have a \code{close} operation and a \code{select} statement that both can be used to help threads terminate. 
    63 The \code{select} statement will be discussed in \ref{}, where \CFA's \code{waituntil} statement will be compared with the Go \code{select} statement. 
    64 The \code{close} operation on a channel in Go changes the state of the channel. 
    65 When a channel is closed, sends to the channel will panic and additional calls to \code{close} will panic. 
    66 Receives are handled differently where receivers will never block on a closed channel and will continue to remove elements from the channel. 
    67 Once a channel is empty, receivers can continue to remove elements, but will receive the zero-value version of the element type. 
    68 To aid in avoiding unwanted zero-value elements, Go provides the ability to iterate over a closed channel to remove the remaining elements. 
    69 These design choices for Go channels enforce a specific interaction style with channels during termination, where careful thought is needed to ensure that additional \code{close} calls don't occur and that no sends occur after channels are closed. 
    70 These design choices fit Go's paradigm of error management, where users are expected to explicitly check for errors, rather than letting errors occur and catching them. 
    71 If errors need to occur in Go, return codes are used to pass error information where they are needed. 
     61Go channels provide a set of tools to help with concurrent shutdown.
     62Channels in Go have a \code{close} operation and a \code{select} statement that both can be used to help threads terminate.
     63The \code{select} statement will be discussed in \ref{}, where \CFA's \code{waituntil} statement will be compared with the Go \code{select} statement.
     64The \code{close} operation on a channel in Go changes the state of the channel.
     65When a channel is closed, sends to the channel will panic and additional calls to \code{close} will panic.
     66Receives are handled differently where receivers will never block on a closed channel and will continue to remove elements from the channel.
     67Once a channel is empty, receivers can continue to remove elements, but will receive the zero-value version of the element type.
     68To aid in avoiding unwanted zero-value elements, Go provides the ability to iterate over a closed channel to remove the remaining elements.
     69These design choices for Go channels enforce a specific interaction style with channels during termination, where careful thought is needed to ensure that additional \code{close} calls don't occur and that no sends occur after channels are closed.
     70These design choices fit Go's paradigm of error management, where users are expected to explicitly check for errors, rather than letting errors occur and catching them.
     71If errors need to occur in Go, return codes are used to pass error information where they are needed.
    7272Note that panics in Go can be caught, but it is not considered an idiomatic way to write Go programs.
    7373
    74 While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired. 
    75 Since both closing and sending panic, once a channel is closed, a user often has to synchronize the senders to a channel before the channel can be closed to avoid panics. 
    76 However, in doing so it renders the \code{close} operation nearly useless, as the only utilities it provides are the ability to ensure that receivers no longer block on the channel, and will receive zero-valued elements. 
    77 This can be useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is preferred, then \code{close} only provides its non-blocking feature. 
    78 To avoid \gls{toctou} issues during shutdown, a busy wait with a \code{select} statement is often used to add or remove elements from a channel. 
     74While Go's channel closing semantics are powerful enough to perform any concurrent termination needed by a program, their lack of ease of use leaves much to be desired.
     75Since both closing and sending panic, once a channel is closed, a user often has to synchronize the senders to a channel before the channel can be closed to avoid panics.
     76However, in doing so it renders the \code{close} operation nearly useless, as the only utilities it provides are the ability to ensure that receivers no longer block on the channel, and will receive zero-valued elements.
     77This can be useful if the zero-typed element is recognized as a sentinel value, but if another sentinel value is preferred, then \code{close} only provides its non-blocking feature.
     78To avoid \gls{toctou} issues during shutdown, a busy wait with a \code{select} statement is often used to add or remove elements from a channel.
    7979Due to Go's asymmetric approach to channel shutdown, separate synchronization between producers and consumers of a channel has to occur during shutdown.
    8080
     
     8282As such \CFA uses an exception-based approach to channel shutdown that is symmetric for both producers and consumers, and supports graceful shutdown. Exceptions in \CFA support both termination and resumption. Termination exceptions operate in the same way as exceptions seen in many popular programming languages such as \CC, Python and Java.
    8383Resumption exceptions are a style of exception that when caught run the corresponding catch block in the same way that termination exceptions do.
    84 The difference between the exception handling mechanisms arises after the exception is handled. 
    85 In termination handling, the control flow continues into the code following the catch after the exception is handled. 
    86 In resumption handling, the control flow returns to the site of the \code{throw}, allowing the control to continue where it left off. 
    87 Note that in resumption, since control can return to the point of error propagation, the stack is not unwound during resumption propagation. 
    88 In \CFA if a resumption is not handled, it is reraised as a termination. 
     84The difference between the exception handling mechanisms arises after the exception is handled.
     85In termination handling, the control flow continues into the code following the catch after the exception is handled.
     86In resumption handling, the control flow returns to the site of the \code{throw}, allowing the control to continue where it left off.
     87Note that in resumption, since control can return to the point of error propagation, the stack is not unwound during resumption propagation.
     88In \CFA if a resumption is not handled, it is reraised as a termination.
    8989This mechanism can be used to create a flexible and robust termination system for channels.
    9090
    91 When a channel in \CFA is closed, all subsequent calls to the channel will throw a resumption exception at the caller. 
    92 If the resumption is handled, then the caller will proceed to attempt to complete their operation. 
    93 If the resumption is not handled it is then rethrown as a termination exception. 
    94 Or, if the resumption is handled, but the subsequent attempt at an operation would block, a termination exception is thrown. 
    95 These termination exceptions allow for non-local transfer that can be used to great effect to eagerly and gracefully shut down a thread. 
    96 When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them. 
    97 The resumption exception, \code{channel_closed}, has a couple fields to aid in handling the exception. 
    98 The exception contains a pointer to the channel it was thrown from, and a pointer to an element. 
    99 In exceptions thrown from remove the element pointer will be null. 
    100 In the case of insert the element pointer points to the element that the thread attempted to insert. 
    101 This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler. 
     102 Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based on which channel and operation failed. 
    103 Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception. 
     104 It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination than the approach used in Go. 
     91When a channel in \CFA is closed, all subsequent calls to the channel will throw a resumption exception at the caller.
     92If the resumption is handled, then the caller will proceed to attempt to complete their operation.
     93If the resumption is not handled it is then rethrown as a termination exception.
     94Or, if the resumption is handled, but the subsequent attempt at an operation would block, a termination exception is thrown.
     95These termination exceptions allow for non-local transfer that can be used to great effect to eagerly and gracefully shut down a thread.
     96When a channel is closed, if there are any blocked producers or consumers inside the channel, they are woken up and also have a resumption thrown at them.
     97The resumption exception, \code{channel_closed}, has a couple fields to aid in handling the exception.
     98The exception contains a pointer to the channel it was thrown from, and a pointer to an element.
     99In exceptions thrown from remove the element pointer will be null.
     100In the case of insert the element pointer points to the element that the thread attempted to insert.
     101This element pointer allows the handler to know which operation failed and also allows the element to not be lost on a failed insert since it can be moved elsewhere in the handler.
      102Furthermore, due to \CFA's powerful exception system, this data can be used to choose handlers based on which channel and operation failed.
     103Exception handlers in \CFA have an optional predicate after the exception type which can be used to optionally trigger or skip handlers based on the content of an exception.
      104It is worth mentioning that the approach of exceptions for termination may incur a larger performance cost during termination than the approach used in Go.
     105105This should not be an issue, since termination is rarely a fast path of an application and ensuring that termination can be implemented correctly with ease is the aim of the exception approach.
    106106
    107 To highlight the differences between \CFA's and Go's close semantics, an example program is presented. 
    108 The program is a barrier implemented using two channels shown in Listings~\ref{l:cfa_chan_bar} and \ref{l:go_chan_bar}. 
     109 Both of these examples are implemented using \CFA syntax so that they can be easily compared. 
    110 Listing~\ref{l:go_chan_bar} uses go-style channel close semantics and Listing~\ref{l:cfa_chan_bar} uses \CFA close semantics. 
    111 In this problem it is infeasible to use the Go \code{close} call since all tasks are both potentially producers and consumers, causing panics on close to be unavoidable. 
    112 As such in Listing~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of $-1$ has to be used to indicate to threads that they need to leave the barrier. 
    113 This sentinel value has to be checked at two points. 
    114 Furthermore, an additional flag \code{done} is needed to communicate to threads once they have left the barrier that they are done. 
    115 This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs. 
    116 In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate. 
    117 This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation. 
     107To highlight the differences between \CFA's and Go's close semantics, an example program is presented.
     108The program is a barrier implemented using two channels shown in Listings~\ref{l:cfa_chan_bar} and \ref{l:go_chan_bar}.
      109Both of these examples are implemented using \CFA syntax so that they can be easily compared.
     110Listing~\ref{l:go_chan_bar} uses go-style channel close semantics and Listing~\ref{l:cfa_chan_bar} uses \CFA close semantics.
     111In this problem it is infeasible to use the Go \code{close} call since all tasks are both potentially producers and consumers, causing panics on close to be unavoidable.
     112As such in Listing~\ref{l:go_chan_bar} to implement a flush routine for the buffer, a sentinel value of $-1$ has to be used to indicate to threads that they need to leave the barrier.
     113This sentinel value has to be checked at two points.
     114Furthermore, an additional flag \code{done} is needed to communicate to threads once they have left the barrier that they are done.
     115This use of an additional flag or communication method is common in Go channel shutdown code, since to avoid panics on a channel, the shutdown of a channel often has to be communicated with threads before it occurs.
     116In the \CFA version~\ref{l:cfa_chan_bar}, the barrier shutdown results in an exception being thrown at threads operating on it, which informs the threads that they must terminate.
     117This avoids the need to use a separate communication method other than the barrier, and avoids extra conditional checks on the fast path of the barrier implementation.
    118118Also note that in the Go version~\ref{l:go_chan_bar}, the size of the barrier channels has to be larger than in the \CFA version to ensure that the main thread does not block when attempting to clear the barrier.
    119119
    120120\begin{cfa}[tabsize=3,caption={\CFA channel barrier termination},label={l:cfa_chan_bar}]
    121121struct barrier {
    122     channel( int ) barWait;
    123     channel( int ) entryWait;
    124     int size;
     122        channel( int ) barWait;
     123        channel( int ) entryWait;
     124        int size;
    125125}
    126126void ?{}(barrier & this, int size) with(this) {
    127     barWait{size};
    128     entryWait{size};
    129     this.size = size;
    130     for ( j; size )
    131         insert( *entryWait, j );
     127        barWait{size};
     128        entryWait{size};
     129        this.size = size;
     130        for ( j; size )
     131                insert( *entryWait, j );
    132132}
    133133
    134134void flush(barrier & this) with(this) {
    135     close(barWait);
    136     close(entryWait);
     135        close(barWait);
     136        close(entryWait);
    137137}
    138138void wait(barrier & this) with(this) {
    139     int ticket = remove( *entryWait );
    140     if ( ticket == size - 1 ) {
    141         for ( j; size - 1 )
    142             insert( *barWait, j );
    143         return;
    144     }
    145     ticket = remove( *barWait );
    146 
    147     // last one out
    148     if ( size == 1 || ticket == size - 2 ) {
    149         for ( j; size )
    150             insert( *entryWait, j );
    151     }
     139        int ticket = remove( *entryWait );
     140        if ( ticket == size - 1 ) {
     141                for ( j; size - 1 )
     142                        insert( *barWait, j );
     143                return;
     144        }
     145        ticket = remove( *barWait );
     146
     147        // last one out
     148        if ( size == 1 || ticket == size - 2 ) {
     149                for ( j; size )
     150                        insert( *entryWait, j );
     151        }
    152152}
    153153barrier b{Tasks};
     
    155155// thread main
    156156void main(Task & this) {
    157     try {
    158         for ( ;; ) {
    159             wait( b );
    160         }
    161     } catch ( channel_closed * e ) {}
     157        try {
     158                for ( ;; ) {
     159                        wait( b );
     160                }
     161        } catch ( channel_closed * e ) {}
    162162}
    163163
    164164int main() {
    165     {
    166         Task t[Tasks];
    167 
    168         sleep(10`s);
    169         flush( b );
    170     } // wait for tasks to terminate
    171     return 0;
     165        {
     166                Task t[Tasks];
     167
     168                sleep(10`s);
     169                flush( b );
     170        } // wait for tasks to terminate
     171        return 0;
    172172}
    173173\end{cfa}
     
    176176
    177177struct barrier {
    178     channel( int ) barWait;
    179     channel( int ) entryWait;
    180     int size;
     178        channel( int ) barWait;
     179        channel( int ) entryWait;
     180        int size;
    181181}
    182182void ?{}(barrier & this, int size) with(this) {
    183     barWait{size + 1};
    184     entryWait{size + 1};
    185     this.size = size;
    186     for ( j; size )
    187         insert( *entryWait, j );
     183        barWait{size + 1};
     184        entryWait{size + 1};
     185        this.size = size;
     186        for ( j; size )
     187                insert( *entryWait, j );
    188188}
    189189
    190190void flush(barrier & this) with(this) {
    191     insert( *entryWait, -1 );
    192     insert( *barWait, -1 );
     191        insert( *entryWait, -1 );
     192        insert( *barWait, -1 );
    193193}
    194194void wait(barrier & this) with(this) {
    195     int ticket = remove( *entryWait );
    196     if ( ticket == -1 ) {
    197         insert( *entryWait, -1 );
    198         return;
    199     }
    200     if ( ticket == size - 1 ) {
    201         for ( j; size - 1 )
    202             insert( *barWait, j );
    203         return;
    204     }
    205     ticket = remove( *barWait );
    206     if ( ticket == -1 ) {
    207         insert( *barWait, -1 );
    208         return;
    209     }
    210 
    211     // last one out
    212     if ( size == 1 || ticket == size - 2 ) {
    213         for ( j; size )
    214             insert( *entryWait, j );
    215     }
     195        int ticket = remove( *entryWait );
     196        if ( ticket == -1 ) {
     197                insert( *entryWait, -1 );
     198                return;
     199        }
     200        if ( ticket == size - 1 ) {
     201                for ( j; size - 1 )
     202                        insert( *barWait, j );
     203                return;
     204        }
     205        ticket = remove( *barWait );
     206        if ( ticket == -1 ) {
     207                insert( *barWait, -1 );
     208                return;
     209        }
     210
     211        // last one out
     212        if ( size == 1 || ticket == size - 2 ) {
     213                for ( j; size )
     214                        insert( *entryWait, j );
     215        }
    216216}
    217217barrier b;
     
    220220// thread main
    221221void main(Task & this) {
    222     for ( ;; ) {
    223         if ( done ) break;
    224         wait( b );
    225     }
     222        for ( ;; ) {
     223                if ( done ) break;
     224                wait( b );
     225        }
    226226}
    227227
    228228int main() {
    229     {
    230         Task t[Tasks];
    231 
    232         sleep(10`s);
    233         done = true;
    234 
    235         flush( b );
    236     } // wait for tasks to terminate
    237     return 0;
     229        {
     230                Task t[Tasks];
     231
     232                sleep(10`s);
     233                done = true;
     234
     235                flush( b );
     236        } // wait for tasks to terminate
     237        return 0;
    238238}
    239239\end{cfa}
    240240
    241 In Listing~\ref{l:cfa_resume} an example of channel closing with resumption is used. 
    242 This program uses resumption in the \code{Consumer} thread main to ensure that all elements in the channel are removed before the consumer thread terminates. 
    243 The producer only has a \code{catch} so the moment it receives an exception it terminates, whereas the consumer will continue to remove from the closed channel via handling resumptions until the buffer is empty, which then throws a termination exception. 
     241In Listing~\ref{l:cfa_resume} an example of channel closing with resumption is used.
     242This program uses resumption in the \code{Consumer} thread main to ensure that all elements in the channel are removed before the consumer thread terminates.
     243The producer only has a \code{catch} so the moment it receives an exception it terminates, whereas the consumer will continue to remove from the closed channel via handling resumptions until the buffer is empty, which then throws a termination exception.
    244244If the same program was implemented in Go it would require explicit synchronization with both producers and consumers by some mechanism outside the channel to ensure that all elements were removed before task termination.
    245245
     
    249249// Consumer thread main
    250250void main(Consumer & this) {
    251     size_t runs = 0;
    252     try {
    253         for ( ;; ) {
    254             remove( chan );
    255         }
    256     } catchResume ( channel_closed * e ) {}
    257     catch ( channel_closed * e ) {}
     251        size_t runs = 0;
     252        try {
     253                for ( ;; ) {
     254                        remove( chan );
     255                }
     256        } catchResume ( channel_closed * e ) {}
     257        catch ( channel_closed * e ) {}
    258258}
    259259
    260260// Producer thread main
    261261void main(Producer & this) {
    262     int j = 0;
    263     try {
    264         for ( ;;j++ ) {
    265             insert( chan, j );
    266         }
    267     } catch ( channel_closed * e ) {}
     262        int j = 0;
     263        try {
     264                for ( ;;j++ ) {
     265                        insert( chan, j );
     266                }
     267        } catch ( channel_closed * e ) {}
    268268}
    269269
    270270int main( int argc, char * argv[] ) {
    271     {
    272         Consumers c[4];
    273         Producer p[4];
    274 
    275         sleep(10`s);
    276 
    277         for ( i; Channels )
    278             close( channels[i] );
    279     }
    280     return 0;
     271        {
     272                Consumers c[4];
     273                Producer p[4];
     274
     275                sleep(10`s);
     276
     277                for ( i; Channels )
     278                        close( channels[i] );
     279        }
     280        return 0;
    281281}
    282282\end{cfa}
     
    284284\section{Performance}
    285285
    286 Given that the base implementation of the \CFA channels is very similar to the Go implementation, this section aims to show that the performance of the two implementations are comparable. 
    287 One microbenchmark is conducted to compare Go and \CFA. 
    288 The benchmark is a ten second experiment where producers and consumers operate on a channel in parallel and throughput is measured. 
     289 The number of cores is varied to measure how throughput scales. 
    290 The cores are divided equally between producers and consumers, with one producer or consumer owning each core. 
    291 The results of the benchmark are shown in Figure~\ref{f:chanPerf}. 
    292 The performance of Go and \CFA channels on this microbenchmark is comparable. 
     286Given that the base implementation of the \CFA channels is very similar to the Go implementation, this section aims to show that the performance of the two implementations are comparable.
     287One microbenchmark is conducted to compare Go and \CFA.
     288The benchmark is a ten second experiment where producers and consumers operate on a channel in parallel and throughput is measured.
      289The number of cores is varied to measure how throughput scales.
     290The cores are divided equally between producers and consumers, with one producer or consumer owning each core.
     291The results of the benchmark are shown in Figure~\ref{f:chanPerf}.
     292The performance of Go and \CFA channels on this microbenchmark is comparable.
    293293Note, it is expected for the performance to decline as the number of cores increases as the channel operations all occur in a critical section so an increase in cores results in higher contention with no increase in parallelism.
    294294
    295295
    296296\begin{figure}
    297     \centering
    298     \begin{subfigure}{0.5\textwidth}
    299         \centering
    300         \scalebox{0.5}{\input{figures/nasus_Channel_Contention.pgf}}
    301         \subcaption{AMD \CFA Channel Benchmark}\label{f:chanAMD}
    302     \end{subfigure}\hfill
    303     \begin{subfigure}{0.5\textwidth}
    304         \centering
    305         \scalebox{0.5}{\input{figures/pyke_Channel_Contention.pgf}}
    306         \subcaption{Intel \CFA Channel Benchmark}\label{f:chanIntel}
    307     \end{subfigure}
    308     \caption{The channel contention benchmark comparing \CFA and Go channel throughput (higher is better).}
    309     \label{f:chanPerf}
     297        \centering
     298        \subfloat[AMD \CFA Channel Benchmark]{
     299                \resizebox{0.5\textwidth}{!}{\input{figures/nasus_Channel_Contention.pgf}}
     300                \label{f:chanAMD}
     301        }
     302        \subfloat[Intel \CFA Channel Benchmark]{
     303                \resizebox{0.5\textwidth}{!}{\input{figures/pyke_Channel_Contention.pgf}}
     304                \label{f:chanIntel}
     305        }
     306        \caption{The channel contention benchmark comparing \CFA and Go channel throughput (higher is better).}
     307        \label{f:chanPerf}
    310308\end{figure}
     309
     310% Local Variables: %
     311% tab-width: 4 %
     312% End: %
  • doc/theses/colby_parsons_MMAth/text/mutex_stmt.tex

    r2b01f8e ra085470  
    55% ======================================================================
    66
    7 The mutex statement is a concurrent language feature that aims to support easy lock usage.
    8 The mutex statement is in the form of a clause and following statement, similar to a loop or conditional statement.
    9 In the clause the mutex statement accepts a number of lockable objects, and then locks them for the duration of the following statement.
    10 The locks are acquired in a deadlock free manner and released using \gls{raii}.
    11 The mutex statement provides an avenue for easy lock usage in the common case where locks are used to wrap a critical section.
    12 Additionally, it provides the safety guarantee of deadlock-freedom, both by acquiring the locks in a deadlock-free manner, and by ensuring that the locks release on error, or normal program execution via \gls{raii}.
    13 
    14 \begin{cfa}[tabsize=3,caption={\CFA mutex statement usage},label={l:cfa_mutex_ex}]
     7The mutual exclusion problem was introduced by Dijkstra in 1965~\cite{Dijkstra65,Dijkstra65a}.
     8There are several concurrent processes or threads that communicate by shared variables and from time to time need exclusive access to shared resources.
     9A shared resource and code manipulating it form a pairing called a \Newterm{critical section (CS)}, which is a many-to-one relationship;
     10\eg if multiple files are being written to by multiple threads, only the pairings of simultaneous writes to the same files are CSs.
     11Regions of code where the thread is not interested in the resource are combined into the \Newterm{non-critical section (NCS)}.
     12
     13Exclusive access to a resource is provided by \Newterm{mutual exclusion (MX)}.
     14MX is implemented by some form of \emph{lock}, where the CS is bracketed by lock procedures @acquire@ and @release@.
     15Threads execute a loop of the form:
     16\begin{cfa}
     17loop of $thread$ p:
     18        NCS;
     19        acquire( lock );  CS;  release( lock ); // protected critical section with MX
     20end loop.
     21\end{cfa}
     22MX guarantees there is never more than one thread in the CS.
     23MX must also guarantee eventual progress: when there are competing threads attempting access, eventually some competing thread succeeds, \ie acquires the CS, releases it, and returns to the NCS.
     24% Lamport \cite[p.~329]{Lam86mx} extends this requirement to the exit protocol.
     25A stronger constraint is that every thread that calls @acquire@ eventually succeeds after some reasonable bounded time.
     26
     27\section{Monitor}
      28\CFA provides a high-level locking object, called a \Newterm{monitor}, an elegant, efficient, high-level mechanism for mutual exclusion and synchronization for shared-memory systems.
     29First proposed by Brinch Hansen~\cite{Hansen73} and later described and extended by C.A.R.~Hoare~\cite{Hoare74}, several concurrent programming languages provide monitors as an explicit language construct: \eg Concurrent Pascal~\cite{ConcurrentPascal}, Mesa~\cite{Mesa}, Turing~\cite{Turing:old}, Modula-3~\cite{Modula-3}, \uC~\cite{Buhr92a} and Java~\cite{Java}.
     30In addition, operating-system kernels and device drivers have a monitor-like structure, although they often use lower-level primitives such as mutex locks or semaphores to manually implement a monitor.
     31
     32Figure~\ref{f:AtomicCounter} shows a \CFA and Java monitor implementing an atomic counter.
     33A \Newterm{monitor} is a programming technique that implicitly binds mutual exclusion to static function scope by call and return.
     34Lock mutual exclusion, defined by acquire/release calls, is independent of lexical context (analogous to block versus heap storage allocation).
     35Restricting acquire and release points in a monitor eases programming, comprehension, and maintenance, at a slight cost in flexibility and efficiency.
     36Ultimately, a monitor is implemented using a combination of basic locks and atomic instructions.
     37
     38\begin{figure}
     39\centering
     40
     41\begin{lrbox}{\myboxA}
     42\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     43@monitor@ Aint {
     44        int cnt;
     45};
     46int ++?( Aint & @mutex@ m ) { return ++m.cnt; }
     47int ?=?( Aint & @mutex@ l, int r ) { l.cnt = r; }
     48int ?=?(int & l, Aint & r) { l = r.cnt; }
     49
     50int i = 0, j = 0;
     51Aint x = { 0 }, y = { 0 };      $\C[1.5in]{// no mutex}$
     52++x;  ++y;                      $\C{// mutex}$
     53x = 2;  y = i;          $\C{// mutex}$
     54i = x;  j = y;          $\C{// no mutex}\CRT$
     55\end{cfa}
     56\end{lrbox}
     57
     58\begin{lrbox}{\myboxB}
     59\begin{java}[aboveskip=0pt,belowskip=0pt]
     60class Aint {
     61        private int cnt;
     62        public Aint( int init ) { cnt = init; }
     63        @synchronized@ public int inc() { return ++cnt; }
     64        @synchronized@ public void set( int r ) {cnt = r;}
     65        public int get() { return cnt; }
     66}
     67int i = 0, j = 0;
     68Aint x = new Aint( 0 ), y = new Aint( 0 );
     69x.inc();  y.inc();
     70x.set( 2 );  y.set( i );
     71i = x.get();  j = y.get();
     72\end{java}
     73\end{lrbox}
     74
     75\subfloat[\CFA]{\label{f:AtomicCounterCFA}\usebox\myboxA}
     76\hspace*{3pt}
     77\vrule
     78\hspace*{3pt}
     79\subfloat[Java]{\label{f:AtomicCounterJava}\usebox\myboxB}
     80\caption{Atomic integer counter}
     81\label{f:AtomicCounter}
     82\end{figure}
     83
     84Like Java, \CFA monitors have \Newterm{multi-acquire} semantics so the thread in the monitor may acquire it multiple times without deadlock, allowing recursion and calling other MX functions.
     85For robustness, \CFA monitors ensure the monitor lock is released regardless of how an acquiring function ends, normal or exceptional, and returning a shared variable is safe via copying before the lock is released.
     86Monitor objects can be passed through multiple helper functions without acquiring mutual exclusion, until a designated function associated with the object is called.
     87\CFA functions are designated MX by one or more pointer/reference parameters having qualifier @mutex@.
     88Java members are designated MX with \lstinline[language=java]{synchronized}, which applies only to the implicit receiver parameter.
     89In the example, the increment and setter operations need mutual exclusion, while the read-only getter operation is not MX because reading an integer is atomic.
     90
     91As stated, the non-object-oriented nature of \CFA monitors allows a function to acquire multiple mutex objects.
     92For example, the bank-transfer problem requires locking two bank accounts to safely debit and credit money between accounts.
     93\begin{cfa}
     94monitor BankAccount {
     95        int balance;
     96};
     97void deposit( BankAccount & mutex b, int deposit ) with( b ) {
     98        balance += deposit;
     99}
     100void transfer( BankAccount & mutex my, BankAccount & mutex your, int me2you ) {
     101        deposit( my, -me2you );         $\C{// debit}$
     102        deposit( your, me2you );        $\C{// credit}$
     103}
     104\end{cfa}
     105The \CFA monitor implementation ensures multi-lock acquisition is done in a deadlock-free manner regardless of the number of MX parameters and monitor arguments.
     106
     107
     108\section{\lstinline{mutex} statement}
     109Restricting implicit lock acquisition to function entry and exit can be awkward for certain problems.
     110To increase locking flexibility, some languages introduce a mutex statement.
     111\VRef[Figure]{f:ReadersWriter} shows the outline of a reader/writer lock written as a \CFA monitor and mutex statements.
      112(The exact lock implementation is irrelevant.)
      113The @read@ and @write@ functions are called with a reader/writer lock and any arguments to perform reading or writing.
     114The @read@ function is not MX because multiple readers can read simultaneously.
     115MX is acquired within @read@ by calling the (nested) helper functions @StartRead@ and @EndRead@ or executing the mutex statements.
      116Between the calls or statements, reads can execute simultaneously within the body of @read@.
     117The @write@ function does not require refactoring because writing is a CS.
     118The mutex-statement version is better because it has fewer names, less argument/parameter passing, and can possibly hold MX for a shorter duration.
     119
     120\begin{figure}
     121\centering
     122
     123\begin{lrbox}{\myboxA}
     124\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     125monitor RWlock { ... };
     126void read( RWlock & rw, ... ) {
     127        void StartRead( RWlock & @mutex@ rw ) { ... }
     128        void EndRead( RWlock & @mutex@ rw ) { ... }
     129        StartRead( rw );
     130        ... // read without MX
     131        EndRead( rw );
     132}
     133void write( RWlock & @mutex@ rw, ... ) {
     134        ... // write with MX
     135}
     136\end{cfa}
     137\end{lrbox}
     138
     139\begin{lrbox}{\myboxB}
     140\begin{cfa}[aboveskip=0pt,belowskip=0pt]
     141
     142void read( RWlock & rw, ... ) {
     143
     144
     145        @mutex@( rw ) { ... }
     146        ... // read without MX
      147	@mutex@( rw ) { ... }
     148}
     149void write( RWlock & @mutex@ rw, ... ) {
     150        ... // write with MX
     151}
     152\end{cfa}
     153\end{lrbox}
     154
     155\subfloat[monitor]{\label{f:RWmonitor}\usebox\myboxA}
     156\hspace*{3pt}
     157\vrule
     158\hspace*{3pt}
     159\subfloat[mutex statement]{\label{f:RWmutexstmt}\usebox\myboxB}
     160\caption{Readers writer problem}
     161\label{f:ReadersWriter}
     162\end{figure}
     163
     164This work adds a mutex statement to \CFA, but generalizes it beyond implicit monitor locks.
     165In detail, the mutex statement has a clause and statement block, similar to a conditional or loop statement.
     166The clause accepts any number of lockable objects (like a \CFA MX function prototype), and locks them for the duration of the statement.
     167The locks are acquired in a deadlock free manner and released regardless of how control-flow exits the statement.
     168The mutex statement provides easy lock usage in the common case of lexically wrapping a CS.
     169Examples of \CFA mutex statement are shown in \VRef[Listing]{l:cfa_mutex_ex}.
     170
     171\begin{cfa}[caption={\CFA mutex statement usage},label={l:cfa_mutex_ex}]
    15172owner_lock lock1, lock2, lock3;
    16 int count = 0;
    17 mutex( lock1, lock2, lock3 ) {
    18     // can use block statement
    19     // ...
    20 }
    21 mutex( lock2, lock3 ) count++; // or inline statement
     173@mutex@( lock2, lock3 ) ...;    $\C{// inline statement}$
     174@mutex@( lock1, lock2, lock3 ) { ... }  $\C{// statement block}$
     175void transfer( BankAccount & my, BankAccount & your, int me2you ) {
     176        ... // check values, no MX
      177	@mutex@( my, your ) { // MX is shorter duration than function body
     178                deposit( my, -me2you );  $\C{// debit}$
     179                deposit( your, me2you ); $\C{// credit}$
     180        }
     181}
    22182\end{cfa}
    23183
    24184\section{Other Languages}
    25 There are similar concepts to the mutex statement that exist in other languages.
    26 Java has a feature called a synchronized statement, which looks identical to \CFA's mutex statement, but it has some differences.
    27 The synchronized statement only accepts a single object in its clause.
    28 Any object can be passed to the synchronized statement in Java since all objects in Java are monitors, and the synchronized statement acquires that object's monitor.
    29 In \CC there is a feature in the standard library \code{<mutex>} header called scoped\_lock, which is also similar to the mutex statement.
    30 The scoped\_lock is a class that takes in any number of locks in its constructor, and acquires them in a deadlock-free manner.
    31 It then releases them when the scoped\_lock object is deallocated, thus using \gls{raii}.
    32 An example of \CC scoped\_lock usage is shown in Listing~\ref{l:cc_scoped_lock}.
    33 
    34 \begin{cfa}[tabsize=3,caption={\CC scoped\_lock usage},label={l:cc_scoped_lock}]
    35 std::mutex lock1, lock2, lock3;
    36 {
    37     scoped_lock s( lock1, lock2, lock3 )
    38     // locks are released via raii at end of scope
    39 }
     185There are similar constructs to the mutex statement in other programming languages.
      186Java has a feature called a synchronized statement, which looks like \CFA's mutex statement, but only accepts a single object in the clause and only handles monitor locks.
     187The \CC standard library has a @scoped_lock@, which is also similar to the mutex statement.
     188The @scoped_lock@ takes any number of locks in its constructor, and acquires them in a deadlock-free manner.
     189It then releases them when the @scoped_lock@ object is deallocated using \gls{raii}.
     190An example of \CC @scoped_lock@ is shown in \VRef[Listing]{l:cc_scoped_lock}.
     191
     192\begin{cfa}[caption={\CC \lstinline{scoped_lock} usage},label={l:cc_scoped_lock}]
     193struct BankAccount {
     194        @recursive_mutex m;@            $\C{// must be recursive}$
     195        int balance = 0;
     196};
     197void deposit( BankAccount & b, int deposit ) {
     198        @scoped_lock lock( b.m );@      $\C{// RAII acquire}$
     199        b.balance += deposit;
     200}                                                               $\C{// RAII release}$
     201void transfer( BankAccount & my, BankAccount & your, int me2you ) {
     202        @scoped_lock lock( my.m, your.m );@     $\C{// RAII acquire}$
     203        deposit( my, -me2you );         $\C{// debit}$
     204        deposit( your, me2you );        $\C{// credit}$
     205}                                                               $\C{// RAII release}$
    40206\end{cfa}
    41207
    42208\section{\CFA implementation}
    43 The \CFA mutex statement takes some ideas from both the Java and \CC features.
    44 The mutex statement can acquire more that one lock in a deadlock-free manner, and releases them via \gls{raii} like \CC, however the syntax is identical to the Java synchronized statement.
    45 This syntactic choice was made so that the body of the mutex statement is its own scope.
    46 Compared to the scoped\_lock, which relies on its enclosing scope, the mutex statement's introduced scope can provide visual clarity as to what code is being protected by the mutex statement, and where the mutual exclusion ends.
    47 \CFA's mutex statement and \CC's scoped\_lock both use parametric polymorphism to allow user defined types to work with the feature.
    48 \CFA's implementation requires types to support the routines \code{lock()} and \code{unlock()}, whereas \CC requires those routines, plus \code{try_lock()}.
    49 The scoped\_lock requires an additional routine since it differs from the mutex statement in how it implements deadlock avoidance.
    50 
    51 The parametric polymorphism allows for locking to be defined for types that may want convenient mutual exclusion.
    52 An example of one such use case in \CFA is \code{sout}.
    53 The output stream in \CFA is called \code{sout}, and functions similarly to \CC's \code{cout}.
    54 \code{sout} has routines that satisfy the mutex statement trait, so the mutex statement can be used to lock the output stream while producing output.
    55 In this case, the mutex statement allows the programmer to acquire mutual exclusion over an object without having to know the internals of the object or what locks need to be acquired.
    56 The ability to do so provides both improves safety and programmer productivity since it abstracts away the concurrent details and provides an interface for optional thread-safety.
    57 This is a commonly used feature when producing output from a concurrent context, since producing output is not thread safe by default.
    58 This use case is shown in Listing~\ref{l:sout}.
    59 
    60 \begin{cfa}[tabsize=3,caption={\CFA sout with mutex statement},label={l:sout}]
    61 mutex( sout )
    62     sout | "This output is protected by mutual exclusion!";
    63 \end{cfa}
    64 
    65 \section{Deadlock Avoidance}
    66 The mutex statement uses the deadlock prevention technique of lock ordering, where the circular-wait condition of a deadlock cannot occur if all locks are acquired in the same order.
    67 The scoped\_lock uses a deadlock avoidance algorithm where all locks after the first are acquired using \code{try_lock} and if any of the attempts to lock fails, all locks so far are released.
    68 This repeats until all locks are acquired successfully.
    69 The deadlock avoidance algorithm used by scoped\_lock is shown in Listing~\ref{l:cc_deadlock_avoid}.
    70 The algorithm presented is taken directly from the source code of the \code{<mutex>} header, with some renaming and comments for clarity.
    71 
    72 \begin{cfa}[caption={\CC scoped\_lock deadlock avoidance algorithm},label={l:cc_deadlock_avoid}]
     209The \CFA mutex statement takes some ideas from both the Java and \CC features.
     210Like Java, \CFA introduces a new statement rather than building from existing language features.
     211(\CFA has sufficient language features to mimic \CC RAII locking.)
     212This syntactic choice makes MX explicit rather than implicit via object declarations.
     213Hence, it is easier for programmers and language tools to identify MX points in a program, \eg scan for all @mutex@ parameters and statements in a body of code.
     214Furthermore, concurrent safety is provided across an entire program for the complex operation of acquiring multiple locks in a deadlock-free manner.
     215Unlike Java, \CFA's mutex statement and \CC's @scoped_lock@ both use parametric polymorphism to allow user defined types to work with this feature.
     216In this case, the polymorphism allows a locking mechanism to acquire MX over an object without having to know the object internals or what kind of lock it is using.
      217\CFA provides and uses this locking trait:
     218\begin{cfa}
     219forall( L & | sized(L) )
     220trait is_lock {
     221        void lock( L & );
     222        void unlock( L & );
     223};
     224\end{cfa}
     225\CC @scoped_lock@ has this trait implicitly based on functions accessed in a template.
     226@scoped_lock@ also requires @try_lock@ because of its technique for deadlock avoidance \see{\VRef{s:DeadlockAvoidance}}.
     227
     228The following shows how the @mutex@ statement is used with \CFA streams to eliminate unpredictable results when printing in a concurrent program.
     229For example, if two threads execute:
     230\begin{cfa}
     231thread$\(_1\)$ : sout | "abc" | "def";
     232thread$\(_2\)$ : sout | "uvw" | "xyz";
     233\end{cfa}
      234any of the outputs can appear, including a segmentation fault due to I/O buffer corruption:
     235\begin{cquote}
     236\small\tt
     237\begin{tabular}{@{}l|l|l|l|l@{}}
     238abc def & abc uvw xyz & uvw abc xyz def & abuvwc dexf &  uvw abc def \\
     239uvw xyz & def & & yz & xyz
     240\end{tabular}
     241\end{cquote}
     242The stream type for @sout@ is defined to satisfy the @is_lock@ trait, so the @mutex@ statement can be used to lock an output stream while producing output.
     243From the programmer's perspective, it is sufficient to know an object can be locked and then any necessary MX is easily available via the @mutex@ statement.
     244This ability improves safety and programmer productivity since it abstracts away the concurrent details.
     245Hence, a  programmer can easily protect cascaded I/O expressions:
     246\begin{cfa}
     247thread$\(_1\)$ : mutex( sout )  sout | "abc" | "def";
     248thread$\(_2\)$ : mutex( sout )  sout | "uvw" | "xyz";
     249\end{cfa}
     250constraining the output to two different lines in either order:
     251\begin{cquote}
     252\small\tt
     253\begin{tabular}{@{}l|l@{}}
     254abc def & uvw xyz \\
     255uvw xyz & abc def
     256\end{tabular}
     257\end{cquote}
     258where this level of safe nondeterministic output is acceptable.
     259Alternatively, multiple I/O statements can be protected using the mutex statement block:
     260\begin{cfa}
     261mutex( sout ) { // acquire stream lock for sout for block duration
     262        sout | "abc";
     263        mutex( sout ) sout | "uvw" | "xyz"; // OK because sout lock is recursive
     264        sout | "def";
     265} // implicitly release sout lock
     266\end{cfa}
     267The inner lock acquire is likely to occur through a function call that does a thread-safe print.
     268
     269\section{Deadlock Avoidance}\label{s:DeadlockAvoidance}
     270The mutex statement uses the deadlock avoidance technique of lock ordering, where the circular-wait condition of a deadlock cannot occur if all locks are acquired in the same order.
     271The @scoped_lock@ uses a deadlock avoidance algorithm where all locks after the first are acquired using @try_lock@ and if any of the lock attempts fail, all acquired locks are released.
     272This repeats after selecting a new starting point in a cyclic manner until all locks are acquired successfully.
     273This deadlock avoidance algorithm is shown in Listing~\ref{l:cc_deadlock_avoid}.
     274The algorithm is taken directly from the source code of the @<mutex>@ header, with some renaming and comments for clarity.
     275
     276\begin{cfa}[caption={\CC \lstinline{scoped_lock} deadlock avoidance algorithm},label={l:cc_deadlock_avoid}]
    73277int first = 0;  // first lock to attempt to lock
    74278do {
    75     // locks is the array of locks to acquire
    76     locks[first].lock();    // lock first lock
    77     for (int i = 1; i < Num_Locks; ++i) {   // iterate over rest of locks
    78         const int idx = (first + i) % Num_Locks;
    79         if (!locks[idx].try_lock()) {       // try lock each one
    80             for (int j = i; j != 0; --j)    // release all locks
    81                 locks[(first + j - 1) % Num_Locks].unlock();
    82             first = idx;    // rotate which lock to acquire first
    83             break;
    84         }
    85     }
     279        // locks is the array of locks to acquire
     280        locks[first].lock();                            $\C{// lock first lock}$
     281        for ( int i = 1; i < Num_Locks; i += 1 ) { $\C{// iterate over rest of locks}$
     282                const int idx = (first + i) % Num_Locks;
     283                if ( ! locks[idx].try_lock() ) {   $\C{// try lock each one}$
     284                        for ( int j = i; j != 0; j -= 1 )       $\C{// release all locks}$
     285                                locks[(first + j - 1) % Num_Locks].unlock();
     286                        first = idx;                            $\C{// rotate which lock to acquire first}$
     287                        break;
     288                }
     289        }
    86290// if first lock is still held then all have been acquired
    87 } while (!locks[first].owns_lock());  // is first lock held?
    88 \end{cfa}
    89 
    90 The algorithm in \ref{l:cc_deadlock_avoid} successfully avoids deadlock, however there is a potential livelock scenario.
    91 Given two threads $A$ and $B$, who create a scoped\_lock with two locks $L1$ and $L2$, a livelock can form as follows.
    92 Thread $A$ creates a scoped\_lock with $L1$, $L2$, and $B$ creates a scoped lock with the order $L2$, $L1$.
    93 Both threads acquire the first lock in their order and then fail the try\_lock since the other lock is held.
    94 They then reset their start lock to be their 2nd lock and try again.
    95 This time $A$ has order $L2$, $L1$, and $B$ has order $L1$, $L2$.
    96 This is identical to the starting setup, but with the ordering swapped among threads.
    97 As such, if they each acquire their first lock before the other acquires their second, they can livelock indefinitely.
    98 
    99 The lock ordering algorithm used in the mutex statement in \CFA is both deadlock and livelock free.
    100 It sorts the locks based on memory address and then acquires them.
    101 For locks fewer than 7, it sorts using hard coded sorting methods that perform the minimum number of swaps for a given number of locks.
    102 For 7 or more locks insertion sort is used.
    103 These sorting algorithms were chosen since it is rare to have to hold more than  a handful of locks at a time.
    104 It is worth mentioning that the downside to the sorting approach is that it is not fully compatible with usages of the same locks outside the mutex statement.
    105 If more than one lock is held by a mutex statement, if more than one lock is to be held elsewhere, it must be acquired via the mutex statement, or else the required ordering will not occur.
    106 Comparitively, if the scoped\_lock is used and the same locks are acquired elsewhere, there is no concern of the scoped\_lock deadlocking, due to its avoidance scheme, but it may livelock.
     291} while ( ! locks[first].owns_lock() );  $\C{// is first lock held?}$
     292\end{cfa}
     293
      294While the algorithm in \VRef[Listing]{l:cc_deadlock_avoid} successfully avoids deadlock, there is a livelock scenario.
     295Assume two threads, $A$ and $B$, create a @scoped_lock@ accessing two locks, $L1$ and $L2$.
     296A livelock can form as follows.
     297Thread $A$ creates a @scoped_lock@ with arguments $L1$, $L2$, and $B$ creates a scoped lock with the lock arguments in the opposite order $L2$, $L1$.
     298Both threads acquire the first lock in their order and then fail the @try_lock@ since the other lock is held.
     299Both threads then reset their starting lock to be their second lock and try again.
     300This time $A$ has order $L2$, $L1$, and $B$ has order $L1$, $L2$, which is identical to the starting setup but with the ordering swapped between threads.
     301If the threads perform this action in lock-step, they cycle indefinitely without entering the CS, \ie livelock.
     302Hence, to use @scoped_lock@ safely, a programmer must manually construct and maintain a global ordering of lock arguments passed to @scoped_lock@.
     303
     304The lock ordering algorithm used in \CFA mutex functions and statements is deadlock and livelock free.
     305The algorithm uses the lock memory addresses as keys, sorts the keys, and then acquires the locks in sorted order.
     306For fewer than 7 locks ($2^3-1$), the sort is unrolled performing the minimum number of compare and swaps for the given number of locks;
     307for 7 or more locks, insertion sort is used.
     308Since it is extremely rare to hold more than 6 locks at a time, the algorithm is fast and executes in $O(1)$ time.
     309Furthermore, lock addresses are unique across program execution, even for dynamically allocated locks, so the algorithm is safe across the entire program execution.
     310
      311The downside to the sorting approach is that it is not fully compatible with manual usages of the same locks outside the @mutex@ statement, \ie the locks are acquired without using the @mutex@ statement.
     312The following scenario is a classic deadlock.
     313\begin{cquote}
     314\begin{tabular}{@{}l@{\hspace{30pt}}l@{}}
     315\begin{cfa}
     316lock L1, L2; // assume &L1 < &L2
     317        $\textbf{thread\(_1\)}$
     318acquire( L2 );
     319        acquire( L1 );
     320                CS
     321        release( L1 );
     322release( L2 );
     323\end{cfa}
     324&
     325\begin{cfa}
     326
     327        $\textbf{thread\(_2\)}$
     328mutex( L1, L2 ) {
     329
     330        CS
     331
     332}
     333\end{cfa}
     334\end{tabular}
     335\end{cquote}
     336Comparatively, if the @scoped_lock@ is used and the same locks are acquired elsewhere, there is no concern of the @scoped_lock@ deadlocking, due to its avoidance scheme, but it may livelock.
     337The convenience and safety of the @mutex@ statement, \eg guaranteed lock release with exceptions, should encourage programmers to always use it for locking, mitigating any deadlock scenario.
     338
     339\section{Performance}
      340Given the two multi-acquisition algorithms in \CC and \CFA, each with differing advantages and disadvantages, it is interesting to compare their performance.
     341Comparison with Java is not possible, since it only takes a single lock.
     342
     343The comparison starts with a baseline that acquires the locks directly without a mutex statement or @scoped_lock@ in a fixed ordering and then releases them.
     344The baseline helps highlight the cost of the deadlock avoidance/prevention algorithms for each implementation.
     345
     346The benchmark used to evaluate the avoidance algorithms repeatedly acquires a fixed number of locks in a random order and then releases them.
     347The pseudo code for the deadlock avoidance benchmark is shown in \VRef[Listing]{l:deadlock_avoid_pseudo}.
     348To ensure the comparison exercises the implementation of each lock avoidance algorithm, an identical spinlock is implemented in each language using a set of builtin atomics available in both \CC and \CFA.
     349The benchmarks are run for a fixed duration of 10 seconds and then terminate.
     350The total number of times the group of locks is acquired is returned for each thread.
     351Each variation is run 11 times on 2, 4, 8, 16, 24, 32 cores and with 2, 4, and 8 locks being acquired.
     352The median is calculated and is plotted alongside the 95\% confidence intervals for each point.
     353
      354\begin{cfa}[caption={Deadlock avoidance benchmark pseudo code},label={l:deadlock_avoid_pseudo}]
     355
     356
     357
     358$\PAB{// add pseudo code}$
     359
     360
     361
     362\end{cfa}
     363
     364The performance experiments were run on the following multi-core hardware systems to determine differences across platforms:
     365\begin{list}{\arabic{enumi}.}{\usecounter{enumi}\topsep=5pt\parsep=5pt\itemsep=0pt}
     366% sudo dmidecode -t system
     367\item
     368Supermicro AS--1123US--TR4 AMD EPYC 7662 64--core socket, hyper-threading $\times$ 2 sockets (256 processing units) 2.0 GHz, TSO memory model, running Linux v5.8.0--55--generic, gcc--10 compiler
     369\item
     370Supermicro SYS--6029U--TR4 Intel Xeon Gold 5220R 24--core socket, hyper-threading $\times$ 2 sockets (48 processing units) 2.2GHz, TSO memory model, running Linux v5.8.0--59--generic, gcc--10 compiler
     371\end{list}
     372%The hardware architectures are different in threading (multithreading vs hyper), cache structure (MESI or MESIF), NUMA layout (QPI vs HyperTransport), memory model (TSO vs WO), and energy/thermal mechanisms (turbo-boost).
     373%Software that runs well on one architecture may run poorly or not at all on another.
     374
     375Figure~\ref{f:mutex_bench} shows the results of the benchmark experiments.
     376\PAB{Make the points in the graphs for each line different.
     377Also, make the text in the graphs larger.}
     378The baseline results for both languages are mostly comparable, except for the 8 locks results in \ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel}, where the \CFA baseline is slightly slower.
     379The avoidance result for both languages is significantly different, where \CFA's mutex statement achieves throughput that is magnitudes higher than \CC's @scoped_lock@.
     380The slowdown for @scoped_lock@ is likely due to its deadlock-avoidance implementation.
     381Since it uses a retry based mechanism, it can take a long time for threads to progress.
     382Additionally the potential for livelock in the algorithm can result in very little throughput under high contention.
     383For example, on the AMD machine with 32 threads and 8 locks, the benchmarks would occasionally livelock indefinitely, with no threads making any progress for 3 hours before the experiment was terminated manually.
     384It is likely that shorter bouts of livelock occurred in many of the experiments, which would explain large confidence intervals for some of the data points in the \CC data.
     385In Figures~\ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel} the mutex statement performs better than the baseline.
     386At 7 locks and above the mutex statement switches from a hard coded sort to insertion sort.
     387It is likely that the improvement in throughput compared to baseline is due to the time spent in the insertion sort, which decreases contention on the locks.
    107388
    108389\begin{figure}
    109     \centering
    110     \begin{subfigure}{0.5\textwidth}
    111         \centering
    112         \scalebox{0.5}{\input{figures/nasus_Aggregate_Lock_2.pgf}}
    113         \subcaption{AMD}
    114     \end{subfigure}\hfill
    115     \begin{subfigure}{0.5\textwidth}
    116         \centering
    117         \scalebox{0.5}{\input{figures/pyke_Aggregate_Lock_2.pgf}}
    118         \subcaption{Intel}
    119     \end{subfigure}
    120 
    121     \begin{subfigure}{0.5\textwidth}
    122         \centering
    123         \scalebox{0.5}{\input{figures/nasus_Aggregate_Lock_4.pgf}}
    124         \subcaption{AMD}
    125     \end{subfigure}\hfill
    126     \begin{subfigure}{0.5\textwidth}
    127         \centering
    128         \scalebox{0.5}{\input{figures/pyke_Aggregate_Lock_4.pgf}}
    129         \subcaption{Intel}
    130     \end{subfigure}
    131 
    132     \begin{subfigure}{0.5\textwidth}
    133         \centering
    134         \scalebox{0.5}{\input{figures/nasus_Aggregate_Lock_8.pgf}}
    135         \subcaption{AMD}\label{f:mutex_bench8_AMD}
    136     \end{subfigure}\hfill
    137     \begin{subfigure}{0.5\textwidth}
    138         \centering
    139         \scalebox{0.5}{\input{figures/pyke_Aggregate_Lock_8.pgf}}
    140         \subcaption{Intel}\label{f:mutex_bench8_Intel}
    141     \end{subfigure}
    142     \caption{The aggregate lock benchmark comparing \CC scoped\_lock and \CFA mutex statement throughput (higher is better).}
    143     \label{f:mutex_bench}
     390        \centering
     391        \subfloat[AMD]{
     392                \resizebox{0.5\textwidth}{!}{\input{figures/nasus_Aggregate_Lock_2.pgf}}
     393        }
     394        \subfloat[Intel]{
     395                \resizebox{0.5\textwidth}{!}{\input{figures/pyke_Aggregate_Lock_2.pgf}}
     396        }
     397
     398        \subfloat[AMD]{
     399                \resizebox{0.5\textwidth}{!}{\input{figures/nasus_Aggregate_Lock_4.pgf}}
     400        }
     401        \subfloat[Intel]{
     402                \resizebox{0.5\textwidth}{!}{\input{figures/pyke_Aggregate_Lock_4.pgf}}
     403        }
     404
     405        \subfloat[AMD]{
     406                \resizebox{0.5\textwidth}{!}{\input{figures/nasus_Aggregate_Lock_8.pgf}}
     407                \label{f:mutex_bench8_AMD}
     408        }
     409        \subfloat[Intel]{
     410                \resizebox{0.5\textwidth}{!}{\input{figures/pyke_Aggregate_Lock_8.pgf}}
     411                \label{f:mutex_bench8_Intel}
     412        }
     413        \caption{The aggregate lock benchmark comparing \CC \lstinline{scoped_lock} and \CFA mutex statement throughput (higher is better).}
     414        \label{f:mutex_bench}
    144415\end{figure}
    145416
    146 \section{Performance}
    147 Performance is compared between \CC's scoped\_lock and \CFA's mutex statement.
    148 Comparison with Java is omitted, since it only takes a single lock.
    149 To ensure that the comparison between \CC and \CFA exercises the implementation of each feature, an identical spinlock is implemented in each language using a set of builtin atomics available in both \CFA and \CC.
    150 Each feature is evaluated on a benchmark which acquires a fixed number of locks in a random order and then releases them.
    151 A baseline is included that acquires the locks directly without a mutex statement or scoped\_lock in a fixed ordering and then releases them.
    152 The baseline helps highlight the cost of the deadlock avoidance/prevention algorithms for each implementation.
    153 The benchmarks are run for a fixed duration of 10 seconds and then terminate and return the total number of times the group of locks were acquired.
     155 Each variation is run 11 times on a variety of core counts up to 32 cores and with 2, 4, and 8 locks being acquired.
    155 The median is calculated and is plotted alongside the 95\% confidence intervals for each point.
    156 
    157 Figure~\ref{f:mutex_bench} shows the results of the benchmark.
    158 The baseline runs for both languages are mostly comparable, except for the 8 locks results in \ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel}, where the \CFA baseline is slower.
    159 \CFA's mutex statement achieves throughput that is magnitudes higher than \CC's scoped\_lock.
    160 This is likely due to the scoped\_lock deadlock avoidance implementation.
    161 Since it uses a retry based mechanism, it can take a long time for threads to progress.
     162 Additionally, the potential for livelock in the algorithm can result in very little throughput under high contention.
    163 It was observed on the AMD machine that with 32 threads and 8 locks the benchmarks would occasionally livelock indefinitely, with no threads making any progress for 3 hours before the experiment was terminated manually.
     164 It is likely that shorter bouts of livelock occurred in many of the experiments, which would explain large confidence intervals for some of the data points in the \CC data.
    165 In Figures~\ref{f:mutex_bench8_AMD} and \ref{f:mutex_bench8_Intel} the mutex statement performs better than the baseline.
    166 At 7 locks and above the mutex statement switches from a hard coded sort to insertion sort.
    167 It is likely that the improvement in throughput compared to baseline is due to the time spent in the insertion sort, which decreases contention on the locks.
     417% Local Variables: %
     418% tab-width: 4 %
     419% End: %
  • doc/theses/colby_parsons_MMAth/thesis.tex

    r2b01f8e ra085470  
    8484\usepackage{tikz} % for diagrams and figures
    8585\def\checkmark{\tikz\fill[scale=0.4](0,.35) -- (.25,0) -- (1,.7) -- (.25,.15) -- cycle;}
    86 \usepackage{subcaption}
    8786\usepackage{fullpage,times,comment}
    8887\usepackage{textcomp}
    8988\usepackage{graphicx}
    9089\usepackage{tabularx}
     90\usepackage[labelformat=simple,aboveskip=0pt,farskip=0pt,font=normalsize]{subfig}
     91\renewcommand\thesubfigure{(\alph{subfigure})}
    9192\input{style}
    9293
Note: See TracChangeset for help on using the changeset viewer.