Changeset 2f19e03


Timestamp:
Jun 15, 2021, 12:28:48 PM (3 years ago)
Author:
Andrew Beach <ajbeach@…>
Branches:
ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
b51e389c
Parents:
4aba055 (diff), 4f1b8f3f (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into andrew-mmath, collecting updates.

Files:
7 added
20 edited
1 moved

  • Jenkins/Distribute

    r4aba055 r2f19e03  
    22
    33import groovy.transform.Field
    4 
    5 // For skipping stages
    6 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
    74
    85//===========================================================================================================
     
    107//===========================================================================================================
    118
    12 node('master') {
    13         // Globals
    14         BuildDir  = pwd tmp: true
    15         SrcDir    = pwd tmp: false
    16         Settings  = null
    17         Version   = ''
     9// Globals
     10BuildDir  = null
     11SrcDir    = null
     12Settings  = null
     13Version   = ''
    1814
    19         // Local variables
    20         def err = null
    21         def log_needed = false
     15// Local variables
     16def err = null
     17def log_needed = false
    2218
    23         currentBuild.result = "SUCCESS"
     19currentBuild.result = "SUCCESS"
     20
     21final commit, build
     22node {
    2423
    2524        //Wrap build to add timestamp to command line
    2625        wrap([$class: 'TimestamperBuildWrapper']) {
     26                (commit, build) = prepare_build()
     27        }
     28}
    2729
    28                 final commit, build
    29                 (commit, build) = prepare_build()
    30 
    31                 node('x64') {
    32                         BuildDir  = pwd tmp: true
    33                         SrcDir    = pwd tmp: false
    34 
    35                         Tools.Clean()
    36 
    37                         Tools.Checkout( commit )
    38 
    39                         Version = GetVersion( build )
    40 
    41                         Configure()
    42 
    43                         Package()
    44 
    45                         Test()
    46 
    47                         Archive()
    48                 }
    49 
    50                 // Update the build directories when exiting the node
     30node('x64') {
     31        //Wrap build to add timestamp to command line
     32        wrap([$class: 'TimestamperBuildWrapper']) {
    5133                BuildDir  = pwd tmp: true
    5234                SrcDir    = pwd tmp: false
     35
     36                Tools.Clean()
     37
     38                Tools.Checkout( commit )
     39
     40                Version = GetVersion( build )
     41
     42                Configure()
     43
     44                Package()
     45
     46                Test()
     47
     48                Archive()
    5349        }
    54 
    5550}
    5651
  • Jenkins/FullBuild

    r4aba055 r2f19e03  
    55//===========================================================================================================
    66
    7 node ('master') {
     7node {
    88        def err = null
    99
     
    106106
    107107        if(result.result != 'SUCCESS') {
    108                 sh("wget -q -O - http://localhost:8084/jenkins/job/Cforall/job/master/${result.number}/consoleText")
     108                sh("wget -q -O - https://cforall.uwaterloo.ca/jenkins/job/Cforall/job/master/${result.number}/consoleText")
    109109                error(result.result)
    110110        }
     
    144144//Email notification on a full build failure
    145145def promote_email(boolean success) {
    146         echo('notifying users')
     146        node {
     147                echo('notifying users')
    147148
    148         def result = success ? "PROMOTE - SUCCESS" : "PROMOTE - FAILURE"
     149                def result = success ? "PROMOTE - SUCCESS" : "PROMOTE - FAILURE"
    149150
    150         //Since tokenizer doesn't work, figure stuff out from the environnement variables and command line
    151         //Configurations for email format
    152         def email_subject = "[cforall git][${result}]"
    153         def email_body = """<p>This is an automated email from the Jenkins build machine. It was
    154 generated following the result of the C\u2200 nightly build.</p>
     151                //Since tokenizer doesn't work, figure stuff out from the environnement variables and command line
     152                //Configurations for email format
     153                def email_subject = "[cforall git][${result}]"
     154                def email_body = """<p>This is an automated email from the Jenkins build machine. It was
     155        generated following the result of the C\u2200 nightly build.</p>
    155156
    156 <p>Check console output at ${env.BUILD_URL} to view the results.</p>
     157        <p>Check console output at ${env.BUILD_URL} to view the results.</p>
    157158
    158 <p>- Status --------------------------------------------------------------</p>
     159        <p>- Status --------------------------------------------------------------</p>
    159160
    160 <p>${result}</p>
     161        <p>${result}</p>
    161162
    162 <p>- Performance ---------------------------------------------------------</p>
     163        <p>- Performance ---------------------------------------------------------</p>
    163164
    164 <img src="https://cforall.uwaterloo.ca/jenkins/job/Cforall/job/master/plot/Compilation/getPlot?index=0" >
    165 <img src="https://cforall.uwaterloo.ca/jenkins/job/Cforall/job/master/plot/Compilation/getPlot?index=1" >
     165        <img src="https://cforall.uwaterloo.ca/jenkins/job/Cforall/job/master/plot/Compilation/getPlot?index=0" >
     166        <img src="https://cforall.uwaterloo.ca/jenkins/job/Cforall/job/master/plot/Compilation/getPlot?index=1" >
    166167
    167 <p>- Logs ----------------------------------------------------------------</p>
    168 """
     168        <p>- Logs ----------------------------------------------------------------</p>
     169        """
    169170
    170         def email_to = "cforall@lists.uwaterloo.ca"
     171                def email_to = "cforall@lists.uwaterloo.ca"
    171172
    172         //send email notification
    173         emailext body: email_body, subject: email_subject, to: email_to, attachLog: !success
     173                //send email notification
     174                emailext body: email_body, subject: email_subject, to: email_to, attachLog: !success
     175        }
    174176}
  • Jenkins/tools.groovy

    r4aba055 r2f19e03  
    6161}
    6262
    63 PrevGitOldRef = ''
    64 PrevGitNewRef = ''
    65 def GitLogMessage(String oldRef = '', String newRef = '') {
    66         if (!oldRef) { if(!PrevGitOldRef) { return "\nERROR retrieveing current git information!\n"  } else { oldRef = PrevGitOldRef } }
    67         if (!newRef) { if(!PrevGitNewRef) { return "\nERROR retrieveing previous git information!\n" } else { newRef = PrevGitNewRef } }
    68 
     63def ConstructGitLogMessage(String oldRef, String newRef) {
    6964        def revText = sh(returnStdout: true, script: "git rev-list ${oldRef}..${newRef}").trim()
    7065        def revList = SplitLines( revText )
     
    8782        gitDiff = gitDiff.replace('[m', '</span>')
    8883
    89         PrevGitOldRef = oldRef
    90         PrevGitNewRef = newRef
     84        return """
     85<p>- Changes -------------------------------------------------------------</p>
    9186
    92         return """
    9387<pre>
    9488The branch ${env.BRANCH_NAME} has been updated.
    9589${gitUpdate}
    9690</pre>
    97 
    98 <p>Check console output at ${env.BUILD_URL} to view the results.</p>
    99 
    100 <p>- Status --------------------------------------------------------------</p>
    101 
    102 <p>BUILD# ${env.BUILD_NUMBER} - ${currentBuild.result}</p>
    10391
    10492<p>- Log -----------------------------------------------------------------</p>
     
    116104}
    117105
     106EmailMessage = ''
     107def GitLogMessage(String oldRef = '', String newRef = '') {
     108        if(!EmailMessage) {
     109                if (!oldRef) { return "\nERROR retrieveing current git information!\n"  }
     110                if (!newRef) { return "\nERROR retrieveing previous git information!\n" }
     111
     112                echo "Constructing new git message"
     113
     114                EmailMessage = ConstructGitLogMessage(oldRef, newRef)
     115        }
     116        else {
     117                echo "Reusing previously constructed message"
     118        }
     119        return EmailMessage;
     120}
     121
    118122return this;
  • Jenkinsfile

    r4aba055 r2f19e03  
    77//===========================================================================================================
    88
    9 node('master') {
    10         // Globals
    11         BuildDir  = pwd tmp: true
    12         SrcDir    = pwd tmp: false
    13         Settings  = null
    14         Tools     = null
    15 
    16         // Local variables
    17         def err = null
    18         def log_needed = false
    19 
    20         currentBuild.result = "SUCCESS"
    21 
    22         try {
     9// Globals
     10BuildDir  = null
     11SrcDir    = null
     12Settings  = null
     13Tools     = null
     14
     15// Local variables
     16def err = null
     17def log_needed = false
     18
     19currentBuild.result = "SUCCESS"
     20
     21try {
     22        node {
    2323                //Wrap build to add timestamp to command line
    2424                wrap([$class: 'TimestamperBuildWrapper']) {
    25 
    2625                        Settings = prepare_build()
    27 
    28                         node(Settings.Architecture.node) {
    29                                 BuildDir  = pwd tmp: true
    30                                 SrcDir    = pwd tmp: false
    31                                 currentBuild.description = "${currentBuild.description} on ${env.NODE_NAME}"
    32 
    33                                 Tools.Clean()
    34 
    35                                 Tools.Checkout()
    36 
    37                                 build()
    38 
    39                                 test()
    40 
    41                                 benchmark()
    42 
    43                                 build_doc()
    44 
    45                                 publish()
    46                         }
    47 
    48                         // Update the build directories when exiting the node
     26                }
     27        }
     28
     29        node(Settings.Architecture.node) {
     30                //Wrap build to add timestamp to command line
     31                wrap([$class: 'TimestamperBuildWrapper']) {
    4932                        BuildDir  = pwd tmp: true
    5033                        SrcDir    = pwd tmp: false
    51                 }
    52         }
    53 
    54         //If an exception is caught we need to change the status and remember to
    55         //attach the build log to the email
    56         catch (Exception caughtError) {
    57                 // Store the result of the build log
    58                 currentBuild.result = "FAILURE"
    59 
    60                 // An error has occured, the build log is relevent
    61                 log_needed = true
    62 
    63                 // rethrow error later
    64                 err = caughtError
    65 
    66                 // print the error so it shows in the log
    67                 echo err.toString()
    68         }
    69 
    70         finally {
    71                 //Send email with final results if this is not a full build
    72                 email(log_needed)
    73 
    74                 echo 'Build Completed'
    75 
    76                 /* Must re-throw exception to propagate error */
    77                 if (err) {
    78                         throw err
    79                 }
     34                        currentBuild.description = "${currentBuild.description} on ${env.NODE_NAME}"
     35
     36                        Tools.Clean()
     37
     38                        Tools.Checkout()
     39
     40                        build()
     41
     42                        test()
     43
     44                        benchmark()
     45
     46                        build_doc()
     47
     48                        publish()
     49                }
     50        }
     51}
     52
     53//If an exception is caught we need to change the status and remember to
     54//attach the build log to the email
     55catch (Exception caughtError) {
     56        // Store the result of the build log
     57        currentBuild.result = "FAILURE"
     58
     59        // An error has occured, the build log is relevent
     60        log_needed = true
     61
     62        // rethrow error later
     63        err = caughtError
     64
     65        // print the error so it shows in the log
     66        echo err.toString()
     67}
     68
     69finally {
     70        //Send email with final results if this is not a full build
     71        email(log_needed)
     72
     73        echo 'Build Completed'
     74
     75        /* Must re-throw exception to propagate error */
     76        if (err) {
     77                throw err
    8078        }
    8179}
     
    228226//Standard build email notification
    229227def email(boolean log) {
    230         //Since tokenizer doesn't work, figure stuff out from the environnement variables and command line
    231         //Configurations for email format
    232         echo 'Notifying users of result'
    233 
    234         def project_name = (env.JOB_NAME =~ /(.+)\/.+/)[0][1].toLowerCase()
    235         def email_subject = "[${project_name} git][BUILD# ${env.BUILD_NUMBER} - ${currentBuild.result}] - branch ${env.BRANCH_NAME}"
    236         def email_body = """<p>This is an automated email from the Jenkins build machine. It was
    237 generated because of a git hooks/post-receive script following
    238 a ref change which was pushed to the C\u2200 repository.</p>
    239 """ + Tools.GitLogMessage()
    240 
    241         def email_to = !Settings.IsSandbox ? "cforall@lists.uwaterloo.ca" : "tdelisle@uwaterloo.ca"
    242 
    243         if( Settings && !Settings.Silent ) {
    244                 //send email notification
    245                 emailext body: email_body, subject: email_subject, to: email_to, attachLog: log
    246         } else {
    247                 echo "Would send email to: ${email_to}"
    248                 echo "With title: ${email_subject}"
    249                 echo "Content: \n${email_body}"
     228        node {
     229                //Since tokenizer doesn't work, figure stuff out from the environnement variables and command line
     230                //Configurations for email format
     231                echo 'Notifying users of result'
     232
     233                def project_name = (env.JOB_NAME =~ /(.+)\/.+/)[0][1].toLowerCase()
     234                def email_subject = "[${project_name} git][BUILD# ${env.BUILD_NUMBER} - ${currentBuild.result}] - branch ${env.BRANCH_NAME}"
     235                def email_body = """<p>This is an automated email from the Jenkins build machine. It was
     236                        generated because of a git hooks/post-receive script following
     237                        a ref change which was pushed to the C\u2200 repository.</p>
     238
     239                        <p>- Status --------------------------------------------------------------</p>
     240
     241                        <p>BUILD# ${env.BUILD_NUMBER} - ${currentBuild.result}</p>
     242                        <p>Check console output at ${env.BUILD_URL} to view the results.</p>
     243                        """ + Tools.GitLogMessage()
     244
     245                def email_to = !Settings.IsSandbox ? "cforall@lists.uwaterloo.ca" : "tdelisle@uwaterloo.ca"
     246
     247                if( Settings && !Settings.Silent ) {
     248                        //send email notification
     249                        emailext body: email_body, subject: email_subject, to: email_to, attachLog: log
     250                } else {
     251                        echo "Would send email to: ${email_to}"
     252                        echo "With title: ${email_subject}"
     253                        echo "Content: \n${email_body}"
     254                }
    250255        }
    251256}
  • doc/theses/andrew_beach_MMath/future.tex

    r4aba055 r2f19e03  
    77that I had to workaround while building an exception handling system largely in
    88the \CFA language (some C components).  The following are a few of these
    9 issues, and once implemented/fixed, how this would affect the exception system.
     9issues, and once implemented/fixed, how they would affect the exception system.
    1010\begin{itemize}
    1111\item
     
    1313hand-crafted assembly statements. These sections must be ported by hand to
    1414support more hardware architectures, such as the ARM processor.
     15\PAB{I think this is a straw-man problem because the hand-coded assembler code
     16has to be generated somewhere, and that somewhere is hand-coded.}
    1517\item
    1618Due to a type-system problem, the catch clause cannot bind the exception to a
    1719reference instead of a pointer. Since \CFA has a very general reference
    1820capability, programmers will want to use it. Once fixed, this capability should
    19 result in little or no change in the exception system.
     21result in little or no change in the exception system but simplify usage.
    2022\item
    2123Termination handlers cannot use local control-flow transfers, \eg by @break@,
     
    2830There is no detection of colliding unwinds. It is possible for clean-up code
    2931run during an unwind to trigger another unwind that escapes the clean-up code
    30 itself; such as a termination exception caught further down the stack or a
    31 cancellation. There do exist ways to handle this but currently they are not
    32 even detected and the first unwind will simply be forgotten, often leaving
    33 it in a bad state.
     32itself, \eg, a termination exception caught further down the stack or a
     33cancellation. There do exist ways to handle this issue, but currently they are not
     34even detected and the first unwind is simply dropped, often leaving
     35it in a bad state. \Cpp terminates the program in this case, and Java picks the ...
    3436\item
    3537Also the exception system did not have a lot of time to be tried and tested.
     
    4143The virtual system should be completed. It was not supposed to be part of this
    4244project, but was thrust upon it to do exception inheritance; hence, only
    43 minimal work was done. A draft for a complete virtual system is available but
     45minimal work is done. A draft for a complete virtual system is available but
    4446it is not finalized.  A future \CFA project is to complete that work and then
    4547update the exception system that uses the current version.
     
    6769bad software engineering.
    6870
    69 Non-local/concurrent requires more coordination between the concurrency system
     71Non-local/concurrent raise requires more coordination between the concurrency system
    7072and the exception system. Many of the interesting design decisions centre
    71 around masking (controlling which exceptions may be thrown at a stack). It
     73around masking, \ie controlling which exceptions may be thrown at a stack. It
    7274would likely require more of the virtual system and would also effect how
    7375default handlers are set.
     
    8587
    8688\section{Checked Exceptions}
    87 Checked exceptions make exceptions part of a function's type by adding the
     89Checked exceptions make exceptions part of a function's type by adding an
    8890exception signature. An exception signature must declare all checked
    89 exceptions that could propogate from the function (either because they were
     91exceptions that could propagate from the function (either because they were
    9092raised inside the function or came from a sub-function). This improves safety
    9193by making sure every checked exception is either handled or consciously
    9294passed on.
    9395
    94 However checked exceptions were never seriously considered for this project
    95 for two reasons. The first is due to time constraints, even copying an
    96 existing checked exception system would be pushing the remaining time and
    97 trying to address the second problem would take even longer. The second
    98 problem is that checked exceptions have some real usability trade-offs in
     96However checked exceptions were never seriously considered for this project because
     97they have significant usability and reuse trade-offs in
    9998exchange for the increased safety.
    100 
    10199These trade-offs are most problematic when trying to pass exceptions through
    102100higher-order functions from the functions the user passed into the
    103101higher-order function. There are no well known solutions to this problem
    104 that were statifactory for \CFA (which carries some of C's flexability
    105 over safety design) so one would have to be researched and developed.
     102that were satisfactory for \CFA (which carries some of C's flexibility
     103over safety design) so additional research is needed.
    106104
    107 Follow-up work might add checked exceptions to \CFA, possibly using
    108 polymorphic exception signatures, a form of tunneling\cite{Zhang19} or
     105Follow-up work might find a compromise design for checked exceptions in \CFA, possibly using
     106polymorphic exception signatures, a form of tunneling\cite{Zhang19}, or
    109107checked and unchecked raises.
    110108
     
    150148For instance, resumption could be extended to cover this use by allowing local
    151149control flow out of it. This approach would require an unwind as part of the
    152 transition as there are stack frames that have to be removed.  This approach
    153 means there is no notify raise, but because \CFA does not have exception
    154 signatures, a termination can be thrown from within any resumption handler so
    155 there is already a way to do mimic this in existing \CFA.
     150transition as there are stack frames that have to be removed back to the resumption handler.  This approach
     151means no special statement is required in the handler to continue after it.
     152Currently, \CFA allows a termination exception to be thrown from within any resumption handler so
     153there is already a way to partially mimic signal exceptions.
    156154
    157155% Maybe talk about the escape; and escape CONTROL_STMT; statements or how
  • doc/theses/andrew_beach_MMath/implement.tex

    r4aba055 r2f19e03  
    22\label{c:implement}
    33
    4 The implementation work for this thesis covers two components: the virtual
     4The implementation work for this thesis covers the two components: virtual
    55system and exceptions. Each component is discussed in detail.
    66
     
    1717pointer to the virtual table, which is called the \emph{virtual-table pointer}.
    1818Internally, the field is called \snake{virtual_table}.
    19 The field is fixed after construction. It is always the first field in the
     19The field is fixed after construction and is the first field in the
    2020structure so that its location is always known.
    2121\todo{Talk about constructors for virtual types (after they are working).}
    2222
    23 This is what binds an instance of a virtual type to a virtual table. This
    24 pointer can be used as an identity check. It can also be used to access the
     23The virtual-table pointer is what binds an instance of a virtual type to its virtual table. This
     24pointer is used as an identity check, and to access the
    2525virtual table and the virtual members there.
    2626
    2727\subsection{Type Id}
    2828Every virtual type has a unique id.
    29 Type ids can be compared for equality (the types reperented are the same)
     29Type ids can be compared for equality (\ie the types represented are the same)
    3030or used to access the type's type information.
    3131The type information currently is only the parent's type id or, if the
    32 type has no parent, zero.
     32type has no parent, @0p@.
    3333
    3434The id's are implemented as pointers to the type's type information instance.
    35 Derefencing the pointer gets the type information.
    36 By going back-and-forth between the type id and
    37 the type info one can find every ancestor of a virtual type.
    38 It also pushes the issue of creating a unique value (for
     35Dereferencing the pointer gets the type information.
     36The ancestors of a virtual type are found by traversing the type id through
     37the type information.
     38An id also pushes the issue of creating a unique value (for
    3939the type id) to the problem of creating a unique instance (for type
    40 information) which the linker can solve.
     40information), which the linker can solve.
    4141
    4242Advanced linker support is required because there is no place that appears
    4343only once to attach the type information to. There should be one structure
    44 definition but it is included in multiple translation units. Each virtual
    45 table definition should be unique but there are an arbitrary number of thoses.
    46 So the special section prefix \texttt{.gnu.linkonce} is used.
    47 With a unique suffix (making the entire section name unique) the linker will
    48 remove multiple definition making sure only one version exists after linking.
     44definition but it is included in multiple translation units because of separate compilation. Each virtual
     45table definition should be unique but there are an arbitrary number of these,
     46so the special section prefix \texttt{.gnu.linkonce} is used.
     47With a generated unique suffix (making the entire section name unique) the linker
     48removes multiple definition ensuring only one version exists after linking.
    4949Then it is just a matter of making sure there is a unique name for each type.
    5050
    51 This is done in three phases.
     51These steps are done in three phases.
     52\begin{enumerate}
     53\item
    5254The first phase is to generate a new structure definition to store the type
    5355information. The layout is the same in each case, just the parent's type id,
     
    5557The structure's name is change, it is based off the virtual type's name, and
    5658the type of the parent's type id.
    57 If the virtual type is polymorphic then the type information structure is
     59If the virtual type is polymorphic, then the type information structure is
    5860polymorphic as well, with the same polymorphic arguments.
    59 
     61\item
    6062The second phase is to generate an instance of the type information with a
    6163almost unique name, generated by mangling the virtual type name.
    62 
     64\item
    6365The third phase is implicit with \CFA's overloading scheme. \CFA mangles
    6466names with type information so that all of the symbols exported to the linker
    65 are unique even if in \CFA code they are the same. Having two declarations
     67are unique even if in the \CFA code they are the same. Having two declarations
    6668with the same name and same type is forbidden because it is impossible for
    67 overload resolution to pick between them. This is why a unique type is
     69overload resolution to pick between them. This is the reason why a unique type is
    6870generated for each virtual type.
    6971Polymorphic information is included in this mangling so polymorphic
    70 types will have seperate instances for each set of polymorphic arguments.
    71 
     72types have separate instances for each set of polymorphic arguments.
     73\end{enumerate}
     74The following example shows the components for a generated virtual type.
    7275\begin{cfa}
    7376struct TYPE_ID_TYPE {
     
    8184\end{cfa}
    8285
    83 \subsubsection{cfa\_linkonce Attribute}
     86\subsubsection{\lstinline{cfa_linkonce} Attribute}
    8487Another feature added to \CFA is a new attribute: \texttt{cfa\_linkonce}.
    85 This attribute can be put on an object or function definition
    86 (any global declaration with a name and a type).
    87 This allows you to define that object or function multiple times.
    88 All definitions should have the link-once attribute on them and all should
     88This attribute is attached to an object or function definition
     89(any global declaration with a name and a type)
     90allowing it to be defined multiple times.
     91All matching definitions must have the link-once attribute on them and should
    8992be identical.
    90 
    91 The simplist way to use it is to put a definition in a header where the
    92 forward declaration would usually go.
    93 This is how it is used for type-id instances. There was is no unique location
    94 associated with a type except for the type definition which is in a header.
    95 This allows the unique type-id object to be generated there.
    96 
    97 Internally @cfa_linkonce@ removes all @section@ attributes
    98 from the declaration (as well as itself) and replaces them with
     93This attributed prototype is placed in a header file with other
     94forward declaration.
     95
     96This technique is used for type-id instances, as there is no unique location
     97associated with a type, except for the type definition in a header.
     98The result is the unique type-id object generated by the linker.
     99
     100Internally, @cfa_linkonce@ is replaced with
    99101@section(".gnu.linkonce.NAME")@ where \texttt{NAME} is replaced by the
    100102mangled name of the object.
     103Any other @section@ attributes are also removed from the declaration.
    101104The prefix \texttt{.gnu.linkonce} in section names is recognized by the
    102 linker. If two of these sections with the same name, including everything
    103 that comes after the special prefix, then only one will be used and the other
    104 will be discarded.
     105linker. If two of these sections appear with the same name, including everything
     106that comes after the special prefix, then only one is used and the other
     107discarded.
    105108
    106109\subsection{Virtual Table}
     
    112115below.
    113116
    114 The layout always comes in three parts.
     117Figure~\ref{f:VirtualTableLayout} shows the layout is in three parts.
     118\PAB{Number the parts in the figure.}
     119\begin{enumerate}
     120\item
    115121The first section is just the type id at the head of the table. It is always
    116 there to ensure that
     122there to ensure that \PAB{... missing text to end this sentence}
     123\item
    117124The second section are all the virtual members of the parent, in the same
    118125order as they appear in the parent's virtual table. Note that the type may
    119 change slightly as references to the ``this" will change. This is limited to
     126change slightly as references to the @this@ change. This structure is limited to
    120127inside pointers/references and via function pointers so that the size (and
    121128hence the offsets) are the same.
     129\item
    122130The third section is similar to the second except that it is the new virtual
    123131members introduced at this level in the hierarchy.
     132\end{enumerate}
    124133
    125134\begin{figure}
     
    133142prefix that has the same layout and types as its parent virtual table.
    134143This, combined with the fixed offset to the virtual table pointer, means that
    135 for any virtual type it doesn't matter if we have it or any of its
    136 descendants, it is still always safe to access the virtual table through
     144for any virtual type, it or any of its
     145descendants can be accessed through
    137146the virtual table pointer.
    138 From there it is safe to check the type id to identify the exact type of the
    139 underlying object, access any of the virtual members and pass the object to
     147From there, it is safe to check the type id to identify the exact type of the
     148underlying object, access any of the virtual members, and pass the object to
    140149any of the method-like virtual members.
    141150
    142 When a virtual table is declared the user decides where to declare it and its
     151When a virtual table is declared, the user decides where to declare it and its
    143152name. The initialization of the virtual table is entirely automatic based on
    144153the context of the declaration.
    145154
    146 The type id is always fixed, each virtual table type will always have one
     155The type id is always fixed with each virtual table type having
    147156exactly one possible type id.
    148 The virtual members are usually filled in by resolution. The best match for
    149 a given name and type at the declaration site is filled in.
     157The virtual members are usually filled in during type resolution. The best match for
     158a given name and type at the declaration site is used.
    150159There are two exceptions to that rule: the @size@ field is the type's size
    151 and is set to the result of a @sizeof@ expression, the @align@ field is the
    152 type's alignment and similarly uses an @alignof@ expression.
     160set using a @sizeof@ expression, and the @align@ field is the
     161type's alignment set using an @alignof@ expression.
    153162
    154163\subsubsection{Concurrency Integration}
    155164Coroutines and threads need instances of @CoroutineCancelled@ and
    156165@ThreadCancelled@ respectively to use all of their functionality. When a new
    157 data type is declared with @coroutine@ or @thread@ the forward declaration for
     166data type is declared with @coroutine@ or @thread@, a forward declaration for
    158167the instance is created as well. The definition of the virtual table is created
    159168at the definition of the main function.
     169
     170Figure~\ref{f:ConcurrencyTransformations} shows ...
     171\todo{Improve Concurrency Transformations figure.}
    160172
    161173\begin{figure}
     
    194206\label{f:ConcurrencyTransformations}
    195207\end{figure}
    196 \todo{Improve Concurrency Transformations figure.}
    197208
    198209\subsection{Virtual Cast}
     
    211222the cast target is passed in as @child@.
    212223
    213 For C generation both arguments and the result are wrapped with type casts.
    214 There is also an internal store inside the compiler to make sure that the
     224The generated C code wraps both arguments and the result with type casts.
     225There is also an internal check inside the compiler to make sure that the
    215226target type is a virtual type.
    216227% It also checks for conflicting definitions.
    217228
    218 The virtual cast either returns the original pointer as a new type or null.
    219 So the function just does the parent check and returns the approprate value.
     229The virtual cast either returns the original pointer as a new type or 0p.
     230So the function just does the parent check and returns the appropriate value.
    220231The parent check is a simple linear search of child's ancestors using the
    221232type information.
     
    229240% resumption doesn't as well.
    230241
    231 % Many modern languages work with an interal stack that function push and pop
     242% Many modern languages work with an internal stack that function push and pop
    232243% their local data to. Stack unwinding removes large sections of the stack,
    233244% often across functions.
     
    236247stack. On function entry and return, unwinding is handled directly by the
    237248call/return code embedded in the function.
    238 In many cases the position of the instruction pointer (relative to parameter
     249In many cases, the position of the instruction pointer (relative to parameter
    239250and local declarations) is enough to know the current size of the stack
    240251frame.
    241252
    242253Usually, the stack-frame size is known statically based on parameter and
    243 local variable declarations. Even with dynamic stack-size the information
    244 to determain how much of the stack has to be removed is still contained
     254local variable declarations. Even with dynamic stack-size, the information
     255to determine how much of the stack has to be removed is still contained
    245256within the function.
    246257Allocating/deallocating stack space is usually an $O(1)$ operation achieved by
    247258bumping the hardware stack-pointer up or down as needed.
    248 Constructing/destructing values on the stack takes longer put in terms of
    249 figuring out what needs to be done is of similar complexity.
     259In fact, constructing/destructing values within a stack frame is of similar complexity but often takes longer.
    250260
    251261Unwinding across multiple stack frames is more complex because that
    252262information is no longer contained within the current function.
    253 With seperate compilation a function has no way of knowing what its callers
    254 are so it can't know how large those frames are.
    255 Without altering the main code path it is also hard to pass that work off
     263With separate compilation a function has no way of knowing what its callers
     264so it can not know how large those frames are.
     265Without altering the main code path, it is also hard to pass that work off
    256266to the caller.
    257267
     
    261271reseting to a snap-shot of an arbitrary but existing function frame on the
    262272stack. It is up to the programmer to ensure the snap-shot is valid when it is
    263 reset and that all required clean-up from the unwound stacks is preformed.
    264 This approach is fragile and forces a work onto the surounding code.
    265 
    266 With respect to that work forced onto the surounding code,
     273reset and that all required clean-up from the unwound stacks is performed.
     274This approach is fragile and forces extra work in the surrounding code.
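For reference, longjmp-style unwinding looks roughly like the following plain-C sketch; note that nothing between the snapshot and the jump receives any clean-up:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf snapshot;

    static void deeply_nested(void) {
        /* Jump back to the frame that called setjmp, discarding every
         * frame in between without running any clean-up code. */
        longjmp( snapshot, 1 );
    }

    int main(void) {
        if ( setjmp( snapshot ) == 0 ) {
            deeply_nested();                    /* first pass: snapshot taken, call down */
        } else {
            puts( "returned via longjmp" );     /* resumed here after the jump */
        }
        return 0;
    }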
     275
     276With respect to the extra work in the surrounding code,
    267277many languages define clean-up actions that must be taken when certain
    268278sections of the stack are removed. Such as when the storage for a variable
    269 is removed from the stack or when a try statement with a finally clause is
     279is removed from the stack or when a @try@ statement with a finally clause is
    270280(conceptually) popped from the stack.
    271 None of these should be handled by the user, that would contradict the
    272 intention of these features, so they need to be handled automatically.
    273 
    274 To safely remove sections of the stack the language must be able to find and
     281None of these should be handled explicitly by the user --- that would contradict the
     282intention of these features --- so they need to be handled automatically.
     283
     284To safely remove sections of the stack, the language must be able to find and
    275285run these clean-up actions even when removing multiple functions unknown at
    276286the beginning of the unwinding.
     
    294304current stack frame, and what handlers should be checked. Theoretically, the
    295305LSDA can contain any information but conventionally it is a table with entries
    296 representing regions of the function and what has to be done there during
     306representing regions of a function and what has to be done there during
    297307unwinding. These regions are bracketed by instruction addresses. If the
    298308instruction pointer is within a region's start/end, then execution is currently
    299309executing in that region. Regions are used to mark out the scopes of objects
    300 with destructors and try blocks.
     310with destructors and @try@ blocks.
    301311
    302312% Libunwind actually does very little, it simply moves down the stack from
     
    314324int avar __attribute__(( cleanup(clean_up) ));
    315325\end{cfa}
    316 The attribue is used on a variable and specifies a function,
     326The attribute is used on a variable and specifies a function,
    317327in this case @clean_up@, run when the variable goes out of scope.
    318 This is enough to mimic destructors, but not try statements which can effect
     328This capability is enough to mimic destructors, but not @try@ statements which can effect
    319329the unwinding.
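A complete, runnable version of the attribute usage shown above (assuming GCC or Clang; the function and variable names are just for illustration):

    #include <stdio.h>

    /* Called automatically with a pointer to the variable when it goes
     * out of scope, mimicking a destructor. */
    static void clean_up( int * var ) {
        printf( "cleaning up %d\n", *var );
    }

    int main(void) {
        int avar __attribute__(( cleanup(clean_up) )) = 7;
        return 0;   /* clean_up(&avar) runs as avar leaves scope */
    }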
    320330
    321 To get full unwinding support all of this has to be done directly with
    322 assembly and assembler directives. Partiularly the cfi directives
    323 \snake{.cfi_lsda} and \snake{.cfi_personality}.
     331To get full unwinding support, all of these components must done directly with
     332assembly and assembler directives, particularly the cfi directives
     333\snake{.cfi_Leda} and \snake{.cfi_personality}.
    324334
    325335\subsection{Personality Functions}
     
    327337section covers some of the important parts of the interface.
    328338
    329 A personality function can preform different actions depending on how it is
     339A personality function can perform different actions depending on how it is
    330340called.
    331341\begin{lstlisting}
     
    364374
    365375The @exception_class@ argument is a copy of the
    366 \code{C}{exception}'s @exception_class@ field.
    367 This a number that identifies the exception handling mechanism that created
    368 the
    369 
    370 The \code{C}{exception} argument is a pointer to the user
     376\code{C}{exception}'s @exception_class@ field,
     377which is a number that identifies the exception handling mechanism that created
     378the \PAB{... missing text to end this sentence}
     379
     380The \code{C}{exception} argument is a pointer to a user
    371381provided storage object. It has two public fields: the @exception_class@,
    372382which is described above, and the @exception_cleanup@ function.
    373 The clean-up function is used by the EHM to clean-up the exception if it
     383The clean-up function is used by the EHM to clean-up the exception, if it
    374384should need to be freed at an unusual time, it takes an argument that says
    375385why it had to be cleaned up.
     
    382392messages for special cases (some of which should never be used by the
    383393personality function) and error codes. However, unless otherwise noted, the
    384 personality function should always return @_URC_CONTINUE_UNWIND@.
     394personality function always return @_URC_CONTINUE_UNWIND@.
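A minimal skeleton of a personality function using the <unwind.h> names mentioned above; this version never claims a handler, so it simply lets unwinding continue past any frame that names it (illustrative only, not the \CFA personality function):

    #include <unwind.h>

    _Unwind_Reason_Code minimal_personality(
            int version, _Unwind_Action actions,
            _Unwind_Exception_Class exception_class,
            struct _Unwind_Exception * exception,
            struct _Unwind_Context * context ) {
        /* A real personality function inspects actions (search vs clean-up
         * phase) and the LSDA reachable through context; this one does not. */
        (void)version; (void)actions; (void)exception_class;
        (void)exception; (void)context;
        return _URC_CONTINUE_UNWIND;
    }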
    385395
    386396\subsection{Raise Exception}
    387 Raising an exception is the central function of libunwind and it performs a
     397Raising an exception is the central function of libunwind and it performs the
    388398two-staged unwinding.
    389399\begin{cfa}
     
    472482% catches. Talk about GCC nested functions.
    473483
    474 \CFA termination exceptions use libunwind heavily because they match \Cpp
     484\CFA termination exceptions use libunwind heavily because they match
    475485\Cpp exceptions closely. The main complication for \CFA is that the
    476486compiler generates C code, making it very difficult to generate the assembly to
    477 form the LSDA for try blocks or destructors.
     487form the LSDA for @try@ blocks or destructors.
    478488
    479489\subsection{Memory Management}
     
    485495
    486496\begin{figure}
     497\centering
    487498\input{exception-layout}
    488499\caption{Exception Layout}
     
    491502\todo*{Convert the exception layout to an actual diagram.}
    492503
    493 Exceptions are stored in variable-sized blocks (see \vref{f:ExceptionLayout}).
     504Exceptions are stored in variable-sized blocks (see Figure~\vref{f:ExceptionLayout}).
    494505The first component is a fixed-sized data structure that contains the
    495506information for libunwind and the exception system. The second component is an
     
    498509@_Unwind_Exception@ to the entire node.
    499510
    500 Multipe exceptions can exist at the same time because exceptions can be
     511Multiple exceptions can exist at the same time because exceptions can be
    501512raised inside handlers, destructors and finally blocks.
    502513Figure~\vref{f:MultipleExceptions} shows a program that has multiple
    503514exceptions active at one time.
    504515Each time an exception is thrown and caught the stack unwinds and the finally
    505 clause runs. This will throw another exception (until @num_exceptions@ gets
    506 high enough) which must be allocated. The previous exceptions may not be
     516clause runs. This handler throws another exception (until @num_exceptions@ gets
     517high enough), which must be allocated. The previous exceptions may not be
    507518freed because the handler/catch clause has not been run.
    508 So the EHM must keep them alive while it allocates exceptions for new throws.
     519Therefore, the EHM must keep all of these exceptions alive while it allocates exceptions for new throws.
    509520
    510521\begin{figure}
     
    559570\todo*{Work on multiple exceptions code sample.}
    560571
    561 All exceptions are stored in nodes which are then linked together in lists,
     572All exceptions are stored in nodes, which are then linked together in lists
    562573one list per stack, with the
    563574list head stored in the exception context. Within each linked list, the most
     
    566577exception is being handled. The exception at the head of the list is currently
    567578being handled, while other exceptions wait for the exceptions before them to be
    568 removed.
     579handled and removed.
    569580
    570581The virtual members in the exception's virtual table provide the size of the
     
    573584exception into managed memory. After the exception is handled, the free
    574585function is used to clean up the exception and then the entire node is
    575 passed to free so the memory can be given back to the heap.
     586passed to free so the memory is returned to the heap.
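A sketch of the node-and-list idea described above, with hypothetical names and with the virtual-table copy/free functions and the per-stack exception context omitted:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical node layout: fixed-size header followed immediately by
     * the variable-sized exception data. */
    struct exception_node {
        struct exception_node * next;   /* next older active exception on this stack */
        size_t size;                    /* size of the exception stored after the header */
    };

    /* One list head per stack; the newest (currently handled) exception is first. */
    static struct exception_node * active_exceptions = NULL;

    static void push_exception( const void * exception, size_t size ) {
        struct exception_node * node = malloc( sizeof(*node) + size );
        node->next = active_exceptions;
        node->size = size;
        memcpy( node + 1, exception, size );    /* copy into managed memory */
        active_exceptions = node;
    }

    static void pop_exception(void) {
        struct exception_node * node = active_exceptions;
        active_exceptions = node->next;
        free( node );                           /* whole node returned to the heap */
    }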
    576587
    577588\subsection{Try Statements and Catch Clauses}
    578 The try statement with termination handlers is complex because it must
    579 compensate for the lack of assembly-code generated from \CFA. Libunwind
     589The @try@ statement with termination handlers is complex because it must
     590compensate for the C code-generation versus assembly-code generation from \CFA. Libunwind
    580591requires an LSDA and personality function for control to unwind across a
    581592function. The LSDA in particular is hard to mimic in generated C code.
    582593
    583594The workaround is a function called @__cfaehm_try_terminate@ in the standard
    584 library. The contents of a try block and the termination handlers are converted
     595library. The contents of a @try@ block and the termination handlers are converted
    585596into functions. These are then passed to the try terminate function and it
    586597calls them.
    587598Because this function is known and fixed (and not an arbitrary function that
    588 happens to contain a try statement), the LSDA can be generated ahead
     599happens to contain a @try@ statement), the LSDA can be generated ahead
    589600of time.
    590601
     
    592603embedded assembly. This assembly code is handcrafted using C @asm@ statements
    593604and contains
    594 enough information for the single try statement the function repersents.
     605enough information for a single @try@ statement the function represents.
    595606
    596607The three functions passed to try terminate are:
    597608\begin{description}
    598 \item[try function:] This function is the try block, all the code inside the
    599 try block is placed inside the try function. It takes no parameters and has no
     609\item[try function:] This function is the @try@ block, where all the code inside the
     610@try@ block is wrapped inside the function. It takes no parameters and has no
    600611return value. This function is called during regular execution to run the try
    601612block.
     
    609620handler that matches the exception.
    610621
    611 \item[handler function:] This function handles the exception. It takes a
     622\item[handler function:] This function handles the exception, where the code inside
     623is constructed by stitching together the bodies of
     624each handler of a @try@ statement and dispatches to the selected handler.
     625It takes a
    612626pointer to the exception and the handler's id and returns nothing. It is called
    613 after the cleanup phase. It is constructed by stitching together the bodies of
    614 each handler and dispatches to the selected handler.
     627after the cleanup phase.
    615628\end{description}
    616629All three functions are created with GCC nested functions. GCC nested functions
    617 can be used to create closures, functions that can refer to the state of other
     630can be used to create closures, \ie functions that can refer to the state of other
    618631functions on the stack. This approach allows the functions to refer to all the
    619632variables in scope for the function containing the @try@ statement. These
     
    623636Using this pattern, \CFA implements destructors with the cleanup attribute.
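A small example of the GCC nested-function extension that the three generated functions rely on (assuming GCC, since nested functions are a GNU extension):

    #include <stdio.h>

    int main(void) {
        int counter = 0;

        /* Nested function: a closure over counter, the way the generated
         * try/handler bodies close over the variables in scope at the
         * try statement. */
        void try_body(void) {
            counter += 1;
        }

        try_body();
        try_body();
        printf( "counter = %d\n", counter );    /* prints 2 */
        return 0;
    }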
    624637
     638Figure~\ref{f:TerminationTransformation} shows an example transformation for a \CFA @try@
     639statement with  @catch@ clauses into corresponding C functions. \PAB{Walk the reader through the example code.}
     640
    625641\begin{figure}
    626642\begin{cfa}
     
    633649}
    634650\end{cfa}
     651
     652\medskip
     653\hrule
     654\medskip
    635655
    636656\begin{cfa}
     
    683703% The stack-local data, the linked list of nodes.
    684704
    685 Resumption simpler to implement than termination
     705Resumption is simpler to implement than termination
    686706because there is no stack unwinding.
    687707Instead of storing the data in a special area using assembly,
    688708there is just a linked list of possible handlers for each stack,
    689 with each node on the list reperenting a try statement on the stack.
     709with each list node representing a @try@ statement on the stack.
    690710
    691711The head of the list is stored in the exception context.
    692 The nodes are stored in order, with the more recent try statements closer
     712The nodes are stored in order, with the more recent @try@ statements closer
    693713to the head of the list.
    694 Instead of traversing the stack resumption handling traverses the list.
    695 At each node the EHM checks to see if the try statement the node repersents
     714Instead of traversing the stack, resumption handling traverses the list.
     715At each node, the EHM checks to see if the @try@ statement it represents
    696716can handle the exception. If it can, then the exception is handled and
    697717the operation finishes, otherwise the search continues to the next node.
    698 If the search reaches the end of the list without finding a try statement
    699 that can handle the exception the default handler is executed and the
     718If the search reaches the end of the list without finding a @try@ statement
     719that can handle the exception, the default handler is executed and the
    700720operation finishes.
    701721
    702 In each node is a handler function which does most of the work there.
    703 The handler function is passed the raised the exception and returns true
    704 if the exception is handled and false if it cannot be handled here.
    705 
    706 For each @catchResume@ clause the handler function will:
    707 check to see if the raised exception is a descendant type of the declared
    708 exception type, if it is and there is a conditional expression then it will
    709 run the test, if both checks pass the handling code for the clause is run
    710 and the function returns true, otherwise it moves onto the next clause.
     722Each node has a handler function that does most of the work.
     723The handler function is passed the raised exception and returns true
     724if the exception is handled and false otherwise.
     725
     726For each @catchResume@ clause, the handler function:
     727\begin{itemize}
     728\item
     729checks to see if the raised exception is a descendant type of the declared
     730exception type,
     731\item
     732if it is and there is a conditional expression then it
     733runs the test,
     734\item
     735if both checks pass the handling code for the clause is run and the function returns true,
     736\item
     737otherwise it moves onto the next clause.
     738\end{itemize}
    711739If this is the last @catchResume@ clause then instead of moving onto
    712740the next clause the function returns false as no handler could be found.
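A sketch of the handler-list traversal described above, with hypothetical names and without the search-skipping optimization discussed below:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical node: one per try statement with catchResume clauses
     * currently on the stack. */
    struct resume_node {
        struct resume_node * next;              /* next (older) try statement */
        bool (*handler)( void * exception );    /* returns true if it handled the exception */
    };

    static struct resume_node * resume_head = NULL;    /* per-stack list head */

    /* Walk from the newest try statement outward; stop at the first handler
     * that accepts the exception, otherwise run the default handler. */
    static void resume_raise( void * exception, void (*default_handler)( void * ) ) {
        for ( struct resume_node * node = resume_head; node; node = node->next ) {
            if ( node->handler( exception ) ) return;
        }
        default_handler( exception );
    }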
     741
     742Figure~\ref{f:ResumptionTransformation} shows an example transformation for a \CFA @try@
     743statement with @catchResume@ clauses into corresponding C functions. \PAB{Walk the reader through the example code.}
    713744
    714745\begin{figure}
     
    753784
    754785% Recursive Resumption Stuff:
    755 Search skipping (see \vpageref{s:ResumptionMarking}), which ignores parts of
     786Figure~\ref{f:ResumptionMarking} shows the search skipping (see \vpageref{s:ResumptionMarking}), which ignores parts of
    756787the stack
    757788already examined, is accomplished by updating the front of the list as the
     
    759790is updated to the next node of the current node. After the search is complete,
    760791successful or not, the head of the list is reset.
    761 
    762792This mechanism means the current handler and every handler that has already
    763793been checked are not on the list while a handler is run. If a resumption is
    764 thrown during the handling of another resumption the active handlers and all
     794thrown during the handling of another resumption, the active handlers and all
    765795the other handler checked up to this point are not checked again.
    766 
    767 This structure also supports new handler added while the resumption is being
     796This structure also supports new handlers added while the resumption is being
    768797handled. These are added to the front of the list, pointing back along the
    769798stack -- the first one points over all the checked handlers -- and the ordering
    770799is maintained.
     800\PAB{Maybe number the figure and use the numbers in the description to help the reader follow.}
    771801
    772802\begin{figure}
     
    778808
    779809\label{p:zero-cost}
    780 Note, the resumption implementation has a cost for entering/exiting a @try@
     810Finally, the resumption implementation has a cost for entering/exiting a @try@
    781811statement with @catchResume@ clauses, whereas a @try@ statement with @catch@
    782812clauses has zero-cost entry/exit. While resumption does not need the stack
     
    798828around the context of the associated @try@ statement.
    799829
    800 The rest is handled by GCC. The try block and all handlers are inside this
     830The rest is handled by GCC. The @try@ block and all handlers are inside this
    801831block. At completion, control exits the block and the empty object is cleaned
    802832up, which runs the function that contains the finally code.
     
    812842coroutine or thread. Fortunately, the thread library stores the main thread
    813843pointer and the current thread pointer, and every thread stores a pointer to
    814 its main coroutine and the coroutine it is currently executing.
    815 \todo*{Consider adding a description of how threads are coroutines.}
    816 
    817 If a the current thread's main and current coroutines are the same then the
    818 current stack is a thread stack. Furthermore it is easy to compare the
    819 current thread to the main thread to see if they are the same. And if this
    820 is not a thread stack then it must be a coroutine stack.
      844its main coroutine and the coroutine it is currently executing.
     845If the current thread's main and current coroutines are the same then the
     846current stack is a thread stack, otherwise it is a coroutine stack.
     847Note, the runtime considers a thread as a coroutine with an associated user-level thread;
     848hence, for many operations a thread and coroutine are treated uniformly.
     849%\todo*{Consider adding a description of how threads are coroutines.}
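A sketch of the check, using assumed type and accessor names rather than the runtime's actual API:
\begin{verbatim}
struct coroutine;                          /* stand-in runtime types */
struct thread {
    struct coroutine * main_cor;           /* the thread's own coroutine/stack       */
    struct coroutine * curr_cor;           /* the coroutine it is currently running  */
};
static struct thread the_thread;           /* stand-in for the runtime's thread data */
static struct thread * active_thread( void ) { return &the_thread; }

static int on_thread_stack( void ) {
    struct thread * t = active_thread();
    return t->main_cor == t->curr_cor;     /* equal: thread stack, else coroutine stack */
}
\end{verbatim}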
     850
     851% Furthermore it is easy to compare the
     852% current thread to the main thread to see if they are the same. And if this
     853% is not a thread stack then it must be a coroutine stack.
    821854
    822855However, if the threading library is not linked, the sequential execution is on
    823856the main stack. Hence, the entire check is skipped because the weak-symbol
    824 function is loaded. Therefore, a main thread cancellation is unconditionally
     857function is loaded. Therefore, main thread cancellation is unconditionally
    825858performed.
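A sketch of the weak-symbol pattern (the function name here is hypothetical); the strong definition provided by the thread library overrides this default when it is linked:
\begin{verbatim}
/* Default used when libcfathread is not linked: everything is the main stack. */
__attribute__(( weak )) int __cfaehm_on_main_stack( void ) {
    return 1;
}
/* libcfathread would provide a strong __cfaehm_on_main_stack that performs
   the thread/coroutine stack check described above. */
\end{verbatim}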
    826859
    827860Regardless of how the stack is chosen, the stop function and parameter are
    828861passed to the forced-unwind function. The general pattern of all three stop
    829 functions is the same: they continue unwinding until the end of stack and
    830 then preform their transfer.
     862functions is the same: continue unwinding until the end of stack and
     863then perform the appropriate transfer.
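The shared shape of the three stop functions can be sketched as follows, with a stand-in transfer routine covering the per-stack differences described next:
\begin{verbatim}
#include <unwind.h>

static void transfer( void * param ) {     /* stand-in: abort, context switch, or hand-off */
    (void)param;
}

static _Unwind_Reason_Code cancel_stop(
        int version, _Unwind_Action actions, _Unwind_Exception_Class exc_class,
        struct _Unwind_Exception * exc, struct _Unwind_Context * ctx, void * param ) {
    (void)version; (void)exc_class; (void)exc; (void)ctx;
    if ( actions & _UA_END_OF_STACK ) transfer( param );   /* reached the last frame   */
    return _URC_NO_REASON;                                 /* otherwise keep unwinding */
}
\end{verbatim}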
    831864
    832865For main stack cancellation, the transfer is just a program abort.
     
    834867For coroutine cancellation, the exception is stored on the coroutine's stack,
    835868and the coroutine context switches to its last resumer. The rest is handled on
    836 the backside of the resume, which check if the resumed coroutine is
     869the backside of the resume, which checks if the resumed coroutine is
    837870cancelled. If cancelled, the exception is retrieved from the resumed coroutine,
    838871and a @CoroutineCancelled@ exception is constructed and loaded with the
    839872cancelled exception. It is then resumed as a regular exception with the default
    840873handler coming from the context of the resumption call.
     874This semantics allows a cancellation to cascade through an arbitrary set of resumed
     875coroutines back to the thread's coroutine, performing cleanup along the way.
    841876
    842877For thread cancellation, the exception is stored on the thread's main stack and
     
    848883null (as it is for the auto-generated joins on destructor call), the default is
    849884used, which is a program abort.
     885This semantics allows a cancellation to cascade through an arbitrary set of joining
     886threads back to the program's main, performing cleanup along the way.
    850887%; which gives the required handling on implicate join.
  • doc/theses/andrew_beach_MMath/performance.tex

    r4aba055 r2f19e03  
    66
    77Performance has been of secondary importance for most of this project.
    8 The driving for has been to get the features working, the only performance
    9 requirements were to make sure the tests for correctness rain in a reasonable
     8Instead, the goal has been to get the features working.
     9The only performance
      10requirement is to ensure the exception tests for correctness run in a reasonable
    1011amount of time.
    11 Still this is an implementation others could use for similar prototypes and
    12 so the results still have some use.
     12Much of the implementation is still reasonable and could be used for similar prototypes.
     13Hence,
     14the work still has some use.
     15To get a rough idea about the \CFA implementation, tests are run on \CFA, C++ and Java, which have similar termination-handling exceptions.
     16Tests are also run on \CFA and uC++, which has similar resumption-handling exceptions.
    1317
    14 \section{Test Set-Up}
    15 Tests will be run on \CFA, C++ and Java.
    16 
     18\section{Termination Comparison}
    1719C++ is the most comparable language because both it and \CFA use the same
    1820framework, libunwind.
    19 In fact the comparison is almost entirely a quality of implementation
     21In fact, the comparison is almost entirely a quality of implementation
    2022comparison. \CFA's EHM has had significantly less time to be optimized and
    2123does not generate its own assembly. It does have a slight advantage in that
    2224there are some features it does not handle.
    2325
    24 % Some languages I left out:
    25 % Python: Its a scripting language, different
    26 % uC++: Not well known and should the same results as C++, except for
    27 %   resumption which should be the same.
    28 \todo{Can we find a good language to compare resumptions in.}
     26The Java comparison is an opportunity to compare a managed memory model with unmanaged,
     27to see if there are any effects related to the exception model.
    2928
    30 All tests will be run inside a main loop which will perform the test
    31 repeatedly. This is to avoid letting and start-up or tear-down time from
     29\subsection{Test Set-Up}
     30All tests are run inside a main loop that performs the test
     31repeatedly. This design avoids start-up or tear-down time from
    3232affecting the timing results.
    33 This also means that tests cannot terminate the program, which does limit
     33A consequence is that tests cannot terminate the program, which does limit
    3434how tests can be implemented. There are catch-alls to keep unhandled
    35 exceptions from terminating the program.
     35exceptions from terminating tests.
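The harness therefore has roughly the following shape, sketched here in plain C with an assumed per-iteration test function (each language's version uses its own timing and exception facilities):
\begin{verbatim}
#include <stdio.h>
#include <time.h>

enum { TIMES = 1000000 };

static void one_iteration( void ) {
    /* one instance of the pattern under test: raise/handle, enter/leave, ... */
}

int main( void ) {
    struct timespec begin, end;
    clock_gettime( CLOCK_MONOTONIC, &begin );
    for ( int i = 0; i < TIMES; i += 1 ) one_iteration();
    clock_gettime( CLOCK_MONOTONIC, &end );
    printf( "%f s\n", (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) * 1e-9 );
}
\end{verbatim}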
    3636
    37 The exceptions used in this test will always be a new exception based off of
    38 the base exception. This should minimize and preformance differences based
      37The exceptions used in these tests are always a new exception derived from
     38the base exception. This requirement minimizes performance differences based
    3939on the object model.
    40 Catch-alls will be done by catching the root exception type (not using \Cpp's
     40Catch-alls are done by catching the root exception type (not using \Cpp's
    4141\code{C++}{catch(...)}).
    4242
     
    4444hot.
    4545
    46 \section{Tests}
     46\subsection{Tests}
     47The following tests capture the most important aspects of exception handling and should provide
     48a reasonable guide to programmers of where EHM costs occur.
     49
    4750\paragraph{Raise/Handle}
    4851What is the basic cost to raise and handle an exception?
    4952
    50 There are a number of factors that can effect this, for \CFA this includes
      53There are a number of factors that can affect this.
     54For \CFA this includes
    5155the type of raise,
    5256
     
    6165This has the same set-up as the raise/handle test except the intermediate
    6266stack frames contain either an object declaration with a destructor or a
    63 try statement with no handlers except for a finally clause.
      67@try@ statement with no handlers except a @finally@ clause.
    6468
    6569\paragraph{Enter/Leave}
     
    6771is thrown?
    6872
    69 This is the simplist pattern to test as it is a simple matter of entering
     73The test is a simple matter of entering
    7074and leaving a try statement.
    7175
     
    7478
    7579\paragraph{Re-throw and Conditional-Catch}
    76 How expencive it is to run a non-exception type check for a handler?
      80How expensive is it to run a non-exception type check for a handler?
    7781
    7882In this case different languages approach this problem differently, either
    7983through a re-throw or a conditional-catch.
    80 Where \CFA uses its condition other languages will have to unconditionally
     84Where \CFA uses its condition, other languages must unconditionally
     8185catch the exception and then re-throw it if the condition is false.
    8286
     
    8690% We could do a Cforall test without the catch all and a new default handler
    8791% that does a catch all.
    88 As a point of comparison one of the raise/handle tests (which one?) has
     92As a point of comparison, one of the raise/handle tests (which one?) has
     8993the same layout but never catches anything.
    9094
     
    100104%(I haven't actually figured out how to compare this, probably using something
    101105%related to -fexceptions.)
     106
     107
     108\section{Resumption Comparison}
     109% Some languages I left out:
     110% Python: Its a scripting language, different
     111% uC++: Not well known and should the same results as C++, except for
     112%   resumption which should be the same.
     113\todo{Can we find a good language to compare resumptions in.}
  • doc/theses/mubeen_zulfiqar_MMath/benchmarks.tex

    r4aba055 r2f19e03  
    3434\noindent
    3535====================
     36
      37\section{Performance Metrics of Memory Allocators}
     38
      39When it comes to memory allocators, there is no set standard of performance. The performance of a memory allocator depends highly on the usage pattern of the application: an allocator that is the best performer for one application may be the worst for another application with a completely different memory-usage pattern. It is extremely difficult to build one universally best memory allocator that outperforms every other allocator for every usage pattern. Consequently, there is no standard set of benchmarks for evaluating a memory allocator's performance.
     40
      41If we break down the goals of a memory allocator, there are two basic metrics on which its performance is evaluated:
     42
\begin{enumerate}
\item
Memory Overhead
\item
Speed
\end{enumerate}
     45
      46\subsection{Memory Overhead}
      47Memory overhead is the extra memory an allocator takes from the OS beyond what the application requests. Ideally, an allocator should get just enough memory from the OS to fulfill the application's request and should return it to the OS as soon as the application frees it. In practice, allocators retain more memory than the application has asked for, which causes memory overhead. Memory overhead can happen for several reasons.
     48
      49\subsubsection{Fragmentation}
      50Fragmentation is one of the major causes of memory overhead. It arises from situations that are either necessary for the proper functioning of the allocator, such as internal memory management and book-keeping, or out of the allocator's control, such as the application's usage pattern.
     51
      52\paragraph{Internal Fragmentation}
      53For internal book-keeping, allocators divide the raw memory given by the OS into chunks, blocks, or lists that can fulfill the application's requested sizes, and use part of that memory for headers, footers, etc. to store information about these chunks, blocks, or lists. This memory is used in addition to the memory requested by the application, because the allocator needs somewhere to store its book-keeping information. This extra memory consumed for the allocator's own book-keeping is called internal fragmentation. Although it causes memory overhead, this overhead is necessary for the allocator's proper functioning.
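As a purely illustrative sketch (not any particular allocator's layout), a per-allocation header like the following is what turns a small request into a larger internal block:
\begin{verbatim}
#include <stddef.h>

struct alloc_header {                      /* book-keeping prepended to each block */
    size_t size;                           /* usable size of the block             */
    struct alloc_header * next;            /* free-list link reused when freed     */
};
/* A request for 24 bytes really consumes 24 + sizeof(struct alloc_header) bytes;
   the extra bytes are internal fragmentation. */
\end{verbatim}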
     54
     55
     56*** FIX ME: Insert a figure of internal fragmentation with explanation
     57
      58\paragraph{External Fragmentation}
      59External fragmentation is the free memory between or around chunks of memory that are currently in use by the application. It is caused by segmentation of memory resulting from the application's usage pattern. Memory that is part of external fragmentation is completely free, as it is used neither by the allocator's internal book-keeping nor by the application. Ideally, an allocator should return a segment of memory to the OS as soon as the application frees it, but this is not always possible. Allocators get memory from the OS in one of two ways.
     60
      61\begin{itemize}
      62\item
      63MMap: an allocator can ask the OS for whole pages in the mmap area, then segment each page internally to fulfill the application's requests.
      64\item
      65Heap: an allocator can ask the OS for memory in the heap area using system calls such as sbrk. The heap area grows and shrinks only at its top end (the program break).
      66\end{itemize}
     67
      68If an allocator uses the mmap area, it can only return memory to the OS when an entire page is free, i.e., no chunk on the page is in use by the application. If even one chunk on a page is still in use, the allocator has to retain the whole page.
     69
      70If an allocator uses the heap area, it can only return the contiguous free memory at the top of the heap area, because the heap can shrink only at the program break. Free memory between chunks that are currently in use by the application cannot be returned.
     71
     72*** FIX ME: Insert a figure of above scenrio with explanation
     73
      74Even if the entire heap area is free except for one small chunk at the top that is still in use by the application, the allocator cannot return the free heap area to the OS, because it is not a contiguous region ending at the program break.
     75
     76*** FIX ME: Insert a figure of above scenrio with explanation
     77
      78Such scenarios cause external fragmentation, but they are out of the allocator's control and depend on the application's usage pattern.
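The following small C program (illustrative only; the 4096-byte regions are arbitrary) shows why the heap area can only be returned from its top:
\begin{verbatim}
#include <unistd.h>
#include <stdio.h>

int main( void ) {
    char * a = sbrk( 4096 );               /* lower region: later becomes free */
    char * b = sbrk( 4096 );               /* upper region: stays in use       */
    printf( "a=%p b=%p break=%p\n", (void *)a, (void *)b, sbrk( 0 ) );
    /* If the application frees everything in 'a' but keeps using 'b', the
       allocator cannot call sbrk(-4096) to return 'a': shrinking the break
       would release 'b', the region still in use at the top. */
    return 0;
}
\end{verbatim}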
     79
      80\subsubsection{Internal Memory Management}
      81Allocators such as je-malloc (FIX ME: insert reference) proactively get memory from the OS and divide it into chunks of certain sizes that can be used later to fulfill the application's requests. This causes memory overhead, as these chunks are created before the application requests them, and the application may never request memory of these sizes during its whole lifetime.
     82
     83*** FIX ME: Insert a figure of above scenrio with explanation
     84
      85Allocators such as rp-malloc (FIX ME: insert reference) maintain lists or blocks of sized memory segments freed by the application for future use. These lists are maintained without any guarantee that the application will ever request these sizes again.
     86
      87Such tactics are usually used to gain speed, as the allocator does not have to get raw memory from the OS and manage it at the time of the application's request, but they do cause memory overhead.
     88
      89Fragmentation and managed size-specific chunks of free memory can lead to heap blowup, as the allocator may not be able to use these fragments or size-specific free chunks to fulfill the application's requests for other sizes.
     90
      91\subsection{Speed}
      92When evaluating the performance of any piece of software, its runtime is usually the first thing examined. The same is true for memory allocators, but in their case speed does not only mean the runtime of the allocator's routines; other factors matter as well.
     93
      94\subsubsection{Runtime Speed}
      95Low runtime is the main goal of a memory allocator when it comes to proving its speed. Runtime is the time an allocator routine takes to complete its execution. As mentioned in (FIX ME: reference to routines' list), there are four basic routines used in memory allocation, and ideally each of them should be fast. Some allocator designs use proactive measures (FIX ME: local reference) to gain speed when allocating memory to the application. Some allocators perform allocation faster than freeing (FIX ME: graph reference), while others show similar speed whether memory is allocated or freed.
     96
      97\subsubsection{Memory Access Speed}
      98Runtime speed is not the only speed metric for memory allocators. The memory an allocator has given to the application also needs to be accessible as quickly as possible: the application should be able to read/write allocated memory quickly. The allocation method may introduce delays in memory-access speed, which is especially important in concurrent applications. Ideally, a memory allocator should allocate all memory on a cache line to only one thread, and no cache line should be shared among multiple threads. If an allocator allocates memory to multiple threads on the same cache line, that cache line may be invalidated frequently when two threads running on different processors read/write the same memory region, whereas a cache line used by only one thread is invalidated far less often. This sharing of one cache line among multiple threads is called false sharing (FIX ME: cite wasik).
     99
      100\paragraph{Active False Sharing}
      101Active false sharing is sharing of one cache line among multiple threads that is caused by the memory allocator. It happens when two threads request memory and the allocator places both allocations on the same cache line. If the threads then run on different processors, each with its own cache, and both start reading/writing the allocated memory simultaneously, each thread's cached copy of the line is invalidated every time the other thread writes to it. This slows the application down, as the processors must reload the cache line much more frequently.
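A minimal sketch of active false sharing (assuming 64-byte cache lines and that the two small allocations happen to land on the same line):
\begin{verbatim}
#include <pthread.h>
#include <stdlib.h>

static long * slot[2];

static void * writer( void * arg ) {
    long * p = slot[(long)arg];
    for ( long i = 0; i < 100000000; i += 1 ) *p += 1;   /* each write may invalidate
                                                            the other core's cached line */
    return NULL;
}

int main( void ) {
    slot[0] = malloc( sizeof(long) );      /* if the allocator places these two       */
    slot[1] = malloc( sizeof(long) );      /* objects on one cache line, the threads  */
    *slot[0] = *slot[1] = 0;               /* contend even though they share no data  */
    pthread_t t[2];
    for ( long i = 0; i < 2; i += 1 ) pthread_create( &t[i], NULL, writer, (void *)i );
    for ( long i = 0; i < 2; i += 1 ) pthread_join( t[i], NULL );
    free( slot[0] ); free( slot[1] );
    return 0;
}
\end{verbatim}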
     102
     103*** FIX ME: Insert a figure of above scenrio with explanation
     104
      105\paragraph{Passive False Sharing}
      106Passive false sharing is false sharing caused by the application rather than the memory allocator. The allocator may later preserve passive false sharing instead of eradicating it, but the sharing is initiated by the application.
     107
      108\subparagraph{Program-Induced Passive False Sharing}
      109Program-induced false sharing is completely out of the memory allocator's control and is caused purely by the application. It begins when a thread creates multiple dynamic objects and the allocator places them on the same cache line, since the objects are created by the same thread. Passive false sharing then occurs if this thread passes one of these objects to another thread while retaining the rest, or passes some or all of the remaining objects to third threads. Now one cache line is shared among multiple threads, but the sharing is caused by the application, not the allocator. It is out of the allocator's control and has a performance impact similar to active false sharing (FIX ME: cite local) if the threads sharing the cache line read/write the given objects simultaneously.
     110
     111*** FIX ME: Insert a figure of above scenrio 1 with explanation
     112
     113*** FIX ME: Insert a figure of above scenrio 2 with explanation
     114
      115\subparagraph{Program-Induced, Allocator-Preserved Passive False Sharing}
      116Program-induced, allocator-preserved passive false sharing is another interesting case of passive false sharing, for which both the application and the allocator are partially responsible. It starts the same way as program-induced false sharing (FIX ME: cite local): an application thread creates multiple dynamic objects on the same cache line and distributes them among multiple threads, so one cache line becomes shared among those threads. The allocator becomes involved when one of the threads that received an object on the shared cache line frees it and then allocates another object, and the allocator returns the same storage (on the shared cache line) that the thread just freed. Although the application caused the false sharing in the first place, to prevent further false sharing the allocator should have returned the new object on some other cache line used only by the allocating thread. In terms of performance impact, this passive false sharing slows the application down just like any other kind of false sharing if the threads sharing the cache line read/write the objects simultaneously.
     117
     118*** FIX ME: Insert a figure of above scenrio with explanation
  • libcfa/configure.ac

    r4aba055 r2f19e03  
    131131#io_uring 5.5 uses enum values
    132132#io_uring 5.6 and later uses probes
     133
     134AH_TEMPLATE([CFA_HAVE_LINUX_RSEQ_H],[Defined if rseq support is present when compiling libcfathread.])
     135AC_CHECK_HEADERS([linux/rseq.h], [AC_DEFINE(CFA_HAVE_LINUX_RSEQ_H)])
     136
     137AH_TEMPLATE([CFA_HAVE_LINUX_LIBRSEQ],[Defined if librseq support is present when compiling libcfathread.])
      138AC_CHECK_LIB([rseq], [rseq_available], [AC_DEFINE(CFA_HAVE_LINUX_LIBRSEQ)], [])
    133139
    134140AH_TEMPLATE([CFA_HAVE_LINUX_IO_URING_H],[Defined if io_uring support is present when compiling libcfathread.])
  • libcfa/src/Makefile.am

    r4aba055 r2f19e03  
    6969        common.hfa \
    7070        fstream.hfa \
    71         strstream.hfa \
    7271        heap.hfa \
    7372        iostream.hfa \
     
    7877        rational.hfa \
    7978        stdlib.hfa \
     79        strstream.hfa \
    8080        time.hfa \
    8181        bits/weakso_locks.hfa \
     
    8383        containers/pair.hfa \
    8484        containers/result.hfa \
    85         containers/vector.hfa
     85        containers/vector.hfa \
     86        device/cpu.hfa
    8687
    8788libsrc = ${inst_headers_src} ${inst_headers_src:.hfa=.cfa} \
  • libcfa/src/concurrency/kernel.cfa

    r4aba055 r2f19e03  
    422422                __cfactx_switch( &proc_cor->context, &thrd_dst->context );
    423423                // when __cfactx_switch returns we are back in the processor coroutine
     424
     425
    424426
    425427                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
     
    522524
    523525        /* paranoid */ verify( ! __preemption_enabled() );
    524         /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    525         /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
     526        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
     527        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
    526528}
    527529
  • libcfa/src/concurrency/locks.hfa

    r4aba055 r2f19e03  
    2121#include "bits/weakso_locks.hfa"
    2222#include "containers/queueLockFree.hfa"
    23 
     23#include "limits.hfa"
    2424#include "thread.hfa"
    2525
     
    8585        bool tryP(BinaryBenaphore & this) {
    8686                ssize_t c = this.counter;
     87                /* paranoid */ verify( c > MIN );
    8788                return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
    8889        }
     
    9293                ssize_t c = 0;
    9394                for () {
     95                        /* paranoid */ verify( this.counter < MAX );
    9496                        if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    9597                                if (c == 0) return true;
  • libcfa/src/exception.c

    r4aba055 r2f19e03  
    2727#include "stdhdr/assert.h"
    2828#include "virtual.h"
    29 
    30 #if defined( __ARM_ARCH )
    31 #warning FIX ME: temporary hack to keep ARM build working
    32 #ifndef _URC_FATAL_PHASE1_ERROR
    33 #define _URC_FATAL_PHASE1_ERROR 3
    34 #endif // ! _URC_FATAL_PHASE1_ERROR
    35 #ifndef _URC_FATAL_PHASE2_ERROR
    36 #define _URC_FATAL_PHASE2_ERROR 2
    37 #endif // ! _URC_FATAL_PHASE2_ERROR
    38 #endif // __ARM_ARCH
    39 
    4029#include "lsda.h"
    4130
     
    301290}
    302291
    303 #if defined( __x86_64 ) || defined( __i386 )
     292#if defined( __x86_64 ) || defined( __i386 ) || defined( __ARM_ARCH )
    304293// This is our personality routine. For every stack frame annotated with
    305294// ".cfi_personality 0x3,__gcfa_personality_v0" this function will be called twice when unwinding.
     
    419408                                    _Unwind_GetCFA(unwind_context) + 24;
    420409#                               elif defined( __ARM_ARCH )
    421 #                                   warning FIX ME: check if anything needed for ARM
    422                                     42;
     410                                    _Unwind_GetCFA(unwind_context) + 40;
    423411#                               endif
    424412                                int (*matcher)(exception_t *) = *(int(**)(exception_t *))match_pos;
     
    537525        // HEADER
    538526        ".LFECFA1:\n"
     527#if defined( __x86_64 ) || defined( __i386 )
    539528        "       .globl  __gcfa_personality_v0\n"
     529#else // defined( __ARM_ARCH )
     530        "       .global __gcfa_personality_v0\n"
     531#endif
    540532        "       .section        .gcc_except_table,\"a\",@progbits\n"
    541533        // TABLE HEADER (important field is the BODY length at the end)
     
    569561        // No clue what this does specifically
    570562        "       .section        .data.rel.local.CFA.ref.__gcfa_personality_v0,\"awG\",@progbits,CFA.ref.__gcfa_personality_v0,comdat\n"
     563#if defined( __x86_64 ) || defined( __i386 )
    571564        "       .align 8\n"
     565#else // defined( __ARM_ARCH )
     566        "       .align 3\n"
     567#endif
    572568        "       .type CFA.ref.__gcfa_personality_v0, @object\n"
    573569        "       .size CFA.ref.__gcfa_personality_v0, 8\n"
     
    575571#if defined( __x86_64 )
    576572        "       .quad __gcfa_personality_v0\n"
    577 #else // then __i386
     573#elif defined( __i386 )
    578574        "       .long __gcfa_personality_v0\n"
     575#else // defined( __ARM_ARCH )
     576        "       .xword __gcfa_personality_v0\n"
    579577#endif
    580578);
     
    583581        // HEADER
    584582        ".LFECFA1:\n"
     583#if defined( __x86_64 ) || defined( __i386 )
    585584        "       .globl  __gcfa_personality_v0\n"
     585#else // defined( __ARM_ARCH )
     586        "       .global __gcfa_personality_v0\n"
     587#endif
    586588        "       .section        .gcc_except_table,\"a\",@progbits\n"
    587589        // TABLE HEADER (important field is the BODY length at the end)
     
    612614#pragma GCC pop_options
    613615
    614 #elif defined( __ARM_ARCH )
    615 _Unwind_Reason_Code __gcfa_personality_v0(
    616                 int version,
    617                 _Unwind_Action actions,
    618                 unsigned long long exception_class,
    619                 struct _Unwind_Exception * unwind_exception,
    620                 struct _Unwind_Context * unwind_context) {
    621         return _URC_CONTINUE_UNWIND;
    622 }
    623 
    624 __attribute__((noinline))
    625 void __cfaehm_try_terminate(void (*try_block)(),
    626                 void (*catch_block)(int index, exception_t * except),
    627                 __attribute__((unused)) int (*match_block)(exception_t * except)) {
    628 }
    629616#else
    630617        #error unsupported hardware architecture
    631 #endif // __x86_64 || __i386
     618#endif // __x86_64 || __i386 || __ARM_ARCH
  • libcfa/src/interpose.cfa

    r4aba055 r2f19e03  
    9595
    9696extern "C" {
    97         void __cfaabi_interpose_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_CORE ) ));
    9897        void __cfaabi_interpose_startup( void ) {
    9998                const char *version = 0p;
  • libcfa/src/startup.cfa

    r4aba055 r2f19e03  
    2020
    2121extern "C" {
    22     void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) ));
    23     void __cfaabi_appready_startup( void ) {
     22        void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) ));
     23        void __cfaabi_appready_startup( void ) {
    2424                tzset();                                                                                // initialize time global variables
    2525                setlocale( LC_NUMERIC, getenv("LANG") );
     
    2828                heapAppStart();
    2929                #endif // __CFA_DEBUG__
    30     } // __cfaabi_appready_startup
     30        } // __cfaabi_appready_startup
    3131
    32     void __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) ));
    33     void __cfaabi_appready_shutdown( void ) {
     32        void __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) ));
     33        void __cfaabi_appready_shutdown( void ) {
    3434                #ifdef __CFA_DEBUG__
    3535                extern void heapAppStop();
    3636                heapAppStop();
    3737                #endif // __CFA_DEBUG__
    38     } // __cfaabi_appready_shutdown
     38        } // __cfaabi_appready_shutdown
    3939
    40     void disable_interrupts() __attribute__(( weak )) {}
    41     void enable_interrupts() __attribute__(( weak )) {}
     40        void disable_interrupts() __attribute__(( weak )) {}
     41        void enable_interrupts() __attribute__(( weak )) {}
     42
     43
     44        extern void __cfaabi_interpose_startup( void );
     45        extern void __cfaabi_device_startup   ( void );
     46        extern void __cfaabi_device_shutdown  ( void );
     47
     48        void __cfaabi_core_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_CORE ) ));
     49        void __cfaabi_core_startup( void ) {
     50                __cfaabi_interpose_startup();
     51                __cfaabi_device_startup();
     52        } // __cfaabi_core_startup
     53
     54        void __cfaabi_core_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_CORE ) ));
     55        void __cfaabi_core_shutdown( void ) {
     56                __cfaabi_device_shutdown();
     57        } // __cfaabi_core_shutdown
    4258} // extern "C"
    4359
  • tests/coroutine/fibonacci.cfa

    r4aba055 r2f19e03  
    3131}
    3232
    33 int next( Fibonacci & fib ) with( fib ) {
    34         resume( fib );                                                                          // restart last suspend
    35         return fn;
    36 }
    37 
    3833int main() {
    3934        Fibonacci f1, f2;
    4035        for ( 10 ) {                                                                            // print N Fibonacci values
    41                 sout | next( f1 ) | next( f2 );
     36                sout | resume( f1 ).fn | resume( f2 ).fn;
    4237        } // for
    4338}
  • tests/generator/fibonacci.cfa

    r4aba055 r2f19e03  
    88//
    99// Author           : Thierry Delisle
    10 // Created On       : Mon Mar  1 16:54:23 2020
    11 // Last Modified By :
    12 // Last Modified On :
    13 // Update Count     :
     10// Created On       : Mon Mar 1 16:54:23 2020
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Thu Jun 10 21:54:14 2021
     13// Update Count     : 3
    1414//
     15
     16#include <fstream.hfa>
    1517
    1618generator Fib {
     
    1820};
    1921
    20 void main(Fib & b) with (b) {
     22void main(Fib & fib) with (fib) {
    2123        [fn1, fn] = [1, 0];
    2224        for () {
     
    2931        Fib f1, f2;
    3032        for ( 10 ) {
    31                 resume( f1 );
    32                 resume( f2 );
    33                 printf("%d %d\n", f1.fn, f2.fn);
     33                resume( f1 ); resume( f2 );
     34                sout | f1.fn | f2.fn;
     35                // sout | resume( f1 ).fn | resume( f2 ).fn; // compiler bug
    3436        }
    35 
    3637}
    3738
  • tests/generator/fmtLines.cfa

    r4aba055 r2f19e03  
    99// Author           : Thierry Delisle
    1010// Created On       : Thu Mar  5 16:09:08 2020
    11 // Last Modified By :
    12 // Last Modified On :
    13 // Update Count     :
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Thu Jun 10 21:56:22 2021
     13// Update Count     : 2
    1414//
    1515
  • tests/generator/suspend_then.cfa

    r4aba055 r2f19e03  
    99// Author           : Peter A. Buhr
    1010// Created On       : Mon Apr 29 12:01:35 2019
    11 // Last Modified By :
    12 // Last Modified On :
    13 // Update Count     :
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Thu Jun 10 21:55:51 2021
     13// Update Count     : 1
    1414//
    1515
  • tests/unified_locking/fast.cfa

    r4aba055 r2f19e03  
    2222uint32_t cs() {
    2323        $thread * me = active_thread();
    24         uint32_t value = (uint32_t)me;
     24        uint32_t value;
    2525        lock(mo.l);
    2626        {
     
    2828                mo.id = me;
    2929                yield(random(5));
     30                value = ((uint32_t)random()) ^ ((uint32_t)me);
    3031                if(mo.id != me) sout | "Intruder!";
    3132                mo.sum = tsum + value;