Changeset c083c3d


Timestamp:
May 1, 2023, 6:15:26 PM
Author:
Peter A. Buhr <pabuhr@…>
Branches:
ADT, ast-experimental, master
Children:
67408114
Parents:
4daf79f (diff), 6e1e2d0 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
git-author:
Peter A. Buhr <pabuhr@…> (05/01/23 16:59:14)
git-committer:
Peter A. Buhr <pabuhr@…> (05/01/23 18:15:26)
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Files:
12 added
5 deleted
34 edited

  • doc/theses/colby_parsons_MMAth/benchmarks/actors/data/nasus_CFA.txt

    r4daf79f rc083c3d  
    115
    221 2 4 8 16 24 32 48
    3 CFA-LV CFA-NS CFA-R
     3Longest-Victim No-Stealing Random
    44executor
    5 CFA-LV:
     5Longest-Victim:
    66proc    time (s)
    771       29.22
     
    454548      1.20
    464648      1.20
    47 CFA-NS:
     47No-Stealing:
    4848proc    time (s)
    49491       28.25
     
    878748      1.18
    888848      1.16
    89 CFA-R:
     89Random:
    9090proc    time (s)
    91911       28.58
     
    131131
    132132matrix
    133 CFA-LV:
     133Longest-Victim:
    134134proc    time (s)
    1351351       105.48
     
    17317348      2.75
    17417448      2.96
    175 CFA-NS:
     175No-Stealing:
    176176proc    time (s)
    1771771       106.01
     
    21521548      2.78
    21621648      2.92
    217 CFA-R:
     217Random:
    218218proc    time (s)
    2192191       105.91
     
    259259
    260260repeat
    261 CFA-LV:
     261Longest-Victim:
    262262proc    time (s)
    2632631       1.17
     
    30130148      13.73
    30230248      14.55
    303 CFA-NS:
     303No-Stealing:
    304304proc    time (s)
    3053051       1.15
     
    34334348      13.03
    34434448      12.83
    345 CFA-R:
     345Random:
    346346proc    time (s)
    3473471       1.15
     
    387387
    388388balance_one
    389 CFA-LV:
     389Longest-Victim:
    390390proc    time (s)
    3913911       20.06
     
    42942948      1.11
    43043048      1.12
    431 CFA-NS:
     431No-Stealing:
    432432proc    time (s)
    4334331       20.13
     
    47147148      19.95
    47247248      20.00
    473 CFA-R:
     473Random:
    474474proc    time (s)
    4754751       19.92
     
    515515
    516516balance_multi
    517 CFA-LV:
     517Longest-Victim:
    518518proc    time (s)
    5195191       8.17
     
    55755748      5.75
    55855848      5.68
    559 CFA-NS:
     559No-Stealing:
    560560proc    time (s)
    5615611       8.10
     
    59959948      9.28
    60060048      9.26
    601 CFA-R:
     601Random:
    602602proc    time (s)
    6036031       8.08
  • doc/theses/colby_parsons_MMAth/benchmarks/actors/data/pyke_CFA.txt

    r4daf79f rc083c3d  
    115
    221 2 4 8 16 24 32 48
    3 CFA-LV CFA-NS CFA-R
     3Longest-Victim No-Stealing Random
    44executor
    5 CFA-LV:
     5Longest-Victim:
    66proc    time (s)
    771       29.04
     
    454548      2.58
    464648      2.55
    47 CFA-NS:
     47No-Stealing:
    4848proc    time (s)
    49491       28.15
     
    878748      2.59
    888848      2.60
    89 CFA-R:
     89Random:
    9090proc    time (s)
    91911       29.06
     
    131131
    132132matrix
    133 CFA-LV:
     133Longest-Victim:
    134134proc    time (s)
    1351351       127.44
     
    17317348      6.83
    17417448      6.81
    175 CFA-NS:
     175No-Stealing:
    176176proc    time (s)
    1771771       127.64
     
    21521548      6.77
    21621648      6.74
    217 CFA-R:
     217Random:
    218218proc    time (s)
    2192191       127.26
     
    259259
    260260repeat
    261 CFA-LV:
     261Longest-Victim:
    262262proc    time (s)
    2632631       1.16
     
    30130148      19.75
    30230248      19.71
    303 CFA-NS:
     303No-Stealing:
    304304proc    time (s)
    3053051       1.18
     
    34334348      13.88
    34434448      13.71
    345 CFA-R:
     345Random:
    346346proc    time (s)
    3473471       1.18
     
    387387
    388388balance_one
    389 CFA-LV:
     389Longest-Victim:
    390390proc    time (s)
    3913911       19.46
     
    42942948      2.12
    43043048      2.17
    431 CFA-NS:
     431No-Stealing:
    432432proc    time (s)
    4334331       21.00
     
    47147148      47.50
    47247248      47.72
    473 CFA-R:
     473Random:
    474474proc    time (s)
    4754751       20.81
     
    515515
    516516balance_multi
    517 CFA-LV:
     517Longest-Victim:
    518518proc    time (s)
    5195191       7.94
     
    55755748      14.38
    55855848      14.50
    559 CFA-NS:
     559No-Stealing:
    560560proc    time (s)
    5615611       8.48
     
    59959948      21.50
    60060048      21.15
    601 CFA-R:
     601Random:
    602602proc    time (s)
    6036031       8.49
  • doc/theses/colby_parsons_MMAth/benchmarks/channels/plotData.py

    r4daf79f rc083c3d  
    7070    if currBench == Bench.Unset:
    7171        if line == "contend:":
    72             name = "Contend"
     72            name = "Channel Contention"
    7373            currBench = Bench.Contend
    7474        elif line == "zero:":
  • doc/theses/colby_parsons_MMAth/code/swap_queues.cfa

    r4daf79f rc083c3d  
    1 // this is a code stub and will not compile
    2 
    3 // tries to atomically swap two queues and returns 0p if the swap failed
    4 // returns ptr to newly owned queue if swap succeeds
    5 static inline work_queue * try_swap_queues( worker & this, unsigned int victim_idx, unsigned int my_idx ) with(this) {
     1// sequential equivalent swap
     2void swap( uint victim_idx, uint my_idx  ) {
     3    // Step 0:
    64    work_queue * my_queue = request_queues[my_idx];
    7     work_queue * other_queue = request_queues[victim_idx];
    8 
    9     // if either queue is 0p then they are in the process of being stolen
    10     if ( other_queue == 0p || my_queue == 0p ) return 0p;
    11 
    12     // try to set our queue ptr to be 0p. If it fails someone moved our queue so return false
    13     if ( !__atomic_compare_exchange_n( &request_queues[my_idx], &my_queue, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
    14         return 0p;
    15 
    16     // try to set other queue ptr to be our queue ptr. If it fails someone moved the other queue so fix up then return false
    17     if ( !__atomic_compare_exchange_n( &request_queues[victim_idx], &other_queue, my_queue, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
    18         /* paranoid */ verify( request_queues[my_idx] == 0p );
    19         request_queues[my_idx] = my_queue; // reset my queue ptr back to appropriate val
    20         return 0p;
    21     }
    22 
    23     // we have successfully swapped and since our queue is 0p no one will touch it so write back new queue ptr non atomically
    24     request_queues[my_idx] = other_queue; // last write does not need to be atomic
    25     return other_queue;
     5    work_queue * vic_queue = request_queues[victim_idx];
     6    // Step 2:
     7    request_queues[my_idx] = 0p;
     8    // Step 3:
     9    request_queues[victim_idx] = my_queue;
     10    // Step 4:
     11    request_queues[my_idx] = vic_queue;
    2612}
    2713
     
    3521
    3622bool try_swap_queues( worker & this, uint victim_idx, uint my_idx ) with(this) {
     23    // Step 0:
     24    // request_queues is the shared array of all sharded queues
    3725    work_queue * my_queue = request_queues[my_idx];
    3826    work_queue * vic_queue = request_queues[victim_idx];
    3927
     28    // Step 1:
    4029    // If either queue is 0p then they are in the process of being stolen
    4130    // 0p is CForAll's equivalent of C++'s nullptr
    42     if ( vic_queue == 0p || my_queue == 0p ) return false;
     31    if ( vic_queue == 0p ) return false;
    4332
    44     // Try to set our queue ptr to be 0p.
    45     // If this CAS fails someone moved our queue so return false
     33    // Step 2:
     34    // Try to set thief's queue ptr to be 0p.
     35    // If this CAS fails someone stole thief's queue so return false
    4636    if ( !CAS( &request_queues[my_idx], &my_queue, 0p ) )
    4737        return false;
    48 
    49     // Try to set other queue ptr to be our queue ptr.
    50     // If it fails someone moved the other queue, so fix up then return false
     38   
     39    // Step 3:
     40    // Try to set victim queue ptr to be thief's queue ptr.
     41    // If it fails someone stole the other queue, so fix up then return false
    5142    if ( !CAS( &request_queues[victim_idx], &vic_queue, my_queue ) ) {
    5243        request_queues[my_idx] = my_queue; // reset queue ptr back to prev val
     
    5445    }
    5546
     47    // Step 4:
    5648    // Successfully swapped.
    57     // Our queue is 0p so no one will touch it so write back without CAS is safe
     49    // Thief's ptr is 0p so no one will touch it
     50    // Write back without CAS is safe
    5851    request_queues[my_idx] = vic_queue;
    5952    return true;
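
The replacement above swaps the old try_swap_queues stub for a documented four-step version: snapshot both slots, bail if either queue is mid-steal, CAS the thief's own slot to 0p, CAS the victim's slot to the thief's queue (undoing step 2 on failure), then write the victim's queue back with a plain store. A minimal plain-C sketch of that two-CAS pattern, using GCC atomic builtins; the names NUM_QUEUES, queues, work_queue and try_swap are illustrative stand-ins, not the CFA runtime's identifiers:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct work_queue work_queue;    // opaque; only pointers are swapped

    #define NUM_QUEUES 64
    static work_queue * queues[NUM_QUEUES];  // shared array of per-worker queue pointers

    // Try to atomically swap queues[my_idx] and queues[victim_idx] with two single-word CASes.
    static bool try_swap( unsigned my_idx, unsigned victim_idx ) {
        // Step 0: snapshot both slots.
        work_queue * my_queue  = queues[my_idx];
        work_queue * vic_queue = queues[victim_idx];

        // Step 1: a NULL slot means that queue is currently being stolen; give up.
        if ( vic_queue == NULL || my_queue == NULL ) return false;

        // Step 2: claim our own slot by swinging it to NULL; failure means our queue moved.
        if ( !__atomic_compare_exchange_n( &queues[my_idx], &my_queue, NULL,
                                           false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
            return false;

        // Step 3: install our queue in the victim's slot; on failure undo step 2
        // (our slot is still NULL and only we may write it) and give up.
        if ( !__atomic_compare_exchange_n( &queues[victim_idx], &vic_queue, my_queue,
                                           false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
            queues[my_idx] = my_queue;
            return false;
        }

        // Step 4: our slot is still NULL and unreachable to other thieves, so a plain
        // store of the victim's queue completes the swap.
        queues[my_idx] = vic_queue;
        return true;
    }

The key invariant is established in step 2: once the thief's own slot is NULL, no other thief can pass step 1 against it, so the final plain store in step 4 cannot race.
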
  • doc/theses/colby_parsons_MMAth/version.sh

    • Property mode changed from 100755 to 100644
  • libcfa/src/bits/weakso_locks.cfa

    r4daf79f rc083c3d  
    1515// Update Count     :
    1616//
    17 
    1817#include "bits/weakso_locks.hfa"
    19 
    2018#pragma GCC visibility push(default)
    2119
     
    3028void on_wakeup( blocking_lock &, size_t ) {}
    3129size_t wait_count( blocking_lock & ) { return 0; }
     30bool register_select( blocking_lock & this, select_node & node ) { return false; }
     31bool unregister_select( blocking_lock & this, select_node & node ) { return false; }
     32bool on_selected( blocking_lock & this, select_node & node ) { return true; }
     33
  • libcfa/src/bits/weakso_locks.hfa

    r4daf79f rc083c3d  
    2323#include "containers/list.hfa"
    2424
    25 struct thread$;
     25struct select_node;
    2626
    2727//-----------------------------------------------------------------------------
     
    3232
    3333        // List of blocked threads
    34         dlist( thread$ ) blocked_threads;
     34        dlist( select_node ) blocked_threads;
    3535
    3636        // Count of current blocked threads
     
    6060void on_wakeup( blocking_lock & this, size_t ) OPTIONAL_THREAD;
    6161size_t wait_count( blocking_lock & this ) OPTIONAL_THREAD;
     62bool register_select( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;
     63bool unregister_select( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;
     64bool on_selected( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;
    6265
    6366//----------
     
    7578static inline void   on_wakeup( multiple_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    7679static inline void   on_notify( multiple_acquisition_lock & this, struct thread$ * t ){ on_notify( (blocking_lock &)this, t ); }
     80static inline bool   register_select( multiple_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
     81static inline bool   unregister_select( multiple_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
     82static inline bool   on_selected( multiple_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
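
The register_select/unregister_select/on_selected trio added here is the same per-resource hook set that reappears below for channels, futures and the other lock types. Purely to illustrate that shape, a plain-C rendering might look like the following; the struct, field and function names are hypothetical, and the CFA code uses overloading on each resource type rather than an explicit table:

    #include <stdbool.h>
    #include <stddef.h>

    struct select_node;   // per-clause bookkeeping owned by the waituntil statement

    // Hypothetical function-pointer view of the three hooks.
    typedef struct {
        bool (*register_select)( void * resource, struct select_node * node );   // true: clause ready now
        bool (*unregister_select)( void * resource, struct select_node * node ); // true: clause was satisfied
        bool (*on_selected)( void * resource, struct select_node * node );       // post-selection hook
        void * resource;
    } selectable;

    // One simplified way a waituntil-style driver could use the hooks: register each clause,
    // stopping early if some resource reports it is already satisfiable; otherwise the caller
    // parks and later unregisters the losing clauses.
    static int register_clauses( selectable * clauses, struct select_node ** nodes, size_t n ) {
        for ( size_t i = 0; i < n; i += 1 )
            if ( clauses[i].register_select( clauses[i].resource, nodes[i] ) )
                return (int)i;   // clause i can run immediately
        return -1;               // nothing ready yet
    }
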
  • libcfa/src/concurrency/channel.hfa

    r4daf79f rc083c3d  
    44#include <list.hfa>
    55#include <mutex_stmt.hfa>
    6 
    7 // link field used for threads waiting on channel
    8 struct wait_link {
    9     // used to put wait_link on a dl queue
    10     inline dlink(wait_link);
    11 
    12     // waiting thread
    13     struct thread$ * t;
    14 
    15     // shadow field
    16     void * elem;
    17 };
    18 P9_EMBEDDED( wait_link, dlink(wait_link) )
    19 
    20 static inline void ?{}( wait_link & this, thread$ * t, void * elem ) {
    21     this.t = t;
    22     this.elem = elem;
    23 }
    24 
    25 // wake one thread from the list
    26 static inline void wake_one( dlist( wait_link ) & queue ) {
    27     wait_link & popped = try_pop_front( queue );
    28     unpark( popped.t );
    29 }
     6#include "select.hfa"
    307
    318// returns true if woken due to shutdown
    329// blocks thread on list and releases passed lock
    33 static inline bool block( dlist( wait_link ) & queue, void * elem_ptr, go_mutex & lock ) {
    34     wait_link w{ active_thread(), elem_ptr };
    35     insert_last( queue, w );
     10static inline bool block( dlist( select_node ) & queue, void * elem_ptr, go_mutex & lock ) {
     11    select_node sn{ active_thread(), elem_ptr };
     12    insert_last( queue, sn );
    3613    unlock( lock );
    3714    park();
    38     return w.elem == 0p;
     15    return sn.extra == 0p;
     16}
     17
     18// Waituntil support (un)register_select helper routine
     19// Sets select node avail if not special OR case and then unlocks
     20static inline void __set_avail_then_unlock( select_node & node, go_mutex & mutex_lock ) {
     21    if ( node.park_counter ) __make_select_node_available( node );
     22    unlock( mutex_lock );
    3923}
    4024
     
    5943    size_t size, front, back, count;
    6044    T * buffer;
    61     dlist( wait_link ) prods, cons; // lists of blocked threads
     45    dlist( select_node ) prods, cons; // lists of blocked threads
    6246    go_mutex mutex_lock;            // MX lock
    6347    bool closed;                    // indicates channel close/open
     
    7054    size = _size;
    7155    front = back = count = 0;
    72     buffer = aalloc( size );
     56    if ( size != 0 ) buffer = aalloc( size );
    7357    prods{};
    7458    cons{};
     
    8771    #endif
    8872    verifyf( cons`isEmpty && prods`isEmpty, "Attempted to delete channel with waiting threads (Deadlock).\n" );
    89     delete( buffer );
     73    if ( size != 0 ) delete( buffer );
    9074}
    9175static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
     
    10286    // flush waiting consumers and producers
    10387    while ( has_waiting_consumers( chan ) ) {
    104         cons`first.elem = 0p;
     88        if( !__handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race
     89            break;  // if __handle_waituntil_OR returns false cons is empty so break
     90        cons`first.extra = 0p;
    10591        wake_one( cons );
    10692    }
    10793    while ( has_waiting_producers( chan ) ) {
    108         prods`first.elem = 0p;
     94        if( !__handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race
     95            break;  // if __handle_waituntil_OR returns false prods is empty so break
     96        prods`first.extra = 0p;
    10997        wake_one( prods );
    11098    }
     
    114102static inline void is_closed( channel(T) & chan ) with(chan) { return closed; }
    115103
     104// used to hand an element to a blocked consumer and signal it
     105static inline void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
     106    memcpy( cons`first.extra, (void *)&elem, sizeof(T) ); // do waiting consumer work
     107    wake_one( cons );
     108}
     109
     110// used to hand an element to a blocked producer and signal it
     111static inline void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
     112    memcpy( (void *)&retval, prods`first.extra, sizeof(T) );
     113    wake_one( prods );
     114}
     115
    116116static inline void flush( channel(T) & chan, T elem ) with(chan) {
    117117    lock( mutex_lock );
    118118    while ( count == 0 && !cons`isEmpty ) {
    119         memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
    120         wake_one( cons );
     119        __cons_handoff( chan, elem );
    121120    }
    122121    unlock( mutex_lock );
     
    125124// handles buffer insert
    126125static inline void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
    127     memcpy((void *)&buffer[back], (void *)&elem, sizeof(T));
     126    memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) );
    128127    count += 1;
    129128    back++;
     
    131130}
    132131
    133 // does the buffer insert or hands elem directly to consumer if one is waiting
    134 static inline void __do_insert( channel(T) & chan, T & elem ) with(chan) {
    135     if ( count == 0 && !cons`isEmpty ) {
    136         memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
    137         wake_one( cons );
    138     } else __buf_insert( chan, elem );
    139 }
    140 
    141132// needed to avoid an extra copy in closed case
    142133static inline bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
     
    145136    operations++;
    146137    #endif
     138
     139    ConsEmpty: if ( !cons`isEmpty ) {
     140        if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
     141        __cons_handoff( chan, elem );
     142        unlock( mutex_lock );
     143        return true;
     144    }
     145
    147146    if ( count == size ) { unlock( mutex_lock ); return false; }
    148     __do_insert( chan, elem );
     147
     148    __buf_insert( chan, elem );
    149149    unlock( mutex_lock );
    150150    return true;
     
    157157// handles closed case of insert routine
    158158static inline void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
    159     channel_closed except{&channel_closed_vt, &elem, &chan };
     159    channel_closed except{ &channel_closed_vt, &elem, &chan };
    160160    throwResume except; // throw closed resumption
    161161    if ( !__internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination
     
    182182    }
    183183
    184     // have to check for the zero size channel case
    185     if ( size == 0 && !cons`isEmpty ) {
    186         memcpy(cons`first.elem, (void *)&elem, sizeof(T));
    187         wake_one( cons );
    188         unlock( mutex_lock );
    189         return true;
     184    // buffer count must be zero if cons are blocked (also handles zero-size case)
     185    ConsEmpty: if ( !cons`isEmpty ) {
     186        if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
     187        __cons_handoff( chan, elem );
     188        unlock( mutex_lock );
     189        return;
    190190    }
    191191
     
    202202    } // if
    203203
    204     if ( count == 0 && !cons`isEmpty ) {
    205         memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
    206         wake_one( cons );
    207     } else __buf_insert( chan, elem );
    208    
    209     unlock( mutex_lock );
    210     return;
    211 }
    212 
    213 // handles buffer remove
    214 static inline void __buf_remove( channel(T) & chan, T & retval ) with(chan) {
    215     memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
     204    __buf_insert( chan, elem );
     205    unlock( mutex_lock );
     206}
     207
     208// does the buffer remove and potentially does waiting producer work
     209static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
     210    memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) );
    216211    count -= 1;
    217212    front = (front + 1) % size;
    218 }
    219 
    220 // does the buffer remove and potentially does waiting producer work
    221 static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
    222     __buf_remove( chan, retval );
    223213    if (count == size - 1 && !prods`isEmpty ) {
    224         __buf_insert( chan, *(T *)prods`first.elem );  // do waiting producer work
     214        if ( !__handle_waituntil_OR( prods ) ) return;
     215        __buf_insert( chan, *(T *)prods`first.extra );  // do waiting producer work
    225216        wake_one( prods );
    226217    }
     
    233224    operations++;
    234225    #endif
     226
     227    ZeroSize: if ( size == 0 && !prods`isEmpty ) {
     228        if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
     229        __prods_handoff( chan, retval );
     230        unlock( mutex_lock );
     231        return true;
     232    }
     233
    235234    if ( count == 0 ) { unlock( mutex_lock ); return false; }
     235
    236236    __do_remove( chan, retval );
    237237    unlock( mutex_lock );
     
    244244static inline [T, bool] try_remove( channel(T) & chan ) {
    245245    T retval;
    246     return [ retval, __internal_try_remove( chan, retval ) ];
    247 }
    248 
    249 static inline T try_remove( channel(T) & chan, T elem ) {
     246    bool success = __internal_try_remove( chan, retval );
     247    return [ retval, success ];
     248}
     249
     250static inline T try_remove( channel(T) & chan ) {
    250251    T retval;
    251252    __internal_try_remove( chan, retval );
     
    255256// handles closed case of insert routine
    256257static inline void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
    257     channel_closed except{&channel_closed_vt, 0p, &chan };
     258    channel_closed except{ &channel_closed_vt, 0p, &chan };
    258259    throwResume except; // throw resumption
    259260    if ( !__internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination
     
    279280
    280281    // have to check for the zero size channel case
    281     if ( size == 0 && !prods`isEmpty ) {
    282         memcpy((void *)&retval, (void *)prods`first.elem, sizeof(T));
    283         wake_one( prods );
     282    ZeroSize: if ( size == 0 && !prods`isEmpty ) {
     283        if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
     284        __prods_handoff( chan, retval );
    284285        unlock( mutex_lock );
    285286        return retval;
     
    287288
    288289    // wait if buffer is empty, work will be completed by someone else
    289     if (count == 0) {
     290    if ( count == 0 ) {
    290291        #ifdef CHAN_STATS
    291292        blocks++;
     
    299300    // Remove from buffer
    300301    __do_remove( chan, retval );
    301 
    302302    unlock( mutex_lock );
    303303    return retval;
    304304}
     305
     306///////////////////////////////////////////////////////////////////////////////////////////
     307// The following is support for waituntil (select) statements
     308///////////////////////////////////////////////////////////////////////////////////////////
     309static inline bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) {
     310    if ( !node`isListed && !node.park_counter ) return false; // handle special OR case
     311    lock( mutex_lock );
     312    if ( node`isListed ) { // op wasn't performed
     313        #ifdef CHAN_STATS
     314        operations--;
     315        #endif
     316        remove( node );
     317        unlock( mutex_lock );
     318        return false;
     319    }
     320    unlock( mutex_lock );
     321
      322    // only return true when not special OR case, not exceptional case and status is SAT
     323    return ( node.extra == 0p || !node.park_counter ) ? false : *node.clause_status == __SELECT_SAT;
     324}
     325
     326// type used by select statement to capture a chan read as the selected operation
     327struct chan_read {
     328    channel(T) & chan;
     329    T & ret;
     330};
     331
     332static inline void ?{}( chan_read(T) & cr, channel(T) & chan, T & ret ) {
     333    &cr.chan = &chan;
     334    &cr.ret = &ret;
     335}
     336static inline chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ chan, ret }; return cr; }
     337
     338static inline void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(this.chan, this) {
     339    __closed_remove( chan, ret );
     340    // if we get here then the insert succeeded
     341    __make_select_node_available( node );
     342}
     343
     344static inline bool register_select( chan_read(T) & this, select_node & node ) with(this.chan, this) {
     345    // mutex(sout) sout | "register_read";
     346    lock( mutex_lock );
     347    node.extra = &ret; // set .extra so that if it == 0p later in on_selected it is due to channel close
     348
     349    #ifdef CHAN_STATS
     350    if ( !closed ) operations++;
     351    #endif
     352
     353    // check if we can complete operation. If so race to establish winner in special OR case
     354    if ( !node.park_counter && ( count != 0 || !prods`isEmpty || unlikely(closed) ) ) {
     355        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     356           unlock( mutex_lock );
     357           return false;
     358        }
     359    }
     360
     361    if ( unlikely(closed) ) {
     362        unlock( mutex_lock );
     363        __handle_select_closed_read( this, node );
     364        return true;
     365    }
     366
     367    // have to check for the zero size channel case
     368    ZeroSize: if ( size == 0 && !prods`isEmpty ) {
     369        if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
     370        __prods_handoff( chan, ret );
     371        __set_avail_then_unlock( node, mutex_lock );
     372        return true;
     373    }
     374
     375    // wait if buffer is empty, work will be completed by someone else
     376    if ( count == 0 ) {
     377        #ifdef CHAN_STATS
     378        blocks++;
     379        #endif
     380       
     381        insert_last( cons, node );
     382        unlock( mutex_lock );
     383        return false;
     384    }
     385
     386    // Remove from buffer
     387    __do_remove( chan, ret );
     388    __set_avail_then_unlock( node, mutex_lock );
     389    return true;
     390}
     391static inline bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( this.chan, node ); }
     392static inline bool on_selected( chan_read(T) & this, select_node & node ) with(this) {
     393    if ( node.extra == 0p ) // check if woken up due to closed channel
     394        __closed_remove( chan, ret );
     395    // This is only reachable if not closed or closed exception was handled
     396    return true;
     397}
     398
     399// type used by select statement to capture a chan write as the selected operation
     400struct chan_write {
     401    channel(T) & chan;
     402    T elem;
     403};
     404
     405static inline void ?{}( chan_write(T) & cw, channel(T) & chan, T elem ) {
     406    &cw.chan = &chan;
     407    memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) );
     408}
     409static inline chan_write(T) ?>>?( T elem, channel(T) & chan ) { chan_write(T) cw{ chan, elem }; return cw; }
     410
     411static inline void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(this.chan, this) {
     412    __closed_insert( chan, elem );
     413    // if we get here then the insert succeeded
     414    __make_select_node_available( node );
     415}
     416
     417static inline bool register_select( chan_write(T) & this, select_node & node ) with(this.chan, this) {
     418    // mutex(sout) sout | "register_write";
     419    lock( mutex_lock );
     420    node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close
     421
     422    #ifdef CHAN_STATS
     423    if ( !closed ) operations++;
     424    #endif
     425
     426    // check if we can complete operation. If so race to establish winner in special OR case
     427    if ( !node.park_counter && ( count != size || !cons`isEmpty || unlikely(closed) ) ) {
     428        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     429           unlock( mutex_lock );
     430           return false;
     431        }
     432    }
     433
     434    // if closed handle
     435    if ( unlikely(closed) ) {
     436        unlock( mutex_lock );
     437        __handle_select_closed_write( this, node );
     438        return true;
     439    }
     440
     441    // handle blocked consumer case via handoff (buffer is implicitly empty)
     442    ConsEmpty: if ( !cons`isEmpty ) {
     443        if ( !__handle_waituntil_OR( cons ) ) {
     444            // mutex(sout) sout | "empty";
     445            break ConsEmpty;
     446        }
     447        // mutex(sout) sout | "signal";
     448        __cons_handoff( chan, elem );
     449        __set_avail_then_unlock( node, mutex_lock );
     450        return true;
     451    }
     452
     453    // insert node in list if buffer is full, work will be completed by someone else
     454    if ( count == size ) {
     455        #ifdef CHAN_STATS
     456        blocks++;
     457        #endif
     458
     459        insert_last( prods, node );
     460        unlock( mutex_lock );
     461        return false;
     462    } // if
     463
      464    // otherwise carry out the write via a normal buffer insert
     465    __buf_insert( chan, elem );
     466    __set_avail_then_unlock( node, mutex_lock );
     467    return true;
     468}
     469static inline bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( this.chan, node ); }
     470
     471static inline bool on_selected( chan_write(T) & this, select_node & node ) with(this) {
     472    if ( node.extra == 0p ) // check if woken up due to closed channel
     473        __closed_insert( chan, elem );
     474
     475    // This is only reachable if not closed or closed exception was handled
     476    return true;
     477}
     478
     479
    305480} // forall( T )
     481
     482
     483
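
A recurring pattern in the new channel code is the direct hand-off: __cons_handoff and __prods_handoff memcpy the element between the running thread and the extra pointer stashed in a blocked thread's stack-allocated select_node, then wake_one unparks it, so the element never passes through the buffer (this is also how the zero-size channel works). A rough self-contained C sketch of that hand-off, using a pthread mutex/condvar in place of the runtime's park/unpark; all names here are illustrative, not the CFA API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <string.h>

    // A blocked consumer leaves a pointer to its destination in a stack-allocated wait node;
    // the producer copies the element straight into that slot and wakes it.
    typedef struct wait_node {
        void * slot;                  // where the element should land (on the consumer's stack)
        bool ready;                   // set once the hand-off has happened
        pthread_cond_t cv;            // stand-in for the runtime's park/unpark
        struct wait_node * next;
    } wait_node;

    typedef struct {
        pthread_mutex_t lock;
        wait_node * cons_head;        // blocked consumers (simplified singly linked list)
    } channel_like;

    // Consumer: publish a stack node and block until a producer fills 'out'.
    static void recv_blocking( channel_like * ch, void * out ) {
        wait_node node = { .slot = out, .ready = false, .next = NULL };
        pthread_cond_init( &node.cv, NULL );
        pthread_mutex_lock( &ch->lock );
        node.next = ch->cons_head;    // enqueue self
        ch->cons_head = &node;
        while ( ! node.ready )        // wait for the hand-off
            pthread_cond_wait( &node.cv, &ch->lock );
        pthread_mutex_unlock( &ch->lock );
        pthread_cond_destroy( &node.cv );
    }

    // Producer: if a consumer is parked, copy the element directly into its slot and wake it.
    static bool try_handoff( channel_like * ch, const void * elem, size_t size ) {
        pthread_mutex_lock( &ch->lock );
        wait_node * c = ch->cons_head;
        if ( c == NULL ) { pthread_mutex_unlock( &ch->lock ); return false; }
        ch->cons_head = c->next;       // pop the waiting consumer
        memcpy( c->slot, elem, size ); // element never touches the channel buffer
        c->ready = true;
        pthread_cond_signal( &c->cv );
        pthread_mutex_unlock( &ch->lock );
        return true;
    }

In the real code the stack node is a select_node, so the same node doubles as the registration record for waituntil, which is why block() and the hand-off helpers read and write the node's extra field.
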
  • libcfa/src/concurrency/future.hfa

    r4daf79f rc083c3d  
    1919#include "monitor.hfa"
    2020#include "select.hfa"
     21#include "locks.hfa"
    2122
    2223//----------------------------------------------------------------------------
     
    2627//  future_t is lockfree and uses atomics which aren't needed given we use locks here
    2728forall( T ) {
    28     // enum(int) { FUTURE_EMPTY = 0, FUTURE_FULFILLED = 1 }; // Enums seem to be broken so feel free to add this back afterwards
     29    // enum { FUTURE_EMPTY = 0, FUTURE_FULFILLED = 1 }; // Enums seem to be broken so feel free to add this back afterwards
    2930
    3031    // temporary enum replacement
     
    4445    };
    4546
    46     // C_TODO: perhaps allow exceptions to be inserted like uC++?
    47 
    4847        static inline {
    4948
     
    8281        void _internal_flush( future(T) & this ) with(this) {
    8382            while( ! waiters`isEmpty ) {
     83                if ( !__handle_waituntil_OR( waiters ) ) // handle special waituntil OR case
     84                    break; // if handle_OR returns false then waiters is empty so break
    8485                select_node &s = try_pop_front( waiters );
    8586
    86                 if ( s.race_flag == 0p )
     87                if ( s.clause_status == 0p )
    8788                    // poke in result so that woken threads do not need to reacquire any locks
    88                     // *(((future_node(T) &)s).my_result) = result;
    8989                    copy_T( result, *(((future_node(T) &)s).my_result) );
    90                 else if ( !install_select_winner( s, &this ) ) continue;
     90                else if ( !__make_select_node_available( s ) ) continue;
    9191               
    9292                // only unpark if future is not selected
     
    9797
    9898                // Fulfil the future, returns whether or not someone was unblocked
    99                 bool fulfil( future(T) & this, T & val ) with(this) {
     99                bool fulfil( future(T) & this, T val ) with(this) {
    100100            lock( lock );
    101101            if( state != FUTURE_EMPTY )
     
    153153        }
    154154
    155         void * register_select( future(T) & this, select_node & s ) with(this) {
    156             lock( lock );
    157 
    158             // future not ready -> insert select node and return 0p
     155        bool register_select( future(T) & this, select_node & s ) with(this) {
     156            lock( lock );
     157
     158            // check if we can complete operation. If so race to establish winner in special OR case
     159            if ( !s.park_counter && state != FUTURE_EMPTY ) {
     160                if ( !__make_select_node_available( s ) ) { // we didn't win the race so give up on registering
     161                    unlock( lock );
     162                    return false;
     163                }
     164            }
     165
     166            // future not ready -> insert select node and return
    159167            if( state == FUTURE_EMPTY ) {
    160168                insert_last( waiters, s );
    161169                unlock( lock );
    162                 return 0p;
    163             }
    164 
    165             // future ready and we won race to install it as the select winner return 1p
    166             if ( install_select_winner( s, &this ) ) {
    167                 unlock( lock );
    168                 return 1p;
    169             }
    170 
    171             unlock( lock );
    172             // future ready and we lost race to install it as the select winner
    173             return 2p;
    174         }
    175 
    176         void unregister_select( future(T) & this, select_node & s ) with(this) {
     170                return false;
     171            }
     172
     173            __make_select_node_available( s );
     174            unlock( lock );
     175            return true;
     176        }
     177
     178        bool unregister_select( future(T) & this, select_node & s ) with(this) {
     179            if ( ! s`isListed ) return false;
    177180            lock( lock );
    178181            if ( s`isListed ) remove( s );
    179182            unlock( lock );
     183            return false;
    180184        }
    181185               
     186        bool on_selected( future(T) & this, select_node & node ) { return true; }
    182187        }
    183188}
     
    186191// These futures below do not support select statements so they may not be as useful as 'future'
    187192//  however the 'single_future' is cheap and cheerful and is most likely more performant than 'future'
    188 //  since it uses raw atomics and no locks afaik
     193//  since it uses raw atomics and no locks
    189194//
    190195// As far as 'multi_future' goes I can't see many use cases as it will be less performant than 'future'
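
The channel and future register_select routines share one idea: when a clause is already satisfiable at registration time, the resource races to mark the waituntil clause as the winner instead of enqueueing, and gives up on registering if another clause already won. A minimal sketch of that race, assuming the clauses of one statement share an atomic status word; the names and states below are illustrative, and the real select_node also carries park counters and exceptional-case handling:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { CLAUSE_UNSAT = 0, CLAUSE_SAT = 1 };

    typedef struct {
        _Atomic int * clause_status;   // shared by the competing clauses of one waituntil
    } select_node_like;

    // Analogue of "race to establish winner": only the first resource to flip the shared
    // status from UNSAT to SAT wins; losers back off and leave the operation to the winner.
    static bool try_win_select( select_node_like * node ) {
        int expected = CLAUSE_UNSAT;
        return atomic_compare_exchange_strong( node->clause_status, &expected, CLAUSE_SAT );
    }
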
  • libcfa/src/concurrency/invoke.h

    r4daf79f rc083c3d  
    217217                struct __thread_user_link cltr_link;
    218218
    219                 // used to point to this thd's current clh node
    220                 volatile bool * clh_node;
    221 
    222219                struct processor * last_proc;
     220
     221        // ptr used during handover between blocking lists to allow for stack allocation of intrusive nodes
     222        // main use case is wait-morphing to allow a different node to be used to block on condvar vs lock
     223        void * link_node;
    223224
    224225                PRNG_STATE_T random_state;                                              // fast random numbers
  • libcfa/src/concurrency/locks.cfa

    r4daf79f rc083c3d  
    7979        // lock is held by some other thread
    8080        if ( owner != 0p && owner != thrd ) {
    81                 insert_last( blocked_threads, *thrd );
     81        select_node node;
     82                insert_last( blocked_threads, node );
    8283                wait_count++;
    8384                unlock( lock );
    8485                park( );
    85         }
    86         // multi acquisition lock is held by current thread
    87         else if ( owner == thrd && multi_acquisition ) {
     86        return;
     87        } else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread
    8888                recursion_count++;
    89                 unlock( lock );
    90         }
    91         // lock isn't held
    92         else {
     89        } else {  // lock isn't held
    9390                owner = thrd;
    9491                recursion_count = 1;
    95                 unlock( lock );
    96         }
     92        }
     93    unlock( lock );
    9794}
    9895
     
    117114}
    118115
    119 static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
    120         thread$ * t = &try_pop_front( blocked_threads );
    121         owner = t;
    122         recursion_count = ( t ? 1 : 0 );
    123         if ( t ) wait_count--;
    124         unpark( t );
     116// static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
     117//      thread$ * t = &try_pop_front( blocked_threads );
     118//      owner = t;
     119//      recursion_count = ( t ? 1 : 0 );
     120//      if ( t ) wait_count--;
     121//      unpark( t );
     122// }
     123
     124static inline void pop_node( blocking_lock & this ) with( this ) {
     125    __handle_waituntil_OR( blocked_threads );
     126    select_node * node = &try_pop_front( blocked_threads );
     127    if ( node ) {
     128        wait_count--;
     129        owner = node->blocked_thread;
     130        recursion_count = 1;
     131        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
     132        wake_one( blocked_threads, *node );
     133    } else {
     134        owner = 0p;
     135        recursion_count = 0;
     136    }
    125137}
    126138
     
    134146        recursion_count--;
    135147        if ( recursion_count == 0 ) {
    136                 pop_and_set_new_owner( this );
     148                pop_node( this );
    137149        }
    138150        unlock( lock );
     
    147159        // lock held
    148160        if ( owner != 0p ) {
    149                 insert_last( blocked_threads, *t );
     161                insert_last( blocked_threads, *(select_node *)t->link_node );
    150162                wait_count++;
    151                 unlock( lock );
    152163        }
    153164        // lock not held
     
    156167                recursion_count = 1;
    157168                unpark( t );
    158                 unlock( lock );
    159         }
     169        }
     170    unlock( lock );
    160171}
    161172
     
    167178        size_t ret = recursion_count;
    168179
    169         pop_and_set_new_owner( this );
     180        pop_node( this );
     181
     182    select_node node;
     183    active_thread()->link_node = (void *)&node;
    170184        unlock( lock );
     185
     186    park();
     187
    171188        return ret;
    172189}
     
    175192        recursion_count = recursion;
    176193}
     194
     195// waituntil() support
     196bool register_select( blocking_lock & this, select_node & node ) with(this) {
     197    lock( lock __cfaabi_dbg_ctx2 );
     198        thread$ * thrd = active_thread();
     199
     200        // single acquisition lock is held by current thread
     201        /* paranoid */ verifyf( owner != thrd || multi_acquisition, "Single acquisition lock holder (%p) attempted to reacquire the lock %p resulting in a deadlock.", owner, &this );
     202
     203    if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) { // OR special case
     204        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     205           unlock( lock );
     206           return false;
     207        }
     208    }
     209
     210        // lock is held by some other thread
     211        if ( owner != 0p && owner != thrd ) {
     212                insert_last( blocked_threads, node );
     213                wait_count++;
     214                unlock( lock );
     215        return false;
     216        } else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread
     217                recursion_count++;
     218        } else {  // lock isn't held
     219                owner = thrd;
     220                recursion_count = 1;
     221        }
     222
     223    if ( node.park_counter ) __make_select_node_available( node );
     224    unlock( lock );
     225    return true;
     226}
     227
     228bool unregister_select( blocking_lock & this, select_node & node ) with(this) {
     229    lock( lock __cfaabi_dbg_ctx2 );
     230    if ( node`isListed ) {
     231        remove( node );
     232        wait_count--;
     233        unlock( lock );
     234        return false;
     235    }
     236   
     237    if ( owner == active_thread() ) {
     238        /* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count );
     239        // if recursion count is zero release lock and set new owner if one is waiting
     240        recursion_count--;
     241        if ( recursion_count == 0 ) {
     242            pop_node( this );
     243        }
     244    }
     245        unlock( lock );
     246    return false;
     247}
     248
     249bool on_selected( blocking_lock & this, select_node & node ) { return true; }
    177250
    178251//-----------------------------------------------------------------------------
     
    311384        int counter( condition_variable(L) & this ) with(this) { return count; }
    312385
    313         static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
     386        static void enqueue_thread( condition_variable(L) & this, info_thread(L) * i ) with(this) {
    314387                // add info_thread to waiting queue
    315388                insert_last( blocked_threads, *i );
    316389                count++;
    317                 size_t recursion_count = 0;
    318                 if (i->lock) {
     390                // size_t recursion_count = 0;
     391                // if (i->lock) {
     392                //      // if lock was passed get recursion count to reset to after waking thread
     393                //      recursion_count = on_wait( *i->lock );
     394                // }
     395                // return recursion_count;
     396        }
     397
     398    static size_t block_and_get_recursion( info_thread(L) & i ) {
     399        size_t recursion_count = 0;
     400                if ( i.lock ) {
    319401                        // if lock was passed get recursion count to reset to after waking thread
    320                         recursion_count = on_wait( *i->lock );
    321                 }
    322                 return recursion_count;
    323         }
     402                        recursion_count = on_wait( *i.lock ); // this call blocks
     403                } else park( );
     404        return recursion_count;
     405    }
    324406
    325407        // helper for wait()'s' with no timeout
    326408        static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
    327409                lock( lock __cfaabi_dbg_ctx2 );
    328                 size_t recursion_count = queue_and_get_recursion(this, &i);
     410        enqueue_thread( this, &i );
     411                // size_t recursion_count = queue_and_get_recursion( this, &i );
    329412                unlock( lock );
    330413
    331414                // blocks here
    332                 park( );
     415        size_t recursion_count = block_and_get_recursion( i );
     416                // park( );
    333417
    334418                // resets recursion count here after waking
    335                 if (i.lock) on_wakeup(*i.lock, recursion_count);
     419                if ( i.lock ) on_wakeup( *i.lock, recursion_count );
    336420        }
    337421
     
    343427        static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    344428                lock( lock __cfaabi_dbg_ctx2 );
    345                 size_t recursion_count = queue_and_get_recursion(this, &info);
     429        enqueue_thread( this, &info );
     430                // size_t recursion_count = queue_and_get_recursion( this, &info );
    346431                alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
    347432                unlock( lock );
     
    351436
    352437                // blocks here
    353                 park();
     438        size_t recursion_count = block_and_get_recursion( info );
     439                // park();
    354440
    355441                // unregisters alarm so it doesn't go off if this happens first
     
    357443
    358444                // resets recursion count here after waking
    359                 if (info.lock) on_wakeup(*info.lock, recursion_count);
     445                if ( info.lock ) on_wakeup( *info.lock, recursion_count );
    360446        }
    361447
     
    417503                info_thread( L ) i = { active_thread(), info, &l };
    418504                insert_last( blocked_threads, i );
    419                 size_t recursion_count = on_wait( *i.lock );
    420                 park( );
     505                size_t recursion_count = on_wait( *i.lock ); // blocks here
     506                // park( );
    421507                on_wakeup(*i.lock, recursion_count);
    422508        }
     
    459545        bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
    460546
    461         static size_t queue_and_get_recursion( pthread_cond_var(L) & this, info_thread(L) * i ) with(this) {
    462                 // add info_thread to waiting queue
    463                 insert_last( blocked_threads, *i );
    464                 size_t recursion_count = 0;
    465                 recursion_count = on_wait( *i->lock );
    466                 return recursion_count;
    467         }
     547        // static size_t queue_and_get_recursion( pthread_cond_var(L) & this, info_thread(L) * i ) with(this) {
     548        //      // add info_thread to waiting queue
     549        //      insert_last( blocked_threads, *i );
     550        //      size_t recursion_count = 0;
     551        //      recursion_count = on_wait( *i->lock );
     552        //      return recursion_count;
     553        // }
     554
    468555       
    469556        static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    470557                lock( lock __cfaabi_dbg_ctx2 );
    471                 size_t recursion_count = queue_and_get_recursion(this, &info);
     558                // size_t recursion_count = queue_and_get_recursion(this, &info);
     559        insert_last( blocked_threads, info );
    472560                pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
    473561                unlock( lock );
     
    477565
    478566                // blocks here
    479                 park();
     567        size_t recursion_count = block_and_get_recursion( info );
     568                // park();
    480569
    481570                // unregisters alarm so it doesn't go off if this happens first
     
    483572
    484573                // resets recursion count here after waking
    485                 if (info.lock) on_wakeup(*info.lock, recursion_count);
     574                if ( info.lock ) on_wakeup( *info.lock, recursion_count );
    486575        }
    487576
     
    493582                lock( lock __cfaabi_dbg_ctx2 );
    494583                info_thread( L ) i = { active_thread(), info, &l };
    495                 size_t recursion_count = queue_and_get_recursion(this, &i);
    496                 unlock( lock );
    497                 park( );
    498                 on_wakeup(*i.lock, recursion_count);
     584        insert_last( blocked_threads, i );
     585                // size_t recursion_count = queue_and_get_recursion( this, &i );
     586                unlock( lock );
     587
     588        // blocks here
     589                size_t recursion_count = block_and_get_recursion( i );
     590                // park();
     591                on_wakeup( *i.lock, recursion_count );
    499592        }
    500593
  • libcfa/src/concurrency/locks.hfa

    r4daf79f rc083c3d  
    3030#include "time.hfa"
    3131
     32#include "select.hfa"
     33
    3234#include <fstream.hfa>
    3335
     
    7072static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    7173static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
     74static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
     75static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
     76static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
    7277
    7378//----------
     
    8489static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    8590static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
     91static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
     92static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
     93static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
    8694
    8795//-----------------------------------------------------------------------------
     
    180188
    181189// if this is called recursively IT WILL DEADLOCK!!!!!
    182 static inline void lock(futex_mutex & this) with(this) {
     190static inline void lock( futex_mutex & this ) with(this) {
    183191        int state;
    184192
     
    190198                for (int i = 0; i < spin; i++) Pause();
    191199        }
    192 
    193         // // no contention try to acquire
    194         // if (internal_try_lock(this, state)) return;
    195200       
    196201        // if not in contended state, set to be in contended state
     
    213218
    214219static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
    215 static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
     220static inline size_t on_wait( futex_mutex & f ) { unlock(f); park(); return 0; }
    216221
    217222// to set recursion count after getting signalled;
     
    244249
    245250// if this is called recursively IT WILL DEADLOCK!!!!!
    246 static inline void lock(go_mutex & this) with(this) {
     251static inline void lock( go_mutex & this ) with( this ) {
    247252        int state, init_state;
    248253
     
    255260            while( !val ) { // lock unlocked
    256261                state = 0;
    257                 if (internal_try_lock(this, state, init_state)) return;
     262                if ( internal_try_lock( this, state, init_state ) ) return;
    258263            }
    259264            for (int i = 0; i < 30; i++) Pause();
     
    262267        while( !val ) { // lock unlocked
    263268            state = 0;
    264             if (internal_try_lock(this, state, init_state)) return;
     269            if ( internal_try_lock( this, state, init_state ) ) return;
    265270        }
    266271        sched_yield();
    267272       
    268273        // if not in contended state, set to be in contended state
    269         state = internal_exchange(this, 2);
     274        state = internal_exchange( this, 2 );
    270275        if ( !state ) return; // state == 0
    271276        init_state = 2;
    272         futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
     277        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
    273278    }
    274279}
     
    276281static inline void unlock( go_mutex & this ) with(this) {
    277282        // if uncontended do atomic unlock and then return
    278     if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
     283    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
    279284       
    280285        // otherwise threads are blocked so we must wake one
    281         futex((int *)&val, FUTEX_WAKE, 1);
    282 }
    283 
    284 static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
    285 static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
     286        futex( (int *)&val, FUTEX_WAKE, 1 );
     287}
     288
     289static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); }
     290static inline size_t on_wait( go_mutex & f ) { unlock( f ); park(); return 0; }
    286291static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
    287 
    288 //-----------------------------------------------------------------------------
    289 // CLH Spinlock
    290 // - No recursive acquisition
    291 // - Needs to be released by owner
    292 
    293 struct clh_lock {
    294         volatile bool * volatile tail;
    295     volatile bool * volatile head;
    296 };
    297 
    298 static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
    299 static inline void ^?{}( clh_lock & this ) { free(this.tail); }
    300 
    301 static inline void lock(clh_lock & l) {
    302         thread$ * curr_thd = active_thread();
    303         *(curr_thd->clh_node) = false;
    304         volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
    305         while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
    306     __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
    307     curr_thd->clh_node = prev;
    308 }
    309 
    310 static inline void unlock(clh_lock & l) {
    311         __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
    312 }
    313 
    314 static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
    315 static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
    316 static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
    317292
    318293//-----------------------------------------------------------------------------
     
    337312static inline void  ^?{}( exp_backoff_then_block_lock & this ){}
    338313
    339 static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
     314static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
    340315        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    341316}
    342317
    343 static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
    344 
    345 static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
    346         return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
    347 }
    348 
    349 static inline bool block(exp_backoff_then_block_lock & this) with(this) {
     318static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
     319
     320static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
     321        return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
     322}
     323
     324static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
    350325    lock( spinlock __cfaabi_dbg_ctx2 );
    351326    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
     
    359334}
    360335
    361 static inline void lock(exp_backoff_then_block_lock & this) with(this) {
     336static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
    362337        size_t compare_val = 0;
    363338        int spin = 4;
     
    378353}
    379354
    380 static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
     355static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
    381356    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    382357    lock( spinlock __cfaabi_dbg_ctx2 );
     
    386361}
    387362
    388 static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
    389 static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
    390 static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
     363static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); }
     364static inline size_t on_wait( exp_backoff_then_block_lock & this ) { unlock( this ); park(); return 0; }
     365static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); }
    391366
    392367//-----------------------------------------------------------------------------
     
    418393
    419394// if this is called recursively IT WILL DEADLOCK!!!!!
    420 static inline void lock(fast_block_lock & this) with(this) {
     395static inline void lock( fast_block_lock & this ) with(this) {
    421396        lock( lock __cfaabi_dbg_ctx2 );
    422397        if ( held ) {
     
    430405}
    431406
    432 static inline void unlock(fast_block_lock & this) with(this) {
     407static inline void unlock( fast_block_lock & this ) with(this) {
    433408        lock( lock __cfaabi_dbg_ctx2 );
    434409        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
     
    439414}
    440415
    441 static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
     416static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
    442417    lock( lock __cfaabi_dbg_ctx2 );
    443418    insert_last( blocked_threads, *t );
    444419    unlock( lock );
    445420}
    446 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
    447 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
     421static inline size_t on_wait( fast_block_lock & this) { unlock(this); park(); return 0; }
     422static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { }
    448423
    449424//-----------------------------------------------------------------------------
     
    456431struct simple_owner_lock {
    457432        // List of blocked threads
    458         dlist( thread$ ) blocked_threads;
     433        dlist( select_node ) blocked_threads;
    459434
    460435        // Spin lock used for mutual exclusion
     
    477452static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
    478453
    479 static inline void lock(simple_owner_lock & this) with(this) {
    480         if (owner == active_thread()) {
     454static inline void lock( simple_owner_lock & this ) with(this) {
     455        if ( owner == active_thread() ) {
    481456                recursion_count++;
    482457                return;
     
    484459        lock( lock __cfaabi_dbg_ctx2 );
    485460
    486         if (owner != 0p) {
    487                 insert_last( blocked_threads, *active_thread() );
     461        if ( owner != 0p ) {
     462        select_node node;
     463                insert_last( blocked_threads, node );
    488464                unlock( lock );
    489465                park( );
     
    495471}
    496472
    497 // TODO: fix duplicate def issue and bring this back
    498 // void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
    499         // thread$ * t = &try_pop_front( blocked_threads );
    500         // owner = t;
    501         // recursion_count = ( t ? 1 : 0 );
    502         // unpark( t );
    503 // }
    504 
    505 static inline void unlock(simple_owner_lock & this) with(this) {
     473static inline void pop_node( simple_owner_lock & this ) with(this) {
     474    __handle_waituntil_OR( blocked_threads );
     475    select_node * node = &try_pop_front( blocked_threads );
     476    if ( node ) {
     477        owner = node->blocked_thread;
     478        recursion_count = 1;
     479        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
     480        wake_one( blocked_threads, *node );
     481    } else {
     482        owner = 0p;
     483        recursion_count = 0;
     484    }
     485}
     486
     487static inline void unlock( simple_owner_lock & this ) with(this) {
    506488        lock( lock __cfaabi_dbg_ctx2 );
    507489        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     
    510492        recursion_count--;
    511493        if ( recursion_count == 0 ) {
    512                 // pop_and_set_new_owner( this );
    513                 thread$ * t = &try_pop_front( blocked_threads );
    514                 owner = t;
    515                 recursion_count = ( t ? 1 : 0 );
    516                 unpark( t );
     494                pop_node( this );
    517495        }
    518496        unlock( lock );
    519497}
    520498
    521 static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
     499static inline void on_notify(simple_owner_lock & this, thread$ * t ) with(this) {
    522500        lock( lock __cfaabi_dbg_ctx2 );
    523501        // lock held
    524502        if ( owner != 0p ) {
    525                 insert_last( blocked_threads, *t );
     503                insert_last( blocked_threads, *(select_node *)t->link_node );
    526504        }
    527505        // lock not held
     
    534512}
    535513
    536 static inline size_t on_wait(simple_owner_lock & this) with(this) {
     514static inline size_t on_wait( simple_owner_lock & this ) with(this) {
    537515        lock( lock __cfaabi_dbg_ctx2 );
    538516        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     
    541519        size_t ret = recursion_count;
    542520
    543         // pop_and_set_new_owner( this );
    544 
    545         thread$ * t = &try_pop_front( blocked_threads );
    546         owner = t;
    547         recursion_count = ( t ? 1 : 0 );
    548         unpark( t );
    549 
     521        pop_node( this );
     522
     523    select_node node;
     524    active_thread()->link_node = (void *)&node;
    550525        unlock( lock );
     526    park();
     527
    551528        return ret;
    552529}
    553530
    554 static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
     531static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
     532
     533// waituntil() support
     534static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
     535    lock( lock __cfaabi_dbg_ctx2 );
     536
     537    // check if we can complete operation. If so race to establish winner in special OR case
     538    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
     539        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     540           unlock( lock );
     541           return false;
     542        }
     543    }
     544
     545    if ( owner == active_thread() ) {
     546                recursion_count++;
     547        if ( node.park_counter ) __make_select_node_available( node );
     548        unlock( lock );
     549                return true;
     550        }
     551
     552    if ( owner != 0p ) {
     553                insert_last( blocked_threads, node );
     554                unlock( lock );
     555                return false;
     556        }
     557   
     558        owner = active_thread();
     559        recursion_count = 1;
     560
     561    if ( node.park_counter ) __make_select_node_available( node );
     562    unlock( lock );
     563    return true;
     564}
     565
     566static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
     567    lock( lock __cfaabi_dbg_ctx2 );
     568    if ( node`isListed ) {
     569        remove( node );
     570        unlock( lock );
     571        return false;
     572    }
     573
     574    if ( owner == active_thread() ) {
     575        recursion_count--;
     576        if ( recursion_count == 0 ) {
     577            pop_node( this );
     578        }
     579    }
     580    unlock( lock );
     581    return false;
     582}
     583
     584static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
     585
    555586
    556587//-----------------------------------------------------------------------------
     
    578609
    579610// if this is called recursively IT WILL DEADLOCK!
    580 static inline void lock(spin_queue_lock & this) with(this) {
     611static inline void lock( spin_queue_lock & this ) with(this) {
    581612        mcs_spin_node node;
    582613        lock( lock, node );
     
    586617}
    587618
    588 static inline void unlock(spin_queue_lock & this) with(this) {
     619static inline void unlock( spin_queue_lock & this ) with(this) {
    589620        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
    590621}
    591622
    592 static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
     623static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) {
    593624        unpark(t);
    594625}
    595 static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
    596 static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
     626static inline size_t on_wait( spin_queue_lock & this ) { unlock( this ); park(); return 0; }
     627static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); }
    597628
    598629
     
    621652
    622653// if this is called recursively IT WILL DEADLOCK!!!!!
    623 static inline void lock(mcs_block_spin_lock & this) with(this) {
     654static inline void lock( mcs_block_spin_lock & this ) with(this) {
    624655        mcs_node node;
    625656        lock( lock, node );
     
    633664}
    634665
    635 static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
    636 static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
    637 static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
     666static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); }
     667static inline size_t on_wait( mcs_block_spin_lock & this) { unlock( this ); park(); return 0; }
     668static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); }
    638669
    639670//-----------------------------------------------------------------------------
     
    661692
    662693// if this is called recursively IT WILL DEADLOCK!!!!!
    663 static inline void lock(block_spin_lock & this) with(this) {
     694static inline void lock( block_spin_lock & this ) with(this) {
    664695        lock( lock );
    665696        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
     
    668699}
    669700
    670 static inline void unlock(block_spin_lock & this) with(this) {
     701static inline void unlock( block_spin_lock & this ) with(this) {
    671702        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
    672703}
    673704
    674 static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
     705static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
    675706        // first we acquire internal fast_block_lock
    676707        lock( lock __cfaabi_dbg_ctx2 );
     
    686717        unpark(t);
    687718}
    688 static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
    689 static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
     719static inline size_t on_wait( block_spin_lock & this ) { unlock( this ); park(); return 0; }
     720static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
    690721        // now we acquire the entire block_spin_lock upon waking up
    691722        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
     
    714745forall(L & | is_blocking_lock(L)) {
    715746        struct info_thread;
    716 
    717         // // for use by sequence
    718         // info_thread(L) *& Back( info_thread(L) * this );
    719         // info_thread(L) *& Next( info_thread(L) * this );
    720747}
    721748
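
The go_mutex changes above follow the classic three-state futex protocol: 0 means unlocked, 1 means locked with no waiters, and 2 means locked with waiters, so unlock only issues a FUTEX_WAKE when the old value was not 1. Below is a minimal stand-alone sketch of that protocol, assuming Linux and using the same GCC atomic builtins; the names futex_wait, futex_wake, and go_style_mutex are illustrative, not CFA runtime API.

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // illustrative wrappers around the raw futex syscall (Linux only)
    static long futex_wait( int * addr, int expected ) {
        return syscall( SYS_futex, addr, FUTEX_WAIT, expected, nullptr, nullptr, 0 );
    }
    static long futex_wake( int * addr, int count ) {
        return syscall( SYS_futex, addr, FUTEX_WAKE, count, nullptr, nullptr, 0 );
    }

    struct go_style_mutex { int val = 0; };   // 0 = unlocked, 1 = locked, 2 = locked with waiters

    static void lock( go_style_mutex & m ) {
        int expected = 0;
        // fast path: uncontended acquire 0 -> 1
        if ( __atomic_compare_exchange_n( &m.val, &expected, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) ) return;
        // slow path: advertise a waiter (state 2) and sleep until the lock is released
        while ( __atomic_exchange_n( &m.val, 2, __ATOMIC_ACQUIRE ) != 0 )
            futex_wait( &m.val, 2 );          // returns immediately if val is no longer 2
    }

    static void unlock( go_style_mutex & m ) {
        // if the old value was 1 there were no waiters, so the store alone suffices
        if ( __atomic_exchange_n( &m.val, 0, __ATOMIC_RELEASE ) == 1 ) return;
        futex_wake( &m.val, 1 );              // otherwise wake one blocked thread
    }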
  • libcfa/src/concurrency/mutex_stmt.hfa

    r4daf79f rc083c3d  
    1515};
    1616
    17 
    1817struct __mutex_stmt_lock_guard {
    1918    void ** lockarr;
     
    3029
    3130forall(L & | is_lock(L)) {
    32 
    33     struct scoped_lock {
    34         L * internal_lock;
    35     };
    36 
    37     static inline void ?{}( scoped_lock(L) & this, L & internal_lock ) {
    38         this.internal_lock = &internal_lock;
    39         lock(internal_lock);
    40     }
    41    
    42     static inline void ^?{}( scoped_lock(L) & this ) with(this) {
    43         unlock(*internal_lock);
    44     }
    45 
    46     static inline void * __get_mutexstmt_lock_ptr( L & this ) {
    47         return &this;
    48     }
    49 
    50     static inline L __get_mutexstmt_lock_type( L & this );
    51 
    52     static inline L __get_mutexstmt_lock_type( L * this );
     31    static inline void * __get_mutexstmt_lock_ptr( L & this ) { return &this; }
     32    static inline L __get_mutexstmt_lock_type( L & this ) {}
     33    static inline L __get_mutexstmt_lock_type( L * this ) {}
    5334}
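
The scoped_lock removed above is the standard RAII idiom: acquire the lock in the constructor, release it in the destructor, so the critical section is bounded by the guard's scope. A minimal C++ equivalent of that pattern, as a sketch only; requiring lock()/unlock() member functions on LockType is an assumption here, whereas the CFA version uses the free lock()/unlock() routines.

    // minimal RAII guard, assuming LockType provides lock()/unlock() member functions
    template< typename LockType >
    struct scoped_guard {
        LockType & l;
        explicit scoped_guard( LockType & lk ) : l( lk ) { l.lock(); }
        ~scoped_guard() { l.unlock(); }
        scoped_guard( const scoped_guard & ) = delete;
        scoped_guard & operator=( const scoped_guard & ) = delete;
    };
    // usage: { scoped_guard<std::mutex> g( m ); /* critical section */ }  // released when g leaves scope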
  • libcfa/src/concurrency/select.hfa

    r4daf79f rc083c3d  
    22
    33#include "containers/list.hfa"
    4 #include <stdint.h>
    5 #include <kernel.hfa>
    6 #include <locks.hfa>
     4#include "stdint.h"
     5#include "kernel.hfa"
    76
     7struct select_node;
     8
     9// node status
     10static const unsigned long int __SELECT_UNSAT = 0;
     11static const unsigned long int __SELECT_SAT = 1;
     12static const unsigned long int __SELECT_RUN = 2;
     13
     14static inline bool __CFA_has_clause_run( unsigned long int status ) { return status == __SELECT_RUN; }
     15static inline void __CFA_maybe_park( int * park_counter ) {
     16    if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 )
     17        park();
     18}
     19
     20// node used for coordinating waituntil synchronization
    821struct select_node {
     22    int * park_counter;                 // If this is 0p then the node is in a special OR case waituntil
     23    unsigned long int * clause_status;  // needs to point at ptr sized location, if this is 0p then node is not part of a waituntil
     24
     25    void * extra;                       // used to store arbitrary data needed by some primitives
     26
    927    thread$ * blocked_thread;
    10     void ** race_flag;
    1128    inline dlink(select_node);
    1229};
    1330P9_EMBEDDED( select_node, dlink(select_node) )
    1431
    15 void ?{}( select_node & this ) {
    16     this.blocked_thread = 0p;
    17     this.race_flag = 0p;
     32static inline void ?{}( select_node & this ) {
     33    this.blocked_thread = active_thread();
     34    this.clause_status = 0p;
     35    this.park_counter = 0p;
     36    this.extra = 0p;
    1837}
    1938
    20 void ?{}( select_node & this, thread$ * blocked_thread ) {
     39static inline void ?{}( select_node & this, thread$ * blocked_thread ) {
    2140    this.blocked_thread = blocked_thread;
    22     this.race_flag = 0p;
     41    this.clause_status = 0p;
     42    this.park_counter = 0p;
     43    this.extra = 0p;
    2344}
    2445
    25 void ?{}( select_node & this, thread$ * blocked_thread, void ** race_flag ) {
     46static inline void ?{}( select_node & this, thread$ * blocked_thread, void * extra ) {
    2647    this.blocked_thread = blocked_thread;
    27     this.race_flag = race_flag;
     48    this.clause_status = 0p;
     49    this.park_counter = 0p;
     50    this.extra = extra;
    2851}
    2952
    30 void ^?{}( select_node & this ) {}
     53static inline void ^?{}( select_node & this ) {}
    3154
     55static inline unsigned long int * __get_clause_status( select_node & s ) { return s.clause_status; }
    3256
    3357//-----------------------------------------------------------------------------
    3458// is_selectable
    35 trait is_selectable(T & | sized(T)) {
    36     // For registering a select on a selectable concurrency primitive
    37     // return 0p if primitive not accessible yet
    38     // return 1p if primitive gets acquired
    39     // return 2p if primitive is accessible but some other primitive won the race
    40     // C_TODO: add enum for return values
    41     void * register_select( T &, select_node & );
     59forall(T & | sized(T))
     60trait is_selectable {
     61    // For registering a select stmt on a selectable concurrency primitive
     62    // Returns bool that indicates if operation is already SAT
     63    bool register_select( T &, select_node & );
    4264
    43     void unregister_select( T &, select_node &  );
     65    // For unregistering a select stmt on a selectable concurrency primitive
     66    // If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN)
     67    bool unregister_select( T &, select_node &  );
     68
     69    // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node
     70    //    passed as an arg to this routine
     71    // If on_selected returns false, the statement is not run, if it returns true it is run.
     72    bool on_selected( T &, select_node & );
    4473};
    4574
    46 static inline bool install_select_winner( select_node & this, void * primitive_ptr ) with(this) {
    47     // temporary needed for atomic instruction
    48     void * cmp_flag = 0p;
    49    
    50     // if we dont win the selector race we need to potentially
    51     //   ignore this node and move to the next one so we return accordingly
    52     if ( *race_flag != 0p ||
    53         !__atomic_compare_exchange_n(
    54             race_flag,
    55             &cmp_flag,
    56             primitive_ptr,
    57             false,
    58             __ATOMIC_SEQ_CST,
    59             __ATOMIC_SEQ_CST
    60         )
    61     ) return false; // lost race and some other node triggered select
    62     return true; // won race so this node is what the select proceeds with
     75// this is used inside the compiler to attempt to establish an else clause as a winner in the OR special case race
     76static inline bool __select_node_else_race( select_node & this ) with( this ) {
     77    unsigned long int cmp_status = __SELECT_UNSAT;
     78    return *clause_status == 0
     79            && __atomic_compare_exchange_n( clause_status, &cmp_status, 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
    6380}
     81
      82// when a primitive becomes available it calls the following routine on its node to update the select state:
     83// return true if we want to unpark the thd
     84static inline bool __make_select_node_available( select_node & this ) with( this ) {
     85    unsigned long int cmp_status = __SELECT_UNSAT;
     86
     87    if( !park_counter )
     88        return *clause_status == 0
     89            && __atomic_compare_exchange_n( clause_status, &cmp_status, (unsigned long int)&this, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ); // OR specific case where race was won
     90
     91    return *clause_status == 0
     92        && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write
     93        && !__atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST);
     94}
     95
     96// Handles the special OR case of the waituntil statement
     97// Since only one select node can win in the OR case, we need to race to set the node available BEFORE
     98//    performing the operation since if we lose the race the operation should not be performed as it will be lost
     99// Returns true if execution can continue normally and false if the queue has now been drained
     100static inline bool __handle_waituntil_OR( dlist( select_node ) & queue ) {
     101    if ( queue`isEmpty ) return false;
     102    if ( queue`first.clause_status && !queue`first.park_counter ) {
     103        while ( !queue`isEmpty ) {
     104            // if node not a special OR case or if we win the special OR case race break
     105            if ( !queue`first.clause_status || queue`first.park_counter || __make_select_node_available( queue`first ) ) { return true; }
     106            // otherwise we lost the special OR race so discard node
     107            try_pop_front( queue );
     108        }
     109        return false;
     110    }
     111    return true;
     112}
     113
     114// wake one thread from the list
     115static inline void wake_one( dlist( select_node ) & queue, select_node & popped ) {
     116    if ( !popped.clause_status                              // normal case, node is not a select node
     117        || ( popped.clause_status && !popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available
     118        || __make_select_node_available( popped ) )         // check if popped link belongs to a selecting thread
     119        unpark( popped.blocked_thread );
     120}
     121
     122static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, try_pop_front( queue ) ); }
     123
     124static inline void setup_clause( select_node & this, unsigned long int * clause_status, int * park_counter ) {
     125    this.blocked_thread = active_thread();
     126    this.clause_status = clause_status;
     127    this.park_counter = park_counter;
     128}
     129
  • libcfa/src/concurrency/thread.cfa

    r4daf79f rc083c3d  
    5353        preferred = ready_queue_new_preferred();
    5454        last_proc = 0p;
     55    link_node = 0p;
    5556        PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
    5657        #if defined( __CFA_WITH_VERIFY__ )
     
    5960        #endif
    6061
    61         clh_node = malloc( );
    62         *clh_node = false;
    63 
    6462        doregister(curr_cluster, this);
    6563        monitors{ &self_mon_p, 1, (fptr_t)0 };
     
    7068                canary = 0xDEADDEADDEADDEADp;
    7169        #endif
    72         free(clh_node);
    7370        unregister(curr_cluster, this);
    7471        ^self_cor{};
  • src/AST/Convert.cpp

    r4daf79f rc083c3d  
    567567        }
    568568
     569    const ast::WhenClause * visit( const ast::WhenClause * node ) override final {
     570                // There is no old-AST WhenClause, so this should never be called.
     571                assert( !node );
     572                return nullptr;
     573        }
     574
    569575        const ast::Stmt * visit( const ast::WaitForStmt * node ) override final {
    570576                if ( inCache( node ) ) return nullptr;
     
    573579                for ( auto clause : node->clauses ) {
    574580                        stmt->clauses.push_back({{
    575                                         get<Expression>().accept1( clause->target_func ),
     581                                        get<Expression>().accept1( clause->target ),
    576582                                        get<Expression>().acceptL( clause->target_args ),
    577583                                },
    578584                                get<Statement>().accept1( clause->stmt ),
    579                                 get<Expression>().accept1( clause->cond ),
     585                                get<Expression>().accept1( clause->when_cond ),
    580586                        });
    581587                }
     
    594600        const ast::WaitForClause * visit( const ast::WaitForClause * node ) override final {
    595601                // There is no old-AST WaitForClause, so this should never be called.
     602                assert( !node );
     603                return nullptr;
     604        }
     605
     606    const ast::Stmt * visit( const ast::WaitUntilStmt * node ) override final {
     607        // There is no old-AST WaitUntilStmt, so this should never be called.
    596608                assert( !node );
    597609                return nullptr;
     
    21582170                        auto clause = new ast::WaitForClause( old->location );
    21592171
    2160                         clause->target_func = GET_ACCEPT_1(clauses[i].target.function, Expr);
     2172                        clause->target = GET_ACCEPT_1(clauses[i].target.function, Expr);
    21612173                        clause->target_args = GET_ACCEPT_V(clauses[i].target.arguments, Expr);
    21622174                        clause->stmt = GET_ACCEPT_1(clauses[i].statement, Stmt);
    2163                         clause->cond = GET_ACCEPT_1(clauses[i].condition, Expr);
     2175                        clause->when_cond = GET_ACCEPT_1(clauses[i].condition, Expr);
    21642176
    21652177                        stmt->clauses.push_back( clause );
  • src/AST/Fwd.hpp

    r4daf79f rc083c3d  
    5858class FinallyClause;
    5959class SuspendStmt;
     60class WhenClause;
    6061class WaitForStmt;
    6162class WaitForClause;
     63class WaitUntilStmt;
    6264class WithStmt;
    6365class DeclStmt;
  • src/AST/Node.cpp

    r4daf79f rc083c3d  
    174174template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::weak >;
    175175template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::strong >;
     176template class ast::ptr_base< ast::WhenClause, ast::Node::ref_type::weak >;
     177template class ast::ptr_base< ast::WhenClause, ast::Node::ref_type::strong >;
    176178template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::weak >;
    177179template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::strong >;
    178180template class ast::ptr_base< ast::WaitForClause, ast::Node::ref_type::weak >;
    179181template class ast::ptr_base< ast::WaitForClause, ast::Node::ref_type::strong >;
     182template class ast::ptr_base< ast::WaitUntilStmt, ast::Node::ref_type::weak >;
     183template class ast::ptr_base< ast::WaitUntilStmt, ast::Node::ref_type::strong >;
    180184template class ast::ptr_base< ast::WithStmt, ast::Node::ref_type::weak >;
    181185template class ast::ptr_base< ast::WithStmt, ast::Node::ref_type::strong >;
  • src/AST/Pass.hpp

    r4daf79f rc083c3d  
    162162        const ast::FinallyClause *    visit( const ast::FinallyClause        * ) override final;
    163163        const ast::Stmt *             visit( const ast::SuspendStmt          * ) override final;
     164    const ast::WhenClause *       visit( const ast::WhenClause           * ) override final;
    164165        const ast::Stmt *             visit( const ast::WaitForStmt          * ) override final;
    165166        const ast::WaitForClause *    visit( const ast::WaitForClause        * ) override final;
     167    const ast::Stmt *             visit( const ast::WaitUntilStmt        * ) override final;
    166168        const ast::Decl *             visit( const ast::WithStmt             * ) override final;
    167169        const ast::NullStmt *         visit( const ast::NullStmt             * ) override final;
  • src/AST/Pass.impl.hpp

    r4daf79f rc083c3d  
    10661066
    10671067//--------------------------------------------------------------------------
     1068// WhenClause
     1069template< typename core_t >
     1070const ast::WhenClause * ast::Pass< core_t >::visit( const ast::WhenClause * node ) {
     1071        VISIT_START( node );
     1072
     1073        if ( __visit_children() ) {
     1074                maybe_accept( node, &WhenClause::target );
     1075                maybe_accept( node, &WhenClause::stmt );
     1076                maybe_accept( node, &WhenClause::when_cond );
     1077        }
     1078
     1079        VISIT_END( WhenClause, node );
     1080}
     1081
     1082//--------------------------------------------------------------------------
    10681083// WaitForStmt
    10691084template< typename core_t >
     
    10901105
    10911106        if ( __visit_children() ) {
    1092                 maybe_accept( node, &WaitForClause::target_func );
     1107                maybe_accept( node, &WaitForClause::target );
    10931108                maybe_accept( node, &WaitForClause::target_args );
    10941109                maybe_accept( node, &WaitForClause::stmt );
    1095                 maybe_accept( node, &WaitForClause::cond );
     1110                maybe_accept( node, &WaitForClause::when_cond );
    10961111        }
    10971112
    10981113        VISIT_END( WaitForClause, node );
     1114}
     1115
     1116//--------------------------------------------------------------------------
     1117// WaitUntilStmt
     1118template< typename core_t >
     1119const ast::Stmt * ast::Pass< core_t >::visit( const ast::WaitUntilStmt * node ) {
     1120        VISIT_START( node );
     1121
     1122        if ( __visit_children() ) {
     1123                maybe_accept( node, &WaitUntilStmt::clauses );
     1124                maybe_accept( node, &WaitUntilStmt::timeout_time );
     1125                maybe_accept( node, &WaitUntilStmt::timeout_stmt );
     1126                maybe_accept( node, &WaitUntilStmt::timeout_cond );
     1127                maybe_accept( node, &WaitUntilStmt::else_stmt );
     1128                maybe_accept( node, &WaitUntilStmt::else_cond );
     1129        }
     1130
     1131        VISIT_END( Stmt, node );
    10991132}
    11001133
  • src/AST/Print.cpp

    r4daf79f rc083c3d  
    208208        }
    209209
     210    void print( const ast::WaitStmt * node ) {
     211                if ( node->timeout_time ) {
     212                        os << indent-1 << "timeout of:" << endl;
     213                        node->timeout_time->accept( *this );
     214
     215                        if ( node->timeout_stmt ) {
     216                                os << indent-1 << "... with statment:" << endl;
     217                                node->timeout_stmt->accept( *this );
     218                        }
     219
     220                        if ( node->timeout_cond ) {
     221                                os << indent-1 << "... with condition:" << endl;
     222                                node->timeout_cond->accept( *this );
     223                        }
     224                }
     225
     226                if ( node->else_stmt ) {
     227                        os << indent-1 << "else:" << endl;
     228                        node->else_stmt->accept( *this );
     229
     230                        if ( node->else_cond ) {
     231                                os << indent-1 << "... with condition:" << endl;
     232                                node->else_cond->accept( *this );
     233                        }
     234                }
     235        }
     236
    210237        void preprint( const ast::NamedTypeDecl * node ) {
    211238                if ( ! node->name.empty() ) {
     
    756783        }
    757784
     785        virtual const ast::WhenClause * visit( const ast::WhenClause * node ) override final {
     786                os << indent-1 << "target: ";
     787                safe_print( node->target );
     788
     789                if ( node->stmt ) {
      790                        os << indent-1 << "... with statement:" << endl;
     791                        node->stmt->accept( *this );
     792                }
     793
     794                if ( node->when_cond ) {
     795                        os << indent-1 << "... with when condition:" << endl;
     796                        node->when_cond->accept( *this );
     797                }
     798
     799                return node;
     800        }
     801
    758802        virtual const ast::Stmt * visit( const ast::WaitForStmt * node ) override final {
    759803                os << "Waitfor Statement" << endl;
     
    793837        virtual const ast::WaitForClause * visit( const ast::WaitForClause * node ) override final {
    794838                os << indent-1 << "target function: ";
    795                 safe_print( node->target_func );
     839                safe_print( node->target );
    796840
    797841                if ( !node->target_args.empty() ) {
     
    807851                }
    808852
    809                 if ( node->cond ) {
     853                if ( node->when_cond ) {
    810854                        os << indent-1 << "... with condition:" << endl;
    811                         node->cond->accept( *this );
    812                 }
    813 
     855                        node->when_cond->accept( *this );
     856                }
     857
     858                return node;
     859        }
     860
     861    virtual const ast::Stmt * visit( const ast::WaitUntilStmt * node ) override final {
     862                os << "Waituntil Statement" << endl;
     863                indent += 2;
     864                for( const auto & clause : node->clauses ) {
     865                        clause->accept( *this );
     866                }
     867        print(node);    // calls print( const ast::WaitStmt * node )
    814868                return node;
    815869        }
  • src/AST/Stmt.hpp

    r4daf79f rc083c3d  
    378378};
    379379
    380 // Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ...
    381 class WaitForStmt final : public Stmt {
    382   public:
    383         std::vector<ptr<WaitForClause>> clauses;
    384         ptr<Expr> timeout_time;
     380// Base class of WaitFor/WaitUntil statements
     381// form: KEYWORD(...) ... timeout(...) ... else ...
     382class WaitStmt : public Stmt {
     383  public:
     384    ptr<Expr> timeout_time;
    385385        ptr<Stmt> timeout_stmt;
    386386        ptr<Expr> timeout_cond;
     
    388388        ptr<Expr> else_cond;
    389389
     390    WaitStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} )
     391                : Stmt(loc, std::move(labels)) {}
     392
     393  private:
     394    WaitStmt * clone() const override = 0;
     395        MUTATE_FRIEND
     396};
     397
     398// Base class for WaitFor/WaitUntil clauses
     399// form: when( when_cond ) KEYWORD( target ) stmt
     400class WhenClause : public StmtClause {
     401  public:
     402        ptr<Expr> target;
     403        ptr<Stmt> stmt;
     404        ptr<Expr> when_cond;
     405
     406        WhenClause( const CodeLocation & loc )
     407                : StmtClause( loc ) {}
     408
     409        const WhenClause * accept( Visitor & v ) const override { return v.visit( this ); }
     410  private:
     411        WhenClause * clone() const override { return new WhenClause{ *this }; }
     412        MUTATE_FRIEND
     413};
     414
     415// Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ...
     416class WaitForStmt final : public WaitStmt {
     417  public:
     418        std::vector<ptr<WaitForClause>> clauses;
     419
    390420        WaitForStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} )
    391                 : Stmt(loc, std::move(labels)) {}
     421                : WaitStmt(loc, std::move(labels)) {}
    392422
    393423        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     
    398428
    399429// Clause in a waitfor statement: waitfor (..., ...) ...
    400 class WaitForClause final : public StmtClause {
    401   public:
    402         ptr<Expr> target_func;
     430class WaitForClause final : public WhenClause {
     431  public:
    403432        std::vector<ptr<Expr>> target_args;
    404         ptr<Stmt> stmt;
    405         ptr<Expr> cond;
    406433
    407434        WaitForClause( const CodeLocation & loc )
    408                 : StmtClause( loc ) {}
     435                : WhenClause( loc ) {}
    409436
    410437        const WaitForClause * accept( Visitor & v ) const override { return v.visit( this ); }
    411438  private:
    412439        WaitForClause * clone() const override { return new WaitForClause{ *this }; }
     440        MUTATE_FRIEND
     441};
     442
     443// waituntil statement: when (...) waituntil (...) ... timeout(...) ... else ...
     444class WaitUntilStmt final : public WaitStmt {
     445  public:
     446    // Non-ast node used during compilation to store data needed to generate predicates
     447    //    and set initial status values for clauses
     448    // Used to create a tree corresponding to the structure of the clauses in a WaitUntil
     449    struct ClauseNode {
     450        enum Op { AND, OR, LEFT_OR, LEAF, ELSE, TIMEOUT } op; // operation/type tag
     451        // LEFT_OR used with TIMEOUT/ELSE to indicate that we ignore right hand side after parsing
     452
     453        ClauseNode * left;
     454        ClauseNode * right;
     455        WhenClause * leaf;  // only set if this node is a leaf (points into vector of clauses)
     456
     457        bool ambiguousWhen; // used to paint nodes of predicate tree based on when() clauses
     458        bool whenState;     // used to track if when_cond is toggled on or off for generating init values
     459        bool childOfAnd;      // true on leaf nodes that are children of AND, false otherwise
     460
     461        ClauseNode( Op op, ClauseNode * left, ClauseNode * right )
     462            : op(op), left(left), right(right), leaf(nullptr),
     463            ambiguousWhen(false), whenState(true), childOfAnd(false) {}
     464        ClauseNode( Op op, WhenClause * leaf )
     465            : op(op), left(nullptr), right(nullptr), leaf(leaf),
     466            ambiguousWhen(false), whenState(true), childOfAnd(false) {}
     467        ClauseNode( WhenClause * leaf ) : ClauseNode(LEAF, leaf) {}
     468       
     469        ~ClauseNode() {
     470            if ( left ) delete left;
     471            if ( right ) delete right;
     472        }
     473    };
     474
     475        std::vector<ptr<WhenClause>> clauses;
     476    ClauseNode * predicateTree;
     477
     478        WaitUntilStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} )
     479                : WaitStmt(loc, std::move(labels)) {}
     480
     481    ~WaitUntilStmt() { delete predicateTree; }
     482
     483        const Stmt * accept( Visitor & v ) const override { return v.visit( this ); }
     484  private:
     485        WaitUntilStmt * clone() const override { return new WaitUntilStmt{ *this }; }
    413486        MUTATE_FRIEND
    414487};
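
The ClauseNode tree introduced above records the and/or structure of a waituntil statement, with "and" binding tighter than "or" (see the grammar changes in parser.yy below). For intuition, waituntil(A) s1 and waituntil(B) s2 or waituntil(C) s3 corresponds to OR( AND( A, B ), C ). The following stripped-down mock of that shape is illustrative only and is not the real ast::WaitUntilStmt::ClauseNode.

    #include <memory>
    #include <string>

    // stripped-down stand-in for the ClauseNode shape above (illustrative only)
    struct MockClauseNode {
        enum Op { AND, OR, LEAF } op;
        std::unique_ptr<MockClauseNode> left, right;
        std::string leaf;   // clause name when op == LEAF

        MockClauseNode( Op o, std::unique_ptr<MockClauseNode> l, std::unique_ptr<MockClauseNode> r )
            : op( o ), left( std::move( l ) ), right( std::move( r ) ) {}
        explicit MockClauseNode( std::string name )
            : op( LEAF ), leaf( std::move( name ) ) {}
    };

    // waituntil( A ) s1 and waituntil( B ) s2 or waituntil( C ) s3
    //   =>  OR( AND( A, B ), C )   because "and" binds tighter than "or"
    std::unique_ptr<MockClauseNode> example_tree() {
        auto a = std::make_unique<MockClauseNode>( "A" );
        auto b = std::make_unique<MockClauseNode>( "B" );
        auto c = std::make_unique<MockClauseNode>( "C" );
        auto land = std::make_unique<MockClauseNode>( MockClauseNode::AND, std::move( a ), std::move( b ) );
        return std::make_unique<MockClauseNode>( MockClauseNode::OR, std::move( land ), std::move( c ) );
    }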
  • src/AST/Visitor.hpp

    r4daf79f rc083c3d  
    5050    virtual const ast::FinallyClause *    visit( const ast::FinallyClause        * ) = 0;
    5151    virtual const ast::Stmt *             visit( const ast::SuspendStmt          * ) = 0;
     52    virtual const ast::WhenClause *       visit( const ast::WhenClause           * ) = 0;
    5253    virtual const ast::Stmt *             visit( const ast::WaitForStmt          * ) = 0;
    5354    virtual const ast::WaitForClause *    visit( const ast::WaitForClause        * ) = 0;
     55    virtual const ast::Stmt *             visit( const ast::WaitUntilStmt        * ) = 0;
    5456    virtual const ast::Decl *             visit( const ast::WithStmt             * ) = 0;
    5557    virtual const ast::NullStmt *         visit( const ast::NullStmt             * ) = 0;
  • src/Common/CodeLocationTools.cpp

    r4daf79f rc083c3d  
    128128    macro(FinallyClause, FinallyClause) \
    129129    macro(SuspendStmt, Stmt) \
     130    macro(WhenClause, WhenClause) \
    130131    macro(WaitForStmt, Stmt) \
    131132    macro(WaitForClause, WaitForClause) \
     133    macro(WaitUntilStmt, Stmt) \
    132134    macro(WithStmt, Decl) \
    133135    macro(NullStmt, NullStmt) \
  • src/Concurrency/WaitforNew.cpp

    r4daf79f rc083c3d  
    305305
    306306        const ast::VariableExpr * variableExpr =
    307                 clause->target_func.as<ast::VariableExpr>();
     307                clause->target.as<ast::VariableExpr>();
    308308        ast::Expr * castExpr = new ast::CastExpr(
    309309                location,
    310310                new ast::CastExpr(
    311311                        location,
    312                         clause->target_func,
     312                        clause->target,
    313313                        ast::deepCopy( variableExpr->result.get() ),
    314314                        ast::GeneratedCast ),
     
    325325
    326326        ResolveContext context{ symtab, transUnit().global };
    327         out->push_back( maybeCond( location, clause->cond.get(), {
     327        out->push_back( maybeCond( location, clause->when_cond.get(), {
    328328                makeAccStmt( location, acceptables, index, "is_dtor",
    329                         detectIsDtor( location, clause->target_func ), context ),
     329                        detectIsDtor( location, clause->target ), context ),
    330330                makeAccStmt( location, acceptables, index, "func",
    331331                        funcExpr, context ),
  • src/Concurrency/module.mk

    r4daf79f rc083c3d  
    2323        Concurrency/WaitforNew.cpp \
    2424        Concurrency/Waitfor.cc \
    25         Concurrency/Waitfor.h
     25        Concurrency/Waitfor.h \
     26        Concurrency/Waituntil.cpp \
     27        Concurrency/Waituntil.hpp
  • src/Parser/StatementNode.cc

    r4daf79f rc083c3d  
    328328ast::WaitForStmt * build_waitfor( const CodeLocation & location, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt ) {
    329329        auto clause = new ast::WaitForClause( location );
    330         clause->target_func = maybeBuild( targetExpr );
     330        clause->target = maybeBuild( targetExpr );
    331331        clause->stmt = maybeMoveBuild( stmt );
    332         clause->cond = notZeroExpr( maybeMoveBuild( when ) );
     332        clause->when_cond = notZeroExpr( maybeMoveBuild( when ) );
    333333
    334334        ExpressionNode * next = dynamic_cast<ExpressionNode *>( targetExpr->get_next() );
     
    359359        return existing;
    360360} // build_waitfor_timeout
     361
     362ast::WaitUntilStmt::ClauseNode * build_waituntil_clause( const CodeLocation & loc, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt ) {
     363    ast::WhenClause * clause = new ast::WhenClause( loc );
     364    clause->when_cond = notZeroExpr( maybeMoveBuild( when ) );
     365    clause->stmt = maybeMoveBuild( stmt );
     366    clause->target = maybeMoveBuild( targetExpr );
     367    return new ast::WaitUntilStmt::ClauseNode( clause );
     368}
     369ast::WaitUntilStmt::ClauseNode * build_waituntil_else( const CodeLocation & loc, ExpressionNode * when, StatementNode * stmt ) {
     370    ast::WhenClause * clause = new ast::WhenClause( loc );
     371    clause->when_cond = notZeroExpr( maybeMoveBuild( when ) );
     372    clause->stmt = maybeMoveBuild( stmt );
     373    return new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::ELSE, clause );
     374}
     375ast::WaitUntilStmt::ClauseNode * build_waituntil_timeout( const CodeLocation & loc, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt ) {
     376    ast::WhenClause * clause = new ast::WhenClause( loc );
     377    clause->when_cond = notZeroExpr( maybeMoveBuild( when ) );
     378    clause->stmt = maybeMoveBuild( stmt );
     379    clause->target = maybeMoveBuild( timeout );
     380    return new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::TIMEOUT, clause );
     381}
     382
     383ast::WaitUntilStmt * build_waituntil_stmt( const CodeLocation & loc, ast::WaitUntilStmt::ClauseNode * root ) {
     384    ast::WaitUntilStmt * retStmt = new ast::WaitUntilStmt( loc );
     385    retStmt->predicateTree = root;
     386   
     387    // iterative tree traversal
     388    std::vector<ast::WaitUntilStmt::ClauseNode *> nodeStack; // stack needed for iterative traversal
     389    ast::WaitUntilStmt::ClauseNode * currNode = nullptr;
     390    ast::WaitUntilStmt::ClauseNode * lastInternalNode = nullptr;
     391    ast::WaitUntilStmt::ClauseNode * cleanup = nullptr; // used to cleanup removed else/timeout
     392    nodeStack.push_back(root);
     393
     394    do {
     395        currNode = nodeStack.back();
     396        nodeStack.pop_back(); // remove node since it will be processed
     397
     398        switch (currNode->op) {
     399            case ast::WaitUntilStmt::ClauseNode::LEAF:
     400                retStmt->clauses.push_back(currNode->leaf);
     401                break;
     402            case ast::WaitUntilStmt::ClauseNode::ELSE:
     403                retStmt->else_stmt = currNode->leaf->stmt
     404                    ? ast::deepCopy( currNode->leaf->stmt )
     405                    : nullptr;
     406               
     407                retStmt->else_cond = currNode->leaf->when_cond
     408                    ? ast::deepCopy( currNode->leaf->when_cond )
     409                    : nullptr;
     410
     411                delete currNode->leaf;
     412                break;
     413            case ast::WaitUntilStmt::ClauseNode::TIMEOUT:
     414                retStmt->timeout_time = currNode->leaf->target
     415                    ? ast::deepCopy( currNode->leaf->target )
     416                    : nullptr;
     417                retStmt->timeout_stmt = currNode->leaf->stmt
     418                    ? ast::deepCopy( currNode->leaf->stmt )
     419                    : nullptr;
     420                retStmt->timeout_cond = currNode->leaf->when_cond
     421                    ? ast::deepCopy( currNode->leaf->when_cond )
     422                    : nullptr;
     423
     424                delete currNode->leaf;
     425                break;
     426            default:
     427                nodeStack.push_back( currNode->right ); // process right after left
     428                nodeStack.push_back( currNode->left );
     429
     430                // Cut else/timeout out of the tree
     431                if ( currNode->op == ast::WaitUntilStmt::ClauseNode::LEFT_OR ) {
     432                    if ( lastInternalNode )
     433                        lastInternalNode->right = currNode->left;
     434                    else    // if not set then root is LEFT_OR
     435                        retStmt->predicateTree = currNode->left;
     436   
     437                    currNode->left = nullptr;
     438                    cleanup = currNode;
     439                }
     440               
     441                lastInternalNode = currNode;
     442                break;
     443        }
     444    } while ( !nodeStack.empty() );
     445
     446    if ( cleanup ) delete cleanup;
     447
     448    return retStmt;
     449}
    361450
    362451ast::Stmt * build_with( const CodeLocation & location, ExpressionNode * exprs, StatementNode * stmt ) {
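
build_waituntil_stmt above flattens the clause tree iteratively: it keeps an explicit stack, pushes the right child before the left so leaves are reached left-to-right, and splices LEFT_OR else/timeout nodes out of the predicate tree. The sketch below isolates just the traversal-order part on a minimal stand-in node type; it is illustrative, not the compiler code.

    #include <cstdio>
    #include <vector>

    // minimal binary node, standing in for the clause tree (illustrative only)
    struct Node {
        const char * name;       // non-null only for leaves
        Node * left, * right;
    };

    // collect leaves left-to-right with an explicit stack, pushing right before left,
    // mirroring the traversal order used by build_waituntil_stmt above
    std::vector<const char *> collect_leaves( Node * root ) {
        std::vector<const char *> leaves;
        std::vector<Node *> stack{ root };
        while ( ! stack.empty() ) {
            Node * curr = stack.back();
            stack.pop_back();
            if ( curr->name ) { leaves.push_back( curr->name ); continue; }
            stack.push_back( curr->right );   // right pushed first ...
            stack.push_back( curr->left );    // ... so left is processed first
        }
        return leaves;
    }

    int main() {
        Node a{ "A", nullptr, nullptr }, b{ "B", nullptr, nullptr }, c{ "C", nullptr, nullptr };
        Node land{ nullptr, &a, &b };         // A and B
        Node lor { nullptr, &land, &c };      // (A and B) or C
        for ( const char * n : collect_leaves( &lor ) ) std::printf( "%s ", n );  // prints: A B C
        std::printf( "\n" );
    }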
  • src/Parser/StatementNode.h

    r4daf79f rc083c3d  
    100100ast::WaitForStmt * build_waitfor_else( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, StatementNode * stmt );
    101101ast::WaitForStmt * build_waitfor_timeout( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt );
     102ast::WaitUntilStmt::ClauseNode * build_waituntil_clause( const CodeLocation &, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt );
     103ast::WaitUntilStmt::ClauseNode * build_waituntil_else( const CodeLocation &, ExpressionNode * when, StatementNode * stmt );
     104ast::WaitUntilStmt::ClauseNode * build_waituntil_timeout( const CodeLocation &, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt );
     105ast::WaitUntilStmt * build_waituntil_stmt( const CodeLocation &, ast::WaitUntilStmt::ClauseNode * root );
    102106ast::Stmt * build_with( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt );
    103107ast::Stmt * build_mutex( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt );
  • src/Parser/parser.yy

    r4daf79f rc083c3d  
    307307        ClauseNode * clause;
    308308        ast::WaitForStmt * wfs;
     309    ast::WaitUntilStmt::ClauseNode * wucn;
    309310        CondCtl * ifctl;
    310311        ForCtrl * forctl;
     
    427428%type<expr> when_clause                                 when_clause_opt                         waitfor         waituntil               timeout
    428429%type<stmt> waitfor_statement                           waituntil_statement
    429 %type<wfs> wor_waitfor_clause                   waituntil_clause                        wand_waituntil_clause   wor_waituntil_clause
     430%type<wfs> wor_waitfor_clause
     431%type<wucn> waituntil_clause                    wand_waituntil_clause       wor_waituntil_clause
    430432
    431433// declarations
     
    16851687waituntil_clause:
    16861688        when_clause_opt waituntil statement
    1687                 { printf( "waituntil_clause 1\n" ); $$ = nullptr; }
     1689                { $$ = build_waituntil_clause( yylloc, $1, $2, maybe_build_compound( yylloc, $3 ) ); }
    16881690        | '(' wor_waituntil_clause ')'
    1689                 { printf( "waituntil_clause 2\n" ); $$ = nullptr; }
     1691                { $$ = $2; }
    16901692        ;
    16911693
    16921694wand_waituntil_clause:
    16931695        waituntil_clause                                                                        %prec THEN
    1694                 { printf( "wand_waituntil_clause 1\n" ); $$ = nullptr; }
     1696                { $$ = $1; }
    16951697        | waituntil_clause wand wand_waituntil_clause
    1696                 { printf( "wand_waituntil_clause 2\n" ); $$ = nullptr; }
     1698                { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::AND, $1, $3 ); }
    16971699        ;
    16981700
    16991701wor_waituntil_clause:
    17001702        wand_waituntil_clause
    1701                 { printf( "wor_waituntil_clause 1\n" ); $$ = nullptr; }
     1703                { $$ = $1; }
    17021704        | wor_waituntil_clause wor wand_waituntil_clause
    1703                 { printf( "wor_waituntil_clause 2\n" ); $$ = nullptr; }
     1705                { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::OR, $1, $3 ); }
    17041706        | wor_waituntil_clause wor when_clause_opt ELSE statement
    1705                 { printf( "wor_waituntil_clause 3\n" ); $$ = nullptr; }
     1707                { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, build_waituntil_else( yylloc, $3, maybe_build_compound( yylloc, $5 ) ) ); }
    17061708        | wor_waituntil_clause wor when_clause_opt timeout statement    %prec THEN
    1707                 { printf( "wor_waituntil_clause 4\n" ); $$ = nullptr; }
     1709                { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, build_waituntil_timeout( yylloc, $3, $4, maybe_build_compound( yylloc, $5 ) ) ); }
    17081710        // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless)
    17091711        | wor_waituntil_clause wor when_clause_opt timeout statement wor ELSE statement // syntax error
    17101712                { SemanticError( yylloc, "else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; }
    17111713        | wor_waituntil_clause wor when_clause_opt timeout statement wor when_clause ELSE statement
    1712                 { printf( "wor_waituntil_clause 6\n" ); $$ = nullptr; }
     1714                { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1,
     1715                new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::OR,
     1716                    build_waituntil_timeout( yylloc, $3, $4, maybe_build_compound( yylloc, $5 ) ),
     1717                    build_waituntil_else( yylloc, $7, maybe_build_compound( yylloc, $9 ) ) ) ); }
    17131718        ;
    17141719
     
    17161721        wor_waituntil_clause                                                            %prec THEN
    17171722                // SKULLDUGGERY: create an empty compound statement to test parsing of waituntil statement.
    1718                 { $$ = new StatementNode( build_compound( yylloc, nullptr ) ); }
     1723                {
     1724            $$ = new StatementNode( build_waituntil_stmt( yylloc, $1 ) );
     1725            // $$ = new StatementNode( build_compound( yylloc, nullptr ) );
     1726        }
    17191727        ;
    17201728
  • src/ResolvExpr/Resolver.cc

    r4daf79f rc083c3d  
    17301730
    17311731                        // Find all candidates for a function in canonical form
    1732                         funcFinder.find( clause.target_func, ResolvMode::withAdjustment() );
     1732                        funcFinder.find( clause.target, ResolvMode::withAdjustment() );
    17331733
    17341734                        if ( funcFinder.candidates.empty() ) {
    17351735                                stringstream ss;
    17361736                                ss << "Use of undeclared identifier '";
    1737                                 ss << clause.target_func.strict_as< ast::NameExpr >()->name;
     1737                                ss << clause.target.strict_as< ast::NameExpr >()->name;
    17381738                                ss << "' in call to waitfor";
    17391739                                SemanticError( stmt->location, ss.str() );
     
    19221922                        auto clause2 = new ast::WaitForClause( clause.location );
    19231923
    1924                         clause2->target_func = funcCandidates.front()->expr;
     1924                        clause2->target = funcCandidates.front()->expr;
    19251925
    19261926                        clause2->target_args.reserve( clause.target_args.size() );
     
    19451945
    19461946                        // Resolve the conditions as if it were an IfStmt, statements normally
    1947                         clause2->cond = findSingleExpression( clause.cond, context );
     1947                        clause2->when_cond = findSingleExpression( clause.when_cond, context );
    19481948                        clause2->stmt = clause.stmt->accept( *visitor );
    19491949
  • src/main.cc

    r4daf79f rc083c3d  
    4848#include "Concurrency/Keywords.h"           // for implementMutex, implement...
    4949#include "Concurrency/Waitfor.h"            // for generateWaitfor
     50#include "Concurrency/Waituntil.hpp"        // for generateWaitUntil
    5051#include "ControlStruct/ExceptDecl.h"       // for translateExcept
    5152#include "ControlStruct/ExceptTranslate.h"  // for translateThrows, translat...
     
    340341                PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords, transUnit );
    341342                PASS( "Forall Pointer Decay", Validate::decayForallPointers, transUnit );
     343        PASS( "Implement Waituntil", Concurrency::generateWaitUntil, transUnit  );
    342344                PASS( "Hoist Control Declarations", ControlStruct::hoistControlDecls, transUnit );
    343345
  • tests/Makefile.am

    r4daf79f rc083c3d  
    1111## Created On       : Sun May 31 09:08:15 2015
    1212## Last Modified By : Peter A. Buhr
    13 ## Last Modified On : Mon May  1 16:45:07 2023
    14 ## Update Count     : 144
     13## Last Modified On : Mon May  1 17:25:24 2023
     14## Update Count     : 145
    1515###############################################################################
    1616
     
    116116#----------------------------------------------------------------------------------------------------------------
    117117
    118 all-local :
     118all-local : # This name is important to automake and implies the default build target.
    119119        @+$(TEST_PY) --debug=$(debug) --install=$(installed) --archive-errors=$(archiveerrors) $(concurrent) $(timeouts) $(ARCH) --all # '@' => do not echo command (SILENT), '+' => allows recursive make from within python program
    120120
  • tests/concurrent/futures/select_future.cfa

    r4daf79f rc083c3d  
    196196    delete( shared_future );
    197197        printf( "done 3\n" );
    198 
    199     // C_TODO: add test for select statement once it is implemented
    200198}