  • libcfa/src/concurrency/locks.hfa

    rbd72c284 → r5ece8ce
 #include "time.hfa"

+#include "select.hfa"
+
 #include <fstream.hfa>

…
 #include <unistd.h>

-// C_TODO: cleanup this and locks.cfa
-// - appropriate separation of interface and impl
-// - clean up unused/unneeded locks
-// - change messy big blocking lock from inheritance to composition to remove need for flags
+typedef void (*__cfa_pre_park)( void * );
+
+static inline void pre_park_noop( void * ) {}
+
+//-----------------------------------------------------------------------------
+// is_blocking_lock
+forall( L & | sized(L) )
+trait is_blocking_lock {
+        // For synchronization locks to use when acquiring
+        void on_notify( L &, struct thread$ * );
+
+        // For synchronization locks to use when releasing
+        size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
+
+        // to set recursion count after getting signalled;
+        void on_wakeup( L &, size_t recursion );
+};
+
+static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
+    pp_fn( pp_datum );
+    park();
+}
+
+// macros for default routine impls for is_blocking_lock trait that do not wait-morph
+
+#define DEFAULT_ON_NOTIFY( lock_type ) \
+    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
+
+#define DEFAULT_ON_WAIT( lock_type ) \
+    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
+        unlock( this ); \
+        pre_park_then_park( pp_fn, pp_datum ); \
+        return 0; \
+    }
+
+// on_wakeup impl if lock should be reacquired after waking up
+#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
+    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
+
+// on_wakeup impl if lock will not be reacquired after waking up
+#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
+    static inline void on_wakeup( lock_type & this, size_t recursion ) {}
+
+
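As a reading aid (not part of the changeset): the three DEFAULT_* macros generate the is_blocking_lock routines for locks that do not wait-morph. For instance, DEFAULT_ON_WAIT( futex_mutex ), used further down, expands to roughly:

    static inline size_t on_wait( futex_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) {
        unlock( this );                          // release the lock before blocking
        pre_park_then_park( pp_fn, pp_datum );   // run the caller's pre-park hook, then park
        return 0;                                // no recursion count to restore on wakeup
    }

The pp_fn/pp_datum pair lets the caller of on_wait (e.g. a condition variable) run setup after the lock is released but before the thread parks; pre_park_noop is the do-nothing default.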

 //-----------------------------------------------------------------------------
     
…
 static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
 static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
-static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
+static inline size_t on_wait  ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
 static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

 //----------
…
 static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
 static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
-static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
+static inline size_t on_wait  ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
 static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
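For orientation (not part of the changeset): these forwarding routines are what let higher-level blocking primitives, such as CFA's condition_variable, block on these locks. A hedged usage sketch, assuming the condition_variable interface from this header:

    single_acquisition_lock m;
    condition_variable( single_acquisition_lock ) cv;

    void waiter() {
        lock( m );
        wait( cv, m );      // internally: on_wait( m, ... ) releases m and parks,
                            // then on_wakeup( m, recursion ) runs after the signal
        unlock( m );
    }

    void signaller() {
        lock( m );
        notify_one( cv );   // internally: on_notify( m, <waiting thread> )
        unlock( m );
    }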

 //-----------------------------------------------------------------------------
     
…
 static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

-static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
-        return node->next;
-}
-
 struct mcs_spin_lock {
         mcs_spin_queue queue;
…

 static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
+    n.locked = true;
         mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
-        n.locked = true;
-        if(prev == 0p) return;
+        if( prev == 0p ) return;
         prev->next = &n;
-        while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
+        while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
 }

…
         mcs_spin_node * n_ptr = &n;
         if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
-        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
+        while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
         n.next->locked = false;
 }
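For context (not part of the changeset), the MCS spin lock takes a caller-supplied node on both acquire and release; a hypothetical usage sketch:

    mcs_spin_lock l;

    void critical_section() {
        mcs_spin_node n;    // lives on the acquiring thread's stack for the duration
        lock( l, n );       // enqueue n, then spin on n.locked until the predecessor hands off
        // ... critical section ...
        unlock( l, n );     // hand off to n.next, or reset the queue tail if there is no successor
    }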
     
…
 // - Kernel thd blocking alternative to the spinlock
 // - No ownership (will deadlock on reacq)
+// - no reacq on wakeup
 struct futex_mutex {
         // lock state any state other than UNLOCKED is locked
…
 }

-static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }
-
-static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
+static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
+
+static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
         return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
 }

-static inline int internal_exchange(futex_mutex & this) with(this) {
+static inline int internal_exchange( futex_mutex & this ) with(this) {
         return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
 }

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(futex_mutex & this) with(this) {
+static inline void lock( futex_mutex & this ) with(this) {
         int state;

…
                 for (int i = 0; i < spin; i++) Pause();
         }
-
-        // // no contention try to acquire
-        // if (internal_try_lock(this, state)) return;

         // if not in contended state, set to be in contended state
…
 }

-static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
-
-// to set recursion count after getting signalled;
-static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
+DEFAULT_ON_NOTIFY( futex_mutex )
+DEFAULT_ON_WAIT( futex_mutex )
+DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

 //-----------------------------------------------------------------------------
…
         int val;
 };
-
 static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
+// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
+// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

 static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
…

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(go_mutex & this) with(this) {
+static inline void lock( go_mutex & this ) with( this ) {
         int state, init_state;

…
             while( !val ) { // lock unlocked
                 state = 0;
-                if (internal_try_lock(this, state, init_state)) return;
+                if ( internal_try_lock( this, state, init_state ) ) return;
             }
             for (int i = 0; i < 30; i++) Pause();
…
         while( !val ) { // lock unlocked
             state = 0;
-            if (internal_try_lock(this, state, init_state)) return;
+            if ( internal_try_lock( this, state, init_state ) ) return;
         }
         sched_yield();

         // if not in contended state, set to be in contended state
-        state = internal_exchange(this, 2);
+        state = internal_exchange( this, 2 );
         if ( !state ) return; // state == 0
         init_state = 2;
-        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
+        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
     }
 }
…
 static inline void unlock( go_mutex & this ) with(this) {
         // if uncontended do atomic unlock and then return
-    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

         // otherwise threads are blocked so we must wake one
-        futex((int *)&val, FUTEX_WAKE, 1);
-}
-
-static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
-static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
-
-//-----------------------------------------------------------------------------
-// CLH Spinlock
-// - No recursive acquisition
-// - Needs to be released by owner
-
-struct clh_lock {
-        volatile bool * volatile tail;
-    volatile bool * volatile head;
-};
-
-static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
-static inline void ^?{}( clh_lock & this ) { free(this.tail); }
-
-static inline void lock(clh_lock & l) {
-        thread$ * curr_thd = active_thread();
-        *(curr_thd->clh_node) = false;
-        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
-        while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
-    __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
-    curr_thd->clh_node = prev;
-}
-
-static inline void unlock(clh_lock & l) {
-        __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
-}
-
-static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
+        futex( (int *)&val, FUTEX_WAKE, 1 );
+}
+
+DEFAULT_ON_NOTIFY( go_mutex )
+DEFAULT_ON_WAIT( go_mutex )
+DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
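Background (not part of the changeset): the futex( ... ) helper called above is presumably a thin wrapper over the Linux futex system call; a minimal sketch of such a wrapper, with the includes shown only for self-containment:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    static inline int futex( int * uaddr, int futex_op, int val ) {
        // FUTEX_WAIT: sleep only while *uaddr still equals val, otherwise return with EWOULDBLOCK.
        // FUTEX_WAKE: wake up to val threads blocked on uaddr.
        return syscall( SYS_futex, uaddr, futex_op, val, 0p, 0p, 0 );
    }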

 //-----------------------------------------------------------------------------
…
         this.lock_value = 0;
 }
+static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

 static inline void  ^?{}( exp_backoff_then_block_lock & this ){}

-static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
         return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 }

-static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
-
-static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
-        return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
-}
-
-static inline bool block(exp_backoff_then_block_lock & this) with(this) {
+static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
+
+static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
+        return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
+}
+
+static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
     lock( spinlock __cfaabi_dbg_ctx2 );
     if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
…
 }

-static inline void lock(exp_backoff_then_block_lock & this) with(this) {
+static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
         size_t compare_val = 0;
         int spin = 4;
…
 }

-static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
+static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
     lock( spinlock __cfaabi_dbg_ctx2 );
…
 }

-static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
+DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
+DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
+DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

 //-----------------------------------------------------------------------------
…

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(fast_block_lock & this) with(this) {
+static inline void lock( fast_block_lock & this ) with(this) {
         lock( lock __cfaabi_dbg_ctx2 );
         if ( held ) {
…
 }

-static inline void unlock(fast_block_lock & this) with(this) {
+static inline void unlock( fast_block_lock & this ) with(this) {
         lock( lock __cfaabi_dbg_ctx2 );
         /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
…
 }

-static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     insert_last( blocked_threads, *t );
     unlock( lock );
 }
-static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
+DEFAULT_ON_WAIT( fast_block_lock )
+DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

 //-----------------------------------------------------------------------------
…
 struct simple_owner_lock {
         // List of blocked threads
-        dlist( thread$ ) blocked_threads;
+        dlist( select_node ) blocked_threads;

         // Spin lock used for mutual exclusion
…
 static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

-static inline void lock(simple_owner_lock & this) with(this) {
-        if (owner == active_thread()) {
+static inline void lock( simple_owner_lock & this ) with(this) {
+        if ( owner == active_thread() ) {
                 recursion_count++;
                 return;
…
         lock( lock __cfaabi_dbg_ctx2 );

-        if (owner != 0p) {
-                insert_last( blocked_threads, *active_thread() );
+        if ( owner != 0p ) {
+        select_node node;
+                insert_last( blocked_threads, node );
                 unlock( lock );
                 park( );
…
 }

-// TODO: fix duplicate def issue and bring this back
-// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
-        // thread$ * t = &try_pop_front( blocked_threads );
-        // owner = t;
-        // recursion_count = ( t ? 1 : 0 );
-        // unpark( t );
-// }
-
-static inline void unlock(simple_owner_lock & this) with(this) {
+static inline void pop_node( simple_owner_lock & this ) with(this) {
+    __handle_waituntil_OR( blocked_threads );
+    select_node * node = &try_pop_front( blocked_threads );
+    if ( node ) {
+        owner = node->blocked_thread;
+        recursion_count = 1;
+        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+        wake_one( blocked_threads, *node );
+    } else {
+        owner = 0p;
+        recursion_count = 0;
+    }
+}
+
+static inline void unlock( simple_owner_lock & this ) with(this) {
         lock( lock __cfaabi_dbg_ctx2 );
         /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
…
         recursion_count--;
         if ( recursion_count == 0 ) {
-                // pop_and_set_new_owner( this );
-                thread$ * t = &try_pop_front( blocked_threads );
-                owner = t;
-                recursion_count = ( t ? 1 : 0 );
-                unpark( t );
+                pop_node( this );
         }
         unlock( lock );
 }

-static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
         lock( lock __cfaabi_dbg_ctx2 );
         // lock held
         if ( owner != 0p ) {
-                insert_last( blocked_threads, *t );
+                insert_last( blocked_threads, *(select_node *)t->link_node );
         }
         // lock not held
…
 }

-static inline size_t on_wait(simple_owner_lock & this) with(this) {
+static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
         lock( lock __cfaabi_dbg_ctx2 );
         /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
…
         size_t ret = recursion_count;

-        // pop_and_set_new_owner( this );
-
-        thread$ * t = &try_pop_front( blocked_threads );
-        owner = t;
-        recursion_count = ( t ? 1 : 0 );
-        unpark( t );
-
+        pop_node( this );
+
+    select_node node;
+    active_thread()->link_node = (void *)&node;
         unlock( lock );
+
+    pre_park_then_park( pp_fn, pp_datum );
+
         return ret;
 }

-static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+
+// waituntil() support
+static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
+    lock( lock __cfaabi_dbg_ctx2 );
+
+    // check if we can complete operation. If so race to establish winner in special OR case
+    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+           unlock( lock );
+           return false;
+        }
+    }
+
+    if ( owner == active_thread() ) {
+                recursion_count++;
+        if ( node.park_counter ) __make_select_node_available( node );
+        unlock( lock );
+                return true;
+        }
+
+    if ( owner != 0p ) {
+                insert_last( blocked_threads, node );
+                unlock( lock );
+                return false;
+        }
+
+        owner = active_thread();
+        recursion_count = 1;
+
+    if ( node.park_counter ) __make_select_node_available( node );
+    unlock( lock );
+    return true;
+}
+
+static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
+    lock( lock __cfaabi_dbg_ctx2 );
+    if ( node`isListed ) {
+        remove( node );
+        unlock( lock );
+        return false;
+    }
+
+    if ( owner == active_thread() ) {
+        recursion_count--;
+        if ( recursion_count == 0 ) {
+            pop_node( this );
+        }
+    }
+    unlock( lock );
+    return false;
+}
+
+static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
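For orientation (not part of the changeset): simple_owner_lock keeps its owner/recursion semantics under the new select_node-based queue; a hypothetical usage sketch of recursive acquisition:

    simple_owner_lock m;

    void nested() {
        lock( m );      // first acquisition: owner = this thread, recursion_count = 1
        lock( m );      // recursive acquisition by the owner: recursion_count = 2
        unlock( m );    // recursion_count = 1, lock still held
        unlock( m );    // recursion_count = 0, pop_node() hands the lock to the next waiter, if any
    }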
     624

 //-----------------------------------------------------------------------------
…

 // if this is called recursively IT WILL DEADLOCK!
-static inline void lock(spin_queue_lock & this) with(this) {
+static inline void lock( spin_queue_lock & this ) with(this) {
         mcs_spin_node node;
         lock( lock, node );
…
 }

-static inline void unlock(spin_queue_lock & this) with(this) {
+static inline void unlock( spin_queue_lock & this ) with(this) {
         __atomic_store_n(&held, false, __ATOMIC_RELEASE);
 }

-static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
-        unpark(t);
-}
-static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
-
+DEFAULT_ON_NOTIFY( spin_queue_lock )
+DEFAULT_ON_WAIT( spin_queue_lock )
+DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

 //-----------------------------------------------------------------------------
…

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(mcs_block_spin_lock & this) with(this) {
+static inline void lock( mcs_block_spin_lock & this ) with(this) {
         mcs_node node;
         lock( lock, node );
…
 }

-static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
+DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
+DEFAULT_ON_WAIT( mcs_block_spin_lock )
+DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

 //-----------------------------------------------------------------------------
…

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(block_spin_lock & this) with(this) {
+static inline void lock( block_spin_lock & this ) with(this) {
         lock( lock );
         while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
…
 }

-static inline void unlock(block_spin_lock & this) with(this) {
+static inline void unlock( block_spin_lock & this ) with(this) {
         __atomic_store_n(&held, false, __ATOMIC_RELEASE);
 }

-static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
+static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
         // first we acquire internal fast_block_lock
         lock( lock __cfaabi_dbg_ctx2 );
…
         unpark(t);
 }
-static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
+DEFAULT_ON_WAIT( block_spin_lock )
+static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
         // now we acquire the entire block_spin_lock upon waking up
         while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
…
         unlock( lock ); // Now we release the internal fast_spin_lock
 }
-
-//-----------------------------------------------------------------------------
-// is_blocking_lock
-forall( L & | sized(L) )
-trait is_blocking_lock {
-        // For synchronization locks to use when acquiring
-        void on_notify( L &, struct thread$ * );
-
-        // For synchronization locks to use when releasing
-        size_t on_wait( L & );
-
-        // to set recursion count after getting signalled;
-        void on_wakeup( L &, size_t recursion );
-};

 //-----------------------------------------------------------------------------
…
 forall(L & | is_blocking_lock(L)) {
         struct info_thread;
-
-        // // for use by sequence
-        // info_thread(L) *& Back( info_thread(L) * this );
-        // info_thread(L) *& Next( info_thread(L) * this );
 }