Ignore:
File:
1 file edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/locks.hfa

    r5a05946 → rbd72c284
    3030#include "time.hfa"
    3131
    32 #include "select.hfa"
    33 
    3432#include <fstream.hfa>
    3533
     
    3937#include <unistd.h>
    4038
    41 typedef void (*__cfa_pre_park)( void * );
    42 
    43 static inline void pre_park_noop( void * ) {}
    44 
    45 //-----------------------------------------------------------------------------
    46 // is_blocking_lock
    47 forall( L & | sized(L) )
    48 trait is_blocking_lock {
    49         // For synchronization locks to use when acquiring
    50         void on_notify( L &, struct thread$ * );
    51 
    52         // For synchronization locks to use when releasing
    53         size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
    54 
    55         // to set recursion count after getting signalled;
    56         void on_wakeup( L &, size_t recursion );
    57 };
    58 
    59 static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    60     pp_fn( pp_datum );
    61     park();
    62 }
    63 
    64 // macros for default routine impls for is_blocking_lock trait that do not wait-morph
    65 
    66 #define DEFAULT_ON_NOTIFY( lock_type ) \
    67     static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
    68 
    69 #define DEFAULT_ON_WAIT( lock_type ) \
    70     static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
    71         unlock( this ); \
    72         pre_park_then_park( pp_fn, pp_datum ); \
    73         return 0; \
    74     }
    75 
    76 // on_wakeup impl if lock should be reacquired after waking up
    77 #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    78     static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
    79 
    80 // on_wakeup impl if lock will not be reacquired after waking up
    81 #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    82     static inline void on_wakeup( lock_type & this, size_t recursion ) {}
    83 
    84 
     39// C_TODO: cleanup this and locks.cfa
     40// - appropriate separation of interface and impl
     41// - clean up unused/unneeded locks
     42// - change messy big blocking lock from inheritance to composition to remove need for flags
    8543
    8644//-----------------------------------------------------------------------------
     
    10967static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
    11068static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
    111 static inline size_t on_wait  ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
     69static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
    11270static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    11371static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
    114 static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
    115 static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
    116 static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
    11772
    11873//----------
     
    12681static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
    12782static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
    128 static inline size_t on_wait  ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
     83static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
    12984static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    13085static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
    131 static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
    132 static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
    133 static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
    13486
    13587//-----------------------------------------------------------------------------
     
    204156// - Kernel thd blocking alternative to the spinlock
    205157// - No ownership (will deadlock on reacq)
    206 // - no reacq on wakeup
    207158struct futex_mutex {
    208159        // lock state any state other than UNLOCKED is locked
     
    218169}
    219170
    220 static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
    221 
    222 static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
     171static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }
     172
     173static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
    223174        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
    224175}
    225176
    226 static inline int internal_exchange( futex_mutex & this ) with(this) {
     177static inline int internal_exchange(futex_mutex & this) with(this) {
    227178        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
    228179}
    229180
    230181// if this is called recursively IT WILL DEADLOCK!!!!!
    231 static inline void lock( futex_mutex & this ) with(this) {
     182static inline void lock(futex_mutex & this) with(this) {
    232183        int state;
    233184
     
    239190                for (int i = 0; i < spin; i++) Pause();
    240191        }
     192
     193        // // no contention try to acquire
     194        // if (internal_try_lock(this, state)) return;
    241195       
    242196        // if not in contended state, set to be in contended state
     
    258212}
    259213
    260 DEFAULT_ON_NOTIFY( futex_mutex )
    261 DEFAULT_ON_WAIT( futex_mutex )
    262 DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
     214static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
     215static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
     216
     217// to set recursion count after getting signalled;
     218static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
    263219
    264220//-----------------------------------------------------------------------------
     
    276232        int val;
    277233};
     234
    278235static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }
    279 // static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
    280 // static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
    281236
    282237static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
     
    289244
    290245// if this is called recursively IT WILL DEADLOCK!!!!!
    291 static inline void lock( go_mutex & this ) with( this ) {
     246static inline void lock(go_mutex & this) with(this) {
    292247        int state, init_state;
    293248
     
    300255            while( !val ) { // lock unlocked
    301256                state = 0;
    302                 if ( internal_try_lock( this, state, init_state ) ) return;
     257                if (internal_try_lock(this, state, init_state)) return;
    303258            }
    304259            for (int i = 0; i < 30; i++) Pause();
     
    307262        while( !val ) { // lock unlocked
    308263            state = 0;
    309             if ( internal_try_lock( this, state, init_state ) ) return;
     264            if (internal_try_lock(this, state, init_state)) return;
    310265        }
    311266        sched_yield();
    312267       
    313268        // if not in contended state, set to be in contended state
    314         state = internal_exchange( this, 2 );
     269        state = internal_exchange(this, 2);
    315270        if ( !state ) return; // state == 0
    316271        init_state = 2;
    317         futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
     272        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
    318273    }
    319274}
     
    321276static inline void unlock( go_mutex & this ) with(this) {
    322277        // if uncontended do atomic unlock and then return
    323     if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
     278    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
    324279       
    325280        // otherwise threads are blocked so we must wake one
    326         futex( (int *)&val, FUTEX_WAKE, 1 );
    327 }
    328 
    329 DEFAULT_ON_NOTIFY( go_mutex )
    330 DEFAULT_ON_WAIT( go_mutex )
    331 DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
     281        futex((int *)&val, FUTEX_WAKE, 1);
     282}
     283
     284static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
     285static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
     286static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
     287
     288//-----------------------------------------------------------------------------
     289// CLH Spinlock
     290// - No recursive acquisition
     291// - Needs to be released by owner
     292
     293struct clh_lock {
     294        volatile bool * volatile tail;
     295    volatile bool * volatile head;
     296};
     297
     298static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
     299static inline void ^?{}( clh_lock & this ) { free(this.tail); }
     300
     301static inline void lock(clh_lock & l) {
     302        thread$ * curr_thd = active_thread();
     303        *(curr_thd->clh_node) = false;
     304        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
     305        while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
     306    __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
     307    curr_thd->clh_node = prev;
     308}
     309
     310static inline void unlock(clh_lock & l) {
     311        __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
     312}
     313
     314static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
     315static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
     316static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
    332317
    333318//-----------------------------------------------------------------------------
     
    349334        this.lock_value = 0;
    350335}
    351 static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
    352 static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
    353336
    354337static inline void  ^?{}( exp_backoff_then_block_lock & this ){}
    355338
    356 static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
     339static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
    357340        return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    358341}
    359342
    360 static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
    361 
    362 static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
    363         return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
    364 }
    365 
    366 static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
     343static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
     344
     345static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
     346        return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
     347}
     348
     349static inline bool block(exp_backoff_then_block_lock & this) with(this) {
    367350    lock( spinlock __cfaabi_dbg_ctx2 );
    368351    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
     
    376359}
    377360
    378 static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
     361static inline void lock(exp_backoff_then_block_lock & this) with(this) {
    379362        size_t compare_val = 0;
    380363        int spin = 4;
     
    395378}
    396379
    397 static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
     380static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
    398381    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    399382    lock( spinlock __cfaabi_dbg_ctx2 );
     
    403386}
    404387
    405 DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
    406 DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
    407 DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
     388static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
     389static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
     390static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
    408391
    409392//-----------------------------------------------------------------------------
     
    435418
    436419// if this is called recursively IT WILL DEADLOCK!!!!!
    437 static inline void lock( fast_block_lock & this ) with(this) {
     420static inline void lock(fast_block_lock & this) with(this) {
    438421        lock( lock __cfaabi_dbg_ctx2 );
    439422        if ( held ) {
     
    447430}
    448431
    449 static inline void unlock( fast_block_lock & this ) with(this) {
     432static inline void unlock(fast_block_lock & this) with(this) {
    450433        lock( lock __cfaabi_dbg_ctx2 );
    451434        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
     
    456439}
    457440
    458 static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
     441static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
    459442    lock( lock __cfaabi_dbg_ctx2 );
    460443    insert_last( blocked_threads, *t );
    461444    unlock( lock );
    462445}
    463 DEFAULT_ON_WAIT( fast_block_lock )
    464 DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
     446static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
     447static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
    465448
    466449//-----------------------------------------------------------------------------
     
    473456struct simple_owner_lock {
    474457        // List of blocked threads
    475         dlist( select_node ) blocked_threads;
     458        dlist( thread$ ) blocked_threads;
    476459
    477460        // Spin lock used for mutual exclusion
     
    494477static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
    495478
    496 static inline void lock( simple_owner_lock & this ) with(this) {
    497         if ( owner == active_thread() ) {
     479static inline void lock(simple_owner_lock & this) with(this) {
     480        if (owner == active_thread()) {
    498481                recursion_count++;
    499482                return;
     
    501484        lock( lock __cfaabi_dbg_ctx2 );
    502485
    503         if ( owner != 0p ) {
    504         select_node node;
    505                 insert_last( blocked_threads, node );
     486        if (owner != 0p) {
     487                insert_last( blocked_threads, *active_thread() );
    506488                unlock( lock );
    507489                park( );
     
    513495}
    514496
    515 static inline void pop_node( simple_owner_lock & this ) with(this) {
    516     __handle_waituntil_OR( blocked_threads );
    517     select_node * node = &try_pop_front( blocked_threads );
    518     if ( node ) {
    519         owner = node->blocked_thread;
    520         recursion_count = 1;
    521         // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
    522         wake_one( blocked_threads, *node );
    523     } else {
    524         owner = 0p;
    525         recursion_count = 0;
    526     }
    527 }
    528 
    529 static inline void unlock( simple_owner_lock & this ) with(this) {
     497// TODO: fix duplicate def issue and bring this back
     498// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
     499        // thread$ * t = &try_pop_front( blocked_threads );
     500        // owner = t;
     501        // recursion_count = ( t ? 1 : 0 );
     502        // unpark( t );
     503// }
     504
     505static inline void unlock(simple_owner_lock & this) with(this) {
    530506        lock( lock __cfaabi_dbg_ctx2 );
    531507        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     
    534510        recursion_count--;
    535511        if ( recursion_count == 0 ) {
    536                 pop_node( this );
     512                // pop_and_set_new_owner( this );
     513                thread$ * t = &try_pop_front( blocked_threads );
     514                owner = t;
     515                recursion_count = ( t ? 1 : 0 );
     516                unpark( t );
    537517        }
    538518        unlock( lock );
    539519}
    540520
    541 static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
     521static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
    542522        lock( lock __cfaabi_dbg_ctx2 );
    543523        // lock held
    544524        if ( owner != 0p ) {
    545                 insert_last( blocked_threads, *(select_node *)t->link_node );
     525                insert_last( blocked_threads, *t );
    546526        }
    547527        // lock not held
     
    554534}
    555535
    556 static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
     536static inline size_t on_wait(simple_owner_lock & this) with(this) {
    557537        lock( lock __cfaabi_dbg_ctx2 );
    558538        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     
    561541        size_t ret = recursion_count;
    562542
    563         pop_node( this );
    564 
    565     select_node node;
    566     active_thread()->link_node = (void *)&node;
     543        // pop_and_set_new_owner( this );
     544
     545        thread$ * t = &try_pop_front( blocked_threads );
     546        owner = t;
     547        recursion_count = ( t ? 1 : 0 );
     548        unpark( t );
     549
    567550        unlock( lock );
    568 
    569     pre_park_then_park( pp_fn, pp_datum );
    570 
    571551        return ret;
    572552}
    573553
    574 static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
    575 
    576 // waituntil() support
    577 static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    578     lock( lock __cfaabi_dbg_ctx2 );
    579 
    580     // check if we can complete operation. If so race to establish winner in special OR case
    581     if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
    582         if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
    583            unlock( lock );
    584            return false;
    585         }
    586     }
    587 
    588     if ( owner == active_thread() ) {
    589                 recursion_count++;
    590         if ( node.park_counter ) __make_select_node_available( node );
    591         unlock( lock );
    592                 return true;
    593         }
    594 
    595     if ( owner != 0p ) {
    596                 insert_last( blocked_threads, node );
    597                 unlock( lock );
    598                 return false;
    599         }
    600    
    601         owner = active_thread();
    602         recursion_count = 1;
    603 
    604     if ( node.park_counter ) __make_select_node_available( node );
    605     unlock( lock );
    606     return true;
    607 }
    608 
    609 static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    610     lock( lock __cfaabi_dbg_ctx2 );
    611     if ( node`isListed ) {
    612         remove( node );
    613         unlock( lock );
    614         return false;
    615     }
    616 
    617     if ( owner == active_thread() ) {
    618         recursion_count--;
    619         if ( recursion_count == 0 ) {
    620             pop_node( this );
    621         }
    622     }
    623     unlock( lock );
    624     return false;
    625 }
    626 
    627 static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
    628 
     554static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
    629555
    630556//-----------------------------------------------------------------------------
     
    652578
    653579// if this is called recursively IT WILL DEADLOCK!
    654 static inline void lock( spin_queue_lock & this ) with(this) {
     580static inline void lock(spin_queue_lock & this) with(this) {
    655581        mcs_spin_node node;
    656582        lock( lock, node );
     
    660586}
    661587
    662 static inline void unlock( spin_queue_lock & this ) with(this) {
     588static inline void unlock(spin_queue_lock & this) with(this) {
    663589        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
    664590}
    665591
    666 DEFAULT_ON_NOTIFY( spin_queue_lock )
    667 DEFAULT_ON_WAIT( spin_queue_lock )
    668 DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
     592static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
     593        unpark(t);
     594}
     595static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
     596static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
     597
    669598
    670599//-----------------------------------------------------------------------------
     
    692621
    693622// if this is called recursively IT WILL DEADLOCK!!!!!
    694 static inline void lock( mcs_block_spin_lock & this ) with(this) {
     623static inline void lock(mcs_block_spin_lock & this) with(this) {
    695624        mcs_node node;
    696625        lock( lock, node );
     
    704633}
    705634
    706 DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
    707 DEFAULT_ON_WAIT( mcs_block_spin_lock )
    708 DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
     635static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
     636static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
     637static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
    709638
    710639//-----------------------------------------------------------------------------
     
    732661
    733662// if this is called recursively IT WILL DEADLOCK!!!!!
    734 static inline void lock( block_spin_lock & this ) with(this) {
     663static inline void lock(block_spin_lock & this) with(this) {
    735664        lock( lock );
    736665        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
     
    739668}
    740669
    741 static inline void unlock( block_spin_lock & this ) with(this) {
     670static inline void unlock(block_spin_lock & this) with(this) {
    742671        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
    743672}
    744673
    745 static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
     674static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
    746675        // first we acquire internal fast_block_lock
    747676        lock( lock __cfaabi_dbg_ctx2 );
     
    757686        unpark(t);
    758687}
    759 DEFAULT_ON_WAIT( block_spin_lock )
    760 static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
     688static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
     689static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
    761690        // now we acquire the entire block_spin_lock upon waking up
    762691        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
     
    764693        unlock( lock ); // Now we release the internal fast_spin_lock
    765694}
     695
     696//-----------------------------------------------------------------------------
     697// is_blocking_lock
     698forall( L & | sized(L) )
     699trait is_blocking_lock {
     700        // For synchronization locks to use when acquiring
     701        void on_notify( L &, struct thread$ * );
     702
     703        // For synchronization locks to use when releasing
     704        size_t on_wait( L & );
     705
     706        // to set recursion count after getting signalled;
     707        void on_wakeup( L &, size_t recursion );
     708};
    766709
    767710//-----------------------------------------------------------------------------
     
    771714forall(L & | is_blocking_lock(L)) {
    772715        struct info_thread;
     716
     717        // // for use by sequence
     718        // info_thread(L) *& Back( info_thread(L) * this );
     719        // info_thread(L) *& Next( info_thread(L) * this );
    773720}
    774721
Note: See TracChangeset for help on using the changeset viewer.