Timestamp: May 1, 2023, 4:00:06 PM
Author:    caparsons <caparson@…>
Branches:  ADT, ast-experimental, master
Children:  73bf7ddc
Parents:   bb7422a
Message:   some cleanup and a bunch of changes to support waituntil statement
File:      1 edited
Legend: each diff line below is prefixed with its old and new line numbers;
a blank old number and + mark an added line, a blank new number and - mark a
removed line, and blank rows separate non-contiguous hunks.
  • libcfa/src/concurrency/locks.hfa
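Context for the change (not part of the changeset): waituntil is Cforall's select-style statement, and the register/unregister hooks added below let locks participate as clauses. A minimal usage sketch, assuming the CFA waituntil clause syntax in which or composes alternatives and the winning clause runs with its lock acquired:

	#include <locks.hfa>

	simple_owner_lock A, B;

	int main() {
		// block until A or B is acquired; exactly one clause body runs
		waituntil( A ) { /* holds A here (assumed semantics) */ unlock( A ); }
		or waituntil( B ) { /* holds B here */ unlock( B ); }
	}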

rbb7422a  rbeeff61e
 30  30   #include "time.hfa"
 31  31
     32 + #include "select.hfa"
     33 +
 32  34   #include <fstream.hfa>
 33  35
     
 70  72   static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 71  73   static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
     74 + static inline bool   register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
     75 + static inline bool   unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
     76 + static inline bool   on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
 72  77
 73  78   //----------
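These three forwarders make single_acquisition_lock selectable by delegating to the underlying blocking_lock. For orientation, a plausible shape of the interface they satisfy — the real trait lives in select.hfa and may differ in detail:

	forall( T & )
	trait is_selectable( T ) {
		bool register_select( T &, select_node & );   // enqueue a clause's node, or report it immediately ready
		bool unregister_select( T &, select_node & ); // withdraw the node of a losing clause
		bool on_selected( T &, select_node & );       // hook run by the winning clause before its body
	};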
     
 84  89   static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 85  90   static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
     91 + static inline bool   register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
     92 + static inline bool   unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
     93 + static inline bool   on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
 86  94
 87  95   //-----------------------------------------------------------------------------
     
180 188
181 189   // if this is called recursively IT WILL DEADLOCK!!!!!
182     - static inline void lock(futex_mutex & this) with(this) {
    190 + static inline void lock( futex_mutex & this ) with(this) {
183 191           int state;
184 192
     
190 198                   for (int i = 0; i < spin; i++) Pause();
191 199           }
192     -
193     -         // // no contention try to acquire
194     -         // if (internal_try_lock(this, state)) return;
195 200
196 201           // if not in contended state, set to be in contended state
     
213 218
214 219   static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
215     - static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
    220 + static inline size_t on_wait( futex_mutex & f ) { unlock(f); park(); return 0; }
216 221
217 222   // to set recursion count after getting signalled;
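The on_wait change is behavioural, not cosmetic: the routine now parks the caller itself rather than only releasing the lock, and every lock's on_wait in this changeset gains the same park() between unlock and return. A sketch of how a consumer of the is_blocking_lock trait (declared near the end of this file) presumably drives the hooks under that assumption — wait_sketch is a hypothetical name:

	forall( L & | is_blocking_lock( L ) )
	void wait_sketch( L & l ) {
		size_t recursion = on_wait( l );  // releases l and parks this thread
		// ... another thread runs on_notify( l, waiter ) to unpark us ...
		on_wakeup( l, recursion );        // restores lock/recursion state after waking
	}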
     
244 249
245 250   // if this is called recursively IT WILL DEADLOCK!!!!!
246     - static inline void lock(go_mutex & this) with(this) {
    251 + static inline void lock( go_mutex & this ) with( this ) {
247 252           int state, init_state;
248 253
     
255 260               while( !val ) { // lock unlocked
256 261                   state = 0;
257     -                 if (internal_try_lock(this, state, init_state)) return;
    262 +                 if ( internal_try_lock( this, state, init_state ) ) return;
258 263               }
259 264               for (int i = 0; i < 30; i++) Pause();
     
262 267           while( !val ) { // lock unlocked
263 268               state = 0;
264     -             if (internal_try_lock(this, state, init_state)) return;
    269 +             if ( internal_try_lock( this, state, init_state ) ) return;
265 270           }
266 271           sched_yield();
267 272
268 273           // if not in contended state, set to be in contended state
269     -         state = internal_exchange(this, 2);
    274 +         state = internal_exchange( this, 2 );
270 275           if ( !state ) return; // state == 0
271 276           init_state = 2;
272     -         futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
    277 +         futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
273 278       }
274 279   }
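Not part of the diff: the val states implied by the code above follow the standard three-state futex protocol (compare Drepper's "Futexes Are Tricky" and Go's sync.Mutex, after which this lock is named):

	// val == 0 : unlocked
	// val == 1 : locked, uncontended -- fast path: CAS 0 -> 1 (internal_try_lock)
	// val == 2 : locked, contended   -- slow path: exchange val to 2, then
	//            futex( &val, FUTEX_WAIT, 2 ) sleeps only while val is still 2
	//
	// unlock(): exchange val to 0; if the old value was not 1, a thread may be
	//           blocked, so futex( &val, FUTEX_WAKE, 1 ) wakes one waiter.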
     
276 281   static inline void unlock( go_mutex & this ) with(this) {
277 282           // if uncontended do atomic unlock and then return
278     -     if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
    283 +     if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
279 284
280 285           // otherwise threads are blocked so we must wake one
281     -         futex((int *)&val, FUTEX_WAKE, 1);
282     - }
283     -
284     - static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
285     - static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
    286 +         futex( (int *)&val, FUTEX_WAKE, 1 );
    287 + }
    288 +
    289 + static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); }
    290 + static inline size_t on_wait( go_mutex & f ) { unlock( f ); park(); return 0; }
286 291   static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
287     -
288     - //-----------------------------------------------------------------------------
289     - // CLH Spinlock
290     - // - No recursive acquisition
291     - // - Needs to be released by owner
292     -
293     - struct clh_lock {
294     -         volatile bool * volatile tail;
295     -     volatile bool * volatile head;
296     - };
297     -
298     - static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
299     - static inline void ^?{}( clh_lock & this ) { free(this.tail); }
300     -
301     - static inline void lock(clh_lock & l) {
302     -         thread$ * curr_thd = active_thread();
303     -         *(curr_thd->clh_node) = false;
304     -         volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
305     -         while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
306     -     __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
307     -     curr_thd->clh_node = prev;
308     - }
309     -
310     - static inline void unlock(clh_lock & l) {
311     -         __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
312     - }
313     -
314     - static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
315     - static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
316     - static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
317 292
318 293   //-----------------------------------------------------------------------------
     
337 312   static inline void  ^?{}( exp_backoff_then_block_lock & this ){}
338 313
339     - static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
    314 + static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
340 315           return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
341 316   }
342 317
343     - static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
344     -
345     - static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
346     -         return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
347     - }
348     -
349     - static inline bool block(exp_backoff_then_block_lock & this) with(this) {
    318 + static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
    319 +
    320 + static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
    321 +         return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
    322 + }
    323 +
    324 + static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
350 325       lock( spinlock __cfaabi_dbg_ctx2 );
351 326       if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
     
359 334   }
360 335
361     - static inline void lock(exp_backoff_then_block_lock & this) with(this) {
    336 + static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
362 337           size_t compare_val = 0;
363 338           int spin = 4;
     
378 353   }
379 354
380     - static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
    355 + static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
381 356       if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
382 357       lock( spinlock __cfaabi_dbg_ctx2 );
     
386 361   }
387 362
388     - static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
389     - static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
390     - static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
    363 + static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); }
    364 + static inline size_t on_wait( exp_backoff_then_block_lock & this ) { unlock( this ); park(); return 0; }
    365 + static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); }
391 366
392 367   //-----------------------------------------------------------------------------
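For orientation, the acquire path whose middle is elided between these hunks escalates from CAS, to exponential spin backoff, to blocking. A sketch with illustrative constants — the real bounds and return-value handling may differ, and lock_sketch is a hypothetical name:

	static inline void lock_sketch( exp_backoff_then_block_lock & this ) with(this) {
		size_t compare_val = 0;
		int spin = 4;
		for () {                                                  // forever loop
			if ( internal_try_lock( this, compare_val ) ) return; // CAS 0 -> 1
			if ( spin < 1024 ) {                                  // backoff phase: spin, then double
				for ( int i = 0; i < spin; i++ ) Pause();
				spin += spin;
			} else if ( try_lock_contention( this ) ) return;     // exchange to 2 can itself win the lock
			else block( this );                                   // enqueue under the spinlock and park
			compare_val = 0;
		}
	}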
     
418 393
419 394   // if this is called recursively IT WILL DEADLOCK!!!!!
420     - static inline void lock(fast_block_lock & this) with(this) {
    395 + static inline void lock( fast_block_lock & this ) with(this) {
421 396           lock( lock __cfaabi_dbg_ctx2 );
422 397           if ( held ) {
     
430 405   }
431 406
432     - static inline void unlock(fast_block_lock & this) with(this) {
    407 + static inline void unlock( fast_block_lock & this ) with(this) {
433 408           lock( lock __cfaabi_dbg_ctx2 );
434 409           /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
     
439 414   }
440 415
441     - static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
    416 + static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
442 417       lock( lock __cfaabi_dbg_ctx2 );
443 418       insert_last( blocked_threads, *t );
444 419       unlock( lock );
445 420   }
446     - static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
447     - static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
    421 + static inline size_t on_wait( fast_block_lock & this) { unlock(this); park(); return 0; }
    422 + static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { }
448 423
449 424   //-----------------------------------------------------------------------------
     
456 431   struct simple_owner_lock {
457 432           // List of blocked threads
458     -         dlist( thread$ ) blocked_threads;
    433 +         dlist( select_node ) blocked_threads;
459 434
460 435           // Spin lock used for mutual exclusion
     
477 452   static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
478 453
479     - static inline void lock(simple_owner_lock & this) with(this) {
480     -         if (owner == active_thread()) {
    454 + static inline void lock( simple_owner_lock & this ) with(this) {
    455 +         if ( owner == active_thread() ) {
481 456                   recursion_count++;
482 457                   return;
     
484 459           lock( lock __cfaabi_dbg_ctx2 );
485 460
486     -         if (owner != 0p) {
487     -                 insert_last( blocked_threads, *active_thread() );
    461 +         if ( owner != 0p ) {
    462 +         select_node node;
    463 +                 insert_last( blocked_threads, node );
488 464                   unlock( lock );
489 465                   park( );
     
495 471   }
496 472
497     - // TODO: fix duplicate def issue and bring this back
498     - // void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
499     -         // thread$ * t = &try_pop_front( blocked_threads );
500     -         // owner = t;
501     -         // recursion_count = ( t ? 1 : 0 );
502     -         // unpark( t );
503     - // }
504     -
505     - static inline void unlock(simple_owner_lock & this) with(this) {
    473 + static inline void pop_node( simple_owner_lock & this ) with(this) {
    474 +     __handle_waituntil_OR( blocked_threads );
    475 +     select_node * node = &try_pop_front( blocked_threads );
    476 +     if ( node ) {
    477 +         owner = node->blocked_thread;
    478 +         recursion_count = 1;
    479 +         // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
    480 +         wake_one( blocked_threads, *node );
    481 +     } else {
    482 +         owner = 0p;
    483 +         recursion_count = 0;
    484 +     }
    485 + }
    486 +
    487 + static inline void unlock( simple_owner_lock & this ) with(this) {
506 488           lock( lock __cfaabi_dbg_ctx2 );
507 489           /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     
510 492           recursion_count--;
511 493           if ( recursion_count == 0 ) {
512     -                 // pop_and_set_new_owner( this );
513     -                 thread$ * t = &try_pop_front( blocked_threads );
514     -                 owner = t;
515     -                 recursion_count = ( t ? 1 : 0 );
516     -                 unpark( t );
    494 +                 pop_node( this );
517 495           }
518 496           unlock( lock );
519 497   }
520 498
521     - static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
    499 + static inline void on_notify(simple_owner_lock & this, thread$ * t ) with(this) {
522 500           lock( lock __cfaabi_dbg_ctx2 );
523 501           // lock held
524 502           if ( owner != 0p ) {
525     -                 insert_last( blocked_threads, *t );
    503 +                 insert_last( blocked_threads, *(select_node *)t->link_node );
526 504           }
527 505           // lock not held
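Not part of the diff: waiters are now queued as select_node records living on the blocked thread's stack (see on_wait below, which plants the node's address in active_thread()->link_node so this on_notify can recover it), and ownership handoff happens wholesale in pop_node. A usage-level sketch of the resulting behaviour:

	simple_owner_lock L;
	lock( L ); lock( L );  // recursive acquire by the owner: recursion_count == 2
	unlock( L );           // recursion_count 1, ownership retained
	unlock( L );           // recursion_count 0: pop_node() pops the next
	                       // select_node, makes its blocked_thread the owner
	                       // with recursion_count 1, and wakes it via wake_one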
     
534 512   }
535 513
536     - static inline size_t on_wait(simple_owner_lock & this) with(this) {
    514 + static inline size_t on_wait( simple_owner_lock & this ) with(this) {
537 515           lock( lock __cfaabi_dbg_ctx2 );
538 516           /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     
541 519           size_t ret = recursion_count;
542 520
543     -         // pop_and_set_new_owner( this );
544     -
545     -         thread$ * t = &try_pop_front( blocked_threads );
546     -         owner = t;
547     -         recursion_count = ( t ? 1 : 0 );
548     -         unpark( t );
549     -
    521 +         pop_node( this );
    522 +
    523 +     select_node node;
    524 +     active_thread()->link_node = (void *)&node;
550 525           unlock( lock );
    526 +     park();
    527 +
551 528           return ret;
552 529   }
553 530
554     - static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
    531 + static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
    532 +
    533 + // waituntil() support
    534 + static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    535 +     lock( lock __cfaabi_dbg_ctx2 );
    536 +
    537 +     // check if we can complete operation. If so race to establish winner in special OR case
    538 +     if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
    539 +         if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
    540 +            unlock( lock );
    541 +            return false;
    542 +         }
    543 +     }
    544 +
    545 +     if ( owner == active_thread() ) {
    546 +                 recursion_count++;
    547 +         if ( node.park_counter ) __make_select_node_available( node );
    548 +         unlock( lock );
    549 +                 return true;
    550 +         }
    551 +
    552 +     if ( owner != 0p ) {
    553 +                 insert_last( blocked_threads, node );
    554 +                 unlock( lock );
    555 +                 return false;
    556 +         }
    557 +
    558 +         owner = active_thread();
    559 +         recursion_count = 1;
    560 +
    561 +     if ( node.park_counter ) __make_select_node_available( node );
    562 +     unlock( lock );
    563 +     return true;
    564 + }
    565 +
    566 + static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    567 +     lock( lock __cfaabi_dbg_ctx2 );
    568 +     if ( node`isListed ) {
    569 +         remove( node );
    570 +         unlock( lock );
    571 +         return false;
    572 +     }
    573 +
    574 +     if ( owner == active_thread() ) {
    575 +         recursion_count--;
    576 +         if ( recursion_count == 0 ) {
    577 +             pop_node( this );
    578 +         }
    579 +     }
    580 +     unlock( lock );
    581 +     return false;
    582 + }
    583 +
    584 + static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
    585 +
555 586
556 587   //-----------------------------------------------------------------------------
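register_select is the lock half of waituntil registration, and its boolean result steers the statement. Summarizing the paths in the code above:

	// true  : acquired immediately -- the lock was free, or this is a recursive
	//         acquire by the current owner; when the node has a park counter it
	//         is flagged via __make_select_node_available so the clause fires
	//         without parking.
	// false : not acquired -- the node either lost the special-OR race (the
	//         !node.park_counter case) or was enqueued on blocked_threads, to
	//         be granted ownership later by pop_node()/wake_one() on unlock().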
     
578 609
579 610   // if this is called recursively IT WILL DEADLOCK!
580     - static inline void lock(spin_queue_lock & this) with(this) {
    611 + static inline void lock( spin_queue_lock & this ) with(this) {
581 612           mcs_spin_node node;
582 613           lock( lock, node );
     
586 617   }
587 618
588     - static inline void unlock(spin_queue_lock & this) with(this) {
    619 + static inline void unlock( spin_queue_lock & this ) with(this) {
589 620           __atomic_store_n(&held, false, __ATOMIC_RELEASE);
590 621   }
591 622
592     - static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
    623 + static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) {
593 624           unpark(t);
594 625   }
595     - static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
596     - static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
    626 + static inline size_t on_wait( spin_queue_lock & this ) { unlock( this ); park(); return 0; }
    627 + static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); }
597 628
598 629
     
621 652
622 653   // if this is called recursively IT WILL DEADLOCK!!!!!
623     - static inline void lock(mcs_block_spin_lock & this) with(this) {
    654 + static inline void lock( mcs_block_spin_lock & this ) with(this) {
624 655           mcs_node node;
625 656           lock( lock, node );
     
633 664   }
634 665
635     - static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
636     - static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
637     - static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
    666 + static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); }
    667 + static inline size_t on_wait( mcs_block_spin_lock & this) { unlock( this ); park(); return 0; }
    668 + static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); }
638 669
639 670   //-----------------------------------------------------------------------------
     
661 692
662 693   // if this is called recursively IT WILL DEADLOCK!!!!!
663     - static inline void lock(block_spin_lock & this) with(this) {
    694 + static inline void lock( block_spin_lock & this ) with(this) {
664 695           lock( lock );
665 696           while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
     
668 699   }
669 700
670     - static inline void unlock(block_spin_lock & this) with(this) {
    701 + static inline void unlock( block_spin_lock & this ) with(this) {
671 702           __atomic_store_n(&held, false, __ATOMIC_RELEASE);
672 703   }
673 704
674     - static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
    705 + static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
675 706           // first we acquire internal fast_block_lock
676 707           lock( lock __cfaabi_dbg_ctx2 );
     
686 717           unpark(t);
687 718   }
688     - static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
689     - static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
    719 + static inline size_t on_wait( block_spin_lock & this ) { unlock( this ); park(); return 0; }
    720 + static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
690 721           // now we acquire the entire block_spin_lock upon waking up
691 722           while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
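block_spin_lock splits its wake-up handoff across two threads: on_notify re-acquires the internal fast_block_lock before unparking, and the woken thread completes acquisition in on_wakeup, so no third thread can slip in between unpark and wake-up. The tail of on_wakeup falls outside this hunk; a sketch of the assumed completion:

	// notifier:     on_notify -> lock( lock __cfaabi_dbg_ctx2 ); ...; unpark( t );
	// woken thread: on_wakeup -> while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	//               __atomic_store_n( &held, true, __ATOMIC_RELEASE );  // assumed, elided
	//               unlock( lock );                                     // assumed, elided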
     
714 745   forall(L & | is_blocking_lock(L)) {
715 746           struct info_thread;
716     -
717     -         // // for use by sequence
718     -         // info_thread(L) *& Back( info_thread(L) * this );
719     -         // info_thread(L) *& Next( info_thread(L) * this );
720 747   }
721 748