Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/locks.hfa

    rd30e3eb ra45e21c  
    3232#include <fstream.hfa>
    3333
    34 
    3534// futex headers
    3635#include <linux/futex.h>      /* Definition of FUTEX_* constants */
     
    155154// futex_mutex
    156155
    157 // - No cond var support
    158156// - Kernel thd blocking alternative to the spinlock
    159157// - No ownership (will deadlock on reacq)
     
    185183        int state;
    186184
    187        
    188         // // linear backoff omitted for now
    189         // for( int spin = 4; spin < 1024; spin += spin) {
    190         //      state = 0;
    191         //      // if unlocked, lock and return
    192         //      if (internal_try_lock(this, state)) return;
    193         //      if (2 == state) break;
    194         //      for (int i = 0; i < spin; i++) Pause();
    195         // }
    196 
    197         // no contention try to acquire
    198         if (internal_try_lock(this, state)) return;
     185        for( int spin = 4; spin < 1024; spin += spin) {
     186                state = 0;
     187                // if unlocked, lock and return
     188                if (internal_try_lock(this, state)) return;
     189                if (2 == state) break;
     190                for (int i = 0; i < spin; i++) Pause();
     191        }
     192
     193        // // no contention try to acquire
     194        // if (internal_try_lock(this, state)) return;
    199195       
    200196        // if not in contended state, set to be in contended state
     
    209205
    210206static inline void unlock(futex_mutex & this) with(this) {
    211         // if uncontended do atomice unlock and then return
    212         if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel
     207        // if uncontended do atomic unlock and then return
	// exchange returns the prior value: 1 means locked with no waiters,
	// so storing 0 fully released the lock and no wake-up is required
     208    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
    213209       
    214210        // otherwise threads are blocked so we must wake one
    215         __atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
	// prior value was the contended state (2, per the lock-side comments);
	// val is already 0 from the exchange, so wake exactly one futex sleeper
	// to race for the now-free lock
    216211        futex((int *)&val, FUTEX_WAKE, 1);
    217212}
     
    222217// to set recursion count after getting signalled;
    223218static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}    // no-op: futex_mutex has no ownership, so there is no recursion count to restore
     219
     220//-----------------------------------------------------------------------------
     221// go_mutex
     222
     223// - Kernel thd blocking alternative to the spinlock
     224// - No ownership (will deadlock on reacq)
     225// - Golang's flavour of mutex
     226// - Impl taken from Golang: src/runtime/lock_futex.go
     227struct go_mutex {
     228        // lock state any state other than UNLOCKED is locked
     229        // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
     230       
     231        // stores a lock state
	// val holds one of the three LockState values commented above:
	// 0 = unlocked, 1 = locked with no known sleepers, 2 = locked with
	// threads possibly blocked on the futex (the contended state)
     232        int val;
     233};
     234
     235static inline void  ?{}( go_mutex & this ) with(this) { val = 0; }      // default constructor: start in the UNLOCKED (0) state
     236
     237static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
	// One-shot CAS: atomically set val to new_val iff val == compare_val.
	// Per the GCC __atomic_compare_exchange_n contract, on failure
	// compare_val is updated with the value actually observed in val,
	// so callers can inspect the current lock state.
     238        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
     239}
     240
     241static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
	// atomically install `swap` as the lock state and return the previous
	// state so the caller can tell whether the lock was free (0) or held
     242        return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
     243}
     244
	// spin-tuning knobs: rounds of active spinning, and Pause()s per round,
	// before lock() falls back to sched_yield()/futex — presumably tuned to
	// match the Golang runtime source this implementation is adapted from
     245const int __go_mtx_spins = 4;
     246const int __go_mtx_pauses = 30;
     247// if this is called recursively IT WILL DEADLOCK!!!!!
     248static inline void lock(go_mutex & this) with(this) {
	// Acquire path modelled on Go's runtime lock (src/runtime/lock_futex.go):
	//   1. speculative exchange to LOCKED(1); old value 0 means we own it
	//   2. bounded active spinning with Pause(), CASing whenever the lock is
	//      observed unlocked
	//   3. one sched_yield() passive-spin round
	//   4. escalate the state to SLEEPING(2) and block on the futex
     249        int state, init_state;
     250
     251    // speculative grab
     252    state = internal_exchange(this, 1);
     253    if ( !state ) return; // state == 0
	// remember the state we displaced: re-acquiring with init_state keeps
	// the lock marked SLEEPING(2) if a previous holder had seen contention,
	// so waiters are not stranded
     254    init_state = state;
     255    for (;;) {
     256        for( int i = 0; i < __go_mtx_spins; i++ ) {
     257            while( !val ) { // lock unlocked
     258                state = 0;
     259                if (internal_try_lock(this, state, init_state)) return;
     260            }
     261            for (int i = 0; i < __go_mtx_pauses; i++) Pause();
     262        }
     263
	// last chance to grab the lock before yielding the processor
     264        while( !val ) { // lock unlocked
     265            state = 0;
     266            if (internal_try_lock(this, state, init_state)) return;
     267        }
     268        sched_yield();
     269       
     270        // if not in contended state, set to be in contended state
     271        state = internal_exchange(this, 2);
     272        if ( !state ) return; // state == 0
     273        init_state = 2;
	// block until a holder's unlock() issues FUTEX_WAKE; if val changed
	// before the kernel checks it, the wait returns immediately and we
	// simply loop back to spinning
     274        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
     275    }
     276}
     277
     278static inline void unlock( go_mutex & this ) with(this) {
     279        // if uncontended do atomic unlock and then return
	// exchange returns the previous state: 1 (LOCKED, no sleepers) means
	// nobody is blocked on the futex, so no wake-up is needed
     280    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
     281       
     282        // otherwise threads are blocked so we must wake one
	// previous state was SLEEPING(2): val is already 0 from the exchange,
	// so the woken thread will find the lock free and re-acquire it
     283        futex((int *)&val, FUTEX_WAKE, 1);
     284}
     285
     286static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }  // lock-interface hook: resume the signalled thread
     287static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}     // release the mutex before blocking; returns 0 since there is no recursion count
     288static inline void on_wakeup( go_mutex & f, size_t recursion ) {}       // no-op: go_mutex has no ownership/recursion state to restore
    224289
    225290//-----------------------------------------------------------------------------
     
    271336        this.lock_value = 0;
    272337}
     338
     339static inline void  ^?{}( exp_backoff_then_block_lock & this ){}        // no-op destructor — presumably nothing to release; confirm against the struct's fields
    273340
    274341static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
Note: See TracChangeset for help on using the changeset viewer.