Changeset b77f0e1 for libcfa/src


Timestamp: Nov 14, 2022, 11:52:40 AM
Author: caparson <caparson@…>
Branches: ADT, ast-experimental, master
Children: 63be3387
Parents: bc899d6
Message: cleaned up some lock stuff

File: 1 edited

Legend: lines beginning with "+" were added, "-" removed, and " " left unchanged.
  • libcfa/src/concurrency/locks.hfa

--- libcfa/src/concurrency/locks.hfa (bc899d6)
+++ libcfa/src/concurrency/locks.hfa (b77f0e1)
@@ -30,4 +30,12 @@
 #include "time.hfa"
 
+#include <fstream.hfa>
+
+
+// futex headers
+#include <linux/futex.h>      /* Definition of FUTEX_* constants */
+#include <sys/syscall.h>      /* Definition of SYS_* constants */
+#include <unistd.h>
+
 //-----------------------------------------------------------------------------
 // Semaphore
     
@@ -140,4 +148,73 @@
 
 //-----------------------------------------------------------------------------
+// futex_mutex
+
+// - No cond var support
+// - Kernel-thread-blocking alternative to the spinlock
+// - No ownership (will deadlock on reacquisition)
+struct futex_mutex {
+        // lock state: any state other than UNLOCKED is locked
+        // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
+
+        // stores a lock state
+        int val;
+};
+
+// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
+static int futex(int *uaddr, int futex_op, int val) {
+        return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
+}
+
+static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }
+
+static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
+        return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+}
+
+static inline int internal_exchange(futex_mutex & this) with(this) {
+        return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
+}
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(futex_mutex & this) with(this) {
+        int state;
+
+
+        // linear backoff
+        for( int spin = 4; spin < 1024; spin += spin) {
+                state = 0;
+                // if unlocked, lock and return
+                if (internal_try_lock(this, state)) return;
+                if (2 == state) break;
+                for (int i = 0; i < spin; i++) Pause();
+        }
+        // if (internal_try_lock(this, state)) return;
+
+        // if not in contended state, set to be in contended state
+        if (state != 2) state = internal_exchange(this);
+
+        // block and spin until we win the lock
+        while (state != 0) {
+                futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
+                state = internal_exchange(this);
+        }
+}
+
+static inline void unlock(futex_mutex & this) with(this) {
+        // if uncontended, do an atomic unlock and then return
+        if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel
+
+        // otherwise threads are blocked so we must wake one
+        __atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
+        futex((int *)&val, FUTEX_WAKE, 1);
+}
+
+static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
+static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
+
+// to set recursion count after getting signalled
+static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
+
+//-----------------------------------------------------------------------------
 // CLH Spinlock
 // - No recursive acquisition
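A note on the futex_mutex added above: it follows the classic three-state futex protocol (0 = unlocked, 1 = locked, 2 = locked with possible waiters). For readers unfamiliar with it, here is a minimal stand-alone sketch in plain C of the same scheme. It is illustrative only: the names are hypothetical, it omits the bounded spinning the CFA version performs before sleeping, and its unlock uses an exchange where the CFA version uses a fetch-and-subtract; both are standard variants of the same protocol.

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct { atomic_int val; } demo_futex_mutex;   // hypothetical demo type

    static long demo_futex(atomic_int * uaddr, int op, int val) {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    static void demo_lock(demo_futex_mutex * m) {
        int expected = 0;
        // fast path: uncontended 0 -> 1
        if (atomic_compare_exchange_strong(&m->val, &expected, 1)) return;
        // slow path: advertise contention (2), then sleep until we win the lock
        while (atomic_exchange(&m->val, 2) != 0)
            demo_futex(&m->val, FUTEX_WAIT, 2);   // returns immediately unless val == 2
    }

    static void demo_unlock(demo_futex_mutex * m) {
        // if anyone may be sleeping (state 2), reset and wake one waiter
        if (atomic_exchange(&m->val, 0) == 2)
            demo_futex(&m->val, FUTEX_WAKE, 1);
    }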
     
@@ -165,4 +242,13 @@
 }
 
+static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(clh_lock & this, size_t recursion ) {
+        #ifdef REACQ
+        lock(this);
+        #endif
+}
+
+
 //-----------------------------------------------------------------------------
 // Linear backoff Spinlock
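The #ifdef REACQ guard introduced here (and repeated in every on_wakeup below) selects between two wake-up protocols for a thread that blocked in a condition-variable wait: with REACQ defined, the woken thread reacquires the lock itself; without it, the notifier is expected to hand ownership over so the wakee resumes already holding the lock. A toy C sketch of the distinction (all names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_bool held; } toy_lock;

    static void toy_acquire(toy_lock * l) {
        bool expected;
        do { expected = false; }
        while (!atomic_compare_exchange_weak(&l->held, &expected, true));
    }

    // REACQ-style on_wakeup: the wakee must compete for the lock again
    static void toy_on_wakeup_reacq(toy_lock * l) { toy_acquire(l); }

    // handoff-style on_wakeup: the notifier never released `held`, so
    // ownership already belongs to the wakee and nothing remains to do
    static void toy_on_wakeup_handoff(toy_lock * l) { (void)l; }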
     
@@ -171,7 +257,4 @@
         __spinlock_t spinlock;
 
-        // Current thread owning the lock
-        struct thread$ * owner;
-
         // List of blocked threads
         dlist( thread$ ) blocked_threads;
     
@@ -179,31 +262,17 @@
         // Used for comparing and exchanging
         volatile size_t lock_value;
-
-        // used for linear backoff spinning
-        int spin_start;
-        int spin_end;
-        int spin_count;
-
-        // after unsuccessful linear backoff yield this many times
-        int yield_count;
-};
-
-static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
+};
+
+static inline void  ?{}( linear_backoff_then_block_lock & this ) {
         this.spinlock{};
         this.blocked_threads{};
         this.lock_value = 0;
-        this.spin_start = spin_start;
-        this.spin_end = spin_end;
-        this.spin_count = spin_count;
-        this.yield_count = yield_count;
-}
-static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
+}
 static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
-static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
-static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
+// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
+// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
 
 static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
         if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
-                owner = active_thread();
                 return true;
         }
     
@@ -215,5 +284,4 @@
 static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
         if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
-                owner = active_thread();
                 return true;
         }
     
@@ -234,8 +302,6 @@
 
 static inline void lock(linear_backoff_then_block_lock & this) with(this) {
-        // if owner just return
-        if (active_thread() == owner) return;
         size_t compare_val = 0;
-        int spin = spin_start;
+        int spin = 4;
         // linear backoff
         for( ;; ) {
     
@@ -244,5 +310,5 @@
                 if (2 == compare_val) break;
                 for (int i = 0; i < spin; i++) Pause();
-                if (spin >= spin_end) break;
+                if (spin >= 1024) break;
                 spin += spin;
         }
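The spin loop above doubles its pause count after every failed attempt (spin += spin), capped at 1024; the tunable spin_start/spin_end fields removed earlier are replaced by these fixed constants. A hedged plain-C rendering of the same loop shape, with Pause() replaced by a hypothetical cpu_relax() and the contended-state early exit kept:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    static inline void cpu_relax(void) { /* e.g. __builtin_ia32_pause() on x86 */ }

    // try to CAS lock_value 0 -> 1, backing off 4, 8, ..., 512 pauses between
    // attempts; returns false if the lock looks contended or we spun out, in
    // which case the caller falls through to its blocking slow path
    static bool demo_backoff_try_lock(atomic_size_t * lock_value) {
        for (int spin = 4; spin < 1024; spin += spin) {
            size_t expected = 0;
            if (atomic_compare_exchange_strong(lock_value, &expected, 1)) return true;
            if (expected == 2) return false;          // already contended: stop spinning
            for (int i = 0; i < spin; i++) cpu_relax();
        }
        return false;
    }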
     
@@ -254,6 +320,4 @@
 
 static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
-        verify(lock_value > 0);
-        owner = 0p;
         if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
         lock( spinlock __cfaabi_dbg_ctx2 );
     
@@ -265,5 +329,9 @@
 static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
+static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
+        #ifdef REACQ
+        lock(this);
+        #endif
+}
 
 //-----------------------------------------------------------------------------
     
@@ -306,5 +374,5 @@
         assert(!(held && owner == active_thread()));
         #endif
-        if (held) {
+        if ( held ) {
                 insert_last( blocked_threads, *active_thread() );
                 unlock( lock );
     
@@ -331,5 +399,13 @@
 }
 
-static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
+static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
+        #ifdef REACQ
+                lock( lock __cfaabi_dbg_ctx2 );
+                insert_last( blocked_threads, *t );
+                unlock( lock );
+        #else
+                unpark(t);
+        #endif
+}
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
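Under REACQ, fast_block_lock's on_notify no longer wakes the thread directly; it appends the thread to the lock's queue of blocked acquirers, so the current holder hands the lock (and the wake-up) to it on release. A miniature of that idea in plain C (hypothetical names; the internal spinlock guarding the queue is elided to comments):

    #include <stddef.h>

    struct toy_thread { struct toy_thread * next; };

    struct toy_block_lock {
        int locked;                           // owner flag
        struct toy_thread * head, * tail;     // FIFO of blocked threads
    };

    // REACQ-style notify: enqueue instead of unpark; the next unlock() will
    // dequeue the thread, leave the lock marked held, and wake it as the owner
    static void toy_on_notify(struct toy_block_lock * l, struct toy_thread * t) {
        // spin_lock(&l->internal) would guard this in real code
        t->next = NULL;
        if (l->tail) l->tail->next = t; else l->head = t;
        l->tail = t;
        // spin_unlock(&l->internal)
    }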
     
@@ -412,5 +488,4 @@
         if ( owner != 0p ) {
                 insert_last( blocked_threads, *t );
-                unlock( lock );
         }
         // lock not held
     
@@ -419,6 +494,6 @@
                 recursion_count = 1;
                 unpark( t );
-                unlock( lock );
-        }
+        }
+        unlock( lock );
 }
 
     
@@ -474,26 +549,23 @@
 static inline void lock(spin_queue_lock & this) with(this) {
         mcs_spin_node node;
-        #ifdef __CFA_DEBUG__
-        assert(!(held && owner == active_thread()));
-        #endif
         lock( lock, node );
         while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
         __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
         unlock( lock, node );
-        #ifdef __CFA_DEBUG__
-        owner = active_thread();
-        #endif
 }
 
 static inline void unlock(spin_queue_lock & this) with(this) {
-        #ifdef __CFA_DEBUG__
-        owner = 0p;
-        #endif
         __atomic_store_n(&held, false, __ATOMIC_RELEASE);
 }
 
-static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
+static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
+        unpark(t);
+}
 static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
+static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
+        #ifdef REACQ
+        lock(this);
+        #endif
+}
 
 
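spin_queue_lock (and mcs_block_spin_lock below) share one idiom: an inner queue lock merely orders acquirers, while a separate held flag carries ownership, so the queue node allocated on lock()'s stack can be released before the critical section runs. A reduced C sketch using a ticket lock in place of the MCS queue (names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        atomic_uint next_ticket, now_serving;   // stand-in for the MCS queue
        atomic_bool held;                       // actual ownership flag
    } toy_spin_queue_lock;

    static void toy_lock(toy_spin_queue_lock * l) {
        unsigned t = atomic_fetch_add(&l->next_ticket, 1);
        while (atomic_load(&l->now_serving) != t) ;   // wait for our turn in the queue
        while (atomic_load(&l->held)) ;               // previous owner may still be inside
        atomic_store(&l->held, true);                 // take ownership
        atomic_fetch_add(&l->now_serving, 1);         // leave the queue immediately
    }

    static void toy_unlock(toy_spin_queue_lock * l) {
        atomic_store(&l->held, false);
    }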
     
@@ -511,9 +583,4 @@
         // flag showing if lock is held
         volatile bool held;
-
-        #ifdef __CFA_DEBUG__
-        // for deadlock detection
-        struct thread$ * owner;
-        #endif
 };
 
     
@@ -529,20 +596,11 @@
 static inline void lock(mcs_block_spin_lock & this) with(this) {
         mcs_node node;
-        #ifdef __CFA_DEBUG__
-        assert(!(held && owner == active_thread()));
-        #endif
         lock( lock, node );
         while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
         __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
         unlock( lock, node );
-        #ifdef __CFA_DEBUG__
-        owner = active_thread();
-        #endif
 }
 
 static inline void unlock(mcs_block_spin_lock & this) with(this) {
-        #ifdef __CFA_DEBUG__
-        owner = 0p;
-        #endif
         __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
 }
     
@@ -550,5 +608,9 @@
 static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
+static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
+        #ifdef REACQ
+        lock(this);
+        #endif
+}
 
 //-----------------------------------------------------------------------------
     
@@ -565,9 +627,4 @@
         // flag showing if lock is held
         volatile bool held;
-
-        #ifdef __CFA_DEBUG__
-        // for deadlock detection
-        struct thread$ * owner;
-        #endif
 };
 
     
@@ -582,26 +639,43 @@
 // if this is called recursively IT WILL DEADLOCK!!!!!
 static inline void lock(block_spin_lock & this) with(this) {
-        #ifdef __CFA_DEBUG__
-        assert(!(held && owner == active_thread()));
-        #endif
         lock( lock );
         while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
         __atomic_store_n(&held, true, __ATOMIC_RELEASE);
         unlock( lock );
+}
+
+static inline void unlock(block_spin_lock & this) with(this) {
+        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
+}
+
+static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
+  #ifdef REACQ
+        // first we acquire the internal fast_block_lock
+        lock( lock __cfaabi_dbg_ctx2 );
+        if ( held ) { // if the internal fast_block_lock is held
+                insert_last( blocked_threads, *t );
+                unlock( lock );
+                return;
+        }
+        // the internal fast_block_lock is not held
+        held = true;
         #ifdef __CFA_DEBUG__
-        owner = active_thread();
-        #endif
-}
-
-static inline void unlock(block_spin_lock & this) with(this) {
-        #ifdef __CFA_DEBUG__
-        owner = 0p;
-        #endif
-        __atomic_store_n(&held, false, __ATOMIC_RELEASE);
-}
-
-static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+        owner = t;
+        #endif
+        unlock( lock );
+
+  #endif
+        unpark(t);
+
+}
 static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
+static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
+  #ifdef REACQ
+        // now we acquire the entire block_spin_lock upon waking up
+        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
+        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
+        unlock( lock ); // now we release the internal fast_block_lock
+  #endif
+}
 
 //-----------------------------------------------------------------------------
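block_spin_lock's REACQ path splits the two halves of lock() across two threads: on_notify acquires the internal fast_block_lock on the wakee's behalf before unparking it, and on_wakeup finishes the job by waiting out the old owner, setting held, and only then releasing the internal lock. A toy C model of that split (hypothetical names; the inner lock is a plain test-and-set flag here):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        atomic_bool inner;   // stands in for the internal fast_block_lock
        atomic_bool held;    // the block_spin_lock's own ownership flag
    } toy_block_spin_lock;

    static void inner_lock(atomic_bool * b) {
        bool expected;
        do { expected = false; }
        while (!atomic_compare_exchange_weak(b, &expected, true));
    }

    // normal acquisition: both phases on one thread
    static void toy_lock(toy_block_spin_lock * l) {
        inner_lock(&l->inner);                   // phase 1: queue up
        while (atomic_load(&l->held)) ;          // phase 2: wait out the old owner...
        atomic_store(&l->held, true);            // ...and take over
        atomic_store(&l->inner, false);
    }

    // REACQ handoff: phase 1 runs on the notifier...
    static void toy_on_notify(toy_block_spin_lock * l /*, wakee t */) {
        inner_lock(&l->inner);
        /* unpark(t); */
    }

    // ...and phase 2 runs on the wakee after it resumes
    static void toy_on_wakeup(toy_block_spin_lock * l) {
        while (atomic_load(&l->held)) ;
        atomic_store(&l->held, true);
        atomic_store(&l->inner, false);          // release the inner lock taken by the notifier
    }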