Changeset 0cee082 for libcfa/src


Timestamp:
Jan 9, 2023, 3:21:02 PM
Author:
caparsons <caparson@…>
Branches:
ADT, ast-experimental, master
Children:
01a8954
Parents:
5e180c2
Message:

refactored naming for lock to be more accurate and cleaned up REACQ nonsense in locks.hfa

Location:
libcfa/src/concurrency
Files:
2 edited

Legend:

Lines prefixed with "-" were removed, lines prefixed with "+" were added; all other lines are unchanged context.
  • libcfa/src/concurrency/clib/cfathread.cfa  (diff r5e180c2 → r0cee082)

         // Mutex
         struct cfathread_mutex {
-                linear_backoff_then_block_lock impl;
+                exp_backoff_then_block_lock impl;
         };
         int cfathread_mutex_init(cfathread_mutex_t *restrict mut, const cfathread_mutexattr_t *restrict) __attribute__((nonnull (1))) { *mut = new(); return 0; }

         // Condition
         struct cfathread_condition {
-                condition_variable(linear_backoff_then_block_lock) impl;
+                condition_variable(exp_backoff_then_block_lock) impl;
         };
         int cfathread_cond_init(cfathread_cond_t *restrict cond, const cfathread_condattr_t *restrict) __attribute__((nonnull (1))) { *cond = new(); return 0; }
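
To see what the rename means for code layered on these wrappers, here is a minimal sketch of the renamed lock paired with a condition variable, written against the types shown above. It assumes locks.hfa's condition_variable provides wait( cond, lock ) and notify_one( cond ), which is my reading of the interface the wrapper relies on; the helper names and the shared flag are hypothetical and not part of this changeset.

    #include <locks.hfa>

    exp_backoff_then_block_lock m;                        // was linear_backoff_then_block_lock before this commit
    condition_variable( exp_backoff_then_block_lock ) c;  // condition variable parameterized on the renamed lock

    int ready = 0;                                        // hypothetical shared state guarded by m

    void consumer() {                                     // hypothetical helper
        lock( m );
        while ( ! ready ) wait( c, m );                   // drops m while blocked, reacquires it on wakeup
        unlock( m );
    }

    void producer() {                                     // hypothetical helper
        lock( m );
        ready = 1;
        notify_one( c );                                  // wake one waiter
        unlock( m );
    }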
  • libcfa/src/concurrency/locks.hfa  (diff r5e180c2 → r0cee082)

 #include <unistd.h>

-// undef to make a number of the locks not reacquire upon waking from a condlock
-#define REACQ 1
+// C_TODO: cleanup this and locks.cfa
+// - appropriate separation of interface and impl
+// - clean up unused/unneeded locks
+// - change messy big blocking lock from inheritance to composition to remove need for flags

 //-----------------------------------------------------------------------------

 static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(clh_lock & this, size_t recursion ) {
-        #ifdef REACQ
-        lock(this);
-        #endif
-}
-
-
-//-----------------------------------------------------------------------------
-// Linear backoff Spinlock
-struct linear_backoff_then_block_lock {
+static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
+
+
+//-----------------------------------------------------------------------------
+// Exponential backoff then block lock
+struct exp_backoff_then_block_lock {
         // Spin lock used for mutual exclusion
         __spinlock_t spinlock;

 };

-static inline void  ?{}( linear_backoff_then_block_lock & this ) {
+static inline void  ?{}( exp_backoff_then_block_lock & this ) {
         this.spinlock{};
         this.blocked_threads{};
         this.lock_value = 0;
 }
-static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
-// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
-// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
-
-static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
+// static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+// static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+
+static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
         if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                 return true;

 }

-static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
-
-static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
+static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
+
+static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
         if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
                 return true;

 }

-static inline bool block(linear_backoff_then_block_lock & this) with(this) {
+static inline bool block(exp_backoff_then_block_lock & this) with(this) {
         lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
         if (lock_value != 2) {

 }

-static inline void lock(linear_backoff_then_block_lock & this) with(this) {
+static inline void lock(exp_backoff_then_block_lock & this) with(this) {
         size_t compare_val = 0;
         int spin = 4;

 }

-static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
+static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
         lock( spinlock __cfaabi_dbg_ctx2 );

 }

-static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
-        #ifdef REACQ
-        lock(this);
-        #endif
-}
+static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

 //-----------------------------------------------------------------------------
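
With REACQ gone, the hooks above are no longer conditional: on_wakeup always reacquires the lock when a waiter resumes from a condition variable. Used directly, the renamed lock behaves as before apart from the type name. A minimal usage sketch, assuming locks.hfa is on the include path; the counter and the function are illustrative only:

    #include <locks.hfa>

    exp_backoff_then_block_lock l;   // spins with growing backoff under contention, then falls back to blocking
    size_t counter = 0;              // illustrative shared state

    void bump() {                    // illustrative helper
        lock( l );                   // try_lock( l ) is also available for a non-blocking attempt
        counter += 1;
        unlock( l );
    }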
     

 static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
-        #ifdef REACQ
-                lock( lock __cfaabi_dbg_ctx2 );
-                insert_last( blocked_threads, *t );
-                unlock( lock );
-        #else
-                unpark(t);
-        #endif
+    lock( lock __cfaabi_dbg_ctx2 );
+    insert_last( blocked_threads, *t );
+    unlock( lock );
 }
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }

 }
 static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
-        #ifdef REACQ
-        lock(this);
-        #endif
-}
+static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }



 static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
-        #ifdef REACQ
-        lock(this);
-        #endif
-}
+static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }

 //-----------------------------------------------------------------------------


 static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
-  #ifdef REACQ
         // first we acquire internal fast_block_lock
         lock( lock __cfaabi_dbg_ctx2 );

         unlock( lock );

-  #endif
         unpark(t);
-
 }
 static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
-  #ifdef REACQ
         // now we acquire the entire block_spin_lock upon waking up
         while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
         __atomic_store_n(&held, true, __ATOMIC_RELEASE);
         unlock( lock ); // Now we release the internal fast_spin_lock
-  #endif
 }

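
The same cleanup runs through fast_block_lock, spin_queue_lock, mcs_block_spin_lock and block_spin_lock above: the condition-variable hooks are now unconditional, so on_wait releases the lock before the waiter parks, on_notify wakes (or queues) the waiter, and on_wakeup always takes the lock back. A sketch of that call order, collapsed into one hypothetical function using exp_backoff_then_block_lock as the representative lock (no thread actually parks here):

    static void wait_path_sketch( exp_backoff_then_block_lock & l ) {
        lock( l );                         // a waiter holds the lock before waiting on a condition
        size_t recursion = on_wait( l );   // hook called by the condition variable: releases the lock
        // ... the waiter would park here until on_notify( l, t ) unparks it ...
        on_wakeup( l, recursion );         // hook called after waking: now always re-locks the lock
        unlock( l );
    }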