Ignore:
Timestamp:
Jun 29, 2021, 5:33:38 PM (7 months ago)
Author:
caparsons <caparson@…>
Branches:
jacob/cs343-translation, master, new-ast-unique-expr
Children:
660665f
Parents:
bae0d35
Message:

Added Martin's SpinCondLock as the linear_backoff_then_block lock

File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/locks.hfa

    rbae0d35 r5a46e09  
    1818
    1919#include <stdbool.h>
     20#include <stdio.h>
    2021
    2122#include "bits/weakso_locks.hfa"
     
    237238}
    238239
// Mutual-exclusion lock that first spins with linearly growing backoff,
// then yields, and finally blocks the acquiring thread (see lock()).
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion over the blocked-thread list
	__spinlock_t spinlock;

	// Current thread owning the lock
	struct $thread * owner;

	// List of blocked threads
	dlist( $thread ) blocked_threads;

	// Used for comparing and exchanging.
	// Encoding: 0 = unlocked, 1 = locked uncontended (set by internal_try_lock),
	// 2 = locked contended (set by try_lock_contention; forces unlock() onto
	// the slow path that wakes a waiter).
	volatile size_t lock_value;

	// used for linear backoff spinning
	int spin_start;   // initial number of Pause() iterations per round
	int spin_end;     // bound at which spinning stops growing / gives up
	int spin_count;   // number of capped-spin rounds in the bounded phase

	// after unsuccessful linear backoff yield this many times
	int yield_count;
};
     261
     262static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
     263        this.spinlock{};
     264        this.blocked_threads{};
     265        this.lock_value = 0;
     266        this.spin_start = spin_start;
     267        this.spin_end = spin_end;
     268        this.spin_count = spin_count;
     269        this.yield_count = yield_count;
     270}
// Default construction: spin_start=4, spin_end=1024, spin_count=16, yield_count=0.
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
// Destructor: no resources beyond the members themselves, nothing to release.
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
     273
// Attempt acquisition via compare-and-swap of lock_value from compare_val to 1.
// On failure the CAS writes the observed lock_value back into compare_val
// (out-parameter), letting the caller distinguish uncontended (1) from
// contended (2). On success records the caller as owner and returns true.
static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		owner = active_thread();
		return true;
	}
	return false;
}
     281
     282static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
     283
// Acquire by unconditionally storing the "contended" value (2).
// Succeeds only if the previous value was 0 (unlocked); the stored 2 forces
// the eventual unlock() onto its slow path, which checks for blocked waiters.
static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		owner = active_thread();
		return true;
	}
	return false;
}
     291
// Park the calling thread until the lock holder wakes it.
// Re-checks lock_value under the spinlock: if it is no longer 2, the lock
// was released in the meantime, so return without parking and let the
// caller retry acquisition (avoids parking after the wakeup already passed).
// Always returns true so the caller can loop: while (block(this)) ...
static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	// Enqueue before releasing the spinlock so unlock() can find this thread.
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}
     303
// Acquire the lock in three phases:
//   1) linear-backoff spinning with CAS 0 -> 1, doubling the spin each round,
//      aborting early if the lock is observed contended (compare_val == 2);
//   2) bounded backoff with try_lock_contention (marks the lock contended),
//      spinning up to spin_count capped rounds, then yielding yield_count times;
//   3) blocking via block()/park, retrying acquisition on every wakeup.
// NOTE(review): if the caller already owns the lock this returns true
// immediately with no recursion count, so a single unlock() still releases
// it — confirm callers rely on this re-entrant-check behavior.
static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
	// if owner just return
	if (active_thread() == owner) return true;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return true;
		// lock observed contended: stop optimistic spinning immediately
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;   // double the backoff each round
	}

	// linear backoff bounded by spin_count
	spin = spin_start;
	int spin_counter = 0;
	int yield_counter = 0;
	for ( ;; ) {
		if(try_lock_contention(this)) return true;
		if(spin_counter < spin_count) {
			for (int i = 0; i < spin; i++) Pause();
			if (spin < spin_end) spin += spin;
			else spin_counter++;   // spin capped at spin_end; count capped rounds
		} else if (yield_counter < yield_count) {
			// after linear backoff yield yield_count times
			yield_counter++;
			yield();
		} else { break; }
	}

	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return true;

	// this should never be reached as block(this) always returns true
	return false;
}
     342
// Release the lock.
// Fast path: atomically swap lock_value to 0; if the old value was 1
// (uncontended) no waiter can exist, so return.
// Slow path (old value was 2): pop one blocked thread under the spinlock
// and unpark it.
// NOTE(review): on the slow path a contending thread may have set
// lock_value to 2 but not yet enqueued itself in block(), leaving
// blocked_threads empty here — confirm try_pop_front on an empty list and
// unpark of the resulting pointer are safe in this runtime.
static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	// verify the lock is actually held; unlocking an unheld lock is a usage error
	verify(lock_value > 0);
	owner = 0p;
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	$thread * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}
     352
     353
     354void on_notify(linear_backoff_then_block_lock & this, struct $thread * t ) { unpark(t); }
     355size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
     356void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
     357
    239358//-----------------------------------------------------------------------------
    240359// is_blocking_lock
Note: See TracChangeset for help on using the changeset viewer.