File: libcfa/src/concurrency/locks.hfa (1 edited)

    rab1b971 → rddd473f

Legend: lines prefixed with '+' were added and lines prefixed with '-' were removed; all other lines are unchanged context, and '…' marks runs of elided unchanged lines.

#include "bits/weakso_locks.hfa"
+#include "containers/queueLockFree.hfa"
+
+#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"
+
+//-----------------------------------------------------------------------------
+// Semaphores
+
+// '0-nary' semaphore
+// Similar to a counting semaphore except the value of one is never reached
+// as a consequence, a V() that would bring the value to 1 *spins* until
+// a P consumes it
+struct Semaphore0nary {
+	__spinlock_t lock; // needed to protect
+	mpsc_queue($thread) queue;
+};
+
+static inline bool P(Semaphore0nary & this, $thread * thrd) {
+	/* paranoid */ verify(!(thrd->seqable.next));
+	/* paranoid */ verify(!(thrd`next));
+
+	push(this.queue, thrd);
+	return true;
+}
+
+static inline bool P(Semaphore0nary & this) {
+    $thread * thrd = active_thread();
+    P(this, thrd);
+    park();
+    return true;
+}
+
+static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
+	$thread * next;
+	lock(this.lock __cfaabi_dbg_ctx2);
+		for (;;) {
+			next = pop(this.queue);
+			if (next) break;
+			Pause();
+		}
+	unlock(this.lock);
+
+	if (doUnpark) unpark(next);
+	return next;
+}
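
A minimal caller-side sketch of the new 0-nary semaphore (an editorial illustration, not part of the changeset; it assumes a default-constructed Semaphore0nary shared between a parking thread and a waking thread):

    Semaphore0nary binsem;

    // parking side: queue the current thread and park until a V retrieves it
    P(binsem);                        // push(active_thread()) followed by park()

    // waking side, on another thread: pop one queued thread and unpark it
    $thread * woken = V(binsem);      // spins while the queue is still empty

    // deferred wake-up: take a thread out without unparking it immediately
    $thread * t = V(binsem, false);
    unpark(t);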
+
+// Wrapper used on top of any semaphore to avoid potential locking
+struct BinaryBenaphore {
+	volatile ssize_t counter;
+};
+
+static inline {
+	void ?{}(BinaryBenaphore & this) { this.counter = 0; }
+	void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
+	void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }
+
+	// returns true if no blocking needed
+	bool P(BinaryBenaphore & this) {
+		return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
+	}
+
+	bool tryP(BinaryBenaphore & this) {
+		ssize_t c = this.counter;
+		return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
+	}
+
+	// returns true if no notify is needed
+	bool V(BinaryBenaphore & this) {
+		ssize_t c = 0;
+		for () {
+			if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+				if (c == 0) return true;
+				/* paranoid */ verify(c < 0);
+				return false;
+			} else {
+				if (c == 1) return true;
+				/* paranoid */ verify(c < 1);
+				Pause();
+			}
+		}
+	}
+}
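
The benaphore only maintains the counter; callers pair it with a real semaphore for the slow path. As ThreadBenaphore below shows, P returns true when the fast path suffices and V returns true when no hand-off to the wrapped semaphore is required. A sketch of that pairing (editorial; 'backing' is a stand-in for whatever semaphore is being wrapped):

    BinaryBenaphore ben;              // default constructor: counter starts at 0

    // consuming side
    if (!P(ben)) {                    // counter went negative: nothing available yet
        // P(backing);                // slow path: park on the wrapped semaphore
    }

    // producing side
    if (!V(ben)) {                    // a consumer was recorded as waiting
        // V(backing);                // slow path: wake one thread from the wrapped semaphore
    }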
+
+// Binary Semaphore based on the BinaryBenaphore on top of the 0-nary Semaphore
+struct ThreadBenaphore {
+	BinaryBenaphore ben;
+	Semaphore0nary  sem;
+};
+
+static inline void ?{}(ThreadBenaphore & this) {}
+static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
+static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }
+
+static inline bool P(ThreadBenaphore & this)              { return P(this.ben) ? false : P(this.sem); }
+static inline bool tryP(ThreadBenaphore & this)           { return tryP(this.ben); }
+static inline bool P(ThreadBenaphore & this, bool wait)   { return wait ? P(this) : tryP(this); }
+
+static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
+	if (V(this.ben)) return 0p;
+	return V(this.sem, doUnpark);
+}
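
Putting the two together, ThreadBenaphore touches its 0-nary semaphore only when the benaphore counter says a thread actually has to park or be woken. A usage sketch (editorial; relies on the default constructor, so the counter starts at 0):

    ThreadBenaphore tb;

    // waiting thread
    P(tb);                            // parks on the 0-nary semaphore unless a V already arrived

    // signalling thread
    $thread * woken = V(tb);          // 0p when nobody had to be woken, otherwise the handed-off thread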
+
+//-----------------------------------------------------------------------------
+// Semaphore
+struct semaphore {
+	__spinlock_t lock;
+	int count;
+	__queue_t($thread) waiting;
+};
+
+void  ?{}(semaphore & this, int count = 1);
+void ^?{}(semaphore & this);
+bool   P (semaphore & this);
+bool   V (semaphore & this);
+bool   V (semaphore & this, unsigned count);
+$thread * V (semaphore & this, bool );
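
The counting semaphore keeps its familiar interface; these declarations are moved up in the header (the matching removal appears at the end of this changeset), and the interface gains a V overload that returns a $thread *. A usage sketch (editorial):

    semaphore printer;                // default count of 1

    P(printer);                       // acquire; may block the calling thread
    // ...use the shared resource...
    V(printer);                       // release; may resume one waiting thread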

//----------
…
static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
-static inline void   lock      ( single_acquisition_lock & this ) { lock   ( (blocking_lock &)this ); }
-static inline void   unlock    ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline void   on_wait   ( single_acquisition_lock & this ) { on_wait( (blocking_lock &)this ); }
-static inline void   on_notify ( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
-static inline void   set_recursion_count( single_acquisition_lock & this, size_t recursion ) { set_recursion_count( (blocking_lock &)this, recursion ); }
-static inline size_t get_recursion_count( single_acquisition_lock & this ) { return get_recursion_count( (blocking_lock &)this ); }
+static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
+static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
+static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
+static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
+static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
+static inline void   on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
…
static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
-static inline void   lock     ( owner_lock & this ) { lock   ( (blocking_lock &)this ); }
-static inline void   unlock   ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline void   on_wait  ( owner_lock & this ) { on_wait( (blocking_lock &)this ); }
+static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
+static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
+static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
+static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
+static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
-static inline void   set_recursion_count( owner_lock & this, size_t recursion ) { set_recursion_count( (blocking_lock &)this, recursion ); }
-static inline size_t get_recursion_count( owner_lock & this ) { return get_recursion_count( (blocking_lock &)this ); }
+
+struct fast_lock {
+	$thread * volatile owner;
+	ThreadBenaphore sem;
+};
+
+static inline bool $try_lock(fast_lock & this, $thread * thrd) {
+    $thread * exp = 0p;
+    return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
+}
+
+static inline void lock( fast_lock & this ) __attribute__((artificial));
+static inline void lock( fast_lock & this ) {
+	$thread * thrd = active_thread();
+	/* paranoid */ verify(thrd != this.owner);
+
+	for (;;) {
+		if ($try_lock(this, thrd)) return;
+		P(this.sem);
+	}
+}
+
+static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
+static inline bool try_lock ( fast_lock & this ) {
+	$thread * thrd = active_thread();
+	/* paranoid */ verify(thrd != this.owner);
+	return $try_lock(this, thrd);
+}
+
+static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
+static inline $thread * unlock( fast_lock & this ) {
+	/* paranoid */ verify(active_thread() == this.owner);
+
+	// open 'owner' before unlocking anyone
+	// so new and unlocked threads don't park incorrectly.
+	// This may require additional fencing on ARM.
+	this.owner = 0p;
+
+	return V(this.sem);
+}
+
+static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
+static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
+static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
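
fast_lock combines an atomic owner pointer with a ThreadBenaphore: lock() retries the CAS after every wake-up, unlock() clears the owner before handing off the semaphore (per the comment above), and the on_wait/on_wakeup/on_notify hooks let it be used underneath the blocking constructs later in this header. A usage sketch (editorial; assumes fl is a shared fast_lock whose owner starts as 0p):

    lock(fl);                         // CAS on owner, parking on the benaphore while contended
    // ...critical section...
    unlock(fl);                       // clears owner, then wakes one parked thread if there is one

    if (try_lock(fl)) {               // single CAS attempt, never blocks
        unlock(fl);
    }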
+
+struct mcs_node {
+	mcs_node * volatile next;
+	single_sem sem;
+};
+
+static inline void ?{}(mcs_node & this) { this.next = 0p; }
+
+static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
+	return node->next;
+}
+
+struct mcs_lock {
+	mcs_queue(mcs_node) queue;
+};
+
+static inline void lock(mcs_lock & l, mcs_node & n) {
+	if(push(l.queue, &n))
+		wait(n.sem);
+}
+
+static inline void unlock(mcs_lock & l, mcs_node & n) {
+	mcs_node * next = advance(l.queue, &n);
+	if(next) post(next->sem);
+}
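
The MCS lock keeps its waiters in a lock-free queue of caller-supplied nodes, so each acquisition needs an mcs_node that stays alive until the matching unlock. A usage sketch (editorial; assumes ml is a shared mcs_lock):

    mcs_node n;                       // typically a stack-allocated node per acquisition
    lock(ml, n);                      // enqueue n; wait on n.sem if another node was already queued
    // ...critical section...
    unlock(ml, n);                    // advance the queue and post the next waiter's semaphore, if any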

//-----------------------------------------------------------------------------
…

	// For synchronization locks to use when releasing
-	void on_wait( L & );
-
-	// to get recursion count for cond lock to reset after waking
-	size_t get_recursion_count( L & );
+	size_t on_wait( L & );

	// to set recursion count after getting signalled;
-	void set_recursion_count( L &, size_t recursion );
+	void on_wakeup( L &, size_t recursion );
};

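
With this change, the lock interface consumed by the blocking constructs is smaller: on_wait now both releases the lock and returns its recursion count (replacing the separate on_wait and get_recursion_count calls), and on_wakeup takes that count back when the thread resumes (replacing set_recursion_count). The calling pattern a waiting operation follows looks roughly like this (editorial sketch; l is any lock implementing these members):

    size_t recursion = on_wait( l );  // release l and remember how many times it was acquired
    park();                           // block until a signaller calls on_notify, which unparks this thread
    on_wakeup( l, recursion );        // regain l and restore its recursion count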
     
…
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Time time );
}
-
-//-----------------------------------------------------------------------------
-// Semaphore
-struct semaphore {
-	__spinlock_t lock;
-	int count;
-	__queue_t($thread) waiting;
-};
-
-void  ?{}(semaphore & this, int count = 1);
-void ^?{}(semaphore & this);
-bool   P (semaphore & this);
-bool   V (semaphore & this);
-bool   V (semaphore & this, unsigned count);