- Timestamp:
- Jan 17, 2022, 2:25:00 PM
- Branches:
- ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum
- Children:
- 25337e0, a77f25b
- Parents:
- a2a4566
- File:
- 1 edited
Legend:
- Unmodified (shown with no prefix)
- Added (prefixed "+"; this changeset adds no lines)
- Removed (prefixed "-")
- "…" marks elided unchanged code
libcfa/src/concurrency/locks.hfa
(diff from ra2a4566 to r0fc447c)

…
 #include "time_t.hfa"
 #include "time.hfa"
-
-//-----------------------------------------------------------------------------
-// Semaphores
-
-// '0-nary' semaphore
-// Similar to a counting semaphore except the value of one is never reached;
-// as a consequence, a V() that would bring the value to 1 *spins* until
-// a P consumes it
-struct Semaphore0nary {
-    __spinlock_t lock; // needed to protect
-    mpsc_queue(thread$) queue;
-};
-
-static inline bool P(Semaphore0nary & this, thread$ * thrd) {
-    /* paranoid */ verify(!thrd`next);
-    /* paranoid */ verify(!(&(*thrd)`next));
-
-    push(this.queue, thrd);
-    return true;
-}
-
-static inline bool P(Semaphore0nary & this) {
-    thread$ * thrd = active_thread();
-    P(this, thrd);
-    park();
-    return true;
-}
-
-static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
-    thread$ * next;
-    lock(this.lock __cfaabi_dbg_ctx2);
-    for (;;) {
-        next = pop(this.queue);
-        if (next) break;
-        Pause();
-    }
-    unlock(this.lock);
-
-    if (doUnpark) unpark(next);
-    return next;
-}
-
-// Wrapper used on top of any semaphore to avoid potential locking
-struct BinaryBenaphore {
-    volatile ssize_t counter;
-};
-
-static inline {
-    void ?{}(BinaryBenaphore & this) { this.counter = 0; }
-    void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
-    void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }
-
-    // returns true if no blocking needed
-    bool P(BinaryBenaphore & this) {
-        return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
-    }
-
-    bool tryP(BinaryBenaphore & this) {
-        ssize_t c = this.counter;
-        /* paranoid */ verify( c > MIN );
-        return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
-    }
-
-    // returns true if notify needed
-    bool V(BinaryBenaphore & this) {
-        ssize_t c = 0;
-        for () {
-            /* paranoid */ verify( this.counter < MAX );
-            if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-                if (c == 0) return true;
-                /* paranoid */ verify(c < 0);
-                return false;
-            } else {
-                if (c == 1) return true;
-                /* paranoid */ verify(c < 1);
-                Pause();
-            }
-        }
-    }
-}
-
-// Binary Semaphore based on the BinaryBenaphore on top of the 0-nary Semaphore
-struct ThreadBenaphore {
-    BinaryBenaphore ben;
-    Semaphore0nary sem;
-};
-
-static inline void ?{}(ThreadBenaphore & this) {}
-static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
-static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }
-
-static inline bool P(ThreadBenaphore & this) { return P(this.ben) ? false : P(this.sem); }
-static inline bool tryP(ThreadBenaphore & this) { return tryP(this.ben); }
-static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); }
-
-static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
-    if (V(this.ben)) return 0p;
-    return V(this.sem, doUnpark);
-}
 
 //-----------------------------------------------------------------------------
…
 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
-
-struct fast_lock {
-    thread$ * volatile owner;
-    ThreadBenaphore sem;
-};
-
-static inline void ?{}(fast_lock & this) __attribute__((deprecated("use linear_backoff_then_block_lock instead")));
-static inline void ?{}(fast_lock & this) { this.owner = 0p; }
-
-static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
-    thread$ * exp = 0p;
-    return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
-}
-
-static inline void lock( fast_lock & this ) __attribute__((deprecated("use linear_backoff_then_block_lock instead"), artificial));
-static inline void lock( fast_lock & this ) {
-    thread$ * thrd = active_thread();
-    /* paranoid */ verify(thrd != this.owner);
-
-    for (;;) {
-        if ($try_lock(this, thrd)) return;
-        P(this.sem);
-    }
-}
-
-static inline bool try_lock( fast_lock & this ) __attribute__((deprecated("use linear_backoff_then_block_lock instead"), artificial));
-static inline bool try_lock ( fast_lock & this ) {
-    thread$ * thrd = active_thread();
-    /* paranoid */ verify(thrd != this.owner);
-    return $try_lock(this, thrd);
-}
-
-static inline thread$ * unlock( fast_lock & this ) __attribute__((deprecated("use linear_backoff_then_block_lock instead"), artificial));
-static inline thread$ * unlock( fast_lock & this ) {
-    /* paranoid */ verify(active_thread() == this.owner);
-
-    // open 'owner' before unlocking anyone
-    // so new and unlocked threads don't park incorrectly.
-    // This may require additional fencing on ARM.
-    this.owner = 0p;
-
-    return V(this.sem);
-}
-
-static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
-static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
-static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }
 
 struct mcs_node {
…
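For context on what this changeset deletes: Semaphore0nary is a pure handoff primitive. Its count never reaches 1; a V() that finds no queued waiter spins until the matching P() arrives, then wakes exactly that thread. The following plain-C model is illustrative only (not from the CFA sources; all names are hypothetical): a mutex-protected list stands in for the lock-free mpsc_queue, and a per-waiter POSIX semaphore stands in for park()/unpark().

#include <pthread.h>
#include <semaphore.h>
#include <stddef.h>

/* One queued waiter; lives on the waiting thread's stack. */
typedef struct waiter {
    sem_t parked;            /* models park()/unpark() */
    struct waiter *next;
} waiter;

typedef struct {
    pthread_mutex_t lock;    /* stands in for __spinlock_t */
    waiter *head, *tail;
} sem0nary;

static void sem0_init(sem0nary *s) {
    pthread_mutex_init(&s->lock, NULL);
    s->head = s->tail = NULL;
}

/* P(): enqueue self, then park until a V() hands off to us. */
static void sem0_P(sem0nary *s) {
    waiter w;
    sem_init(&w.parked, 0, 0);
    w.next = NULL;
    pthread_mutex_lock(&s->lock);
    if (s->tail) s->tail->next = &w; else s->head = &w;
    s->tail = &w;
    pthread_mutex_unlock(&s->lock);
    sem_wait(&w.parked);     /* blocks until sem0_V posts to us */
    sem_destroy(&w.parked);
}

/* V(): no permit is ever banked. Spin until a waiter is queued,
 * then wake exactly that thread. */
static void sem0_V(sem0nary *s) {
    waiter *w;
    for (;;) {
        pthread_mutex_lock(&s->lock);
        w = s->head;
        if (w) { s->head = w->next; if (!s->head) s->tail = NULL; }
        pthread_mutex_unlock(&s->lock);
        if (w) break;        /* paired P() has not arrived yet: spin */
    }
    sem_post(&w->parked);
}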
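BinaryBenaphore, also removed, is the classic benaphore idiom: an atomic counter sits in front of a real semaphore so that the semaphore (and any kernel transition it implies) is touched only under contention, and its P() and V() report whether the slow path is needed. Below is a minimal sketch of the general idiom, again in plain C with C11 atomics and a POSIX semaphore as the assumed backing primitive; the names are illustrative, not from the CFA source.

#include <stdatomic.h>
#include <semaphore.h>

typedef struct {
    atomic_long counter;     /* 1 = free; <= 0 = held, with -counter waiters */
    sem_t sem;               /* threads block here only under contention */
} benaphore;

static void benaphore_init(benaphore *b) {
    atomic_init(&b->counter, 1);
    sem_init(&b->sem, 0, 0); /* backing semaphore starts empty */
}

static void benaphore_lock(benaphore *b) {
    /* Fast path: an uncontended acquire is one atomic subtraction. */
    if (atomic_fetch_sub(&b->counter, 1) > 0) return;
    /* Slow path: the lock is held; block on the backing semaphore. */
    sem_wait(&b->sem);
}

static void benaphore_unlock(benaphore *b) {
    /* Fast path: no waiters, so the semaphore is never touched. */
    if (atomic_fetch_add(&b->counter, 1) >= 0) return;
    /* Slow path: hand the lock directly to one blocked waiter. */
    sem_post(&b->sem);
}

The removed ThreadBenaphore is exactly this composition with the 0-nary semaphore as the backing primitive, so an uncontended P()/V() pair never queues or parks a thread.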
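Finally, the removed fast_lock (deprecated in favour of linear_backoff_then_block_lock) treats the owner pointer itself as the lock word: acquire is a compare-and-swap from null, contenders park on the ThreadBenaphore, and the comment in unlock() records the key ordering constraint: owner must be cleared before any waiter is woken, otherwise a woken (or newly arriving) thread could fail its CAS and re-park with no wake-up left in flight. A simplified, hypothetical C analogue follows; it is coarser than the original in that it posts the semaphore unconditionally rather than only when the benaphore reports a waiter, relying on the retry loop to absorb spurious wake-ups.

#include <stdatomic.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    _Atomic(void *) owner;   /* NULL when the lock is free */
    sem_t sem;               /* contenders park here */
} fastlock;

static void fastlock_init(fastlock *l) {
    atomic_init(&l->owner, NULL);
    sem_init(&l->sem, 0, 0);
}

/* Acquire iff unowned, in a single CAS; 'self' is any unique
 * per-thread address used as the owner identity. */
static bool fastlock_try(fastlock *l, void *self) {
    void *expected = NULL;
    return atomic_compare_exchange_strong(&l->owner, &expected, self);
}

static void fastlock_lock(fastlock *l, void *self) {
    for (;;) {
        if (fastlock_try(l, self)) return;  /* fast path */
        sem_wait(&l->sem);                  /* park; retry once woken */
    }
}

static void fastlock_unlock(fastlock *l) {
    /* Clear 'owner' BEFORE waking anyone, mirroring the removed code's
     * comment: the woken thread (or a fresh arrival) must be able to
     * win the CAS, otherwise it would re-park with no wake-up pending. */
    atomic_store(&l->owner, NULL);
    sem_post(&l->sem);
}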