Changeset 12c1eef for libcfa/src/concurrency/locks.hfa
- Timestamp:
- Jan 19, 2022, 2:36:56 PM (4 years ago)
- Branches:
- ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children:
- 97c215f
- Parents:
- 5235d49 (diff), 6a33e40 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - File:
-
- 1 file edited
-
libcfa/src/concurrency/locks.hfa (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/locks.hfa
r5235d49 r12c1eef 29 29 #include "time_t.hfa" 30 30 #include "time.hfa" 31 32 //-----------------------------------------------------------------------------33 // Semaphores34 35 // '0-nary' semaphore36 // Similar to a counting semaphore except the value of one is never reached37 // as a consequence, a V() that would bring the value to 1 *spins* until38 // a P consumes it39 struct Semaphore0nary {40 __spinlock_t lock; // needed to protect41 mpsc_queue(thread$) queue;42 };43 44 static inline bool P(Semaphore0nary & this, thread$ * thrd) {45 /* paranoid */ verify(!thrd`next);46 /* paranoid */ verify(!(&(*thrd)`next));47 48 push(this.queue, thrd);49 return true;50 }51 52 static inline bool P(Semaphore0nary & this) {53 thread$ * thrd = active_thread();54 P(this, thrd);55 park();56 return true;57 }58 59 static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {60 thread$ * next;61 lock(this.lock __cfaabi_dbg_ctx2);62 for (;;) {63 next = pop(this.queue);64 if (next) break;65 Pause();66 }67 unlock(this.lock);68 69 if (doUnpark) unpark(next);70 return next;71 }72 73 // Wrapper used on top of any sempahore to avoid potential locking74 struct BinaryBenaphore {75 volatile ssize_t counter;76 };77 78 static inline {79 void ?{}(BinaryBenaphore & this) { this.counter = 0; }80 void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }81 void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }82 83 // returns true if no blocking needed84 bool P(BinaryBenaphore & this) {85 return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;86 }87 88 bool tryP(BinaryBenaphore & this) {89 ssize_t c = this.counter;90 /* paranoid */ verify( c > MIN );91 return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);92 }93 94 // returns true if notify needed95 bool V(BinaryBenaphore & this) {96 ssize_t c = 0;97 for () {98 /* paranoid */ verify( this.counter < MAX );99 if (__atomic_compare_exchange_n(&this.counter, 
&c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {100 if (c == 0) return true;101 /* paranoid */ verify(c < 0);102 return false;103 } else {104 if (c == 1) return true;105 /* paranoid */ verify(c < 1);106 Pause();107 }108 }109 }110 }111 112 // Binary Semaphore based on the BinaryBenaphore on top of the 0-nary Semaphore113 struct ThreadBenaphore {114 BinaryBenaphore ben;115 Semaphore0nary sem;116 };117 118 static inline void ?{}(ThreadBenaphore & this) {}119 static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }120 static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }121 122 static inline bool P(ThreadBenaphore & this) { return P(this.ben) ? false : P(this.sem); }123 static inline bool tryP(ThreadBenaphore & this) { return tryP(this.ben); }124 static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); }125 126 static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {127 if (V(this.ben)) return 0p;128 return V(this.sem, doUnpark);129 }130 31 131 32 //----------------------------------------------------------------------------- … … 171 72 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); } 172 73 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); } 173 174 struct fast_lock {175 thread$ * volatile owner;176 ThreadBenaphore sem;177 };178 179 static inline void ?{}(fast_lock & this) { this.owner = 0p; }180 181 static inline bool $try_lock(fast_lock & this, thread$ * thrd) {182 thread$ * exp = 0p;183 return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);184 }185 186 static inline void lock( fast_lock & this ) __attribute__((artificial));187 static inline void lock( fast_lock & this ) {188 thread$ * thrd = active_thread();189 /* paranoid */verify(thrd != this.owner);190 191 for (;;) {192 if ($try_lock(this, thrd)) 
return;193 P(this.sem);194 }195 }196 197 static inline bool try_lock( fast_lock & this ) __attribute__((artificial));198 static inline bool try_lock ( fast_lock & this ) {199 thread$ * thrd = active_thread();200 /* paranoid */ verify(thrd != this.owner);201 return $try_lock(this, thrd);202 }203 204 static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));205 static inline thread$ * unlock( fast_lock & this ) {206 /* paranoid */ verify(active_thread() == this.owner);207 208 // open 'owner' before unlocking anyone209 // so new and unlocked threads don't park incorrectly.210 // This may require additional fencing on ARM.211 this.owner = 0p;212 213 return V(this.sem);214 }215 216 static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }217 static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }218 static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }219 74 220 75 struct mcs_node {
Note:
See TracChangeset for help on using the changeset viewer.