Changeset c86ee4c for libcfa/src/concurrency/locks.hfa
- Timestamp: Jul 7, 2021, 6:24:42 PM
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: d83b266
- Parents: 1f45c7d (diff), b1a2c4a (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- File: 1 edited
  - libcfa/src/concurrency/locks.hfa (modified) (17 diffs)
Legend:
- Unmodified: lines prefixed with a space
- Added: lines prefixed with +
- Removed: lines prefixed with -
libcfa/src/concurrency/locks.hfa
--- libcfa/src/concurrency/locks.hfa (r1f45c7d)
+++ libcfa/src/concurrency/locks.hfa (rc86ee4c)
@@ -39,8 +39,8 @@
 struct Semaphore0nary {
     __spinlock_t lock; // needed to protect
-    mpsc_queue( $thread ) queue;
+    mpsc_queue( thread$ ) queue;
 };
 
-static inline bool P(Semaphore0nary & this, $thread * thrd) {
+static inline bool P(Semaphore0nary & this, thread$ * thrd) {
     /* paranoid */ verify(!thrd`next);
     /* paranoid */ verify(!(&(*thrd)`next));
@@ -51,5 +51,5 @@
 
 static inline bool P(Semaphore0nary & this) {
-    $thread * thrd = active_thread();
+    thread$ * thrd = active_thread();
     P(this, thrd);
     park();
@@ -57,6 +57,6 @@
 }
 
-static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
-    $thread * next;
+static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
+    thread$ * next;
     lock(this.lock __cfaabi_dbg_ctx2);
     for (;;) {
@@ -124,5 +124,5 @@
 static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); }
 
-static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
+static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
     if (V(this.ben)) return 0p;
     return V(this.sem, doUnpark);
@@ -134,5 +134,5 @@
     __spinlock_t lock;
     int count;
-    __queue_t( $thread ) waiting;
+    __queue_t( thread$ ) waiting;
 };
 
@@ -142,5 +142,5 @@
 bool V (semaphore & this);
 bool V (semaphore & this, unsigned count);
-$thread * V (semaphore & this, bool );
+thread$ * V (semaphore & this, bool );
 
 //----------
@@ -156,5 +156,5 @@
 static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
 static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
-static inline void on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
 
 //----------
@@ -170,8 +170,8 @@
 static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
-static inline void on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
 
 struct fast_lock {
-    $thread * volatile owner;
+    thread$ * volatile owner;
     ThreadBenaphore sem;
 };
@@ -179,6 +179,6 @@
 static inline void ?{}(fast_lock & this) { this.owner = 0p; }
 
-static inline bool $try_lock(fast_lock & this, $thread * thrd) {
-    $thread * exp = 0p;
+static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
+    thread$ * exp = 0p;
     return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
 }
@@ -186,5 +186,5 @@
 static inline void lock( fast_lock & this ) __attribute__((artificial));
 static inline void lock( fast_lock & this ) {
-    $thread * thrd = active_thread();
+    thread$ * thrd = active_thread();
     /* paranoid */ verify(thrd != this.owner);
 
@@ -197,11 +197,11 @@
 static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
 static inline bool try_lock ( fast_lock & this ) {
-    $thread * thrd = active_thread();
+    thread$ * thrd = active_thread();
     /* paranoid */ verify(thrd != this.owner);
     return $try_lock(this, thrd);
 }
 
-static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
-static inline $thread * unlock( fast_lock & this ) {
+static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));
+static inline thread$ * unlock( fast_lock & this ) {
     /* paranoid */ verify(active_thread() == this.owner);
 
@@ -216,5 +216,5 @@
 static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
 static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
-static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
+static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }
 
 struct mcs_node {
@@ -248,8 +248,8 @@
 
     // Current thread owning the lock
-    struct $thread * owner;
+    struct thread$ * owner;
 
     // List of blocked threads
-    dlist( $thread ) blocked_threads;
+    dlist( thread$ ) blocked_threads;
 
     // Used for comparing and exchanging
@@ -341,5 +341,47 @@
     // block until signalled
     while (block(this)) if(try_lock_contention(this)) return true;
 
+    // this should never be reached as block(this) always returns true
+    return false;
+}
+
+static inline bool lock_improved(linear_backoff_then_block_lock & this) with(this) {
+    // if owner just return
+    if (active_thread() == owner) return true;
+    size_t compare_val = 0;
+    int spin = spin_start;
+    // linear backoff
+    for( ;; ) {
+        compare_val = 0;
+        if (internal_try_lock(this, compare_val)) return true;
+        if (2 == compare_val) break;
+        for (int i = 0; i < spin; i++) Pause();
+        if (spin >= spin_end) break;
+        spin += spin;
+    }
+
+    // linear backoff bounded by spin_count
+    spin = spin_start;
+    int spin_counter = 0;
+    int yield_counter = 0;
+    for ( ;; ) {
+        compare_val = 0;
+        if(internal_try_lock(this, compare_val)) return true;
+        if (2 == compare_val) break;
+        if(spin_counter < spin_count) {
+            for (int i = 0; i < spin; i++) Pause();
+            if (spin < spin_end) spin += spin;
+            else spin_counter++;
+        } else if (yield_counter < yield_count) {
+            // after linear backoff yield yield_count times
+            yield_counter++;
+            yield();
+        } else { break; }
+    }
+
+    if(2 != compare_val && try_lock_contention(this)) return true;
+    // block until signalled
+    while (block(this)) if(try_lock_contention(this)) return true;
+
     // this should never be reached as block(this) always returns true
     return false;
@@ -351,12 +393,12 @@
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
     lock( spinlock __cfaabi_dbg_ctx2 );
-    $thread * t = &try_pop_front( blocked_threads );
+    thread$ * t = &try_pop_front( blocked_threads );
     unlock( spinlock );
     unpark( t );
 }
 
-static inline void on_notify(linear_backoff_then_block_lock & this, struct $thread * t ) { unpark(t); }
+static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
 static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
+static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); }
 
 //-----------------------------------------------------------------------------
@@ -364,5 +406,5 @@
 trait is_blocking_lock(L & | sized(L)) {
     // For synchronization locks to use when acquiring
-    void on_notify( L &, struct $thread * );
+    void on_notify( L &, struct thread$ * );
 
     // For synchronization locks to use when releasing
@@ -398,5 +440,5 @@
     int count;
 };
 
 
 void ?{}( condition_variable(L) & this );
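
Two of the hunks above reward a closer look. First, fast_lock: acquisition is a single __atomic_compare_exchange_n that swings the owner pointer from null (0p) to the acquiring thread, so the uncontended case costs one atomic instruction. Below is a self-contained C11 sketch of the same idea; the names cas_owner_lock and owner_try_lock, and the use of a plain void * for thread identity, are illustrative assumptions, not libcfa API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct {
        _Atomic(void *) owner;   // NULL = unlocked, else the owning thread
    } cas_owner_lock;

    static bool owner_try_lock(cas_owner_lock * l, void * self) {
        void * expected = NULL;  // only an unowned lock can be claimed
        return atomic_compare_exchange_strong_explicit(
            &l->owner, &expected, self,
            memory_order_seq_cst,   // success order, as in $try_lock above
            memory_order_relaxed);  // failure order, as in $try_lock above
    }

    static void owner_unlock(cas_owner_lock * l) {
        // Release so critical-section writes are visible to the next owner.
        // (The real fast_lock unlock additionally hands the lock to a waiter
        // blocked on its ThreadBenaphore; that part is omitted here.)
        atomic_store_explicit(&l->owner, NULL, memory_order_release);
    }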
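
Second, the one behavioural change in the merge: linear_backoff_then_block_lock gains lock_improved, and on_wakeup now re-acquires through it instead of lock. The acquire path spins with a doubling pause between attempts, gives up spinning early when the lock value reads 2 (held, with blocked waiters), then yields the processor a bounded number of times, and only then blocks on the lock's wait queue. The following is a minimal C11 sketch of that three-phase shape, under stated assumptions: backoff_lock, try_acquire, the SPIN_*/YIELD_* bounds, and the GCC/Clang asm barrier standing in for Pause() are all illustrative, and the final phase yields in a loop where the real lock parks the thread.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>

    enum { SPIN_START = 4, SPIN_END = 1024, YIELD_BOUND = 16 };

    typedef struct {
        atomic_int state;   // 0 = unlocked, 1 = locked, 2 = locked with waiters
    } backoff_lock;

    static bool try_acquire(backoff_lock * l) {
        int expected = 0;
        return atomic_compare_exchange_strong(&l->state, &expected, 1);
    }

    static void acquire(backoff_lock * l) {
        // Phase 1: spin, doubling the delay between attempts (spin += spin).
        for (int spin = SPIN_START; ; spin += spin) {
            if (try_acquire(l)) return;
            // 2 means "held, with threads already parked": spinning is
            // unlikely to win, so stop early (cf. if (2 == compare_val) break;).
            if (atomic_load(&l->state) == 2) break;
            for (int i = 0; i < spin; i++)
                __asm__ volatile("" ::: "memory"); // stand-in for Pause()
            if (spin >= SPIN_END) break;
        }
        // Phase 2: keep trying, but donate the CPU between attempts.
        for (int yields = 0; yields < YIELD_BOUND; yields++) {
            if (try_acquire(l)) return;
            sched_yield();
        }
        // Phase 3: the real lock parks on the lock's wait queue and is
        // unparked by unlock(); this sketch just keeps yielding.
        while (!try_acquire(l)) sched_yield();
    }

Routing on_wakeup through this path gives a freshly woken thread the same bounded spin-then-yield grace period before it blocks again, rather than re-entering the plain acquire path.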
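
Finally, the is_blocking_lock trait at the bottom of the diff is the contract these locks share with the synchronization primitives built on top of them, such as condition_variable: on_notify wakes a waiter, on_wait releases the lock before the caller parks and returns its recursion count, and on_wakeup re-acquires with that count after the caller is unparked. Rendered as a hypothetical C vtable (the struct and the opaque thread type are illustrative; in CFA the trait is resolved statically, with no indirection):

    #include <stddef.h>

    struct thread;  // opaque thread handle, standing in for CFA's thread$

    struct blocking_lock_ops {
        // used by a condition variable when signalling: wake this waiter
        void   (*on_notify)(void * lock, struct thread * t);
        // used when waiting: release before parking, return recursion depth
        size_t (*on_wait)  (void * lock);
        // used after waking: re-acquire to the saved recursion depth
        void   (*on_wakeup)(void * lock, size_t recursion);
    };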