Changeset 0cee082 for libcfa/src
- Timestamp: Jan 9, 2023, 3:21:02 PM (2 years ago)
- Branches: ADT, ast-experimental, master
- Children: 01a8954
- Parents: 5e180c2
- Location: libcfa/src/concurrency
- Files: 2 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, and unprefixed lines are unmodified context.
libcfa/src/concurrency/clib/cfathread.cfa
(from r5e180c2 to r0cee082)

@@ -439 +439 @@
  // Mutex
  struct cfathread_mutex {
-     linear_backoff_then_block_lock impl;
+     exp_backoff_then_block_lock impl;
  };
  int cfathread_mutex_init(cfathread_mutex_t *restrict mut, const cfathread_mutexattr_t *restrict) __attribute__((nonnull (1))) { *mut = new(); return 0; }
@@ -454 +454 @@
  // Condition
  struct cfathread_condition {
-     condition_variable( linear_backoff_then_block_lock ) impl;
+     condition_variable( exp_backoff_then_block_lock ) impl;
  };
  int cfathread_cond_init(cfathread_cond_t *restrict cond, const cfathread_condattr_t *restrict) __attribute__((nonnull (1))) { *cond = new(); return 0; }
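The C shim keeps its composition pattern: cfathread_mutex and cfathread_condition simply embed the renamed lock type, so the rest of the cfathread interface forwards to it unchanged. As a rough illustration of that forwarding (a sketch only: cfathread_mutex_lock/unlock are hypothetical names, this hunk shows only the init functions):

    // Sketch, not part of the changeset: assumes cfathread_mutex_t is a pointer to
    // struct cfathread_mutex, as set up by cfathread_mutex_init above.
    int cfathread_mutex_lock( cfathread_mutex_t *restrict mut ) __attribute__((nonnull (1))) {
        lock( (*mut)->impl );      // forwards to the embedded exp_backoff_then_block_lock
        return 0;
    }
    int cfathread_mutex_unlock( cfathread_mutex_t *restrict mut ) __attribute__((nonnull (1))) {
        unlock( (*mut)->impl );
        return 0;
    }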
libcfa/src/concurrency/locks.hfa
(from r5e180c2 to r0cee082)

@@ -38 +38 @@
  #include <unistd.h>

- // undef to make a number of the locks not reacquire upon waking from a condlock
- #define REACQ 1
+ // C_TODO: cleanup this and locks.cfa
+ // - appropriate separation of interface and impl
+ // - clean up unused/unneeded locks
+ // - change messy big blocking lock from inheritance to composition to remove need for flags

  //-----------------------------------------------------------------------------
@@ -249 +251 @@
  static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
  static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(clh_lock & this, size_t recursion ) {
-     #ifdef REACQ
-     lock(this);
-     #endif
- }
-
-
- //-----------------------------------------------------------------------------
- // Linear backoff Spinlock
- struct linear_backoff_then_block_lock {
+ static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
+
+
+ //-----------------------------------------------------------------------------
+ // Exponential backoff then block lock
+ struct exp_backoff_then_block_lock {
      // Spin lock used for mutual exclusion
      __spinlock_t spinlock;
@@ -269 +267 @@
  };

- static inline void ?{}( linear_backoff_then_block_lock & this ) {
+ static inline void ?{}( exp_backoff_then_block_lock & this ) {
      this.spinlock{};
      this.blocked_threads{};
      this.lock_value = 0;
  }
- static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
- // static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
- // static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
-
- static inline bool internal_try_lock( linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+ static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
+ // static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+ // static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+
+ static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
      if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
          return true;
@@ -285 +283 @@
  }

- static inline bool try_lock( linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
-
- static inline bool try_lock_contention( linear_backoff_then_block_lock & this) with(this) {
+ static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
+
+ static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
      if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
          return true;
@@ -294 +292 @@
  }

- static inline bool block( linear_backoff_then_block_lock & this) with(this) {
+ static inline bool block(exp_backoff_then_block_lock & this) with(this) {
      lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
      if (lock_value != 2) {
@@ -306 +304 @@
  }

- static inline void lock( linear_backoff_then_block_lock & this) with(this) {
+ static inline void lock(exp_backoff_then_block_lock & this) with(this) {
      size_t compare_val = 0;
      int spin = 4;
@@ -324 +322 @@
  }

- static inline void unlock( linear_backoff_then_block_lock & this) with(this) {
+ static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
      if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
      lock( spinlock __cfaabi_dbg_ctx2 );
@@ -332 +330 @@
  }

- static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
- static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
-     #ifdef REACQ
-     lock(this);
-     #endif
- }
+ static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
+ static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

  //-----------------------------------------------------------------------------
@@ -390 +384 @@

  static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
-     #ifdef REACQ
-     lock( lock __cfaabi_dbg_ctx2 );
-     insert_last( blocked_threads, *t );
-     unlock( lock );
-     #else
-     unpark(t);
-     #endif
+     lock( lock __cfaabi_dbg_ctx2 );
+     insert_last( blocked_threads, *t );
+     unlock( lock );
  }
  static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
@@ -553 +543 @@
  }
  static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
-     #ifdef REACQ
-     lock(this);
-     #endif
- }
+ static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }


@@ -598 +584 @@
  static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
  static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
-     #ifdef REACQ
-     lock(this);
-     #endif
- }
+ static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }

  //-----------------------------------------------------------------------------
@@ -640 +622 @@

  static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
-     #ifdef REACQ
      // first we acquire internal fast_block_lock
      lock( lock __cfaabi_dbg_ctx2 );
@@ -652 +633 @@
      unlock( lock );

-     #endif
      unpark(t);
-
  }
  static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
  static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
-     #ifdef REACQ
      // now we acquire the entire block_spin_lock upon waking up
      while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
      __atomic_store_n(&held, true, __ATOMIC_RELEASE);
      unlock( lock ); // Now we release the internal fast_spin_lock
-     #endif
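Taken together, the locks.hfa hunks do two things: rename linear_backoff_then_block_lock to exp_backoff_then_block_lock, and drop the REACQ compile-time switch so that every on_wakeup handler now unconditionally reacquires its lock when a thread resumes from a condition-variable wait. A minimal usage sketch, assuming the wait/notify_one interface locks.hfa provides for condition_variable(L) (the include path and the surrounding driver code are assumptions, not part of this diff):

    #include <locks.hfa>                       // assumed include path

    exp_backoff_then_block_lock m;
    condition_variable( exp_backoff_then_block_lock ) cv;
    bool ready = false;

    void consume() {
        lock( m );
        // on_wait releases m while blocked; on_wakeup now always reacquires it
        while ( ! ready ) wait( cv, m );
        // ... use the protected state ...
        unlock( m );
    }

    void produce() {
        lock( m );
        ready = true;
        notify_one( cv );
        unlock( m );
    }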