Changeset 5a05946 for libcfa/src/concurrency/locks.hfa
- Timestamp:
- May 15, 2023, 1:14:42 PM (12 months ago)
- Branches:
- ADT, ast-experimental, master
- Children:
- 629c95a
- Parents:
- 8cb06b6
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/locks.hfa
r8cb06b6 r5a05946 39 39 #include <unistd.h> 40 40 41 // C_TODO: cleanup this and locks.cfa42 // - appropriate separation of interface and impl43 // - clean up unused/unneeded locks44 // - change messy big blocking lock from inheritance to composition to remove need for flags45 46 41 typedef void (*__cfa_pre_park)( void * ); 47 42 … … 66 61 park(); 67 62 } 63 64 // macros for default routine impls for is_blocking_lock trait that do not wait-morph 65 66 #define DEFAULT_ON_NOTIFY( lock_type ) \ 67 static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); } 68 69 #define DEFAULT_ON_WAIT( lock_type ) \ 70 static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \ 71 unlock( this ); \ 72 pre_park_then_park( pp_fn, pp_datum ); \ 73 return 0; \ 74 } 75 76 // on_wakeup impl if lock should be reacquired after waking up 77 #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \ 78 static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); } 79 80 // on_wakeup impl if lock will not be reacquired after waking up 81 #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \ 82 static inline void on_wakeup( lock_type & this, size_t recursion ) {} 83 84 68 85 69 86 //----------------------------------------------------------------------------- … … 187 204 // - Kernel thd blocking alternative to the spinlock 188 205 // - No ownership (will deadlock on reacq) 206 // - no reacq on wakeup 189 207 struct futex_mutex { 190 208 // lock state any state other than UNLOCKED is locked … … 200 218 } 201 219 202 static inline void 203 204 static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {220 static inline void ?{}( futex_mutex & this ) with(this) { val = 0; } 221 222 static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) { 205 223 return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); 206 224 } 207 225 208 
static inline int internal_exchange( futex_mutex & this) with(this) {226 static inline int internal_exchange( futex_mutex & this ) with(this) { 209 227 return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE); 210 228 } … … 240 258 } 241 259 242 static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); } 243 static inline size_t on_wait( futex_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) { 244 unlock( this ); 245 pre_park_then_park( pp_fn, pp_datum ); 246 return 0; 247 } 248 249 // to set recursion count after getting signalled; 250 static inline void on_wakeup( futex_mutex & f, size_t recursion ) {} 260 DEFAULT_ON_NOTIFY( futex_mutex ) 261 DEFAULT_ON_WAIT( futex_mutex ) 262 DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex ) 251 263 252 264 //----------------------------------------------------------------------------- … … 264 276 int val; 265 277 }; 266 267 278 static inline void ?{}( go_mutex & this ) with(this) { val = 0; } 279 // static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted 280 // static inline void ?=?( go_mutex & this, go_mutex this2 ) = void; 268 281 269 282 static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) { … … 314 327 } 315 328 316 static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); } 317 static inline size_t on_wait( go_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) { 318 unlock( this ); 319 pre_park_then_park( pp_fn, pp_datum ); 320 return 0; 321 } 322 static inline void on_wakeup( go_mutex & f, size_t recursion ) {} 329 DEFAULT_ON_NOTIFY( go_mutex ) 330 DEFAULT_ON_WAIT( go_mutex ) 331 DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex ) 323 332 324 333 //----------------------------------------------------------------------------- … … 340 349 this.lock_value = 0; 341 350 } 351 static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void; 352 
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void; 342 353 343 354 static inline void ^?{}( exp_backoff_then_block_lock & this ){} … … 392 403 } 393 404 394 static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); } 395 static inline size_t on_wait( exp_backoff_then_block_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { 396 unlock( this ); 397 pre_park_then_park( pp_fn, pp_datum ); 398 return 0; 399 } 400 static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); } 405 DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock ) 406 DEFAULT_ON_WAIT( exp_backoff_then_block_lock ) 407 DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock ) 401 408 402 409 //----------------------------------------------------------------------------- … … 454 461 unlock( lock ); 455 462 } 456 static inline size_t on_wait( fast_block_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { 457 unlock( this ); 458 pre_park_then_park( pp_fn, pp_datum ); 459 return 0; 460 } 461 static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { } 463 DEFAULT_ON_WAIT( fast_block_lock ) 464 DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock ) 462 465 463 466 //----------------------------------------------------------------------------- … … 661 664 } 662 665 663 static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) { 664 unpark(t); 665 } 666 static inline size_t on_wait( spin_queue_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { 667 unlock( this ); 668 pre_park_then_park( pp_fn, pp_datum ); 669 return 0; 670 } 671 static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); } 672 666 DEFAULT_ON_NOTIFY( spin_queue_lock ) 667 DEFAULT_ON_WAIT( spin_queue_lock ) 668 DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock ) 673 669 674 670 //----------------------------------------------------------------------------- … … 708 
704 } 709 705 710 static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); } 711 static inline size_t on_wait( mcs_block_spin_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { 712 unlock( this ); 713 pre_park_then_park( pp_fn, pp_datum ); 714 return 0; 715 } 716 static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); } 706 DEFAULT_ON_NOTIFY( mcs_block_spin_lock ) 707 DEFAULT_ON_WAIT( mcs_block_spin_lock ) 708 DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock ) 717 709 718 710 //----------------------------------------------------------------------------- … … 765 757 unpark(t); 766 758 } 767 static inline size_t on_wait( block_spin_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { 768 unlock( this ); 769 pre_park_then_park( pp_fn, pp_datum ); 770 return 0; 771 } 759 DEFAULT_ON_WAIT( block_spin_lock ) 772 760 static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) { 773 761 // now we acquire the entire block_spin_lock upon waking up
Note: See TracChangeset for help on using the changeset viewer.