Changeset 6b33e89 for libcfa/src/concurrency/locks.hfa
- Timestamp: Apr 25, 2025, 7:39:09 AM
- Branches: master
- Children: 65bd3c2
- Parents: b195498
- File (1 edited): libcfa/src/concurrency/locks.hfa (modified) (18 diffs)
Legend:
- Unprefixed lines are unmodified context.
- Lines prefixed with "-" were removed; lines prefixed with "+" were added.
- Hunks marked "reindentation only" change leading whitespace alone, so their code is shown once.
libcfa/src/concurrency/locks.hfa
Diff from rb195498 to r6b33e89 (18 hunks):

@@ lines 11-16 @@
  // Created On : Thu Jan 21 19:46:50 2021
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Tue Dec 24 09:36:52 2024
- // Update Count : 16
+ // Last Modified On : Fri Apr 25 07:14:16 2025
+ // Update Count : 22
  //

@@ lines 56-61 @@ (reindentation only)
  static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
  	pp_fn( pp_datum );
  	park();
  }

@@ lines 63-81 @@ (reindentation only)
  #define DEFAULT_ON_NOTIFY( lock_type ) \
  	static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }

  #define DEFAULT_ON_WAIT( lock_type ) \
  	static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
  		unlock( this ); \
  		pre_park_then_park( pp_fn, pp_datum ); \
  		return 0; \
  	}

  // on_wakeup impl if lock should be reacquired after waking up
  #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
  	static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }

  // on_wakeup impl if lock will not be reacquired after waking up
  #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
  	static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}

@@ lines 142-146 @@
  static inline void ?{}( mcs_node & this ) { this.next = 0p; }

- static inline mcs_node * volatile & ?`next( mcs_node * node ) {
+ static inline mcs_node * volatile & next( mcs_node * node ) {
  	return node->next;
  }

@@ lines 156-161 @@
  static inline void unlock( mcs_lock & l, mcs_node & n ) {
- 	mcs_node * next = advance( l.queue, &n );
- 	if ( next ) post( next->sem );
+ 	mcs_node * nxt = advance( l.queue, &n );
+ 	if ( nxt ) post( nxt->sem );
  }

@@ lines 181-193 @@ (one rename; unchanged body lines reindented)
  static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
  	n.locked = true;

  #if defined( __ARM_ARCH )
  	…
  #endif

- 	mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
- 	if ( prev == 0p ) return;
- 	prev->next = &n;
+ 	mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
+ 	if ( prev_val == 0p ) return;
+ 	prev_val->next = &n;

  #if defined( __ARM_ARCH )

@@ lines 234-238 @@ (reindentation only)
  // to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
  static inline int futex( int *uaddr, int futex_op, int val ) {
  	return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
  }

@@ lines 271-275 @@ (reindentation only)
  static inline void unlock( futex_mutex & this ) with( this ) {
  	// if uncontended do atomic unlock and then return
  	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

  	// otherwise threads are blocked so we must wake one

@@ lines 311-338 @@ (reindentation only; body of the go_mutex lock routine)
  	int state, init_state;

  	// speculative grab
  	state = internal_exchange( this, 1 );
  	if ( ! state ) return;				// state == 0
  	init_state = state;
  	for () {
  		for ( 4 ) {
  			while ( ! val ) {			// lock unlocked
  				state = 0;
  				if ( internal_try_lock( this, state, init_state ) ) return;
  			}
  			for ( 30 ) Pause();
  		}

  		while ( ! val ) {				// lock unlocked
  			state = 0;
  			if ( internal_try_lock( this, state, init_state ) ) return;
  		}
  		sched_yield();

  		// if not in contended state, set to be in contended state
  		state = internal_exchange( this, 2 );
  		if ( ! state ) return;			// state == 0
  		init_state = 2;
  		futex( (int*)&val, FUTEX_WAIT, 2 );	// if val is not 2 this returns with EWOULDBLOCK
  	}
  }

@@ lines 340-344 @@ (reindentation only)
  static inline void unlock( go_mutex & this ) with( this ) {
  	// if uncontended do atomic unlock and then return
  	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

  	// otherwise threads are blocked so we must wake one

@@ lines 384-394 @@ (reindentation only)
  static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
  	lock( spinlock __cfaabi_dbg_ctx2 );
  	if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
  		unlock( spinlock );
  		return true;
  	}
  	insert_last( blocked_threads, *active_thread() );
  	unlock( spinlock );
  	park( );
  	return true;

@@ lines 415-422 @@ (one rename; unchanged body lines reindented)
  static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
  	if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
  	lock( spinlock __cfaabi_dbg_ctx2 );
- 	thread$ * t = &try_pop_front( blocked_threads );
+ 	thread$ * t = &remove_first( blocked_threads );
  	unlock( spinlock );
  	unpark( t );
  }

@@ lines 469-473 @@
  	lock( lock __cfaabi_dbg_ctx2 );
  	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
- 	thread$ * t = &try_pop_front( blocked_threads );
+ 	thread$ * t = &remove_first( blocked_threads );
  	held = ( t ? true : false );
  	unpark( t );

@@ lines 476-482 @@ (reindentation only)
  static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );
  	insert_last( blocked_threads, *t );
  	unlock( lock );
  }
  DEFAULT_ON_WAIT( fast_block_lock )

@@ lines 521-525 @@ (reindentation only)
  	if ( owner != 0p ) {
  		select_node node;
  		insert_last( blocked_threads, node );
  		unlock( lock );

@@ lines 533-546 @@ (one rename; unchanged body lines reindented)
  static inline void pop_node( simple_owner_lock & this ) with( this ) {
  	__handle_waituntil_OR( blocked_threads );
- 	select_node * node = &try_pop_front( blocked_threads );
+ 	select_node * node = &remove_first( blocked_threads );
  	if ( node ) {
  		owner = node->blocked_thread;
  		recursion_count = 1;
  		// if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
  		wake_one( blocked_threads, *node );
  	} else {
  		owner = 0p;
  		recursion_count = 0;
  	}
  }

@@ lines 582-590 @@ (reindentation only)
  	pop_node( this );

  	select_node node;
  	active_thread()->link_node = (void *)&node;
  	unlock( lock );

  	pre_park_then_park( pp_fn, pp_datum );

  	return ret;

@@ lines 595-645 @@ (register_select reindented only; one change in unregister_select)
  // waituntil() support
  static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );

  	// check if we can complete operation. If so race to establish winner in special OR case
  	if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
  		if ( ! __make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
  			unlock( lock );
  			return false;
  		}
  	}

  	if ( owner == active_thread() ) {
  		recursion_count++;
  		if ( node.park_counter ) __make_select_node_available( node );
  		unlock( lock );
  		return true;
  	}

  	if ( owner != 0p ) {
  		insert_last( blocked_threads, node );
  		unlock( lock );
  		return false;
  	}

  	owner = active_thread();
  	recursion_count = 1;

  	if ( node.park_counter ) __make_select_node_available( node );
  	unlock( lock );
  	return true;
  }

  static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );
- 	if ( node`isListed ) {
+ 	if ( isListed( node ) ) {
  		remove( node );
  		unlock( lock );
  		return false;
  	}

  	if ( owner == active_thread() ) {
  		recursion_count--;
  		if ( recursion_count == 0 ) {
  			pop_node( this );
  		}
  	}
  	unlock( lock );
  	return false;
  }
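Aside: the mcs_lock and mcs_spin_lock hunks above are pure renames (?`next becomes next, the local next becomes nxt, prev becomes prev_val), but they touch the heart of the MCS queue lock, so a compact illustration may help. Below is a minimal self-contained C11 sketch of an MCS spin lock in the style of mcs_spin_lock; it is not the libcfa code, and every toy_* name is invented for the example.

#include <stdatomic.h>
#include <stddef.h>

// One queue node per acquiring thread; each waiter spins on its own flag.
typedef struct toy_mcs_node {
	struct toy_mcs_node * _Atomic next;
	atomic_bool locked;
} toy_mcs_node;

typedef struct {
	toy_mcs_node * _Atomic tail;			// last waiter in the queue, or NULL
} toy_mcs_lock;

static void toy_mcs_acquire( toy_mcs_lock * l, toy_mcs_node * n ) {
	atomic_store( &n->next, NULL );
	atomic_store( &n->locked, true );
	// join the queue; the previous tail (if any) is our predecessor
	toy_mcs_node * prev = atomic_exchange( &l->tail, n );
	if ( prev == NULL ) return;				// queue was empty: lock acquired
	atomic_store( &prev->next, n );			// link in behind the predecessor
	while ( atomic_load( &n->locked ) ) ;	// spin locally until handed the lock
}

static void toy_mcs_release( toy_mcs_lock * l, toy_mcs_node * n ) {
	toy_mcs_node * succ = atomic_load( &n->next );
	if ( succ == NULL ) {
		toy_mcs_node * expected = n;
		// no visible successor: try to swing tail back to empty
		if ( atomic_compare_exchange_strong( &l->tail, &expected, NULL ) ) return;
		// a successor is mid-enqueue; wait for its link to appear
		while ( ( succ = atomic_load( &n->next ) ) == NULL ) ;
	}
	atomic_store( &succ->locked, false );	// pass the lock to the next waiter
}

Each waiter spins on a flag in its own node, so the only globally contended operation is the single atomic exchange on tail; that is why the queue-node next accessor this changeset renames sits at the core of the algorithm.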
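Similarly, the futex_mutex and go_mutex hunks reindent, without changing, a three-state locking protocol (0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters). As a reading aid, here is a minimal sketch of that protocol in plain C on Linux, in the spirit of Drepper's classic futex mutex; the toy_* names are invented, and this is an illustration of the pattern rather than the libcfa implementation, which adds the spinning and sched_yield() backoff shown in the diff before calling FUTEX_WAIT.

#define _GNU_SOURCE
#include <stdatomic.h>
#include <linux/futex.h>	// FUTEX_WAIT, FUTEX_WAKE
#include <sys/syscall.h>	// SYS_futex
#include <unistd.h>			// syscall()

// 0 = unlocked, 1 = locked (no waiters), 2 = locked (waiters possible)
typedef struct { atomic_int val; } toy_futex_mutex;

// same shape as the futex() wrapper in locks.hfa
static long toy_futex( atomic_int * uaddr, int op, int expected ) {
	return syscall( SYS_futex, uaddr, op, expected, NULL, NULL, 0 );
}

static void toy_lock( toy_futex_mutex * m ) {
	int expected = 0;
	// fast path: uncontended 0 -> 1
	if ( atomic_compare_exchange_strong( &m->val, &expected, 1 ) ) return;
	// slow path: mark the lock contended and sleep until woken
	while ( atomic_exchange( &m->val, 2 ) != 0 )
		toy_futex( &m->val, FUTEX_WAIT, 2 );	// returns immediately unless val == 2
}

static void toy_unlock( toy_futex_mutex * m ) {
	// old state 1 means nobody is waiting; otherwise wake one sleeper
	if ( atomic_exchange( &m->val, 0 ) != 1 )
		toy_futex( &m->val, FUTEX_WAKE, 1 );
}

The unlock path has exactly the shape visible in the diff: an atomic exchange to 0 that returns early when the old value was 1, and a single FUTEX_WAKE otherwise.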