Changeset beeff61e for libcfa/src/concurrency/locks.hfa
- Timestamp: May 1, 2023, 4:00:06 PM (19 months ago)
- Branches: ADT, ast-experimental, master
- Children: 73bf7ddc
- Parents: bb7422a
- File: 1 edited
libcfa/src/concurrency/locks.hfa
rbb7422a rbeeff61e 30 30 #include "time.hfa" 31 31 32 #include "select.hfa" 33 32 34 #include <fstream.hfa> 33 35 … … 70 72 static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); } 71 73 static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); } 74 static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); } 75 static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); } 76 static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); } 72 77 73 78 //---------- … … 84 89 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); } 85 90 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); } 91 static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); } 92 static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); } 93 static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); } 86 94 87 95 //----------------------------------------------------------------------------- … … 180 188 181 189 // if this is called recursively IT WILL DEADLOCK!!!!! 182 static inline void lock( futex_mutex & this) with(this) {190 static inline void lock( futex_mutex & this ) with(this) { 183 191 int state; 184 192 … … 190 198 for (int i = 0; i < spin; i++) Pause(); 191 199 } 192 193 // // no contention try to acquire194 // if (internal_try_lock(this, state)) return;195 200 196 201 // if not in contended state, set to be in contended state … … 213 218 214 219 static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); } 215 static inline size_t on_wait( futex_mutex & f ) { unlock(f); return 0;}220 static inline size_t on_wait( futex_mutex & f ) { unlock(f); park(); return 0; } 216 221 217 222 // to set recursion count after getting signalled; … … 244 249 245 250 // if this is called recursively IT WILL DEADLOCK!!!!! 
246 static inline void lock( go_mutex & this) with(this) {251 static inline void lock( go_mutex & this ) with( this ) { 247 252 int state, init_state; 248 253 … … 255 260 while( !val ) { // lock unlocked 256 261 state = 0; 257 if ( internal_try_lock(this, state, init_state)) return;262 if ( internal_try_lock( this, state, init_state ) ) return; 258 263 } 259 264 for (int i = 0; i < 30; i++) Pause(); … … 262 267 while( !val ) { // lock unlocked 263 268 state = 0; 264 if ( internal_try_lock(this, state, init_state)) return;269 if ( internal_try_lock( this, state, init_state ) ) return; 265 270 } 266 271 sched_yield(); 267 272 268 273 // if not in contended state, set to be in contended state 269 state = internal_exchange( this, 2);274 state = internal_exchange( this, 2 ); 270 275 if ( !state ) return; // state == 0 271 276 init_state = 2; 272 futex( (int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK277 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK 273 278 } 274 279 } … … 276 281 static inline void unlock( go_mutex & this ) with(this) { 277 282 // if uncontended do atomic unlock and then return 278 if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;283 if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return; 279 284 280 285 // otherwise threads are blocked so we must wake one 281 futex( (int *)&val, FUTEX_WAKE, 1);282 } 283 284 static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t); }285 static inline size_t on_wait( go_mutex & f ) { unlock(f); return 0;}286 futex( (int *)&val, FUTEX_WAKE, 1 ); 287 } 288 289 static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); } 290 static inline size_t on_wait( go_mutex & f ) { unlock( f ); park(); return 0; } 286 291 static inline void on_wakeup( go_mutex & f, size_t recursion ) {} 287 288 //-----------------------------------------------------------------------------289 // CLH Spinlock290 // - No recursive acquisition291 // - Needs to be released by owner292 293 struct clh_lock {294 volatile bool * volatile tail;295 volatile bool * volatile head;296 };297 298 static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }299 static inline void ^?{}( clh_lock & this ) { free(this.tail); }300 301 static inline void lock(clh_lock & l) {302 thread$ * curr_thd = active_thread();303 *(curr_thd->clh_node) = false;304 volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);305 while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();306 __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);307 curr_thd->clh_node = prev;308 }309 310 static inline void unlock(clh_lock & l) {311 __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);312 }313 314 static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }315 static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }316 static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }317 292 318 293 //----------------------------------------------------------------------------- … … 337 312 static inline void ^?{}( exp_backoff_then_block_lock & this ){} 338 313 339 static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {314 static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) { 340 315 return __atomic_compare_exchange_n(&lock_value, 
&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); 341 316 } 342 317 343 static inline bool try_lock( exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }344 345 static inline bool try_lock_contention( exp_backoff_then_block_lock & this) with(this) {346 return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE);347 } 348 349 static inline bool block( exp_backoff_then_block_lock & this) with(this) {318 static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); } 319 320 static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) { 321 return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE ); 322 } 323 324 static inline bool block( exp_backoff_then_block_lock & this ) with(this) { 350 325 lock( spinlock __cfaabi_dbg_ctx2 ); 351 326 if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) { … … 359 334 } 360 335 361 static inline void lock( exp_backoff_then_block_lock & this) with(this) {336 static inline void lock( exp_backoff_then_block_lock & this ) with(this) { 362 337 size_t compare_val = 0; 363 338 int spin = 4; … … 378 353 } 379 354 380 static inline void unlock( exp_backoff_then_block_lock & this) with(this) {355 static inline void unlock( exp_backoff_then_block_lock & this ) with(this) { 381 356 if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return; 382 357 lock( spinlock __cfaabi_dbg_ctx2 ); … … 386 361 } 387 362 388 static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }389 static inline size_t on_wait( exp_backoff_then_block_lock & this) { unlock(this); return 0; }390 static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }363 static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); } 364 static inline size_t on_wait( exp_backoff_then_block_lock & this ) { unlock( this ); park(); return 0; } 365 static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); } 391 366 392 367 //----------------------------------------------------------------------------- … … 418 393 419 394 // if this is called recursively IT WILL DEADLOCK!!!!! 
420 static inline void lock( fast_block_lock & this) with(this) {395 static inline void lock( fast_block_lock & this ) with(this) { 421 396 lock( lock __cfaabi_dbg_ctx2 ); 422 397 if ( held ) { … … 430 405 } 431 406 432 static inline void unlock( fast_block_lock & this) with(this) {407 static inline void unlock( fast_block_lock & this ) with(this) { 433 408 lock( lock __cfaabi_dbg_ctx2 ); 434 409 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this ); … … 439 414 } 440 415 441 static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {416 static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) { 442 417 lock( lock __cfaabi_dbg_ctx2 ); 443 418 insert_last( blocked_threads, *t ); 444 419 unlock( lock ); 445 420 } 446 static inline size_t on_wait( fast_block_lock & this) { unlock(this); return 0; }447 static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { }421 static inline size_t on_wait( fast_block_lock & this) { unlock(this); park(); return 0; } 422 static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { } 448 423 449 424 //----------------------------------------------------------------------------- … … 456 431 struct simple_owner_lock { 457 432 // List of blocked threads 458 dlist( thread$) blocked_threads;433 dlist( select_node ) blocked_threads; 459 434 460 435 // Spin lock used for mutual exclusion … … 477 452 static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void; 478 453 479 static inline void lock( simple_owner_lock & this) with(this) {480 if ( owner == active_thread()) {454 static inline void lock( simple_owner_lock & this ) with(this) { 455 if ( owner == active_thread() ) { 481 456 recursion_count++; 482 457 return; … … 484 459 lock( lock __cfaabi_dbg_ctx2 ); 485 460 486 if (owner != 0p) { 487 insert_last( blocked_threads, *active_thread() ); 461 if ( owner != 0p ) { 462 select_node node; 463 insert_last( blocked_threads, node ); 488 464 unlock( lock ); 489 465 park( ); … … 495 471 } 496 472 497 // TODO: fix duplicate def issue and bring this back 498 // void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) { 499 // thread$ * t = &try_pop_front( blocked_threads ); 500 // owner = t; 501 // recursion_count = ( t ? 1 : 0 ); 502 // unpark( t ); 503 // } 504 505 static inline void unlock(simple_owner_lock & this) with(this) { 473 static inline void pop_node( simple_owner_lock & this ) with(this) { 474 __handle_waituntil_OR( blocked_threads ); 475 select_node * node = &try_pop_front( blocked_threads ); 476 if ( node ) { 477 owner = node->blocked_thread; 478 recursion_count = 1; 479 // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread ); 480 wake_one( blocked_threads, *node ); 481 } else { 482 owner = 0p; 483 recursion_count = 0; 484 } 485 } 486 487 static inline void unlock( simple_owner_lock & this ) with(this) { 506 488 lock( lock __cfaabi_dbg_ctx2 ); 507 489 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this ); … … 510 492 recursion_count--; 511 493 if ( recursion_count == 0 ) { 512 // pop_and_set_new_owner( this ); 513 thread$ * t = &try_pop_front( blocked_threads ); 514 owner = t; 515 recursion_count = ( t ? 
1 : 0 ); 516 unpark( t ); 494 pop_node( this ); 517 495 } 518 496 unlock( lock ); 519 497 } 520 498 521 static inline void on_notify(simple_owner_lock & this, structthread$ * t ) with(this) {499 static inline void on_notify(simple_owner_lock & this, thread$ * t ) with(this) { 522 500 lock( lock __cfaabi_dbg_ctx2 ); 523 501 // lock held 524 502 if ( owner != 0p ) { 525 insert_last( blocked_threads, * t);503 insert_last( blocked_threads, *(select_node *)t->link_node ); 526 504 } 527 505 // lock not held … … 534 512 } 535 513 536 static inline size_t on_wait( simple_owner_lock & this) with(this) {514 static inline size_t on_wait( simple_owner_lock & this ) with(this) { 537 515 lock( lock __cfaabi_dbg_ctx2 ); 538 516 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this ); … … 541 519 size_t ret = recursion_count; 542 520 543 // pop_and_set_new_owner( this ); 544 545 thread$ * t = &try_pop_front( blocked_threads ); 546 owner = t; 547 recursion_count = ( t ? 1 : 0 ); 548 unpark( t ); 549 521 pop_node( this ); 522 523 select_node node; 524 active_thread()->link_node = (void *)&node; 550 525 unlock( lock ); 526 park(); 527 551 528 return ret; 552 529 } 553 530 554 static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; } 531 static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; } 532 533 // waituntil() support 534 static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) { 535 lock( lock __cfaabi_dbg_ctx2 ); 536 537 // check if we can complete operation. If so race to establish winner in special OR case 538 if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) { 539 if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering 540 unlock( lock ); 541 return false; 542 } 543 } 544 545 if ( owner == active_thread() ) { 546 recursion_count++; 547 if ( node.park_counter ) __make_select_node_available( node ); 548 unlock( lock ); 549 return true; 550 } 551 552 if ( owner != 0p ) { 553 insert_last( blocked_threads, node ); 554 unlock( lock ); 555 return false; 556 } 557 558 owner = active_thread(); 559 recursion_count = 1; 560 561 if ( node.park_counter ) __make_select_node_available( node ); 562 unlock( lock ); 563 return true; 564 } 565 566 static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) { 567 lock( lock __cfaabi_dbg_ctx2 ); 568 if ( node`isListed ) { 569 remove( node ); 570 unlock( lock ); 571 return false; 572 } 573 574 if ( owner == active_thread() ) { 575 recursion_count--; 576 if ( recursion_count == 0 ) { 577 pop_node( this ); 578 } 579 } 580 unlock( lock ); 581 return false; 582 } 583 584 static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; } 585 555 586 556 587 //----------------------------------------------------------------------------- … … 578 609 579 610 // if this is called recursively IT WILL DEADLOCK! 
580 static inline void lock( spin_queue_lock & this) with(this) {611 static inline void lock( spin_queue_lock & this ) with(this) { 581 612 mcs_spin_node node; 582 613 lock( lock, node ); … … 586 617 } 587 618 588 static inline void unlock( spin_queue_lock & this) with(this) {619 static inline void unlock( spin_queue_lock & this ) with(this) { 589 620 __atomic_store_n(&held, false, __ATOMIC_RELEASE); 590 621 } 591 622 592 static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) {623 static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) { 593 624 unpark(t); 594 625 } 595 static inline size_t on_wait( spin_queue_lock & this) { unlock(this); return 0; }596 static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock(this); }626 static inline size_t on_wait( spin_queue_lock & this ) { unlock( this ); park(); return 0; } 627 static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); } 597 628 598 629 … … 621 652 622 653 // if this is called recursively IT WILL DEADLOCK!!!!! 623 static inline void lock( mcs_block_spin_lock & this) with(this) {654 static inline void lock( mcs_block_spin_lock & this ) with(this) { 624 655 mcs_node node; 625 656 lock( lock, node ); … … 633 664 } 634 665 635 static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }636 static inline size_t on_wait( mcs_block_spin_lock & this) { unlock(this); return 0; }637 static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock(this); }666 static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); } 667 static inline size_t on_wait( mcs_block_spin_lock & this) { unlock( this ); park(); return 0; } 668 static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); } 638 669 639 670 //----------------------------------------------------------------------------- … … 661 692 662 693 // if this is called recursively IT WILL DEADLOCK!!!!! 663 static inline void lock( block_spin_lock & this) with(this) {694 static inline void lock( block_spin_lock & this ) with(this) { 664 695 lock( lock ); 665 696 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause(); … … 668 699 } 669 700 670 static inline void unlock( block_spin_lock & this) with(this) {701 static inline void unlock( block_spin_lock & this ) with(this) { 671 702 __atomic_store_n(&held, false, __ATOMIC_RELEASE); 672 703 } 673 704 674 static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {705 static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) { 675 706 // first we acquire internal fast_block_lock 676 707 lock( lock __cfaabi_dbg_ctx2 ); … … 686 717 unpark(t); 687 718 } 688 static inline size_t on_wait( block_spin_lock & this) { unlock(this); return 0; }689 static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {719 static inline size_t on_wait( block_spin_lock & this ) { unlock( this ); park(); return 0; } 720 static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) { 690 721 // now we acquire the entire block_spin_lock upon waking up 691 722 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause(); … … 714 745 forall(L & | is_blocking_lock(L)) { 715 746 struct info_thread; 716 717 // // for use by sequence718 // info_thread(L) *& Back( info_thread(L) * this );719 // info_thread(L) *& Next( info_thread(L) * this );720 747 } 721 748
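The bulk of this change wires the blocking locks into CFA's waituntil statement: register_select either acquires the lock on behalf of a waituntil clause (marking its select_node available) or enqueues the node on blocked_threads; pop_node later hands ownership to a queued node and wakes its thread; unregister_select removes a registration that never fired. Assuming the clause syntax from the CFA waituntil documentation (and with do_work_with_A/do_work_with_B as placeholder names), usage would look roughly like:

	simple_owner_lock A, B;

	// sketch: run whichever clause's lock is acquired first
	waituntil( A ) { do_work_with_A(); }
	or waituntil( B ) { do_work_with_B(); }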
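One change repeats across every lock above: on_wait now calls park() itself after releasing the lock, instead of returning to a caller that parks separately, so releasing the lock and blocking the thread happen in a single hook. A minimal sketch of the handshake the is_blocking_lock hooks implement (toy_wait is a hypothetical name; the real condition-variable code lives elsewhere in libcfa and differs in detail):

	forall( L & | is_blocking_lock( L ) )
	static void toy_wait( L & l ) {
		size_t recursion = on_wait( l );	// releases l and, after this change, also parks the caller
		// ... a signalling thread calls on_notify( l, t ), which unparks t (or re-queues it for owner locks) ...
		on_wakeup( l, recursion );	// reacquire l / restore the recursion count
	}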
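For context on the futex_mutex/go_mutex hunks: both follow the classic three-state futex protocol (val: 0 = unlocked, 1 = locked, 2 = locked with possible waiters), in the style of Drepper's "Futexes Are Tricky"; the spin-then-yield phases above are optimizations layered on top. A minimal stand-alone C analogue of the slow path (toy_* names are ours, not libcfa API):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdbool.h>

	typedef struct { int val; } toy_futex_mutex;	// 0 = unlocked, 1 = locked, 2 = contended

	static void toy_lock( toy_futex_mutex * m ) {
		int expected = 0;
		// uncontended fast path: 0 -> 1
		if ( __atomic_compare_exchange_n( &m->val, &expected, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) ) return;
		for ( ;; ) {
			// advertise contention; if the lock happened to be free, we now own it
			if ( __atomic_exchange_n( &m->val, 2, __ATOMIC_ACQUIRE ) == 0 ) return;
			// sleep while val == 2; returns immediately (EWOULDBLOCK) if val changed first
			syscall( SYS_futex, &m->val, FUTEX_WAIT, 2, 0, 0, 0 );
		}
	}

	static void toy_unlock( toy_futex_mutex * m ) {
		// exchanging 1 -> 0 means no thread ever reached the slow path: nothing to wake
		if ( __atomic_exchange_n( &m->val, 0, __ATOMIC_RELEASE ) == 1 ) return;
		syscall( SYS_futex, &m->val, FUTEX_WAKE, 1, 0, 0, 0 );	// wake one waiter
	}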