Changeset 3982384 for libcfa/src/concurrency/locks.hfa
- Timestamp: May 17, 2023, 1:35:09 AM (2 years ago)
- Branches: ADT, master
- Children: f11010e
- Parents: 6e4c44d (diff), 8db4708 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
- libcfa/src/concurrency/locks.hfa
r6e4c44d → r3982384

  #include "time.hfa"
  
+ #include "select.hfa"
+ 
  #include <fstream.hfa>
  
…
  #include <unistd.h>
  
- // C_TODO: cleanup this and locks.cfa
- // - appropriate separation of interface and impl
- // - clean up unused/unneeded locks
- // - change messy big blocking lock from inheritance to composition to remove need for flags
+ typedef void (*__cfa_pre_park)( void * );
+ 
+ static inline void pre_park_noop( void * ) {}
+ 
+ //-----------------------------------------------------------------------------
+ // is_blocking_lock
+ forall( L & | sized(L) )
+ trait is_blocking_lock {
+     // For synchronization locks to use when acquiring
+     void on_notify( L &, struct thread$ * );
+ 
+     // For synchronization locks to use when releasing
+     size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
+ 
+     // to set recursion count after getting signalled;
+     void on_wakeup( L &, size_t recursion );
+ };
+ 
+ static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
+     pp_fn( pp_datum );
+     park();
+ }
+ 
+ // macros for default routine impls for is_blocking_lock trait that do not wait-morph
+ 
+ #define DEFAULT_ON_NOTIFY( lock_type ) \
+     static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
+ 
+ #define DEFAULT_ON_WAIT( lock_type ) \
+     static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
+         unlock( this ); \
+         pre_park_then_park( pp_fn, pp_datum ); \
+         return 0; \
+     }
+ 
+ // on_wakeup impl if lock should be reacquired after waking up
+ #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
+     static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
+ 
+ // on_wakeup impl if lock will not be reacquired after waking up
+ #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
+     static inline void on_wakeup( lock_type & this, size_t recursion ) {}
+ 
+ 
  
  //-----------------------------------------------------------------------------
…
  static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
- static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this); }
+ static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
  static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
  static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+ static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+ static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+ static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
  
  //----------
…
  static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
- static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this); }
+ static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
  static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
  static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+ static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+ static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+ static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
  
  //-----------------------------------------------------------------------------
…
  // - Kernel thd blocking alternative to the spinlock
  // - No ownership (will deadlock on reacq)
+ // - no reacq on wakeup
  struct futex_mutex {
      // lock state any state other than UNLOCKED is locked
…
  }
  
  static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
  
  static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
      return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  }
  
- static inline int internal_exchange( futex_mutex & this) with(this) {
+ static inline int internal_exchange( futex_mutex & this ) with(this) {
      return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
  }
  
  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( futex_mutex & this) with(this) {
+ static inline void lock( futex_mutex & this ) with(this) {
      int state;
  
…
          for (int i = 0; i < spin; i++) Pause();
      }
- 
-     // // no contention try to acquire
-     // if (internal_try_lock(this, state)) return;
  
      // if not in contended state, set to be in contended state
…
  }
  
- static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
- static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
- 
- // to set recursion count after getting signalled;
- static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
+ DEFAULT_ON_NOTIFY( futex_mutex )
+ DEFAULT_ON_WAIT( futex_mutex )
+ DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
  
  //-----------------------------------------------------------------------------
…
      int val;
  };
- 
  static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
+ // static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
+ // static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
  
  static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
…
  
  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( go_mutex & this) with(this) {
+ static inline void lock( go_mutex & this ) with( this ) {
      int state, init_state;
  
…
          while( !val ) { // lock unlocked
              state = 0;
-             if ( internal_try_lock(this, state, init_state)) return;
+             if ( internal_try_lock( this, state, init_state ) ) return;
          }
          for (int i = 0; i < 30; i++) Pause();
…
          while( !val ) { // lock unlocked
              state = 0;
-             if ( internal_try_lock(this, state, init_state)) return;
+             if ( internal_try_lock( this, state, init_state ) ) return;
          }
          sched_yield();
  
          // if not in contended state, set to be in contended state
-         state = internal_exchange( this, 2);
+         state = internal_exchange( this, 2 );
          if ( !state ) return; // state == 0
          init_state = 2;
-         futex( (int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
+         futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
      }
  }
…
  static inline void unlock( go_mutex & this ) with(this) {
      // if uncontended do atomic unlock and then return
-     if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+     if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
  
      // otherwise threads are blocked so we must wake one
-     futex((int *)&val, FUTEX_WAKE, 1);
- }
- 
- static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
- static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
- static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
- 
- //-----------------------------------------------------------------------------
- // CLH Spinlock
- // - No recursive acquisition
- // - Needs to be released by owner
- 
- struct clh_lock {
-     volatile bool * volatile tail;
-     volatile bool * volatile head;
- };
- 
- static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
- static inline void ^?{}( clh_lock & this ) { free(this.tail); }
- 
- static inline void lock(clh_lock & l) {
-     thread$ * curr_thd = active_thread();
-     *(curr_thd->clh_node) = false;
-     volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
-     while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
-     __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
-     curr_thd->clh_node = prev;
- }
- 
- static inline void unlock(clh_lock & l) {
-     __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
- }
- 
- static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
- static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
+     futex( (int *)&val, FUTEX_WAKE, 1 );
+ }
+ 
+ DEFAULT_ON_NOTIFY( go_mutex )
+ DEFAULT_ON_WAIT( go_mutex )
+ DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
  
  //-----------------------------------------------------------------------------
…
      this.lock_value = 0;
  }
+ static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+ static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
  
  static inline void ^?{}( exp_backoff_then_block_lock & this ){}
  
- static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+ static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
      return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }
  
- static inline bool try_lock( exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
- 
- static inline bool try_lock_contention( exp_backoff_then_block_lock & this) with(this) {
-     return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE);
- }
- 
- static inline bool block( exp_backoff_then_block_lock & this) with(this) {
+ static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
+ 
+ static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
+     return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
+ }
+ 
+ static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
      lock( spinlock __cfaabi_dbg_ctx2 );
      if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
…
  }
  
- static inline void lock( exp_backoff_then_block_lock & this) with(this) {
+ static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
      size_t compare_val = 0;
      int spin = 4;
…
  }
  
- static inline void unlock( exp_backoff_then_block_lock & this) with(this) {
+ static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
      if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
      lock( spinlock __cfaabi_dbg_ctx2 );
…
  }
  
- static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
- static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
+ DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
+ DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
+ DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
  
  //-----------------------------------------------------------------------------
…
  
  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( fast_block_lock & this) with(this) {
+ static inline void lock( fast_block_lock & this ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      if ( held ) {
…
  }
  
- static inline void unlock( fast_block_lock & this) with(this) {
+ static inline void unlock( fast_block_lock & this ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
…
  }
  
  static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      insert_last( blocked_threads, *t );
      unlock( lock );
  }
- static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
+ DEFAULT_ON_WAIT( fast_block_lock )
+ DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
  
  //-----------------------------------------------------------------------------
…
  struct simple_owner_lock {
      // List of blocked threads
-     dlist( thread$) blocked_threads;
+     dlist( select_node ) blocked_threads;
  
      // Spin lock used for mutual exclusion
…
  static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
  
- static inline void lock( simple_owner_lock & this) with(this) {
-     if ( owner == active_thread()) {
+ static inline void lock( simple_owner_lock & this ) with(this) {
+     if ( owner == active_thread() ) {
          recursion_count++;
          return;
…
      lock( lock __cfaabi_dbg_ctx2 );
  
-     if (owner != 0p) {
-         insert_last( blocked_threads, *active_thread() );
+     if ( owner != 0p ) {
+         select_node node;
+         insert_last( blocked_threads, node );
          unlock( lock );
          park( );
…
  }
  
- // TODO: fix duplicate def issue and bring this back
- // void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
- //     thread$ * t = &try_pop_front( blocked_threads );
- //     owner = t;
- //     recursion_count = ( t ? 1 : 0 );
- //     unpark( t );
- // }
- 
- static inline void unlock(simple_owner_lock & this) with(this) {
+ static inline void pop_node( simple_owner_lock & this ) with(this) {
+     __handle_waituntil_OR( blocked_threads );
+     select_node * node = &try_pop_front( blocked_threads );
+     if ( node ) {
+         owner = node->blocked_thread;
+         recursion_count = 1;
+         // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+         wake_one( blocked_threads, *node );
+     } else {
+         owner = 0p;
+         recursion_count = 0;
+     }
+ }
+ 
+ static inline void unlock( simple_owner_lock & this ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
…
      recursion_count--;
      if ( recursion_count == 0 ) {
-         // pop_and_set_new_owner( this );
-         thread$ * t = &try_pop_front( blocked_threads );
-         owner = t;
-         recursion_count = ( t ? 1 : 0 );
-         unpark( t );
+         pop_node( this );
      }
      unlock( lock );
  }
  
- static inline void on_notify( simple_owner_lock & this, struct thread$ * t ) with(this) {
+ static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      // lock held
      if ( owner != 0p ) {
-         insert_last( blocked_threads, *t );
+         insert_last( blocked_threads, *(select_node *)t->link_node );
      }
      // lock not held
…
  }
  
- static inline size_t on_wait( simple_owner_lock & this) with(this) {
+ static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
…
      size_t ret = recursion_count;
  
-     // pop_and_set_new_owner( this );
- 
-     thread$ * t = &try_pop_front( blocked_threads );
-     owner = t;
-     recursion_count = ( t ? 1 : 0 );
-     unpark( t );
- 
+     pop_node( this );
+ 
+     select_node node;
+     active_thread()->link_node = (void *)&node;
      unlock( lock );
+ 
+     pre_park_then_park( pp_fn, pp_datum );
+ 
      return ret;
  }
  
- static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+ static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+ 
+ // waituntil() support
+ static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
+     lock( lock __cfaabi_dbg_ctx2 );
+ 
+     // check if we can complete operation. If so race to establish winner in special OR case
+     if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+         if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+             unlock( lock );
+             return false;
+         }
+     }
+ 
+     if ( owner == active_thread() ) {
+         recursion_count++;
+         if ( node.park_counter ) __make_select_node_available( node );
+         unlock( lock );
+         return true;
+     }
+ 
+     if ( owner != 0p ) {
+         insert_last( blocked_threads, node );
+         unlock( lock );
+         return false;
+     }
+ 
+     owner = active_thread();
+     recursion_count = 1;
+ 
+     if ( node.park_counter ) __make_select_node_available( node );
+     unlock( lock );
+     return true;
+ }
+ 
+ static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
+     lock( lock __cfaabi_dbg_ctx2 );
+     if ( node`isListed ) {
+         remove( node );
+         unlock( lock );
+         return false;
+     }
+ 
+     if ( owner == active_thread() ) {
+         recursion_count--;
+         if ( recursion_count == 0 ) {
+             pop_node( this );
+         }
+     }
+     unlock( lock );
+     return false;
+ }
+ 
+ static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
+ 
  
  //-----------------------------------------------------------------------------
…
  
  // if this is called recursively IT WILL DEADLOCK!
- static inline void lock( spin_queue_lock & this) with(this) {
+ static inline void lock( spin_queue_lock & this ) with(this) {
      mcs_spin_node node;
      lock( lock, node );
…
  }
  
- static inline void unlock( spin_queue_lock & this) with(this) {
+ static inline void unlock( spin_queue_lock & this ) with(this) {
      __atomic_store_n(&held, false, __ATOMIC_RELEASE);
  }
  
- static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
-     unpark(t);
- }
- static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
- 
+ DEFAULT_ON_NOTIFY( spin_queue_lock )
+ DEFAULT_ON_WAIT( spin_queue_lock )
+ DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
  
  //-----------------------------------------------------------------------------
…
  
  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( mcs_block_spin_lock & this) with(this) {
+ static inline void lock( mcs_block_spin_lock & this ) with(this) {
      mcs_node node;
      lock( lock, node );
…
  }
  
- static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
- static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
+ DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
+ DEFAULT_ON_WAIT( mcs_block_spin_lock )
+ DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
  
  //-----------------------------------------------------------------------------
…
  
  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( block_spin_lock & this) with(this) {
+ static inline void lock( block_spin_lock & this ) with(this) {
      lock( lock );
      while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
…
  }
  
- static inline void unlock( block_spin_lock & this) with(this) {
+ static inline void unlock( block_spin_lock & this ) with(this) {
      __atomic_store_n(&held, false, __ATOMIC_RELEASE);
  }
  
  static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
      // first we acquire internal fast_block_lock
      lock( lock __cfaabi_dbg_ctx2 );
…
      unpark(t);
  }
- static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
- static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
+ DEFAULT_ON_WAIT( block_spin_lock )
+ static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
      // now we acquire the entire block_spin_lock upon waking up
      while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
…
      unlock( lock ); // Now we release the internal fast_spin_lock
  }
- 
- //-----------------------------------------------------------------------------
- // is_blocking_lock
- forall( L & | sized(L) )
- trait is_blocking_lock {
-     // For synchronization locks to use when acquiring
-     void on_notify( L &, struct thread$ * );
- 
-     // For synchronization locks to use when releasing
-     size_t on_wait( L & );
- 
-     // to set recursion count after getting signalled;
-     void on_wakeup( L &, size_t recursion );
- };
  
  //-----------------------------------------------------------------------------
…
  forall(L & | is_blocking_lock(L)) {
      struct info_thread;
- 
-     // // for use by sequence
-     // info_thread(L) *& Back( info_thread(L) * this );
-     // info_thread(L) *& Next( info_thread(L) * this );
  
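For context, the revised is_blocking_lock trait now threads a __cfa_pre_park hook through on_wait, and the new DEFAULT_ON_* macros stamp out the non-wait-morphing implementations for the simple lock types above. The following is a minimal sketch (not part of this changeset) of how a lock type opts in; the toy_spin_lock type is hypothetical, while the macros, trait members, and builtins come from locks.hfa as changed above.

struct toy_spin_lock {
    volatile bool held;
};
static inline void ?{}( toy_spin_lock & this ) { this.held = false; }

static inline void lock( toy_spin_lock & this ) {
    // naive test-and-set spin; the real locks in this header add Pause() and backoff
    while ( __atomic_exchange_n( &this.held, true, __ATOMIC_ACQUIRE ) ) {}
}
static inline void unlock( toy_spin_lock & this ) {
    __atomic_store_n( &this.held, false, __ATOMIC_RELEASE );
}

// the three trait members: unpark on notify, release + pre-park hook + park on wait,
// and reacquire the lock when the signalled thread wakes up
DEFAULT_ON_NOTIFY( toy_spin_lock )
DEFAULT_ON_WAIT( toy_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( toy_spin_lock )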
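The split between on_wait, the pre-park hook, and on_wakeup is easiest to see from the caller's side. Below is a hedged sketch of the protocol a waiting primitive (for example, a condition variable) is expected to follow; only the trait routines, pre_park_noop, and 0p come from the header, while the wait_on wrapper itself is hypothetical.

forall( L & | is_blocking_lock( L ) )
static inline void wait_on( L & l ) {
    // release the lock and park; no extra pre-park work is needed here, so pass the no-op hook
    size_t recursion = on_wait( l, pre_park_noop, 0p );

    // ... the thread is later unparked by a signaller calling on_notify( l, t ) ...

    // reinstate the recursion count (and reacquire the lock if the type's on_wakeup does so)
    on_wakeup( l, recursion );
}

Passing a real callback instead of pre_park_noop lets the caller defer one final release step until after the lock's internal state is published but before the thread parks, which is what pre_park_then_park exists for.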
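Finally, the reason single_acquisition_lock, owner_lock, and simple_owner_lock now implement register_select, unregister_select, and on_selected is so they can appear as clauses of CFA's waituntil statement. A rough usage sketch follows; the two lock instances are hypothetical, and the exact waituntil grammar (or/and/when clauses) should be checked against the CFA documentation rather than taken from here.

single_acquisition_lock a;
owner_lock b;

// block until one of the locks is acquired, run the matching clause, and release it there
waituntil( a ) { /* a is held here */ unlock( a ); }
or waituntil( b ) { /* b is held here */ unlock( b ); }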