File: 1 edited
    libcfa/src/concurrency/locks.hfa (modified) (36 diffs)
Legend:
    (unmarked)  unmodified context
    +           added (present only in rbd72c284)
    -           removed (present only in r5a05946)
libcfa/src/concurrency/locks.hfa
--- libcfa/src/concurrency/locks.hfa (r5a05946)
+++ libcfa/src/concurrency/locks.hfa (rbd72c284)

@@ -30 +30 @@
  #include "time.hfa"

- #include "select.hfa"
-
  #include <fstream.hfa>

@@ -39 +37 @@
  #include <unistd.h>

- typedef void (*__cfa_pre_park)( void * );
-
- static inline void pre_park_noop( void * ) {}
-
- //-----------------------------------------------------------------------------
- // is_blocking_lock
- forall( L & | sized(L) )
- trait is_blocking_lock {
-     // For synchronization locks to use when acquiring
-     void on_notify( L &, struct thread$ * );
-
-     // For synchronization locks to use when releasing
-     size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
-
-     // to set recursion count after getting signalled;
-     void on_wakeup( L &, size_t recursion );
- };
-
- static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
-     pp_fn( pp_datum );
-     park();
- }
-
- // macros for default routine impls for is_blocking_lock trait that do not wait-morph
-
- #define DEFAULT_ON_NOTIFY( lock_type ) \
-     static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
-
- #define DEFAULT_ON_WAIT( lock_type ) \
-     static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
-         unlock( this ); \
-         pre_park_then_park( pp_fn, pp_datum ); \
-         return 0; \
-     }
-
- // on_wakeup impl if lock should be reacquired after waking up
- #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
-     static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
-
- // on_wakeup impl if lock will not be reacquired after waking up
- #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
-     static inline void on_wakeup( lock_type & this, size_t recursion ) {}
-
-
+ // C_TODO: cleanup this and locks.cfa
+ // - appropriate separation of interface and impl
+ // - clean up unused/unneeded locks
+ // - change messy big blocking lock from inheritance to composition to remove need for flags

  //-----------------------------------------------------------------------------

@@ -109 +67 @@
  static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
- static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+ static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
  static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
  static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
- static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
- static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
- static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

  //----------
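The r5a05946 side threads a __cfa_pre_park callback through on_wait so that a lock's final release step can be deferred until the waiter is actually committed to blocking: pre_park_then_park runs the callback and then parks, closing the window in which a wakeup could otherwise be lost. The following is a minimal C sketch of that ordering only; it is not the CFA runtime, park/unpark are stood in for by a POSIX semaphore, and the names waiter, release_internal, and waiter_main are made up for illustration.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

/* Stand-in for a thread's park/unpark token; the CFA runtime does this differently. */
typedef struct waiter { sem_t parked; } waiter;

typedef void (*pre_park_fn)( void * );

/* Mirrors pre_park_then_park: run the caller-supplied step, then block. */
static void pre_park_then_park( waiter * w, pre_park_fn fn, void * arg ) {
    fn( arg );               /* e.g. release an internal lock only now ...          */
    sem_wait( &w->parked );  /* ... so a wakeup issued after fn() cannot be lost    */
}

static void unpark( waiter * w ) { sem_post( &w->parked ); }

/* Illustrative pre-park action: drop a spinlock guarding a wait queue. */
static void release_internal( void * arg ) { pthread_spin_unlock( (pthread_spinlock_t *)arg ); }

static waiter w;
static pthread_spinlock_t q_lock;

static void * waiter_main( void * unused ) {
    (void)unused;
    pthread_spin_lock( &q_lock );                        /* "enqueue" under the internal lock     */
    pre_park_then_park( &w, release_internal, &q_lock ); /* release it only just before parking   */
    printf( "woken\n" );
    return NULL;
}

int main( void ) {
    pthread_t t;
    sem_init( &w.parked, 0, 0 );
    pthread_spin_init( &q_lock, PTHREAD_PROCESS_PRIVATE );
    pthread_create( &t, NULL, waiter_main, NULL );
    pthread_spin_lock( &q_lock );   /* cannot proceed until the waiter has committed to blocking */
    pthread_spin_unlock( &q_lock );
    unpark( &w );
    pthread_join( t, NULL );
    return 0;
}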
@@ -126 +81 @@
  static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
- static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+ static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
  static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
  static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
- static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
- static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
- static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

  //-----------------------------------------------------------------------------

@@ -204 +156 @@
  // - Kernel thd blocking alternative to the spinlock
  // - No ownership (will deadlock on reacq)
- // - no reacq on wakeup
  struct futex_mutex {
      // lock state any state other than UNLOCKED is locked

@@ -218 +169 @@
  }

- static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
-
- static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
+ static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
+
+ static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
      return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  }

- static inline int internal_exchange( futex_mutex & this) with(this) {
+ static inline int internal_exchange(futex_mutex & this) with(this) {
      return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
  }

  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( futex_mutex & this) with(this) {
+ static inline void lock(futex_mutex & this) with(this) {
      int state;

@@ -239 +190 @@
          for (int i = 0; i < spin; i++) Pause();
      }
+
+     // // no contention try to acquire
+     // if (internal_try_lock(this, state)) return;

      // if not in contended state, set to be in contended state

@@ -258 +212 @@
  }

- DEFAULT_ON_NOTIFY( futex_mutex )
- DEFAULT_ON_WAIT( futex_mutex )
- DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
+ static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
+ static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
+
+ // to set recursion count after getting signalled;
+ static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}

  //-----------------------------------------------------------------------------
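Both futex_mutex here and go_mutex below follow the usual three-state futex protocol: 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters. That is why the contended path forces the state to 2 before sleeping, and why unlock only issues FUTEX_WAKE when the exchanged value was not 1. A self-contained C sketch of that protocol follows (Linux-only; the names fmutex and futex_word are made up for illustration):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

typedef struct { int val; } fmutex;   /* 0 unlocked, 1 locked, 2 locked + maybe waiters */

static long futex_word( int * addr, int op, int expect ) {
    return syscall( SYS_futex, addr, op, expect, NULL, NULL, 0 );
}

static void fmutex_lock( fmutex * m ) {
    int zero = 0;
    /* uncontended fast path: 0 -> 1 */
    if ( __atomic_compare_exchange_n( &m->val, &zero, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) )
        return;
    /* contended: advertise waiters by forcing the state to 2, then sleep while it stays 2 */
    while ( __atomic_exchange_n( &m->val, 2, __ATOMIC_ACQUIRE ) != 0 )
        futex_word( &m->val, FUTEX_WAIT, 2 );   /* returns immediately if val is not 2 */
}

static void fmutex_unlock( fmutex * m ) {
    /* 1 -> 0 means nobody advertised waiting; otherwise wake one sleeper */
    if ( __atomic_exchange_n( &m->val, 0, __ATOMIC_RELEASE ) != 1 )
        futex_word( &m->val, FUTEX_WAKE, 1 );
}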
@@ -276 +232 @@
      int val;
  };
+
  static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
- // static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
- // static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

  static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {

@@ -289 +244 @@

  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( go_mutex & this ) with( this) {
+ static inline void lock(go_mutex & this) with(this) {
      int state, init_state;

@@ -300 +255 @@
          while( !val ) { // lock unlocked
              state = 0;
-             if ( internal_try_lock( this, state, init_state )) return;
+             if (internal_try_lock(this, state, init_state)) return;
          }
          for (int i = 0; i < 30; i++) Pause();

@@ -307 +262 @@
      while( !val ) { // lock unlocked
          state = 0;
-         if ( internal_try_lock( this, state, init_state )) return;
+         if (internal_try_lock(this, state, init_state)) return;
      }
      sched_yield();

      // if not in contended state, set to be in contended state
-     state = internal_exchange( this, 2);
+     state = internal_exchange(this, 2);
      if ( !state ) return; // state == 0
      init_state = 2;
-     futex( (int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
+     futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
  }
  }

@@ -321 +276 @@
  static inline void unlock( go_mutex & this ) with(this) {
      // if uncontended do atomic unlock and then return
-     if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+     if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

      // otherwise threads are blocked so we must wake one
-     futex( (int *)&val, FUTEX_WAKE, 1 );
- }
-
- DEFAULT_ON_NOTIFY( go_mutex )
- DEFAULT_ON_WAIT( go_mutex )
- DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
+     futex((int *)&val, FUTEX_WAKE, 1);
+ }
+
+ static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
+ static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
+ static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
+
+ //-----------------------------------------------------------------------------
+ // CLH Spinlock
+ // - No recursive acquisition
+ // - Needs to be released by owner
+
+ struct clh_lock {
+     volatile bool * volatile tail;
+     volatile bool * volatile head;
+ };
+
+ static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
+ static inline void ^?{}( clh_lock & this ) { free(this.tail); }
+
+ static inline void lock(clh_lock & l) {
+     thread$ * curr_thd = active_thread();
+     *(curr_thd->clh_node) = false;
+     volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
+     while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
+     __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
+     curr_thd->clh_node = prev;
+ }
+
+ static inline void unlock(clh_lock & l) {
+     __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
+ }
+
+ static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
+ static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }

  //-----------------------------------------------------------------------------
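The added clh_lock is a CLH queue lock: each acquirer swaps its own node into the tail, spins only on its predecessor's node, and on release flips the flag in the node published as head; the predecessor's node is then recycled as the thread's node for its next acquisition (hung off thread$.clh_node above). A stand-alone C sketch of the same idea, using a thread-local node and sequentially-consistent atomics; all names are illustrative:

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative CLH queue lock; memory orders are simplified to SEQ_CST. */
typedef struct {
    bool * tail;   /* most recently enqueued node                      */
    bool * head;   /* node the current holder is going to flip on exit */
} clh;

static _Thread_local bool * my_node;   /* per-thread node, recycled like thread$.clh_node */

static void clh_init( clh * l ) {
    l->tail = malloc( sizeof(bool) );
    *l->tail = true;                   /* true = released: the first locker proceeds */
    l->head = l->tail;
}

static void clh_lock( clh * l ) {
    if ( !my_node ) my_node = malloc( sizeof(bool) );
    *my_node = false;                  /* false = held: our successor must wait      */
    bool * prev = __atomic_exchange_n( &l->tail, my_node, __ATOMIC_SEQ_CST );
    while ( !__atomic_load_n( prev, __ATOMIC_SEQ_CST ) ) { /* spin on predecessor only */ }
    __atomic_store_n( &l->head, my_node, __ATOMIC_SEQ_CST );
    my_node = prev;                    /* recycle the predecessor's node             */
}

static void clh_unlock( clh * l ) {
    __atomic_store_n( l->head, true, __ATOMIC_SEQ_CST );   /* release our successor's spin */
}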
@@ -349 +334 @@
      this.lock_value = 0;
  }
- static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
- static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

  static inline void ^?{}( exp_backoff_then_block_lock & this ){}

- static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+ static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
      return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }

- static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val); }
-
- static inline bool try_lock_contention( exp_backoff_then_block_lock & this) with(this) {
-     return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE);
- }
-
- static inline bool block( exp_backoff_then_block_lock & this) with(this) {
+ static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
+
+ static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
+     return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
+ }
+
+ static inline bool block(exp_backoff_then_block_lock & this) with(this) {
      lock( spinlock __cfaabi_dbg_ctx2 );
      if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {

@@ -376 +359 @@
  }

- static inline void lock( exp_backoff_then_block_lock & this) with(this) {
+ static inline void lock(exp_backoff_then_block_lock & this) with(this) {
      size_t compare_val = 0;
      int spin = 4;

@@ -395 +378 @@
  }

- static inline void unlock( exp_backoff_then_block_lock & this) with(this) {
+ static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
      if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
      lock( spinlock __cfaabi_dbg_ctx2 );

@@ -403 +386 @@
  }

- DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
- DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
- DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
+ static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
+ static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

  //-----------------------------------------------------------------------------
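As its name says, exp_backoff_then_block_lock spins with a growing pause budget (the budget starts at spin = 4 above) and only falls back to blocking once that budget is exhausted. A small C sketch of the backoff-then-fall-back shape; the blocking step is reduced to sched_yield() since the real lock parks the thread, and the names try_acquire and backoff_then_block are made up:

#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

#define SPIN_START 4
#define SPIN_END   1024

/* Placeholder fast-path attempt; the real lock CASes a multi-state word. */
static bool try_acquire( volatile size_t * word ) {
    size_t expected = 0;
    return __atomic_compare_exchange_n( word, &expected, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
}

static void backoff_then_block( volatile size_t * word ) {
    for ( int spin = SPIN_START; ; spin += spin ) {     /* double the budget each round */
        if ( try_acquire( word ) ) return;
        for ( volatile int i = 0; i < spin; i++ ) { }   /* stand-in for Pause()         */
        if ( spin >= SPIN_END ) break;                  /* spin budget exhausted        */
    }
    while ( !try_acquire( word ) )
        sched_yield();                                  /* stand-in for parking         */
}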
@@ -435 +418 @@

  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( fast_block_lock & this) with(this) {
+ static inline void lock(fast_block_lock & this) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      if ( held ) {

@@ -447 +430 @@
  }

- static inline void unlock( fast_block_lock & this) with(this) {
+ static inline void unlock(fast_block_lock & this) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );

@@ -456 +439 @@
  }

- static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
+ static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      insert_last( blocked_threads, *t );
      unlock( lock );
  }
- DEFAULT_ON_WAIT( fast_block_lock )
- DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
+ static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

  //-----------------------------------------------------------------------------

@@ -473 +456 @@
  struct simple_owner_lock {
      // List of blocked threads
-     dlist( select_node ) blocked_threads;
+     dlist( thread$ ) blocked_threads;

      // Spin lock used for mutual exclusion

@@ -494 +477 @@
  static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

- static inline void lock( simple_owner_lock & this) with(this) {
-     if ( owner == active_thread()) {
+ static inline void lock(simple_owner_lock & this) with(this) {
+     if (owner == active_thread()) {
          recursion_count++;
          return;

@@ -501 +484 @@
      lock( lock __cfaabi_dbg_ctx2 );

-     if ( owner != 0p ) {
-         select_node node;
-         insert_last( blocked_threads, node );
+     if (owner != 0p) {
+         insert_last( blocked_threads, *active_thread() );
          unlock( lock );
          park( );

@@ -513 +495 @@
  }

- static inline void pop_node( simple_owner_lock & this ) with(this) {
-     __handle_waituntil_OR( blocked_threads );
-     select_node * node = &try_pop_front( blocked_threads );
-     if ( node ) {
-         owner = node->blocked_thread;
-         recursion_count = 1;
-         // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
-         wake_one( blocked_threads, *node );
-     } else {
-         owner = 0p;
-         recursion_count = 0;
-     }
- }
-
- static inline void unlock( simple_owner_lock & this ) with(this) {
+ // TODO: fix duplicate def issue and bring this back
+ // void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
+ //     thread$ * t = &try_pop_front( blocked_threads );
+ //     owner = t;
+ //     recursion_count = ( t ? 1 : 0 );
+ //     unpark( t );
+ // }
+
+ static inline void unlock(simple_owner_lock & this) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -534 +510 @@
      recursion_count--;
      if ( recursion_count == 0 ) {
-         pop_node( this );
+         // pop_and_set_new_owner( this );
+         thread$ * t = &try_pop_front( blocked_threads );
+         owner = t;
+         recursion_count = ( t ? 1 : 0 );
+         unpark( t );
      }
      unlock( lock );
  }

- static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
+ static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      // lock held
      if ( owner != 0p ) {
-         insert_last( blocked_threads, *(select_node *)t->link_node );
+         insert_last( blocked_threads, *t );
      }
      // lock not held

@@ -554 +534 @@
  }

- static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
+ static inline size_t on_wait(simple_owner_lock & this) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );

@@ -561 +541 @@
      size_t ret = recursion_count;

-     pop_node( this );
-
-     select_node node;
-     active_thread()->link_node = (void *)&node;
+     // pop_and_set_new_owner( this );
+
+     thread$ * t = &try_pop_front( blocked_threads );
+     owner = t;
+     recursion_count = ( t ? 1 : 0 );
+     unpark( t );
+
      unlock( lock );
-
-     pre_park_then_park( pp_fn, pp_datum );
-
      return ret;
  }

- static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
-
- // waituntil() support
- static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
-     lock( lock __cfaabi_dbg_ctx2 );
-
-     // check if we can complete operation. If so race to establish winner in special OR case
-     if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
-         if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
-             unlock( lock );
-             return false;
-         }
-     }
-
-     if ( owner == active_thread() ) {
-         recursion_count++;
-         if ( node.park_counter ) __make_select_node_available( node );
-         unlock( lock );
-         return true;
-     }
-
-     if ( owner != 0p ) {
-         insert_last( blocked_threads, node );
-         unlock( lock );
-         return false;
-     }
-
-     owner = active_thread();
-     recursion_count = 1;
-
-     if ( node.park_counter ) __make_select_node_available( node );
-     unlock( lock );
-     return true;
- }
-
- static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
-     lock( lock __cfaabi_dbg_ctx2 );
-     if ( node`isListed ) {
-         remove( node );
-         unlock( lock );
-         return false;
-     }
-
-     if ( owner == active_thread() ) {
-         recursion_count--;
-         if ( recursion_count == 0 ) {
-             pop_node( this );
-         }
-     }
-     unlock( lock );
-     return false;
- }
-
- static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
-
+ static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

  //-----------------------------------------------------------------------------
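In both versions, simple_owner_lock's unlock and on_wait paths hand the lock directly to the next waiter: the successor is made owner (with recursion count 1) before it is unparked, so no other thread can barge in between the release and the wakeup, and on_wait returns the saved recursion count for on_wakeup to restore later. Below is a minimal C sketch of that direct hand-off only, with pthread primitives standing in for the runtime; the names handoff_lock, hl_lock, and hl_unlock are made up, and recursion/owner bookkeeping is omitted.

#include <pthread.h>
#include <semaphore.h>
#include <stddef.h>

typedef struct waiter { sem_t go; struct waiter * next; } waiter;

typedef struct {
    pthread_spinlock_t guard;   /* protects held + queue, like the internal lock above; */
    int held;                   /* initialise guard with pthread_spin_init before use   */
    waiter * head, * tail;
} handoff_lock;

static void hl_lock( handoff_lock * l ) {
    waiter me;
    me.next = NULL;
    sem_init( &me.go, 0, 0 );
    pthread_spin_lock( &l->guard );
    if ( !l->held ) {                       /* uncontended: take the lock and leave     */
        l->held = 1;
        pthread_spin_unlock( &l->guard );
        sem_destroy( &me.go );
        return;
    }
    if ( l->tail ) l->tail->next = &me; else l->head = &me;
    l->tail = &me;
    pthread_spin_unlock( &l->guard );
    sem_wait( &me.go );                     /* woken only once the lock is already ours */
    sem_destroy( &me.go );
}

static void hl_unlock( handoff_lock * l ) {
    pthread_spin_lock( &l->guard );
    waiter * w = l->head;
    if ( w ) {                              /* transfer ownership, then wake the new owner */
        l->head = w->next;
        if ( !l->head ) l->tail = NULL;
        sem_post( &w->go );                 /* held stays 1: nobody can barge in between   */
    } else {
        l->held = 0;                        /* no waiters: actually release the lock       */
    }
    pthread_spin_unlock( &l->guard );
}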
@@ -652 +578 @@

  // if this is called recursively IT WILL DEADLOCK!
- static inline void lock( spin_queue_lock & this) with(this) {
+ static inline void lock(spin_queue_lock & this) with(this) {
      mcs_spin_node node;
      lock( lock, node );

@@ -660 +586 @@
  }

- static inline void unlock( spin_queue_lock & this) with(this) {
+ static inline void unlock(spin_queue_lock & this) with(this) {
      __atomic_store_n(&held, false, __ATOMIC_RELEASE);
  }

- DEFAULT_ON_NOTIFY( spin_queue_lock )
- DEFAULT_ON_WAIT( spin_queue_lock )
- DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
+ static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
+     unpark(t);
+ }
+ static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
+

  //-----------------------------------------------------------------------------

@@ -692 +621 @@

  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( mcs_block_spin_lock & this) with(this) {
+ static inline void lock(mcs_block_spin_lock & this) with(this) {
      mcs_node node;
      lock( lock, node );

@@ -704 +633 @@
  }

- DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
- DEFAULT_ON_WAIT( mcs_block_spin_lock )
- DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
+ static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+ static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }

  //-----------------------------------------------------------------------------
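spin_queue_lock and mcs_block_spin_lock both acquire an internal MCS-style queue lock (through the stack-allocated mcs_spin_node / mcs_node above) before setting their held flag, so arriving threads spin in FIFO order on their own node rather than hammering a shared word. A stand-alone C sketch of the underlying MCS queue lock, with illustrative names only:

#include <stdbool.h>
#include <stddef.h>

typedef struct mcs_node { struct mcs_node * next; bool wait; } mcs_node;
typedef struct { mcs_node * tail; } mcs_lock;

static void mcs_acquire( mcs_lock * l, mcs_node * me ) {
    me->next = NULL;
    me->wait = true;
    mcs_node * prev = __atomic_exchange_n( &l->tail, me, __ATOMIC_ACQ_REL );
    if ( prev ) {                                            /* someone ahead of us      */
        __atomic_store_n( &prev->next, me, __ATOMIC_RELEASE );
        while ( __atomic_load_n( &me->wait, __ATOMIC_ACQUIRE ) ) { /* spin on own node   */ }
    }
}

static void mcs_release( mcs_lock * l, mcs_node * me ) {
    mcs_node * succ = __atomic_load_n( &me->next, __ATOMIC_ACQUIRE );
    if ( !succ ) {
        mcs_node * expected = me;                            /* no successor visible yet */
        if ( __atomic_compare_exchange_n( &l->tail, &expected, NULL, 0,
                                          __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE ) )
            return;                                          /* queue really was empty   */
        while ( !( succ = __atomic_load_n( &me->next, __ATOMIC_ACQUIRE ) ) ) { /* wait for link */ }
    }
    __atomic_store_n( &succ->wait, false, __ATOMIC_RELEASE ); /* pass the lock on        */
}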
@@ -732 +661 @@

  // if this is called recursively IT WILL DEADLOCK!!!!!
- static inline void lock( block_spin_lock & this) with(this) {
+ static inline void lock(block_spin_lock & this) with(this) {
      lock( lock );
      while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();

@@ -739 +668 @@
  }

- static inline void unlock( block_spin_lock & this) with(this) {
+ static inline void unlock(block_spin_lock & this) with(this) {
      __atomic_store_n(&held, false, __ATOMIC_RELEASE);
  }

- static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
+ static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
      // first we acquire internal fast_block_lock
      lock( lock __cfaabi_dbg_ctx2 );

@@ -757 +686 @@
      unpark(t);
  }
- DEFAULT_ON_WAIT( block_spin_lock )
- static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
+ static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
+ static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
      // now we acquire the entire block_spin_lock upon waking up
      while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();

@@ -764 +693 @@
      unlock( lock ); // Now we release the internal fast_spin_lock
  }
+
+ //-----------------------------------------------------------------------------
+ // is_blocking_lock
+ forall( L & | sized(L) )
+ trait is_blocking_lock {
+     // For synchronization locks to use when acquiring
+     void on_notify( L &, struct thread$ * );
+
+     // For synchronization locks to use when releasing
+     size_t on_wait( L & );
+
+     // to set recursion count after getting signalled;
+     void on_wakeup( L &, size_t recursion );
+ };

  //-----------------------------------------------------------------------------

@@ -771 +714 @@
  forall(L & | is_blocking_lock(L)) {
      struct info_thread;
+
+     // // for use by sequence
+     // info_thread(L) *& Back( info_thread(L) * this );
+     // info_thread(L) *& Next( info_thread(L) * this );
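On both sides of this change, is_blocking_lock is the interface that higher-level synchronization (condition variables and the like) programs against: on_wait fully releases the lock and reports the recursion depth, the waiter then blocks, and on_wakeup restores that depth once a signaller has handed the thread back to the lock via on_notify. A small C rendering of that call protocol, modelling the trait as a table of function pointers; every name here is hypothetical and the park/enqueue step is elided.

#include <stddef.h>

struct thread;   /* opaque stand-in for thread$ */

/* A C rendering of the is_blocking_lock trait: one entry per trait routine. */
typedef struct {
    void   (*on_notify)( void * lock, struct thread * t );   /* used when signalling            */
    size_t (*on_wait)  ( void * lock );                      /* full release; returns recursion */
    void   (*on_wakeup)( void * lock, size_t recursion );    /* restore state after signalling  */
} blocking_lock_ops;

/* Shape of a condition-variable wait over any such lock (park/unpark elided). */
static void cond_wait_sketch( const blocking_lock_ops * ops, void * lock ) {
    size_t rec = ops->on_wait( lock );   /* release the user lock, remember recursion depth      */
    /* ... enqueue self on the condition, park until a signaller calls on_notify for us ...      */
    ops->on_wakeup( lock, rec );         /* reacquire/restore once unparked                      */
}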