Legend:
- Unmodified: no prefix
- Added: prefixed with '+'
- Removed: prefixed with '-'
- '@@ -m +n @@' marks the start of a hunk at line m of the old revision and line n of the new revision
libcfa/src/concurrency/locks.hfa

--- rbd72c284
+++ r5ece8ce

@@ -30 +30 @@
 #include "time.hfa"

+#include "select.hfa"
+
 #include <fstream.hfa>

@@ -37 +39 @@
 #include <unistd.h>

-// C_TODO: cleanup this and locks.cfa
-// - appropriate separation of interface and impl
-// - clean up unused/unneeded locks
-// - change messy big blocking lock from inheritance to composition to remove need for flags
+typedef void (*__cfa_pre_park)( void * );
+
+static inline void pre_park_noop( void * ) {}
+
+//-----------------------------------------------------------------------------
+// is_blocking_lock
+forall( L & | sized(L) )
+trait is_blocking_lock {
+    // For synchronization locks to use when acquiring
+    void on_notify( L &, struct thread$ * );
+
+    // For synchronization locks to use when releasing
+    size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
+
+    // to set recursion count after getting signalled;
+    void on_wakeup( L &, size_t recursion );
+};
+
+static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
+    pp_fn( pp_datum );
+    park();
+}
+
+// macros for default routine impls for is_blocking_lock trait that do not wait-morph
+
+#define DEFAULT_ON_NOTIFY( lock_type ) \
+    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
+
+#define DEFAULT_ON_WAIT( lock_type ) \
+    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
+        unlock( this ); \
+        pre_park_then_park( pp_fn, pp_datum ); \
+        return 0; \
+    }
+
+// on_wakeup impl if lock should be reacquired after waking up
+#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
+    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
+
+// on_wakeup impl if lock will not be reacquired after waking up
+#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
+    static inline void on_wakeup( lock_type & this, size_t recursion ) {}
+
+

 //-----------------------------------------------------------------------------

@@ -67 +109 @@
 static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
 static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this); }
+static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
 static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

 //----------

@@ -81 +126 @@
 static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
 static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this); }
+static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

 //-----------------------------------------------------------------------------

@@ -128 +176 @@
 static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

-static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
-    return node->next;
-}
-
 struct mcs_spin_lock {
     mcs_spin_queue queue;

@@ -137 +181 @@

 static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
+    n.locked = true;
     mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
-    n.locked = true;
-    if(prev == 0p) return;
+    if( prev == 0p ) return;
     prev->next = &n;
-    while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
+    while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
 }

@@ -147 +191 @@
     mcs_spin_node * n_ptr = &n;
     if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
-    while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
+    while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
     n.next->locked = false;
 }

@@ -156 +200 @@
 // - Kernel thd blocking alternative to the spinlock
 // - No ownership (will deadlock on reacq)
+// - no reacq on wakeup
 struct futex_mutex {
     // lock state any state other than UNLOCKED is locked

@@ -169 +214 @@
 }

-static inline void
-
-static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
+static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
+
+static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
     return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
 }

-static inline int internal_exchange( futex_mutex & this) with(this) {
+static inline int internal_exchange( futex_mutex & this ) with(this) {
     return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
 }

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( futex_mutex & this) with(this) {
+static inline void lock( futex_mutex & this ) with(this) {
     int state;

@@ -190 +235 @@
         for (int i = 0; i < spin; i++) Pause();
     }
-
-    // // no contention try to acquire
-    // if (internal_try_lock(this, state)) return;

     // if not in contended state, set to be in contended state

@@ -212 +254 @@
 }

-static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}
-
-// to set recursion count after getting signalled;
-static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
+DEFAULT_ON_NOTIFY( futex_mutex )
+DEFAULT_ON_WAIT( futex_mutex )
+DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

 //-----------------------------------------------------------------------------

@@ -232 +272 @@
     int val;
 };
-
 static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
+// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
+// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

 static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {

@@ -244 +285 @@

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( go_mutex & this) with(this) {
+static inline void lock( go_mutex & this ) with( this ) {
     int state, init_state;

@@ -255 +296 @@
         while( !val ) { // lock unlocked
             state = 0;
-            if ( internal_try_lock(this, state, init_state)) return;
+            if ( internal_try_lock( this, state, init_state ) ) return;
         }
         for (int i = 0; i < 30; i++) Pause();

@@ -262 +303 @@
         while( !val ) { // lock unlocked
             state = 0;
-            if ( internal_try_lock(this, state, init_state)) return;
+            if ( internal_try_lock( this, state, init_state ) ) return;
         }
         sched_yield();

         // if not in contended state, set to be in contended state
-        state = internal_exchange( this, 2);
+        state = internal_exchange( this, 2 );
         if ( !state ) return; // state == 0
         init_state = 2;
-        futex( (int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
+        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
     }
 }

@@ -276 +317 @@
 static inline void unlock( go_mutex & this ) with(this) {
     // if uncontended do atomic unlock and then return
-    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
+    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

     // otherwise threads are blocked so we must wake one
-    futex((int *)&val, FUTEX_WAKE, 1);
-}
-
-static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
-static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
-
-//-----------------------------------------------------------------------------
-// CLH Spinlock
-// - No recursive acquisition
-// - Needs to be released by owner
-
-struct clh_lock {
-    volatile bool * volatile tail;
-    volatile bool * volatile head;
-};
-
-static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
-static inline void ^?{}( clh_lock & this ) { free(this.tail); }
-
-static inline void lock(clh_lock & l) {
-    thread$ * curr_thd = active_thread();
-    *(curr_thd->clh_node) = false;
-    volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
-    while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
-    __atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
-    curr_thd->clh_node = prev;
-}
-
-static inline void unlock(clh_lock & l) {
-    __atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
-}
-
-static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
+    futex( (int *)&val, FUTEX_WAKE, 1 );
+}
+
+DEFAULT_ON_NOTIFY( go_mutex )
+DEFAULT_ON_WAIT( go_mutex )
+DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

 //-----------------------------------------------------------------------------

@@ -334 +345 @@
     this.lock_value = 0;
 }
+static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
+static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

 static inline void ^?{}( exp_backoff_then_block_lock & this ){}

-static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
+static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
     return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 }

-static inline bool try_lock( exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }
-
-static inline bool try_lock_contention( exp_backoff_then_block_lock & this) with(this) {
-    return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE);
-}
-
-static inline bool block( exp_backoff_then_block_lock & this) with(this) {
+static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
+
+static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
+    return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
+}
+
+static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
     lock( spinlock __cfaabi_dbg_ctx2 );
     if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {

@@ -359 +372 @@
 }

-static inline void lock( exp_backoff_then_block_lock & this) with(this) {
+static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
     size_t compare_val = 0;
     int spin = 4;

@@ -378 +391 @@
 }

-static inline void unlock( exp_backoff_then_block_lock & this) with(this) {
+static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
     if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
     lock( spinlock __cfaabi_dbg_ctx2 );

@@ -386 +399 @@
 }

-static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
+DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
+DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
+DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

 //-----------------------------------------------------------------------------

@@ -418 +431 @@

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( fast_block_lock & this) with(this) {
+static inline void lock( fast_block_lock & this ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     if ( held ) {

@@ -430 +443 @@
 }

-static inline void unlock( fast_block_lock & this) with(this) {
+static inline void unlock( fast_block_lock & this ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );

@@ -439 +452 @@
 }

-static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     insert_last( blocked_threads, *t );
     unlock( lock );
 }
-static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
+DEFAULT_ON_WAIT( fast_block_lock )
+DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

 //-----------------------------------------------------------------------------

@@ -456 +469 @@
 struct simple_owner_lock {
     // List of blocked threads
-    dlist( thread$) blocked_threads;
+    dlist( select_node ) blocked_threads;

     // Spin lock used for mutual exclusion

@@ -477 +490 @@
 static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

-static inline void lock( simple_owner_lock & this) with(this) {
-    if ( owner == active_thread()) {
+static inline void lock( simple_owner_lock & this ) with(this) {
+    if ( owner == active_thread() ) {
         recursion_count++;
         return;

@@ -484 +497 @@
     lock( lock __cfaabi_dbg_ctx2 );

-    if (owner != 0p) {
-        insert_last( blocked_threads, *active_thread() );
+    if ( owner != 0p ) {
+        select_node node;
+        insert_last( blocked_threads, node );
         unlock( lock );
         park( );

@@ -495 +509 @@
     }

-    // TODO: fix duplicate def issue and bring this back
-    // void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
-    // thread$ * t = &try_pop_front( blocked_threads );
-    // owner = t;
-    // recursion_count = ( t ? 1 : 0 );
-    // unpark( t );
-    // }
-
-static inline void unlock(simple_owner_lock & this) with(this) {
+static inline void pop_node( simple_owner_lock & this ) with(this) {
+    __handle_waituntil_OR( blocked_threads );
+    select_node * node = &try_pop_front( blocked_threads );
+    if ( node ) {
+        owner = node->blocked_thread;
+        recursion_count = 1;
+        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
+        wake_one( blocked_threads, *node );
+    } else {
+        owner = 0p;
+        recursion_count = 0;
+    }
+}
+
+static inline void unlock( simple_owner_lock & this ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );

@@ -510 +530 @@
     recursion_count--;
     if ( recursion_count == 0 ) {
-        // pop_and_set_new_owner( this );
-        thread$ * t = &try_pop_front( blocked_threads );
-        owner = t;
-        recursion_count = ( t ? 1 : 0 );
-        unpark( t );
+        pop_node( this );
     }
     unlock( lock );
 }

-static inline void on_notify( simple_owner_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     // lock held
     if ( owner != 0p ) {
-        insert_last( blocked_threads, * t);
+        insert_last( blocked_threads, *(select_node *)t->link_node );
     // lock not held

@@ -534 +550 @@
 }

-static inline size_t on_wait( simple_owner_lock & this) with(this) {
+static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
     lock( lock __cfaabi_dbg_ctx2 );
     /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );

@@ -541 +557 @@
     size_t ret = recursion_count;

-    // pop_and_set_new_owner( this );
-
-    thread$ * t = &try_pop_front( blocked_threads );
-    owner = t;
-    recursion_count = ( t ? 1 : 0 );
-    unpark( t );
-
+    pop_node( this );
+
+    select_node node;
+    active_thread()->link_node = (void *)&node;
     unlock( lock );
+
+    pre_park_then_park( pp_fn, pp_datum );
+
     return ret;
 }

-static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+
+// waituntil() support
+static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
+    lock( lock __cfaabi_dbg_ctx2 );
+
+    // check if we can complete operation. If so race to establish winner in special OR case
+    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
+            unlock( lock );
+            return false;
+        }
+    }
+
+    if ( owner == active_thread() ) {
+        recursion_count++;
+        if ( node.park_counter ) __make_select_node_available( node );
+        unlock( lock );
+        return true;
+    }
+
+    if ( owner != 0p ) {
+        insert_last( blocked_threads, node );
+        unlock( lock );
+        return false;
+    }
+
+    owner = active_thread();
+    recursion_count = 1;
+
+    if ( node.park_counter ) __make_select_node_available( node );
+    unlock( lock );
+    return true;
+}
+
+static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
+    lock( lock __cfaabi_dbg_ctx2 );
+    if ( node`isListed ) {
+        remove( node );
+        unlock( lock );
+        return false;
+    }
+
+    if ( owner == active_thread() ) {
+        recursion_count--;
+        if ( recursion_count == 0 ) {
+            pop_node( this );
+        }
+    }
+    unlock( lock );
+    return false;
+}
+
+static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
+

 //-----------------------------------------------------------------------------

@@ -578 +648 @@

 // if this is called recursively IT WILL DEADLOCK!
-static inline void lock( spin_queue_lock & this) with(this) {
+static inline void lock( spin_queue_lock & this ) with(this) {
     mcs_spin_node node;
     lock( lock, node );

@@ -586 +656 @@
 }

-static inline void unlock( spin_queue_lock & this) with(this) {
+static inline void unlock( spin_queue_lock & this ) with(this) {
     __atomic_store_n(&held, false, __ATOMIC_RELEASE);
 }

-static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
-    unpark(t);
-}
-static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
-
+DEFAULT_ON_NOTIFY( spin_queue_lock )
+DEFAULT_ON_WAIT( spin_queue_lock )
+DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

 //-----------------------------------------------------------------------------

@@ -621 +688 @@

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( mcs_block_spin_lock & this) with(this) {
+static inline void lock( mcs_block_spin_lock & this ) with(this) {
     mcs_node node;
     lock( lock, node );

@@ -633 +700 @@
 }

-static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {lock(this); }
+DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
+DEFAULT_ON_WAIT( mcs_block_spin_lock )
+DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

 //-----------------------------------------------------------------------------

@@ -661 +728 @@

 // if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock( block_spin_lock & this) with(this) {
+static inline void lock( block_spin_lock & this ) with(this) {
     lock( lock );
     while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();

@@ -668 +735 @@
 }

-static inline void unlock( block_spin_lock & this) with(this) {
+static inline void unlock( block_spin_lock & this ) with(this) {
     __atomic_store_n(&held, false, __ATOMIC_RELEASE);
 }

-static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
+static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
     // first we acquire internal fast_block_lock
     lock( lock __cfaabi_dbg_ctx2 );

@@ -686 +753 @@
     unpark(t);
 }
-static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
+DEFAULT_ON_WAIT( block_spin_lock )
+static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
     // now we acquire the entire block_spin_lock upon waking up
     while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();

@@ -693 +760 @@
     unlock( lock ); // Now we release the internal fast_spin_lock
 }
-
-//-----------------------------------------------------------------------------
-// is_blocking_lock
-forall( L & | sized(L) )
-trait is_blocking_lock {
-    // For synchronization locks to use when acquiring
-    void on_notify( L &, struct thread$ * );
-
-    // For synchronization locks to use when releasing
-    size_t on_wait( L & );
-
-    // to set recursion count after getting signalled;
-    void on_wakeup( L &, size_t recursion );
-};

 //-----------------------------------------------------------------------------

@@ -714 +767 @@
 forall(L & | is_blocking_lock(L)) {
     struct info_thread;
-
-    // // for use by sequence
-    // info_thread(L) *& Back( info_thread(L) * this );
-    // info_thread(L) *& Next( info_thread(L) * this );

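As a reading aid for the changeset above: the revised is_blocking_lock trait threads a pre-park hook through on_wait, so a primitive built on top of a lock can run set-up work (for example, publishing its wait state) after the lock has released or wait-morphed the caller but before the thread parks, which closes the lost-wakeup window. The sketch below shows the expected call shape under that assumption; generic_wait and publish_wait are illustrative names only, not routines defined in locks.hfa.

// Illustrative sketch only: publish_wait and generic_wait are hypothetical.
static inline void publish_wait( void * datum ) {
    // e.g. make a queued wait node visible to signalling threads before parking
}

forall( L & | is_blocking_lock( L ) )
static inline void generic_wait( L & l ) {
    // on_wait releases (or wait-morphs) the lock, runs the hook, then parks this thread
    size_t recursion = on_wait( l, publish_wait, 0p );
    // ... a signalling thread later calls on_notify( l, t ), which unparks this thread ...
    on_wakeup( l, recursion ); // restore the recursion count / reacquire if the lock requires it
}

The DEFAULT_ON_NOTIFY / DEFAULT_ON_WAIT / DEFAULT_ON_WAKEUP_* macros introduced at the top of the diff generate exactly these three routines for locks that do not wait-morph, which is why several hand-written on_notify/on_wait/on_wakeup definitions are replaced by one-line macro invocations.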