Changes in libcfa/src [8fd1b7c:2d0f918]

Location: libcfa/src
Files: 4 edited

Legend: each hunk starts with an "@@ -old-start,count +new-start,count @@" header giving its line range in r8fd1b7c and r2d0f918. Unchanged context lines carry no marker, lines removed in r2d0f918 are prefixed with "-", and lines added are prefixed with "+".
libcfa/src/bits/weakso_locks.cfa
@@ -25,5 +25,5 @@
void unlock( blocking_lock & ) {}
void on_notify( blocking_lock &, struct thread$ * ) {}
-size_t on_wait( blocking_lock &, void (*pp_fn)( void * ), void * pp_datum ) { return 0; }
+size_t on_wait( blocking_lock & ) { return 0; }
void on_wakeup( blocking_lock &, size_t ) {}
size_t wait_count( blocking_lock & ) { return 0; }
libcfa/src/bits/weakso_locks.hfa
@@ -57,5 +57,5 @@
void unlock( blocking_lock & this ) OPTIONAL_THREAD;
void on_notify( blocking_lock & this, struct thread$ * t ) OPTIONAL_THREAD;
-size_t on_wait( blocking_lock & this, void (*pp_fn)( void * ), void * pp_datum ) OPTIONAL_THREAD;
+size_t on_wait( blocking_lock & this ) OPTIONAL_THREAD;
void on_wakeup( blocking_lock & this, size_t ) OPTIONAL_THREAD;
size_t wait_count( blocking_lock & this ) OPTIONAL_THREAD;
@@ -75,5 +75,5 @@
static inline bool try_lock ( multiple_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( multiple_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline size_t on_wait ( multiple_acquisition_lock & this, void (*pp_fn)( void * ), void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline size_t on_wait ( multiple_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( multiple_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( multiple_acquisition_lock & this, struct thread$ * t ){ on_notify( (blocking_lock &)this, t ); }
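With the pre-park callback removed, on_wait now releases the lock and parks entirely inside the call, and the recursion count it returns is handed back through on_wakeup. A minimal, hedged sketch of that internal contract (my_block_on is a hypothetical helper for illustration only, not part of the changeset; it assumes the caller has already been placed on some waiter queue so that another thread will later call on_notify and unpark it):

// Hypothetical sketch of the simplified on_wait/on_wakeup contract.
// Precondition: this thread is already queued somewhere, so a signaller
// will eventually call on_notify( l, this_thread ) to wake it.
static void my_block_on( multiple_acquisition_lock & l ) {
    size_t recursion = on_wait( l );   // releases l and parks internally
    on_wakeup( l, recursion );         // restores the saved recursion depth
}

The callback that previously ran between the release and the park is gone; anything that must happen before blocking (such as alarm registration) is now done by the caller before it invokes on_wait, as the locks.cfa changes below show.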
libcfa/src/concurrency/locks.cfa
@@ -171,5 +171,5 @@
}

-size_t on_wait( blocking_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
+size_t on_wait( blocking_lock & this ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -184,5 +184,5 @@
    unlock( lock );

-    pre_park_then_park( pp_fn, pp_datum );
+    park();

    return ret;
@@ -396,13 +396,12 @@
}

-static size_t block_and_get_recursion( info_thread(L) & i, __cfa_pre_park pp_fn, void * pp_datum ) {
+static size_t block_and_get_recursion( info_thread(L) & i ) {
    size_t recursion_count = 0;
    if ( i.lock ) {
        // if lock was passed get recursion count to reset to after waking thread
-        recursion_count = on_wait( *i.lock, pp_fn, pp_datum ); // this call blocks
-    } else pre_park_then_park( pp_fn, pp_datum );
+        recursion_count = on_wait( *i.lock ); // this call blocks
+    } else park( );
    return recursion_count;
}
-static size_t block_and_get_recursion( info_thread(L) & i ) { return block_and_get_recursion( i, pre_park_noop, 0p ); }

// helper for wait()'s' with no timeout
@@ -425,6 +424,4 @@
    queue_info_thread( this, i );

-static void cond_alarm_register( void * node_ptr ) { register_self( (alarm_node_t *)node_ptr ); }
-
// helper for wait()'s' with a timeout
static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
@@ -436,7 +433,7 @@

    // registers alarm outside cond lock to avoid deadlock
-    //register_self( &node_wrap.alarm_node );
+    register_self( &node_wrap.alarm_node );

    // blocks here
-    size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
+    size_t recursion_count = block_and_get_recursion( info );
    // park();
@@ -506,5 +503,5 @@
    info_thread( L ) i = { active_thread(), info, &l };
    insert_last( blocked_threads, i );
-    size_t recursion_count = on_wait( *i.lock, pre_park_noop, 0p ); // blocks here
+    size_t recursion_count = on_wait( *i.lock ); // blocks here
    // park( );
    on_wakeup(*i.lock, recursion_count);
@@ -555,4 +552,5 @@
    // return recursion_count;
    // }
+

static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
@@ -564,8 +562,8 @@

    // registers alarm outside cond lock to avoid deadlock
-    // register_self( &node_wrap.alarm_node ); // C_TODO: fix race: registers itself and then alarm handler calls on_notify before block_and_get_recursion is run
+    register_self( &node_wrap.alarm_node );

    // blocks here
-    size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
+    size_t recursion_count = block_and_get_recursion( info );
    // park();

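For orientation, the user-facing wait API is untouched by this revision; only the internal blocking step changes. A hedged usage sketch of the path that reaches on_wait (it assumes the wait( condition_variable(L) &, L & ) overload declared elsewhere in locks.hfa; error handling omitted):

owner_lock l;
condition_variable( owner_lock ) cv;

void waiter() {
    lock( l );
    // Inside wait: the thread is queued on cv, then block_and_get_recursion
    // calls on_wait( l ), which releases l and parks; after a signal,
    // on_wakeup( l, recursion_count ) restores the lock's recursion depth.
    // The timed variants additionally register the alarm via register_self
    // before blocking, as shown in queue_info_thread_timeout above.
    wait( cv, l );
    unlock( l );
}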
libcfa/src/concurrency/locks.hfa
@@ -44,27 +44,4 @@
// - change messy big blocking lock from inheritance to composition to remove need for flags

-typedef void (*__cfa_pre_park)( void * );
-
-static inline void pre_park_noop( void * ) {}
-
-//-----------------------------------------------------------------------------
-// is_blocking_lock
-forall( L & | sized(L) )
-trait is_blocking_lock {
-    // For synchronization locks to use when acquiring
-    void on_notify( L &, struct thread$ * );
-
-    // For synchronization locks to use when releasing
-    size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
-
-    // to set recursion count after getting signalled;
-    void on_wakeup( L &, size_t recursion );
-};
-
-static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
-    pp_fn( pp_datum );
-    park();
-}
-
//-----------------------------------------------------------------------------
// Semaphore
@@ -92,5 +69,5 @@
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
@@ -109,5 +86,5 @@
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
-static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
+static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
@@ -241,9 +218,5 @@

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( futex_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( futex_mutex & f ) { unlock(f); park(); return 0; }

// to set recursion count after getting signalled;
@@ -315,8 +288,4 @@

static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); }
-static inline size_t on_wait( go_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( go_mutex & f ) { unlock( f ); park(); return 0; }
static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
@@ -393,8 +362,4 @@

static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); }
-static inline size_t on_wait( exp_backoff_then_block_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( exp_backoff_then_block_lock & this ) { unlock( this ); park(); return 0; }
static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); }
@@ -454,8 +419,4 @@
    unlock( lock );
}
-static inline size_t on_wait( fast_block_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( fast_block_lock & this) { unlock(this); park(); return 0; }
static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { }
@@ -536,5 +497,5 @@
}

-static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
+static inline void on_notify(simple_owner_lock & this, thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    // lock held
@@ -551,5 +512,5 @@
}

-static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
+static inline size_t on_wait( simple_owner_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
@@ -563,6 +524,5 @@
    active_thread()->link_node = (void *)&node;
    unlock( lock );
-
-    pre_park_then_park( pp_fn, pp_datum );
+    park();

    return ret;
@@ -664,8 +624,4 @@
    unpark(t);
}
-static inline size_t on_wait( spin_queue_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( spin_queue_lock & this ) { unlock( this ); park(); return 0; }
static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); }
@@ -709,8 +665,4 @@

static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); }
-static inline size_t on_wait( mcs_block_spin_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( mcs_block_spin_lock & this) { unlock( this ); park(); return 0; }
static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); }
@@ -765,9 +717,5 @@
    unpark(t);
}
-static inline size_t on_wait( block_spin_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
-    unlock( this );
-    pre_park_then_park( pp_fn, pp_datum );
-    return 0;
-}
+static inline size_t on_wait( block_spin_lock & this ) { unlock( this ); park(); return 0; }
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
    // now we acquire the entire block_spin_lock upon waking up
@@ -776,4 +724,18 @@
    unlock( lock ); // Now we release the internal fast_spin_lock
}
+
+//-----------------------------------------------------------------------------
+// is_blocking_lock
+forall( L & | sized(L) )
+trait is_blocking_lock {
+    // For synchronization locks to use when acquiring
+    void on_notify( L &, struct thread$ * );
+
+    // For synchronization locks to use when releasing
+    size_t on_wait( L & );
+
+    // to set recursion count after getting signalled;
+    void on_wakeup( L &, size_t recursion );
+};

//-----------------------------------------------------------------------------
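After this revision the is_blocking_lock trait (now declared after the concrete locks, near the bottom of the header) asks for only three members, with on_wait back to its single-parameter form. A minimal, hedged sketch of a lock that would satisfy the trait, modelled on the one-line on_wait pattern used by spin_queue_lock and friends above (toy_lock is hypothetical and not part of libcfa; it wraps the existing fast_block_lock purely to show the required members):

// Hypothetical toy lock, shaped to satisfy is_blocking_lock after this change.
struct toy_lock { fast_block_lock impl; };

static inline void   lock     ( toy_lock & this ) { lock( this.impl ); }
static inline void   unlock   ( toy_lock & this ) { unlock( this.impl ); }
static inline void   on_notify( toy_lock & this, struct thread$ * t ) { unpark( t ); }
static inline size_t on_wait  ( toy_lock & this ) { unlock( this ); park(); return 0; } // release, then block
static inline void   on_wakeup( toy_lock & this, size_t recursion ) { lock( this ); }   // reacquire on wake

Compared with the previous trait, only the on_wait signature changes; on_notify and on_wakeup keep their former shapes.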