Changeset 74ec742 for libcfa/src/concurrency/monitor.cfa
- Timestamp: May 20, 2022, 10:36:45 AM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 25fa20a
- Parents: 29d8c02 (diff), 7831e8fb (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
Legend:
- Unmodified lines are prefixed with a space
- Added lines are prefixed with +
- Removed lines are prefixed with -
libcfa/src/concurrency/monitor.cfa
--- libcfa/src/concurrency/monitor.cfa (r29d8c02)
+++ libcfa/src/concurrency/monitor.cfa (r74ec742)
@@ -44,4 +44,8 @@
 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );

+static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+static inline void ?{}(__condition_criterion_t & this );
+static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+
 static inline void init ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
@@ -243,5 +247,5 @@

 // Leave single monitor
-void __leave( monitor$ * this ) {
+static void __leave( monitor$ * this ) {
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
@@ -278,5 +282,5 @@

 // Leave single monitor for the last time
-void __dtor_leave( monitor$ * this, bool join ) {
+static void __dtor_leave( monitor$ * this, bool join ) {
 	__cfaabi_dbg_debug_do(
 		if( active_thread() != this->owner ) {
@@ -344,5 +348,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
 	thread$ * thrd = active_thread();

@@ -369,5 +373,5 @@
 }

-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
 	this{ m, count, 0p };
 }
@@ -375,5 +379,5 @@

 // Dtor for monitor guard
-void ^?{}( monitor_guard_t & this ) {
+void ^?{}( monitor_guard_t & this ) libcfa_public {
 	// __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);

@@ -389,5 +393,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
+void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
 	// optimization
 	thread$ * thrd = active_thread();
@@ -409,5 +413,5 @@

 // Dtor for monitor guard
-void ^?{}( monitor_dtor_guard_t & this ) {
+void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
 	// Leave the monitors in order
 	__dtor_leave( this.m, this.join );
@@ -419,5 +423,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
 	this.waiting_thread = waiting_thread;
 	this.count = count;
@@ -426,5 +430,5 @@
 }

-void ?{}(__condition_criterion_t & this ) with( this ) {
+static void ?{}(__condition_criterion_t & this ) with( this ) {
 	ready = false;
 	target = 0p;
@@ -433,5 +437,5 @@
 }

-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
+static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
 	this.ready = false;
 	this.target = target;
@@ -442,5 +446,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition & this, uintptr_t user_info = 0 ) {
+void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
 	brand_condition( this );

@@ -496,5 +500,5 @@
 }

-bool signal( condition & this ) {
+bool signal( condition & this ) libcfa_public {
 	if( is_empty( this ) ) { return false; }

@@ -538,5 +542,5 @@
 }

-bool signal_block( condition & this ) {
+bool signal_block( condition & this ) libcfa_public {
 	if( !this.blocked.head ) { return false; }

@@ -586,5 +590,5 @@

 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition & this ) {
+uintptr_t front( condition & this ) libcfa_public {
 	verifyf( !is_empty(this),
 		"Attempt to access user data on an empty condition.\n"
@@ -608,5 +612,5 @@
 // setup mask
 // block
-void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
+void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
 	// This statment doesn't have a contiguous list of monitors...
 	// Create one!
@@ -994,5 +998,5 @@
 // Can't be accepted since a mutex stmt is effectively an anonymous routine
 // Thus we do not need a monitor group
-void lock( monitor$ * this ) {
+void lock( monitor$ * this ) libcfa_public {
 	thread$ * thrd = active_thread();

@@ -1046,5 +1050,5 @@
 // Leave routine for mutex stmt
 // Is just a wrapper around __leave for the is_lock trait to see
-void unlock( monitor$ * this ) { __leave( this ); }
+void unlock( monitor$ * this ) libcfa_public { __leave( this ); }

 // Local Variables: //
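The pattern running through this merge is symbol-visibility hygiene: helpers used only inside monitor.cfa (__leave, __dtor_leave, and the __condition_node_t/__condition_criterion_t constructors, which gain forward declarations near the top of the file) become static, while user-facing routines are annotated libcfa_public. A rough sketch of why that matters, assuming libcfa is built with hidden default visibility and that libcfa_public expands to a GCC/Clang visibility attribute (the macro's actual definition is not shown in this changeset):

    // Assumed definition; libcfa's real macro may differ.
    #define libcfa_public __attribute__((visibility("default")))

    // Internal linkage: the symbol never escapes this translation unit,
    // so it cannot clash with user code or bloat the dynamic symbol table.
    static void __leave_sketch( void ) { /* release the monitor */ }

    // Exported API: visible to programs linking against the shared library
    // even when it is compiled with -fvisibility=hidden.
    void unlock_sketch( void ) libcfa_public;
    void unlock_sketch( void ) { __leave_sketch(); }

The names __leave_sketch and unlock_sketch are placeholders; the real routines take a monitor$ * and appear in the hunks above.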
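Several of the newly annotated routines (wait, signal, signal_block, front) form the condition-variable API of CFA monitors. For orientation, a small illustrative bounded-buffer monitor written against that API; this example is not part of the changeset:

    monitor buffer {
        condition notFull, notEmpty;
        int count;                                // items currently stored
    };

    void put( buffer & mutex b ) {                // mutex parameter acquires the monitor
        if ( b.count == 10 ) wait( b.notFull );   // block until a slot frees up
        b.count += 1;
        signal( b.notEmpty );                     // unblock one waiting consumer, if any
    }

    void take( buffer & mutex b ) {
        if ( b.count == 0 ) wait( b.notEmpty );   // block until an item arrives
        b.count -= 1;
        signal( b.notFull );                      // unblock one waiting producer, if any
    }

Because CFA monitors prevent barging (the signalled thread resumes before any new caller can enter), a plain if suffices where a while loop would be required with, say, pthreads condition variables.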
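The final two hunks annotate lock and unlock on monitor$, which the in-file comments describe as the wrapper pair that backs the mutex statement and lets a bare monitor satisfy the is_lock trait. A sketch of the intended usage as suggested by those comments, again not taken from the changeset:

    monitor M { int value; };
    M shared;

    void f() {
        mutex( shared ) {        // acquire the monitor for the block's duration
            shared.value += 1;   // critical section
        }                        // released automatically on every exit path
    }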