Changeset ac2b598 for libcfa/src/concurrency/monitor.cfa

- Timestamp: Feb 24, 2020, 2:21:03 PM
- Branches: arm-eh, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 959f6ad
- Parents: 0f2c555
- File: 1 edited
libcfa/src/concurrency/monitor.cfa
--- libcfa/src/concurrency/monitor.cfa	(r0f2c555)
+++ libcfa/src/concurrency/monitor.cfa	(rac2b598)

 // file "LICENCE" distributed with Cforall.
 //
-// monitor_desc.c --
+// $monitor.c --
 //
 // Author : Thierry Delisle
…
 //-----------------------------------------------------------------------------
 // Forward declarations
-static inline void __set_owner ( monitor_desc * this, thread_desc * owner );
-static inline void __set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
-static inline void set_mask ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
-static inline void reset_mask( monitor_desc * this );
-
-static inline thread_desc * next_thread( monitor_desc * this );
-static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
+static inline void __set_owner ( $monitor * this, $thread * owner );
+static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
+static inline void set_mask ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
+static inline void reset_mask( $monitor * this );
+
+static inline $thread * next_thread( $monitor * this );
+static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
 
 static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count );
-static inline void lock_all ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+static inline void lock_all ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
-static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
-
-static inline void save ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
-static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
-
-static inline void init ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
-static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
-
-static inline thread_desc * check_condition ( __condition_criterion_t * );
+static inline void unlock_all( $monitor * locks [], __lock_size_t count );
+
+static inline void save ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+
+static inline void init ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+
+static inline $thread * check_condition ( __condition_criterion_t * );
 static inline void brand_condition ( condition & );
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
+static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
 
 forall(dtype T | sized( T ))
 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
 static inline __lock_size_t count_max ( const __waitfor_mask_t & mask );
-static inline __lock_size_t aggregate ( monitor_desc * storage [], const __waitfor_mask_t & mask );
+static inline __lock_size_t aggregate ( $monitor * storage [], const __waitfor_mask_t & mask );
 
 //-----------------------------------------------------------------------------
…
 
 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \
-	monitor_desc ** monitors = mons; /* Save the targeted monitors */ \
+	$monitor ** monitors = mons; /* Save the targeted monitors */ \
 	__lock_size_t count = cnt; /* Save the count to a local variable */ \
 	unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \
…
 // Enter/Leave routines
 // Enter single monitor
-static void __enter( monitor_desc * this, const __monitor_group_t & group ) {
+static void __enter( $monitor * this, const __monitor_group_t & group ) {
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
 	// Interrupts disable inside critical section
-	thread_desc * thrd = kernelTLS.this_thread;
+	$thread * thrd = kernelTLS.this_thread;
 
 	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
…
 }
 
-static void __dtor_enter( monitor_desc * this, fptr_t func ) {
+static void __dtor_enter( $monitor * this, fptr_t func ) {
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
 	// Interrupts disable inside critical section
-	thread_desc * thrd = kernelTLS.this_thread;
+	$thread * thrd = kernelTLS.this_thread;
 
 	__cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
…
 
 	__lock_size_t count = 1;
-	monitor_desc ** monitors = &this;
+	$monitor ** monitors = &this;
 	__monitor_group_t group = { &this, 1, func };
 	if( is_accepted( this, group) ) {
…
 
 // Leave single monitor
-void __leave( monitor_desc * this ) {
+void __leave( $monitor * this ) {
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
…
 
 	// Get the next thread, will be null on low contention monitor
-	thread_desc * new_owner = next_thread( this );
+	$thread * new_owner = next_thread( this );
 
 	// Check the new owner is consistent with who we wake-up
…
 
 // Leave single monitor for the last time
-void __dtor_leave( monitor_desc * this ) {
+void __dtor_leave( $monitor * this ) {
 	__cfaabi_dbg_debug_do(
 		if( TL_GET( this_thread ) != this->owner ) {
…
 // Should never return
 void __cfactx_thrd_leave() {
-	thread_desc * thrd = TL_GET( this_thread );
-	monitor_desc * this = &thrd->self_mon;
+	$thread * thrd = TL_GET( this_thread );
+	$monitor * this = &thrd->self_mon;
 
 	// Lock the monitor now
…
 
 	// Fetch the next thread, can be null
-	thread_desc * new_owner = next_thread( this );
+	$thread * new_owner = next_thread( this );
 
 	// Release the monitor lock
…
 // Leave multiple monitor
 // relies on the monitor array being sorted
-static inline void leave( monitor_desc * monitors [], __lock_size_t count) {
+static inline void leave($monitor * monitors [], __lock_size_t count) {
 	for( __lock_size_t i = count - 1; i >= 0; i--) {
 		__leave( monitors[i] );
…
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
-	thread_desc * thrd = TL_GET( this_thread );
+void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
+	$thread * thrd = TL_GET( this_thread );
 
 	// Store current array
…
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
+void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) {
 	// optimization
-	thread_desc * thrd = TL_GET( this_thread );
+	$thread * thrd = TL_GET( this_thread );
 
 	// Store current array
…
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
 	this.waiting_thread = waiting_thread;
 	this.count = count;
…
 }
 
-void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
+void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
 	this.ready = false;
 	this.target = target;
…
 	// Find the next thread(s) to run
 	__lock_size_t thread_count = 0;
-	thread_desc * threads[ count ];
+	$thread * threads[ count ];
 	__builtin_memset( threads, 0, sizeof( threads ) );
 
…
 	// Remove any duplicate threads
 	for( __lock_size_t i = 0; i < count; i++) {
-		thread_desc * new_owner = next_thread( monitors[i] );
+		$thread * new_owner = next_thread( monitors[i] );
 		insert_unique( threads, thread_count, new_owner );
 	}
…
 	//Some more checking in debug
 	__cfaabi_dbg_debug_do(
-		thread_desc * this_thrd = TL_GET( this_thread );
+		$thread * this_thrd = TL_GET( this_thread );
 		if ( this.monitor_count != this_thrd->monitors.size ) {
 			abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
…
 
 	//Find the thread to run
-	thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
+	$thread * signallee = pop_head( this.blocked )->waiting_thread;
 	/* paranoid */ verify( signallee->next == 0p );
 	__set_owner( monitors, count, signallee );
…
 		// Create one!
 		__lock_size_t max = count_max( mask );
-		monitor_desc * mon_storage[max];
+		$monitor * mon_storage[max];
 		__builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
 		__lock_size_t actual_count = aggregate( mon_storage, mask );
…
 	{
 		// Check if the entry queue
-		thread_desc * next; int index;
+		$thread * next; int index;
 		[next, index] = search_entry_queue( mask, monitors, count );
 
…
 			verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );
 
-			monitor_desc * mon2dtor = accepted[0];
+			$monitor * mon2dtor = accepted[0];
 			verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
 
…
 // Utilities
 
-static inline void __set_owner( monitor_desc * this, thread_desc * owner ) {
+static inline void __set_owner( $monitor * this, $thread * owner ) {
 	/* paranoid */ verify( this->lock.lock );
 
…
 }
 
-static inline void __set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
+static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
 	/* paranoid */ verify ( monitors[0]->lock.lock );
 	/* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
…
 }
 
-static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
+static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
 	for( __lock_size_t i = 0; i < count; i++) {
 		storage[i]->mask = mask;
…
 }
 
-static inline void reset_mask( monitor_desc * this ) {
+static inline void reset_mask( $monitor * this ) {
 	this->mask.accepted = 0p;
 	this->mask.data = 0p;
…
 }
 
-static inline thread_desc * next_thread( monitor_desc * this ) {
+static inline $thread * next_thread( $monitor * this ) {
 	//Check the signaller stack
 	__cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
…
 	// No signaller thread
 	// Get the next thread in the entry_queue
-	thread_desc * new_owner = pop_head( this->entry_queue );
+	$thread * new_owner = pop_head( this->entry_queue );
 	/* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
 	/* paranoid */ verify( !new_owner || new_owner->next == 0p );
…
 }
 
-static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
+static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
 	__acceptable_t * it = this->mask.data; // Optim
 	__lock_size_t count = this->mask.size;
…
 }
 
-static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
 	for( __lock_size_t i = 0; i < count; i++) {
 		(criteria[i]){ monitors[i], waiter };
…
 }
 
-static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
 	for( __lock_size_t i = 0; i < count; i++) {
 		(criteria[i]){ monitors[i], waiter };
…
 }
 
-static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
+static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
 		__spinlock_t * l = &source[i]->lock;
…
 }
 
-static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
+static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
 	for( __lock_size_t i = 0; i < count; i++ ) {
 		unlock( locks[i]->lock );
…
 
 static inline void save(
-	monitor_desc * ctx [],
+	$monitor * ctx [],
 	__lock_size_t count,
 	__attribute((unused)) __spinlock_t * locks [],
…
 
 static inline void restore(
-	monitor_desc * ctx [],
+	$monitor * ctx [],
 	__lock_size_t count,
 	__spinlock_t * locks [],
…
 // 2 - Checks if all the monitors are ready to run
 //     if so return the thread to run
-static inline thread_desc * check_condition( __condition_criterion_t * target ) {
+static inline $thread * check_condition( __condition_criterion_t * target ) {
 	__condition_node_t * node = target->owner;
 	unsigned short count = node->count;
…
 
 static inline void brand_condition( condition & this ) {
-	thread_desc * thrd = TL_GET( this_thread );
+	$thread * thrd = TL_GET( this_thread );
 	if( !this.monitors ) {
 		// __cfaabi_dbg_print_safe( "Branding\n" );
…
 		this.monitor_count = thrd->monitors.size;
 
-		this.monitors = ( monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
+		this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
 		for( int i = 0; i < this.monitor_count; i++ ) {
 			this.monitors[i] = thrd->monitors[i];
…
 }
 
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
-
-	__queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
+static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
+
+	__queue_t($thread) & entry_queue = monitors[0]->entry_queue;
 
 	// For each thread in the entry-queue
-	for(	thread_desc ** thrd_it = &entry_queue.head;
+	for(	$thread ** thrd_it = &entry_queue.head;
 		*thrd_it != 1p;
 		thrd_it = &(*thrd_it)->next
…
 }
 
-static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
+static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
 	__lock_size_t size = 0;
 	for( __lock_size_t i = 0; i < mask.size; i++ ) {
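
The changeset is a mechanical rename of the runtime descriptor types: monitor_desc becomes $monitor and thread_desc becomes $thread. The user-facing monitor API is untouched, so code written against it compiles unchanged. For context, here is a minimal single-slot bounded-buffer sketch of that API using standard C∀ monitor syntax; Buffer, put, and take are illustrative names, not part of this changeset. A mutex parameter acquires the monitor on entry and releases it on exit (through the monitor_guard_t constructor shown in the diff), while wait and signal exercise the internal-scheduling machinery (__condition_node_t and the signal stack consulted by next_thread) that this file implements.

	monitor Buffer {                          // hypothetical single-slot buffer
		condition not_full, not_empty;        // backed by __condition_node_t / __condition_criterion_t
		int elem;
		bool full;
	};

	void put( Buffer & mutex b, int v ) {     // mutex: monitor acquired on entry, released on exit
		if ( b.full ) wait( b.not_full );     // block, releasing the monitor to the next owner
		b.elem = v;  b.full = true;
		signal( b.not_empty );                // waiter resumes via the signal stack in next_thread
	}

	int take( Buffer & mutex b ) {
		if ( ! b.full ) wait( b.not_empty );
		b.full = false;
		signal( b.not_full );
		return b.elem;
	}

An if (rather than a retry loop) suffices because C∀ signalling is barging-free: monitor ownership passes directly to the signalled thread, which is what __set_owner( monitors, count, signallee ) in the diff arranges.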
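The __waitfor_mask_t, is_accepted, and search_entry_queue routines in the diff implement C∀'s external scheduling, where a mutex routine accepts a specific call instead of waiting on a condition. A hedged sketch of the statement they serve; Vault, deposit, and withdraw are hypothetical names for illustration only.

	monitor Vault { int code; };              // hypothetical monitor

	void deposit( Vault & mutex v, int code ) {
		v.code = code;
	}

	int withdraw( Vault & mutex v ) {
		waitfor( deposit, v );                // block until a caller of deposit( v, ... ) is accepted;
		                                      // the runtime matches waiting callers against the accept
		                                      // mask via is_accepted / search_entry_queue from the diff
		return v.code;
	}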