Changeset e84ab3d for libcfa/src/concurrency/monitor.cfa
- Timestamp: Jul 5, 2021, 4:44:20 PM (3 years ago)
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 7f62b708
- Parents: ee23a8d
- File: 1 edited
Legend:
- Unmodified: lines with no leading marker
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
libcfa/src/concurrency/monitor.cfa
ree23a8d → re84ab3d

  // file "LICENCE" distributed with Cforall.
  //
- // $monitor.c --
+ // monitor.cfa --
  //
  // Author : Thierry Delisle
…
  //-----------------------------------------------------------------------------
  // Forward declarations
- static inline void __set_owner ( $monitor * this, $thread * owner );
- static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
- static inline void set_mask ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
- static inline void reset_mask( $monitor * this );
-
- static inline $thread * next_thread( $monitor * this );
- static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
+ static inline void __set_owner ( monitor$ * this, thread$ * owner );
+ static inline void __set_owner ( monitor$ * storage [], __lock_size_t count, thread$ * owner );
+ static inline void set_mask ( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
+ static inline void reset_mask( monitor$ * this );
+
+ static inline thread$ * next_thread( monitor$ * this );
+ static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors );

  static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count );
- static inline void lock_all ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+ static inline void lock_all ( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
  static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
- static inline void unlock_all( $monitor * locks [], __lock_size_t count );
-
- static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
- static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
-
- static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
- static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
-
- static inline $thread * check_condition ( __condition_criterion_t * );
+ static inline void unlock_all( monitor$ * locks [], __lock_size_t count );
+
+ static inline void save   ( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+ static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+
+ static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+ static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+
+ static inline thread$ * check_condition ( __condition_criterion_t * );
  static inline void brand_condition ( condition & );
- static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
+ static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count );

  forall(T & | sized( T ))
  static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
  static inline __lock_size_t count_max ( const __waitfor_mask_t & mask );
- static inline __lock_size_t aggregate ( $monitor * storage [], const __waitfor_mask_t & mask );
+ static inline __lock_size_t aggregate ( monitor$ * storage [], const __waitfor_mask_t & mask );

  //-----------------------------------------------------------------------------
…

  #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \
-     $monitor ** monitors = mons; /* Save the targeted monitors */ \
+     monitor$ ** monitors = mons; /* Save the targeted monitors */ \
      __lock_size_t count = cnt; /* Save the count to a local variable */ \
      unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \
…
  // Enter/Leave routines
  // Enter single monitor
- static void __enter( $monitor * this, const __monitor_group_t & group ) {
-     $thread * thrd = active_thread();
+ static void __enter( monitor$ * this, const __monitor_group_t & group ) {
+     thread$ * thrd = active_thread();

      // Lock the monitor spinlock
…
  }

- static void __dtor_enter( $monitor * this, fptr_t func, bool join ) {
-     $thread * thrd = active_thread();
+ static void __dtor_enter( monitor$ * this, fptr_t func, bool join ) {
+     thread$ * thrd = active_thread();
      #if defined( __CFA_WITH_VERIFY__ )
          bool is_thrd = this == &thrd->self_mon;
…
      // because join will not release the monitor after it executed.
      // to avoid that it sets the owner to the special value thrd | 1p before exiting
-     else if( this->owner == ($thread*)(1 | (uintptr_t)thrd) ) {
+     else if( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
          // restore the owner and just return
          __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
…

      __lock_size_t count = 1;
-     $monitor ** monitors = &this;
+     monitor$ ** monitors = &this;
      __monitor_group_t group = { &this, 1, func };
      if( is_accepted( this, group) ) {
…

  // Leave single monitor
- void __leave( $monitor * this ) {
+ void __leave( monitor$ * this ) {
      // Lock the monitor spinlock
      lock( this->lock __cfaabi_dbg_ctx2 );
…

      // Get the next thread, will be null on low contention monitor
-     $thread * new_owner = next_thread( this );
+     thread$ * new_owner = next_thread( this );

      // Check the new owner is consistent with who we wake-up
…

  // Leave single monitor for the last time
- void __dtor_leave( $monitor * this, bool join ) {
+ void __dtor_leave( monitor$ * this, bool join ) {
      __cfaabi_dbg_debug_do(
          if( active_thread() != this->owner ) {
…
      )

-     this->owner = ($thread*)(1 | (uintptr_t)this->owner);
- }
-
- void __thread_finish( $thread * thrd ) {
-     $monitor * this = &thrd->self_mon;
+     this->owner = (thread$*)(1 | (uintptr_t)this->owner);
+ }
+
+ void __thread_finish( thread$ * thrd ) {
+     monitor$ * this = &thrd->self_mon;

      // Lock the monitor now
…
      /* paranoid */ verify( this->lock.lock );
      /* paranoid */ verify( thrd->context.SP );
-     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
-     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
+     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
+     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
      /* paranoid */ verify( ! __preemption_enabled() );
…

      // Fetch the next thread, can be null
-     $thread * new_owner = next_thread( this );
+     thread$ * new_owner = next_thread( this );

      // Mark the state as fully halted
…
  // Leave multiple monitor
  // relies on the monitor array being sorted
- static inline void leave( $monitor * monitors [], __lock_size_t count) {
+ static inline void leave(monitor$ * monitors [], __lock_size_t count) {
      for( __lock_size_t i = count - 1; i >= 0; i--) {
          __leave( monitors[i] );
…
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
-     $thread * thrd = active_thread();
+ void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+     thread$ * thrd = active_thread();

      // Store current array
…
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) {
+ void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
      // optimization
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();

      // Store current array
…
  //-----------------------------------------------------------------------------
  // Internal scheduling types
- void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+ void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
      this.waiting_thread = waiting_thread;
      this.count = count;
…
  }

- void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
+ void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
      this.ready = false;
      this.target = target;
…
      // Find the next thread(s) to run
      __lock_size_t thread_count = 0;
-     $thread * threads[ count ];
+     thread$ * threads[ count ];
      __builtin_memset( threads, 0, sizeof( threads ) );

…
      // Remove any duplicate threads
      for( __lock_size_t i = 0; i < count; i++) {
-         $thread * new_owner = next_thread( monitors[i] );
+         thread$ * new_owner = next_thread( monitors[i] );
          insert_unique( threads, thread_count, new_owner );
      }
…
      //Some more checking in debug
      __cfaabi_dbg_debug_do(
-         $thread * this_thrd = active_thread();
+         thread$ * this_thrd = active_thread();
          if ( this.monitor_count != this_thrd->monitors.size ) {
              abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
…

      //Find the thread to run
-     $thread * signallee = pop_head( this.blocked )->waiting_thread;
+     thread$ * signallee = pop_head( this.blocked )->waiting_thread;
      __set_owner( monitors, count, signallee );

…
      // Create one!
      __lock_size_t max = count_max( mask );
-     $monitor * mon_storage[max];
+     monitor$ * mon_storage[max];
      __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
      __lock_size_t actual_count = aggregate( mon_storage, mask );
…
      {
          // Check if the entry queue
-         $thread * next; int index;
+         thread$ * next; int index;
          [next, index] = search_entry_queue( mask, monitors, count );

…
          verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );

-         $monitor * mon2dtor = accepted[0];
+         monitor$ * mon2dtor = accepted[0];
          verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );

…
  // Utilities

- static inline void __set_owner( $monitor * this, $thread * owner ) {
+ static inline void __set_owner( monitor$ * this, thread$ * owner ) {
      /* paranoid */ verify( this->lock.lock );

…
  }

- static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
+ static inline void __set_owner( monitor$ * monitors [], __lock_size_t count, thread$ * owner ) {
      /* paranoid */ verify ( monitors[0]->lock.lock );
      /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
…
  }

- static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
+ static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
      for( __lock_size_t i = 0; i < count; i++) {
          storage[i]->mask = mask;
…
  }

- static inline void reset_mask( $monitor * this ) {
+ static inline void reset_mask( monitor$ * this ) {
      this->mask.accepted = 0p;
      this->mask.data = 0p;
…
  }

- static inline $thread * next_thread( $monitor * this ) {
+ static inline thread$ * next_thread( monitor$ * this ) {
      //Check the signaller stack
      __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
…
      // No signaller thread
      // Get the next thread in the entry_queue
-     $thread * new_owner = pop_head( this->entry_queue );
+     thread$ * new_owner = pop_head( this->entry_queue );
      /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
      /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
…
  }

- static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
+ static inline bool is_accepted( monitor$ * this, const __monitor_group_t & group ) {
      __acceptable_t * it = this->mask.data; // Optim
      __lock_size_t count = this->mask.size;
…
  }

- static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+ static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
      for( __lock_size_t i = 0; i < count; i++) {
          (criteria[i]){ monitors[i], waiter };
…
  }

- static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+ static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
      for( __lock_size_t i = 0; i < count; i++) {
          (criteria[i]){ monitors[i], waiter };
…
  }

- static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
+ static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
      for( __lock_size_t i = 0; i < count; i++ ) {
          __spinlock_t * l = &source[i]->lock;
…
  }

- static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
+ static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) {
      for( __lock_size_t i = 0; i < count; i++ ) {
          unlock( locks[i]->lock );
…

  static inline void save(
-     $monitor * ctx [],
+     monitor$ * ctx [],
      __lock_size_t count,
      __attribute((unused)) __spinlock_t * locks [],
…

  static inline void restore(
-     $monitor * ctx [],
+     monitor$ * ctx [],
      __lock_size_t count,
      __spinlock_t * locks [],
…
  // 2 - Checks if all the monitors are ready to run
  //     if so return the thread to run
- static inline $thread * check_condition( __condition_criterion_t * target ) {
+ static inline thread$ * check_condition( __condition_criterion_t * target ) {
      __condition_node_t * node = target->owner;
      unsigned short count = node->count;
…

  static inline void brand_condition( condition & this ) {
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();
      if( !this.monitors ) {
          // __cfaabi_dbg_print_safe( "Branding\n" );
…
          this.monitor_count = thrd->monitors.size;

-         this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
+         this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) );
          for( int i = 0; i < this.monitor_count; i++ ) {
              this.monitors[i] = thrd->monitors[i];
…
  }

- static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
-
-     __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
+ static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor$ * monitors [], __lock_size_t count ) {
+
+     __queue_t(thread$) & entry_queue = monitors[0]->entry_queue;

      // For each thread in the entry-queue
-     for( $thread ** thrd_it = &entry_queue.head;
+     for( thread$ ** thrd_it = &entry_queue.head;
          (*thrd_it) != 1p;
          thrd_it = &(*thrd_it)->link.next
…
  }

- static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
+ static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) {
      __lock_size_t size = 0;
      for( __lock_size_t i = 0; i < mask.size; i++ ) {
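The change above is a mechanical rename ($monitor becomes monitor$, $thread becomes thread$), but the code it touches is the monitor ownership hand-off: when the current owner leaves for the last time, next_thread picks the next waiter (checking the signaller stack before the entry queue, and returning null on a low-contention monitor) and __set_owner makes that thread the new owner. The following is a minimal, single-threaded sketch of that hand-off in plain C rather than Cforall; every name in it (toy_monitor, toy_thread, enqueue, set_owner, leave) is hypothetical, the reset of the recursion count to 1 is an assumption of the sketch, and it omits the spinlock, the signaller stack, and the waitfor mask that the real monitor.cfa manages.

#include <stddef.h>
#include <stdio.h>

/* Illustrative sketch only: a toy, single-threaded model of the ownership
 * hand-off performed by next_thread()/__set_owner() in monitor.cfa.
 * All names here are hypothetical. */

typedef struct toy_thread {
    const char * name;
    struct toy_thread * next;   /* intrusive link, like the thread's link.next */
} toy_thread;

typedef struct {
    toy_thread * owner;         /* current owner, NULL when the monitor is free */
    unsigned     recursion;     /* nesting depth of the current owner           */
    toy_thread * entry_head;    /* FIFO of threads waiting to enter             */
    toy_thread * entry_tail;
} toy_monitor;

/* Append a thread to the entry queue (monitor assumed locked in the sketch). */
static void enqueue( toy_monitor * m, toy_thread * t ) {
    t->next = NULL;
    if ( m->entry_tail ) m->entry_tail->next = t; else m->entry_head = t;
    m->entry_tail = t;
}

/* Sketch of __set_owner: install the new owner and reset its recursion level. */
static void set_owner( toy_monitor * m, toy_thread * owner ) {
    m->owner     = owner;
    m->recursion = owner ? 1 : 0;   /* assumption: one level for a fresh owner */
}

/* Sketch of leaving a monitor: once the last recursion level is released,
 * pop the next waiter (may be NULL) and hand it ownership. */
static toy_thread * leave( toy_monitor * m ) {
    if ( --m->recursion > 0 ) return NULL;      /* still nested, keep ownership */
    toy_thread * new_owner = m->entry_head;     /* next thread, null if no waiter */
    if ( new_owner ) {
        m->entry_head = new_owner->next;
        if ( !m->entry_head ) m->entry_tail = NULL;
        new_owner->next = NULL;
    }
    set_owner( m, new_owner );                  /* NULL leaves the monitor free */
    return new_owner;                           /* caller would unpark this thread */
}

int main( void ) {
    toy_thread a = { "A", NULL }, b = { "B", NULL };
    toy_monitor m = { NULL, 0, NULL, NULL };

    set_owner( &m, &a );                        /* A acquires the free monitor */
    enqueue( &m, &b );                          /* B arrives and must wait     */

    toy_thread * woken = leave( &m );           /* A leaves, B becomes owner   */
    printf( "new owner: %s, recursion: %u\n",
            woken ? woken->name : "(none)", m.recursion );
    return 0;
}

In the real runtime this bookkeeping happens while holding the monitor's spinlock and the woken thread is handed to the scheduler; the sketch only models the queue and ownership fields.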