Changeset 8fc45b7
- Timestamp: Nov 2, 2017, 4:01:28 PM
- Branches: aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: 025278e
- Parents: 4cedd9f
- Location: src/libcfa/concurrency
- Files: 6 edited
Legend:
- Lines prefixed "-" were removed in r8fc45b7; lines prefixed "+" were added; unprefixed lines are unchanged context. "…" separates hunks.
src/libcfa/concurrency/invoke.h
r4cedd9f → r8fc45b7

  extern "Cforall" {
  	void ?{}( struct __thread_queue_t & );
- 	void append( struct __thread_queue_t *, struct thread_desc * );
- 	struct thread_desc * pop_head( struct __thread_queue_t * );
- 	struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );
+ 	void append( struct __thread_queue_t &, struct thread_desc * );
+ 	struct thread_desc * pop_head( struct __thread_queue_t & );
+ 	struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );

  	void ?{}( struct __condition_stack_t & );
- 	void push( struct __condition_stack_t *, struct __condition_criterion_t * );
- 	struct __condition_criterion_t * pop( struct __condition_stack_t * );
+ 	void push( struct __condition_stack_t &, struct __condition_criterion_t * );
+ 	struct __condition_criterion_t * pop( struct __condition_stack_t & );

- 	void ?{}(spinlock & this);
+ 	void ?{}(spinlock & this);
  	void ^?{}(spinlock & this);
  }
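As a rough illustration of the changeset's theme (not part of the diff; `some_thread` is a hypothetical thread object), passing the queue by reference removes the explicit address-of at call sites:

    struct __thread_queue_t queue;                   // declaration invokes ?{}( __thread_queue_t & )
    extern struct thread_desc * some_thread;         // hypothetical

    append( queue, some_thread );                    // was: append( &queue, some_thread );
    struct thread_desc * t = pop_head( queue );      // was: pop_head( &queue );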
src/libcfa/concurrency/kernel
r4cedd9f → r8fc45b7

  };

- void ?{}(processor & this);
- void ?{}(processor & this, cluster * cltr);
+ void ?{}(processor & this);
+ void ?{}(processor & this, cluster * cltr);
  void ^?{}(processor & this);
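This hunk only re-aligns whitespace. For context, a sketch of how the two processor constructors are used, assuming CFA's C-style initializer syntax for constructor calls and a hypothetical cluster object:

    cluster clstr;                       // hypothetical cluster, constructed at declaration
    processor proc_a;                    // invokes ?{}( processor & )
    processor proc_b = { &clstr };       // invokes ?{}( processor &, cluster * )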
src/libcfa/concurrency/kernel.c
r4cedd9f → r8fc45b7

  	lock( &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
- 	append( &this_processor->cltr->ready_queue, thrd );
+ 	append( this_processor->cltr->ready_queue, thrd );
  	unlock( &this_processor->cltr->ready_queue_lock );
…
  	verify( disable_preempt_count > 0 );
  	lock( &this->ready_queue_lock DEBUG_CTX2 );
- 	thread_desc * head = pop_head( &this->ready_queue );
+ 	thread_desc * head = pop_head( this->ready_queue );
  	unlock( &this->ready_queue_lock );
  	verify( disable_preempt_count > 0 );
…
  }

- void BlockInternal(spinlock ** locks, unsigned short count) {
+ void BlockInternal(spinlock * locks [], unsigned short count) {
  	disable_interrupts();
  	this_processor->finish.action_code = Release_Multi;
…
  }

- void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
+ void BlockInternal(spinlock * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
  	disable_interrupts();
  	this_processor->finish.action_code = Release_Multi_Schedule;
…
  	if ( this.count < 0 ) {
  		// queue current task
- 		append( &this.waiting, (thread_desc *)this_thread );
+ 		append( this.waiting, (thread_desc *)this_thread );

  		// atomically release spin lock and block
…
  	if ( this.count <= 0 ) {
  		// remove task at head of waiting list
- 		thrd = pop_head( &this.waiting );
+ 		thrd = pop_head( this.waiting );
  	}
…
  }

- void append( __thread_queue_t * this, thread_desc * t ) {
- 	verify(this->tail != NULL);
- 	*this->tail = t;
- 	this->tail = &t->next;
- }
-
- thread_desc * pop_head( __thread_queue_t * this ) {
- 	thread_desc * head = this->head;
+ void append( __thread_queue_t & this, thread_desc * t ) {
+ 	verify(this.tail != NULL);
+ 	*this.tail = t;
+ 	this.tail = &t->next;
+ }
+
+ thread_desc * pop_head( __thread_queue_t & this ) {
+ 	thread_desc * head = this.head;
  	if( head ) {
- 		this->head = head->next;
+ 		this.head = head->next;
  		if( !head->next ) {
- 			this->tail = &this->head;
+ 			this.tail = &this.head;
  		}
  		head->next = NULL;
…
  }

- thread_desc * remove( __thread_queue_t * this, thread_desc ** it ) {
+ thread_desc * remove( __thread_queue_t & this, thread_desc ** it ) {
  	thread_desc * thrd = *it;
  	verify( thrd );

  	(*it) = thrd->next;

- 	if( this->tail == &thrd->next ) {
- 		this->tail = it;
+ 	if( this.tail == &thrd->next ) {
+ 		this.tail = it;
  	}

  	thrd->next = NULL;

- 	verify( (this->head == NULL) == (&this->head == this->tail) );
- 	verify( *this->tail == NULL );
+ 	verify( (this.head == NULL) == (&this.head == this.tail) );
+ 	verify( *this.tail == NULL );
  	return thrd;
  }
…
  }

- void push( __condition_stack_t * this, __condition_criterion_t * t ) {
+ void push( __condition_stack_t & this, __condition_criterion_t * t ) {
  	verify( !t->next );
- 	t->next = this->top;
- 	this->top = t;
- }
-
- __condition_criterion_t * pop( __condition_stack_t * this ) {
- 	__condition_criterion_t * top = this->top;
+ 	t->next = this.top;
+ 	this.top = t;
+ }
+
+ __condition_criterion_t * pop( __condition_stack_t & this ) {
+ 	__condition_criterion_t * top = this.top;
  	if( top ) {
- 		this->top = top->next;
+ 		this.top = top->next;
  		top->next = NULL;
  	}
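The queue functions above rely on `tail` pointing at the last link in the chain, which is either `head` itself or some node's `next` field. A minimal standalone sketch of the same technique, with hypothetical `node`/`queue` names (needs <stddef.h> for NULL):

    struct node  { struct node * next; };
    struct queue { struct node * head; struct node ** tail; };

    void init( struct queue & q ) {
        q.head = NULL;
        q.tail = &q.head;          // empty queue: tail points at head itself
    }

    void push_back( struct queue & q, struct node * n ) {
        n->next = NULL;
        *q.tail = n;               // write through the last link
        q.tail  = &n->next;        // the new node's next field is now the last link
    }

This is why append needs no empty-queue special case, and why remove can repair `tail` simply by comparing it against `&thrd->next`.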
src/libcfa/concurrency/kernel_private.h
r4cedd9f → r8fc45b7

  void BlockInternal(thread_desc * thrd);
  void BlockInternal(spinlock * lock, thread_desc * thrd);
- void BlockInternal(spinlock ** locks, unsigned short count);
- void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
+ void BlockInternal(spinlock * locks [], unsigned short count);
+ void BlockInternal(spinlock * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
  void LeaveThread(spinlock * lock, thread_desc * thrd);
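Note that to the C type system `spinlock * locks []` and `spinlock ** locks` declare the same parameter, since an array parameter decays to a pointer; the new spelling only documents that callers pass an array. A hypothetical call site (`m0`, `m1` standing in for objects with a `lock` field) is unaffected:

    spinlock * locks[2] = { &m0->lock, &m1->lock };
    BlockInternal( locks, 2 );        // array decays to the pointer either way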
src/libcfa/concurrency/monitor
r4cedd9f → r8fc45b7

  	void ?{}( __condition_blocked_queue_t & );
- 	void append( __condition_blocked_queue_t *, __condition_node_t * );
- 	__condition_node_t * pop_head( __condition_blocked_queue_t * );
+ 	void append( __condition_blocked_queue_t &, __condition_node_t * );
+ 	__condition_node_t * pop_head( __condition_blocked_queue_t & );

  	struct condition {
…
  }

- void wait( condition & this, uintptr_t user_info = 0 );
- bool signal( condition & this );
- bool signal_block( condition & this );
- static inline bool is_empty ( condition & this ) { return !this.blocked.head; }
- uintptr_t front( condition & this );
+ void wait        ( condition & this, uintptr_t user_info = 0 );
+ bool signal      ( condition & this );
+ bool signal_block( condition & this );
+ static inline bool is_empty ( condition & this ) { return !this.blocked.head; }
+ uintptr_t front  ( condition & this );

  //-----------------------------------------------------------------------------
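For orientation, a sketch of the condition interface declared above, assuming CFA's `monitor` type and `mutex` parameter qualifier; the `gate` type and its routines are hypothetical:

    monitor gate { condition opened; bool open; };   // hypothetical monitor

    void pass( gate & mutex this ) {
        if( !this.open ) wait( this.opened );        // block current thread on the condition
    }

    void release( gate & mutex this ) {
        this.open = true;
        signal( this.opened );                       // wake one waiter, if any
    }

Both wait and signal take the condition by reference, consistent with this changeset's move away from pointer receivers.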
src/libcfa/concurrency/monitor.c
r4cedd9f → r8fc45b7

  // Forward declarations
  static inline void set_owner ( monitor_desc * this, thread_desc * owner );
- static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
- static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
+ static inline void set_owner ( monitor_desc * storage [], short count, thread_desc * owner );
+ static inline void set_mask  ( monitor_desc * storage [], short count, const __waitfor_mask_t & mask );
  static inline void reset_mask( monitor_desc * this );
…
  static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

- static inline void lock_all( spinlock ** locks, unsigned short count );
- static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
- static inline void unlock_all( spinlock ** locks, unsigned short count );
- static inline void unlock_all( monitor_desc ** locks, unsigned short count );
-
- static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
- static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
-
- static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
- static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+ static inline void lock_all  ( spinlock * locks [], unsigned short count );
+ static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], unsigned short count );
+ static inline void unlock_all( spinlock * locks [], unsigned short count );
+ static inline void unlock_all( monitor_desc * locks [], unsigned short count );
+
+ static inline void save   ( monitor_desc * ctx [], short count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+ static inline void restore( monitor_desc * ctx [], short count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+
+ static inline void init     ( int count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+ static inline void init_push( int count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );

  static inline thread_desc * check_condition( __condition_criterion_t * );
  static inline void brand_condition( condition & );
- static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
+ static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], int count );

  forall(dtype T | sized( T ))
- static inline short insert_unique( T ** array, short & size, T * val );
+ static inline short insert_unique( T * array [], short & size, T * val );
  static inline short count_max( const __waitfor_mask_t & mask );
- static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask );
+ static inline short aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask );

  //-----------------------------------------------------------------------------
…
  	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation */ \
  	__condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up */ \
- 	init( count, monitors, &waiter, criteria );               /* Link everything together */ \
+ 	init( count, monitors, waiter, criteria );                /* Link everything together */ \

  #define wait_ctx_primed(thrd, user_info) /* Create the necessary information to use the signaller stack */ \
  	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation */ \
  	__condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up */ \
- 	init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack */ \
+ 	init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack */ \

  #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \
…
  		// Some one else has the monitor, wait in line for it
- 		append( &this->entry_queue, thrd );
+ 		append( this->entry_queue, thrd );
  		BlockInternal( &this->lock );
…
  		// Wake the thread that is waiting for this
- 		__condition_criterion_t * urgent = pop( &this->signal_stack );
+ 		__condition_criterion_t * urgent = pop( this->signal_stack );
  		verify( urgent );
…
  		// Some one else has the monitor, wait in line for it
- 		append( &this->entry_queue, thrd );
+ 		append( this->entry_queue, thrd );
  		BlockInternal( &this->lock );
…
  // Leave multiple monitor
  // relies on the monitor array being sorted
- static inline void leave(monitor_desc ** monitors, int count) {
+ static inline void leave(monitor_desc * monitors [], int count) {
  	for(int i = count - 1; i >= 0; i--) {
  		__leave_monitor_desc( monitors[i] );
…
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
+ void ?{}( monitor_guard_t & this, monitor_desc * m [], int count, fptr_t func ) {
  	// Store current array
  	this.m = m;
…
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
+ void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
  	// Store current array
  	this.m = *m;
…
  }

- void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
+ void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
  	this.ready  = false;
  	this.target = target;
- 	this.owner  = owner;
+ 	this.owner  = &owner;
  	this.next   = NULL;
  }
…
  	// Append the current wait operation to the ones already queued on the condition
  	// We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
- 	append( &this.blocked, &waiter );
+ 	append( this.blocked, &waiter );

  	// Lock all monitors (aggregates the locks as well)
…
  	//Pop the head of the waiting queue
- 	__condition_node_t * node = pop_head( &this.blocked );
+ 	__condition_node_t * node = pop_head( this.blocked );

  	//Add the thread to the proper AS stack
…
  		__condition_criterion_t * crit = &node->criteria[i];
  		assert( !crit->ready );
- 		push( &crit->target->signal_stack, crit );
+ 		push( crit->target->signal_stack, crit );
  	}
…
  	//Find the thread to run
- 	thread_desc * signallee = pop_head( &this.blocked )->waiting_thread;
+ 	thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
  	set_owner( monitors, count, signallee );
…
  	__condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
- 	push( &mon2dtor->signal_stack, dtor_crit );
+ 	push( mon2dtor->signal_stack, dtor_crit );

  	unlock_all( locks, count );
…
  }

- static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
+ static inline void set_owner( monitor_desc * monitors [], short count, thread_desc * owner ) {
  	monitors[0]->owner = owner;
  	monitors[0]->recursion = 1;
…
  }

- static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
+ static inline void set_mask( monitor_desc * storage [], short count, const __waitfor_mask_t & mask ) {
  	for(int i = 0; i < count; i++) {
  		storage[i]->mask = mask;
…
  	//Check the signaller stack
  	LIB_DEBUG_PRINT_SAFE("Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
- 	__condition_criterion_t * urgent = pop( &this->signal_stack );
+ 	__condition_criterion_t * urgent = pop( this->signal_stack );
  	if( urgent ) {
  		//The signaller stack is not empty,
…
  	// No signaller thread
  	// Get the next thread in the entry_queue
- 	thread_desc * new_owner = pop_head( &this->entry_queue );
+ 	thread_desc * new_owner = pop_head( this->entry_queue );
  	set_owner( this, new_owner );
…
  }

- static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+ static inline void init( int count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
  	for(int i = 0; i < count; i++) {
  		(criteria[i]){ monitors[i], waiter };
  	}

- 	waiter->criteria = criteria;
- }
-
- static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+ 	waiter.criteria = criteria;
+ }
+
+ static inline void init_push( int count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
  	for(int i = 0; i < count; i++) {
  		(criteria[i]){ monitors[i], waiter };
  		LIB_DEBUG_PRINT_SAFE( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] );
- 		push( &criteria[i].target->signal_stack, &criteria[i] );
- 	}
-
- 	waiter->criteria = criteria;
- }
-
- static inline void lock_all( spinlock ** locks, unsigned short count ) {
+ 		push( criteria[i].target->signal_stack, &criteria[i] );
+ 	}
+
+ 	waiter.criteria = criteria;
+ }
+
+ static inline void lock_all( spinlock * locks [], unsigned short count ) {
  	for( int i = 0; i < count; i++ ) {
  		lock_yield( locks[i] DEBUG_CTX2 );
…
  }

- static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
+ static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], unsigned short count ) {
  	for( int i = 0; i < count; i++ ) {
  		spinlock * l = &source[i]->lock;
…
  }

- static inline void unlock_all( spinlock ** locks, unsigned short count ) {
+ static inline void unlock_all( spinlock * locks [], unsigned short count ) {
  	for( int i = 0; i < count; i++ ) {
  		unlock( locks[i] );
…
  }

- static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
+ static inline void unlock_all( monitor_desc * locks [], unsigned short count ) {
  	for( int i = 0; i < count; i++ ) {
  		unlock( &locks[i]->lock );
…
  }

- static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
+ static inline void save(
+ 	monitor_desc * ctx [],
+ 	short count,
+ 	__attribute((unused)) spinlock * locks [],
+ 	unsigned int /*out*/ recursions [],
+ 	__waitfor_mask_t /*out*/ masks []
+ ) {
  	for( int i = 0; i < count; i++ ) {
  		recursions[i] = ctx[i]->recursion;
…
  }

- static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
+ static inline void restore(
+ 	monitor_desc * ctx [],
+ 	short count,
+ 	spinlock * locks [],
+ 	unsigned int /*out*/ recursions [],
+ 	__waitfor_mask_t /*out*/ masks []
+ ) {
  	lock_all( locks, count );
  	for( int i = 0; i < count; i++ ) {
…
  }

- static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {
-
- 	__thread_queue_t * entry_queue = &monitors[0]->entry_queue;
+ static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], int count ) {
+
+ 	__thread_queue_t & entry_queue = monitors[0]->entry_queue;

  	// For each thread in the entry-queue
- 	for(	thread_desc ** thrd_it = &entry_queue->head;
+ 	for(	thread_desc ** thrd_it = &entry_queue.head;
  		*thrd_it;
  		thrd_it = &(*thrd_it)->next
…
  forall(dtype T | sized( T ))
- static inline short insert_unique( T ** array, short & size, T * val ) {
+ static inline short insert_unique( T * array [], short & size, T * val ) {
  	if( !val ) return size;
…
  }

- static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
+ static inline short aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
  	short size = 0;
  	for( int i = 0; i < mask.size; i++ ) {
…
  }

- void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
- 	verify(this->tail != NULL);
- 	*this->tail = c;
- 	this->tail = &c->next;
- }
-
- __condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
- 	__condition_node_t * head = this->head;
+ void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {
+ 	verify(this.tail != NULL);
+ 	*this.tail = c;
+ 	this.tail = &c->next;
+ }
+
+ __condition_node_t * pop_head( __condition_blocked_queue_t & this ) {
+ 	__condition_node_t * head = this.head;
  	if( head ) {
- 		this->head = head->next;
+ 		this.head = head->next;
  		if( !head->next ) {
- 			this->tail = &this->head;
+ 			this.tail = &this.head;
  		}
  		head->next = NULL;
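Finally, remove and search_entry_queue above both iterate with a pointer to the link itself (`thread_desc ** it`), so the current node can be unlinked in O(1) without tracking a separate "previous" pointer. A hypothetical, self-contained use against the new reference-based remove (`matches` is an assumed predicate):

    for( thread_desc ** it = &queue.head; *it; it = &(*it)->next ) {
        if( matches( *it ) ) {
            thread_desc * victim = remove( queue, it );   // unlinks *it and patches tail if needed
            break;                                        // *it now aliases the successor, so stop here
        }
    }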