Changeset 0cf5b79 for src/libcfa/concurrency
- Timestamp:
- Nov 20, 2017, 12:12:34 PM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- fdd3786
- Parents:
- b7f8cb44
- Location:
- src/libcfa/concurrency
- Files:
- 5 edited
Legend:
- Unmodified
- Added
- Removed
src/libcfa/concurrency/invoke.h
rb7f8cb44 r0cf5b79 14 14 // 15 15 16 #include "bits/containers.h" 16 17 #include "bits/defs.h" 17 18 #include "bits/locks.h" 18 19 19 #ifdef __ CFORALL__20 #ifdef __cforall 20 21 extern "C" { 21 22 #endif … … 25 26 #define _INVOKE_H_ 26 27 27 typedef void (*fptr_t)(); 28 typedef int_fast16_t __lock_size_t; 29 30 struct __thread_queue_t { 31 struct thread_desc * head; 32 struct thread_desc ** tail; 33 }; 34 35 struct __condition_stack_t { 36 struct __condition_criterion_t * top; 37 }; 38 39 #ifdef __CFORALL__ 28 #ifdef __cforall 40 29 extern "Cforall" { 41 void ?{}( struct __thread_queue_t & ); 42 void append( struct __thread_queue_t &, struct thread_desc * ); 43 struct thread_desc * pop_head( struct __thread_queue_t & ); 44 struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** ); 45 46 void ?{}( struct __condition_stack_t & ); 47 void push( struct __condition_stack_t &, struct __condition_criterion_t * ); 48 struct __condition_criterion_t * pop( struct __condition_stack_t & ); 30 static inline struct thread_desc * & get_next( struct thread_desc & this ); 31 static inline struct __condition_criterion_t * & get_next( struct __condition_criterion_t & this ); 49 32 } 50 33 #endif … … 100 83 101 84 // list of acceptable functions, null if any 102 struct __acceptable_t * clauses; 103 104 // number of acceptable functions 105 __lock_size_t size; 85 __small_array_t(struct __acceptable_t) __cfa_anonymous_object; 106 86 }; 107 87 … … 114 94 115 95 // queue of threads that are blocked waiting for the monitor 116 struct __thread_queue_tentry_queue;96 __queue_t(struct thread_desc) entry_queue; 117 97 118 98 // stack of conditions to run next once we exit the monitor 119 struct __condition_stack_tsignal_stack;99 __stack_t(struct __condition_criterion_t) signal_stack; 120 100 121 101 // monitor routines can be called recursively, we need to keep track of that … … 131 111 struct __monitor_group_t { 132 112 // currently held monitors 133 struct monitor_desc 
** list; 134 135 // number of currently held monitors 136 __lock_size_t size; 113 __small_array_t(monitor_desc*) __cfa_anonymous_object; 137 114 138 115 // last function that acquired monitors … … 159 136 }; 160 137 161 #ifdef __ CFORALL__138 #ifdef __cforall 162 139 extern "Cforall" { 163 static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) { 164 return this.list[index]; 140 static inline thread_desc * & get_next( thread_desc & this ) { 141 return this.next; 142 } 143 144 static inline struct __condition_criterion_t * & get_next( struct __condition_criterion_t & this ); 145 146 static inline void ?{}(__monitor_group_t & this) { 147 (this.data){NULL}; 148 (this.size){0}; 149 (this.func){NULL}; 150 } 151 152 static inline void ?{}(__monitor_group_t & this, struct monitor_desc ** data, __lock_size_t size, fptr_t func) { 153 (this.data){data}; 154 (this.size){size}; 155 (this.func){func}; 165 156 } 166 157 167 158 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) { 168 if( (lhs. list != 0) != (rhs.list!= 0) ) return false;159 if( (lhs.data != 0) != (rhs.data != 0) ) return false; 169 160 if( lhs.size != rhs.size ) return false; 170 161 if( lhs.func != rhs.func ) return false; … … 177 168 178 169 return true; 170 } 171 172 static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) { 173 lhs.data = rhs.data; 174 lhs.size = rhs.size; 175 lhs.func = rhs.func; 179 176 } 180 177 } … … 210 207 #endif //_INVOKE_PRIVATE_H_ 211 208 #endif //! defined(__CFA_INVOKE_PRIVATE__) 212 #ifdef __ CFORALL__209 #ifdef __cforall 213 210 } 214 211 #endif -
src/libcfa/concurrency/kernel
rb7f8cb44 r0cf5b79 26 26 //----------------------------------------------------------------------------- 27 27 // Locks 28 // // Lock the spinlock, spin if already acquired29 // void lock ( spinlock * DEBUG_CTX_PARAM2 );30 31 // // Lock the spinlock, yield repeatedly if already acquired32 // void lock_yield( spinlock * DEBUG_CTX_PARAM2 );33 34 // // Lock the spinlock, return false if already acquired35 // bool try_lock ( spinlock * DEBUG_CTX_PARAM2 );36 37 // // Unlock the spinlock38 // void unlock ( spinlock * );39 40 28 struct semaphore { 41 29 __spinlock_t lock; 42 30 int count; 43 __ thread_queue_twaiting;31 __queue_t(thread_desc) waiting; 44 32 }; 45 33 … … 57 45 58 46 // Ready queue for threads 59 __ thread_queue_tready_queue;47 __queue_t(thread_desc) ready_queue; 60 48 61 49 // Preemption rate on this cluster -
src/libcfa/concurrency/kernel.c
rb7f8cb44 r0cf5b79 164 164 165 165 void ?{}(cluster & this) { 166 ( this.ready_queue){};166 (this.ready_queue){}; 167 167 ( this.ready_queue_lock ){}; 168 168 … … 611 611 } 612 612 613 //-----------------------------------------------------------------------------614 // Queues615 void ?{}( __thread_queue_t & this ) {616 this.head = NULL;617 this.tail = &this.head;618 }619 620 void append( __thread_queue_t & this, thread_desc * t ) {621 verify(this.tail != NULL);622 *this.tail = t;623 this.tail = &t->next;624 }625 626 thread_desc * pop_head( __thread_queue_t & this ) {627 thread_desc * head = this.head;628 if( head ) {629 this.head = head->next;630 if( !head->next ) {631 this.tail = &this.head;632 }633 head->next = NULL;634 }635 return head;636 }637 638 thread_desc * remove( __thread_queue_t & this, thread_desc ** it ) {639 thread_desc * thrd = *it;640 verify( thrd );641 642 (*it) = thrd->next;643 644 if( this.tail == &thrd->next ) {645 this.tail = it;646 }647 648 thrd->next = NULL;649 650 verify( (this.head == NULL) == (&this.head == this.tail) );651 verify( *this.tail == NULL );652 return thrd;653 }654 655 void ?{}( __condition_stack_t & this ) {656 this.top = NULL;657 }658 659 void push( __condition_stack_t & this, __condition_criterion_t * t ) {660 verify( !t->next );661 t->next = this.top;662 this.top = t;663 }664 665 __condition_criterion_t * pop( __condition_stack_t & this ) {666 __condition_criterion_t * top = this.top;667 if( top ) {668 this.top = top->next;669 top->next = NULL;670 }671 return top;672 }673 674 613 // Local Variables: // 675 614 // mode: c // -
src/libcfa/concurrency/monitor
rb7f8cb44 r0cf5b79 34 34 this.recursion = 0; 35 35 this.mask.accepted = NULL; 36 this.mask. clauses= NULL;36 this.mask.data = NULL; 37 37 this.mask.size = 0; 38 38 this.dtor_node = NULL; … … 40 40 41 41 struct monitor_guard_t { 42 monitor_desc ** m; 43 __lock_size_t count; 44 monitor_desc ** prev_mntrs; 45 __lock_size_t prev_count; 46 fptr_t prev_func; 42 monitor_desc ** m; 43 __lock_size_t count; 44 __monitor_group_t prev; 47 45 }; 48 46 … … 51 49 52 50 struct monitor_dtor_guard_t { 53 monitor_desc * m; 54 monitor_desc ** prev_mntrs; 55 __lock_size_t prev_count; 56 fptr_t prev_func; 51 monitor_desc * m; 52 __monitor_group_t prev; 57 53 }; 58 54 … … 83 79 }; 84 80 81 static inline __condition_criterion_t * & get_next( __condition_criterion_t & this ) { 82 return this.next; 83 } 84 85 85 struct __condition_node_t { 86 86 // Thread that needs to be woken when all criteria are met … … 100 100 }; 101 101 102 struct __condition_blocked_queue_t { 103 __condition_node_t * head; 104 __condition_node_t ** tail; 105 }; 102 static inline __condition_node_t * & get_next( __condition_node_t & this ) { 103 return this.next; 104 } 106 105 107 106 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ); … … 109 108 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ); 110 109 111 void ?{}( __condition_blocked_queue_t & );112 void append( __condition_blocked_queue_t &, __condition_node_t * );113 __condition_node_t * pop_head( __condition_blocked_queue_t & );114 115 110 struct condition { 116 111 // Link list which contains the blocked threads as-well as the information needed to unblock them 117 __ condition_blocked_queue_tblocked;112 __queue_t(__condition_node_t) blocked; 118 113 119 114 // Array of monitor pointers (Monitors are NOT contiguous in memory) -
src/libcfa/concurrency/monitor.c
rb7f8cb44 r0cf5b79 280 280 static inline void enter( __monitor_group_t monitors ) { 281 281 for( __lock_size_t i = 0; i < monitors.size; i++) { 282 __enter_monitor_desc( monitors .list[i], monitors );282 __enter_monitor_desc( monitors[i], monitors ); 283 283 } 284 284 } … … 303 303 304 304 // Save previous thread context 305 this. [prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];305 this.prev = this_thread->monitors; 306 306 307 307 // Update thread context (needed for conditions) 308 this_thread->monitors.[list, size, func] = [m, count, func];308 (this_thread->monitors){m, count, func}; 309 309 310 310 // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count); … … 328 328 329 329 // Restore thread context 330 this_thread->monitors .[list, size, func] = this.[prev_mntrs, prev_count, prev_func];330 this_thread->monitors = this.prev; 331 331 } 332 332 … … 338 338 339 339 // Save previous thread context 340 this. [prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];340 this.prev = this_thread->monitors; 341 341 342 342 // Update thread context (needed for conditions) 343 this_thread->monitors.[list, size, func] = [m, 1, func];343 (this_thread->monitors){m, 1, func}; 344 344 345 345 __enter_monitor_dtor( this.m, func ); … … 352 352 353 353 // Restore thread context 354 this_thread->monitors .[list, size, func] = this.[prev_mntrs, prev_count, prev_func];354 this_thread->monitors = this.prev; 355 355 } 356 356 … … 437 437 438 438 for(int i = 0; i < this.monitor_count; i++) { 439 if ( this.monitors[i] != this_thrd->monitors .list[i] ) {440 abortf( "Signal on condition %p made with different monitor, expected %p got %i", &this, this.monitors[i], this_thrd->monitors .list[i] );439 if ( this.monitors[i] != this_thrd->monitors[i] ) { 440 abortf( "Signal on condition %p made with different monitor, expected %p got %i", &this, this.monitors[i], this_thrd->monitors[i] ); 441 441 } 442 442 } … … 510 510 "Possible cause is not 
checking if the condition is empty before reading stored data." 511 511 ); 512 return this.blocked.head->user_info;512 return ((typeof(this.blocked.head))this.blocked.head)->user_info; 513 513 } 514 514 … … 554 554 if( next ) { 555 555 *mask.accepted = index; 556 if( mask.clauses[index].is_dtor ) { 556 __acceptable_t& accepted = mask[index]; 557 if( accepted.is_dtor ) { 557 558 LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : dtor already there\n"); 558 verifyf( mask.clauses[index].size == 1 ,"ERROR: Accepted dtor has more than 1 mutex parameter." );559 560 monitor_desc * mon2dtor = mask.clauses[index].list[0];559 verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." ); 560 561 monitor_desc * mon2dtor = accepted[0]; 561 562 verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." ); 562 563 … … 596 597 597 598 LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted); 598 599 599 return; 600 600 } … … 671 671 static inline void reset_mask( monitor_desc * this ) { 672 672 this->mask.accepted = NULL; 673 this->mask. clauses= NULL;673 this->mask.data = NULL; 674 674 this->mask.size = 0; 675 675 } … … 697 697 698 698 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) { 699 __acceptable_t * it = this->mask. clauses; // Optim699 __acceptable_t * it = this->mask.data; // Optim 700 700 __lock_size_t count = this->mask.size; 701 701 … … 820 820 if( !this.monitors ) { 821 821 // LIB_DEBUG_PRINT_SAFE("Branding\n"); 822 assertf( thrd->monitors. 
list != NULL, "No current monitor to brand condition %p", thrd->monitors.list);822 assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data ); 823 823 this.monitor_count = thrd->monitors.size; 824 824 825 825 this.monitors = malloc( this.monitor_count * sizeof( *this.monitors ) ); 826 826 for( int i = 0; i < this.monitor_count; i++ ) { 827 this.monitors[i] = thrd->monitors .list[i];827 this.monitors[i] = thrd->monitors[i]; 828 828 } 829 829 } … … 832 832 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) { 833 833 834 __ thread_queue_t& entry_queue = monitors[0]->entry_queue;834 __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue; 835 835 836 836 // For each thread in the entry-queue … … 841 841 // For each acceptable check if it matches 842 842 int i = 0; 843 __acceptable_t * end = mask.clauses + mask.size; 844 for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) { 843 __acceptable_t * end = end (mask); 844 __acceptable_t * begin = begin(mask); 845 for( __acceptable_t * it = begin; it != end; it++, i++ ) { 845 846 // Check if we have a match 846 847 if( *it == (*thrd_it)->monitors ) { … … 872 873 __lock_size_t max = 0; 873 874 for( __lock_size_t i = 0; i < mask.size; i++ ) { 874 max += mask.clauses[i].size; 875 __acceptable_t & accepted = mask[i]; 876 max += accepted.size; 875 877 } 876 878 return max; … … 880 882 __lock_size_t size = 0; 881 883 for( __lock_size_t i = 0; i < mask.size; i++ ) { 882 __libcfa_small_sort( mask.clauses[i].list, mask.clauses[i].size ); 883 for( __lock_size_t j = 0; j < mask.clauses[i].size; j++) { 884 insert_unique( storage, size, mask.clauses[i].list[j] ); 884 __acceptable_t & accepted = mask[i]; 885 __libcfa_small_sort( accepted.data, accepted.size ); 886 for( __lock_size_t j = 0; j < accepted.size; j++) { 887 insert_unique( storage, size, accepted[j] ); 885 888 } 886 889 } … … 888 
891 __libcfa_small_sort( storage, size ); 889 892 return size; 890 }891 892 void ?{}( __condition_blocked_queue_t & this ) {893 this.head = NULL;894 this.tail = &this.head;895 }896 897 void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {898 verify(this.tail != NULL);899 *this.tail = c;900 this.tail = &c->next;901 }902 903 __condition_node_t * pop_head( __condition_blocked_queue_t & this ) {904 __condition_node_t * head = this.head;905 if( head ) {906 this.head = head->next;907 if( !head->next ) {908 this.tail = &this.head;909 }910 head->next = NULL;911 }912 return head;913 893 } 914 894
Note: See TracChangeset for help on using the changeset viewer.