Changeset bbeb908 for src/libcfa
- Timestamp: Nov 6, 2017, 11:11:56 AM (8 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: a2ea829
- Parents: e706bfd (diff), 121ac13 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: src/libcfa/concurrency
- Files: 6 edited
src/libcfa/concurrency/invoke.h
Changes from e706bfd to bbeb908:
- A new typedef is introduced for lock/monitor counts, typedef int_fast16_t __lock_size_t;, and it replaces the bare short in __waitfor_mask_t.size and __monitor_group_t.size (the short * accepted field is unchanged).
- The Cforall helper declarations now take their container by reference instead of by pointer: append( __thread_queue_t &, thread_desc * ), pop_head( __thread_queue_t & ), remove( __thread_queue_t &, thread_desc ** ), push( __condition_stack_t &, __condition_criterion_t * ), and pop( __condition_stack_t & ). (A usage sketch follows after this list.)
- coStack_t.size changes from unsigned int to size_t.
- The trailing end-of-line comments on the fields of coStack_t, coroutine_desc, __waitfor_mask_t, monitor_desc, __monitor_group_t, and thread_desc move onto their own lines above each field; the fields themselves are otherwise unchanged.
- The ?[?] and ?==? operators on __monitor_group_t and the private machine_context_t / CtxInvokeStub / CtxSwitch / CtxGet section are only re-indented, with no functional change.
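A minimal usage sketch of the reference-based queue interface after this change; some_thread is a hypothetical thread_desc pointer and nothing below is part of the changeset itself:

    struct __thread_queue_t ready;                    // constructed via ?{}( __thread_queue_t & )
    append( ready, some_thread );                     // was: append( &ready, some_thread )
    struct thread_desc * first = pop_head( ready );   // was: pop_head( &ready )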
src/libcfa/concurrency/kernel
Changes from e706bfd to bbeb908:
- The comments on the spinlock operations (lock, lock_yield, try_lock, unlock) move from end-of-line onto their own lines; the declarations are unchanged.
- The semaphore operations switch from pointers to references: void P (semaphore & this) and void V (semaphore & this) replace void P(semaphore *this) and void V(semaphore *this). (A usage sketch follows after this list.)
- In struct cluster (ready_queue_lock, ready_queue, preemption) and struct processor (runner, cltr, kernel_thread, do_terminate, terminated, finish, preemption_alarm, pending_preemption, and the debug-only last_enable) the end-of-line comments likewise move onto their own lines; no fields are added or removed.
- The cluster and processor constructor/destructor declarations are only re-aligned.
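A hedged sketch of the updated semaphore interface, using only names declared in this header; the surrounding code is illustrative:

    semaphore sem;        // ?{}(semaphore & this, int count = 1) gives an initial count of 1
    P( sem );             // was: P( &sem )
    // ... critical section ...
    V( sem );             // was: V( &sem )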
src/libcfa/concurrency/kernel.c
Changes from e706bfd to bbeb908:
- Call sites are updated for the reference-based interfaces: P( this.terminated ), V( this->terminated ), append( this_processor->cltr->ready_queue, thrd ), and pop_head( this->ready_queue ).
- The multi-lock BlockInternal overloads switch to array parameter syntax: BlockInternal(spinlock * locks [], unsigned short count) and BlockInternal(spinlock * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count). (A call-site sketch follows after this list.)
- P and V are reimplemented on semaphore &, using this. member access, append( this.waiting, (thread_desc *)this_thread ), pop_head( this.waiting ), and BlockInternal( &this.lock ).
- The __thread_queue_t helpers append, pop_head, and remove and the __condition_stack_t helpers push and pop now take their container by reference and use this. instead of this-> throughout their bodies.
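Illustrative only: how the new array-parameter overload reads at an internal call site, assuming two hypothetical spinlocks a and b already held by the caller:

    spinlock * locks[2] = { &a, &b };
    BlockInternal( locks, 2 );    // blocks the current thread, releasing both locks (Release_Multi finish action)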
src/libcfa/concurrency/kernel_private.h
Changes from e706bfd to bbeb908: the two multi-lock BlockInternal prototypes switch from spinlock * * locks / thread_desc ** thrds to the array spellings spinlock * locks [] and thread_desc * thrds [], matching the definitions in kernel.c. A short note on why the two spellings are interchangeable follows.
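In C an array parameter adjusts to a pointer, so the two spellings declare the same function type; the new form simply documents that a bounded array is expected. A minimal sketch:

    void BlockInternal( spinlock * locks [], unsigned short count );
    void BlockInternal( spinlock ** locks,   unsigned short count );   // same declared type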
src/libcfa/concurrency/monitor
Changes from e706bfd to bbeb908:
- A commented-out ?<? comparison on monitor_desc pointers is deleted.
- The count fields of monitor_guard_t and monitor_dtor_guard_t change from int / unsigned short to __lock_size_t, and the monitor_guard_t constructor takes a __lock_size_t count.
- In __condition_criterion_t, __condition_node_t, and condition the end-of-line comments move onto their own lines, and the counts (__condition_node_t.count, condition.monitor_count) become __lock_size_t.
- ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info) takes the new count type, and append / pop_head on __condition_blocked_queue_t take the queue by reference.
- The condition operations take the condition by reference: wait( condition & this, uintptr_t user_info = 0 ), signal( condition & this ), signal_block( condition & this ), is_empty( condition & this ), and front( condition & this ). (A usage sketch follows after this list.)
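A hedged sketch of how the reference-based condition interface is called after this change; assume c is a condition already branded to the enclosing monitor and that this code runs inside a mutex routine (both assumptions, not part of the changeset):

    if( is_empty( c ) ) {
        wait( c, 42 );                    // was: wait( &c, 42 ); 42 is arbitrary user_info
    } else {
        uintptr_t info = front( c );      // user_info of the first blocked thread
        signal( c );                      // was: signal( &c )
    }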
src/libcfa/concurrency/monitor.c
Changes from e706bfd to bbeb908:
- All internal helpers move from pointer-to-pointer parameters and int/short counts to array syntax with __lock_size_t counts: set_owner, set_mask, lock_all, unlock_all, save, restore, init, init_push, brand_condition, search_entry_queue, insert_unique, count_max, and aggregate (for example, set_owner( monitor_desc * storage [], __lock_size_t count, thread_desc * owner )).
- init and init_push take the __condition_node_t by reference and the criteria as an array, and ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner) takes its owner by reference; the wait_ctx macros pass waiter and criteria accordingly, and the monitor_ctx macro declares its count as __lock_size_t.
- wait, signal, signal_block, front, and brand_condition operate on condition & and use this. member access; the debug abortf messages print &this instead of this.
- Calls to append, pop_head, push, and pop pass the queue or stack by reference, e.g. append( this->entry_queue, thrd ), pop( this->signal_stack ), push( crit->target->signal_stack, crit ), and pop_head( this.blocked ).
- The monitor_guard_t and monitor_dtor_guard_t constructors and destructors save and restore the thread's monitor context with a single tuple-member assignment instead of three separate assignments. (A sketch follows after this list.)
- Loop indices and local counts (count, thread_count, max, actual_count, size) change from int/short to __lock_size_t; save and restore take recursions [] and masks [] as arrays; search_entry_queue binds __thread_queue_t & entry_queue = monitors[0]->entry_queue; and insert_unique becomes forall(dtype T | sized(T)) __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ).
- The __condition_blocked_queue_t helpers append and pop_head take the queue by reference and use this. member access.
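The tuple-member assignment used by the new guard code, copied from this changeset; it replaces the three field-by-field assignments of the previous version:

    // save the enclosing thread's monitor context on entry
    this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
    // ... and restore it in the destructor
    this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];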