Changes in src/libcfa/concurrency/invoke.h [c1a9c86:39fea2f]
Files: 1 edited
Legend:
- Unmodified: no prefix (lines that differ only in whitespace between the two revisions are also shown without a prefix)
- Added: prefixed with + (present only in r39fea2f)
- Removed: prefixed with - (present only in rc1a9c86)
src/libcfa/concurrency/invoke.h
rc1a9c86 → r39fea2f

#define _INVOKE_H_

#define unlikely(x) __builtin_expect(!!(x), 0)
#define thread_local _Thread_local

typedef void (*fptr_t)();
-typedef int_fast16_t __lock_size_t;

struct spinlock {
    volatile int lock;
    #ifdef __CFA_DEBUG__
        const char * prev_name;
        void* prev_thrd;
    #endif
};

struct __thread_queue_t {
    struct thread_desc * head;
    struct thread_desc ** tail;
};

struct __condition_stack_t {
    struct __condition_criterion_t * top;
};

#ifdef __CFORALL__
extern "Cforall" {
    void ?{}( struct __thread_queue_t & );
-    void append( struct __thread_queue_t &, struct thread_desc * );
-    struct thread_desc * pop_head( struct __thread_queue_t & );
-    struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );
+    void append( struct __thread_queue_t *, struct thread_desc * );
+    struct thread_desc * pop_head( struct __thread_queue_t * );
+    struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );

    void ?{}( struct __condition_stack_t & );
-    void push( struct __condition_stack_t &, struct __condition_criterion_t * );
-    struct __condition_criterion_t * pop( struct __condition_stack_t & );
+    void push( struct __condition_stack_t *, struct __condition_criterion_t * );
+    struct __condition_criterion_t * pop( struct __condition_stack_t * );

    void ?{}(spinlock & this);
    void ^?{}(spinlock & this);
}
#endif

-struct coStack_t {
-    // size of stack
-    size_t size;
-
-    // pointer to stack
-    void *storage;
-
-    // stack grows towards stack limit
-    void *limit;
-
-    // base of stack
-    void *base;
-
-    // address of cfa_context_t
-    void *context;
-
-    // address of top of storage
-    void *top;
-
-    // whether or not the user allocated the stack
-    bool userStack;
-};
+struct coStack_t {
+    unsigned int size;                  // size of stack
+    void *storage;                      // pointer to stack
+    void *limit;                        // stack grows towards stack limit
+    void *base;                         // base of stack
+    void *context;                      // address of cfa_context_t
+    void *top;                          // address of top of storage
+    bool userStack;                     // whether or not the user allocated the stack
+};

enum coroutine_state { Halted, Start, Inactive, Active, Primed };

-struct coroutine_desc {
-    // stack information of the coroutine
-    struct coStack_t stack;
-
-    // textual name for coroutine/task, initialized by uC++ generated code
-    const char *name;
-
-    // copy of global UNIX variable errno
-    int errno_;
-
-    // current execution status for coroutine
-    enum coroutine_state state;
-
-    // first coroutine to resume this one
-    struct coroutine_desc * starter;
-
-    // last coroutine to resume this one
-    struct coroutine_desc * last;
-};
+struct coroutine_desc {
+    struct coStack_t stack;             // stack information of the coroutine
+    const char *name;                   // textual name for coroutine/task, initialized by uC++ generated code
+    int errno_;                         // copy of global UNIX variable errno
+    enum coroutine_state state;         // current execution status for coroutine
+    struct coroutine_desc * starter;    // first coroutine to resume this one
+    struct coroutine_desc * last;       // last coroutine to resume this one
+};
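Aside: the append/pop_head/remove prototypes above operate on an intrusive queue whose tail member points at the last link field rather than at the last node. The following is a minimal plain-C sketch of that scheme, not the library's actual implementation; thread_desc is reduced to its link field, and queue_init, queue_append, and queue_pop_head are hypothetical names used only for illustration. The changeset continues below with the monitor and thread descriptors.

    #include <stddef.h>

    struct thread_desc {
        struct thread_desc * next;          // intrusive link field, as in the header
    };

    struct __thread_queue_t {
        struct thread_desc * head;          // first node, NULL when empty (assumed convention)
        struct thread_desc ** tail;         // address of the last 'next' slot, &head when empty
    };

    static void queue_init( struct __thread_queue_t * q ) {
        q->head = NULL;
        q->tail = &q->head;
    }

    static void queue_append( struct __thread_queue_t * q, struct thread_desc * t ) {
        t->next = NULL;
        *q->tail = t;                       // fill the last empty link slot
        q->tail = &t->next;                 // the new node's link is now the last slot
    }

    static struct thread_desc * queue_pop_head( struct __thread_queue_t * q ) {
        struct thread_desc * t = q->head;
        if ( t != NULL ) {
            q->head = t->next;
            if ( q->head == NULL ) q->tail = &q->head;   // queue became empty again
            t->next = NULL;
        }
        return t;
    }

Keeping a pointer to the last link slot makes append O(1) with no special case for an empty queue.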
-struct __waitfor_mask_t {
-    // the index of the accepted function, -1 if none
-    short * accepted;
-
-    // list of acceptable functions, null if any
-    struct __acceptable_t * clauses;
-
-    // number of acceptable functions
-    __lock_size_t size;
-};
-
-struct monitor_desc {
-    // spinlock to protect internal data
-    struct spinlock lock;
-
-    // current owner of the monitor
-    struct thread_desc * owner;
-
-    // queue of threads that are blocked waiting for the monitor
-    struct __thread_queue_t entry_queue;
-
-    // stack of conditions to run next once we exit the monitor
-    struct __condition_stack_t signal_stack;
-
-    // monitor routines can be called recursively, we need to keep track of that
-    unsigned int recursion;
-
-    // mask used to know if some thread is waiting for something while holding the monitor
-    struct __waitfor_mask_t mask;
-
-    // node used to signal the dtor in a waitfor dtor
-    struct __condition_node_t * dtor_node;
-};
-
-struct __monitor_group_t {
-    // currently held monitors
-    struct monitor_desc ** list;
-
-    // number of currently held monitors
-    __lock_size_t size;
-
-    // last function that acquired monitors
-    fptr_t func;
-};
+struct monitor_desc {
+    struct spinlock lock;                       // spinlock to protect internal data
+    struct thread_desc * owner;                 // current owner of the monitor
+    struct __thread_queue_t entry_queue;        // queue of threads that are blocked waiting for the monitor
+    struct __condition_stack_t signal_stack;    // stack of conditions to run next once we exit the monitor
+    unsigned int recursion;                     // monitor routines can be called recursively, we need to keep track of that
+
+    struct __acceptable_t * acceptables;        // list of acceptable functions, null if any
+    unsigned short acceptable_count;            // number of acceptable functions
+    short accepted_index;                       // the index of the accepted function, -1 if none
+};

struct thread_desc {
    // Core threading fields
-    // coroutine body used to store context
-    struct coroutine_desc self_cor;
-
-    // monitor body used for mutual exclusion
-    struct monitor_desc self_mon;
-
-    // pointer to monitor with sufficient lifetime for current monitors
-    struct monitor_desc * self_mon_p;
-
-    // monitors currently held by this thread
-    struct __monitor_group_t monitors;
+    struct coroutine_desc cor;                  // coroutine body used to store context
+    struct monitor_desc mon;                    // monitor body used for mutual exclusion

    // Link lists fields
-    // instrusive link field for threads
-    struct thread_desc * next;
+    struct thread_desc * next;                  // instrusive link field for threads
+
+    // Current status related to monitors
+    struct monitor_desc ** current_monitors;    // currently held monitors
+    unsigned short current_monitor_count;       // number of currently held monitors
+    fptr_t current_monitor_func;                // last function that acquired monitors
};

-#ifdef __CFORALL__
-extern "Cforall" {
-    static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
-        return this.list[index];
-    }
-
-    static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
-        if( (lhs.list != 0) != (rhs.list != 0) ) return false;
-        if( lhs.size != rhs.size ) return false;
-        if( lhs.func != rhs.func ) return false;
-
-        // Check that all the monitors match
-        for( int i = 0; i < lhs.size; i++ ) {
-            // If not a match, check next function
-            if( lhs[i] != rhs[i] ) return false;
-        }
-
-        return true;
-    }
-}
-#endif

#endif //_INVOKE_H_
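For readers unfamiliar with Cforall operator syntax, the removed ?[?] and ?==? routines above index a monitor group and compare two groups field by field. A plain-C rendering of the same logic follows; monitor_group_equal is a hypothetical name used only for illustration. The private part of the header follows the sketch.

    #include <stdbool.h>
    #include <stdint.h>

    typedef void (*fptr_t)();
    typedef int_fast16_t __lock_size_t;

    struct monitor_desc;                        // opaque for this sketch

    struct __monitor_group_t {
        struct monitor_desc ** list;            // currently held monitors
        __lock_size_t size;                     // number of currently held monitors
        fptr_t func;                            // last function that acquired monitors
    };

    // Two groups are equal when both are empty or both are non-empty, they hold the
    // same number of monitors, were acquired by the same function, and list the same
    // monitors in the same order.
    static bool monitor_group_equal( const struct __monitor_group_t * lhs,
                                     const struct __monitor_group_t * rhs ) {
        if ( (lhs->list != 0) != (rhs->list != 0) ) return false;
        if ( lhs->size != rhs->size ) return false;
        if ( lhs->func != rhs->func ) return false;
        for ( int i = 0; i < lhs->size; i++ ) {         // check that all the monitors match
            if ( lhs->list[i] != rhs->list[i] ) return false;
        }
        return true;
    }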
…

#define _INVOKE_PRIVATE_H_

struct machine_context_t {
    void *SP;
    void *FP;
    void *PC;
};

// assembler routines that performs the context switch
extern void CtxInvokeStub( void );
void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");

#if defined( __x86_64__ )
#define CtxGet( ctx ) __asm__ ( \
    "movq %%rsp,%0\n" \
    "movq %%rbp,%1\n" \
    : "=rm" (ctx.SP), "=rm" (ctx.FP) )
#elif defined( __i386__ )
#define CtxGet( ctx ) __asm__ ( \
    "movl %%esp,%0\n" \
    "movl %%ebp,%1\n" \
    : "=rm" (ctx.SP), "=rm" (ctx.FP) )
#endif

#endif //_INVOKE_PRIVATE_H_
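The CtxGet macro above captures only the stack and frame pointers; PC is not filled in by the macro. A small standalone test, assuming x86-64 and GCC-style inline assembly (this program is an illustration, not part of the library):

    #include <stdio.h>

    struct machine_context_t {
        void *SP;
        void *FP;
        void *PC;
    };

    // Same idea as the x86-64 CtxGet variant above: read %rsp and %rbp into the context.
    // FP is only meaningful when the compiler keeps a frame pointer.
    #define CtxGet( ctx ) __asm__ ( \
        "movq %%rsp,%0\n" \
        "movq %%rbp,%1\n" \
        : "=rm" (ctx.SP), "=rm" (ctx.FP) )

    int main( void ) {
        struct machine_context_t ctx = { 0 };
        CtxGet( ctx );
        printf( "SP=%p FP=%p\n", ctx.SP, ctx.FP );
        return 0;
    }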