Changeset 025278e for src/libcfa/concurrency/invoke.h
- Timestamp:
- Nov 2, 2017, 4:38:32 PM (7 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 513daec
- Parents:
- 8fc45b7
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/invoke.h
r8fc45b7 r025278e 25 25 #define _INVOKE_H_ 26 26 27 #define unlikely(x) __builtin_expect(!!(x), 0) 28 #define thread_local _Thread_local 29 30 typedef void (*fptr_t)(); 31 32 struct spinlock { 33 volatile int lock; 34 #ifdef __CFA_DEBUG__ 35 const char * prev_name; 36 void* prev_thrd; 37 #endif 38 }; 39 40 struct __thread_queue_t { 41 struct thread_desc * head; 42 struct thread_desc ** tail; 43 }; 44 45 struct __condition_stack_t { 46 struct __condition_criterion_t * top; 47 }; 48 49 #ifdef __CFORALL__ 50 extern "Cforall" { 51 void ?{}( struct __thread_queue_t & ); 52 void append( struct __thread_queue_t &, struct thread_desc * ); 53 struct thread_desc * pop_head( struct __thread_queue_t & ); 54 struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** ); 55 56 void ?{}( struct __condition_stack_t & ); 57 void push( struct __condition_stack_t &, struct __condition_criterion_t * ); 58 struct __condition_criterion_t * pop( struct __condition_stack_t & ); 59 60 void ?{}(spinlock & this); 61 void ^?{}(spinlock & this); 62 } 63 #endif 64 65 struct coStack_t { 66 unsigned int size; // size of stack 67 void *storage; // pointer to stack 68 void *limit; // stack grows towards stack limit 69 void *base; // base of stack 70 void *context; // address of cfa_context_t 71 void *top; // address of top of storage 72 bool userStack; // whether or not the user allocated the stack 73 }; 74 75 enum coroutine_state { Halted, Start, Inactive, Active, Primed }; 76 77 struct coroutine_desc { 78 struct coStack_t stack; // stack information of the coroutine 79 const char *name; // textual name for coroutine/task, initialized by uC++ generated code 80 int errno_; // copy of global UNIX variable errno 81 enum coroutine_state state; // current execution status for coroutine 82 struct coroutine_desc * starter; // first coroutine to resume this one 83 struct coroutine_desc * last; // last coroutine to resume this one 84 }; 85 86 struct __waitfor_mask_t { 87 short * accepted; 
// the index of the accepted function, -1 if none 88 struct __acceptable_t * clauses; // list of acceptable functions, null if any 89 short size; // number of acceptable functions 90 }; 91 92 struct monitor_desc { 93 struct spinlock lock; // spinlock to protect internal data 94 struct thread_desc * owner; // current owner of the monitor 95 struct __thread_queue_t entry_queue; // queue of threads that are blocked waiting for the monitor 96 struct __condition_stack_t signal_stack; // stack of conditions to run next once we exit the monitor 97 unsigned int recursion; // monitor routines can be called recursively, we need to keep track of that 98 struct __waitfor_mask_t mask; // mask used to know if some thread is waiting for something while holding the monitor 99 struct __condition_node_t * dtor_node; // node used to signal the dtor in a waitfor dtor 100 }; 101 102 struct __monitor_group_t { 103 struct monitor_desc ** list; // currently held monitors 104 short size; // number of currently held monitors 105 fptr_t func; // last function that acquired monitors 106 }; 107 108 struct thread_desc { 109 // Core threading fields 110 struct coroutine_desc self_cor; // coroutine body used to store context 111 struct monitor_desc self_mon; // monitor body used for mutual exclusion 112 struct monitor_desc * self_mon_p; // pointer to monitor with sufficient lifetime for current monitors 113 struct __monitor_group_t monitors; // monitors currently held by this thread 114 115 // Link lists fields 116 struct thread_desc * next; // instrusive link field for threads 117 118 27 #define unlikely(x) __builtin_expect(!!(x), 0) 28 #define thread_local _Thread_local 29 30 typedef void (*fptr_t)(); 31 32 struct spinlock { 33 volatile int lock; 34 #ifdef __CFA_DEBUG__ 35 const char * prev_name; 36 void* prev_thrd; 37 #endif 38 }; 39 40 struct __thread_queue_t { 41 struct thread_desc * head; 42 struct thread_desc ** tail; 43 }; 44 45 struct __condition_stack_t { 46 struct 
__condition_criterion_t * top; 47 }; 48 49 #ifdef __CFORALL__ 50 extern "Cforall" { 51 void ?{}( struct __thread_queue_t & ); 52 void append( struct __thread_queue_t &, struct thread_desc * ); 53 struct thread_desc * pop_head( struct __thread_queue_t & ); 54 struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** ); 55 56 void ?{}( struct __condition_stack_t & ); 57 void push( struct __condition_stack_t &, struct __condition_criterion_t * ); 58 struct __condition_criterion_t * pop( struct __condition_stack_t & ); 59 60 void ?{}(spinlock & this); 61 void ^?{}(spinlock & this); 62 } 63 #endif 64 65 struct coStack_t { 66 // size of stack 67 unsigned int size; 68 69 // pointer to stack 70 void *storage; 71 72 // stack grows towards stack limit 73 void *limit; 74 75 // base of stack 76 void *base; 77 78 // address of cfa_context_t 79 void *context; 80 81 // address of top of storage 82 void *top; 83 84 // whether or not the user allocated the stack 85 bool userStack; 86 87 }; 88 89 enum coroutine_state { Halted, Start, Inactive, Active, Primed }; 90 91 struct coroutine_desc { 92 // stack information of the coroutine 93 struct coStack_t stack; 94 95 // textual name for coroutine/task, initialized by uC++ generated code 96 const char *name; 97 98 // copy of global UNIX variable errno 99 int errno_; 100 101 // current execution status for coroutine 102 enum coroutine_state state; 103 104 // first coroutine to resume this one 105 struct coroutine_desc * starter; 106 107 // last coroutine to resume this one 108 struct coroutine_desc * last; 109 }; 110 111 struct __waitfor_mask_t { 112 // the index of the accepted function, -1 if none 113 short * accepted; 114 115 // list of acceptable functions, null if any 116 struct __acceptable_t * clauses; 117 118 // number of acceptable functions 119 short size; 120 }; 121 122 struct monitor_desc { 123 // spinlock to protect internal data 124 struct spinlock lock; 125 126 // current owner of the monitor 127 struct 
thread_desc * owner; 128 129 // queue of threads that are blocked waiting for the monitor 130 struct __thread_queue_t entry_queue; 131 132 // stack of conditions to run next once we exit the monitor 133 struct __condition_stack_t signal_stack; 134 135 // monitor routines can be called recursively, we need to keep track of that 136 unsigned int recursion; 137 138 // mask used to know if some thread is waiting for something while holding the monitor 139 struct __waitfor_mask_t mask; 140 141 // node used to signal the dtor in a waitfor dtor 142 struct __condition_node_t * dtor_node; 143 }; 144 145 struct __monitor_group_t { 146 // currently held monitors 147 struct monitor_desc ** list; 148 149 // number of currently held monitors 150 short size; 151 152 // last function that acquired monitors 153 fptr_t func; 154 }; 155 156 struct thread_desc { 157 // Core threading fields 158 // coroutine body used to store context 159 struct coroutine_desc self_cor; 160 161 // monitor body used for mutual exclusion 162 struct monitor_desc self_mon; 163 164 // pointer to monitor with sufficient lifetime for current monitors 165 struct monitor_desc * self_mon_p; 166 167 // monitors currently held by this thread 168 struct __monitor_group_t monitors; 169 170 171 // Link lists fields 172 // instrusive link field for threads 173 struct thread_desc * next; 119 174 }; 120 175 121 176 #ifdef __CFORALL__ 122 177 extern "Cforall" { 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 178 static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) { 179 return this.list[index]; 180 } 181 182 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) { 183 if( (lhs.list != 0) != (rhs.list != 0) ) return false; 184 if( lhs.size != rhs.size ) return false; 185 if( lhs.func != rhs.func ) return false; 186 187 // Check that all the monitors match 188 for( int i = 0; i < lhs.size; i++ ) { 189 // If not a match, check next 
function 190 if( lhs[i] != rhs[i] ) return false; 191 } 192 193 return true; 194 } 195 } 196 #endif 142 197 143 198 #endif //_INVOKE_H_ … … 146 201 #define _INVOKE_PRIVATE_H_ 147 202 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 203 struct machine_context_t { 204 void *SP; 205 void *FP; 206 void *PC; 207 }; 208 209 // assembler routines that performs the context switch 210 extern void CtxInvokeStub( void ); 211 void CtxSwitch( void * from, void * to ) asm ("CtxSwitch"); 212 213 #if defined( __x86_64__ ) 214 #define CtxGet( ctx ) __asm__ ( \ 215 "movq %%rsp,%0\n" \ 216 "movq %%rbp,%1\n" \ 217 : "=rm" (ctx.SP), "=rm" (ctx.FP) ) 218 #elif defined( __i386__ ) 219 #define CtxGet( ctx ) __asm__ ( \ 220 "movl %%esp,%0\n" \ 221 "movl %%ebp,%1\n" \ 222 : "=rm" (ctx.SP), "=rm" (ctx.FP) ) 223 #endif 169 224 170 225 #endif //_INVOKE_PRIVATE_H_
Note: See TracChangeset for help on using the changeset viewer.