Changeset 933f32f for libcfa/src/concurrency/invoke.h
- Timestamp: May 24, 2019, 10:19:41 AM (6 years ago)
- Branches: ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: d908563
- Parents: 6a9d4b4 (diff), 292642a (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
libcfa/src/concurrency/invoke.h
--- libcfa/src/concurrency/invoke.h (r6a9d4b4)
+++ libcfa/src/concurrency/invoke.h (r933f32f)

     extern thread_local struct KernelThreadData {
-        struct coroutine_desc * volatile this_coroutine;
         struct thread_desc * volatile this_thread;
         struct processor * volatile this_processor;
…
     } kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
 }
-
-static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
-static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
-static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
 #endif

-struct coStack_t {
-    size_t size;            // size of stack
-    void * storage;         // pointer to stack
-    void * limit;           // stack grows towards stack limit
-    void * base;            // base of stack
-    void * context;         // address of cfa_context_t
-    void * top;             // address of top of storage
-    bool userStack;         // whether or not the user allocated the stack
+struct __stack_context_t {
+    void * SP;
+    void * FP;
+};
+
+// low adresses  :            +----------------------+ <- start of allocation
+//                            | optional guard page  |
+//                            +----------------------+ <- __stack_t.limit
+//                            |                      |
+//                            |      /\ /\ /\        |
+//                            |      || || ||        |
+//                            |                      |
+//                            |    program stack     |
+//                            |                      |
+// __stack_info_t.storage ->  +----------------------+ <- __stack_t.base
+//                            |      __stack_t       |
+// high adresses :            +----------------------+ <- end of allocation
+
+struct __stack_t {
+    // stack grows towards stack limit
+    void * limit;
+
+    // base of stack
+    void * base;
+};
+
+struct __stack_info_t {
+    // pointer to stack
+    struct __stack_t * storage;
 };

…

 struct coroutine_desc {
+    // context that is switch during a CtxSwitch
+    struct __stack_context_t context;
+
     // stack information of the coroutine
-    struct coStack_t stack;
-
-    // textual name for coroutine/task , initialized by uC++ generated code
+    struct __stack_info_t stack;
+
+    // textual name for coroutine/task
     const char * name;
-
-    // copy of global UNIX variable errno
-    int errno_;

     // current execution status for coroutine
     enum coroutine_state state;
+
     // first coroutine to resume this one
     struct coroutine_desc * starter;
…
 struct thread_desc {
     // Core threading fields
+    // context that is switch during a CtxSwitch
+    struct __stack_context_t context;
+
+    // current execution status for coroutine
+    enum coroutine_state state;
+
+    //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
+
     // coroutine body used to store context
     struct coroutine_desc self_cor;
…
         struct thread_desc * prev;
     } node;
 };

 #ifdef __cforall
 extern "Cforall" {
+    static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+    static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
+    static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
+
     static inline thread_desc * & get_next( thread_desc & this ) {
         return this.next;
…
 // assembler routines that performs the context switch
 extern void CtxInvokeStub( void );
-void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
-
-#if defined( __i386 )
-#define CtxGet( ctx ) __asm__ ( \
-        "movl %%esp,%0\n" \
-        "movl %%ebp,%1\n" \
-        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
-#elif defined( __x86_64 )
-#define CtxGet( ctx ) __asm__ ( \
-        "movq %%rsp,%0\n" \
-        "movq %%rbp,%1\n" \
-        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
-#elif defined( __ARM_ARCH )
-#define CtxGet( ctx ) __asm__ ( \
-        "mov %0,%%sp\n" \
-        "mov %1,%%r11\n" \
-        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
-#else
-#error unknown hardware architecture
-#endif
+extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
+// void CtxStore ( void * this ) asm ("CtxStore");
+// void CtxRet ( void * dst ) asm ("CtxRet");

 #endif //_INVOKE_PRIVATE_H_
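To make the new three-struct split easier to follow, here is a minimal sketch, not part of the changeset, of how one allocation could be carved up to match the layout comment added in the diff: the program stack occupies the low end and the __stack_t bookkeeping record sits at the high-address end, with __stack_info_t.storage pointing at it. The helper name example_alloc_stack, the use of plain malloc, and the omission of the optional guard page, alignment handling, and error checking are all assumptions made for illustration only.

#include <stdlib.h>

/* These mirror the structs added by the changeset (see invoke.h above). */
struct __stack_context_t { void * SP; void * FP; };        /* registers saved across CtxSwitch  */
struct __stack_t         { void * limit; void * base; };   /* bookkeeping kept inside the block */
struct __stack_info_t    { struct __stack_t * storage; };  /* what coroutine_desc.stack holds   */

/* Hypothetical helper: one allocation holds the program stack with the
   __stack_t record at its high-address end, per the diagram in invoke.h.
   Guard page, alignment, and malloc failure are deliberately ignored. */
static void example_alloc_stack( struct __stack_info_t * info, size_t size ) {
    char * alloc = malloc( size );                          /* start of allocation (low addresses) */
    struct __stack_t * record =
        (struct __stack_t *)( alloc + size - sizeof(struct __stack_t) );
    record->limit = alloc;                                  /* stack grows down towards the limit  */
    record->base  = (void *)record;                         /* usable stack ends where __stack_t begins */
    info->storage = record;                                 /* __stack_info_t.storage -> __stack_t */
}

One apparent benefit of keeping the bookkeeping record inside the allocation is that the coroutine descriptor needs only the single storage pointer to reach both base and limit, which is consistent with coStack_t's separate size/storage/limit/base/context/top fields being dropped in this changeset.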