Changeset 87d13cd for src/libcfa
- Timestamp:
- Mar 21, 2017, 10:07:52 PM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- cb91437
- Parents:
- 829c907 (diff), a53e10a (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- src/libcfa
- Files:
- 11 edited
src/libcfa/concurrency/coroutine
r829c907 → r87d13cd

 };

-#define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X* this) { return &this->c; } void main(X* this)
+#define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X* this) { return &this->__cor; } void main(X* this)

 //-----------------------------------------------------------------------------
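The macro change means a user-defined coroutine must now embed its coroutine_desc under the name __cor rather than c. A minimal sketch, not taken from the changeset, of a coroutine written against the new field name; the Fibonacci type, its members, and the use of suspend() from the coroutine header are illustrative assumptions:

    struct Fibonacci {
        int fn;                       // value produced by each resumption
        coroutine_desc __cor;         // field name the new DECL_COROUTINE expects
    };

    DECL_COROUTINE(Fibonacci);        // generates get_coroutine() and declares main()

    void main(Fibonacci* this) {
        int f1 = 0, f2 = 1;
        for(;;) {
            this->fn = f1;            // publish the next value
            int next = f1 + f2;
            f1 = f2; f2 = next;
            suspend();                // yield back to the resumer
        }
    }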
src/libcfa/concurrency/invoke.c
r829c907 → r87d13cd

 extern void __suspend_internal(void);
-extern void __thread_signal_termination(struct thread_desc*);
+extern void __leave_monitor_desc( struct monitor_desc * this );

 void CtxInvokeCoroutine(
 …
 void CtxInvokeThread(
+    void (*dtor)(void *),
     void (*main)(void *),
     struct thread_desc *(*get_thread)(void *),
 …
     struct thread_desc* thrd = get_thread( this );
-    struct coroutine_desc* cor = &thrd->c;
+    struct coroutine_desc* cor = &thrd->cor;
+    struct monitor_desc* mon = &thrd->mon;
     cor->state = Active;
 …
     main( this );

-    __thread_signal_termination(thrd);
+    __leave_monitor_desc( mon );

     //Final suspend, should never return
 …
     struct FakeStack {
         void *fixedRegisters[3];          // fixed registers ebx, edi, esi (popped on 1st uSwitch, values unimportant)
-        uint32_t mxcr;
-        uint16_t fcw;                     // X97 FPU control word (preserved across function calls)
-        void *rturn;
+        uint32_t mxcr;                    // SSE Status and Control bits (control bits are preserved across function calls)
+        uint16_t fcw;                     // X97 FPU control word (preserved across function calls)
+        void *rturn;                      // where to go on return from uSwitch
         void *dummyReturn;                // fake return compiler would have pushed on call to uInvoke
         void *argument[3];                // for 16-byte ABI, 16-byte alignment starts here
 …
     ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->argument[0] = this;   // argument to invoke
     ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
+    ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80;        //Vol. 2A 3-520
+    ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;         //Vol. 1 8-7

 #elif defined( __x86_64__ )

     struct FakeStack {
-        void *fixedRegisters[5];
-        uint32_t mxcr;
-        uint16_t fcw;
-        void *rturn;
-        void *dummyReturn;
+        void *fixedRegisters[5];          // fixed registers rbx, r12, r13, r14, r15
+        uint32_t mxcr;                    // SSE Status and Control bits (control bits are preserved across function calls)
+        uint16_t fcw;                     // X97 FPU control word (preserved across function calls)
+        void *rturn;                      // where to go on return from uSwitch
+        void *dummyReturn;                // NULL return address to provide proper alignment
     };
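The two new stores seed the FPU state restored by the first context switch onto the fake stack: 0x1F80 is the documented power-up value of MXCSR and 0x037F is the x87 control word established by FINIT (the //Vol. comments point at the corresponding Intel manual sections). A stand-alone sketch, not part of the changeset, showing what those constants correspond to; it assumes a typical Linux/x86 process, where the ABI leaves both registers at these defaults at program start:

    #include <assert.h>
    #include <stdint.h>
    #include <xmmintrin.h>

    int main(void) {
        uint16_t fcw;
        __asm__ volatile ( "fnstcw %0" : "=m" (fcw) );  // read the current x87 control word
        // On a freshly started process these should still hold the defaults the
        // changeset writes into the FakeStack frame.
        assert( _mm_getcsr() == 0x1F80 );               // MXCSR default: all exceptions masked, round-to-nearest
        assert( fcw == 0x037F );                        // x87 FCW default: all exceptions masked, extended precision
        return 0;
    }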
src/libcfa/concurrency/invoke.h
r829c907 → r87d13cd

 #define unlikely(x) __builtin_expect(!!(x), 0)
 #define thread_local _Thread_local
-#define SCHEDULER_CAPACITY 10

 struct spinlock {
 …
 };

-struct signal_once {
-    volatile bool condition;
-    struct spinlock lock;
-    struct simple_thread_list blocked;
-};
-
 #ifdef __CFORALL__
 extern "Cforall" {
 …
     void ?{}(spinlock * this);
     void ^?{}(spinlock * this);
-
-    void ?{}(signal_once * this);
-    void ^?{}(signal_once * this);
 }
 #endif

 struct coStack_t {
-    unsigned int size;
-    void *storage;
-    void *limit;
-    void *base;
-    void *context;
-    void *top;
-    bool userStack;
+    unsigned int size;                    // size of stack
+    void *storage;                        // pointer to stack
+    void *limit;                          // stack grows towards stack limit
+    void *base;                           // base of stack
+    void *context;                        // address of cfa_context_t
+    void *top;                            // address of top of storage
+    bool userStack;                       // whether or not the user allocated the stack
 };
 …
 struct coroutine_desc {
-    struct coStack_t stack;
-    const char *name;                     // textual name for coroutine/task, initialized by uC++ generated code
-    int errno_;                           // copy of global UNIX variable errno
-    enum coroutine_state state;           // current execution status for coroutine
-    struct coroutine_desc *starter;       // first coroutine to resume this one
-    struct coroutine_desc *last;          // last coroutine to resume this one
+    struct coStack_t stack;               // stack information of the coroutine
+    const char *name;                     // textual name for coroutine/task, initialized by uC++ generated code
+    int errno_;                           // copy of global UNIX variable errno
+    enum coroutine_state state;           // current execution status for coroutine
+    struct coroutine_desc *starter;       // first coroutine to resume this one
+    struct coroutine_desc *last;          // last coroutine to resume this one
+};
+
+struct monitor_desc {
+    struct spinlock lock;
+    struct thread_desc * owner;
+    struct simple_thread_list entry_queue;
+    unsigned int recursion;
 };

 struct thread_desc {
-    struct coroutine_desc c;              // coroutine body used to store context
-    struct signal_once terminated;        // indicate if execuation state is not halted
-    struct thread_desc * next;
+    struct coroutine_desc cor;            // coroutine body used to store context
+    struct monitor_desc mon;              // monitor body used for mutual exclusion
+    struct thread_desc * next;            // instrusive link field for threads
 };
src/libcfa/concurrency/kernel
r829c907 → r87d13cd

 void lock( spinlock * );
 void unlock( spinlock * );
+
+struct signal_once {
+    volatile bool condition;
+    struct spinlock lock;
+    struct simple_thread_list blocked;
+};
+
+void ?{}(signal_once * this);
+void ^?{}(signal_once * this);

 void wait( signal_once * );
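signal_once and its constructors move from invoke.h into the public kernel header, next to the wait() declaration kept below. A rough usage sketch, not from the changeset, assuming the companion signal() routine (it is called on a signal_once elsewhere in this changeset) and one-shot semantics; done, producer_side, and consumer_side are made-up names:

    signal_once done;                 // one-shot condition: starts unsignalled

    void producer_side() {
        // ... finish some piece of work ...
        signal( &done );              // flips the condition and unblocks every waiter
    }

    void consumer_side() {
        wait( &done );                // blocks until signal() has been called; later waits return immediately
    }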
src/libcfa/concurrency/kernel.c
r829c907 → r87d13cd

 void ?{}( thread_desc * this, current_stack_info_t * info) {
-    (&this->c){ info };
+    (&this->cor){ info };
 …
 // Processor coroutine
 void ?{}(processorCtx_t * this, processor * proc) {
-    (&this->c){};
+    (&this->__cor){};
     this->proc = proc;
     proc->runner = this;
 …
 void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) {
-    (&this->c){ info };
+    (&this->__cor){ info };
     this->proc = proc;
     proc->runner = this;
 …
     processorCtx_t proc_cor_storage = { proc, &info };

-    LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", proc_cor_storage.c.stack.base);
+    LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", proc_cor_storage.__cor.stack.base);

     //Set global state
-    proc->current_coroutine = &proc->runner->c;
+    proc->current_coroutine = &proc->runner->__cor;
     proc->current_thread = NULL;
 …
     // back to here. Instead directly call the main since we already are on the
     // appropriate stack.
-    proc_cor_storage.c.state = Active;
+    proc_cor_storage.__cor.state = Active;
     main( &proc_cor_storage );
-    proc_cor_storage.c.state = Halted;
+    proc_cor_storage.__cor.state = Halted;

     // Main routine of the core returned, the core is now fully terminated
 …
     this_processor = systemProcessor;
     this_processor->current_thread = mainThread;
-    this_processor->current_coroutine = &mainThread->c;
+    this_processor->current_coroutine = &mainThread->cor;

     // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
src/libcfa/concurrency/kernel_private.h
r829c907 → r87d13cd

 struct processorCtx_t {
     processor * proc;
-    coroutine_desc c;
+    coroutine_desc __cor;
 };
src/libcfa/concurrency/monitor
r829c907 → r87d13cd

 #include "stdlib"

-struct monitor_desc {
-    spinlock lock;
-    thread_desc * owner;
-    simple_thread_list entry_queue;
-    unsigned int recursion;
-};
-
 static inline void ?{}(monitor_desc * this) {
     this->owner = 0;
     this->recursion = 0;
 }
-
-//Basic entering routine
-void enter(monitor_desc *);
-void leave(monitor_desc *);

 //Array entering routine
 …
 static inline int ?<?(monitor_desc* lhs, monitor_desc* rhs) {
     return ((intptr_t)lhs) < ((intptr_t)rhs);
-}
-
-static inline void ?{}( monitor_guard_t * this, monitor_desc ** m ) {
-    this->m = m;
-    this->count = 1;
-    enter( *this->m );
 }
src/libcfa/concurrency/monitor.c
r829c907 → r87d13cd

 #include "kernel_private.h"

-void enter(monitor_desc * this) {
-    lock( &this->lock );
-    thread_desc * thrd = this_thread();
+extern "C" {
+    void __enter_monitor_desc(monitor_desc * this) {
+        lock( &this->lock );
+        thread_desc * thrd = this_thread();

         if( !this->owner ) {
             //No one has the monitor, just take it
             this->owner = thrd;
             this->recursion = 1;
         }
         else if( this->owner == thrd) {
             //We already have the monitor, just not how many times we took it
             assert( this->recursion > 0 );
             this->recursion += 1;
         }
         else {
             //Some one else has the monitor, wait in line for it
             append( &this->entry_queue, thrd );
             ScheduleInternal( &this->lock );

             //ScheduleInternal will unlock spinlock, no need to unlock ourselves
             return;
         }

         unlock( &this->lock );
     }

-void leave(monitor_desc * this) {
-    lock( &this->lock );
+    void __leave_monitor_desc(monitor_desc * this) {
+        lock( &this->lock );

         thread_desc * thrd = this_thread();
         assert( thrd == this->owner );

         //Leaving a recursion level, decrement the counter
         this->recursion -= 1;

         //If we left the last level of recursion it means we are changing who owns the monitor
         thread_desc * new_owner = 0;
         if( this->recursion == 0) {
             //Get the next thread in the list
             new_owner = this->owner = pop_head( &this->entry_queue );

             //We are passing the monitor to someone else, which means recursion level is not 0
             this->recursion = new_owner ? 1 : 0;
         }

         unlock( &this->lock );

         //If we have a new owner, we need to wake-up the thread
         if( new_owner ) {
             ScheduleThread( new_owner );
         }
     }
+}
 …
 void enter(monitor_desc ** monitors, int count) {
     for(int i = 0; i < count; i++) {
-        // printf("%d\n", i);
-        enter( monitors[i] );
+        __enter_monitor_desc( monitors[i] );
     }
 }
 …
 void leave(monitor_desc ** monitors, int count) {
     for(int i = count - 1; i >= 0; i--) {
-        // printf("%d\n", i);
-        leave( monitors[i] );
+        __leave_monitor_desc( monitors[i] );
     }
 }
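The array routines now funnel into the extern "C" helpers, and the ?<? operator kept in the monitor header orders monitors by address. A rough sketch, not taken from the changeset, of how a caller could use them together; the monitors a and b and the function critical_section are made-up names, and putting both monitors into the same address order before entering is what keeps two such callers from deadlocking:

    monitor_desc a, b;                       // two monitors protecting shared state

    void critical_section() {
        monitor_desc * mons[2] = { &a, &b };
        if( mons[1] < mons[0] ) {            // ?<? from the monitor header: compares addresses
            monitor_desc * tmp = mons[0]; mons[0] = mons[1]; mons[1] = tmp;
        }
        enter( mons, 2 );                    // __enter_monitor_desc() on each, in ascending address order
        // ... touch the state protected by both monitors ...
        leave( mons, 2 );                    // released in the reverse order
    }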
src/libcfa/concurrency/thread
r829c907 → r87d13cd

 #include "coroutine"
+#include "monitor"

 //-----------------------------------------------------------------------------
 …
 // Anything that is resumed is a coroutine.
 trait is_thread(dtype T) {
+    void ^?{}(T* mutex this);
     void main(T* this);
     thread_desc* get_thread(T* this);
 };

-#define DECL_THREAD(X) thread_desc* get_thread(X* this) { return &this->t; } void main(X* this)
+#define DECL_THREAD(X) thread_desc* get_thread(X* this) { return &this->__thrd; } void main(X* this)

 forall( dtype T | is_thread(T) )
 static inline coroutine_desc* get_coroutine(T* this) {
-    return &get_thread(this)->c;
+    return &get_thread(this)->cor;
 }

-static inline coroutine_desc* get_coroutine(thread_desc* this) {
-    return &this->c;
+forall( dtype T | is_thread(T) )
+static inline monitor_desc* get_monitor(T * this) {
+    return &get_thread(this)->mon;
+}
+
+static inline coroutine_desc* get_coroutine(thread_desc * this) {
+    return &this->cor;
+}
+
+static inline monitor_desc* get_monitor(thread_desc * this) {
+    return &this->mon;
 }
 …
 void ?{}( scoped(T)* this, P params );

-forall( dtype T | sized(T) | is_thread(T) | { void ^?{}(T*); })
+forall( dtype T | sized(T) | is_thread(T) )
 void ^?{}( scoped(T)* this );
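A minimal sketch, not part of the changeset, of a user thread written against the new trait: the embedded descriptor must now be called __thrd, and is_thread additionally asks for a destructor that takes the thread as a mutex parameter. The Worker type and its body are invented for illustration:

    struct Worker {
        int id;                          // user data
        thread_desc __thrd;              // field name the new DECL_THREAD dereferences
    };

    DECL_THREAD(Worker);                 // defines get_thread() and declares main()

    void ^?{}(Worker* mutex this) {}     // satisfies the new ^?{}(T* mutex this) requirement

    void main(Worker* this) {
        // real work would go here; runs on the thread's own stack
        this->id += 1;
    }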
src/libcfa/concurrency/thread.c
r829c907 → r87d13cd

 void start( T* this );

-forall( dtype T | is_thread(T) )
-void stop( T* this );
-
 //-----------------------------------------------------------------------------
 // Thread ctors and dtors

 void ?{}(thread_desc* this) {
-    (&this->c){};
-    this->c.name = "Anonymous Coroutine";
-    (&this->terminated){};
+    (&this->cor){};
+    this->cor.name = "Anonymous Coroutine";
+    this->mon.owner = this;
+    this->mon.recursion = 1;
     this->next = NULL;
 }

 void ^?{}(thread_desc* this) {
-    ^(&this->c){};
+    ^(&this->cor){};
 }
 …
-forall( dtype T | sized(T) | is_thread(T) | { void ^?{}(T*); })
+forall( dtype T | sized(T) | is_thread(T) )
 void ^?{}( scoped(T)* this ) {
-    stop(&this->handle);
     ^(&this->handle){};
 }
 …
     ScheduleThread(thrd_h);
-}
-
-forall( dtype T | is_thread(T) )
-void stop( T* this ) {
-    wait( & get_thread(this)->terminated );
 }
 …
 }

-// C Helper to signal the termination of a thread_desc
-// Used in invoke.c
-extern "C" {
-    void __thread_signal_termination( thread_desc * this ) {
-        this->c.state = Halted;
-        LIB_DEBUG_PRINTF("Thread end : %p\n", this);
-        signal( &this->terminated );
-    }
-}
-
 // Local Variables: //
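With signal_once gone from thread_desc, termination is now expressed through the embedded monitor: the constructor above makes a new thread the owner of its own monitor, and CtxInvokeThread (see invoke.c earlier in this changeset) calls __leave_monitor_desc() once main() returns, handing the monitor to whoever is queued on it, such as a mutex destructor. A small sketch, not taken from the changeset, of the invariant the constructor establishes; check_new_thread is a made-up name:

    void check_new_thread( thread_desc * this ) {
        assert( this->mon.owner == this );   // set by ?{}(thread_desc*): the thread starts out holding its own monitor
        assert( this->mon.recursion == 1 );  // one level deep, dropped by __leave_monitor_desc() after main() returns
    }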
src/libcfa/iostream.c
r829c907 → r87d13cd

 // Created On  : Wed May 27 17:56:53 2015
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Mar 21 20:58:48 2017
-// Update Count     : 347
+// Last Modified On : Tue Mar 21 22:05:57 2017
+// Update Count     : 348
 //