Changeset a362f97 for src/libcfa
- Timestamp: Jan 27, 2017, 3:27:34 PM (8 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: c0aa336
- Parents: 6acb935 (diff), 0a86a30 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: src/libcfa/concurrency
- Files: 10 edited
src/libcfa/concurrency/CtxSwitch-i386.S
r6acb935 → ra362f97

      86  86          ret
      87  87
          88  .text
          89  .align 2
          90  .globl CtxGet
          91  CtxGet:
          92          movl %esp,SP_OFFSET(%eax)
          93          movl %ebp,FP_OFFSET(%eax)
          94
          95          ret
          96
      88  97  // Local Variables: //
      89  98  // compile-command: "make install" //
src/libcfa/concurrency/CtxSwitch-x86_64.S
r6acb935 → ra362f97

      84  84          jmp *%r12
      85  85
          86  .text
          87  .align 2
          88  .globl CtxGet
          89  CtxGet:
          90          movq %rsp,SP_OFFSET(%rdi)
          91          movq %rbp,FP_OFFSET(%rdi)
          92
          93          ret
          94
      86  95  // Local Variables: //
      87  96  // mode: c //
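
Both ports add a small CtxGet routine that copies the caller's stack and frame pointers into the structure passed as its argument, at SP_OFFSET and FP_OFFSET. A minimal sketch of calling it, assuming the machine_context_t from invoke.h exposes SP and FP fields laid out to match those offsets (kernel.c reads ctx.SP and ctx.FP after the call); capture_context is an illustrative name:

    void CtxGet( void * this ) asm ("CtxGet");   // declaration added to invoke.h

    void capture_context( void ) {
        machine_context_t ctx;
        CtxGet( &ctx );        // stores the caller's %esp/%rsp into ctx.SP
                               // and %ebp/%rbp into ctx.FP
    }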
src/libcfa/concurrency/coroutines
r6acb935 ra362f97 62 62 63 63 // Get current coroutine 64 extern coroutine * current_coroutine; //PRIVATE, never use directly 65 static inline coroutine * this_coroutine(void) { 66 return current_coroutine; 67 } 64 coroutine * this_coroutine(void); 68 65 69 66 // Private wrappers for context switch and stack creation … … 76 73 77 74 assertf( src->last != 0, 78 "Attempt to suspend coroutine %.256s(%p) that has never been resumed.\n"75 "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n" 79 76 "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.", 80 77 src->name, src ); 81 78 assertf( src->last->notHalted, 82 "Attempt by coroutine %.256s (%p) to suspend back to terminated coroutine %.256s(%p).\n"79 "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n" 83 80 "Possible cause is terminated coroutine's main routine has already returned.", 84 81 src->name, src, src->last->name, src->last ); -
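
The header now reaches the current coroutine through a this_coroutine() call (backed by the per-processor state in kernel.c) rather than a global, and the suspend assertions quote the coroutine names. As a reminder of the protocol those assertions enforce, here is a minimal sketch of a coroutine that only suspends from its main, assuming DECL_COROUTINE generates get_coroutine() for a wrapper whose handle field is named c (as kernel.c does for processorCtx_t); the counter type itself is purely illustrative:

    struct counter {
        coroutine c;        // handle expected by get_coroutine
        int value;
    };
    DECL_COROUTINE(counter)

    void main( counter * this ) {
        for ( ;; ) {
            this->value += 1;
            suspend();      // fine: src->last was set by the resume that got us here
        }
    }

    void use( counter * c ) {
        resume( c );        // first resume starts main; suspending before any
        resume( c );        // resume would trip the "never been resumed" assertion
    }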
src/libcfa/concurrency/coroutines.c
r6acb935 ra362f97 1 // -*- Mode: CFA -*- 1 2 // 2 3 // Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo … … 14 15 // 15 16 17 #include "coroutines" 18 16 19 extern "C" { 17 20 #include <stddef.h> … … 23 26 } 24 27 25 #include " coroutines"28 #include "kernel" 26 29 #include "libhdr.h" 27 30 28 31 #define __CFA_INVOKE_PRIVATE__ 29 32 #include "invoke.h" 33 34 extern processor * get_this_processor(); 30 35 31 36 //----------------------------------------------------------------------------- … … 35 40 #define MinStackSize 1000 36 41 static size_t pageSize = 0; // architecture pagesize HACK, should go in proper runtime singleton 37 38 //Extra private desctructor for the main39 //FIXME the main should not actually allocate a stack40 //Since the main is never resumed the extra stack does not cause41 //any problem but it is wasted memory42 void ?{}(coStack_t* this, size_t size);43 void ?{}(coroutine* this, size_t size);44 45 //Main coroutine46 //FIXME do not construct a stack for the main47 coroutine main_coroutine = { 1000 };48 49 //Current coroutine50 //Will need to be in TLS when multi-threading is added51 coroutine* current_coroutine = &main_coroutine;52 42 53 43 //----------------------------------------------------------------------------- … … 111 101 // is not inline (We can't inline Cforall in C) 112 102 void suspend_no_inline(void) { 113 LIB_DEBUG_PRINTF("Suspending back : to %p from %p\n", this_coroutine(), this_coroutine() ? this_coroutine()->last : (void*)-1);114 115 103 suspend(); 116 104 } … … 123 111 124 112 // set new coroutine that task is executing 125 current_coroutine = dst;113 get_this_processor()->current_coroutine = dst; 126 114 127 115 // context switch to specified coroutine -
src/libcfa/concurrency/invoke.c
r6acb935 ra362f97 1 // -*- Mode: C -*- 2 // 3 // Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo 4 // 5 // The contents of this file are covered under the licence agreement in the 6 // file "LICENCE" distributed with Cforall. 7 // 8 // invoke.c -- 9 // 10 // Author : Thierry Delisle 11 // Created On : Tue Jan 17 12:27:26 2016 12 // Last Modified By : Thierry Delisle 13 // Last Modified On : -- 14 // Update Count : 0 15 // 1 16 2 17 #include <stdbool.h> … … 14 29 15 30 extern void __suspend_no_inline__F___1(void); 16 extern void __s cheduler_remove__F_P9sthread_h__1(struct thread_h*);31 extern void __signal_termination__F_P7sthread__1(struct thread*); 17 32 18 33 void CtxInvokeCoroutine( … … 33 48 main( this ); 34 49 50 cor->state = Halt; 51 cor->notHalted = false; 52 35 53 //Final suspend, should never return 36 54 __suspend_no_inline__F___1(); … … 40 58 void CtxInvokeThread( 41 59 void (*main)(void *), 42 struct thread _h*(*get_thread)(void *),60 struct thread *(*get_thread)(void *), 43 61 void *this 44 62 ) { … … 47 65 __suspend_no_inline__F___1(); 48 66 49 struct thread _h* thrd = get_thread( this );67 struct thread* thrd = get_thread( this ); 50 68 struct coroutine* cor = &thrd->c; 51 69 cor->state = Active; … … 54 72 main( this ); 55 73 56 __s cheduler_remove__F_P9sthread_h__1(thrd);74 __signal_termination__F_P7sthread__1(thrd); 57 75 58 76 //Final suspend, should never return -
src/libcfa/concurrency/invoke.h
r6acb935 ra362f97 1 // -*- Mode: C -*- 2 // 3 // Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo 4 // 5 // The contents of this file are covered under the licence agreement in the 6 // file "LICENCE" distributed with Cforall. 7 // 8 // invoke.h -- 9 // 10 // Author : Thierry Delisle 11 // Created On : Tue Jan 17 12:27:26 2016 12 // Last Modified By : Thierry Delisle 13 // Last Modified On : -- 14 // Update Count : 0 15 // 16 1 17 #include <stdbool.h> 2 18 #include <stdint.h> … … 11 27 12 28 #define unlikely(x) __builtin_expect(!!(x), 0) 29 #define thread_local _Thread_local 30 #define SCHEDULER_CAPACITY 10 31 32 struct simple_thread_list { 33 struct thread * head; 34 struct thread ** tail; 35 }; 36 37 #ifdef __CFORALL__ 38 extern "Cforall" { 39 void ?{}( struct simple_thread_list * ); 40 void append( struct simple_thread_list *, struct thread * ); 41 struct thread * pop_head( struct simple_thread_list * ); 42 } 43 #endif 13 44 14 45 struct coStack_t { … … 35 66 }; 36 67 37 struct thread_h { 68 struct simple_lock { 69 struct simple_thread_list blocked; 70 }; 71 72 struct thread { 38 73 struct coroutine c; 74 struct simple_lock lock; 75 struct thread * next; 39 76 }; 40 77 … … 52 89 // assembler routines that performs the context switch 53 90 extern void CtxInvokeStub( void ); 54 void CtxSwitch( void *from, void *to ) asm ("CtxSwitch"); 91 void CtxSwitch( void * from, void * to ) asm ("CtxSwitch"); 92 void CtxGet( void * this ) asm ("CtxGet"); 55 93 56 94 #endif //_INVOKE_PRIVATE_H_ -
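
invoke.h now carries an intrusive, singly linked ready-queue type: simple_thread_list keeps a head pointer plus a pointer to the last next link, and each thread embeds the next field the list threads through. A minimal usage sketch (the constructor, append and pop_head implementations are in kernel.c below; the example function and its parameters are illustrative):

    void example( thread * a, thread * b ) {
        // a and b are assumed to arrive with their next links NULL
        simple_thread_list ready;              // ?{} sets head = NULL, tail = &head

        append( &ready, a );                   // *tail = a; tail = &a->next
        append( &ready, b );                   // O(1): tail always points at the last next link

        thread * first = pop_head( &ready );   // returns a and clears a->next; popping the
                                               // last element resets tail back to &head
    }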
src/libcfa/concurrency/kernel
r6acb935 ra362f97 20 20 #include <stdbool.h> 21 21 22 #include "invoke.h" 23 24 extern "C" { 25 #include <pthread.h> 26 } 27 28 //----------------------------------------------------------------------------- 29 // Cluster 30 struct cluster { 31 simple_thread_list ready_queue; 32 pthread_spinlock_t lock; 33 }; 34 35 void ?{}(cluster * this); 36 void ^?{}(cluster * this); 37 38 //----------------------------------------------------------------------------- 39 // Processor 22 40 struct processor { 23 struct proc_coroutine * cor; 24 unsigned int thread_index; 25 unsigned int thread_count; 26 struct thread_h * threads[10]; 27 bool terminated; 41 struct processorCtx_t * ctx; 42 cluster * cltr; 43 coroutine * current_coroutine; 44 thread * current_thread; 45 pthread_t kernel_thread; 46 simple_lock lock; 47 volatile bool terminated; 28 48 }; 29 49 30 50 void ?{}(processor * this); 51 void ?{}(processor * this, cluster * cltr); 31 52 void ^?{}(processor * this); 32 53 33 void scheduler_add( struct thread_h * thrd ); 34 void scheduler_remove( struct thread_h * thrd ); 35 void kernel_run( void ); 54 55 //----------------------------------------------------------------------------- 56 // Locks 57 58 void ?{}(simple_lock * this); 59 void ^?{}(simple_lock * this); 60 61 void lock( simple_lock * ); 62 void unlock( simple_lock * ); 63 64 struct pthread_spinlock_guard { 65 pthread_spinlock_t * lock; 66 }; 67 68 static inline void ?{}( pthread_spinlock_guard * this, pthread_spinlock_t * lock ) { 69 this->lock = lock; 70 pthread_spin_lock( this->lock ); 71 } 72 73 static inline void ^?{}( pthread_spinlock_guard * this ) { 74 pthread_spin_unlock( this->lock ); 75 } 76 77 // //Simple spinlock implementation from 78 // //http://stackoverflow.com/questions/1383363/is-my-spin-lock-implementation-correct-and-optimal 79 // //Not optimal but correct 80 // #define VOL 81 82 // struct simple_spinlock { 83 // VOL int lock; 84 // }; 85 86 // extern VOL int __sync_lock_test_and_set( VOL int *, VOL int); 87 // extern void __sync_synchronize(); 88 89 // static inline void lock( simple_spinlock * this ) { 90 // while (__sync_lock_test_and_set(&this->lock, 1)) { 91 // // Do nothing. This GCC builtin instruction 92 // // ensures memory barrier. 93 // } 94 // } 95 96 // static inline void unlock( simple_spinlock * this ) { 97 // __sync_synchronize(); // Memory barrier. 98 // this->lock = 0; 99 // } 36 100 37 101 #endif //KERNEL_H -
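
The new kernel header also introduces pthread_spinlock_guard, a block-scoped guard whose constructor acquires the spinlock and whose destructor releases it, so a critical section is just a nested block. A minimal sketch mirroring how thread_schedule in kernel.c uses it (the function name here is illustrative):

    void enqueue_ready( cluster * cltr, thread * thrd ) {
        {
            pthread_spinlock_guard guard = { &cltr->lock };   // ?{} calls pthread_spin_lock
            append( &cltr->ready_queue, thrd );
        }   // ^?{}( &guard ) runs at the end of the block and calls pthread_spin_unlock
    }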
src/libcfa/concurrency/kernel.c
r6acb935 ra362f97 20 20 //C Includes 21 21 #include <stddef.h> 22 extern "C" { 23 #include <sys/resource.h> 24 } 22 25 23 26 //CFA Includes … … 29 32 #include "invoke.h" 30 33 31 processor systemProcessorStorage = {}; 32 processor * systemProcessor = &systemProcessorStorage; 33 34 void ?{}(processor * this) { 35 this->cor = NULL; 36 this->thread_index = 0; 37 this->thread_count = 10; 38 this->terminated = false; 39 40 for(int i = 0; i < 10; i++) { 41 this->threads[i] = NULL; 42 } 43 44 LIB_DEBUG_PRINTF("Processor : ctor for core %p (core spots %d)\n", this, this->thread_count); 45 } 46 47 void ^?{}(processor * this) { 48 49 } 50 51 //----------------------------------------------------------------------------- 52 // Processor coroutine 53 struct proc_coroutine { 34 //----------------------------------------------------------------------------- 35 // Kernel storage 36 struct processorCtx_t { 54 37 processor * proc; 55 38 coroutine c; 56 39 }; 57 40 58 void ?{}(coroutine * this, processor * proc) { 59 this{}; 60 } 61 62 DECL_COROUTINE(proc_coroutine) 63 64 void ?{}(proc_coroutine * this, processor * proc) { 65 (&this->c){proc}; 41 DECL_COROUTINE(processorCtx_t) 42 43 #define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)] 44 45 KERNEL_STORAGE(processorCtx_t, systemProcessorCtx); 46 KERNEL_STORAGE(cluster, systemCluster); 47 KERNEL_STORAGE(processor, systemProcessor); 48 KERNEL_STORAGE(thread, mainThread); 49 KERNEL_STORAGE(machine_context_t, mainThread_context); 50 51 cluster * systemCluster; 52 processor * systemProcessor; 53 thread * mainThread; 54 55 void kernel_startup(void) __attribute__((constructor(101))); 56 void kernel_shutdown(void) __attribute__((destructor(101))); 57 58 //----------------------------------------------------------------------------- 59 // Global state 60 61 thread_local processor * this_processor; 62 63 processor * get_this_processor() { 64 return this_processor; 65 } 66 67 coroutine * this_coroutine(void) { 68 return this_processor->current_coroutine; 69 } 70 71 thread * this_thread(void) { 72 return this_processor->current_thread; 73 } 74 75 //----------------------------------------------------------------------------- 76 // Main thread construction 77 struct current_stack_info_t { 78 machine_context_t ctx; 79 unsigned int size; // size of stack 80 void *base; // base of stack 81 void *storage; // pointer to stack 82 void *limit; // stack grows towards stack limit 83 void *context; // address of cfa_context_t 84 void *top; // address of top of storage 85 }; 86 87 void ?{}( current_stack_info_t * this ) { 88 CtxGet( &this->ctx ); 89 this->base = this->ctx.FP; 90 this->storage = this->ctx.SP; 91 92 rlimit r; 93 int ret = getrlimit( RLIMIT_STACK, &r); 94 this->size = r.rlim_cur; 95 96 this->limit = (void *)(((intptr_t)this->base) - this->size); 97 this->context = &mainThread_context_storage; 98 this->top = this->base; 99 } 100 101 void ?{}( coStack_t * this, current_stack_info_t * info) { 102 this->size = info->size; 103 this->storage = info->storage; 104 this->limit = info->limit; 105 this->base = info->base; 106 this->context = info->context; 107 this->top = info->top; 108 this->userStack = true; 109 } 110 111 void ?{}( coroutine * this, current_stack_info_t * info) { 112 (&this->stack){ info }; 113 this->name = "Main Thread"; 114 this->errno_ = 0; 115 this->state = Inactive; 116 this->notHalted = true; 117 } 118 119 void ?{}( thread * this, current_stack_info_t * info) { 120 (&this->c){ info }; 121 } 122 123 
//----------------------------------------------------------------------------- 124 // Processor coroutine 125 void ?{}(processorCtx_t * this, processor * proc) { 126 (&this->c){}; 66 127 this->proc = proc; 67 proc->cor = this; 68 } 69 70 void ^?{}(proc_coroutine * this) { 71 ^(&this->c){}; 72 } 73 74 void CtxInvokeProcessor(processor * proc) { 75 proc_coroutine proc_cor_storage = {proc}; 76 resume( &proc_cor_storage ); 128 proc->ctx = this; 129 } 130 131 void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) { 132 (&this->c){ info }; 133 this->proc = proc; 134 proc->ctx = this; 135 } 136 137 void start(processor * this); 138 139 void ?{}(processor * this) { 140 this{ systemCluster }; 141 } 142 143 void ?{}(processor * this, cluster * cltr) { 144 this->cltr = cltr; 145 this->current_coroutine = NULL; 146 this->current_thread = NULL; 147 (&this->lock){}; 148 this->terminated = false; 149 150 start( this ); 151 } 152 153 void ?{}(processor * this, cluster * cltr, processorCtx_t * ctx) { 154 this->cltr = cltr; 155 this->current_coroutine = NULL; 156 this->current_thread = NULL; 157 (&this->lock){}; 158 this->terminated = false; 159 160 this->ctx = ctx; 161 LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", ctx); 162 ctx{ this }; 163 } 164 165 void ^?{}(processor * this) { 166 if( ! this->terminated ) { 167 LIB_DEBUG_PRINTF("Kernel : core %p signaling termination\n", this); 168 this->terminated = true; 169 lock( &this->lock ); 170 } 171 } 172 173 void ?{}(cluster * this) { 174 ( &this->ready_queue ){}; 175 pthread_spin_init( &this->lock, PTHREAD_PROCESS_PRIVATE ); 176 } 177 178 void ^?{}(cluster * this) { 179 pthread_spin_destroy( &this->lock ); 77 180 } 78 181 79 182 //----------------------------------------------------------------------------- 80 183 // Processor running routines 81 void main(proc _coroutine * cor);82 thread _h * nextThread(processor * this);83 void runThread(processor * this, thread _h* dst);184 void main(processorCtx_t * ctx); 185 thread * nextThread(cluster * this); 186 void runThread(processor * this, thread * dst); 84 187 void spin(processor * this, unsigned int * spin_count); 85 188 86 void main(proc _coroutine * cor) {87 processor * this ;88 this = cor->proc;89 90 thread _h* readyThread = NULL;189 void main(processorCtx_t * ctx) { 190 processor * this = ctx->proc; 191 LIB_DEBUG_PRINTF("Kernel : core %p starting\n", this); 192 193 thread * readyThread = NULL; 91 194 for( unsigned int spin_count = 0; ! 
this->terminated; spin_count++ ) { 92 195 93 readyThread = nextThread( this);196 readyThread = nextThread( this->cltr ); 94 197 95 198 if(readyThread) { … … 101 204 } 102 205 206 LIB_DEBUG_PRINTF("Kernel : core %p unlocking thread\n", this); 207 unlock( &this->lock ); 103 208 LIB_DEBUG_PRINTF("Kernel : core %p terminated\n", this); 104 209 } 105 210 106 thread_h * nextThread(processor * this) { 107 for(int i = 0; i < this->thread_count; i++) { 108 this->thread_index = (this->thread_index + 1) % this->thread_count; 109 110 thread_h * thrd = this->threads[this->thread_index]; 111 if(thrd) return thrd; 112 } 113 114 return NULL; 115 } 116 117 void runThread(processor * this, thread_h * dst) { 118 coroutine * proc_ctx = get_coroutine(this->cor); 211 void runThread(processor * this, thread * dst) { 212 coroutine * proc_ctx = get_coroutine(this->ctx); 119 213 coroutine * thrd_ctx = get_coroutine(dst); 120 214 thrd_ctx->last = proc_ctx; … … 122 216 // context switch to specified coroutine 123 217 // Which is now the current_coroutine 124 LIB_DEBUG_PRINTF("Kernel : switching to ctx %p (from %p, current %p)\n", thrd_ctx, proc_ctx, current_coroutine); 125 current_coroutine = thrd_ctx; 218 // LIB_DEBUG_PRINTF("Kernel : switching to ctx %p (from %p, current %p)\n", thrd_ctx, proc_ctx, current_coroutine); 219 this->current_thread = dst; 220 this->current_coroutine = thrd_ctx; 126 221 CtxSwitch( proc_ctx->stack.context, thrd_ctx->stack.context ); 127 current_coroutine = proc_ctx;128 LIB_DEBUG_PRINTF("Kernel : returned from ctx %p (to %p, current %p)\n", thrd_ctx, proc_ctx, current_coroutine);222 this->current_coroutine = proc_ctx; 223 // LIB_DEBUG_PRINTF("Kernel : returned from ctx %p (to %p, current %p)\n", thrd_ctx, proc_ctx, current_coroutine); 129 224 130 225 // when CtxSwitch returns we are back in the processor coroutine … … 135 230 } 136 231 137 //----------------------------------------------------------------------------- 138 // Kernel runner (Temporary) 139 140 void scheduler_add( struct thread_h * thrd ) { 141 LIB_DEBUG_PRINTF("Kernel : scheduling %p on core %p (%d spots)\n", thrd, systemProcessor, systemProcessor->thread_count); 142 for(int i = 0; i < systemProcessor->thread_count; i++) { 143 if(systemProcessor->threads[i] == NULL) { 144 systemProcessor->threads[i] = thrd; 145 return; 232 void * CtxInvokeProcessor(void * arg) { 233 processor * proc = (processor *) arg; 234 this_processor = proc; 235 // SKULLDUGGERY: We want to create a context for the processor coroutine 236 // which is needed for the 2-step context switch. However, there is no reason 237 // to waste the perfectly valid stack create by pthread. 
238 current_stack_info_t info; 239 machine_context_t ctx; 240 info.context = &ctx; 241 processorCtx_t proc_cor_storage = { proc, &info }; 242 243 proc->current_coroutine = &proc->ctx->c; 244 proc->current_thread = NULL; 245 246 LIB_DEBUG_PRINTF("Kernel : core %p created (%p)\n", proc, proc->ctx); 247 248 // LIB_DEBUG_PRINTF("Kernel : core base : %p \n", info.base ); 249 // LIB_DEBUG_PRINTF("Kernel : core storage : %p \n", info.storage ); 250 // LIB_DEBUG_PRINTF("Kernel : core size : %x \n", info.size ); 251 // LIB_DEBUG_PRINTF("Kernel : core limit : %p \n", info.limit ); 252 // LIB_DEBUG_PRINTF("Kernel : core context : %p \n", info.context ); 253 // LIB_DEBUG_PRINTF("Kernel : core top : %p \n", info.top ); 254 255 //We now have a proper context from which to schedule threads 256 257 // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 258 // resume it to start it like it normally would, it will just context switch 259 // back to here. Instead directly call the main since we already are on the 260 // appropriate stack. 261 proc_cor_storage.c.state = Active; 262 main( &proc_cor_storage ); 263 proc_cor_storage.c.state = Halt; 264 proc_cor_storage.c.notHalted = false; 265 266 LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->ctx); 267 268 return NULL; 269 } 270 271 void start(processor * this) { 272 LIB_DEBUG_PRINTF("Kernel : Starting core %p\n", this); 273 274 pthread_attr_t attributes; 275 pthread_attr_init( &attributes ); 276 277 pthread_create( &this->kernel_thread, &attributes, CtxInvokeProcessor, (void*)this ); 278 279 pthread_attr_destroy( &attributes ); 280 281 LIB_DEBUG_PRINTF("Kernel : core %p started\n", this); 282 } 283 284 //----------------------------------------------------------------------------- 285 // Scheduler routines 286 void thread_schedule( thread * thrd ) { 287 assertf( thrd->next == NULL, "Expected null got %p", thrd->next ); 288 289 pthread_spinlock_guard guard = { &systemProcessor->cltr->lock }; 290 append( &systemProcessor->cltr->ready_queue, thrd ); 291 } 292 293 thread * nextThread(cluster * this) { 294 pthread_spinlock_guard guard = { &this->lock }; 295 return pop_head( &this->ready_queue ); 296 } 297 298 //----------------------------------------------------------------------------- 299 // Kernel boot procedures 300 void kernel_startup(void) { 301 302 // SKULLDUGGERY: the mainThread steals the process main thread 303 // which will then be scheduled by the systemProcessor normally 304 LIB_DEBUG_PRINTF("Kernel : Starting\n"); 305 306 current_stack_info_t info; 307 308 // LIB_DEBUG_PRINTF("Kernel : core base : %p \n", info.base ); 309 // LIB_DEBUG_PRINTF("Kernel : core storage : %p \n", info.storage ); 310 // LIB_DEBUG_PRINTF("Kernel : core size : %x \n", info.size ); 311 // LIB_DEBUG_PRINTF("Kernel : core limit : %p \n", info.limit ); 312 // LIB_DEBUG_PRINTF("Kernel : core context : %p \n", info.context ); 313 // LIB_DEBUG_PRINTF("Kernel : core top : %p \n", info.top ); 314 315 // Start by initializing the main thread 316 mainThread = (thread *)&mainThread_storage; 317 mainThread{ &info }; 318 319 // Initialize the system cluster 320 systemCluster = (cluster *)&systemCluster_storage; 321 systemCluster{}; 322 323 // Initialize the system processor and the system processor ctx 324 // (the coroutine that contains the processing control flow) 325 systemProcessor = (processor *)&systemProcessor_storage; 326 systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage }; 327 328 // Add the main thread to the ready 
queue 329 // once resume is called on systemProcessor->ctx the mainThread needs to be scheduled like any normal thread 330 thread_schedule(mainThread); 331 332 //initialize the global state variables 333 this_processor = systemProcessor; 334 this_processor->current_thread = mainThread; 335 this_processor->current_coroutine = &mainThread->c; 336 337 // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX 338 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 339 // mainThread is on the ready queue when this call is made. 340 resume(systemProcessor->ctx); 341 342 343 344 // THE SYSTEM IS NOW COMPLETELY RUNNING 345 346 347 348 LIB_DEBUG_PRINTF("Kernel : Started\n--------------------------------------------------\n\n"); 349 } 350 351 void kernel_shutdown(void) { 352 LIB_DEBUG_PRINTF("\n--------------------------------------------------\nKernel : Shutting down\n"); 353 354 // SKULLDUGGERY: Notify the systemProcessor it needs to terminates. 355 // When its coroutine terminates, it return control to the mainThread 356 // which is currently here 357 systemProcessor->terminated = true; 358 suspend(); 359 360 // THE SYSTEM IS NOW COMPLETELY STOPPED 361 362 // Destroy the system processor and its context in reverse order of construction 363 // These were manually constructed so we need manually destroy them 364 ^(systemProcessor->ctx){}; 365 ^(systemProcessor){}; 366 367 // Final step, destroy the main thread since it is no longer needed 368 // Since we provided a stack to this taxk it will not destroy anything 369 ^(mainThread){}; 370 371 LIB_DEBUG_PRINTF("Kernel : Shutdown complete\n"); 372 } 373 374 //----------------------------------------------------------------------------- 375 // Locks 376 void ?{}( simple_lock * this ) { 377 ( &this->blocked ){}; 378 } 379 380 void ^?{}( simple_lock * this ) { 381 382 } 383 384 void lock( simple_lock * this ) { 385 { 386 pthread_spinlock_guard guard = { &systemCluster->lock }; //HUGE TEMP HACK which only works if we have a single cluster and is stupid 387 append( &this->blocked, this_thread() ); 388 } 389 suspend(); 390 } 391 392 void unlock( simple_lock * this ) { 393 thread * it; 394 while( it = pop_head( &this->blocked) ) { 395 thread_schedule( it ); 396 } 397 } 398 399 //----------------------------------------------------------------------------- 400 // Queues 401 void ?{}( simple_thread_list * this ) { 402 this->head = NULL; 403 this->tail = &this->head; 404 } 405 406 void append( simple_thread_list * this, thread * t ) { 407 assert( t->next == NULL ); 408 *this->tail = t; 409 this->tail = &t->next; 410 } 411 412 thread * pop_head( simple_thread_list * this ) { 413 thread * head = this->head; 414 if( head ) { 415 this->head = head->next; 416 if( !head->next ) { 417 this->tail = &this->head; 146 418 } 147 } 148 assert(false); 149 } 150 151 void scheduler_remove( struct thread_h * thrd ) { 152 LIB_DEBUG_PRINTF("Kernel : unscheduling %p from core %p\n", thrd, systemProcessor); 153 for(int i = 0; i < systemProcessor->thread_count; i++) { 154 if(systemProcessor->threads[i] == thrd) { 155 systemProcessor->threads[i] = NULL; 156 break; 157 } 158 } 159 for(int i = 0; i < systemProcessor->thread_count; i++) { 160 if(systemProcessor->threads[i] != NULL) { 161 return; 162 } 163 } 164 LIB_DEBUG_PRINTF("Kernel : terminating core %p\n\n\n", systemProcessor); 165 systemProcessor->terminated = true; 166 } 167 168 void kernel_run( void ) { 
169 CtxInvokeProcessor(systemProcessor); 170 } 171 419 head->next = NULL; 420 } 421 422 return head; 423 } 172 424 // Local Variables: // 173 425 // mode: c // -
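
One piece of kernel.c worth calling out: current_stack_info_t describes the stack the code is already running on by combining CtxGet with getrlimit(RLIMIT_STACK), so the main thread and each processor's pthread can be wrapped in a coroutine without allocating a second stack. A stripped-down restatement of what the ?{}(current_stack_info_t *) constructor does, assuming a downward-growing stack; describe_current_stack and its locals are illustrative names:

    extern "C" {
    #include <sys/resource.h>
    }

    void describe_current_stack( void ) {
        machine_context_t ctx;
        CtxGet( &ctx );                          // capture this frame's SP and FP

        rlimit r;
        getrlimit( RLIMIT_STACK, &r );           // soft limit = maximum stack size

        void * base  = ctx.FP;                   // usable base of the existing stack
        size_t size  = r.rlim_cur;
        void * limit = (void *)(((intptr_t)base) - size);   // stack grows down towards limit
    }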
src/libcfa/concurrency/threads
r6acb935 ra362f97 27 27 // Anything that implements this trait can be resumed. 28 28 // Anything that is resumed is a coroutine. 29 trait is_thread(dtype T /*| sized(T)*/) {29 trait is_thread(dtype T | sized(T)) { 30 30 void main(T* this); 31 thread_h* get_thread(T* this); 32 /*void ?{}(T*); 33 void ^?{}(T*);*/ 31 thread* get_thread(T* this); 34 32 }; 35 33 36 forall(otype T | is_thread(T) ) 34 #define DECL_THREAD(X) static inline thread* get_thread(X* this) { return &this->t; } void main(X* this); 35 36 forall( dtype T | sized(T) | is_thread(T) ) 37 37 static inline coroutine* get_coroutine(T* this) { 38 38 return &get_thread(this)->c; 39 39 } 40 40 41 static inline coroutine* get_coroutine(thread _h* this) {41 static inline coroutine* get_coroutine(thread* this) { 42 42 return &this->c; 43 43 } 44 44 45 thread * this_thread(void); 46 45 47 //----------------------------------------------------------------------------- 46 48 // Ctors and dtors 47 void ?{}(thread _h* this);48 void ^?{}(thread _h* this);49 void ?{}(thread* this); 50 void ^?{}(thread* this); 49 51 50 52 //----------------------------------------------------------------------------- 51 53 // thread runner 52 54 // Structure that actually start and stop threads 53 forall( otype T| is_thread(T) )54 struct thread {55 forall( dtype T | sized(T) | is_thread(T) ) 56 struct scoped { 55 57 T handle; 56 58 }; 57 59 58 forall( otype T | is_thread(T))59 void ?{}( thread(T)* this );60 forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T*); } ) 61 void ?{}( scoped(T)* this ); 60 62 61 forall( otype T, ttype P| is_thread(T) | { void ?{}(T*, P); } )62 void ?{}( thread(T)* this, P params );63 forall( dtype T, ttype P | sized(T) | is_thread(T) | { void ?{}(T*, P); } ) 64 void ?{}( scoped(T)* this, P params ); 63 65 64 forall( otype T | is_thread(T))65 void ^?{}( thread(T)* this );66 forall( dtype T | sized(T) | is_thread(T) | { void ^?{}(T*); } ) 67 void ^?{}( scoped(T)* this ); 66 68 67 //----------------------------------------------------------------------------- 68 // PRIVATE exposed because of inline 69 void yield(); 69 70 70 71 #endif //THREADS_H -
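
The runner type is renamed from thread(T) to scoped(T): constructing a scoped(T) starts the thread, and destroying it blocks until the thread terminates. A minimal sketch of declaring and running a thread against the new interface, assuming DECL_THREAD is used as the header defines it; the worker type, its fields, and do_work are illustrative:

    struct worker {
        thread t;                     // embedded handle; the generated get_thread returns &this->t
        int id;
    };
    DECL_THREAD(worker)

    void ?{}( worker * this, int id ) {
        (&this->t){};                 // construct the embedded thread handle
        this->id = id;
    }

    void main( worker * this ) {      // body runs on the worker's own stack once scheduled
        yield();                      // reschedule self and let another thread run
    }

    void do_work( void ) {
        scoped(worker) w = { 7 };     // ?{} constructs the handle and starts the thread
    }                                 // ^?{} waits for the thread to finish, then destroys it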
src/libcfa/concurrency/threads.c
r6acb935 ra362f97 23 23 #include "invoke.h" 24 24 25 #include <stdlib> 25 extern "C" { 26 #include <stddef.h> 27 } 28 29 extern processor * get_this_processor(); 26 30 27 31 //----------------------------------------------------------------------------- 28 32 // Forward declarations 29 forall( otype T| is_thread(T) )30 void start( thread(T)* this );33 forall( dtype T | sized(T) | is_thread(T) ) 34 void start( T* this ); 31 35 32 forall( otype T| is_thread(T) )33 void stop( thread(T)* this );36 forall( dtype T | sized(T) | is_thread(T) ) 37 void stop( T* this ); 34 38 35 39 //----------------------------------------------------------------------------- 36 40 // Thread ctors and dtors 37 41 38 void ?{}(thread _h* this) {42 void ?{}(thread* this) { 39 43 (&this->c){}; 44 this->c.name = "Anonymous Coroutine"; 45 (&this->lock){}; 46 this->next = NULL; 40 47 } 41 48 42 void ^?{}(thread _h* this) {49 void ^?{}(thread* this) { 43 50 ^(&this->c){}; 44 51 } 45 52 46 forall(otype T | is_thread(T) ) 47 void ?{}( thread(T)* this ) { 48 printf("thread() ctor\n"); 53 forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T*); } ) 54 void ?{}( scoped(T)* this ) { 49 55 (&this->handle){}; 50 start( this);56 start(&this->handle); 51 57 } 52 58 53 forall( otype T, ttype P| is_thread(T) | { void ?{}(T*, P); } )54 void ?{}( thread(T)* this, P params ) {59 forall( dtype T, ttype P | sized(T) | is_thread(T) | { void ?{}(T*, P); } ) 60 void ?{}( scoped(T)* this, P params ) { 55 61 (&this->handle){ params }; 56 start( this);62 start(&this->handle); 57 63 } 58 64 59 forall( otype T | is_thread(T))60 void ^?{}( thread(T)* this ) {61 stop( this);65 forall( dtype T | sized(T) | is_thread(T) | { void ^?{}(T*); } ) 66 void ^?{}( scoped(T)* this ) { 67 stop(&this->handle); 62 68 ^(&this->handle){}; 63 69 } … … 70 76 } 71 77 72 forall(otype T | is_thread(T)) 73 void start( thread(T)* this ) { 74 T* handle = &this->handle; 75 coroutine* thrd_c = get_coroutine(handle); 76 thread_h* thrd_h = get_thread (handle); 78 extern void thread_schedule( thread * ); 79 80 forall( dtype T | sized(T) | is_thread(T) ) 81 void start( T* this ) { 82 coroutine* thrd_c = get_coroutine(this); 83 thread* thrd_h = get_thread (this); 77 84 thrd_c->last = this_coroutine(); 78 current_coroutine = thrd_c;85 get_this_processor()->current_coroutine = thrd_c; 79 86 80 87 // LIB_DEBUG_PRINTF("Thread start : %p (t %p, c %p)\n", handle, thrd_c, thrd_h); 81 88 82 89 create_stack(&thrd_c->stack, thrd_c->stack.size); 83 CtxStart( handle, CtxInvokeThread);90 CtxStart(this, CtxInvokeThread); 84 91 CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context ); 85 92 86 scheduler_add(thrd_h);93 thread_schedule(thrd_h); 87 94 } 88 95 89 forall(otype T | is_thread(T) ) 90 void stop( thread(T)* this ) { 96 forall( dtype T | sized(T) | is_thread(T) ) 97 void stop( T* this ) { 98 thread* thrd = get_thread(this); 99 if( thrd->c.notHalted ) { 100 lock( &thrd->lock ); 101 } 102 } 91 103 104 void signal_termination( thread * this ) { 105 this->c.state = Halt; 106 this->c.notHalted = false; 107 unlock( &this->lock ); 108 } 109 110 void yield( void ) { 111 thread_schedule( this_thread() ); 112 suspend(); 92 113 } 93 114