Changeset 992b089
- Timestamp:
- Feb 9, 2017, 10:13:07 PM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- aa9ee19, fb7dca0
- Parents:
- 6ef2d81 (diff), 132fad4 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- src
- Files:
- 1 added
- 5 edited
Legend:
- Unmodified (unmarked)
- Added (+)
- Removed (-)
src/libcfa/concurrency/coroutines
r6ef2d81 → r992b089

 	void ?{}(coStack_t * this);
 	void ?{}(coroutine * this);
+	void ?{}(coroutine * this, const char * name);
 	void ^?{}(coStack_t * this);
 	void ^?{}(coroutine * this);
src/libcfa/concurrency/coroutines.c
r6ef2d81 → r992b089

 	void ?{}(coroutine* this) {
 		this->name = "Anonymous Coroutine";
+		this->errno_ = 0;
+		this->state = Start;
+		this->notHalted = true;
+		this->starter = NULL;
+		this->last = NULL;
+	}
+
+	void ?{}(coroutine* this, const char * name) {
+		this->name = name;
 		this->errno_ = 0;
 		this->state = Start;
…
 		this->context = this->base;
 		this->top = (char *)this->context + cxtSize;
+
+		LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", this->base);
 	}
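The two constructors above initialize the same set of fields and differ only in where the name comes from. As an illustration only (plain C, not the CFA code in this changeset; the struct layout and helper names are assumed for the example), the same idea can be expressed with the anonymous form delegating to the named one, which avoids duplicating the field initialization:

#include <stddef.h>
#include <stdio.h>

enum State { Start };

struct coroutine {
	const char *name;
	int errno_;
	enum State state;
	_Bool notHalted;
	struct coroutine *starter;
	struct coroutine *last;
};

/* Named constructor: does all of the field initialization. */
static void coroutine_init_named(struct coroutine *this, const char *name) {
	this->name = name;
	this->errno_ = 0;
	this->state = Start;
	this->notHalted = 1;
	this->starter = NULL;
	this->last = NULL;
}

/* Anonymous constructor: delegates with a default name. */
static void coroutine_init(struct coroutine *this) {
	coroutine_init_named(this, "Anonymous Coroutine");
}

int main(void) {
	struct coroutine a, b;
	coroutine_init(&a);
	coroutine_init_named(&b, "Producer");
	printf("%s / %s\n", a.name, b.name);
	return 0;
}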
src/libcfa/concurrency/kernel
r6ef2d81 → r992b089

 	struct cluster {
 		simple_thread_list ready_queue;
-		pthread_spinlock_t lock;
+		// pthread_spinlock_t lock;
 	};
…
 	//-----------------------------------------------------------------------------
 	// Processor
+	enum ProcessorAction {
+		Reschedule,
+		NoAction
+	};
+
 	struct processor {
-		struct processorCtx_t * ctx;
+		struct processorCtx_t * runner;
 		cluster * cltr;
 		coroutine * current_coroutine;
…
 		simple_lock lock;
 		volatile bool terminated;
+		ProcessorAction thread_action;
 	};
…
 	void unlock( simple_lock * );

-	struct pthread_spinlock_guard {
-		pthread_spinlock_t * lock;
-	};
-
-	static inline void ?{}( pthread_spinlock_guard * this, pthread_spinlock_t * lock ) {
-		this->lock = lock;
-		pthread_spin_lock( this->lock );
-	}
-
-	static inline void ^?{}( pthread_spinlock_guard * this ) {
-		pthread_spin_unlock( this->lock );
-	}
-
-	// //Simple spinlock implementation from
-	// //http://stackoverflow.com/questions/1383363/is-my-spin-lock-implementation-correct-and-optimal
-	// //Not optimal but correct
-	// #define VOL
-
-	// struct simple_spinlock {
-	// 	VOL int lock;
-	// };
-
-	// extern VOL int __sync_lock_test_and_set( VOL int *, VOL int);
-	// extern void __sync_synchronize();
-
-	// static inline void lock( simple_spinlock * this ) {
-	// 	while (__sync_lock_test_and_set(&this->lock, 1)) {
-	// 		// Do nothing. This GCC builtin instruction
-	// 		// ensures memory barrier.
-	// 	}
-	// }
-
-	// static inline void unlock( simple_spinlock * this ) {
-	// 	__sync_synchronize(); // Memory barrier.
-	// 	this->lock = 0;
-	// }
-
 	#endif //KERNEL_H
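The new ProcessorAction enum and the processor's thread_action field set up a deferred-action handshake: a thread records what it wants (currently just Reschedule) before suspending, and the processor acts on the request only after control returns to its own context (see scheduleInternal in kernel.c and yield in threads.c below). The following is a minimal plain-C sketch of that pattern using POSIX ucontext; it is not the CFA runtime, and the scheduler loop and task names are invented for the example (builds with gcc on Linux/glibc):

#include <stdio.h>
#include <ucontext.h>

enum ProcessorAction { NoAction, Reschedule };

static ucontext_t scheduler_ctx, task_ctx;
static enum ProcessorAction thread_action = NoAction;
static char task_stack[64 * 1024];

static void task_yield(void) {
	thread_action = Reschedule;                  /* record the request, do not act on it here */
	swapcontext(&task_ctx, &scheduler_ctx);      /* switch back to the scheduler loop */
}

static void task_main(void) {
	printf("task: first run\n");
	task_yield();
	printf("task: resumed after being re-queued\n");
}                                                /* returning here activates uc_link */

int main(void) {
	getcontext(&task_ctx);
	task_ctx.uc_stack.ss_sp = task_stack;
	task_ctx.uc_stack.ss_size = sizeof task_stack;
	task_ctx.uc_link = &scheduler_ctx;           /* where to go when task_main returns */
	makecontext(&task_ctx, task_main, 0);

	for (;;) {
		thread_action = NoAction;                /* like scheduleInternal's reset */
		swapcontext(&scheduler_ctx, &task_ctx);  /* run the task until it yields or ends */
		if (thread_action != Reschedule) break;  /* task finished, nothing to re-queue */
		printf("scheduler: re-queueing task\n"); /* stand-in for thread_schedule( dst ) */
	}
	printf("scheduler: done\n");
	return 0;
}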
src/libcfa/concurrency/kernel.c
r6ef2d81 → r992b089

 	#include <stddef.h>
 	extern "C" {
+	#include <fenv.h>
 	#include <sys/resource.h>
 	}
…
 	#define __CFA_INVOKE_PRIVATE__
 	#include "invoke.h"
+
+	static volatile int lock;
+
+	void spin_lock( volatile int *lock ) {
+		for ( unsigned int i = 1;; i += 1 ) {
+			if ( *lock == 0 && __sync_lock_test_and_set_4( lock, 1 ) == 0 ) break;
+		}
+	}
+
+	void spin_unlock( volatile int *lock ) {
+		__sync_lock_release_4( lock );
+	}

 	//-----------------------------------------------------------------------------
…
 		rlimit r;
-		int ret = getrlimit( RLIMIT_STACK, &r);
+		getrlimit( RLIMIT_STACK, &r);
 		this->size = r.rlim_cur;
…
 		(&this->c){};
 		this->proc = proc;
-		proc->ctx = this;
+		proc->runner = this;
 	}
…
 		(&this->c){ info };
 		this->proc = proc;
-		proc->ctx = this;
+		proc->runner = this;
 	}
…
-	void ?{}(processor * this, cluster * cltr, processorCtx_t * ctx) {
+	void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
 		this->cltr = cltr;
 		this->current_coroutine = NULL;
…
 		this->terminated = false;

-		this->ctx = ctx;
-		LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", ctx);
-		ctx{ this };
+		this->runner = runner;
+		LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", runner);
+		runner{ this };
 	}
…
 	void ?{}(cluster * this) {
 		( &this->ready_queue ){};
-		pthread_spin_init( &this->lock, PTHREAD_PROCESS_PRIVATE );
+		lock = 0;
 	}

 	void ^?{}(cluster * this) {
-		pthread_spin_destroy( &this->lock );
 	}
…
 	//-----------------------------------------------------------------------------
 	// Processor running routines
-	void main(processorCtx_t * ctx);
+	void main(processorCtx_t *);
 	thread * nextThread(cluster * this);
 	void scheduleInternal(processor * this, thread * dst);
 	void spin(processor * this, unsigned int * spin_count);
-
-	void main(processorCtx_t * ctx) {
-		processor * this = ctx->proc;
+	void thread_schedule( thread * thrd );
+
+	//Main of the processor contexts
+	void main(processorCtx_t * runner) {
+		processor * this = runner->proc;
 		LIB_DEBUG_PRINTF("Kernel : core %p starting\n", this);
+
+		fenv_t envp;
+		fegetenv( &envp );
+		LIB_DEBUG_PRINTF("Kernel : mxcsr %x\n", envp.__mxcsr);

 		thread * readyThread = NULL;
…
 	// from the processor coroutine to the target thread
 	void scheduleInternal(processor * this, thread * dst) {
+		this->thread_action = NoAction;
+
 		// coroutine * proc_ctx = get_coroutine(this->ctx);
 		// coroutine * thrd_ctx = get_coroutine(dst);
…
 		// // when ThreadCtxSwitch returns we are back in the processor coroutine

-		coroutine * proc_ctx = get_coroutine(this->ctx);
+		coroutine * proc_ctx = get_coroutine(this->runner);
 		coroutine * thrd_ctx = get_coroutine(dst);
 		thrd_ctx->last = proc_ctx;
…
 		// context switch to specified coroutine
 		// Which is now the current_coroutine
-		LIB_DEBUG_PRINTF("Kernel : switching to ctx %p (from %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);
+		// LIB_DEBUG_PRINTF("Kernel : switching to ctx %p (from %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);
 		this->current_thread = dst;
 		this->current_coroutine = thrd_ctx;
 		CtxSwitch( proc_ctx->stack.context, thrd_ctx->stack.context );
 		this->current_coroutine = proc_ctx;
-		LIB_DEBUG_PRINTF("Kernel : returned from ctx %p (to %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);
+		// LIB_DEBUG_PRINTF("Kernel : returned from ctx %p (to %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);

 		// when CtxSwitch returns we are back in the processor coroutine
+		if(this->thread_action == Reschedule) {
+			thread_schedule( dst );
+		}
 	}
…
 		processorCtx_t proc_cor_storage = { proc, &info };

+		LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", proc_cor_storage.c.stack.base);
+
 		//Set global state
-		proc->current_coroutine = &proc->ctx->c;
+		proc->current_coroutine = &proc->runner->c;
 		proc->current_thread = NULL;
…
 		//We now have a proper context from which to schedule threads
-		LIB_DEBUG_PRINTF("Kernel : core %p created (%p)\n", proc, proc->ctx);
+		LIB_DEBUG_PRINTF("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

 		// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
…
 		// Main routine of the core returned, the core is now fully terminated
-		LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->ctx);
+		LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->runner);

 		return NULL;
…
 		LIB_DEBUG_PRINTF("Kernel : Starting core %p\n", this);

-		pthread_attr_t attributes;
-		pthread_attr_init( &attributes );
-
-		pthread_create( &this->kernel_thread, &attributes, CtxInvokeProcessor, (void*)this );
-
-		pthread_attr_destroy( &attributes );
+		// pthread_attr_t attributes;
+		// pthread_attr_init( &attributes );
+
+		pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
+
+		// pthread_attr_destroy( &attributes );

 		LIB_DEBUG_PRINTF("Kernel : core %p started\n", this);
…
 		assertf( thrd->next == NULL, "Expected null got %p", thrd->next );

-		pthread_spinlock_guard guard = { &systemProcessor->cltr->lock };
+		spin_lock( &lock );
 		append( &systemProcessor->cltr->ready_queue, thrd );
+		spin_unlock( &lock );
 	}

 	thread * nextThread(cluster * this) {
-		pthread_spinlock_guard guard = { &this->lock };
-		return pop_head( &this->ready_queue );
+		spin_lock( &lock );
+		thread * head = pop_head( &this->ready_queue );
+		spin_unlock( &lock );
+		return head;
 	}
…
 	// Kernel boot procedures
 	void kernel_startup(void) {
-
+		LIB_DEBUG_PRINTF("Kernel : Starting\n");
+
+		// Start by initializing the main thread
 		// SKULLDUGGERY: the mainThread steals the process main thread
 		// which will then be scheduled by the systemProcessor normally
-		LIB_DEBUG_PRINTF("Kernel : Starting\n");
 		mainThread = (thread *)&mainThread_storage;
 		current_stack_info_t info;
-
-		// LIB_DEBUG_PRINTF("Kernel : core base : %p \n", info.base );
-		// LIB_DEBUG_PRINTF("Kernel : core storage : %p \n", info.storage );
-		// LIB_DEBUG_PRINTF("Kernel : core size : %x \n", info.size );
-		// LIB_DEBUG_PRINTF("Kernel : core limit : %p \n", info.limit );
-		// LIB_DEBUG_PRINTF("Kernel : core context : %p \n", info.context );
-		// LIB_DEBUG_PRINTF("Kernel : core top : %p \n", info.top );
-
-		// Start by initializing the main thread
-		mainThread = (thread *)&mainThread_storage;
 		mainThread{ &info };
…
 		// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
 		// mainThread is on the ready queue when this call is made.
-		resume(systemProcessor->ctx);
+		resume(systemProcessor->runner);

 		// THE SYSTEM IS NOW COMPLETELY RUNNING
 		LIB_DEBUG_PRINTF("Kernel : Started\n--------------------------------------------------\n\n");
 	}
…
 		// Destroy the system processor and its context in reverse order of construction
 		// These were manually constructed so we need manually destroy them
-		^(systemProcessor->ctx){};
+		^(systemProcessor->runner){};
 		^(systemProcessor){};
…
 	void lock( simple_lock * this ) {
 		{
-			pthread_spinlock_guard guard = { &systemCluster->lock }; //HUGE TEMP HACK which only works if we have a single cluster and is stupid
+			spin_lock( &lock );
 			append( &this->blocked, this_thread() );
+			spin_unlock( &lock );
 		}
 		suspend();
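The new spin_lock/spin_unlock above replace the per-cluster pthread spinlock with a single global test-and-set lock built on GCC's __sync builtins, and pthread_create now passes NULL to request the default thread attributes. Below is a self-contained C demo of the same test-and-test-and-set / release pattern; it uses the generic __sync_lock_test_and_set and __sync_lock_release builtins rather than the _4-suffixed variants, and the thread count and counter are invented for the demo (build with gcc -pthread):

#include <pthread.h>
#include <stdio.h>

static volatile int lock = 0;
static long counter = 0;

static void spin_lock(volatile int *l) {
	/* Spin until the lock word is observed free and the atomic test-and-set acquires it. */
	while (*l != 0 || __sync_lock_test_and_set(l, 1) != 0)
		;  /* busy-wait */
}

static void spin_unlock(volatile int *l) {
	__sync_lock_release(l);  /* release barrier + store 0 */
}

static void *worker(void *arg) {
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		spin_lock(&lock);
		counter++;           /* critical section */
		spin_unlock(&lock);
	}
	return NULL;
}

int main(void) {
	pthread_t t[4];
	/* NULL attributes request the defaults, as in the pthread_create change above. */
	for (int i = 0; i < 4; i++) pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++) pthread_join(t[i], NULL);
	printf("counter = %ld (expected 400000)\n", counter);
	return 0;
}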
src/libcfa/concurrency/threads.c
r6ef2d81 → r992b089

 	extern "C" {
+	#include <fenv.h>
 	#include <stddef.h>
 	}
…
 	CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );

+	fenv_t envp;
+	fegetenv( &envp );
+	LIB_DEBUG_PRINTF("Thread : mxcsr %x\n", envp.__mxcsr);
 	LIB_DEBUG_PRINTF("Thread started : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
…
 	void yield( void ) {
-		thread_schedule( this_thread() );
+		get_this_processor()->thread_action = Reschedule;
 		suspend();
 	}
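Both kernel.c and threads.c now read the floating-point environment and print envp.__mxcsr, presumably to verify that the SSE control/status state is intact in the new execution context. A minimal standalone sketch of that check is below; it assumes Linux/glibc on x86-64, where __mxcsr is a glibc-specific member of fenv_t (on other platforms the field does not exist):

#include <fenv.h>
#include <stdio.h>

int main(void) {
	fenv_t envp;
	fegetenv(&envp);                       /* snapshot the current FP environment */
#if defined(__x86_64__) && defined(__GLIBC__)
	/* __mxcsr is a glibc-specific member of fenv_t on x86-64. */
	printf("mxcsr = %x\n", envp.__mxcsr);
#else
	(void)envp;
	puts("mxcsr field not available on this platform");
#endif
	return 0;
}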