Changeset ca37445 for src/libcfa/concurrency
- Timestamp: Apr 10, 2018, 3:31:07 PM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 33f5b57
- Parents: 9d1e3f7 (diff), 8ad6533 (diff)
- Location: src/libcfa/concurrency
- Files: 13 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links next to each parent to see all the changes relative to that parent.
Legend:
- unchanged context lines are shown indented
- lines prefixed with '-' were removed
- lines prefixed with '+' were added
src/libcfa/concurrency/alarm.c
(diff r9d1e3f7 → rca37445)

      // Created On       : Fri Jun 2 11:31:25 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Jul 21 22:35:18 2017
    - // Update Count     : 1
    + // Last Modified On : Mon Apr 9 13:36:18 2018
    + // Update Count     : 61
      //
…
      #include "preemption.h"

    - static inline void ?{}( itimerval & this, __cfa_time_t * alarm ) with( this ) {
    -     it_value.tv_sec = alarm->val / (1`cfa_s).val;                                   // seconds
    -     it_value.tv_usec = max( (alarm->val % (1`cfa_s).val) / (1`cfa_us).val, 1000 );  // microseconds
    -     it_interval.tv_sec = 0;
    -     it_interval.tv_usec = 0;
    - }
    -
    - static inline void ?{}( __cfa_time_t & this, timespec * curr ) {
    -     uint64_t secs  = curr->tv_sec;
    -     uint64_t nsecs = curr->tv_nsec;
    -     this.val = from_s(secs).val + nsecs;
    - }
    -
      //=============================================================================================
      // Clock logic
      //=============================================================================================

    - __cfa_time_t __kernel_get_time() {
    + Time __kernel_get_time() {
          timespec curr;
    -     clock_gettime( CLOCK_REALTIME, &curr );
    -     return (__cfa_time_t){ &curr };
    +     clock_gettime( CLOCK_MONOTONIC_RAW, &curr );  // CLOCK_REALTIME
    +     return (Time){ curr };
      }

    - void __kernel_set_timer( __cfa_time_t alarm ) {
    -     itimerval val = { &alarm };
    -     setitimer( ITIMER_REAL, &val, NULL );
    + void __kernel_set_timer( Duration alarm ) {
    +     setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
      }
…
    - void ?{}( alarm_node_t & this, thread_desc * thrd, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s ) with( this ) {
    + void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period ) with( this ) {
          this.thrd = thrd;
          this.alarm = alarm;
…
      }

    - void ?{}( alarm_node_t & this, processor * proc, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s ) with( this ) {
    + void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period ) with( this ) {
          this.proc = proc;
          this.alarm = alarm;
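The noteworthy change here is the clock source: CLOCK_REALTIME can jump backwards or forwards when the wall clock is adjusted (NTP, settimeofday), which would reorder pending alarms, while CLOCK_MONOTONIC_RAW only moves forward. A minimal plain-C sketch of the read the new __kernel_get_time performs, assuming (as the removed __cfa_time_t conversion code suggests) that a time value is a single nanosecond count:

```c
#include <stdint.h>
#include <time.h>

// Sketch only: fold a timespec into one nanosecond count, the representation
// hinted at by the removed conversion (from_s(secs).val + nsecs).
static uint64_t kernel_get_time_ns( void ) {
    struct timespec curr;
    clock_gettime( CLOCK_MONOTONIC_RAW, &curr );  // never jumps, unlike CLOCK_REALTIME
    return (uint64_t)curr.tv_sec * 1000000000ull + (uint64_t)curr.tv_nsec;
}
```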
src/libcfa/concurrency/alarm.h
(diff r9d1e3f7 → rca37445)

      // Created On       : Fri Jun 2 11:31:25 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Sat Jul 22 09:59:27 2017
    - // Update Count     : 3
    + // Last Modified On : Mon Mar 26 16:25:41 2018
    + // Update Count     : 11
      //
…
      #include <assert.h>

    - #include "bits/cfatime.h"
    + #include "time"

      struct thread_desc;
…
    - __cfa_time_t __kernel_get_time();
    - void __kernel_set_timer( __cfa_time_t alarm );
    + Time __kernel_get_time();
    + void __kernel_set_timer( Duration alarm );
…
      struct alarm_node_t {
    -     __cfa_time_t alarm;       // time when alarm goes off
    -     __cfa_time_t period;      // if > 0 => period of alarm
    +     Time alarm;               // time when alarm goes off
    +     Duration period;          // if > 0 => period of alarm
          alarm_node_t * next;      // intrusive link list field
…
      typedef alarm_node_t ** __alarm_it_t;

    - void ?{}( alarm_node_t & this, thread_desc * thrd, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s );
    - void ?{}( alarm_node_t & this, processor * proc, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s );
    + void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period );
    + void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period );
      void ^?{}( alarm_node_t & this );
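The reason for splitting the old __cfa_time_t into two types is the standard point-versus-interval distinction: an alarm fires at a Time (an instant) and repeats every Duration (an interval), and with separate types the two constructor arguments above can no longer be swapped silently. A hedged sketch of the distinction in illustrative C (not the CFA time library):

```c
#include <stdint.h>

typedef struct { int64_t ns; } Time;      // absolute instant, e.g. nanoseconds since some epoch
typedef struct { int64_t ns; } Duration;  // signed interval between two instants

// Only some combinations are meaningful, and distinct types enforce that:
static Time     add_dur( Time t, Duration d ) { return (Time){ t.ns + d.ns }; }      // Time + Duration -> Time
static Duration elapsed( Time a, Time b )     { return (Duration){ a.ns - b.ns }; }  // Time - Time -> Duration
```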
src/libcfa/concurrency/coroutine
(diff r9d1e3f7 → rca37445)

      // Created On       : Mon Nov 28 12:27:26 2016
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Wed Aug 30 07:58:29 2017
    - // Update Count     : 3
    + // Last Modified On : Fri Mar 30 18:23:45 2018
    + // Update Count     : 8
      //
…
      }

    - // Get current coroutine
    - extern thread_local coroutine_desc * volatile this_coroutine;
    -
      // Private wrappers for context switch and stack creation
      extern void CoroutineCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
…
      // Suspend implementation inlined for performance
      static inline void suspend() {
    -     coroutine_desc * src = this_coroutine;            // optimization
    +     coroutine_desc * src = TL_GET( this_coroutine );  // optimization

          assertf( src->last != 0,
…
      forall(dtype T | is_coroutine(T))
      static inline void resume(T & cor) {
    -     coroutine_desc * src = this_coroutine;            // optimization
    +     coroutine_desc * src = TL_GET( this_coroutine );  // optimization
          coroutine_desc * dst = get_coroutine(cor);
…
      static inline void resume(coroutine_desc * dst) {
    -     coroutine_desc * src = this_coroutine;            // optimization
    +     coroutine_desc * src = TL_GET( this_coroutine );  // optimization

          // not resuming self ?
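The "// optimization" comments that survive this change mark a deliberate idiom: read the thread-local value once into a local, then work through the local, so later uses are plain register or stack accesses instead of repeated TLS lookups. A sketch with simplified names (the assertion message is illustrative, not the one elided from this header):

```c
// Illustrative only; the real suspend() is the inlined version in this header.
static inline void suspend_sketch( void ) {
    coroutine_desc * src = TL_GET( this_coroutine );   // one TLS read, cached in a local
    assertf( src->last != 0, "attempt to suspend a coroutine that was never resumed" );
    CoroutineCtxSwitch( src, src->last );              // both uses go through the cached pointer
}
```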
src/libcfa/concurrency/coroutine.c
(diff r9d1e3f7 → rca37445)

      // Created On       : Mon Nov 28 12:27:26 2016
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Thu Feb 8 16:10:31 2018
    - // Update Count     : 4
    + // Last Modified On : Fri Mar 30 17:20:57 2018
    + // Update Count     : 9
      //
…
      // Wrapper for co
      void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    -     verify( preemption_state.enabled || this_processor->do_terminate );
    +     verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
          disable_interrupts();
…
          // set new coroutine that task is executing
    -     this_coroutine = dst;
    +     TL_SET( this_coroutine, dst );

          // context switch to specified coroutine
…
          enable_interrupts( __cfaabi_dbg_ctx );
    -     verify( preemption_state.enabled || this_processor->do_terminate );
    +     verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
      } //ctxSwitchDirect
…
      void __leave_coroutine(void) {
    -     coroutine_desc * src = this_coroutine;            // optimization
    +     coroutine_desc * src = TL_GET( this_coroutine );  // optimization

          assertf( src->starter != 0,
src/libcfa/concurrency/invoke.h
(diff r9d1e3f7 → rca37445)

      // Created On       : Tue Jan 17 12:27:26 2016
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Feb 9 14:41:55 2018
    - // Update Count     : 6
    + // Last Modified On : Fri Mar 30 22:33:59 2018
    + // Update Count     : 30
      //
…
      #include "bits/defs.h"
      #include "bits/locks.h"
    +
    + #define TL_GET( member ) kernelThreadData.member
    + #define TL_SET( member, value ) kernelThreadData.member = value;

      #ifdef __cforall
…
      static inline struct thread_desc * & get_next( struct thread_desc & this );
      static inline struct __condition_criterion_t * & get_next( struct __condition_criterion_t & this );
    +
    + extern thread_local struct KernelThreadData {
    +     struct coroutine_desc * volatile this_coroutine;
    +     struct thread_desc * volatile this_thread;
    +     struct processor * volatile this_processor;
    +
    +     struct {
    +         volatile unsigned short disable_count;
    +         volatile bool enabled;
    +         volatile bool in_progress;
    +     } preemption_state;
    + } kernelThreadData;
      }
    +
    + static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
    + static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread ); }
    + static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
      #endif

      struct coStack_t {
    -     // size of stack
    -     size_t size;
    -
    -     // pointer to stack
    -     void *storage;
    -
    -     // stack grows towards stack limit
    -     void *limit;
    -
    -     // base of stack
    -     void *base;
    -
    -     // address of cfa_context_t
    -     void *context;
    -
    -     // address of top of storage
    -     void *top;
    -
    -     // whether or not the user allocated the stack
    -     bool userStack;
    +     size_t size;      // size of stack
    +     void * storage;   // pointer to stack
    +     void * limit;     // stack grows towards stack limit
    +     void * base;      // base of stack
    +     void * context;   // address of cfa_context_t
    +     void * top;       // address of top of storage
    +     bool userStack;   // whether or not the user allocated the stack
      };
…
      struct coroutine_desc {
    -     // stack information of the coroutine
    -     struct coStack_t stack;
    -
    -     // textual name for coroutine/task, initialized by uC++ generated code
    -     const char *name;
    -
    -     // copy of global UNIX variable errno
    -     int errno_;
    -
    -     // current execution status for coroutine
    -     enum coroutine_state state;
    -
    -     // first coroutine to resume this one
    -     struct coroutine_desc * starter;
    -
    -     // last coroutine to resume this one
    -     struct coroutine_desc * last;
    +     struct coStack_t stack;            // stack information of the coroutine
    +     const char * name;                 // textual name for coroutine/task, initialized by uC++ generated code
    +     int errno_;                        // copy of global UNIX variable errno
    +     enum coroutine_state state;        // current execution status for coroutine
    +     struct coroutine_desc * starter;   // first coroutine to resume this one
    +     struct coroutine_desc * last;      // last coroutine to resume this one
      };
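As far as this diff shows, the motivation for KernelThreadData is to funnel what used to be three separate thread_local variables, plus the preemption state, through one thread_local struct: a single TLS base address, and a single seam (TL_GET/TL_SET) behind which the access mechanism can later change without touching call sites. A self-contained C11 sketch of the same pattern, with the pointer types erased to keep it standalone:

```c
#include <stdbool.h>
#include <stddef.h>

// One thread-local struct instead of N thread-local variables.
struct KernelThreadData {
    void * this_coroutine;
    void * this_thread;
    void * this_processor;
    struct {
        volatile unsigned short disable_count;
        volatile bool enabled;
        volatile bool in_progress;
    } preemption_state;
};

static _Thread_local struct KernelThreadData kernelThreadData = {
    NULL, NULL, NULL, { 1, false, false }    // mirrors the initializer added in kernel.c
};

// Every access goes through one macro pair, so the representation can change later.
#define TL_GET( member )        kernelThreadData.member
#define TL_SET( member, value ) kernelThreadData.member = value
```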
src/libcfa/concurrency/kernel
(diff r9d1e3f7 → rca37445)

      // Created On       : Tue Jan 17 12:27:26 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Sat Jul 22 09:58:39 2017
    - // Update Count     : 2
    + // Last Modified On : Fri Mar 23 17:08:20 2018
    + // Update Count     : 3
      //
…
      #include "invoke.h"
    - #include "bits/cfatime.h"
    + #include "time"

      extern "C" {
…
          // Preemption rate on this cluster
    -     __cfa_time_t preemption_rate;
    +     Duration preemption_rate;
      };

    - extern __cfa_time_t default_preemption();
    + extern Duration default_preemption();

      void ?{} (cluster & this);
src/libcfa/concurrency/kernel.c
(diff r9d1e3f7 → rca37445)

      // Created On       : Tue Jan 17 12:27:26 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Thu Feb 8 23:52:19 2018
    - // Update Count     : 5
    + // Last Modified On : Fri Mar 30 18:26:11 2018
    + // Update Count     : 23
      //
…
      // Global state

    - thread_local coroutine_desc * volatile this_coroutine;
    - thread_local thread_desc * volatile this_thread;
    - thread_local processor * volatile this_processor;
    -
      // volatile thread_local bool preemption_in_progress = 0;
      // volatile thread_local bool preemption_enabled = false;
      // volatile thread_local unsigned short disable_preempt_count = 1;

    - volatile thread_local __cfa_kernel_preemption_state_t preemption_state = { false, false, 1 };
    + thread_local struct KernelThreadData kernelThreadData = {
    +     NULL,
    +     NULL,
    +     NULL,
    +     { 1, false, false }
    + };

      //-----------------------------------------------------------------------------
…
          terminate(&this);
          verify(this.do_terminate);
    -     verify( this_processor != &this);
    +     verify(TL_GET( this_processor ) != &this);
          P( terminated );
    -     verify( this_processor != &this);
    +     verify(TL_GET( this_processor ) != &this);
          pthread_join( kernel_thread, NULL );
      }
…
          if(readyThread)
          {
    -         verify( ! preemption_state.enabled );
    +         verify( ! TL_GET( preemption_state ).enabled );

              runThread(this, readyThread);

    -         verify( ! preemption_state.enabled );
    +         verify( ! TL_GET( preemption_state ).enabled );

              //Some actions need to be taken from the kernel
…
          //Update global state
    -     this_thread = dst;
    +     TL_SET( this_thread, dst );

          // Context Switch to the thread
…
      void returnToKernel() {
    -     coroutine_desc * proc_cor = get_coroutine( this_processor->runner);
    -     coroutine_desc * thrd_cor = this_thread->curr_cor = this_coroutine;
    +     coroutine_desc * proc_cor = get_coroutine(TL_GET( this_processor )->runner);
    +     coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
          ThreadCtxSwitch(thrd_cor, proc_cor);
      }
…
      void finishRunning(processor * this) with( this->finish ) {
          if( action_code == Release ) {
    -         verify( ! preemption_state.enabled );
    +         verify( ! TL_GET( preemption_state ).enabled );
              unlock( *lock );
          }
…
          }
          else if( action_code == Release_Schedule ) {
    -         verify( ! preemption_state.enabled );
    +         verify( ! TL_GET( preemption_state ).enabled );
              unlock( *lock );
              ScheduleThread( thrd );
          }
          else if( action_code == Release_Multi ) {
    -         verify( ! preemption_state.enabled );
    +         verify( ! TL_GET( preemption_state ).enabled );
              for(int i = 0; i < lock_count; i++) {
                  unlock( *locks[i] );
…
      void * CtxInvokeProcessor(void * arg) {
          processor * proc = (processor *) arg;
    -     this_processor = proc;
    -     this_coroutine = NULL;
    -     this_thread = NULL;
    -     preemption_state.enabled = false;
    -     preemption_state.disable_count = 1;
    +     TL_SET( this_processor, proc );
    +     TL_SET( this_coroutine, NULL );
    +     TL_SET( this_thread, NULL );
    +     TL_GET( preemption_state ).enabled = false;
    +     TL_GET( preemption_state ).disable_count = 1;
          // SKULLDUGGERY: We want to create a context for the processor coroutine
          // which is needed for the 2-step context switch. However, there is no reason
…
          //Set global state
    -     this_coroutine = get_coroutine(proc->runner);
    -     this_thread = NULL;
    +     TL_SET( this_coroutine, get_coroutine(proc->runner) );
    +     TL_SET( this_thread, NULL );

          //We now have a proper context from which to schedule threads
…
      void kernel_first_resume(processor * this) {
    -     coroutine_desc * src = this_coroutine;
    +     coroutine_desc * src = TL_GET( this_coroutine );
          coroutine_desc * dst = get_coroutine(this->runner);

    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          create_stack(&dst->stack, dst->stack.size);
          CtxStart(&this->runner, CtxInvokeCoroutine);

    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          dst->last = src;
…
          // set new coroutine that task is executing
    -     this_coroutine = dst;
    +     TL_SET( this_coroutine, dst );

          // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
…
          src->state = Active;

    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
      }
…
      // Scheduler routines
      void ScheduleThread( thread_desc * thrd ) {
    -     //      if( ! thrd ) return;
    +     // if( ! thrd ) return;
          verify( thrd );
          verify( thrd->self_cor.state != Halted );

    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

    -     with( * this_processor->cltr ) {
    +     with( *TL_GET( this_processor )->cltr ) {
              lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
              append( ready_queue, thrd );
…
          }

    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
      }

      thread_desc * nextThread(cluster * this) with( *this ) {
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
          lock( ready_queue_lock __cfaabi_dbg_ctx2 );
          thread_desc * head = pop_head( ready_queue );
          unlock( ready_queue_lock );
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
          return head;
      }
…
      void BlockInternal() {
          disable_interrupts();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
          returnToKernel();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
          enable_interrupts( __cfaabi_dbg_ctx );
      }

      void BlockInternal( __spinlock_t * lock ) {
          disable_interrupts();
    -     this_processor->finish.action_code = Release;
    -     this_processor->finish.lock = lock;
    -
    -     verify( ! preemption_state.enabled );
    +     TL_GET( this_processor )->finish.action_code = Release;
    +     TL_GET( this_processor )->finish.lock = lock;
    +
    +     verify( ! TL_GET( preemption_state ).enabled );
          returnToKernel();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          enable_interrupts( __cfaabi_dbg_ctx );
…
      void BlockInternal( thread_desc * thrd ) {
          disable_interrupts();
    -     this_processor->finish.action_code = Schedule;
    -     this_processor->finish.thrd = thrd;
    -
    -     verify( ! preemption_state.enabled );
    +     TL_GET( this_processor )->finish.action_code = Schedule;
    +     TL_GET( this_processor )->finish.thrd = thrd;
    +
    +     verify( ! TL_GET( preemption_state ).enabled );
          returnToKernel();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          enable_interrupts( __cfaabi_dbg_ctx );
…
          assert(thrd);
          disable_interrupts();
    -     this_processor->finish.action_code = Release_Schedule;
    -     this_processor->finish.lock = lock;
    -     this_processor->finish.thrd = thrd;
    -
    -     verify( ! preemption_state.enabled );
    +     TL_GET( this_processor )->finish.action_code = Release_Schedule;
    +     TL_GET( this_processor )->finish.lock = lock;
    +     TL_GET( this_processor )->finish.thrd = thrd;
    +
    +     verify( ! TL_GET( preemption_state ).enabled );
          returnToKernel();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          enable_interrupts( __cfaabi_dbg_ctx );
…
      void BlockInternal(__spinlock_t * locks [], unsigned short count) {
          disable_interrupts();
    -     this_processor->finish.action_code = Release_Multi;
    -     this_processor->finish.locks = locks;
    -     this_processor->finish.lock_count = count;
    -
    -     verify( ! preemption_state.enabled );
    +     TL_GET( this_processor )->finish.action_code = Release_Multi;
    +     TL_GET( this_processor )->finish.locks = locks;
    +     TL_GET( this_processor )->finish.lock_count = count;
    +
    +     verify( ! TL_GET( preemption_state ).enabled );
          returnToKernel();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          enable_interrupts( __cfaabi_dbg_ctx );
…
      void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
          disable_interrupts();
    -     this_processor->finish.action_code = Release_Multi_Schedule;
    -     this_processor->finish.locks = locks;
    -     this_processor->finish.lock_count = lock_count;
    -     this_processor->finish.thrds = thrds;
    -     this_processor->finish.thrd_count = thrd_count;
    -
    -     verify( ! preemption_state.enabled );
    +     TL_GET( this_processor )->finish.action_code = Release_Multi_Schedule;
    +     TL_GET( this_processor )->finish.locks = locks;
    +     TL_GET( this_processor )->finish.lock_count = lock_count;
    +     TL_GET( this_processor )->finish.thrds = thrds;
    +     TL_GET( this_processor )->finish.thrd_count = thrd_count;
    +
    +     verify( ! TL_GET( preemption_state ).enabled );
          returnToKernel();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          enable_interrupts( __cfaabi_dbg_ctx );
…
      void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    -     verify( ! preemption_state.enabled );
    -     this_processor->finish.action_code = thrd ? Release_Schedule : Release;
    -     this_processor->finish.lock = lock;
    -     this_processor->finish.thrd = thrd;
    +     verify( ! TL_GET( preemption_state ).enabled );
    +     TL_GET( this_processor )->finish.action_code = thrd ? Release_Schedule : Release;
    +     TL_GET( this_processor )->finish.lock = lock;
    +     TL_GET( this_processor )->finish.thrd = thrd;

          returnToKernel();
…
      // Kernel boot procedures
      void kernel_startup(void) {
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
          __cfaabi_dbg_print_safe("Kernel : Starting\n");
…
          //initialize the global state variables
    -     this_processor = mainProcessor;
    -     this_thread = mainThread;
    -     this_coroutine = &mainThread->self_cor;
    +     TL_SET( this_processor, mainProcessor );
    +     TL_SET( this_thread, mainThread );
    +     TL_SET( this_coroutine, &mainThread->self_cor );

          // Enable preemption
…
          // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
          // mainThread is on the ready queue when this call is made.
    -     kernel_first_resume( this_processor );
    +     kernel_first_resume( TL_GET( this_processor ) );
…
          __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );
          enable_interrupts( __cfaabi_dbg_ctx );
    -     verify( preemption_state.enabled );
    +     verify( TL_GET( preemption_state ).enabled );
      }
…
          __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

    -     verify( preemption_state.enabled );
    +     verify( TL_GET( preemption_state ).enabled );
          disable_interrupts();
    -     verify( ! preemption_state.enabled );
    +     verify( ! TL_GET( preemption_state ).enabled );

          // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
…
          // first task to abort ?
    -     if ( ! kernel_abort_called ) {                  // not first task to abort ?
    +     if ( ! kernel_abort_called ) {          // not first task to abort ?
              kernel_abort_called = true;
              unlock( kernel_abort_lock );
…
          }

    -     return this_thread;
    +     return TL_GET( this_thread );
      }
…
          __cfaabi_dbg_bits_write( abort_text, len );

    -     if ( thrd != this_coroutine ) {
    -         len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
    +     if ( thrd != TL_GET( this_coroutine ) ) {
    +         len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
              __cfaabi_dbg_bits_write( abort_text, len );
          }
…
      int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    -     return get_coroutine( this_thread ) == get_coroutine(mainThread) ? 4 : 2;
    +     return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
      }
…
          if ( count < 0 ) {
              // queue current task
    -         append( waiting, (thread_desc *) this_thread );
    +         append( waiting, (thread_desc *)TL_GET( this_thread ) );

              // atomically release spin lock and block
src/libcfa/concurrency/kernel_private.h
(diff r9d1e3f7 → rca37445)

      // Created On       : Mon Feb 13 12:27:26 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Sat Jul 22 09:58:09 2017
    - // Update Count     : 2
    + // Last Modified On : Thu Mar 29 14:06:40 2018
    + // Update Count     : 3
      //
…
      extern event_kernel_t * event_kernel;

    - extern thread_local coroutine_desc * volatile this_coroutine;
    - extern thread_local thread_desc * volatile this_thread;
    - extern thread_local processor * volatile this_processor;
    + //extern thread_local coroutine_desc * volatile this_coroutine;
    + //extern thread_local thread_desc * volatile this_thread;
    + //extern thread_local processor * volatile this_processor;

      // extern volatile thread_local bool preemption_in_progress;
src/libcfa/concurrency/monitor.c
(diff r9d1e3f7 → rca37445)

      // Created On       : Thd Feb 23 12:27:26 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Feb 16 14:49:53 2018
    - // Update Count     : 5
    + // Last Modified On : Fri Mar 30 14:30:26 2018
    + // Update Count     : 9
      //
…
          // Lock the monitor spinlock
          lock( this->lock __cfaabi_dbg_ctx2 );
    -     thread_desc * thrd = this_thread;
    +     thread_desc * thrd = TL_GET( this_thread );

          __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
…
          // Lock the monitor spinlock
          lock( this->lock __cfaabi_dbg_ctx2 );
    -     thread_desc * thrd = this_thread;
    +     thread_desc * thrd = TL_GET( this_thread );

          __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
…
          // Create the node specific to this wait operation
    -     wait_ctx_primed( this_thread, 0 )
    +     wait_ctx_primed( TL_GET( this_thread ), 0 )

          // Some one else has the monitor, wait for him to finish and then run
…
          __cfaabi_dbg_print_safe( "Kernel : blocking \n" );

    -     wait_ctx( this_thread, 0 )
    +     wait_ctx( TL_GET( this_thread ), 0 )
          this->dtor_node = &waiter;
…
          lock( this->lock __cfaabi_dbg_ctx2 );

    -     __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
    -
    -     verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );
    +     __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);
    +
    +     verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );

          // Leaving a recursion level, decrement the counter
…
      void __leave_dtor_monitor_desc( monitor_desc * this ) {
          __cfaabi_dbg_debug_do(
    -         if( this_thread != this->owner ) {
    -             abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
    +         if( TL_GET( this_thread ) != this->owner ) {
    +             abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
              }
              if( this->recursion != 1 ) {
…
          // Save previous thread context
    -     this.prev = this_thread->monitors;
    +     this.prev = TL_GET( this_thread )->monitors;

          // Update thread context (needed for conditions)
    -     ( this_thread->monitors){m, count, func};
    +     (TL_GET( this_thread )->monitors){m, count, func};

          // __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count);
…
          // Restore thread context
    -     this_thread->monitors = this.prev;
    +     TL_GET( this_thread )->monitors = this.prev;
…
          // Save previous thread context
    -     this.prev = this_thread->monitors;
    +     this.prev = TL_GET( this_thread )->monitors;

          // Update thread context (needed for conditions)
    -     ( this_thread->monitors){m, 1, func};
    +     (TL_GET( this_thread )->monitors){m, 1, func};

          __enter_monitor_dtor( this.m, func );
…
          // Restore thread context
    -     this_thread->monitors = this.prev;
    +     TL_GET( this_thread )->monitors = this.prev;
…
          // Create the node specific to this wait operation
    -     wait_ctx( this_thread, user_info );
    +     wait_ctx( TL_GET( this_thread ), user_info );

          // Append the current wait operation to the ones already queued on the condition
…
          //Some more checking in debug
          __cfaabi_dbg_debug_do(
    -         thread_desc * this_thrd = this_thread;
    +         thread_desc * this_thrd = TL_GET( this_thread );
              if ( this.monitor_count != this_thrd->monitors.size ) {
                  abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
…
          // Create the node specific to this wait operation
    -     wait_ctx_primed( this_thread, 0 )
    +     wait_ctx_primed( TL_GET( this_thread ), 0 )

          //save contexts
…
          // Create the node specific to this wait operation
    -     wait_ctx_primed( this_thread, 0 );
    +     wait_ctx_primed( TL_GET( this_thread ), 0 );

          // Save monitor states
…
          // Create the node specific to this wait operation
    -     wait_ctx_primed( this_thread, 0 );
    +     wait_ctx_primed( TL_GET( this_thread ), 0 );

          monitor_save;
…
          for( __lock_size_t i = 0; i < count; i++) {
    -         verify( monitors[i]->owner == this_thread );
    +         verify( monitors[i]->owner == TL_GET( this_thread ) );
          }
…
      static inline void brand_condition( condition & this ) {
    -     thread_desc * thrd = this_thread;
    +     thread_desc * thrd = TL_GET( this_thread );
          if( !this.monitors ) {
              // __cfaabi_dbg_print_safe( "Branding\n" );
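For orientation, every call site edited above sits inside the same monitor-entry shape: take the monitor spinlock, fetch the current thread (now via TL_GET), then either claim the monitor, re-enter it recursively, or queue up and block. A hedged sketch of that shape with simplified names; the real routines in monitor.c also handle destructors and bulk acquire:

```c
// Illustrative only, not the CFA implementation.
void enter_monitor_sketch( monitor_desc * this ) {
    lock( this->lock );
    thread_desc * thrd = TL_GET( this_thread );
    if ( this->owner == 0 ) {
        this->owner     = thrd;          // uncontended: claim ownership
        this->recursion = 1;
    } else if ( this->owner == thrd ) {
        this->recursion += 1;            // the owner may re-enter (recursive monitor)
    } else {
        // contended: append thrd to the entry queue, then block;
        // BlockInternal( &this->lock ) releases the spinlock as part of blocking
        return;
    }
    unlock( this->lock );
}
```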
src/libcfa/concurrency/preemption.c
(diff r9d1e3f7 → rca37445)

      // Created On       : Mon Jun 5 14:20:42 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Feb 9 16:38:13 2018
    - // Update Count     : 14
    + // Last Modified On : Mon Apr 9 13:52:39 2018
    + // Update Count     : 36
      //
…
      }

    - #include "bits/cfatime.h"
      #include "bits/signal.h"

      #if !defined(__CFA_DEFAULT_PREEMPTION__)
    - #define __CFA_DEFAULT_PREEMPTION__ 10`cfa_ms
    + #define __CFA_DEFAULT_PREEMPTION__ 10`ms
      #endif

    - __cfa_time_t default_preemption() __attribute__((weak)) {
    + Duration default_preemption() __attribute__((weak)) {
          return __CFA_DEFAULT_PREEMPTION__;
      }
…
      // Get next expired node
    - static inline alarm_node_t * get_expired( alarm_list_t * alarms, __cfa_time_t currtime ) {
    + static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
          if( !alarms->head ) return NULL;                    // If no alarms return null
          if( alarms->head->alarm >= currtime ) return NULL;  // If alarms head not expired return null
…
          alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
          alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
    -     __cfa_time_t currtime = __kernel_get_time();    // Check current time once so we everything "happens at once"
    +     Time currtime = __kernel_get_time();            // Check current time once so we everything "happens at once"

          //Loop throught every thing expired
…
          // Check if this is a periodic alarm
    -     __cfa_time_t period = node->period;
    +     Duration period = node->period;
          if( period > 0 ) {
              node->alarm = currtime + period;            // Alarm is periodic, add currtime to it (used cached current time)
…
      // Update the preemption of a processor and notify interested parties
    - void update_preemption( processor * this, __cfa_time_t duration ) {
    + void update_preemption( processor * this, Duration duration ) {
          alarm_node_t * alarm = this->preemption_alarm;

          // Alarms need to be enabled
          if ( duration > 0 && ! alarm->set ) {
              alarm->alarm = __kernel_get_time() + duration;
              alarm->period = duration;
              register_self( alarm );
          }
    -     // Zero duraction but alarm is set
    +     // Zero duration but alarm is set
          else if ( duration == 0 && alarm->set ) {
              unregister_self( alarm );
…
      // Disable interrupts by incrementing the counter
      void disable_interrupts() {
    -     preemption_state.enabled = false;
    -     __attribute__((unused)) unsigned short new_val = preemption_state.disable_count + 1;
    -     preemption_state.disable_count = new_val;
    +     TL_GET( preemption_state ).enabled = false;
    +     __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1;
    +     TL_GET( preemption_state ).disable_count = new_val;
          verify( new_val < 65_000u );                    // If this triggers someone is disabling interrupts without enabling them
      }
…
      // If counter reaches 0, execute any pending CtxSwitch
      void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -     processor * proc = this_processor;              // Cache the processor now since interrupts can start happening after the atomic add
    -     thread_desc * thrd = this_thread;               // Cache the thread now since interrupts can start happening after the atomic add
    -
    -     unsigned short prev = preemption_state.disable_count;
    -     preemption_state.disable_count -= 1;
    +     processor * proc = TL_GET( this_processor );    // Cache the processor now since interrupts can start happening after the atomic add
    +     thread_desc * thrd = TL_GET( this_thread );     // Cache the thread now since interrupts can start happening after the atomic add
    +
    +     unsigned short prev = TL_GET( preemption_state ).disable_count;
    +     TL_GET( preemption_state ).disable_count -= 1;
          verify( prev != 0u );                           // If this triggers someone is enabled already enabled interrupts

          // Check if we need to prempt the thread because an interrupt was missed
          if( prev == 1 ) {
    -         preemption_state.enabled = true;
    +         TL_GET( preemption_state ).enabled = true;
              if( proc->pending_preemption ) {
                  proc->pending_preemption = false;
…
      // Don't execute any pending CtxSwitch even if counter reaches 0
      void enable_interrupts_noPoll() {
    -     unsigned short prev = preemption_state.disable_count;
    -     preemption_state.disable_count -= 1;
    +     unsigned short prev = TL_GET( preemption_state ).disable_count;
    +     TL_GET( preemption_state ).disable_count -= 1;
          verifyf( prev != 0u, "Incremented from %u\n", prev );  // If this triggers someone is enabled already enabled interrupts
          if( prev == 1 ) {
    -         preemption_state.enabled = true;
    +         TL_GET( preemption_state ).enabled = true;
          }
      }
…
      // If false : preemption is unsafe and marked as pending
      static inline bool preemption_ready() {
    -     bool ready = preemption_state.enabled && !preemption_state.in_progress;  // Check if preemption is safe
    -     this_processor->pending_preemption = !ready;                             // Adjust the pending flag accordingly
    +     bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress;  // Check if preemption is safe
    +     TL_GET( this_processor )->pending_preemption = !ready;                                       // Adjust the pending flag accordingly
          return ready;
      }
…
          // Start with preemption disabled until ready
    -     preemption_state.enabled = false;
    -     preemption_state.disable_count = 1;
    +     TL_GET( preemption_state ).enabled = false;
    +     TL_GET( preemption_state ).disable_count = 1;

          // Initialize the event kernel
…
      // Used by thread to control when they want to receive preemption signals
      void ?{}( preemption_scope & this, processor * proc ) {
    -     (this.alarm){ proc, 0`cfa_s, 0`cfa_s };
    +     (this.alarm){ proc, (Time){ 0 }, 0`s };
          this.proc = proc;
          this.proc->preemption_alarm = &this.alarm;
…
          disable_interrupts();

    -     update_preemption( this.proc, 0`cfa_s );
    +     update_preemption( this.proc, 0`s );
      }
…
          // before the kernel thread has even started running. When that happens an iterrupt
          // we a null 'this_processor' will be caught, just ignore it.
    -     if(! this_processor ) return;
    +     if(!TL_GET( this_processor )) return;

          choose(sfp->si_value.sival_int) {
              case PREEMPT_NORMAL   : ;                       // Normal case, nothing to do here
    -         case PREEMPT_TERMINATE: verify( this_processor->do_terminate);
    +         case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
              default:
                  abort( "internal error, signal value is %d", sfp->si_value.sival_int );
…
          __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);

    -     preemption_state.in_progress = true;              // Sync flag : prevent recursive calls to the signal handler
    +     TL_GET( preemption_state ).in_progress = true;    // Sync flag : prevent recursive calls to the signal handler
          signal_unblock( SIGUSR1 );                        // We are about to CtxSwitch out of the signal handler, let other handlers in
    -     preemption_state.in_progress = false;             // Clear the in progress flag
    +     TL_GET( preemption_state ).in_progress = false;   // Clear the in progress flag

          // Preemption can occur here

    -     BlockInternal( (thread_desc*) this_thread );            // Do the actual CtxSwitch
    +     BlockInternal( (thread_desc*)TL_GET( this_thread ) );   // Do the actual CtxSwitch
      }
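The mechanism underneath all of this is plain POSIX: __kernel_set_timer (alarm.c, above) arms ITIMER_REAL, expiry delivers a signal, and the handler uses preemption_ready() to decide whether a context switch is safe now or must be flagged as pending. A self-contained sketch of just the arming and delivery, assuming SIGALRM for simplicity (the CFA kernel itself relays preemption through SIGUSR1 with values such as PREEMPT_NORMAL):

```c
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void tick_handler( int sig ) {
    (void)sig;
    ticks += 1;                       // the real handler would run the preemption_ready() check
}

int main( void ) {
    struct sigaction sa = { 0 };
    sa.sa_handler = tick_handler;
    sigaction( SIGALRM, &sa, NULL );  // ITIMER_REAL expiry delivers SIGALRM

    struct itimerval val = {
        .it_interval = { 0, 10000 },  // re-arm every 10 ms: a periodic tick
        .it_value    = { 0, 10000 },  // first expiry in 10 ms
    };
    setitimer( ITIMER_REAL, &val, NULL );

    while ( ticks < 5 ) pause();      // each tick interrupts pause()
    printf( "got %d ticks\n", (int)ticks );
    return 0;
}
```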
src/libcfa/concurrency/preemption.h
(diff r9d1e3f7 → rca37445)

      // Created On       : Mon Jun 5 14:20:42 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Jul 21 22:34:25 2017
    - // Update Count     : 1
    + // Last Modified On : Fri Mar 23 17:18:53 2018
    + // Update Count     : 2
      //
…
      void kernel_start_preemption();
      void kernel_stop_preemption();
    - void update_preemption( processor * this, __cfa_time_t duration );
    + void update_preemption( processor * this, Duration duration );
      void tick_preemption();
src/libcfa/concurrency/thread
(diff r9d1e3f7 → rca37445)

      // Created On       : Tue Jan 17 12:27:26 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Sat Jul 22 09:59:40 2017
    - // Update Count     : 3
    + // Last Modified On : Thu Mar 29 14:07:11 2018
    + // Update Count     : 4
      //
…
      }

    - extern thread_local thread_desc * volatile this_thread;
    + //extern thread_local thread_desc * volatile this_thread;

      forall( dtype T | is_thread(T) )
src/libcfa/concurrency/thread.c
(diff r9d1e3f7 → rca37445)

      // Created On       : Tue Jan 17 12:27:26 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Jul 21 22:34:46 2017
    - // Update Count     : 1
    + // Last Modified On : Fri Mar 30 17:19:52 2018
    + // Update Count     : 8
      //
…
      }

    - extern volatile thread_local processor * this_processor;
    + //extern volatile thread_local processor * this_processor;

      //-----------------------------------------------------------------------------
…
          coroutine_desc* thrd_c = get_coroutine(this);
          thread_desc *  thrd_h = get_thread   (this);
    -     thrd_c->last = this_coroutine;
    +     thrd_c->last = TL_GET( this_coroutine );

          // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
…
          disable_interrupts();
          create_stack(&thrd_c->stack, thrd_c->stack.size);
    -     this_coroutine = thrd_c;
    +     TL_SET( this_coroutine, thrd_c );
          CtxStart(&this, CtxInvokeThread);
          assert( thrd_c->last->stack.context );
…
      extern "C" {
          void __finish_creation(void) {
    -         coroutine_desc* thrd_c = this_coroutine;
    +         coroutine_desc* thrd_c = TL_GET( this_coroutine );
              ThreadCtxSwitch( thrd_c, thrd_c->last );
          }
…
      void yield( void ) {
    -     verify( preemption_state.enabled );
    -     BlockInternal( this_thread );
    -     verify( preemption_state.enabled );
    +     verify( TL_GET( preemption_state ).enabled );
    +     BlockInternal( TL_GET( this_thread ) );
    +     verify( TL_GET( preemption_state ).enabled );
      }
…
          // set new coroutine that the processor is executing
          // and context switch to it
    -     this_coroutine = dst;
    +     TL_SET( this_coroutine, dst );
          assert( src->stack.context );
          CtxSwitch( src->stack.context, dst->stack.context );
    -     this_coroutine = src;
    +     TL_SET( this_coroutine, src );

          // set state of new coroutine to active