Changeset b69ea6b
- Timestamp: Feb 15, 2018, 10:52:35 AM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: d27e340
- Parents: ff2d1139
- Location: src/libcfa
- Files: 1 added, 8 edited
Legend: '+' added line, '-' removed line, unprefixed lines are unchanged context, '…' marks elided lines.
src/libcfa/concurrency/alarm.c
(rff2d1139 → rb69ea6b)

  #include <stdio.h>
  #include <string.h>
- #include <time.h>
  #include <unistd.h>
  #include <sys/time.h>
…
  #include "preemption.h"

- //=============================================================================================
- // time type
- //=============================================================================================
-
- #define one_second      1_000_000_000ul
- #define one_milisecond  1_000_000ul
- #define one_microsecond 1_000ul
- #define one_nanosecond  1ul
-
- __cfa_time_t zero_time = { 0 };
-
- void ?{}( __cfa_time_t & this ) { this.val = 0; }
- void ?{}( __cfa_time_t & this, zero_t zero ) { this.val = 0; }
-
- void ?{}( itimerval & this, __cfa_time_t * alarm ) with( this ) {
-     it_value.tv_sec = alarm->val / one_second;                                   // seconds
-     it_value.tv_usec = max( (alarm->val % one_second) / one_microsecond, 1000 ); // microseconds
+ static inline void ?{}( itimerval & this, __cfa_time_t * alarm ) with( this ) {
+     it_value.tv_sec = alarm->val / (1`cfa_s).val;                                  // seconds
+     it_value.tv_usec = max( (alarm->val % (1`cfa_s).val) / (1`cfa_us).val, 1000 ); // microseconds
      it_interval.tv_sec = 0;
      it_interval.tv_usec = 0;
  }

- void ?{}( __cfa_time_t & this, timespec * curr ) {
+ static inline void ?{}( __cfa_time_t & this, timespec * curr ) {
      uint64_t secs  = curr->tv_sec;
      uint64_t nsecs = curr->tv_nsec;
-     this.val = (secs * one_second) + nsecs;
+     this.val = from_s(secs).val + nsecs;
  }
-
- __cfa_time_t ?=?( __cfa_time_t & this, zero_t rhs ) {
-     this.val = 0;
-     return this;
- }
-
- __cfa_time_t from_s ( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000_000ul; return ret; }
- __cfa_time_t from_ms( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000ul;     return ret; }
- __cfa_time_t from_us( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000ul;         return ret; }
- __cfa_time_t from_ns( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1ul;             return ret; }

  //=============================================================================================
…
  //=============================================================================================

- void ?{}( alarm_node_t & this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) with( this ) {
+ void ?{}( alarm_node_t & this, thread_desc * thrd, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s ) with( this ) {
      this.thrd = thrd;
      this.alarm = alarm;
…
  }

- void ?{}( alarm_node_t & this, processor * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) with( this ) {
+ void ?{}( alarm_node_t & this, processor * proc, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s ) with( this ) {
      this.proc = proc;
      this.alarm = alarm;
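Editor's note: the hand-rolled one_second/one_microsecond constants above are replaced by unit literals supplied by the new bits/cfatime.h. A minimal sketch of the equivalence this assumes (check_units and its verify calls are illustrative, not part of the commit):

    static void check_units( void ) {
        verify( (1`cfa_s).val  == 1_000_000_000ul );   // one second, counted in nanoseconds
        verify( (1`cfa_us).val == 1_000ul );           // one microsecond, counted in nanoseconds
        verify( from_s( 2 ).val == (2`cfa_s).val );    // from_s and the literal should agree
    }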
src/libcfa/concurrency/alarm.h
(rff2d1139 → rb69ea6b)

  #include <assert.h>

+ #include "bits/cfatime.h"
+
  struct thread_desc;
  struct processor;
-
- struct timespec;
- struct itimerval;
-
- //=============================================================================================
- // time type
- //=============================================================================================
-
- struct __cfa_time_t {
-     uint64_t val;
- };
-
- // ctors
- void ?{}( __cfa_time_t & this );
- void ?{}( __cfa_time_t & this, zero_t zero );
- void ?{}( __cfa_time_t & this, timespec * curr );
- void ?{}( itimerval & this, __cfa_time_t * alarm );
-
- __cfa_time_t ?=?( __cfa_time_t & this, zero_t rhs );
-
- // logical ops
- static inline bool ?==?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val == rhs.val; }
- static inline bool ?!=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val != rhs.val; }
- static inline bool ?>? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >  rhs.val; }
- static inline bool ?<? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <  rhs.val; }
- static inline bool ?>=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >= rhs.val; }
- static inline bool ?<=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <= rhs.val; }
-
- static inline bool ?==?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val == rhs; }
- static inline bool ?!=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val != rhs; }
- static inline bool ?>? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >  rhs; }
- static inline bool ?<? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <  rhs; }
- static inline bool ?>=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >= rhs; }
- static inline bool ?<=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <= rhs; }
-
- // addition/subtraction
- static inline __cfa_time_t ?+?( __cfa_time_t lhs, __cfa_time_t rhs ) {
-     __cfa_time_t ret;
-     ret.val = lhs.val + rhs.val;
-     return ret;
- }
-
- static inline __cfa_time_t ?-?( __cfa_time_t lhs, __cfa_time_t rhs ) {
-     __cfa_time_t ret;
-     ret.val = lhs.val - rhs.val;
-     return ret;
- }
-
- __cfa_time_t from_s ( uint64_t );
- __cfa_time_t from_ms( uint64_t );
- __cfa_time_t from_us( uint64_t );
- __cfa_time_t from_ns( uint64_t );
-
- extern __cfa_time_t zero_time;

  //=============================================================================================
…
  typedef alarm_node_t ** __alarm_it_t;

- void ?{}( alarm_node_t & this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
- void ?{}( alarm_node_t & this, processor * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
+ void ?{}( alarm_node_t & this, thread_desc * thrd, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s );
+ void ?{}( alarm_node_t & this, processor * proc, __cfa_time_t alarm = 0`cfa_s, __cfa_time_t period = 0`cfa_s );
  void ^?{}( alarm_node_t & this );
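With the time type moved into bits/cfatime.h, the default arguments become unit literals but call sites are unchanged. A hypothetical usage sketch (example and one_shot are illustrative names, assuming the literal forms shown in this changeset):

    void example( thread_desc * t ) {
        // invokes the ?{} constructor declared above; period defaults to 0`cfa_s
        alarm_node_t one_shot = { t, 1`cfa_s };
    }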
src/libcfa/concurrency/coroutine.c
(rff2d1139 → rb69ea6b)

  // Wrapper for co
  void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
+     verify( preemption.enabled || this_processor->do_terminate );
      disable_interrupts();
…
      enable_interrupts( __cfaabi_dbg_ctx );
+     verify( preemption.enabled || this_processor->do_terminate );
  } //ctxSwitchDirect
src/libcfa/concurrency/invoke.c
(rff2d1139 → rb69ea6b)

  extern void __suspend_internal(void);
  extern void __leave_coroutine(void);
+ extern void __finish_creation(void);
  extern void __leave_thread_monitor( struct thread_desc * this );
  extern void disable_interrupts();
…
      cor->state = Active;
+
+     enable_interrupts( __cfaabi_dbg_ctx );

      main( this );
…
      // First suspend, once the thread arrives here,
      // the function pointer to main can be invalidated without risk
-     __suspend_internal();
+     __finish_creation();

      // Fetch the thread handle from the user defined thread structure
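These two additions cooperate: CtxInvokeCoroutine now enables interrupts before calling main, and CtxInvokeThread parks on __finish_creation (defined in thread.c below) rather than __suspend_internal. The intended startup order, sketched as editorial comments:

    // 1. __finish_creation();                   -- new thread switches back to its creator once
    // 2. creator may now invalidate the pointer to main without risk
    // 3. enable_interrupts( __cfaabi_dbg_ctx );  -- user code runs preemptible
    // 4. main( this );                           -- body executes when the thread is rescheduled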
src/libcfa/concurrency/kernel.c
(rff2d1139 → rb69ea6b)

  thread_local processor * volatile this_processor;

- volatile thread_local bool preemption_in_progress = 0;
- volatile thread_local bool preemption_enabled = false;
- volatile thread_local unsigned short disable_preempt_count = 1;
+ // volatile thread_local bool preemption_in_progress = 0;
+ // volatile thread_local bool preemption_enabled = false;
+ // volatile thread_local unsigned short disable_preempt_count = 1;
+
+ volatile thread_local __cfa_kernel_preemption_data_t preemption = { false, false, 1 };

  //-----------------------------------------------------------------------------
…
      if(readyThread)
      {
-         verify( !preemption_enabled );
+         verify( !preemption.enabled );

          runThread(this, readyThread);

-         verify( !preemption_enabled );
+         verify( !preemption.enabled );

          //Some actions need to be taken from the kernel
…
  void finishRunning(processor * this) with( this->finish ) {
      if( action_code == Release ) {
-         verify( !preemption_enabled );
+         verify( !preemption.enabled );
          unlock( *lock );
      }
…
      else if( action_code == Release_Schedule ) {
-         verify( !preemption_enabled );
+         verify( !preemption.enabled );
          unlock( *lock );
          ScheduleThread( thrd );
      }
      else if( action_code == Release_Multi ) {
-         verify( !preemption_enabled );
+         verify( !preemption.enabled );
          for(int i = 0; i < lock_count; i++) {
              unlock( *locks[i] );
…
      this_coroutine = NULL;
      this_thread = NULL;
-     preemption_enabled = false;
-     disable_preempt_count = 1;
+     preemption.enabled = false;
+     preemption.disable_count = 1;
      // SKULLDUGGERY: We want to create a context for the processor coroutine
      // which is needed for the 2-step context switch. However, there is no reason
…
  }

+ void kernel_first_resume(processor * this) {
+     coroutine_desc * src = this_coroutine;
+     coroutine_desc * dst = get_coroutine(*this->runner);
+
+     verify( !preemption.enabled );
+
+     create_stack(&dst->stack, dst->stack.size);
+     CtxStart(this->runner, CtxInvokeCoroutine);
+
+     verify( !preemption.enabled );
+
+     dst->last = src;
+     dst->starter = dst->starter ? dst->starter : src;
+
+     // set state of current coroutine to inactive
+     src->state = src->state == Halted ? Halted : Inactive;
+
+     // set new coroutine that task is executing
+     this_coroutine = dst;
+
+     // SKULLDUGGERY: normally interrupts are enabled before leaving a coroutine ctxswitch.
+     // Therefore, when first creating a coroutine, interrupts are enabled before calling the main.
+     // This is consistent with thread creation. However, when creating the main processor coroutine,
+     // we want interrupts to be disabled. Therefore, we double-disable interrupts here so they will
+     // stay disabled.
+     disable_interrupts();
+
+     // context switch to specified coroutine
+     assert( src->stack.context );
+     CtxSwitch( src->stack.context, dst->stack.context );
+     // when CtxSwitch returns we are back in the src coroutine
+
+     // set state of new coroutine to active
+     src->state = Active;
+
+     verify( !preemption.enabled );
+ }

  //-----------------------------------------------------------------------------
…
      verify( thrd->self_cor.state != Halted );

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );

      verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
…
      }

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
  }

  thread_desc * nextThread(cluster * this) with( *this ) {
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      lock( ready_queue_lock __cfaabi_dbg_ctx2 );
      thread_desc * head = pop_head( ready_queue );
      unlock( ready_queue_lock );
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      return head;
  }

  void BlockInternal() {
      disable_interrupts();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      returnToKernel();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      enable_interrupts( __cfaabi_dbg_ctx );
  }
…
      this_processor->finish.lock = lock;

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      returnToKernel();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );

      enable_interrupts( __cfaabi_dbg_ctx );
…
      this_processor->finish.thrd = thrd;

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      returnToKernel();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );

      enable_interrupts( __cfaabi_dbg_ctx );
…
      this_processor->finish.thrd = thrd;

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      returnToKernel();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );

      enable_interrupts( __cfaabi_dbg_ctx );
…
      this_processor->finish.lock_count = count;

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      returnToKernel();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );

      enable_interrupts( __cfaabi_dbg_ctx );
…
      this_processor->finish.thrd_count = thrd_count;

-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      returnToKernel();
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );

      enable_interrupts( __cfaabi_dbg_ctx );
…
  void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-     verify( !preemption_enabled );
+     verify( !preemption.enabled );
      this_processor->finish.action_code = thrd ? Release_Schedule : Release;
      this_processor->finish.lock = lock;
…
  // Kernel boot procedures
  void kernel_startup(void) {
+     verify( !preemption.enabled );
      __cfaabi_dbg_print_safe("Kernel : Starting\n");
…
      // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
      // mainThread is on the ready queue when this call is made.
-     resume( *mainProcessor->runner );
+     kernel_first_resume( this_processor );
…
      __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

+     verify( !preemption.enabled );
      enable_interrupts( __cfaabi_dbg_ctx );
+     verify( preemption.enabled );
  }
…
      __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

+     verify( preemption.enabled );
      disable_interrupts();
+     verify( !preemption.enabled );

      // SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
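The added verify calls pin down a single invariant: preemption.enabled is false whenever control is inside the kernel and true in user code. An editorial sketch of the pattern the kernel entry points now follow (kernel_section_shape is illustrative only):

    void kernel_section_shape( void ) {
        verify( preemption.enabled );           // entered from preemptible user code
        disable_interrupts();
        verify( !preemption.enabled );          // kernel work runs non-preemptible
        // ... e.g. returnToKernel() ...
        enable_interrupts( __cfaabi_dbg_ctx );
        verify( preemption.enabled );           // preemptible again on exit
    }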
src/libcfa/concurrency/kernel_private.h
(rff2d1139 → rb69ea6b)

  extern thread_local processor * volatile this_processor;

- extern volatile thread_local bool preemption_in_progress;
- extern volatile thread_local bool preemption_enabled;
- extern volatile thread_local unsigned short disable_preempt_count;
+ // extern volatile thread_local bool preemption_in_progress;
+ // extern volatile thread_local bool preemption_enabled;
+ // extern volatile thread_local unsigned short disable_preempt_count;
+
+ struct __cfa_kernel_preemption_data_t {
+     bool enabled;
+     bool in_progress;
+     unsigned short disable_count;
+ };
+
+ extern volatile thread_local __cfa_kernel_preemption_data_t preemption;

  //-----------------------------------------------------------------------------
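An annotated reading of the new per-kernel-thread structure (the comments are editorial; the { false, false, 1 } initializer comes from kernel.c above):

    struct __cfa_kernel_preemption_data_t {
        bool enabled;                 // true only while this kernel thread may be preempted
        bool in_progress;             // set around the SIGUSR1 handler to stop recursive preemption
        unsigned short disable_count; // nesting depth of disable_interrupts()/enable_interrupts()
    };
    // Starting at { false, false, 1 } keeps preemption off until kernel_startup's final
    // enable_interrupts() drops disable_count to 0 and flips enabled to true.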
src/libcfa/concurrency/preemption.c
(rff2d1139 → rb69ea6b)

  // Disable interrupts by incrementing the counter
  void disable_interrupts() {
-     preemption_enabled = false;
-     __attribute__((unused)) unsigned short new_val = disable_preempt_count + 1;
-     disable_preempt_count = new_val;
+     preemption.enabled = false;
+     __attribute__((unused)) unsigned short new_val = preemption.disable_count + 1;
+     preemption.disable_count = new_val;
      verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them
  }
…
      thread_desc * thrd = this_thread; // Cache the thread now since interrupts can start happening after the atomic add

-     unsigned short prev = disable_preempt_count;
-     disable_preempt_count -= 1;
+     unsigned short prev = preemption.disable_count;
+     preemption.disable_count -= 1;
      verify( prev != 0u ); // If this triggers someone is enabling already enabled interrupts

      // Check if we need to preempt the thread because an interrupt was missed
      if( prev == 1 ) {
-         preemption_enabled = true;
+         preemption.enabled = true;
          if( proc->pending_preemption ) {
              proc->pending_preemption = false;
…
  // Don't execute any pending CtxSwitch even if counter reaches 0
  void enable_interrupts_noPoll() {
-     unsigned short prev = disable_preempt_count;
-     disable_preempt_count -= 1;
+     unsigned short prev = preemption.disable_count;
+     preemption.disable_count -= 1;
      verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabling already enabled interrupts
      if( prev == 1 ) {
-         preemption_enabled = true;
+         preemption.enabled = true;
      }
  }
…
  // If false : preemption is unsafe and marked as pending
  static inline bool preemption_ready() {
-     bool ready = preemption_enabled && !preemption_in_progress; // Check if preemption is safe
+     bool ready = preemption.enabled && !preemption.in_progress; // Check if preemption is safe
      this_processor->pending_preemption = !ready; // Adjust the pending flag accordingly
      return ready;
…
      // Start with preemption disabled until ready
-     preemption_enabled = false;
-     disable_preempt_count = 1;
+     preemption.enabled = false;
+     preemption.disable_count = 1;

      // Initialize the event kernel
…
  // Used by thread to control when they want to receive preemption signals
  void ?{}( preemption_scope & this, processor * proc ) {
-     (this.alarm){ proc, zero_time, zero_time };
+     (this.alarm){ proc, 0`cfa_s, 0`cfa_s };
      this.proc = proc;
      this.proc->preemption_alarm = &this.alarm;
…
      disable_interrupts();

-     update_preemption( this.proc, zero_time );
+     update_preemption( this.proc, 0`cfa_s );
  }
…
      __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);

-     preemption_in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
+     preemption.in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
      signal_unblock( SIGUSR1 );      // We are about to CtxSwitch out of the signal handler, let other handlers in
-     preemption_in_progress = false; // Clear the in progress flag
+     preemption.in_progress = false; // Clear the in progress flag

      // Preemption can occur here
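The counter gives disable_interrupts/enable_interrupts proper nesting semantics: only the outermost enable re-arms preemption and polls for a CtxSwitch that was deferred while disabled. An editorial sketch of that discipline (nesting_example is illustrative only):

    void nesting_example( void ) {
        disable_interrupts();                   // disable_count 0 -> 1, preemption.enabled = false
        disable_interrupts();                   // 1 -> 2, still disabled
        enable_interrupts( __cfaabi_dbg_ctx );  // 2 -> 1, still disabled
        enable_interrupts( __cfaabi_dbg_ctx );  // 1 -> 0, enabled = true, pending preemption handled
    }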
src/libcfa/concurrency/thread.c
(rff2d1139 → rb69ea6b)

  }

+ extern "C" {
+     void __finish_creation(void) {
+         coroutine_desc* thrd_c = this_coroutine;
+         ThreadCtxSwitch( thrd_c, thrd_c->last );
+     }
+ }
+
  void yield( void ) {
+     verify( preemption.enabled );
      BlockInternal( this_thread );
+     verify( preemption.enabled );
  }
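__finish_creation is the thread-side half of the handshake referenced from invoke.c: it switches the freshly created thread back to its creator (thrd_c->last). The new verify calls document that yield is legal only from preemptible context; an editorial sketch (worker is illustrative only):

    void worker( void ) {
        yield();        // fine: thread code normally runs with preemption.enabled == true
        disable_interrupts();
        // yield();     // here it would trip verify( preemption.enabled ) in a debug build
        enable_interrupts( __cfaabi_dbg_ctx );
    }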