Files: 1 edited

Legend:
  ' ' unmodified line
  '+' added line
  '-' removed line
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (r1f45c7d)
+++ libcfa/src/concurrency/kernel.cfa (r56e5b24)
@@ -110,13 +110,13 @@
 #endif
 
-extern $thread * mainThread;
+extern thread$ * mainThread;
 extern processor * mainProcessor;
 
 //-----------------------------------------------------------------------------
 // Kernel Scheduling logic
-static $thread * __next_thread(cluster * this);
-static $thread * __next_thread_slow(cluster * this);
-static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
-static void __run_thread(processor * this, $thread * dst);
+static thread$ * __next_thread(cluster * this);
+static thread$ * __next_thread_slow(cluster * this);
+static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
+static void __run_thread(processor * this, thread$ * dst);
 static void __wake_one(cluster * cltr);
 
@@ -181,5 +181,5 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);
 
-	$thread * readyThread = 0p;
+	thread$ * readyThread = 0p;
 	MAIN_LOOP:
 	for() {
@@ -231,8 +231,21 @@
 			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
 
-			__disable_interrupts_hard();
-			eventfd_t val;
-			eventfd_read( this->idle, &val );
-			__enable_interrupts_hard();
+			{
+				eventfd_t val;
+				ssize_t ret = read( this->idle, &val, sizeof(val) );
+				if(ret < 0) {
+					switch((int)errno) {
+					case EAGAIN:
+					#if EAGAIN != EWOULDBLOCK
+					case EWOULDBLOCK:
+					#endif
+					case EINTR:
+						// No need to do anything special here, just assume it's a legitimate wake-up
+						break;
+					default:
+						abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+					}
+				}
+			}
 
 			#if !defined(__CFA_NO_STATISTICS__)
@@ -388,5 +401,5 @@
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
-static void __run_thread(processor * this, $thread * thrd_dst) {
+static void __run_thread(processor * this, thread$ * thrd_dst) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
@@ -406,5 +419,5 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);
 
-	$coroutine * proc_cor = get_coroutine(this->runner);
+	coroutine$ * proc_cor = get_coroutine(this->runner);
 
 	// set state of processor coroutine to inactive
@@ -425,6 +438,6 @@
 		/* paranoid */ verify( thrd_dst->context.SP );
 		/* paranoid */ verify( thrd_dst->state != Halted );
-		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
-		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
+		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
+		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
 		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
 
@@ -438,6 +451,6 @@
 
 		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
-		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
-		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
+		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
+		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
 		/* paranoid */ verify( thrd_dst->context.SP );
 		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
@@ -505,6 +518,6 @@
 void returnToKernel() {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	$coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
-	$thread * thrd_src = kernelTLS().this_thread;
+	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
+	thread$ * thrd_src = kernelTLS().this_thread;
 
 	__STATS( thrd_src->last_proc = kernelTLS().this_processor; )
@@ -534,6 +547,6 @@
 
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
-	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
+	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
+	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
 }
 
@@ -541,5 +554,5 @@
 // Scheduler routines
 // KERNEL ONLY
-static void __schedule_thread( $thread * thrd ) {
+static void __schedule_thread( thread$ * thrd ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( ready_schedule_islocked());
@@ -589,5 +602,5 @@
 }
 
-void schedule_thread$( $thread * thrd ) {
+void schedule_thread$( thread$ * thrd ) {
 	ready_schedule_lock();
 	__schedule_thread( thrd );
@@ -596,9 +609,9 @@
 
 // KERNEL ONLY
-static inline $thread * __next_thread(cluster * this) with( *this ) {
+static inline thread$ * __next_thread(cluster * this) with( *this ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 
 	ready_schedule_lock();
-	$thread * thrd = pop_fast( this );
+	thread$ * thrd = pop_fast( this );
 	ready_schedule_unlock();
 
@@ -608,9 +621,9 @@
 
 // KERNEL ONLY
-static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
+static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 
 	ready_schedule_lock();
-	$thread * thrd;
+	thread$ * thrd;
 	for(25) {
 		thrd = pop_slow( this );
@@ -626,5 +639,5 @@
 }
 
-static inline bool __must_unpark( $thread * thrd ) {
+static inline bool __must_unpark( thread$ * thrd ) {
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
 	switch(old_ticket) {
@@ -642,5 +655,5 @@
 }
 
-void __kernel_unpark( $thread * thrd ) {
+void __kernel_unpark( thread$ * thrd ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( ready_schedule_islocked());
@@ -657,5 +670,5 @@
 }
 
-void unpark( $thread * thrd ) {
+void unpark( thread$ * thrd ) {
 	if( !thrd ) return;
 
@@ -681,6 +694,6 @@
 // Should never return
 void __cfactx_thrd_leave() {
-	$thread * thrd = active_thread();
-	$monitor * this = &thrd->self_mon;
+	thread$ * thrd = active_thread();
+	monitor$ * this = &thrd->self_mon;
 
 	// Lock the monitor now
@@ -694,6 +707,6 @@
 	/* paranoid */ verify( kernelTLS().this_thread == thrd );
 	/* paranoid */ verify( thrd->context.SP );
-	/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
-	/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
+	/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
+	/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
 
 	thrd->state = Halting;
@@ -713,5 +726,5 @@
 bool force_yield( __Preemption_Reason reason ) {
 	__disable_interrupts_checked();
-	$thread * thrd = kernelTLS().this_thread;
+	thread$ * thrd = kernelTLS().this_thread;
 	/* paranoid */ verify(thrd->state == Active);
 
@@ -825,5 +838,5 @@
 //=============================================================================================
 void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
-	$thread * thrd = __cfaabi_tls.this_thread;
+	thread$ * thrd = __cfaabi_tls.this_thread;
 
 	if(thrd) {
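Aside from the mechanical rename of the internal types ($thread -> thread$, $coroutine -> coroutine$, $monitor -> monitor$), the one behavioural change in this diff is the idle-sleep path: the blocking eventfd_read() bracketed by __disable_interrupts_hard()/__enable_interrupts_hard() is replaced by a plain read() whose EAGAIN, EWOULDBLOCK, and EINTR failures are treated as legitimate wake-ups. The plain-C sketch below shows that tolerant read pattern in isolation; it is only an illustration of the idea, not CFA runtime code, and the names idle_fd and drain_idle_eventfd (and the use of a non-blocking eventfd) are assumptions of the sketch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

// Hypothetical helper: consume one wake-up from an idle eventfd.
// A failed read with EAGAIN/EWOULDBLOCK/EINTR is treated as a harmless,
// legitimate wake-up; any other error is fatal, mirroring the changeset.
static void drain_idle_eventfd( int idle_fd ) {
	eventfd_t val;
	ssize_t ret = read( idle_fd, &val, sizeof(val) );
	if ( ret < 0 ) {
		switch ( errno ) {
		case EAGAIN:            // nothing was written: spurious wake-up
	#if EAGAIN != EWOULDBLOCK
		case EWOULDBLOCK:
	#endif
		case EINTR:             // interrupted by a signal: also treat as a wake-up
			break;
		default:
			fprintf( stderr, "read failure on idle eventfd, error(%d) %s\n", errno, strerror( errno ) );
			abort();
		}
	}
}

int main( void ) {
	// Non-blocking (assumption of this sketch) so a racing wake-up cannot leave the reader stuck.
	int idle_fd = eventfd( 0, EFD_NONBLOCK );
	if ( idle_fd < 0 ) { perror( "eventfd" ); return 1; }

	eventfd_write( idle_fd, 1 );   // another thread would do this to wake the idle processor
	drain_idle_eventfd( idle_fd ); // consumes the count, or tolerates EAGAIN if there is none

	close( idle_fd );
	return 0;
}

Accepting EINTR (and EAGAIN on a non-blocking descriptor, as assumed here) means a signal or a racing wake-up can neither wedge nor abort the reader, which appears to be what lets the new kernel code drop the hard interrupt disable/enable pair around the read.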