Changeset c86ee4c for libcfa/src/concurrency/kernel.cfa
- Timestamp:
- Jul 7, 2021, 6:24:42 PM (3 years ago)
- Branches:
- ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- d83b266
- Parents:
- 1f45c7d (diff), b1a2c4a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel.cfa
r1f45c7d rc86ee4c 110 110 #endif 111 111 112 extern $thread* mainThread;112 extern thread$ * mainThread; 113 113 extern processor * mainProcessor; 114 114 115 115 //----------------------------------------------------------------------------- 116 116 // Kernel Scheduling logic 117 static $thread* __next_thread(cluster * this);118 static $thread* __next_thread_slow(cluster * this);119 static inline bool __must_unpark( $thread* thrd ) __attribute((nonnull(1)));120 static void __run_thread(processor * this, $thread* dst);117 static thread$ * __next_thread(cluster * this); 118 static thread$ * __next_thread_slow(cluster * this); 119 static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1))); 120 static void __run_thread(processor * this, thread$ * dst); 121 121 static void __wake_one(cluster * cltr); 122 122 … … 181 181 __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this); 182 182 183 $thread* readyThread = 0p;183 thread$ * readyThread = 0p; 184 184 MAIN_LOOP: 185 185 for() { … … 388 388 // runThread runs a thread by context switching 389 389 // from the processor coroutine to the target thread 390 static void __run_thread(processor * this, $thread* thrd_dst) {390 static void __run_thread(processor * this, thread$ * thrd_dst) { 391 391 /* paranoid */ verify( ! 
__preemption_enabled() ); 392 392 /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted); … … 406 406 __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name); 407 407 408 $coroutine* proc_cor = get_coroutine(this->runner);408 coroutine$ * proc_cor = get_coroutine(this->runner); 409 409 410 410 // set state of processor coroutine to inactive … … 425 425 /* paranoid */ verify( thrd_dst->context.SP ); 426 426 /* paranoid */ verify( thrd_dst->state != Halted ); 427 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor428 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor427 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor 428 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor 429 429 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == 
thrd_dst->canary ); 430 430 … … 438 438 439 439 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary ); 440 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too large.\n", thrd_dst );441 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too small.\n", thrd_dst );440 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); 441 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); 442 442 /* paranoid */ verify( thrd_dst->context.SP ); 443 443 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr ); … … 505 505 void returnToKernel() { 506 506 /* paranoid */ verify( ! __preemption_enabled() ); 507 $coroutine* proc_cor = get_coroutine(kernelTLS().this_processor->runner);508 $thread* thrd_src = kernelTLS().this_thread;507 coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner); 508 thread$ * thrd_src = kernelTLS().this_thread; 509 509 510 510 __STATS( thrd_src->last_proc = kernelTLS().this_processor; ) … … 534 534 535 535 /* paranoid */ verify( ! 
__preemption_enabled() ); 536 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread%p has been corrupted.\n StackPointer too small.\n", thrd_src );537 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread%p has been corrupted.\n StackPointer too large.\n", thrd_src );536 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src ); 537 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src ); 538 538 } 539 539 … … 541 541 // Scheduler routines 542 542 // KERNEL ONLY 543 static void __schedule_thread( $thread* thrd ) {543 static void __schedule_thread( thread$ * thrd ) { 544 544 /* paranoid */ verify( ! __preemption_enabled() ); 545 545 /* paranoid */ verify( ready_schedule_islocked()); … … 589 589 } 590 590 591 void schedule_thread$( $thread* thrd ) {591 void schedule_thread$( thread$ * thrd ) { 592 592 ready_schedule_lock(); 593 593 __schedule_thread( thrd ); … … 596 596 597 597 // KERNEL ONLY 598 static inline $thread* __next_thread(cluster * this) with( *this ) {598 static inline thread$ * __next_thread(cluster * this) with( *this ) { 599 599 /* paranoid */ verify( ! 
__preemption_enabled() ); 600 600 601 601 ready_schedule_lock(); 602 $thread* thrd = pop_fast( this );602 thread$ * thrd = pop_fast( this ); 603 603 ready_schedule_unlock(); 604 604 … … 608 608 609 609 // KERNEL ONLY 610 static inline $thread* __next_thread_slow(cluster * this) with( *this ) {610 static inline thread$ * __next_thread_slow(cluster * this) with( *this ) { 611 611 /* paranoid */ verify( ! __preemption_enabled() ); 612 612 613 613 ready_schedule_lock(); 614 $thread* thrd;614 thread$ * thrd; 615 615 for(25) { 616 616 thrd = pop_slow( this ); … … 626 626 } 627 627 628 static inline bool __must_unpark( $thread* thrd ) {628 static inline bool __must_unpark( thread$ * thrd ) { 629 629 int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST); 630 630 switch(old_ticket) { … … 642 642 } 643 643 644 void __kernel_unpark( $thread* thrd ) {644 void __kernel_unpark( thread$ * thrd ) { 645 645 /* paranoid */ verify( ! __preemption_enabled() ); 646 646 /* paranoid */ verify( ready_schedule_islocked()); … … 657 657 } 658 658 659 void unpark( $thread* thrd ) {659 void unpark( thread$ * thrd ) { 660 660 if( !thrd ) return; 661 661 … … 681 681 // Should never return 682 682 void __cfactx_thrd_leave() { 683 $thread* thrd = active_thread();684 $monitor* this = &thrd->self_mon;683 thread$ * thrd = active_thread(); 684 monitor$ * this = &thrd->self_mon; 685 685 686 686 // Lock the monitor now … … 694 694 /* paranoid */ verify( kernelTLS().this_thread == thrd ); 695 695 /* paranoid */ verify( thrd->context.SP ); 696 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread%p has been corrupted.\n StackPointer too large.\n", thrd );697 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread%p has been corrupted.\n StackPointer too small.\n", thrd );696 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > 
((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd ); 697 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd ); 698 698 699 699 thrd->state = Halting; … … 713 713 bool force_yield( __Preemption_Reason reason ) { 714 714 __disable_interrupts_checked(); 715 $thread* thrd = kernelTLS().this_thread;715 thread$ * thrd = kernelTLS().this_thread; 716 716 /* paranoid */ verify(thrd->state == Active); 717 717 … … 825 825 //============================================================================================= 826 826 void __kernel_abort_msg( char * abort_text, int abort_text_size ) { 827 $thread* thrd = __cfaabi_tls.this_thread;827 thread$ * thrd = __cfaabi_tls.this_thread; 828 828 829 829 if(thrd) {
Note: See TracChangeset
for help on using the changeset viewer.