Changeset 8c50aed for libcfa/src/concurrency/kernel.cfa
- Timestamp:
- Feb 21, 2020, 3:33:14 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- a505021
- Parents:
- b0c7419
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel.cfa
rb0c7419 r8c50aed 110 110 //----------------------------------------------------------------------------- 111 111 //Start and stop routine for the kernel, declared first to make sure they run first 112 static void kernel_startup(void)__attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));113 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));112 static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 113 static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 114 114 115 115 //----------------------------------------------------------------------------- … … 208 208 } 209 209 210 static void start(processor * this); 210 static void * CtxInvokeProcessor(void * arg); 211 211 212 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) { 212 213 this.name = name; … … 221 222 idleLock{}; 222 223 223 start( &this ); 224 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this); 225 226 this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this ); 227 228 __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this); 224 229 } 225 230 … … 259 264 // Kernel Scheduling logic 260 265 //============================================================================================= 261 static thread_desc * nextThread(cluster * this);262 static void runThread(processor * this, thread_desc * dst);263 static void halt(processor * this);266 static thread_desc * __next_thread(cluster * this); 267 static void __run_thread(processor * this, thread_desc * dst); 268 static void __halt(processor * this); 264 269 265 270 //Main of the processor contexts … … 284 289 thread_desc * readyThread = 0p; 285 290 for( unsigned int spin_count = 0; ! 
__atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) { 286 readyThread = nextThread( this->cltr );291 readyThread = __next_thread( this->cltr ); 287 292 288 293 if(readyThread) { … … 291 296 /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next ); 292 297 293 runThread(this, readyThread);298 __run_thread(this, readyThread); 294 299 295 300 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); … … 298 303 } else { 299 304 // spin(this, &spin_count); 300 //halt(this);305 __halt(this); 301 306 } 302 307 } … … 318 323 // runThread runs a thread by context switching 319 324 // from the processor coroutine to the target thread 320 static void runThread(processor * this, thread_desc * thrd_dst) {325 static void __run_thread(processor * this, thread_desc * thrd_dst) { 321 326 coroutine_desc * proc_cor = get_coroutine(this->runner); 322 327 … … 359 364 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 360 365 // The thread was preempted, reschedule it and reset the flag 361 ScheduleThread( thrd_dst );366 __schedule_thread( thrd_dst ); 362 367 break RUNNING; 363 368 } … … 460 465 } // Abort 461 466 462 void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {467 void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 463 468 pthread_attr_t attr; 464 469 … … 488 493 } 489 494 490 static void start(processor * this) {491 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);492 493 this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );494 495 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);496 }497 498 495 // KERNEL_ONLY 499 voidkernel_first_resume( processor * this ) {496 static void __kernel_first_resume( processor * this ) { 500 497 thread_desc * src = mainThread; 501 498 coroutine_desc * dst = get_coroutine(this->runner); … … 529 526 530 527 // KERNEL_ONLY 531 voidkernel_last_resume( processor * 
this ) {528 static void __kernel_last_resume( processor * this ) { 532 529 coroutine_desc * src = &mainThread->self_cor; 533 530 coroutine_desc * dst = get_coroutine(this->runner); … … 544 541 // Scheduler routines 545 542 // KERNEL ONLY 546 void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {543 void __schedule_thread( thread_desc * thrd ) with( *thrd->curr_cluster ) { 547 544 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 548 545 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) … … 574 571 575 572 // KERNEL ONLY 576 static thread_desc * nextThread(cluster * this) with( *this ) {573 static thread_desc * __next_thread(cluster * this) with( *this ) { 577 574 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 578 575 … … 600 597 // Wake lost the race, 601 598 thrd->state = Inactive; 602 ScheduleThread( thrd );599 __schedule_thread( thrd ); 603 600 break; 604 601 case Rerun: … … 668 665 //----------------------------------------------------------------------------- 669 666 // Kernel boot procedures 670 static void kernel_startup(void) {667 static void __kernel_startup(void) { 671 668 verify( ! kernelTLS.preemption_state.enabled ); 672 669 __cfaabi_dbg_print_safe("Kernel : Starting\n"); … … 729 726 // Add the main thread to the ready queue 730 727 // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread 731 ScheduleThread(mainThread);728 __schedule_thread(mainThread); 732 729 733 730 // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX 734 731 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 735 732 // mainThread is on the ready queue when this call is made. 
736 kernel_first_resume( kernelTLS.this_processor );733 __kernel_first_resume( kernelTLS.this_processor ); 737 734 738 735 … … 746 743 } 747 744 748 static void kernel_shutdown(void) {745 static void __kernel_shutdown(void) { 749 746 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n"); 750 747 … … 757 754 // which is currently here 758 755 __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE); 759 kernel_last_resume( kernelTLS.this_processor );756 __kernel_last_resume( kernelTLS.this_processor ); 760 757 mainThread->self_cor.state = Halted; 761 758 … … 783 780 // Kernel Quiescing 784 781 //============================================================================================= 785 static void halt(processor * this) with( *this ) {782 static void __halt(processor * this) with( *this ) { 786 783 // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ); 787 784 … … 972 969 //----------------------------------------------------------------------------- 973 970 // Debug 974 bool threading_enabled(void) {971 bool threading_enabled(void) __attribute__((const)) { 975 972 return true; 976 973 }
Note: See TracChangeset for help on using the changeset viewer.