Changeset 04e6f93 for libcfa/src/concurrency/kernel.cfa
- Timestamp: Feb 27, 2020, 4:04:25 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: a037f85
- Parents: 41efd33 (diff), 930b504 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
  - libcfa/src/concurrency/kernel.cfa
--- r41efd33
+++ r04e6f93

 //-----------------------------------------------------------------------------
 //Start and stop routine for the kernel, declared first to make sure they run first
-static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
-static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

 //-----------------------------------------------------------------------------
…
 KERNEL_STORAGE(cluster,     mainCluster);
 KERNEL_STORAGE(processor,   mainProcessor);
-KERNEL_STORAGE(thread_desc, mainThread);
+KERNEL_STORAGE($thread,     mainThread);
 KERNEL_STORAGE(__stack_t,   mainThreadCtx);

 cluster     * mainCluster;
 processor   * mainProcessor;
-thread_desc * mainThread;
+$thread     * mainThread;

 extern "C" {
…
 // Main thread construction

-void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
+void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
 	stack.storage = info->storage;
 	with(*stack.storage) {
…
 }

-void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
+void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
 	state = Start;
 	self_cor{ info };
…
 }

-static void start(processor * this);
+static void * __invoke_processor(void * arg);
+
 void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
 	this.name = name;
 	this.cltr = &cltr;
 	terminated{ 0 };
+	destroyer = 0p;
 	do_terminate = false;
 	preemption_alarm = 0p;
…
 	idleLock{};

-	start( &this );
+	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
+
+	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
+
+	__cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
 }
…
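The processor constructor above now starts its kernel thread inline: __create_pthread hands pthread a stack the kernel allocated itself, and the returned pointer is kept in this.stack so the kernel controls its lifetime. The following stand-alone C program is a minimal sketch of that pattern under stated assumptions, not the CFA implementation; the helper name create_pthread_on_stack and the 8 MiB size are invented for the example.

    // Sketch: run a kernel thread on a caller-allocated stack, the pattern
    // behind __create_pthread. Hypothetical stand-alone demo in plain C.
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void * run(void * arg) {
        printf("processor %p running on a caller-owned stack\n", arg);
        return NULL;
    }

    static void * create_pthread_on_stack(pthread_t * tid, void * (*start)(void *), void * arg) {
        size_t size = 8 * 1024 * 1024;                        // illustrative size
        void * stack = NULL;
        posix_memalign(&stack, sysconf(_SC_PAGESIZE), size);  // page-aligned base
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setstack(&attr, stack, size);            // thread runs on our memory
        pthread_create(tid, &attr, start, arg);
        pthread_attr_destroy(&attr);
        return stack;                                         // caller owns and frees it
    }

    int main(void) {
        pthread_t tid;
        void * stack = create_pthread_on_stack(&tid, run, (void *)1);
        pthread_join(tid, NULL);
        free(stack);                                          // safe only after join
        return 0;
    }

Owning the stack is what lets the kernel reclaim processor stacks deterministically instead of leaving their lifetime to the pthread library.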
 // Kernel Scheduling logic
 //=============================================================================================
-static void runThread(processor * this, thread_desc * dst);
-static void finishRunning(processor * this);
-static void halt(processor * this);
+static $thread * __next_thread(cluster * this);
+static void __run_thread(processor * this, $thread * dst);
+static void __halt(processor * this);

 //Main of the processor contexts
…
 	__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

-	thread_desc * readyThread = 0p;
+	$thread * readyThread = 0p;
 	for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
-		readyThread = nextThread( this->cltr );
+		readyThread = __next_thread( this->cltr );

 		if(readyThread) {
-			verify( ! kernelTLS.preemption_state.enabled );
-
-			runThread(this, readyThread);
-
-			verify( ! kernelTLS.preemption_state.enabled );
-
-			//Some actions need to be taken from the kernel
-			finishRunning(this);
+			/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+			/* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
+			/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
+
+			__run_thread(this, readyThread);
+
+			/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

 			spin_count = 0;
 		} else {
 			// spin(this, &spin_count);
-			halt(this);
+			__halt(this);
 		}
 	}
…
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
-static void runThread(processor * this, thread_desc * thrd_dst) {
-	coroutine_desc * proc_cor = get_coroutine(this->runner);
-
-	// Reset the terminating actions here
-	this->finish.action_code = No_Action;
+static void __run_thread(processor * this, $thread * thrd_dst) {
+	$coroutine * proc_cor = get_coroutine(this->runner);

 	// Update global state
 	kernelTLS.this_thread = thrd_dst;

-	// set state of processor coroutine to inactive and the thread to active
-	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
-	thrd_dst->state = Active;
-
-	// set context switch to the thread that the processor is executing
-	verify( thrd_dst->context.SP );
-	CtxSwitch( &proc_cor->context, &thrd_dst->context );
-	// when CtxSwitch returns we are back in the processor coroutine
-
-	// set state of processor coroutine to active and the thread to inactive
-	thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
+	// set state of processor coroutine to inactive
+	verify(proc_cor->state == Active);
+	proc_cor->state = Inactive;
+
+	// Actually run the thread
+	RUNNING: while(true) {
+		if(unlikely(thrd_dst->preempted)) {
+			thrd_dst->preempted = __NO_PREEMPTION;
+			verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
+		} else {
+			verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
+			thrd_dst->state = Active;
+		}
+
+		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+		// set context switch to the thread that the processor is executing
+		verify( thrd_dst->context.SP );
+		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
+		// when __cfactx_switch returns we are back in the processor coroutine
+
+		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+
+		// We just finished running a thread, there are a few things that could have happened.
+		// 1 - Regular case : the thread has blocked and now one has scheduled it yet.
+		// 2 - Racy case : the thread has blocked but someone has already tried to schedule it.
+		// 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way
+		// 4 - Preempted
+		// In case 1, we may have won a race so we can't write to the state again.
+		// In case 2, we lost the race so we now own the thread.
+		// In case 3, we lost the race but can just reschedule the thread.
+
+		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
+			// The thread was preempted, reschedule it and reset the flag
+			__schedule_thread( thrd_dst );
+			break RUNNING;
+		}
+
+		// set state of processor coroutine to active and the thread to inactive
+		static_assert(sizeof(thrd_dst->state) == sizeof(int));
+		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
+		switch(old_state) {
+			case Halted:
+				// The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
+				thrd_dst->state = Halted;
+
+				// We may need to wake someone up here since
+				unpark( this->destroyer );
+				this->destroyer = 0p;
+				break RUNNING;
+			case Active:
+				// This is case 1, the regular case, nothing more is needed
+				break RUNNING;
+			case Rerun:
+				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
+				// In this case, just run it again.
+				continue RUNNING;
+			default:
+				// This makes no sense, something is wrong abort
+				abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
+		}
+	}
+
+	// Just before returning to the processor, set the processor coroutine to active
 	proc_cor->state = Active;
 }

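The four-case comment above is the heart of this change: a parking thread and a concurrent wake-up both perform an atomic exchange on the thread's state word, and whichever side runs second sees the other side's value and takes responsibility for the thread. The following stand-alone C program is a minimal sketch of that two-way exchange, using the same __atomic_exchange_n builtin as the diff; the state names mirror the CFA ones, but everything else is invented for the demo.

    #include <pthread.h>
    #include <stdio.h>

    enum state { ACTIVE, INACTIVE, RERUN };
    static int state = ACTIVE;                    // the thread is currently running

    // Processor side: the thread just blocked, try to mark it Inactive.
    static void * parker(void * unused) {
        int old = __atomic_exchange_n(&state, INACTIVE, __ATOMIC_SEQ_CST);
        if(old == RERUN) {
            // Case 2: a waker got here first, so we now own the thread; run it again.
            printf("parker lost the race: rerun immediately\n");
            state = ACTIVE;
        } else {
            // Case 1: we won; a later unpark will find INACTIVE and reschedule.
            printf("parker won the race: thread is parked\n");
        }
        return NULL;
    }

    // Waker side: mark the thread Rerun and inspect what was there before.
    static void * waker(void * unused) {
        int old = __atomic_exchange_n(&state, RERUN, __ATOMIC_SEQ_CST);
        if(old == INACTIVE) {
            // The thread is fully blocked; the waker must requeue it.
            printf("waker: thread was parked, rescheduling it\n");
            state = ACTIVE;                       // stand-in for __schedule_thread()
        } else {
            // The thread has not finished blocking; it will see RERUN and rerun itself.
            printf("waker: thread still running, it will rerun itself\n");
        }
        return NULL;
    }

    int main(void) {
        pthread_t a, b;
        pthread_create(&a, NULL, parker, NULL);
        pthread_create(&b, NULL, waker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

On every interleaving exactly one of the two "lost the race" branches fires, which is why case 1 must not write the state again while case 2 transfers ownership of the thread back to the processor.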
 // KERNEL_ONLY
-static void returnToKernel() {
-	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
-	thread_desc * thrd_src = kernelTLS.this_thread;
-
-	// set state of current coroutine to inactive
-	thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
-	proc_cor->state = Active;
-	int local_errno = *__volatile_errno();
-	#if defined( __i386 ) || defined( __x86_64 )
-		__x87_store;
-	#endif
-
-	// set new coroutine that the processor is executing
-	// and context switch to it
-	verify( proc_cor->context.SP );
-	CtxSwitch( &thrd_src->context, &proc_cor->context );
-
-	// set state of new coroutine to active
-	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
-	thrd_src->state = Active;
-
-	#if defined( __i386 ) || defined( __x86_64 )
-		__x87_load;
-	#endif
-	*__volatile_errno() = local_errno;
-}
-
-// KERNEL_ONLY
-// Once a thread has finished running, some of
-// its final actions must be executed from the kernel
-static void finishRunning(processor * this) with( this->finish ) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	choose( action_code ) {
-	case No_Action:
-		break;
-	case Release:
-		unlock( *lock );
-	case Schedule:
-		ScheduleThread( thrd );
-	case Release_Schedule:
-		unlock( *lock );
-		ScheduleThread( thrd );
-	case Release_Multi:
-		for(int i = 0; i < lock_count; i++) {
-			unlock( *locks[i] );
-		}
-	case Release_Multi_Schedule:
-		for(int i = 0; i < lock_count; i++) {
-			unlock( *locks[i] );
-		}
-		for(int i = 0; i < thrd_count; i++) {
-			ScheduleThread( thrds[i] );
-		}
-	case Callback:
-		callback();
-	default:
-		abort("KERNEL ERROR: Unexpected action to run after thread");
-	}
+void returnToKernel() {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
+	$thread * thrd_src = kernelTLS.this_thread;
+
+	// Run the thread on this processor
+	{
+		int local_errno = *__volatile_errno();
+		#if defined( __i386 ) || defined( __x86_64 )
+			__x87_store;
+		#endif
+		verify( proc_cor->context.SP );
+		__cfactx_switch( &thrd_src->context, &proc_cor->context );
+		#if defined( __i386 ) || defined( __x86_64 )
+			__x87_load;
+		#endif
+		*__volatile_errno() = local_errno;
+	}
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 }
…
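returnToKernel now brackets the context switch with an errno save/restore (plus x87/SSE control state on x86): while a user thread is parked, other user threads run on the same kernel thread and share its thread-local errno, and the thread may even resume on a different kernel thread, so the value has to travel with the user-level context. The sketch below isolates the errno half, using POSIX ucontext as a stand-in for __cfactx_switch; the program is illustrative, not CFA code.

    #include <ucontext.h>
    #include <errno.h>
    #include <stdio.h>

    static ucontext_t kernel_ctx, thread_ctx;
    static char stack[64 * 1024];

    static void user_thread(void) {
        errno = 42;                                 // lives in kernel-thread TLS
        int local_errno = errno;                    // save, as returnToKernel does
        swapcontext(&thread_ctx, &kernel_ctx);      // park: switch to the scheduler
        errno = local_errno;                        // restore on resumption
        printf("errno after resume: %d\n", errno);  // 42, despite the clobber below
    }

    int main(void) {
        getcontext(&thread_ctx);
        thread_ctx.uc_stack.ss_sp = stack;
        thread_ctx.uc_stack.ss_size = sizeof(stack);
        thread_ctx.uc_link = &kernel_ctx;
        makecontext(&thread_ctx, user_thread, 0);

        swapcontext(&kernel_ctx, &thread_ctx);      // run the thread until it parks
        errno = 0;                                  // other work clobbers errno here
        swapcontext(&kernel_ctx, &thread_ctx);      // resume the parked thread
        return 0;
    }

Without the save/restore pair the print would show 0: whatever ran on the kernel thread in the meantime would leak its errno into the resumed user thread.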
 // This is the entry point for processors (kernel threads)
 // It effectively constructs a coroutine by stealing the pthread stack
-static void * CtxInvokeProcessor(void * arg) {
+static void * __invoke_processor(void * arg) {
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;
…
 } // Abort

-void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
+void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
 	pthread_attr_t attr;
…
 }

-static void start(processor * this) {
-	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
-
-	this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );
-
-	__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
-}
-
 // KERNEL_ONLY
-void kernel_first_resume( processor * this ) {
-	thread_desc * src = mainThread;
-	coroutine_desc * dst = get_coroutine(this->runner);
+static void __kernel_first_resume( processor * this ) {
+	$thread * src = mainThread;
+	$coroutine * dst = get_coroutine(this->runner);

 	verify( ! kernelTLS.preemption_state.enabled );
…
 	kernelTLS.this_thread->curr_cor = dst;
 	__stack_prepare( &dst->stack, 65000 );
-	CtxStart(main, dst, this->runner, CtxInvokeCoroutine);
+	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

 	verify( ! kernelTLS.preemption_state.enabled );
…
 	// context switch to specified coroutine
 	verify( dst->context.SP );
-	CtxSwitch( &src->context, &dst->context );
-	// when CtxSwitch returns we are back in the src coroutine
+	__cfactx_switch( &src->context, &dst->context );
+	// when __cfactx_switch returns we are back in the src coroutine

 	mainThread->curr_cor = &mainThread->self_cor;
…
 // KERNEL_ONLY
-void kernel_last_resume( processor * this ) {
-	coroutine_desc * src = &mainThread->self_cor;
-	coroutine_desc * dst = get_coroutine(this->runner);
+static void __kernel_last_resume( processor * this ) {
+	$coroutine * src = &mainThread->self_cor;
+	$coroutine * dst = get_coroutine(this->runner);

 	verify( ! kernelTLS.preemption_state.enabled );
…
 	// context switch to the processor
-	CtxSwitch( &src->context, &dst->context );
+	__cfactx_switch( &src->context, &dst->context );
 }

 //-----------------------------------------------------------------------------
 // Scheduler routines
-
 // KERNEL ONLY
-void ScheduleThread( thread_desc * thrd ) {
-	verify( thrd );
-	verify( thrd->state != Halted );
-
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
-
-	with( *thrd->curr_cluster ) {
-		lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
-		bool was_empty = !(ready_queue != 0);
-		append( ready_queue, thrd );
-		unlock( ready_queue_lock );
-
-		if(was_empty) {
-			lock  (proc_list_lock __cfaabi_dbg_ctx2);
-			if(idles) {
-				wake_fast(idles.head);
-			}
-			unlock (proc_list_lock);
-		}
-		else if( struct processor * idle = idles.head ) {
-			wake_fast(idle);
-		}
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
+void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
+	/* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+			"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
+			"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */ #endif
+	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
+
+	lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
+	bool was_empty = !(ready_queue != 0);
+	append( ready_queue, thrd );
+	unlock( ready_queue_lock );
+
+	if(was_empty) {
+		lock  (proc_list_lock __cfaabi_dbg_ctx2);
+		if(idles) {
+			wake_fast(idles.head);
+		}
+		unlock (proc_list_lock);
+	}
+	else if( struct processor * idle = idles.head ) {
+		wake_fast(idle);
+	}
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 }

 // KERNEL ONLY
-thread_desc * nextThread(cluster * this) with( *this ) {
-	verify( ! kernelTLS.preemption_state.enabled );
+static $thread * __next_thread(cluster * this) with( *this ) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
 	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
-	thread_desc * head = pop_head( ready_queue );
+	$thread * head = pop_head( ready_queue );
 	unlock( ready_queue_lock );
-	verify( ! kernelTLS.preemption_state.enabled );
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	return head;
 }

-void BlockInternal() {
+void unpark( $thread * thrd ) {
+	if( !thrd ) return;
+
 	disable_interrupts();
-	verify( ! kernelTLS.preemption_state.enabled );
+	static_assert(sizeof(thrd->state) == sizeof(int));
+	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
+	switch(old_state) {
+		case Active:
+			// Wake won the race, the thread will reschedule/rerun itself
+			break;
+		case Inactive:
+			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
+
+			// Wake lost the race,
+			thrd->state = Inactive;
+			__schedule_thread( thrd );
+			break;
+		case Rerun:
+			abort("More than one thread attempted to schedule thread %p\n", thrd);
+			break;
+		case Halted:
+		case Start:
+		case Primed:
+		default:
+			// This makes no sense, something is wrong abort
+			abort();
+	}
+	enable_interrupts( __cfaabi_dbg_ctx );
+}
+
+void park( void ) {
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+	disable_interrupts();
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
+
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal( __spinlock_t * lock ) {
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+
+}
+
+// KERNEL ONLY
+void __leave_thread() {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	returnToKernel();
+	abort();
+}
+
+// KERNEL ONLY
+bool force_yield( __Preemption_Reason reason ) {
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
 	disable_interrupts();
-	with( *kernelTLS.this_processor ) {
-		finish.action_code = Release;
-		finish.lock = lock;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal( thread_desc * thrd ) {
-	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
-		finish.action_code = Schedule;
-		finish.thrd = thrd;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
-	assert(thrd);
-	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
-		finish.action_code = Release_Schedule;
-		finish.lock = lock;
-		finish.thrd = thrd;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal(__spinlock_t * locks [], unsigned short count) {
-	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
-		finish.action_code = Release_Multi;
-		finish.locks = locks;
-		finish.lock_count = count;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
-	disable_interrupts();
-	with( *kernelTLS.this_processor ) {
-		finish.action_code = Release_Multi_Schedule;
-		finish.locks = locks;
-		finish.lock_count = lock_count;
-		finish.thrds = thrds;
-		finish.thrd_count = thrd_count;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal(__finish_callback_fptr_t callback) {
-	disable_interrupts();
-	with( *kernelTLS.this_processor ) {
-		finish.action_code = Callback;
-		finish.callback = callback;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-// KERNEL ONLY
-void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	with( * kernelTLS.this_processor ) {
-		finish.action_code = thrd ? Release_Schedule : Release;
-		finish.lock = lock;
-		finish.thrd = thrd;
-	}
-
-	returnToKernel();
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	$thread * thrd = kernelTLS.this_thread;
+	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
+
+	// SKULLDUGGERY: It is possible that we are preempting this thread just before
+	// it was going to park itself. If that is the case and it is already using the
+	// intrusive fields then we can't use them to preempt the thread
+	// If that is the case, abandon the preemption.
+	bool preempted = false;
+	if(thrd->next == 0p) {
+		preempted = true;
+		thrd->preempted = reason;
+		returnToKernel();
+	}
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	enable_interrupts_noPoll();
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+
+	return preempted;
 }
…
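With the finish-action machinery deleted, blocking callers follow the simple idiom visible in the semaphore change further down: publish the waiting thread, release the lock, then park(). A wake-up that slips in between the unlock and the park is not lost, because unpark()'s state exchange detects a thread that has not finished parking. The stand-alone C sketch below reproduces that guarantee with a mutex, a condition variable, and a pending-wake flag; it mirrors the shape of the park/unpark API from the diff, not the kernel's lock-free implementation.

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t m;
        pthread_cond_t  c;
        int             permit;   // 1 if an unpark is pending
    } parker_t;

    static void park(parker_t * p) {
        pthread_mutex_lock(&p->m);
        while(!p->permit) pthread_cond_wait(&p->c, &p->m);
        p->permit = 0;            // consume the pending wake-up
        pthread_mutex_unlock(&p->m);
    }

    static void unpark(parker_t * p) {
        pthread_mutex_lock(&p->m);
        p->permit = 1;            // record the wake-up even if no one waits yet
        pthread_cond_signal(&p->c);
        pthread_mutex_unlock(&p->m);
    }

    static parker_t P = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

    static void * waiter(void * unused) {
        park(&P);                 // returns immediately if unpark already ran
        printf("woken\n");
        return NULL;
    }

    int main(void) {
        unpark(&P);               // wake-up issued before the wait: not lost
        pthread_t t;
        pthread_create(&t, NULL, waiter, NULL);
        pthread_join(t, NULL);
        return 0;
    }

The same property is what makes "unlock( lock ); park();" in the semaphore's P safe: the waker only needs the thread pointer, not a guarantee that the thread is already blocked.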
 //-----------------------------------------------------------------------------
 // Kernel boot procedures
-static void kernel_startup(void) {
+static void __kernel_startup(void) {
 	verify( ! kernelTLS.preemption_state.enabled );
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");
…
 	// SKULLDUGGERY: the mainThread steals the process main thread
 	// which will then be scheduled by the mainProcessor normally
-	mainThread = (thread_desc *)&storage_mainThread;
+	mainThread = ($thread *)&storage_mainThread;
 	current_stack_info_t info;
 	info.storage = (__stack_t*)&storage_mainThreadCtx;
…
 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	ScheduleThread(mainThread);
+	__schedule_thread(mainThread);

 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
-	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
+	// context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
 	// mainThread is on the ready queue when this call is made.
-	kernel_first_resume( kernelTLS.this_processor );
+	__kernel_first_resume( kernelTLS.this_processor );
…
 }

-static void kernel_shutdown(void) {
+static void __kernel_shutdown(void) {
 	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
…
 	// which is currently here
 	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-	kernel_last_resume( kernelTLS.this_processor );
+	__kernel_last_resume( kernelTLS.this_processor );
 	mainThread->self_cor.state = Halted;
…
 // Kernel Quiescing
 //=============================================================================================
-static void halt(processor * this) with( *this ) {
+static void __halt(processor * this) with( *this ) {
 	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
…
 void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
-	thread_desc * thrd = kernel_data;
+	$thread * thrd = kernel_data;

 	if(thrd) {
…
 		// atomically release spin lock and block
-		BlockInternal( &lock );
+		unlock( lock );
+		park();
 	}
 	else {
…
 void V(semaphore & this) with( this ) {
-	thread_desc * thrd = 0p;
+	$thread * thrd = 0p;
 	lock( lock __cfaabi_dbg_ctx2 );
 	count += 1;
…
 	// make new owner
-	WakeThread( thrd );
+	unpark( thrd );
 }
…
 }

-void doregister( cluster * cltr, thread_desc & thrd ) {
+void doregister( cluster * cltr, $thread & thrd ) {
 	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
 	cltr->nthreads += 1;
…
 }

-void unregister( cluster * cltr, thread_desc & thrd ) {
+void unregister( cluster * cltr, $thread & thrd ) {
 	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
 	remove(cltr->threads, thrd );
…
 //-----------------------------------------------------------------------------
 // Debug
-bool threading_enabled(void) {
+bool threading_enabled(void) __attribute__((const)) {
 	return true;
 }