Timestamp: Feb 21, 2020, 3:33:14 PM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: a505021
Parents: b0c7419
Message: Some clean-up and renaming; also added __attribute__((const/pure)) where relevant.
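As a point of reference for the __attribute__((const/pure)) additions mentioned in the message, here is a minimal GNU C sketch. The functions square and my_strlen are illustrative only and are not part of this changeset: a const function's result may depend only on its arguments and it reads no global or pointed-to memory, while a pure function may read such memory but still has no side effects, which lets the compiler merge or hoist repeated calls.

#include <stddef.h>

/* Illustrative only, not from kernel.cfa.  "const": result depends only on the
   arguments, no reads of global or pointed-to memory, no side effects.
   "pure": may read memory, but has no observable side effects. */
static int    square   ( int x )          __attribute__((const));
static size_t my_strlen( const char * s ) __attribute__((pure));

static int square( int x ) {
        return x * x;                   /* no global state touched */
}

static size_t my_strlen( const char * s ) {
        size_t n = 0;
        while ( s[n] != '\0' ) n++;     /* reads memory, writes nothing visible */
        return n;
}

int main(void) {
        const char * msg = "hello";
        /* The optimizer may evaluate square(7) and my_strlen(msg) once each,
           even though both appear twice below. */
        return square( 7 ) - square( 7 ) + (int)( my_strlen( msg ) - my_strlen( msg ) );
}

The attributes are only safe on genuinely stateless helpers; a predicate that always returns the same value, such as threading_enabled in the diff below, is the typical candidate for const.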

File: 1 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, all other lines are unmodified.
  • libcfa/src/concurrency/kernel.cfa

--- libcfa/src/concurrency/kernel.cfa (rb0c7419)
+++ libcfa/src/concurrency/kernel.cfa (r8c50aed)
…
 //-----------------------------------------------------------------------------
 //Start and stop routine for the kernel, declared first to make sure they run first
-static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
-static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

 //-----------------------------------------------------------------------------
…
 }

-static void start(processor * this);
+static void * CtxInvokeProcessor(void * arg);
+
 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
         this.name = name;
…
         idleLock{};

-        start( &this );
+        __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
+
+        this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this );
+
+        __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
 }

…
 // Kernel Scheduling logic
 //=============================================================================================
-static thread_desc * nextThread(cluster * this);
-static void runThread(processor * this, thread_desc * dst);
-static void halt(processor * this);
+static thread_desc * __next_thread(cluster * this);
+static void __run_thread(processor * this, thread_desc * dst);
+static void __halt(processor * this);

 //Main of the processor contexts
…
                 thread_desc * readyThread = 0p;
                 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
-                        readyThread = nextThread( this->cltr );
+                        readyThread = __next_thread( this->cltr );

                         if(readyThread) {
…
                                 /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

-                                runThread(this, readyThread);
+                                __run_thread(this, readyThread);

                                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
…
                         } else {
                                 // spin(this, &spin_count);
-                                // halt(this);
+                                __halt(this);
                         }
                 }
…
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
-static void runThread(processor * this, thread_desc * thrd_dst) {
+static void __run_thread(processor * this, thread_desc * thrd_dst) {
         coroutine_desc * proc_cor = get_coroutine(this->runner);

…
                 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
                         // The thread was preempted, reschedule it and reset the flag
-                        ScheduleThread( thrd_dst );
+                        __schedule_thread( thrd_dst );
                         break RUNNING;
                 }
…
 } // Abort

-void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
+void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
         pthread_attr_t attr;

…
 }

-static void start(processor * this) {
-        __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
-
-        this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );
-
-        __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
-}
-
 // KERNEL_ONLY
-void kernel_first_resume( processor * this ) {
+static void __kernel_first_resume( processor * this ) {
         thread_desc * src = mainThread;
         coroutine_desc * dst = get_coroutine(this->runner);
…

 // KERNEL_ONLY
-void kernel_last_resume( processor * this ) {
+static void __kernel_last_resume( processor * this ) {
         coroutine_desc * src = &mainThread->self_cor;
         coroutine_desc * dst = get_coroutine(this->runner);
…
 // Scheduler routines
 // KERNEL ONLY
-void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
+void __schedule_thread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
         /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
…

 // KERNEL ONLY
-static thread_desc * nextThread(cluster * this) with( *this ) {
+static thread_desc * __next_thread(cluster * this) with( *this ) {
         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

…
                         // Wake lost the race,
                         thrd->state = Inactive;
-                        ScheduleThread( thrd );
+                        __schedule_thread( thrd );
                         break;
                 case Rerun:
…
 //-----------------------------------------------------------------------------
 // Kernel boot procedures
-static void kernel_startup(void) {
+static void __kernel_startup(void) {
         verify( ! kernelTLS.preemption_state.enabled );
         __cfaabi_dbg_print_safe("Kernel : Starting\n");
…
         // Add the main thread to the ready queue
         // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-        ScheduleThread(mainThread);
+        __schedule_thread(mainThread);

         // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
         // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
         // mainThread is on the ready queue when this call is made.
-        kernel_first_resume( kernelTLS.this_processor );
+        __kernel_first_resume( kernelTLS.this_processor );

…
 }

-static void kernel_shutdown(void) {
+static void __kernel_shutdown(void) {
         __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

…
         // which is currently here
         __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-        kernel_last_resume( kernelTLS.this_processor );
+        __kernel_last_resume( kernelTLS.this_processor );
         mainThread->self_cor.state = Halted;

…
 // Kernel Quiescing
 //=============================================================================================
-static void halt(processor * this) with( *this ) {
+static void __halt(processor * this) with( *this ) {
         // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

…
 //-----------------------------------------------------------------------------
 // Debug
-bool threading_enabled(void) {
+bool threading_enabled(void) __attribute__((const)) {
         return true;
 }
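For context on the two mechanisms this diff leans on, below is a rough, self-contained C sketch, not the library's actual implementation. demo_startup/demo_shutdown mirror the shape of the renamed __kernel_startup/__kernel_shutdown by using GCC constructor/destructor priorities, and demo_create_pthread shows one plausible way a helper in the spirit of __create_pthread can start a kernel thread on a caller-owned stack. DEMO_PRIORITY, the 1 MiB stack size, and all demo_* names are assumptions for illustration only.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical priority; among constructors, lower values run earlier, and
   among destructors they run later (values 0-100 are reserved by GCC). */
#define DEMO_PRIORITY 200

/* Run before main() and after main() returns, analogous in shape to the
   __kernel_startup / __kernel_shutdown declarations in the diff above. */
static void demo_startup (void) __attribute__(( constructor( DEMO_PRIORITY ) ));
static void demo_shutdown(void) __attribute__(( destructor ( DEMO_PRIORITY ) ));

static void demo_startup (void) { fprintf( stderr, "demo : starting up\n" ); }
static void demo_shutdown(void) { fprintf( stderr, "demo : shutting down\n" ); }

/* Create a pthread on an explicitly allocated stack and return that stack so
   the caller keeps ownership and can free it after joining.  Sketch only. */
static void * demo_create_pthread( pthread_t * tid, void * (*start)(void *), void * arg ) {
        size_t stacksize = 1 << 20;               /* 1 MiB, assumed sufficient */
        void * stack = malloc( stacksize );       /* must outlive the thread */
        if ( ! stack ) abort();

        pthread_attr_t attr;
        pthread_attr_init( &attr );
        pthread_attr_setstack( &attr, stack, stacksize );
        if ( pthread_create( tid, &attr, start, arg ) != 0 ) abort();
        pthread_attr_destroy( &attr );
        return stack;
}

static void * worker( void * arg ) {
        fprintf( stderr, "demo : worker running with arg %p\n", arg );
        return NULL;
}

int main(void) {
        pthread_t tid;
        void * stack = demo_create_pthread( &tid, worker, (void *)&tid );
        pthread_join( tid, NULL );
        free( stack );                            /* safe only after the join */
        return 0;
}

Returning the stack pointer from the helper is consistent with the diff storing the result of __create_pthread in this.stack, which keeps the caller responsible for the stack memory's lifetime; compile the sketch with -pthread.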