Timestamp:
Feb 13, 2020, 4:18:07 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
50b8885
Parents:
9f575ea
Message:

Added thread park/unpark primitives and removed BlockInternal.
Converted monitors to use park/unpark.
The intrusive queue now marks the next field while a thread is inside the queue.
Added several asserts to kernel and monitor.
Added a few tests for park and unpark.
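
At a glance, the changeset replaces the family of BlockInternal/WakeThread entry points with two public primitives plus two kernel-only helpers. The signatures below are copied from the new side of the diff; the one-line summaries are editorial, not part of the commit:

	// block the current thread until another thread unpark()s it
	void park( void );

	// make thrd runnable again; safe to call even before thrd has actually
	// parked, the race is resolved by an atomic exchange on thrd->state;
	// must_yield selects Reschedule (requeue) over Rerun (run immediately)
	void unpark( thread_desc * thrd, bool must_yield );

	// KERNEL ONLY: final exit of a finished thread back to the kernel
	void LeaveThread();

	// KERNEL ONLY: preempt the current thread; returns false if the thread
	// was about to park and the preemption had to be abandoned
	bool force_yield( __Preemption_Reason reason );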

File:
1 edited

Legend:

    ' ' Unmodified
    '+' Added
    '-' Removed
  • libcfa/src/concurrency/kernel.cfa

--- r9f575ea
+++ r3381ed7

@@ -258 +258 @@
 // Kernel Scheduling logic
 //=============================================================================================
+static thread_desc * nextThread(cluster * this);
 static void runThread(processor * this, thread_desc * dst);
-static void finishRunning(processor * this);
 static void halt(processor * this);
 
     
@@ -286 +286 @@
 
 			if(readyThread) {
-				verify( ! kernelTLS.preemption_state.enabled );
+				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+				/* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
+				/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
 
 				runThread(this, readyThread);
 
-				verify( ! kernelTLS.preemption_state.enabled );
-
-				// Some actions need to be taken from the kernel
-				finishRunning(this);
+				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
 				spin_count = 0;
 			} else {
 				// spin(this, &spin_count);
-				halt(this);
+				// halt(this);
 			}
 		}
     
@@ -332 +331 @@
 
 	// Actually run the thread
-	RUN:
-	{
+	RUNNING:  while(true) {
 		if(unlikely(thrd_dst->preempted)) {
-			thrd_dst->preempted = false;
+			thrd_dst->preempted = __NO_PREEMPTION;
+			verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
 		} else {
+			verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
 			thrd_dst->state = Active;
 		}
+
+		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
 		// set context switch to the thread that the processor is executing
     
@@ -344 +346 @@
 		CtxSwitch( &proc_cor->context, &thrd_dst->context );
 		// when CtxSwitch returns we are back in the processor coroutine
-	}
-
-	// We just finished running a thread; there are a few things that could have happened.
-	// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
-	// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
-	// 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue anyway.
-	// 4 - Preempted
-	// In case 1, we may have won a race so we can't write to the state again.
-	// In case 2, we lost the race so we now own the thread.
-	// In case 3, we lost the race but can just reschedule the thread.
-
-	if(unlikely(thrd_dst->preempted)) {
-		// The thread was preempted, reschedule it and reset the flag
-		ScheduleThread( thrd_dst );
-
-		// Just before returning to the processor, set the processor coroutine to active
-		proc_cor->state = Active;
-		return;
-	}
-
-	// set state of processor coroutine to active and the thread to inactive
-	enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
-	switch(old_state) {
-		case Halted:
-			// The thread has halted, it should never be scheduled/run again, set it back to Halted and move on
-			thrd_dst->state = Halted;
-			break;
-		case Active:
-			// This is case 1, the regular case, nothing more is needed
-			break;
-		case Rerun:
-			// This is case 2, the racy case, someone tried to run this thread before it finished blocking
-			// In this case, just run it again.
-			goto RUN;
-		case Reschedule:
-			// This is case 3, someone tried to run this before it finished blocking
-			// but it must go through the ready-queue
-			thrd_dst->state = Inactive;  /* restore invariant */
+
+		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+
+		// We just finished running a thread; there are a few things that could have happened.
+		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
+		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
+		// 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue anyway.
+		// 4 - Preempted
+		// In case 1, we may have won a race so we can't write to the state again.
+		// In case 2, we lost the race so we now own the thread.
+		// In case 3, we lost the race but can just reschedule the thread.
+
+		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
+			// The thread was preempted, reschedule it and reset the flag
 			ScheduleThread( thrd_dst );
-			break;
-		case Inactive:
-		case Start:
-		case Primed:
-		default:
-			// This makes no sense; something is wrong, abort
-			abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
+			break RUNNING;
+		}
+
+		// set state of processor coroutine to active and the thread to inactive
+		static_assert(sizeof(thrd_dst->state) == sizeof(int));
+		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
+		switch(old_state) {
+			case Halted:
+				// The thread has halted, it should never be scheduled/run again, set it back to Halted and move on
+				thrd_dst->state = Halted;
+				break RUNNING;
+			case Active:
+				// This is case 1, the regular case, nothing more is needed
+				break RUNNING;
+			case Rerun:
+				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
+				// In this case, just run it again.
+				continue RUNNING;
+			case Reschedule:
+				// This is case 3, someone tried to run this before it finished blocking
+				// but it must go through the ready-queue
+				thrd_dst->state = Inactive;  /* restore invariant */
+				ScheduleThread( thrd_dst );
+				break RUNNING;
+			default:
+				// This makes no sense; something is wrong, abort
+				abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
+		}
 	}
 
     
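The RUNNING loop above uses CFA's labeled break/continue (break RUNNING, continue RUNNING) where the old code used a RUN: block and a goto. A minimal C sketch of the same control flow; ctx_switch_once and schedule_thread are hypothetical stand-ins for CtxSwitch and ScheduleThread, and the enum ordering is an assumption:

	#include <stdatomic.h>
	#include <stdlib.h>

	enum coroutine_state { Start, Primed, Inactive, Active, Rerun, Reschedule, Halted };

	static void ctx_switch_once(void) { /* stand-in for CtxSwitch: thread runs, then blocks */ }
	static void schedule_thread(void) { /* stand-in for ScheduleThread: push on ready-queue */ }

	static void run_thread_model(_Atomic int * state) {
		while (1) {                               // RUNNING: while(true)
			atomic_store(state, Active);          // thread is about to run
			ctx_switch_once();                    // run the thread until it blocks
			int old_state = atomic_exchange(state, Inactive);
			switch (old_state) {
				case Halted:                      // thread finished: pin it to Halted
					atomic_store(state, Halted);
					goto done;                    // break RUNNING
				case Active:                      // case 1: parked normally, leave it
					goto done;                    // break RUNNING
				case Rerun:                       // case 2: an unpark raced in
					continue;                     // continue RUNNING: run it again
				case Reschedule:                  // case 3: requeue through the ready-queue
					atomic_store(state, Inactive);
					schedule_thread();
					goto done;                    // break RUNNING
				default:
					abort();                      // invalid state
			}
		}
	done:;
	}

	int main(void) {
		_Atomic int state = Start;
		run_thread_model(&state);                 // runs once, then parks (state == Inactive)
		return 0;
	}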
@@ -398 +398 @@
 // KERNEL_ONLY
 static void returnToKernel() {
-	verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
 	thread_desc * thrd_src = kernelTLS.this_thread;
     
@@ -416 +416 @@
 	}
 
-	verify( ! kernelTLS.preemption_state.enabled );
-}
-
-// KERNEL_ONLY
-// Once a thread has finished running, some of
-// its final actions must be executed from the kernel
-static void finishRunning(processor * this) with( this->finish ) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	verify( action_code == No_Action );
-	choose( action_code ) {
-	case No_Action:
-		break;
-	case Release:
-		unlock( *lock );
-	case Schedule:
-		ScheduleThread( thrd );
-	case Release_Schedule:
-		unlock( *lock );
-		ScheduleThread( thrd );
-	case Release_Multi:
-		for(int i = 0; i < lock_count; i++) {
-			unlock( *locks[i] );
-		}
-	case Release_Multi_Schedule:
-		for(int i = 0; i < lock_count; i++) {
-			unlock( *locks[i] );
-		}
-		for(int i = 0; i < thrd_count; i++) {
-			ScheduleThread( thrds[i] );
-		}
-	case Callback:
-		callback();
-	default:
-		abort("KERNEL ERROR: Unexpected action to run after thread");
-	}
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 }
 
     
@@ -581 +547 @@
 //-----------------------------------------------------------------------------
 // Scheduler routines
-
 // KERNEL ONLY
 void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	/* paranoid */ verifyf( thrd->state == Inactive || thrd->state == Start || thrd->preempted, "state : %d, preempted %d\n", thrd->state, thrd->preempted);
+	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
+	/* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+	                  "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
+	                  "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */ #endif
 	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
 
     
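The `/* paranoid */` checks rely on verify/verifyf compiling away outside `__CFA_WITH_VERIFY__` builds. The real macros live in the CFA runtime; the expansion below is only an assumed approximation (GNU C, for `##__VA_ARGS__`) of how such debug-only assertions typically look:

	#include <stdio.h>
	#include <stdlib.h>

	/* Assumed shape of the debug-only checks; illustration, not the CFA source. */
	#if defined( __CFA_WITH_VERIFY__ )
		#define verify(cond)            do { if( !(cond) ) abort(); } while(0)
		#define verifyf(cond, fmt, ...) do { if( !(cond) ) { \
			fprintf( stderr, fmt, ##__VA_ARGS__ ); abort(); } } while(0)
	#else
		#define verify(cond)            do {} while(0)
		#define verifyf(cond, fmt, ...) do {} while(0)
	#endif

	int main(void) {
		int next = 0;
		verifyf( next == 0, "Expected null got %d", next );  // passes, or aborts with the message
		return 0;
	}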
@@ -608 +578 @@
 
 // KERNEL ONLY
-thread_desc * nextThread(cluster * this) with( *this ) {
-	verify( ! kernelTLS.preemption_state.enabled );
+static thread_desc * nextThread(cluster * this) with( *this ) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
 	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
 	thread_desc * head = pop_head( ready_queue );
 	unlock( ready_queue_lock );
-	verify( ! kernelTLS.preemption_state.enabled );
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	return head;
 }
 
-void BlockInternal() {
+void unpark( thread_desc * thrd, bool must_yield ) {
+	if( !thrd ) return;
+
+	enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
+
 	disable_interrupts();
-	verify( ! kernelTLS.preemption_state.enabled );
+	static_assert(sizeof(thrd->state) == sizeof(int));
+	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
+	switch(old_state) {
+		case Active:
+			// Wake won the race, the thread will reschedule/rerun itself
+			break;
+		case Inactive:
+			/* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
+
+			// Wake lost the race; restore the invariant and schedule the thread
+			thrd->state = Inactive;
+			ScheduleThread( thrd );
+			break;
+		case Rerun:
+		case Reschedule:
+			abort("More than one thread attempted to schedule thread %p\n", thrd);
+			break;
+		case Halted:
+		case Start:
+		case Primed:
+		default:
+			// This makes no sense; something is wrong, abort
+			abort();
+	}
+	enable_interrupts( __cfaabi_dbg_ctx );
+}
+
+void park( void ) {
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+	disable_interrupts();
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
+
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal( __spinlock_t * lock ) {
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+
+}
+
+// KERNEL ONLY
+void LeaveThread() {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	returnToKernel();
+}
+
+// KERNEL ONLY
+bool force_yield( __Preemption_Reason reason ) {
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
 	disable_interrupts();
-	unlock( *lock );
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal( thread_desc * thrd ) {
-	disable_interrupts();
-	WakeThread( thrd, false );
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
-	disable_interrupts();
-	unlock( *lock );
-	WakeThread( thrd, false );
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal(__spinlock_t * locks [], unsigned short count) {
-	disable_interrupts();
-	for(int i = 0; i < count; i++) {
-		unlock( *locks[i] );
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
-	disable_interrupts();
-	for(int i = 0; i < lock_count; i++) {
-		unlock( *locks[i] );
-	}
-	for(int i = 0; i < thrd_count; i++) {
-		WakeThread( thrds[i], false );
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void BlockInternal(__finish_callback_fptr_t callback) {
-	disable_interrupts();
-	callback();
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-// KERNEL ONLY
-void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	unlock( *lock );
-	WakeThread( thrd, false );
-
-	returnToKernel();
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	thread_desc * thrd = kernelTLS.this_thread;
+	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
+
+	// SKULLDUGGERY: It is possible that we are preempting this thread just before
+	// it was going to park itself. If that is the case and it is already using the
+	// intrusive fields then we can't use them to preempt the thread.
+	// In that case, abandon the preemption.
+	bool preempted = false;
+	if(thrd->next == 0p) {
+		preempted = true;
+		thrd->preempted = reason;
+		returnToKernel();
+	}
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	enable_interrupts_noPoll();
+	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+
+	return preempted;
 }
 
     
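The core of the changeset is the race between a thread parking itself and another thread unparking it, resolved by an atomic exchange on the thread state. The standalone C11 program below models that protocol; thread_model, make_runnable, and the reduced state set are illustrative assumptions, not CFA code:

	#include <stdatomic.h>
	#include <stdio.h>

	enum { Active, Inactive, Rerun };           // subset of the thread states used here
	struct thread_model { _Atomic int state; };

	static void make_runnable(struct thread_model * t) {
		printf("thread %p is runnable again\n", (void *)t);
	}

	// Parking side (runs in the kernel, after the thread blocks): publish
	// Inactive and inspect the previous state to learn whether an unpark
	// already happened.
	static void finish_blocking(struct thread_model * t) {
		int old = atomic_exchange(&t->state, Inactive);
		if (old == Rerun) {                     // case 2: unpark raced in; the real kernel reruns the thread
			make_runnable(t);
		}                                       // old == Active: regular case, the thread stays parked
	}

	// Waking side (unpark): publish Rerun and inspect the previous state.
	static void unpark_model(struct thread_model * t) {
		int old = atomic_exchange(&t->state, Rerun);
		if (old == Inactive) {                  // wake lost the race: the thread fully parked
			atomic_store(&t->state, Inactive);  // restore the invariant
			make_runnable(t);                   // ScheduleThread in the real code
		}                                       // old == Active: wake won, the thread will see Rerun itself
	}

	int main(void) {
		// Interleaving where the waker slips in before the park completes:
		struct thread_model t = { .state = Active };
		unpark_model(&t);                       // exchange: Active -> Rerun, waker done
		finish_blocking(&t);                    // exchange sees Rerun: the wakeup is not lost
		return 0;
	}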
@@ -939 +900 @@
 
 		// atomically release spin lock and block
-		BlockInternal( &lock );
+		unlock( lock );
+		park();
 	}
 	else {
     
@@ -958 +920 @@
 
 	// make new owner
-	WakeThread( thrd, false );
+	unpark( thrd );
 }
 
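A closing note on the monitor conversion above: BlockInternal( &lock ) released the lock and blocked as one kernel-mediated step, while the new code does unlock( lock ); park(); in two steps. No wakeup can be lost in the window between them, because unpark() publishes Rerun/Reschedule with its atomic exchange and the kernel's own exchange in runThread observes it, as sketched in the model above:

	waiter                                   waker
	------                                   -----
	unlock( lock );
	                                         unpark( thrd );
	                                         // exchange: Active -> Rerun; waker sees
	                                         // Active and has nothing more to do
	park();
	// kernel: exchange: Rerun -> Inactive,
	// sees Rerun -> continue RUNNING: the
	// thread resumes, the wakeup is not lost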