Ignore:
Timestamp:
Mar 27, 2020, 7:28:06 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
bb2e05e
Parents:
f0ce5f4
Message:

Fixed ready state.
Fixed race condition between halt and wake_*

File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel.cfa

    rf0ce5f4 r92e7631  
    114114
    115115//-----------------------------------------------------------------------------
     116// Kernel Scheduling logic
     117static $thread * __next_thread(cluster * this);
     118static void __run_thread(processor * this, $thread * dst);
     119static $thread * __halt(processor * this);
     120static bool __wake_one(cluster * cltr, bool was_empty);
     121static bool __wake_proc(processor *);
     122
     123//-----------------------------------------------------------------------------
    116124// Kernel storage
    117125KERNEL_STORAGE(cluster,         mainCluster);
     
    220228        runner.proc = &this;
    221229
    222         idleLock{};
     230        idle{};
    223231
    224232        __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
     
    234242
    235243                __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
    236                 wake( &this );
     244                __wake_proc( &this );
    237245
    238246                P( terminated );
     
    264272// Kernel Scheduling logic
    265273//=============================================================================================
    266 static $thread * __next_thread(cluster * this);
    267 static void __run_thread(processor * this, $thread * dst);
    268 static void __halt(processor * this);
    269 
    270274//Main of the processor contexts
    271275void main(processorCtx_t & runner) {
     
    289293                $thread * readyThread = 0p;
    290294                for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
     295                        // Try to get the next thread
    291296                        readyThread = __next_thread( this->cltr );
    292297
    293                         if(readyThread) {
     298                        // If no ready thread
     299                        if( readyThread == 0p ) {
     300                                // Block until a thread is ready
     301                                readyThread = __halt(this);
     302                        }
     303
     304                        // Check if we actually found a thread
     305                        if( readyThread ) {
    294306                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    295                                 /* paranoid */ verifyf( readyThread->state == Blocked || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
     307                                /* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
    296308                                /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
    297309
     310                                // We found a thread run it
    298311                                __run_thread(this, readyThread);
    299312
    300313                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    301 
    302                                 spin_count = 0;
    303                         } else {
    304                                 // spin(this, &spin_count);
    305                                 __halt(this);
    306314                        }
    307315                }
     
    312320        unregister(this->cltr, this);
    313321
    314         bool signalled = V( this->terminated );
    315         if(signalled)
     322        V( this->terminated );
    316323
    317324        __cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
     325
     326        // HACK : the coroutine context switch expects this_thread to be set
     327        // and it makes sense for it to be set in all other cases except here
     328        // fake it
     329        if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
    318330}
    319331
     
    338350                if(unlikely(thrd_dst->preempted)) {
    339351                        thrd_dst->preempted = __NO_PREEMPTION;
    340                         verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
     352                        verify(thrd_dst->state == Active  || thrd_dst->state == Rerun);
    341353                } else {
    342                         verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Blocked);
     354                        verify(thrd_dst->state == Blocked || thrd_dst->state == Ready); // Ready means scheduled normally, blocked means rerun
    343355                        thrd_dst->state = Active;
    344356                }
     
    521533        dst->starter = dst->starter ? dst->starter : &src->self_cor;
    522534
    523         // set state of current coroutine to inactive
    524         src->state = src->state == Halted ? Halted : Blocked;
     535        // make sure the current state is still correct
     536        /* paranoid */ verify(src->state == Ready);
    525537
    526538        // context switch to specified coroutine
     
    531543        mainThread->curr_cor = &mainThread->self_cor;
    532544
    533         // set state of new coroutine to active
    534         src->state = Active;
     545        // make sure the current state has been updated
     546        /* paranoid */ verify(src->state == Active);
    535547
    536548        verify( ! kernelTLS.preemption_state.enabled );
     
    570582        unlock( ready_queue_lock );
    571583
    572         if(was_empty) {
    573                 lock      (proc_list_lock __cfaabi_dbg_ctx2);
    574                 if(idles) {
    575                         wake_fast(idles.head);
    576                 }
    577                 unlock    (proc_list_lock);
    578         }
    579         else if( struct processor * idle = idles.head ) {
    580                 wake_fast(idle);
    581         }
     584        __wake_one(thrd->curr_cluster, was_empty);
    582585
    583586        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     
    768771        __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
    769772
    770         verify( TL_GET( preemption_state.enabled ) );
     773        /* paranoid */ verify( TL_GET( preemption_state.enabled ) );
    771774        disable_interrupts();
    772         verify( ! kernelTLS.preemption_state.enabled );
     775        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    773776
    774777        // SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
     
    800803
    801804//=============================================================================================
    802 // Kernel Quiescing
     805// Kernel Idle Sleep
    803806//=============================================================================================
    804 static void __halt(processor * this) with( *this ) {
    805         // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
    806 
     807static $thread * __halt(processor * this) with( *this ) {
     808        if( do_terminate ) return 0p;
     809
     810        // First, lock the cluster idle
     811        lock( cltr->idle_lock __cfaabi_dbg_ctx2 );
     812
     813        // Check if we can find a thread
     814        if( $thread * found = __next_thread( cltr ) ) {
     815                unlock( cltr->idle_lock );
     816                return found;
     817        }
     818
     819        // Move this processor from the active list to the idle list
     820        move_to_front(cltr->procs, cltr->idles, *this);
     821
     822        // Unlock the idle lock so we don't go to sleep with a lock
     823        unlock    (cltr->idle_lock);
     824
     825        // We are ready to sleep
     826        __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
     827        wait( idle );
     828
     829        // We have woken up
     830        __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
     831
     832        // Get ourself off the idle list
    807833        with( *cltr ) {
    808                 lock      (proc_list_lock __cfaabi_dbg_ctx2);
    809                 remove    (procs, *this);
    810                 push_front(idles, *this);
    811                 unlock    (proc_list_lock);
    812         }
    813 
    814         __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
    815 
    816         wait( idleLock );
    817 
    818         __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
    819 
    820         with( *cltr ) {
    821                 lock      (proc_list_lock __cfaabi_dbg_ctx2);
    822                 remove    (idles, *this);
    823                 push_front(procs, *this);
    824                 unlock    (proc_list_lock);
    825         }
     834                lock  (idle_lock __cfaabi_dbg_ctx2);
     835                move_to_front(idles, procs, *this);
     836                unlock(idle_lock);
     837        }
     838
     839        // Don't check the ready queue again, we may not be in a position to run a thread
     840        return 0p;
     841}
     842
     843// Wake a thread from the front if there are any
     844static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
     845        // if we don't want to force check if we know it's false
     846        if( !this->idles.head && !force ) return false;
     847
     848        // First, lock the cluster idle
     849        lock( this->idle_lock __cfaabi_dbg_ctx2 );
     850
     851        // Check if there is someone to wake up
     852        if( !this->idles.head ) {
     853                // Nope unlock and return false
     854                unlock( this->idle_lock );
     855                return false;
     856        }
     857
     858        // Wake them up
     859        post( this->idles.head->idle );
     860
     861        // Unlock and return true
     862        unlock( this->idle_lock );
     863        return true;
     864}
     865
     866// Unconditionally wake a thread
     867static bool __wake_proc(processor * this) {
     868        return post( this->idle );
    826869}
    827870
     
    9671010
    9681011void doregister( cluster * cltr, processor * proc ) {
    969         lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
     1012        lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
    9701013        cltr->nprocessors += 1;
    9711014        push_front(cltr->procs, *proc);
    972         unlock    (cltr->proc_list_lock);
     1015        unlock    (cltr->idle_lock);
    9731016}
    9741017
    9751018void unregister( cluster * cltr, processor * proc ) {
    976         lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
     1019        lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
    9771020        remove(cltr->procs, *proc );
    9781021        cltr->nprocessors -= 1;
    979         unlock(cltr->proc_list_lock);
     1022        unlock(cltr->idle_lock);
    9801023}
    9811024
Note: See TracChangeset for help on using the changeset viewer.