--- libcfa/src/concurrency/kernel.cfa	(ada0246d)
+++ libcfa/src/concurrency/kernel.cfa	(29cb302)
 // Kernel Scheduling logic
 static $thread * __next_thread(cluster * this);
+static bool __has_next_thread(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static $thread * __halt(processor * this);
-static bool __wake_one(cluster * cltr, bool was_empty);
 static bool __wake_proc(processor *);
+static bool __wake_one(struct __processor_id_t * id, cluster * cltr);
+static void __halt(processor * this);
 
 //-----------------------------------------------------------------------------
 // Kernel storage
-KERNEL_STORAGE(cluster,         mainCluster);
-KERNEL_STORAGE(processor,       mainProcessor);
-KERNEL_STORAGE($thread, mainThread);
-KERNEL_STORAGE(__stack_t,       mainThreadCtx);
-
-cluster     * mainCluster;
-processor   * mainProcessor;
-$thread * mainThread;
+KERNEL_STORAGE(cluster,              mainCluster);
+KERNEL_STORAGE(processor,            mainProcessor);
+KERNEL_STORAGE($thread,              mainThread);
+KERNEL_STORAGE(__stack_t,            mainThreadCtx);
+KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
+#if !defined(__CFA_NO_STATISTICS__)
+KERNEL_STORAGE(__stats_t, mainProcStats);
+#endif
+
+cluster              * mainCluster;
+processor            * mainProcessor;
+$thread              * mainThread;
+__scheduler_RWLock_t * __scheduler_lock;
 
 extern "C" {
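The KERNEL_STORAGE lines above reserve static storage for kernel objects that must exist before any allocator is usable; the boot code later constructs them in place (see kernel_startup below, where __scheduler_lock is pointed at storage___scheduler_lock and then constructed). A minimal sketch of the idiom in plain C, with hypothetical names:

	#include <stdalign.h>

	typedef struct { int readers; int writer; } rwlock_t;   /* stand-in type */

	/* reserve suitably aligned bytes at compile time, no heap needed */
	#define KERNEL_STORAGE(T, X) static alignas(T) char storage_##X[sizeof(T)]

	KERNEL_STORAGE(rwlock_t, scheduler_lock);
	static rwlock_t * scheduler_lock;

	static void rwlock_init(rwlock_t * l) { l->readers = 0; l->writer = 0; }

	void boot(void) {
		/* construct the object in place inside the reserved storage */
		scheduler_lock = (rwlock_t *)&storage_scheduler_lock;
		rwlock_init(scheduler_lock);
	}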
     
 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
 	NULL,	// cannot use 0p
+	NULL,
 	NULL,
 	{ 1, false, false },
     
 
 void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
+	ticket = 1;
 	state = Start;
 	self_cor{ info };
     
 	self_mon.recursion = 1;
 	self_mon_p = &self_mon;
-	next = 0p;
+	link.next = 0p;
+	link.prev = 0p;
 
 	node.next = 0p;
     
 static void * __invoke_processor(void * arg);
 
-void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
+void ?{}(processor & this, const char name[], cluster & _cltr) with( this ) {
 	this.name = name;
-	this.cltr = &cltr;
+	this.cltr = &_cltr;
+	id = -1u;
 	terminated{ 0 };
 	destroyer = 0p;
     
 
 	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
+	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
     
 
 	free( this.stack );
+
+	__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 }
 
     
 	this.name = name;
 	this.preemption_rate = preemption_rate;
+	this.nprocessors = 0;
 	ready_queue{};
-	ready_queue_lock{};
 
 	#if !defined(__CFA_NO_STATISTICS__)
 		print_stats = false;
+		stats = alloc();
+		__init_stats( stats );
 	#endif
 
-	procs{ __get };
-	idles{ __get };
 	threads{ __get };
 
     
 void ^?{}(cluster & this) {
 	__kernel_io_shutdown( this, &this == mainCluster );
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(this.print_stats) {
+			__print_stats( this.stats );
+		}
+		free( this.stats );
+	#endif
 
 	unregister(this);
     
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
 
-	doregister(this->cltr, this);
+	// register the processor unless it's the main thread which is handled in the boot sequence
+	if(this != mainProcessor) {
+		this->id = doregister((__processor_id_t*)this);
+		// Lock the RWlock so no-one pushes/pops while we are changing the queue
+		uint_fast32_t last_size = ready_mutate_lock();
+
+			// Adjust the ready queue size
+			ready_queue_grow( this->cltr );
+
+		// Unlock the RWlock
+		ready_mutate_unlock( last_size );
+	}
 
 	{
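The registration added above ties each processor to a slot in the new scheduler reader-writer lock: ordinary scheduling takes the read side through the processor's id (ready_schedule_lock / ready_schedule_unlock), while structural changes such as ready_queue_grow / ready_queue_shrink take the write side (ready_mutate_lock / ready_mutate_unlock). A rough sketch of such a per-slot reader-writer lock in C11 atomics, with hypothetical names; the actual CFA lock differs in detail:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define MAX_PROCS 128
	static atomic_bool reader[MAX_PROCS];   /* one slot per registered processor */
	static atomic_bool writer;

	void read_lock(unsigned id) {
		for (;;) {
			atomic_store(&reader[id], true);
			if (!atomic_load(&writer)) return;   /* no resize in progress */
			atomic_store(&reader[id], false);    /* back off and retry */
			while (atomic_load(&writer)) /* spin */;
		}
	}
	void read_unlock(unsigned id) { atomic_store(&reader[id], false); }

	void write_lock(unsigned nprocs) {
		while (atomic_exchange(&writer, true)) /* spin */;
		for (unsigned i = 0; i < nprocs; i++)    /* wait out every reader */
			while (atomic_load(&reader[i])) /* spin */;
	}
	void write_unlock(void) { atomic_store(&writer, false); }

The point of the design is that the hot path (read_lock) touches only processor-local state, so concurrent scheduling never contends; only the rare grow/shrink pays the cost of visiting every slot.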
     
 			readyThread = __next_thread( this->cltr );
 
-			// If no ready thread
-			if( readyThread == 0p ) {
-				// Block until a thread is ready
-				readyThread = __halt(this);
-			}
-
 			// Check if we actually found a thread
 			if( readyThread ) {
 				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 				/* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
-				/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
+				/* paranoid */ verifyf( readyThread->link.next == 0p, "Expected null got %p", readyThread->link.next );
+				__builtin_prefetch( readyThread->context.SP );
 
 				// We found a thread run it
     
 				/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 			}
+			else {
+				// Block until a thread is ready
+				__halt(this);
+			}
 		}
 
     
 	}
 
-	unregister(this->cltr, this);
-
 	V( this->terminated );
 
+	// unregister the processor unless it's the main thread which is handled in the boot sequence
+	if(this != mainProcessor) {
+		// Lock the RWlock so no-one pushes/pops while we are changing the queue
+		uint_fast32_t last_size = ready_mutate_lock();
+
+			// Adjust the ready queue size
+			ready_queue_shrink( this->cltr );
+
+			// Make sure we aren't on the idle queue
+			#if !defined(__CFA_NO_STATISTICS__)
+				bool removed =
+			#endif
+			unsafe_remove( this->cltr->idles, this );
+
+			#if !defined(__CFA_NO_STATISTICS__)
+				if(removed) __tls_stats()->ready.sleep.exits++;
+			#endif
+
+		// Unlock the RWlock
+		ready_mutate_unlock( last_size );
+
+		// Finally we don't need the read_lock any more
+		unregister((__processor_id_t*)this);
+	}
+	else {
+		// HACK : the coroutine context switch expects this_thread to be set
+		// and it makes sense for it to be set in all other cases except here
+		// fake it
+		kernelTLS.this_thread = mainThread;
+	}
+
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
-
-	// HACK : the coroutine context switch expects this_thread to be set
-	// and it makes sense for it to be set in all other cases except here
-	// fake it
-	if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
 }
 
     
 	// Actually run the thread
 	RUNNING:  while(true) {
-		if(unlikely(thrd_dst->preempted)) {
-			thrd_dst->preempted = __NO_PREEMPTION;
-			verify(thrd_dst->state == Active  || thrd_dst->state == Rerun);
-		} else {
-			verify(thrd_dst->state == Blocked || thrd_dst->state == Ready); // Ready means scheduled normally, blocked means rerun
-			thrd_dst->state = Active;
-		}
+		thrd_dst->preempted = __NO_PREEMPTION;
+		thrd_dst->state = Active;
 
 		__cfaabi_dbg_debug_do(
     
 		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// The thread was preempted, reschedule it and reset the flag
-			__schedule_thread( thrd_dst );
+			__schedule_thread( (__processor_id_t*)this, thrd_dst );
 			break RUNNING;
 		}
 
+		if(unlikely(thrd_dst->state == Halted)) {
+			// The thread has halted, it should never be scheduled/run again
+			// We may need to wake someone up here since
+			unpark( this->destroyer __cfaabi_dbg_ctx2 );
+			this->destroyer = 0p;
+			break RUNNING;
+		}
+
+		/* paranoid */ verify( thrd_dst->state == Active );
+		thrd_dst->state = Blocked;
+
 		// set state of processor coroutine to active and the thread to inactive
-		static_assert(sizeof(thrd_dst->state) == sizeof(int));
-		enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Blocked, __ATOMIC_SEQ_CST);
-		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_state; )
-		switch(old_state) {
-			case Halted:
-				// The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
-				thrd_dst->state = Halted;
-
-				// We may need to wake someone up here since
-				unpark( this->destroyer __cfaabi_dbg_ctx2 );
-				this->destroyer = 0p;
-				break RUNNING;
-			case Active:
+		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
+		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_ticket; )
+		switch(old_ticket) {
+			case 1:
 				// This is case 1, the regular case, nothing more is needed
 				break RUNNING;
-			case Rerun:
+			case 2:
 				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
 				// In this case, just run it again.
     
 			default:
 				// This makes no sense, something is wrong abort
-				abort("Finished running a thread that was Blocked/Start/Primed %d\n", old_state);
+				abort();
 		}
 	}
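This hunk and the matching one in __unpark below replace the old state-exchange race (Blocked vs. Rerun) with a ticket counter: the thread's ticket starts at 1, the parking processor does an atomic fetch-sub, the waker an atomic fetch-add, and the values each side observes decide which one reruns the thread, so exactly one does. A minimal sketch of the handshake in C11 atomics (the thread type and the scheduling calls are stand-ins):

	#include <stdatomic.h>

	struct thread_s { atomic_int ticket; /* ... */ };   /* ticket starts at 1 */

	/* processor side: the thread has stopped running and wants to block */
	void park_side(struct thread_s * t) {
		int old = atomic_fetch_sub(&t->ticket, 1);
		switch (old) {
		case 1: /* no unpark raced us: leave the thread blocked */ break;
		case 2: /* an unpark arrived while we were blocking: run it again */ break;
		default: /* impossible by construction */ break;
		}
	}

	/* waker side: someone wants the thread to run */
	void unpark_side(struct thread_s * t) {
		int old = atomic_fetch_add(&t->ticket, 1);
		switch (old) {
		case 1: /* we won the race: the parking side will see 2 and rerun */ break;
		case 0: /* we lost: the thread is fully blocked, reschedule it here */ break;
		default: /* two concurrent unparks: a bug */ break;
		}
	}

Because fetch-add and fetch-sub are each a single atomic step, there is no window where both sides (or neither side) believe they must reschedule the thread, which is the bug class the old Blocked/Rerun exchange had to special-case.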
     
 	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
 	$thread * thrd_src = kernelTLS.this_thread;
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		struct processor * last_proc = kernelTLS.this_processor;
+	#endif
 
 	// Run the thread on this processor
     
 	}
 
+	#if !defined(__CFA_NO_STATISTICS__)
+		if(last_proc != kernelTLS.this_processor) {
+			__tls_stats()->ready.threads.migration++;
+		}
+	#endif
+
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
     
 // It effectively constructs a coroutine by stealing the pthread stack
 static void * __invoke_processor(void * arg) {
+	#if !defined( __CFA_NO_STATISTICS__ )
+		__stats_t local_stats;
+		__init_stats( &local_stats );
+		kernelTLS.this_stats = &local_stats;
+	#endif
+
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;
     
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);
 
+	#if !defined(__CFA_NO_STATISTICS__)
+		__tally_stats(proc->cltr->stats, &local_stats);
+	#endif
+
 	return 0p;
 }
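With statistics enabled, each kernel thread now keeps its counters in a local __stats_t, bumped with no synchronization on the fast path, and folds them into the cluster's shared stats exactly once via __tally_stats when the processor exits. A sketch of the scheme in plain C (hypothetical names and fields):

	#include <stdatomic.h>

	struct stats        { unsigned long halts, wakes, migrations; };
	struct shared_stats { atomic_ulong  halts, wakes, migrations; };

	static _Thread_local struct stats local;   /* per kernel thread, no atomics */

	/* called once on processor shutdown: one atomic add per counter, total */
	void tally(struct shared_stats * s) {
		atomic_fetch_add(&s->halts,      local.halts);
		atomic_fetch_add(&s->wakes,      local.wakes);
		atomic_fetch_add(&s->migrations, local.migrations);
	}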
     
 // Scheduler routines
 // KERNEL ONLY
-void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
+void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
+	/* paranoid */ verify( thrd );
+	/* paranoid */ verify( thrd->state != Halted );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
-	                  "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
-	                  "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+				"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+	/* paranoid */  if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
+				"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
-	/* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
+	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
 
 	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
 
-	lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
-	bool was_empty = !(ready_queue != 0);
-	append( ready_queue, thrd );
-	unlock( ready_queue_lock );
-
-	__wake_one(thrd->curr_cluster, was_empty);
+	ready_schedule_lock  ( id );
+		push( thrd->curr_cluster, thrd );
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			bool woke =
+		#endif
+			__wake_one(id, thrd->curr_cluster);
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(woke) __tls_stats()->ready.sleep.wakes++;
+		#endif
+	ready_schedule_unlock( id );
 
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
-	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
-	$thread * head = pop_head( ready_queue );
-	unlock( ready_queue_lock );
+	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+		$thread * head = pop( this );
+	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
 
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     
 }
 
+// KERNEL ONLY
+static bool __has_next_thread(cluster * this) with( *this ) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+		bool not_empty = query( this );
+	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	return not_empty;
+}
+
 // KERNEL ONLY unpark without disabling interrupts
-void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
-	static_assert(sizeof(thrd->state) == sizeof(int));
-
+void __unpark( struct __processor_id_t * id, $thread * thrd __cfaabi_dbg_ctx_param2 ) {
 	// record activity
 	__cfaabi_dbg_debug_do( char * old_caller = thrd->unpark_caller; )
 	__cfaabi_dbg_record_thrd( *thrd, false, caller );
 
-	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
-	__cfaabi_dbg_debug_do( thrd->unpark_result = old_state; )
-	switch(old_state) {
-		case Active:
+	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
+	__cfaabi_dbg_debug_do( thrd->unpark_result = old_ticket; thrd->unpark_state = thrd->state; )
+	switch(old_ticket) {
+		case 1:
 			// Wake won the race, the thread will reschedule/rerun itself
 			break;
-		case Blocked:
+		case 0:
 			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
+			/* paranoid */ verify( thrd->state == Blocked );
 
 			// Wake lost the race,
-			thrd->state = Blocked;
-			__schedule_thread( thrd );
+			__schedule_thread( id, thrd );
 			break;
-		case Rerun:
-			abort("More than one thread attempted to schedule thread %p\n", thrd);
-			break;
-		case Halted:
-		case Start:
-		case Primed:
 		default:
 			// This makes no sense, something is wrong abort
     
 
 	disable_interrupts();
-	__unpark( thrd __cfaabi_dbg_ctx_fwd2 );
+	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd __cfaabi_dbg_ctx_fwd2 );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
     
 
 	$thread * thrd = kernelTLS.this_thread;
-	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
+	/* paranoid */ verify(thrd->state == Active);
 
 	// SKULLDUGGERY: It is possible that we are preempting this thread just before
     
 	// If that is the case, abandon the preemption.
 	bool preempted = false;
-	if(thrd->next == 0p) {
+	if(thrd->link.next == 0p) {
 		preempted = true;
 		thrd->preempted = reason;
     
 	__cfa_dbg_global_clusters.list{ __get };
 	__cfa_dbg_global_clusters.lock{};
+
+	// Initialize the global scheduler lock
+	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
+	(*__scheduler_lock){};
 
 	// Initialize the main cluster
     
 		pending_preemption = false;
 		kernel_thread = pthread_self();
+		id = -1u;
 
 		runner{ &this };
 		__cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
+
+		__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 	}
 
     
 	(*mainProcessor){};
 
+	mainProcessor->id = doregister( (__processor_id_t*)mainProcessor);
+
 	//initialize the global state variables
 	kernelTLS.this_processor = mainProcessor;
 	kernelTLS.this_thread    = mainThread;
 
+	#if !defined( __CFA_NO_STATISTICS__ )
+		kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
+		__init_stats( kernelTLS.this_stats );
+	#endif
+
 	// Enable preemption
 	kernel_start_preemption();
     
 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread(mainThread);
+	__schedule_thread((__processor_id_t *)mainProcessor, mainThread);
 
 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
     
 	kernel_stop_preemption();
 
+	unregister((__processor_id_t*)mainProcessor);
+
 	// Destroy the main processor and its context in reverse order of construction
 	// These were manually constructed so we need to manually destroy them
 	void ^?{}(processor & this) with( this ){
 		/* paranoid */ verify( this.do_terminate == true );
+		__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
+		__cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
 	}
 
     
 
 	// Final step, destroy the main thread since it is no longer needed
+
 	// Since we provided a stack to this task it will not destroy anything
 	/* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
     
 	^(*mainCluster){};
 
+	^(*__scheduler_lock){};
+
 	^(__cfa_dbg_global_clusters.list){};
 	^(__cfa_dbg_global_clusters.lock){};
     
 // Kernel Idle Sleep
 //=============================================================================================
-static $thread * __halt(processor * this) with( *this ) {
-	if( do_terminate ) return 0p;
-
-	// First, lock the cluster idle
-	lock( cltr->idle_lock __cfaabi_dbg_ctx2 );
-
-	// Check if we can find a thread
-	if( $thread * found = __next_thread( cltr ) ) {
-		unlock( cltr->idle_lock );
-		return found;
-	}
-
-	// Move this processor from the active list to the idle list
-	move_to_front(cltr->procs, cltr->idles, *this);
-
-	// Unlock the idle lock so we don't go to sleep with a lock
-	unlock    (cltr->idle_lock);
-
-	// We are ready to sleep
-	__cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this);
-	wait( idle );
-
-	// We have woken up
-	__cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this);
-
-	// Get ourself off the idle list
-	with( *cltr ) {
-		lock  (idle_lock __cfaabi_dbg_ctx2);
-		move_to_front(idles, procs, *this);
-		unlock(idle_lock);
-	}
-
-	// Don't check the ready queue again, we may not be in a position to run a thread
-	return 0p;
-}
-
 // Wake a thread from the front if there are any
-static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
-	// if we don't want to force check if we know it's false
-	// if( !this->idles.head && !force ) return false;
-
-	// First, lock the cluster idle
-	lock( this->idle_lock __cfaabi_dbg_ctx2 );
-
-	// Check if there is someone to wake up
-	if( !this->idles.head ) {
-		// Nope unlock and return false
-		unlock( this->idle_lock );
-		return false;
-	}
-
-	// Wake them up
-	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head);
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	post( this->idles.head->idle );
-
-	// Unlock and return true
-	unlock( this->idle_lock );
+static bool __wake_one(struct __processor_id_t * id, cluster * this) {
+	/* paranoid */ verify( ready_schedule_islocked( id ) );
+
+	// Check if there is a sleeping processor
+	processor * p = pop(this->idles);
+
+	// If no one is sleeping, we are done
+	if( 0p == p ) return false;
+
+	// We found a processor, wake it up
+	post( p->idle );
+
 	return true;
 }
     
 
 	return ret;
+}
+
+static void __halt(processor * this) with( *this ) {
+	if( do_terminate ) return;
+
+	#if !defined(__CFA_NO_STATISTICS__)
+		__tls_stats()->ready.sleep.halts++;
+	#endif
+	// Push self to queue
+	push(cltr->idles, *this);
+
+	// Make sure we don't miss a thread
+	if( __has_next_thread(cltr) ) {
+		// A thread was posted, make sure a processor is woken up
+		struct __processor_id_t *id = (struct __processor_id_t *) this;
+		ready_schedule_lock  ( id );
+			__wake_one( id, cltr );
+		ready_schedule_unlock( id );
+		#if !defined(__CFA_NO_STATISTICS__)
+			__tls_stats()->ready.sleep.cancels++;
+		#endif
+	}
+
+	wait( idle );
 }
 
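The rewritten idle path works as follows: __halt first pushes the processor onto the cluster's idle list, then re-checks the ready queue through __has_next_thread before sleeping, so a thread scheduled concurrently can never be stranded; __wake_one simply pops one idle processor and posts its semaphore. A sketch of the handshake with POSIX semaphores (the list helpers are stand-ins):

	#include <semaphore.h>
	#include <stdbool.h>

	struct proc { sem_t idle; struct proc * next; };
	extern void push_idle(struct proc *);
	extern struct proc * pop_idle(void);        /* 0 if no one is sleeping */
	extern bool ready_queue_nonempty(void);

	void halt(struct proc * this) {
		push_idle(this);                    /* 1. advertise we are going idle   */
		if (ready_queue_nonempty()) {       /* 2. close the lost-wakeup window  */
			struct proc * p = pop_idle();
			if (p) sem_post(&p->idle);  /*    someone must run that thread  */
		}
		sem_wait(&this->idle);              /* 3. sleep until posted            */
	}

	bool wake_one(void) {
		struct proc * p = pop_idle();       /* any sleeping processor?          */
		if (!p) return false;
		sem_post(&p->idle);
		return true;
	}

The ordering in halt() is the whole trick: a scheduler that pushes a thread and then calls wake_one() either finds this processor already on the idle list, or this processor's re-check finds the thread; both cannot be missed.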
     
 	cltr->nthreads -= 1;
 	unlock(cltr->thread_list_lock);
-}
-
-void doregister( cluster * cltr, processor * proc ) {
-	lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
-	cltr->nprocessors += 1;
-	push_front(cltr->procs, *proc);
-	unlock    (cltr->idle_lock);
-}
-
-void unregister( cluster * cltr, processor * proc ) {
-	lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
-	remove(cltr->procs, *proc );
-	cltr->nprocessors -= 1;
-	unlock(cltr->idle_lock);
 }
 