
Legend: lines prefixed with '-' were removed and lines prefixed with '+' were added in the newer revision; unprefixed lines are unchanged context, and '…' marks elided lines.
  • libcfa/src/concurrency/kernel.cfa (r29cb302 → rada0246d)
// Kernel Scheduling logic
static $thread * __next_thread(cluster * this);
-static bool __has_next_thread(cluster * this);
static void __run_thread(processor * this, $thread * dst);
+static $thread * __halt(processor * this);
+static bool __wake_one(cluster * cltr, bool was_empty);
static bool __wake_proc(processor *);
-static bool __wake_one(struct __processor_id_t * id, cluster * cltr);
-static void __halt(processor * this);

//-----------------------------------------------------------------------------
// Kernel storage
-KERNEL_STORAGE(cluster,              mainCluster);
-KERNEL_STORAGE(processor,            mainProcessor);
-KERNEL_STORAGE($thread,              mainThread);
-KERNEL_STORAGE(__stack_t,            mainThreadCtx);
-KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
-#if !defined(__CFA_NO_STATISTICS__)
-KERNEL_STORAGE(__stats_t, mainProcStats);
-#endif
-
-cluster              * mainCluster;
-processor            * mainProcessor;
-$thread              * mainThread;
-__scheduler_RWLock_t * __scheduler_lock;
+KERNEL_STORAGE(cluster,   mainCluster);
+KERNEL_STORAGE(processor, mainProcessor);
+KERNEL_STORAGE($thread,   mainThread);
+KERNEL_STORAGE(__stack_t, mainThreadCtx);
+
+cluster   * mainCluster;
+processor * mainProcessor;
+$thread   * mainThread;

extern "C" {
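An aside on the KERNEL_STORAGE idiom above: the later casts of &storage_mainThreadCtx (and, in the old revision, &storage___scheduler_lock) suggest the macro reserves uninitialized static storage named storage_<name>, which the boot code then binds to a pointer and constructs in place. A minimal sketch in plain C; the macro body and alignment here are assumptions, not the actual libcfa definition:

    /* Hypothetical sketch: reserve raw static storage for a T named X,
     * to be constructed manually during kernel boot. */
    #define KERNEL_STORAGE(T, X) \
        static char storage_##X[sizeof(T)] __attribute__((aligned(__alignof__(T))))

    struct cluster { int placeholder; };    /* stand-in for the real type */
    KERNEL_STORAGE(struct cluster, mainCluster);
    struct cluster * mainCluster;

    void kernel_boot(void) {
        /* bind the global pointer to the reserved storage, then construct in place */
        mainCluster = (struct cluster *)&storage_mainCluster;
        mainCluster->placeholder = 0;
    }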
     
…
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
        NULL,                                                                                           // cannot use 0p
-       NULL,
        NULL,
        { 1, false, false },
…

void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
-       ticket = 1;
        state = Start;
        self_cor{ info };
…
        self_mon.recursion = 1;
        self_mon_p = &self_mon;
-       link.next = 0p;
-       link.prev = 0p;
+       next = 0p;

        node.next = 0p;
…
static void * __invoke_processor(void * arg);

-void ?{}(processor & this, const char name[], cluster & _cltr) with( this ) {
+void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
        this.name = name;
-       this.cltr = &_cltr;
-       id = -1u;
+       this.cltr = &cltr;
        terminated{ 0 };
        destroyer = 0p;
…

        this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
-       __atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );

        __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
…

        free( this.stack );
-
-       __atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
}

…
        this.name = name;
        this.preemption_rate = preemption_rate;
-       this.nprocessors = 0;
        ready_queue{};
+       ready_queue_lock{};

        #if !defined(__CFA_NO_STATISTICS__)
                print_stats = false;
-               stats = alloc();
-               __init_stats( stats );
        #endif

+       procs{ __get };
+       idles{ __get };
        threads{ __get };
…
void ^?{}(cluster & this) {
        __kernel_io_shutdown( this, &this == mainCluster );
-
-       #if !defined(__CFA_NO_STATISTICS__)
-               if(this.print_stats) {
-                       __print_stats( this.stats );
-               }
-               free( this.stats );
-       #endif

        unregister(this);
…
        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);

-       // register the processor unless it's the main thread which is handled in the boot sequence
-       if(this != mainProcessor) {
-               this->id = doregister((__processor_id_t*)this);
-               // Lock the RWlock so no-one pushes/pops while we are changing the queue
-               uint_fast32_t last_size = ready_mutate_lock();
-
-                       // Adjust the ready queue size
-                       ready_queue_grow( this->cltr );
-
-               // Unlock the RWlock
-               ready_mutate_unlock( last_size );
-       }
+       doregister(this->cltr, this);

        {
…
                        readyThread = __next_thread( this->cltr );

+                       // If no ready thread was found
+                       if( readyThread == 0p ) {
+                               // Block until a thread is ready
+                               readyThread = __halt(this);
+                       }
+
                        // Check if we actually found a thread
                        if( readyThread ) {
                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
                                /* paranoid */ verifyf( readyThread->state == Ready || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
-                               /* paranoid */ verifyf( readyThread->link.next == 0p, "Expected null got %p", readyThread->link.next );
-                               __builtin_prefetch( readyThread->context.SP );
+                               /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

                                // We found a thread, run it
…
                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
                        }
-                       else {
-                               // Block until a thread is ready
-                               __halt(this);
-                       }
                }

…
        }

+       unregister(this->cltr, this);
+
        V( this->terminated );

-       // unregister the processor unless it's the main thread which is handled in the boot sequence
-       if(this != mainProcessor) {
-               // Lock the RWlock so no-one pushes/pops while we are changing the queue
-               uint_fast32_t last_size = ready_mutate_lock();
-
-                       // Adjust the ready queue size
-                       ready_queue_shrink( this->cltr );
-
-                       // Make sure we aren't on the idle queue
-                       #if !defined(__CFA_NO_STATISTICS__)
-                               bool removed =
-                       #endif
-                       unsafe_remove( this->cltr->idles, this );
-
-                       #if !defined(__CFA_NO_STATISTICS__)
-                               if(removed) __tls_stats()->ready.sleep.exits++;
-                       #endif
-
-               // Unlock the RWlock
-               ready_mutate_unlock( last_size );
-
-               // Finally, we don't need the read_lock any more
-               unregister((__processor_id_t*)this);
-       }
-       else {
-               // HACK : the coroutine context switch expects this_thread to be set
-               // and it makes sense for it to be set in all other cases except here
-               // fake it
-               kernelTLS.this_thread = mainThread;
-       }
-
        __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
+
+       // HACK : the coroutine context switch expects this_thread to be set
+       // and it makes sense for it to be set in all other cases except here
+       // fake it
+       if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
}

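The control flow of the scheduling loop changes in this revision: instead of an else branch that sleeps, __halt now returns either a thread found while going idle or 0p. A compressed, compilable sketch of the resulting loop shape in plain C, with hypothetical helpers (next_thread, halt, run_thread) standing in for the CFA routines:

    typedef struct thread thread_t;           /* hypothetical stand-ins */
    extern thread_t * next_thread(void * cltr);
    extern thread_t * halt(void * proc);
    extern void run_thread(void * proc, thread_t * t);

    void processor_main(void * cltr, void * proc, volatile int * done) {
        while( !*done ) {
            thread_t * t = next_thread( cltr );   /* try the ready queue */
            if( !t ) t = halt( proc );            /* none: sleep, or grab a thread
                                                     found while racing to sleep */
            if( t ) run_thread( proc, t );        /* a NULL here just retries */
        }
    }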
     
…
        // Actually run the thread
        RUNNING:  while(true) {
-               thrd_dst->preempted = __NO_PREEMPTION;
-               thrd_dst->state = Active;
+               if(unlikely(thrd_dst->preempted)) {
+                       thrd_dst->preempted = __NO_PREEMPTION;
+                       verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
+               } else {
+                       verify(thrd_dst->state == Blocked || thrd_dst->state == Ready); // Ready means scheduled normally, Blocked means rerun
+                       thrd_dst->state = Active;
+               }

                __cfaabi_dbg_debug_do(
…
                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
                        // The thread was preempted, reschedule it and reset the flag
-                       __schedule_thread( (__processor_id_t*)this, thrd_dst );
+                       __schedule_thread( thrd_dst );
                        break RUNNING;
                }

-               if(unlikely(thrd_dst->state == Halted)) {
-                       // The thread has halted, it should never be scheduled/run again
-                       // We may need to wake someone up here
-                       unpark( this->destroyer __cfaabi_dbg_ctx2 );
-                       this->destroyer = 0p;
-                       break RUNNING;
-               }
-
-               /* paranoid */ verify( thrd_dst->state == Active );
-               thrd_dst->state = Blocked;
-
                // set state of processor coroutine to active and the thread to inactive
-               int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
-               __cfaabi_dbg_debug_do( thrd_dst->park_result = old_ticket; )
-               switch(old_ticket) {
-                       case 1:
+               static_assert(sizeof(thrd_dst->state) == sizeof(int));
+               enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Blocked, __ATOMIC_SEQ_CST);
+               __cfaabi_dbg_debug_do( thrd_dst->park_result = old_state; )
+               switch(old_state) {
+                       case Halted:
+                               // The thread has halted, it should never be scheduled/run again; leave it Halted and move on
+                               thrd_dst->state = Halted;
+
+                               // We may need to wake someone up here
+                               unpark( this->destroyer __cfaabi_dbg_ctx2 );
+                               this->destroyer = 0p;
+                               break RUNNING;
+                       case Active:
                                // This is case 1, the regular case, nothing more is needed
                                break RUNNING;
-                       case 2:
+                       case Rerun:
                                // This is case 2, the racy case: someone tried to run this thread before it finished blocking
                                // In this case, just run it again.
…
                        default:
                                // This makes no sense, something is wrong, abort
-                               abort();
+                               abort("Finished running a thread that was Blocked/Start/Primed %d\n", old_state);
                }
        }
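This hunk, together with the __unpark change further down, replaces the per-thread ticket counter with a single atomic exchange on the thread state: the parker exchanges in Blocked, the waker exchanges in Rerun, and whichever side observes the other's value takes responsibility for running the thread. A self-contained sketch of the handshake in plain C (hypothetical names; GCC atomic builtins as in the diff):

    #include <stdio.h>

    enum state { BLOCKED, ACTIVE, RERUN };    /* subset of coroutine_state */
    struct thread { enum state state; };

    /* Parker (kernel) side: the thread has finished running and wants to block. */
    static void park_side(struct thread * t) {
        enum state old = __atomic_exchange_n(&t->state, BLOCKED, __ATOMIC_SEQ_CST);
        if( old == RERUN )
            printf("park lost the race: run the thread again\n");
        else    /* old == ACTIVE: blocked first; the waker will reschedule us */
            printf("blocked normally\n");
    }

    /* Waker side: make a possibly-still-running thread runnable. */
    static void unpark_side(struct thread * t) {
        enum state old = __atomic_exchange_n(&t->state, RERUN, __ATOMIC_SEQ_CST);
        if( old == BLOCKED ) {
            /* wake lost the race: the thread already parked, reschedule it */
            t->state = BLOCKED;
            printf("wake lost the race: push thread on the ready queue\n");
        }
        /* old == ACTIVE: thread still running; it sees RERUN when it parks */
    }

    int main(void) {
        struct thread t = { ACTIVE };
        unpark_side(&t);    /* waker arrives while the thread is still running */
        park_side(&t);      /* the parker then observes RERUN and runs again   */
        return 0;
    }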
     
…
        $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
        $thread * thrd_src = kernelTLS.this_thread;
-
-       #if !defined(__CFA_NO_STATISTICS__)
-               struct processor * last_proc = kernelTLS.this_processor;
-       #endif

        // Run the thread on this processor
…
        }

-       #if !defined(__CFA_NO_STATISTICS__)
-               if(last_proc != kernelTLS.this_processor) {
-                       __tls_stats()->ready.threads.migration++;
-               }
-       #endif
-
        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
…
// It effectively constructs a coroutine by stealing the pthread stack
static void * __invoke_processor(void * arg) {
-       #if !defined( __CFA_NO_STATISTICS__ )
-               __stats_t local_stats;
-               __init_stats( &local_stats );
-               kernelTLS.this_stats = &local_stats;
-       #endif
-
        processor * proc = (processor *) arg;
        kernelTLS.this_processor = proc;
…
        __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);

-       #if !defined(__CFA_NO_STATISTICS__)
-               __tally_stats(proc->cltr->stats, &local_stats);
-       #endif
-
        return 0p;
}
…
// Scheduler routines
// KERNEL ONLY
-void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
-       /* paranoid */ verify( thrd );
-       /* paranoid */ verify( thrd->state != Halted );
+void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
-       /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
-                                       "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-       /* paranoid */  if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
-                                       "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
+       /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+                         "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+       /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
+                         "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
        /* paranoid */ #endif
-       /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
+       /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );

        if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

-       ready_schedule_lock  ( id );
-               push( thrd->curr_cluster, thrd );
-
-               #if !defined(__CFA_NO_STATISTICS__)
-                       bool woke =
-               #endif
-                       __wake_one(id, thrd->curr_cluster);
-
-               #if !defined(__CFA_NO_STATISTICS__)
-                       if(woke) __tls_stats()->ready.sleep.wakes++;
-               #endif
-       ready_schedule_unlock( id );
+       lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
+       bool was_empty = !(ready_queue != 0);
+       append( ready_queue, thrd );
+       unlock( ready_queue_lock );
+
+       __wake_one(thrd->curr_cluster, was_empty);

        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
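The new __schedule_thread samples the queue's emptiness while holding ready_queue_lock, then wakes an idle processor only after the lock is released; the was_empty result feeds the (currently commented-out) short-circuit in __wake_one. A toy but runnable rendition of the pattern in plain C with pthreads (names hypothetical; the push is simplified to a stack):

    #include <pthread.h>
    #include <stdio.h>

    struct thread { struct thread * next; };
    struct ready_queue { struct thread * head; pthread_mutex_t lock; };

    /* Stand-in for __wake_one; 'was_empty' is the hint computed under the
     * lock (in the diff the hint is currently unused). */
    static void wake_one(int was_empty) {
        if( was_empty ) printf("queue was empty: an idle processor may need waking\n");
    }

    static void schedule_thread(struct ready_queue * q, struct thread * t) {
        pthread_mutex_lock(&q->lock);
        int was_empty = (q->head == NULL);  /* sampled while the lock is held */
        t->next = q->head; q->head = t;     /* toy push; the real code appends */
        pthread_mutex_unlock(&q->lock);
        wake_one(was_empty);                /* wake after unlocking */
    }

    int main(void) {
        struct ready_queue q = { NULL, PTHREAD_MUTEX_INITIALIZER };
        struct thread t = { NULL };
        schedule_thread(&q, &t);
        return 0;
    }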
     
…
        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

-       ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
-               $thread * head = pop( this );
-       ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
+       lock( ready_queue_lock __cfaabi_dbg_ctx2 );
+       $thread * head = pop_head( ready_queue );
+       unlock( ready_queue_lock );

        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
…
}

-// KERNEL ONLY
-static bool __has_next_thread(cluster * this) with( *this ) {
-       /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-       ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
-               bool not_empty = query( this );
-       ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-       /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-       return not_empty;
-}
-
// KERNEL ONLY unpark without disabling interrupts
-void __unpark(  struct __processor_id_t * id, $thread * thrd __cfaabi_dbg_ctx_param2 ) {
+void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
+       static_assert(sizeof(thrd->state) == sizeof(int));
+
        // record activity
        __cfaabi_dbg_debug_do( char * old_caller = thrd->unpark_caller; )
        __cfaabi_dbg_record_thrd( *thrd, false, caller );

-       int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
-       __cfaabi_dbg_debug_do( thrd->unpark_result = old_ticket; thrd->unpark_state = thrd->state; )
-       switch(old_ticket) {
-               case 1:
+       enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
+       __cfaabi_dbg_debug_do( thrd->unpark_result = old_state; )
+       switch(old_state) {
+               case Active:
                        // Wake won the race, the thread will reschedule/rerun itself
                        break;
-               case 0:
+               case Blocked:
                        /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
-                       /* paranoid */ verify( thrd->state == Blocked );

                        // Wake lost the race, schedule the thread
-                       __schedule_thread( id, thrd );
+                       thrd->state = Blocked;
+                       __schedule_thread( thrd );
                        break;
+               case Rerun:
+                       abort("More than one thread attempted to schedule thread %p\n", thrd);
+                       break;
+               case Halted:
+               case Start:
+               case Primed:
                default:
                        // This makes no sense, something is wrong, abort
…

        disable_interrupts();
-       __unpark( (__processor_id_t*)kernelTLS.this_processor, thrd __cfaabi_dbg_ctx_fwd2 );
+       __unpark( thrd __cfaabi_dbg_ctx_fwd2 );
        enable_interrupts( __cfaabi_dbg_ctx );
}
…

        $thread * thrd = kernelTLS.this_thread;
-       /* paranoid */ verify(thrd->state == Active);
+       /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);

        // SKULLDUGGERY: It is possible that we are preempting this thread just before
…
        // If that is the case, abandon the preemption.
        bool preempted = false;
-       if(thrd->link.next == 0p) {
+       if(thrd->next == 0p) {
                preempted = true;
                thrd->preempted = reason;
…
        __cfa_dbg_global_clusters.list{ __get };
        __cfa_dbg_global_clusters.lock{};
-
-       // Initialize the global scheduler lock
-       __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
-       (*__scheduler_lock){};

        // Initialize the main cluster
…
                pending_preemption = false;
                kernel_thread = pthread_self();
-               id = -1u;

                runner{ &this };
                __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
-
-               __atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
        }

…
        (*mainProcessor){};

-       mainProcessor->id = doregister( (__processor_id_t*)mainProcessor);
-
        // initialize the global state variables
        kernelTLS.this_processor = mainProcessor;
        kernelTLS.this_thread    = mainThread;

-       #if !defined( __CFA_NO_STATISTICS__ )
-               kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
-               __init_stats( kernelTLS.this_stats );
-       #endif
-
        // Enable preemption
        kernel_start_preemption();
…
        // Add the main thread to the ready queue
        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-       __schedule_thread((__processor_id_t *)mainProcessor, mainThread);
+       __schedule_thread(mainThread);

        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
…
        kernel_stop_preemption();

-       unregister((__processor_id_t*)mainProcessor);
-
        // Destroy the main processor and its context in reverse order of construction
        // These were manually constructed, so we must manually destroy them
        void ^?{}(processor & this) with( this ){
                /* paranoid */ verify( this.do_terminate == true );
-               __atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-               __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
        }

…
        // Final step, destroy the main thread since it is no longer needed
-
        // Since we provided a stack to this task, it will not destroy anything
        /* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
…
        ^(*mainCluster){};

-       ^(*__scheduler_lock){};
-
        ^(__cfa_dbg_global_clusters.list){};
        ^(__cfa_dbg_global_clusters.lock){};
…
// Kernel Idle Sleep
//=============================================================================================
+static $thread * __halt(processor * this) with( *this ) {
+       if( do_terminate ) return 0p;
+
+       // First, take the cluster's idle lock
+       lock( cltr->idle_lock __cfaabi_dbg_ctx2 );
+
+       // Check if we can find a thread
+       if( $thread * found = __next_thread( cltr ) ) {
+               unlock( cltr->idle_lock );
+               return found;
+       }
+
+       // Move this processor from the active list to the idle list
+       move_to_front(cltr->procs, cltr->idles, *this);
+
+       // Unlock the idle lock so we don't go to sleep holding a lock
+       unlock    (cltr->idle_lock);
+
+       // We are ready to sleep
+       __cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this);
+       wait( idle );
+
+       // We have woken up
+       __cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this);
+
+       // Get ourselves off the idle list
+       with( *cltr ) {
+               lock  (idle_lock __cfaabi_dbg_ctx2);
+               move_to_front(idles, procs, *this);
+               unlock(idle_lock);
+       }
+
+       // Don't check the ready queue again, we may not be in a position to run a thread
+       return 0p;
+}
+
// Wake a thread from the front if there are any
-static bool __wake_one(struct __processor_id_t * id, cluster * this) {
-       /* paranoid */ verify( ready_schedule_islocked( id ) );
-
-       // Check if there is a sleeping processor
-       processor * p = pop(this->idles);
-
-       // If no one is sleeping, we are done
-       if( 0p == p ) return false;
-
-       // We found a processor, wake it up
-       post( p->idle );
-
+static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
+       // if we don't want to force, check if we already know the answer is false
+       // if( !this->idles.head && !force ) return false;
+
+       // First, take the cluster's idle lock
+       lock( this->idle_lock __cfaabi_dbg_ctx2 );
+
+       // Check if there is someone to wake up
+       if( !this->idles.head ) {
+               // Nope, unlock and return false
+               unlock( this->idle_lock );
+               return false;
+       }
+
+       // Wake them up
+       __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head);
+       /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+       post( this->idles.head->idle );
+
+       // Unlock and return true
+       unlock( this->idle_lock );
        return true;
}
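Together, the new __halt and __wake_one form an idle-sleep protocol: a processor takes the idle lock, re-checks the ready queue (via __next_thread), publishes itself on the idle list, releases the lock, and only then blocks, so a wake issued concurrently cannot be lost. A condensed sketch in plain C with POSIX semaphores (hypothetical names; a single lock guards both structures here for brevity, whereas the CFA code re-checks through its own ready_queue_lock):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    struct thread;
    struct proc { sem_t idle; struct proc * next; };
    struct cluster {
        pthread_mutex_t idle_lock;
        struct proc   * idles;    /* sleeping processors */
        struct thread * ready;    /* toy one-slot ready "queue" */
    };

    /* Sleep side: commit to sleeping only if no work appears while the
     * idle lock is held, so a concurrent wake cannot slip through. */
    static struct thread * halt(struct cluster * c, struct proc * p) {
        pthread_mutex_lock(&c->idle_lock);
        if( c->ready ) {                        /* re-check under the lock */
            struct thread * t = c->ready;
            c->ready = NULL;
            pthread_mutex_unlock(&c->idle_lock);
            return t;                           /* found work, don't sleep */
        }
        p->next = c->idles; c->idles = p;       /* publish ourselves as idle */
        pthread_mutex_unlock(&c->idle_lock);    /* never sleep holding a lock */
        sem_wait(&p->idle);                     /* block until posted */

        pthread_mutex_lock(&c->idle_lock);      /* get ourselves off the idle list */
        for( struct proc ** it = &c->idles; *it; it = &(*it)->next ) {
            if( *it == p ) { *it = p->next; break; }
        }
        pthread_mutex_unlock(&c->idle_lock);
        return NULL;                            /* caller retries its loop */
    }

    /* Wake side: post the first sleeper, if any; it dequeues itself. */
    static int wake_one(struct cluster * c) {
        pthread_mutex_lock(&c->idle_lock);
        struct proc * head = c->idles;
        if( head ) sem_post(&head->idle);
        pthread_mutex_unlock(&c->idle_lock);
        return head != NULL;
    }

    int main(void) {
        struct cluster c = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
        return wake_one(&c);    /* no sleeper: returns 0 */
    }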
     
…

        return ret;
-}
-
-static void __halt(processor * this) with( *this ) {
-       if( do_terminate ) return;
-
-       #if !defined(__CFA_NO_STATISTICS__)
-               __tls_stats()->ready.sleep.halts++;
-       #endif
-       // Push self to queue
-       push(cltr->idles, *this);
-
-       // Make sure we don't miss a thread
-       if( __has_next_thread(cltr) ) {
-               // A thread was posted, make sure a processor is woken up
-               struct __processor_id_t *id = (struct __processor_id_t *) this;
-               ready_schedule_lock  ( id );
-                       __wake_one( id, cltr );
-               ready_schedule_unlock( id );
-               #if !defined(__CFA_NO_STATISTICS__)
-                       __tls_stats()->ready.sleep.cancels++;
-               #endif
-       }
-
-       wait( idle );
}

…
        cltr->nthreads -= 1;
        unlock(cltr->thread_list_lock);
+}
+
+void doregister( cluster * cltr, processor * proc ) {
+       lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
+       cltr->nprocessors += 1;
+       push_front(cltr->procs, *proc);
+       unlock    (cltr->idle_lock);
+}
+
+void unregister( cluster * cltr, processor * proc ) {
+       lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
+       remove(cltr->procs, *proc );
+       cltr->nprocessors -= 1;
+       unlock(cltr->idle_lock);
}
