Ignore: (diff display options — none selected)
Files:
1 edited

Legend:

Unmodified (context line)
Added (present only in the new revision)
Removed (present only in the old revision)
  • libcfa/src/concurrency/kernel.cfa

    r1f45c7d rc33c2af  
    2222#include <errno.h>
    2323#include <stdio.h>
     24#include <string.h>
    2425#include <signal.h>
    2526#include <unistd.h>
     
    3132#include "kernel_private.hfa"
    3233#include "preemption.hfa"
     34#include "strstream.hfa"
     35#include "device/cpu.hfa"
    3336
    3437//Private includes
     
    110113#endif
    111114
    112 extern $thread * mainThread;
     115extern thread$ * mainThread;
    113116extern processor * mainProcessor;
    114117
    115118//-----------------------------------------------------------------------------
    116119// Kernel Scheduling logic
    117 static $thread * __next_thread(cluster * this);
    118 static $thread * __next_thread_slow(cluster * this);
    119 static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
    120 static void __run_thread(processor * this, $thread * dst);
     120static thread$ * __next_thread(cluster * this);
     121static thread$ * __next_thread_slow(cluster * this);
     122static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
     123static void __run_thread(processor * this, thread$ * dst);
    121124static void __wake_one(cluster * cltr);
    122125
     
    181184                __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);
    182185
    183                 $thread * readyThread = 0p;
     186                thread$ * readyThread = 0p;
    184187                MAIN_LOOP:
    185188                for() {
     
    193196
    194197                        if( !readyThread ) {
     198                                ready_schedule_lock();
    195199                                __cfa_io_flush( this );
     200                                ready_schedule_unlock();
     201
    196202                                readyThread = __next_thread_slow( this->cltr );
    197203                        }
     
    231237                                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
    232238
    233                                 __disable_interrupts_hard();
    234                                 eventfd_t val;
    235                                 eventfd_read( this->idle, &val );
    236                                 __enable_interrupts_hard();
     239                                {
     240                                        eventfd_t val;
     241                                        ssize_t ret = read( this->idle, &val, sizeof(val) );
     242                                        if(ret < 0) {
     243                                                switch((int)errno) {
     244                                                case EAGAIN:
     245                                                #if EAGAIN != EWOULDBLOCK
     246                                                        case EWOULDBLOCK:
     247                                                #endif
     248                                                case EINTR:
     249                                                        // No need to do anything special here, just assume it's a legitimate wake-up
     250                                                        break;
     251                                                default:
     252                                                        abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
     253                                                }
     254                                        }
     255                                }
    237256
    238257                                #if !defined(__CFA_NO_STATISTICS__)
     
    261280
    262281                        if(this->io.pending && !this->io.dirty) {
     282                                ready_schedule_lock();
    263283                                __cfa_io_flush( this );
     284                                ready_schedule_unlock();
    264285                        }
    265286
     
    301322
    302323                                // Don't block if we are done
    303                                 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
     324                                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
     325                                        ready_schedule_unlock();
     326                                        break MAIN_LOOP;
     327                                }
    304328
    305329                                __STATS( __tls_stats()->ready.sleep.halts++; )
     
    325349                                }
    326350
    327                                         __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
     351                                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
    328352                                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
    329353
    330                                 // __disable_interrupts_hard();
    331                                 eventfd_t val;
    332                                 eventfd_read( this->idle, &val );
    333                                 // __enable_interrupts_hard();
     354                                {
     355                                        eventfd_t val;
     356                                        ssize_t ret = read( this->idle, &val, sizeof(val) );
     357                                        if(ret < 0) {
     358                                                switch((int)errno) {
     359                                                case EAGAIN:
     360                                                #if EAGAIN != EWOULDBLOCK
     361                                                        case EWOULDBLOCK:
     362                                                #endif
     363                                                case EINTR:
     364                                                        // No need to do anything special here, just assume it's a legitimate wake-up
     365                                                        break;
     366                                                default:
     367                                                        abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
     368                                                }
     369                                        }
     370                                }
    334371
    335372                                        __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
     
    388425// runThread runs a thread by context switching
    389426// from the processor coroutine to the target thread
    390 static void __run_thread(processor * this, $thread * thrd_dst) {
     427static void __run_thread(processor * this, thread$ * thrd_dst) {
    391428        /* paranoid */ verify( ! __preemption_enabled() );
    392429        /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
     
    394431        __builtin_prefetch( thrd_dst->context.SP );
    395432
    396         int curr = __kernel_getcpu();
    397         if(thrd_dst->last_cpu != curr) {
    398                 int64_t l = thrd_dst->last_cpu;
    399                 int64_t c = curr;
    400                 int64_t v = (l << 32) | c;
    401                 __push_stat( __tls_stats(), v, false, "Processor", this );
    402         }
    403 
    404         thrd_dst->last_cpu = curr;
    405 
    406433        __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);
    407434
    408         $coroutine * proc_cor = get_coroutine(this->runner);
     435        coroutine$ * proc_cor = get_coroutine(this->runner);
    409436
    410437        // set state of processor coroutine to inactive
     
    425452                /* paranoid */ verify( thrd_dst->context.SP );
    426453                /* paranoid */ verify( thrd_dst->state != Halted );
    427                 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
    428                 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
     454                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
     455                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
    429456                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
    430457
     
    438465
    439466                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
    440                 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
    441                 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
     467                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
     468                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
    442469                /* paranoid */ verify( thrd_dst->context.SP );
    443470                /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
     
    457484                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
    458485                        // The thread was preempted, reschedule it and reset the flag
    459                         schedule_thread$( thrd_dst );
     486                        schedule_thread$( thrd_dst, UNPARK_LOCAL );
    460487                        break RUNNING;
    461488                }
     
    505532void returnToKernel() {
    506533        /* paranoid */ verify( ! __preemption_enabled() );
    507         $coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
    508         $thread * thrd_src = kernelTLS().this_thread;
     534        coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
     535        thread$ * thrd_src = kernelTLS().this_thread;
    509536
    510537        __STATS( thrd_src->last_proc = kernelTLS().this_processor; )
     
    534561
    535562        /* paranoid */ verify( ! __preemption_enabled() );
    536         /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    537         /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
     563        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
     564        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
    538565}
    539566
     
    541568// Scheduler routines
    542569// KERNEL ONLY
    543 static void __schedule_thread( $thread * thrd ) {
     570static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
    544571        /* paranoid */ verify( ! __preemption_enabled() );
    545572        /* paranoid */ verify( ready_schedule_islocked());
     
    561588        // Dereference the thread now because once we push it, there is not guaranteed it's still valid.
    562589        struct cluster * cl = thrd->curr_cluster;
    563         __STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
     590        __STATS(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
    564591
    565592        // push the thread to the cluster ready-queue
    566         push( cl, thrd, local );
     593        push( cl, thrd, hint );
    567594
    568595        // variable thrd is no longer safe to use
     
    589616}
    590617
    591 void schedule_thread$( $thread * thrd ) {
     618void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
    592619        ready_schedule_lock();
    593                 __schedule_thread( thrd );
     620                __schedule_thread( thrd, hint );
    594621        ready_schedule_unlock();
    595622}
    596623
    597624// KERNEL ONLY
    598 static inline $thread * __next_thread(cluster * this) with( *this ) {
     625static inline thread$ * __next_thread(cluster * this) with( *this ) {
    599626        /* paranoid */ verify( ! __preemption_enabled() );
    600627
    601628        ready_schedule_lock();
    602                 $thread * thrd = pop_fast( this );
     629                thread$ * thrd = pop_fast( this );
    603630        ready_schedule_unlock();
    604631
     
    608635
    609636// KERNEL ONLY
    610 static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
     637static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
    611638        /* paranoid */ verify( ! __preemption_enabled() );
    612639
    613640        ready_schedule_lock();
    614                 $thread * thrd;
     641                thread$ * thrd;
    615642                for(25) {
    616643                        thrd = pop_slow( this );
     
    626653}
    627654
    628 static inline bool __must_unpark( $thread * thrd ) {
     655static inline bool __must_unpark( thread$ * thrd ) {
    629656        int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
    630657        switch(old_ticket) {
     
    642669}
    643670
    644 void __kernel_unpark( $thread * thrd ) {
     671void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
    645672        /* paranoid */ verify( ! __preemption_enabled() );
    646673        /* paranoid */ verify( ready_schedule_islocked());
     
    650677        if(__must_unpark(thrd)) {
    651678                // Wake lost the race,
    652                 __schedule_thread( thrd );
     679                __schedule_thread( thrd, hint );
    653680        }
    654681
     
    657684}
    658685
    659 void unpark( $thread * thrd ) {
     686void unpark( thread$ * thrd, unpark_hint hint ) {
    660687        if( !thrd ) return;
    661688
     
    663690                disable_interrupts();
    664691                        // Wake lost the race,
    665                         schedule_thread$( thrd );
     692                        schedule_thread$( thrd, hint );
    666693                enable_interrupts(false);
    667694        }
     
    681708        // Should never return
    682709        void __cfactx_thrd_leave() {
    683                 $thread * thrd = active_thread();
    684                 $monitor * this = &thrd->self_mon;
     710                thread$ * thrd = active_thread();
     711                monitor$ * this = &thrd->self_mon;
    685712
    686713                // Lock the monitor now
     
    694721                /* paranoid */ verify( kernelTLS().this_thread == thrd );
    695722                /* paranoid */ verify( thrd->context.SP );
    696                 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
    697                 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
     723                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
     724                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
    698725
    699726                thrd->state = Halting;
     
    713740bool force_yield( __Preemption_Reason reason ) {
    714741        __disable_interrupts_checked();
    715                 $thread * thrd = kernelTLS().this_thread;
     742                thread$ * thrd = kernelTLS().this_thread;
    716743                /* paranoid */ verify(thrd->state == Active);
    717744
     
    825852//=============================================================================================
    826853void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
    827         $thread * thrd = __cfaabi_tls.this_thread;
     854        thread$ * thrd = __cfaabi_tls.this_thread;
    828855
    829856        if(thrd) {
     
    920947                        /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
    921948                        /* paranoid */ verify( it->local_data->this_stats );
     949                        // __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
    922950                        __tally_stats( cltr->stats, it->local_data->this_stats );
    923951                        it = &(*it)`next;
     
    929957                // this doesn't solve all problems but does solve many
    930958                // so it's probably good enough
     959                disable_interrupts();
    931960                uint_fast32_t last_size = ready_mutate_lock();
    932961
     
    936965                // Unlock the RWlock
    937966                ready_mutate_unlock( last_size );
     967                enable_interrupts();
    938968        }
    939969
Note: See TracChangeset for help on using the changeset viewer.