//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Nov 30 18:14:08 2022
// Update Count     : 76
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"

//C Includes
// (header names inferred from usage in this file: errno, snprintf, strerror, read, eventfd_write)
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

extern "C" {
	#include <sys/eventfd.h>
	#include <sys/uio.h>
}

//CFA Includes
#include "kernel/private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"
#include "io/types.hfa"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

#pragma GCC diagnostic pop

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS_DEF( ...) __VA_ARGS__
#else
	#define __STATS_DEF( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store          \
		uint32_t __mxcr;         \
		uint16_t __fcw;          \
		__asm__ volatile (       \
			"stmxcsr %0\n"       \
			"fnstcw %1\n"        \
			: "=m" (__mxcr),     \
			  "=m" (__fcw)       \
		)

	#define __x87_load           \
		__asm__ volatile (       \
			"fldcw %1\n"         \
			"ldmxcsr %0\n"       \
			::"m" (__mxcr),      \
			  "m" (__fcw)        \
		)

#elif defined( __x86_64 )
	#define __x87_store          \
		uint32_t __mxcr;         \
		uint16_t __fcw;          \
		__asm__ volatile (       \
			"stmxcsr %0\n"       \
			"fnstcw %1\n"        \
			: "=m" (__mxcr),     \
			  "=m" (__fcw)       \
		)

	#define __x87_load           \
		__asm__ volatile (       \
			"fldcw %1\n"         \
			"ldmxcsr %0\n"       \
			:: "m" (__mxcr),     \
			   "m" (__fcw)       \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	#define __x87_store          \
		uint32_t __fpcntl[2];    \
		__asm__ volatile (       \
			"mrs x9, FPCR\n"     \
			"mrs x10, FPSR\n"    \
			"stp x9, x10, %0\n"  \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load           \
		__asm__ volatile (       \
			"ldp x9, x10, %0\n"  \
			"msr FPSR, x10\n"    \
			"msr FPCR, x9\n"     \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif

extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static thread$ * __next_thread_search(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void idle_sleep(processor * proc);
static bool mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);

extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();


//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	PRNG_SET_SEED( __cfaabi_tls.random_state, rdtscl() );
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		thread$ * readyThread = 0p;
		MAIN_LOOP:
		for() {
			// Check if there is pending io
			__cfa_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				// there is no point in holding submissions if we are idle
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this );

				// drain again in case something showed up
				__cfa_io_drain( this );

				readyThread = __next_thread( this->cltr );
			}

			if( !readyThread ) for(5) {
				readyThread = __next_thread_slow( this->cltr );

				if( readyThread ) break;

				// It's unlikely we still have I/O to submit, but the arbiter could
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this );

				// drain again in case something showed up
				__cfa_io_drain( this );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				// Push self to idle stack
				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;

				// Confirm the ready-queue is empty
				readyThread = __next_thread_search( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS__(true, ready.sleep.cancels++; )

					// continue the main loop
					break HALT;
				}

				idle_sleep( this );

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
				__IO_STATS__(true, io.flush.dirty++; )
				__cfa_io_flush( this );
			}
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set
		// and it makes sense for it to be set in all other cases except here
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->rdy_link.next == 0p, "Expected null got %p", thrd_dst->rdy_link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	coroutine$ * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING:
	while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		// Update the state after setting this_thread
		// so that the debugger can find all active threads
		// in tls storage
		thrd_dst->state = Active;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, this, __ATOMIC_SEQ_CST) == 0p );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

		// set context switch to the thread that the processor is executing
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, 0p, __ATOMIC_SEQ_CST) == this );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.
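		// Ticket protocol (see also __must_unpark): thrd->ticket stays at TICKET_RUNNING while the
		// thread runs.  The blocking side decrements it (fetch_sub below); an unparking processor
		// increments it (fetch_add in __must_unpark).  Whichever side observes the other's update
		// takes responsibility for rescheduling the thread, so the park/unpark race never runs the
		// thread on two processors at once.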
		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// Reset the this_thread now that we know
			// the state isn't active anymore
			kernelTLS().this_thread = 0p;

			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst, UNPARK_LOCAL );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// Reset the this_thread now that we know
			// the state isn't active anymore
			kernelTLS().this_thread = 0p;

			// The thread has halted, it should never be scheduled/run again
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// Reset the this_thread now that we know
		// the state isn't active anymore
		kernelTLS().this_thread = 0p;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				__STATS__(true, ready.threads.threads++; )
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	__STATS__(true, ready.threads.threads--; )

	/* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
static void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	thread$ * thrd_src = kernelTLS().this_thread;

	__STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )

	// Run the thread on this processor
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
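// __schedule_thread makes a thread eligible to run: it pushes the thread onto its cluster's
// ready-queue and then pokes one sleeping processor on that cluster (__wake_one).  Callers must
// hold the ready-schedule lock with preemption disabled; schedule_thread$ is the wrapper that
// acquires and releases that lock around the call.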
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( thrd->curr_cluster );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ 	if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active, "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->rdy_link.next == 0p, "Expected null got %p", thrd->rdy_link.next );
	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	// Dereference the thread now because once we push it, there is no guarantee it's still valid.
	struct cluster * cl = thrd->curr_cluster;
	__STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

	// push the thread to the cluster ready-queue
	push( cl, thrd, hint );

	// variable thrd is no longer safe to use
	thrd = 0xdeaddeaddeaddeadp;

	// wake the cluster using the saved variable.
	__wake_one( cl );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.threads.threads++;
			if(outside) {
				__tls_stats()->ready.threads.extunpark++;
			}
		} else {
			__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
	ready_schedule_lock();
		__schedule_thread( thrd, hint );
	ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_slow( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_search( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
			abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
	}
}

void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());

	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		// Wake lost the race,
		__schedule_thread( thrd, hint );
	}

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		disable_interrupts();
			// Wake lost the race,
			schedule_thread$( thrd, hint );
		enable_interrupts(false);
	}
}

void park( void ) libcfa_public {
	__disable_interrupts_checked();
	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
	returnToKernel();
	__enable_interrupts_checked();
}

extern "C" {
	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __cfactx_thrd_leave() {
		thread$ * thrd = active_thread();
		monitor$ * this = &thrd->self_mon;

		// Lock the monitor now
		lock( this->lock __cfaabi_dbg_ctx2 );

		disable_interrupts();

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( thrd->state == Active );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
		/* paranoid */ verify( kernelTLS().this_thread == thrd );
		/* paranoid */ verify( thrd->context.SP );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

		thrd->state = Halting;
		thrd->ticket = TICKET_DEAD;

		// Leave the thread
		returnToKernel();

		// Control flow should never reach here!
		abort();
	}
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) libcfa_public {
	__disable_interrupts_checked();
		thread$ * thrd = kernelTLS().this_thread;
		/* paranoid */ verify(thrd->state == Active);

		// SKULLDUGGERY: It is possible that we are preempting this thread just before
		// it was going to park itself. If that is the case and it is already using the
		// intrusive fields then we can't use them to preempt the thread
		// If that is the case, abandon the preemption.
		bool preempted = false;
		if(thrd->rdy_link.next == 0p) {
			preempted = true;
			thrd->preempted = reason;
			returnToKernel();
		}
	__enable_interrupts_checked( false );
	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
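// Processors with no work publish themselves through mark_idle: the cluster keeps a list of idle
// processors and a pointer (procs.fdw) to the wait context of the processor that should be woken
// first.  A wait context pairs an eventfd with a small state word (sem): 0 means its owner has not
// yet committed to sleeping, the eventfd number means it is committed and blocked in read(), and 1
// means a wake-up has already been posted.  __wake_one marks the head waiter's sem with 1 and only
// performs an eventfd write when the owner had already committed; __wake_proc wakes one specific
// processor unconditionally.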
// Wake a thread from the front if there are any
static void __wake_one(cluster * this) {
	eventfd_t val;

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);

	// If no one is sleeping: we are done
	if( fdp == 0p ) return;

	int fd = 1;
	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
	}

	switch(fd) {
		__attribute__((unused)) int ret;
		case 0:
			// If the processor isn't ready to sleep then the exchange will already wake it up
			#if !defined(__CFA_NO_STATISTICS__)
				if( kernelTLS().this_stats ) {
					__tls_stats()->ready.sleep.early++;
				} else {
					__atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED);
				}
			#endif
			break;
		case 1:
			// If someone else already said they will wake them: we are done
			#if !defined(__CFA_NO_STATISTICS__)
				if( kernelTLS().this_stats ) {
					__tls_stats()->ready.sleep.seen++;
				} else {
					__atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED);
				}
			#endif
			break;
		default:
			// If the processor was ready to sleep, we need to wake it up with an actual write
			val = 1;
			ret = eventfd_write( fd, val );
			/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
			#if !defined(__CFA_NO_STATISTICS__)
				if( kernelTLS().this_stats ) {
					__tls_stats()->ready.sleep.wakes++;
				} else {
					__atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
				}
			#endif
			break;
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}

// Unconditionally wake a thread
void __wake_proc(processor * this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	this->idle_wctx.sem = 1;
	this->idle_wctx.wake__time = rdtscl();

	eventfd_t val;
	val = 1;
	__attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );

	/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static void idle_sleep(processor * this) {
	/* paranoid */ verify( this->idle_wctx.evfd != 1 );
	/* paranoid */ verify( this->idle_wctx.evfd != 2 );

	// Tell everyone we are ready to go to sleep
	for() {
		int expected = this->idle_wctx.sem;

		// Someone already told us to wake-up! No time for a nap.
		if(expected == 1) { return; }

		// Try to mark that we are going to sleep
		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
			// Every one agreed, taking a nap
			break;
		}
	}

	#if !defined(__CFA_NO_STATISTICS__)
		if(this->print_halts) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
		}
	#endif

	__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);

	{
		eventfd_t val;
		ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
		if(ret < 0) {
			switch((int)errno) {
			case EAGAIN:
			#if EAGAIN != EWOULDBLOCK
			case EWOULDBLOCK:
			#endif
			case EINTR:
				// No need to do anything special here, just assume it's a legitimate wake-up
				break;
			default:
				abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
			}
		}
	}

	#if !defined(__CFA_NO_STATISTICS__)
		if(this->print_halts) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
		}
	#endif
}
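// mark_idle / mark_awake move a processor between the cluster's active and idle lists under the
// proc-list lock, and keep procs.fdw pointing at the wait context of the most recently idled
// processor (or null when none are idle); that is the processor __wake_one will target.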
static bool mark_idle(__cluster_proc_list & this, processor & proc) {
	__STATS__(true, ready.sleep.halts++; )

	proc.idle_wctx.sem = 0;

	/* paranoid */ verify( ! __preemption_enabled() );
	if(!try_lock( this )) return false;
		this.idle++;
		/* paranoid */ verify( this.idle <= this.total );
		remove(proc);
		insert_first(this.idles, proc);

		// update the pointer to the head wait context, which should now point to this proc.
		__atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );

	return true;
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle--;
		/* paranoid */ verify( this.idle >= 0 );
		remove(proc);
		insert_last(this.actives, proc);

		{
			// update the pointer to the head wait context
			struct __fd_waitctx * wctx = 0;
			if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
			__atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
		}
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	thread$ * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		} else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	} else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}
static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) libcfa_public {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) libcfa_public {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	static void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed-up
		// this doesn't solve all problems but does solve many
		// so it's probably good enough
		disable_interrupts();
		uint_fast32_t last_size = ready_mutate_lock();

			crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
			crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
		enable_interrupts();
	}

	void print_stats_now( cluster & this, int flags ) libcfa_public {
		crawl_cluster_stats( this );
		__print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //