//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"

//C Includes
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

extern "C" {
	#include <sys/eventfd.h>
	#include <sys/uio.h>
}

//CFA Includes
#include "kernel/private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"
#include "io/types.hfa"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"
#pragma GCC diagnostic pop

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS_DEF( ...) __VA_ARGS__
#else
	#define __STATS_DEF( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store         \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"  \
			"fnstcw  %1\n"  \
			: "=m" (__mxcr),\
			  "=m" (__fcw)  \
		)

	#define __x87_load         \
		__asm__ volatile (   \
			"fldcw  %1\n"  \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr),\
			   "m" (__fcw)  \
		)

#elif defined( __x86_64 )
	#define __x87_store         \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"  \
			"fnstcw  %1\n"  \
			: "=m" (__mxcr),\
			  "=m" (__fcw)  \
		)

	#define __x87_load          \
		__asm__ volatile (    \
			"fldcw  %1\n"   \
			"ldmxcsr %0\n"  \
			:: "m" (__mxcr),\
			   "m" (__fcw)  \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	#define __x87_store              \
		uint64_t __fpcntl[2];    /* stp/ldp transfer two 64-bit registers: 16 bytes */ \
		__asm__ volatile (    \
			"mrs x9, FPCR\n" \
			"mrs x10, FPSR\n"  \
			"stp x9, x10, %0\n"  \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load         \
		__asm__ volatile (    \
			"ldp x9, x10, %0\n"  \
			"msr FPSR, x10\n"  \
			"msr FPCR, x9\n" \
			:: "m" (__fpcntl) : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif

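// Note: __x87_store/__x87_load are expanded around the context switch in
// returnToKernel (below) so a thread's floating-point control state (rounding
// mode, exception masks) is saved and restored with it, on the assumption that
// __cfactx_switch itself only preserves the integer callee-saved registers.
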
extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static thread$ * __next_thread_search(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void idle_sleep(processor * proc);
static bool mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);

extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));

#if defined(CFA_WITH_IO_URING_IDLE)
	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
#endif

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();


//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	__cfaabi_tls.rand_seed ^= rdtscl();
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	/* paranoid */ verify( this->idle_wctx.ftr   != 0p );
	/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );

	// used for idle sleep when io_uring is present
	// mark it as already fulfilled so we know if there is a pending request or not
	this->idle_wctx.ftr->self.ptr = 1p;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		thread$ * readyThread = 0p;
		MAIN_LOOP:
		for() {
			// Check if there is pending io
			__cfa_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				// there is no point in holding submissions if we are idle
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this );

				// drain again in case something showed up
				__cfa_io_drain( this );

				readyThread = __next_thread( this->cltr );
			}

			if( !readyThread ) for(5) {
				readyThread = __next_thread_slow( this->cltr );

				if( readyThread ) break;

				// It's unlikely we still have I/O to submit, but the arbiter could have some
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this );

				// drain again in case something showed up
				__cfa_io_drain( this );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				// Push self to idle stack
				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;

				// Confirm the ready-queue is empty
				readyThread = __next_thread_search( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS__(true, ready.sleep.cancels++; )

					// continue the main loop
					break HALT;
				}

				idle_sleep( this );

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
				__IO_STATS__(true, io.flush.dirty++; )
				__cfa_io_flush( this );
			}
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set
		// and it makes sense for it to be set in all other cases except here
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	coroutine$ * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING:  while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		// Update the state after setting this_thread
		// so that the debugger can find all active threads
		// in tls storage
		thrd_dst->state = Active;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

		// set context switch to the thread that the processor is executing
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// Reset the this_thread now that we know
			// the state isn't active anymore
			kernelTLS().this_thread = 0p;

			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst, UNPARK_LOCAL );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// Reset the this_thread now that we know
			// the state isn't active anymore
			kernelTLS().this_thread = 0p;

			// The thread has halted, it should never be scheduled/run again
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// Reset the this_thread now that we know
		// the state isn't active anymore
		kernelTLS().this_thread = 0p;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				__STATS__(true, ready.threads.threads++; )
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	__STATS__(true, ready.threads.threads--; )

	/* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
static void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	thread$ * thrd_src = kernelTLS().this_thread;

	__STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )

	// Hand control back to the processor coroutine, preserving errno and the
	// floating-point control state across the switch
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( thrd->curr_cluster );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
					"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */	if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
					"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	// Dereference the thread now because once we push it, there is no guarantee it's still valid.
	struct cluster * cl = thrd->curr_cluster;
	__STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

	// push the thread to the cluster ready-queue
	push( cl, thrd, hint );

	// variable thrd is no longer safe to use
	thrd = 0xdeaddeaddeaddeadp;

	// wake the cluster using the saved variable.
	__wake_one( cl );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.threads.threads++;
			if(outside) {
				__tls_stats()->ready.threads.extunpark++;
			}
		}
		else {
			__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
	ready_schedule_lock();
		__schedule_thread( thrd, hint );
	ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_slow( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_search( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
			abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
	}
}

void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());

	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		// Wake lost the race, schedule the thread
		__schedule_thread( thrd, hint );
	}

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		disable_interrupts();
			// Wake lost the race, schedule the thread
			schedule_thread$( thrd, hint );
		enable_interrupts(false);
	}
}

void park( void ) libcfa_public {
	__disable_interrupts_checked();
		/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
		returnToKernel();
	__enable_interrupts_checked();

}

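// Usage sketch (the variable `slot` below is illustrative, not part of this file):
// a waiting thread publishes itself and parks, a waker later takes the pointer
// and unparks it; the ticket managed by __must_unpark keeps the pairing correct
// even if the unpark happens before the waiter has finished blocking in park().
//
//     // waiter
//     __atomic_store_n( &slot, active_thread(), __ATOMIC_SEQ_CST );
//     park();
//
//     // waker
//     thread$ * w = __atomic_exchange_n( &slot, 0p, __ATOMIC_SEQ_CST );
//     unpark( w, UNPARK_LOCAL );
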
extern "C" {
	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __cfactx_thrd_leave() {
		thread$ * thrd = active_thread();
		monitor$ * this = &thrd->self_mon;

		// Lock the monitor now
		lock( this->lock __cfaabi_dbg_ctx2 );

		disable_interrupts();

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( thrd->state == Active );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
		/* paranoid */ verify( kernelTLS().this_thread == thrd );
		/* paranoid */ verify( thrd->context.SP );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

		thrd->state = Halting;
		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

		// Leave the thread
		returnToKernel();

		// Control flow should never reach here!
		abort();
	}
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) libcfa_public {
	__disable_interrupts_checked();
		thread$ * thrd = kernelTLS().this_thread;
		/* paranoid */ verify(thrd->state == Active);

		// SKULLDUGGERY: It is possible that we are preempting this thread just before
		// it was going to park itself. If that is the case and it is already using the
		// intrusive fields, then we can't use them to preempt the thread.
		// If so, abandon the preemption.
		bool preempted = false;
		if(thrd->link.next == 0p) {
			preempted = true;
			thrd->preempted = reason;
			returnToKernel();
		}
	__enable_interrupts_checked( false );
	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake one idle processor from the front of the idle list, if there are any
static void __wake_one(cluster * this) {
	eventfd_t val;

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);

	// If no one is sleeping: we are done
	if( fdp == 0p ) return;

	int fd = 1;
	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
	}

	switch(fd) {
		__attribute__((unused)) int ret;
	case 0:
		// If the processor isn't ready to sleep then the exchange will already wake it up
		#if !defined(__CFA_NO_STATISTICS__)
			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
			} else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
		#endif
		break;
	case 1:
		// If someone else already said they will wake them: we are done
		#if !defined(__CFA_NO_STATISTICS__)
			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
			} else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
		#endif
		break;
	default:
		// If the processor was ready to sleep, we need to wake it up with an actual write
		val = 1;
		ret = eventfd_write( fd, val );
		/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );

		#if !defined(__CFA_NO_STATISTICS__)
			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
			} else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
		#endif
		break;
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	this->idle_wctx.sem = 1;

	this->idle_wctx.wake__time = rdtscl();

	eventfd_t val;
	val = 1;
	__attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );

	/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
	/* paranoid */ verify( ! __preemption_enabled() );
}

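// Idle/wake handshake on idle_wctx.sem, as implemented by mark_idle, __wake_one,
// __wake_proc and idle_sleep below:
//   0         : the processor has announced its intent to sleep (mark_idle)
//   its evfd  : the processor has committed to sleeping on its eventfd (idle_sleep)
//   1         : a wake-up was requested; either the would-be sleeper sees it before
//               blocking, or the waker finds the eventfd value and writes to it.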
static void idle_sleep(processor * this) {
	/* paranoid */ verify( this->idle_wctx.evfd != 1 );
	/* paranoid */ verify( this->idle_wctx.evfd != 2 );

	// Tell everyone we are ready to go to sleep
	for() {
		int expected = this->idle_wctx.sem;

		// Someone already told us to wake-up! No time for a nap.
		if(expected == 1) { return; }

		// Try to mark that we are going to sleep
		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
			// Everyone agreed, taking a nap
			break;
		}
	}


	#if !defined(CFA_WITH_IO_URING_IDLE)
		#if !defined(__CFA_NO_STATISTICS__)
			if(this->print_halts) {
				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
			}
		#endif

		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_wctx.evfd);

		{
			eventfd_t val;
			ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
			if(ret < 0) {
				switch((int)errno) {
				case EAGAIN:
				#if EAGAIN != EWOULDBLOCK
					case EWOULDBLOCK:
				#endif
				case EINTR:
					// No need to do anything special here, just assume it's a legitimate wake-up
					break;
				default:
					abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
				}
			}
		}

		#if !defined(__CFA_NO_STATISTICS__)
			if(this->print_halts) {
				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
			}
		#endif
	#else
		__cfa_io_idle( this );
	#endif
}

static bool mark_idle(__cluster_proc_list & this, processor & proc) {
	__STATS__(true, ready.sleep.halts++; )

	proc.idle_wctx.sem = 0;

	/* paranoid */ verify( ! __preemption_enabled() );
	if(!try_lock( this )) return false;
		this.idle++;
		/* paranoid */ verify( this.idle <= this.total );
		remove(proc);
		insert_first(this.idles, proc);

		__atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );

	return true;
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle--;
		/* paranoid */ verify( this.idle >= 0 );
		remove(proc);
		insert_last(this.actives, proc);

		{
			struct __fd_waitctx * wctx = 0;
			if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
			__atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
		}

	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	thread$ * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) libcfa_public {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) libcfa_public {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	static void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed-up
		// this doesn't solve all problems but does solve many
		// so it's probably good enough
		disable_interrupts();
		uint_fast32_t last_size = ready_mutate_lock();

			crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
			crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
		enable_interrupts();
	}


	void print_stats_now( cluster & this, int flags ) libcfa_public {
		crawl_cluster_stats( this );
		__print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //