//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

//C Includes
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
extern "C" {
	#include <sys/eventfd.h>
}

//CFA Includes
#include "kernel_private.hfa"
#include "preemption.hfa"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS( ...) __VA_ARGS__
#else
	#define __STATS( ...)
#endif

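// __STATS(...) wraps statements that exist only for statistics gathering, e.g.
//     __STATS( __tls_stats()->ready.sleep.halts++; )
// so they compile away completely when __CFA_NO_STATISTICS__ is defined.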
//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store         \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"  \
			"fnstcw  %1\n"  \
			: "=m" (__mxcr),\
				"=m" (__fcw)  \
		)

	#define __x87_load         \
		__asm__ volatile (   \
			"fldcw  %1\n"  \
			"ldmxcsr %0\n" \
			::"m" (__mxcr),\
				"m" (__fcw)  \
		)

#elif defined( __x86_64 )
	#define __x87_store         \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"  \
			"fnstcw  %1\n"  \
			: "=m" (__mxcr),\
				"=m" (__fcw)  \
		)

	#define __x87_load          \
		__asm__ volatile (    \
			"fldcw  %1\n"   \
			"ldmxcsr %0\n"  \
			:: "m" (__mxcr),\
				"m" (__fcw)  \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	#define __x87_store              \
		uint32_t __fpcntl[2];    \
		__asm__ volatile (    \
			"mrs x9, FPCR\n" \
			"mrs x10, FPSR\n"  \
			"stp x9, x10, %0\n"  \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load         \
		__asm__ volatile (    \
			"ldp x9, x10, %0\n"  \
			"msr FPSR, x10\n"  \
			"msr FPCR, x9\n" \
		: "=m" (__fpcntl) : : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif
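// The __x87_store/__x87_load pairs save and restore the floating-point control state
// (SSE mxcsr and x87 control word on x86, FPCR/FPSR on aarch64) around a context switch;
// see their use in returnToKernel(). Since the ABI treats these control bits as preserved
// across calls, a thread that changed them expects to see its own settings when it resumes,
// so the runtime carries them between the thread and the processor coroutine explicitly.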

extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);
static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

extern void __cfa_io_start( processor * );
extern bool __cfa_io_drain( processor * );
extern void __cfa_io_flush( processor * );
extern void __cfa_io_stop ( processor * );
static inline bool __maybe_io_drain( processor * );

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();

static inline void __disable_interrupts_checked() {
	/* paranoid */ verify( __preemption_enabled() );
	disable_interrupts();
	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	enable_interrupts( poll );
	/* paranoid */ verify( __preemption_enabled() );
}
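// The *_checked variants above are debug-oriented wrappers: the verify() calls document,
// and when assertions are compiled in actually check, that the preemption state flips the
// way the caller expects.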

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	__cfaabi_tls.rand_seed ^= rdtscl();
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	__cfa_io_start( this );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		thread$ * readyThread = 0p;
		MAIN_LOOP:
		for() {
			#define OLD_MAIN 1
			#if OLD_MAIN
			// Check if there is pending io
			__maybe_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				__cfa_io_flush( this );
				readyThread = __next_thread_slow( this->cltr );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.sleep.halts++;
				#endif

				// Push self to idle stack
				mark_idle(this->cltr->procs, * this);

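				// Note: the re-check below is what avoids a lost wakeup. A thread made ready
				// between the last failed pop and mark_idle() is only guaranteed to trigger an
				// eventfd write from __wake_one() once this processor is visible on the idle
				// list, so look one more time before committing to sleep.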
				// Confirm the ready-queue is empty
				readyThread = __next_thread_slow( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					#if !defined(__CFA_NO_STATISTICS__)
						__tls_stats()->ready.sleep.cancels++;
					#endif

					// continue the main loop
					break HALT;
				}

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
					}
				#endif

				__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

				__disable_interrupts_hard();
				eventfd_t val;
				eventfd_read( this->idle, &val );
				__enable_interrupts_hard();

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
					}
				#endif

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(this->io.pending && !this->io.dirty) {
				__cfa_io_flush( this );
			}

			#else
				#warning new kernel loop
			SEARCH: {
				/* paranoid */ verify( ! __preemption_enabled() );

				// First, lock the scheduler since we are searching for a thread
				ready_schedule_lock();

				// Try to get the next thread
				readyThread = pop_fast( this->cltr );
				if(readyThread) { ready_schedule_unlock(); break SEARCH; }

				// If we can't find a thread, might as well flush any outstanding I/O
				if(this->io.pending) { __cfa_io_flush( this ); }

				// Spin a little on I/O, just in case
				for(5) {
					__maybe_io_drain( this );
					readyThread = pop_fast( this->cltr );
					if(readyThread) { ready_schedule_unlock(); break SEARCH; }
				}

				// no luck, try stealing a few times
				for(5) {
					if( __maybe_io_drain( this ) ) {
						readyThread = pop_fast( this->cltr );
					} else {
						readyThread = pop_slow( this->cltr );
					}
					if(readyThread) { ready_schedule_unlock(); break SEARCH; }
				}

				// still no luck, search for a thread
				readyThread = pop_search( this->cltr );
				if(readyThread) { ready_schedule_unlock(); break SEARCH; }

				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				__STATS( __tls_stats()->ready.sleep.halts++; )

				// Push self to idle stack
				ready_schedule_unlock();
				mark_idle(this->cltr->procs, * this);
				ready_schedule_lock();

				// Confirm the ready-queue is empty
				__maybe_io_drain( this );
				readyThread = pop_search( this->cltr );
				ready_schedule_unlock();

				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS( __tls_stats()->ready.sleep.cancels++; )

					// continue the main loop
					break SEARCH;
				}

				__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
				__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

				// __disable_interrupts_hard();
				eventfd_t val;
				eventfd_read( this->idle, &val );
				// __enable_interrupts_hard();

				__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

		RUN_THREAD:
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(this->io.pending && !this->io.dirty) {
				__cfa_io_flush( this );
			}

			ready_schedule_lock();
			__maybe_io_drain( this );
			ready_schedule_unlock();
			#endif
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	__cfa_io_stop( this );

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set
		// and it makes sense for it to be set in all other cases except here
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	int curr = __kernel_getcpu();
	if(thrd_dst->last_cpu != curr) {
		int64_t l = thrd_dst->last_cpu;
		int64_t c = curr;
		int64_t v = (l << 32) | c;
		__push_stat( __tls_stats(), v, false, "Processor", this );
	}

	thrd_dst->last_cpu = curr;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	coroutine$ * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING:  while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;
		thrd_dst->state = Active;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

		// set context switch to the thread that the processor is executing
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// Reset global state
		kernelTLS().this_thread = 0p;

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.
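		// In effect, thrd_dst->ticket is a tiny handshake: parking decrements it (the
		// __atomic_fetch_sub below) and unpark increments it (__must_unpark). Whichever side
		// performs the second of the two operations observes the other's value and takes
		// responsibility for making the thread run again, which is why park and unpark can
		// race in either order without holding a lock.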

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// The thread has halted, it should never be scheduled/run again
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.threads.threads++;
				#endif
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.threads.threads--;
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	thread$ * thrd_src = kernelTLS().this_thread;

	__STATS( thrd_src->last_proc = kernelTLS().this_processor; )

	// Run the thread on this processor
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( thrd->curr_cluster );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
					"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ 	if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
					"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

	const bool local = thrd->state != Start;
	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	// Dereference the thread now because once we push it, there is no guarantee it's still valid.
	struct cluster * cl = thrd->curr_cluster;
	__STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

	// push the thread to the cluster ready-queue
	push( cl, thrd, local );

	// variable thrd is no longer safe to use
	thrd = 0xdeaddeaddeaddeadp;
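	// Rationale for the poison value above: as soon as push() makes the thread visible in
	// the ready-queue, another processor may run it and even destroy it, so no field of
	// thrd may be touched past this point; only the cluster pointer saved in cl is used below.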

	// wake the cluster using the saved variable.
	__wake_one( cl );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.threads.threads++;
			if(outside) {
				__tls_stats()->ready.threads.extunpark++;
			}
		}
		else {
			__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd ) {
	ready_schedule_lock();
	__schedule_thread( thrd );
	ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd;
	for(25) {
		thrd = pop_slow( this );
		if(thrd) goto RET;
	}
	thrd = pop_search( this );

	RET:
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
			abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
	}
}

void __kernel_unpark( thread$ * thrd ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());

	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		// Wake lost the race,
		__schedule_thread( thrd );
	}

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd ) {
	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		disable_interrupts();
		// Wake lost the race,
		schedule_thread$( thrd );
		enable_interrupts(false);
	}
}

void park( void ) {
	__disable_interrupts_checked();
	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
	returnToKernel();
	__enable_interrupts_checked();
}
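// For illustration, user-level code typically pairs these as a one-shot handoff, relying on
// the ticket handshake in __must_unpark() to make the ordering irrelevant (sketch only):
//
//     // waiting thread                      // waking thread
//     ... make result visible ...            unpark( waiter );  // before or after the park()
//     park();                                // exactly one unpark() matches each park()
//
// If unpark() arrives first, the ticket records the wake and the kernel immediately reruns
// the thread when it tries to park; if park() blocks first, unpark() schedules the thread
// through schedule_thread$().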

extern "C" {
	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __cfactx_thrd_leave() {
		thread$ * thrd = active_thread();
		monitor$ * this = &thrd->self_mon;

		// Lock the monitor now
		lock( this->lock __cfaabi_dbg_ctx2 );

		disable_interrupts();

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( thrd->state == Active );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
		/* paranoid */ verify( kernelTLS().this_thread == thrd );
		/* paranoid */ verify( thrd->context.SP );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

		thrd->state = Halting;
		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

		// Leave the thread
		returnToKernel();

		// Control flow should never reach here!
		abort();
	}
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	__disable_interrupts_checked();
	thread$ * thrd = kernelTLS().this_thread;
	/* paranoid */ verify(thrd->state == Active);

	// SKULLDUGGERY: It is possible that we are preempting this thread just before
	// it was going to park itself. If that is the case and it is already using the
	// intrusive fields then we can't use them to preempt the thread
	// If that is the case, abandon the preemption.
	bool preempted = false;
	if(thrd->link.next == 0p) {
		preempted = true;
		thrd->preempted = reason;
		returnToKernel();
	}
	__enable_interrupts_checked( false );
	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake a processor from the front of the idle list, if there are any
static void __wake_one(cluster * this) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	processor * p;
	unsigned idle;
	unsigned total;
	[idle, total, p] = query_idles(this->procs);

	// If no one is sleeping, we are done
	if( idle == 0 ) return;

	// We found a processor, wake it up
	eventfd_t val;
	val = 1;
	eventfd_write( p->idle, val );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.sleep.wakes++;
		}
		else {
			__atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}
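// The eventfd_write() in __wake_one() pairs with the eventfd_read() a sleeping processor
// performs in the main loop: the write makes that read return, which is what actually wakes
// the processor so it can mark itself awake and search for work again.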

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	__disable_interrupts_checked();
		/* paranoid */ verify( ! __preemption_enabled() );
		eventfd_t val;
		val = 1;
		eventfd_write( this->idle, val );
	__enable_interrupts_checked();
}

static void mark_idle(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle++;
		/* paranoid */ verify( this.idle <= this.total );
		remove(proc);
		insert_first(this.idles, proc);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle--;
		/* paranoid */ verify( this.idle >= 0 );
		remove(proc);
		insert_last(this.actives, proc);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	for() {
		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
		if( 1 == (l % 2) ) { Pause(); continue; }
		unsigned idle    = this.idle;
		unsigned total   = this.total;
		processor * proc = &this.idles`first;
		// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
		asm volatile("": : :"memory");
		if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
		return [idle, total, proc];
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );
}
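// query_idles() reads the processor list with a seqlock-style protocol: an odd value of
// this.lock means a writer (mark_idle()/mark_awake(), which lock the list to update it) is
// in progress, and re-reading the lock word after taking the snapshot confirms no writer ran
// in the meantime; otherwise the snapshot is discarded and the read retried.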

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	thread$ * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

static inline bool __maybe_io_drain( processor * proc ) {
	bool ret = false;
	#if defined(CFA_HAVE_LINUX_IO_URING_H)
		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);

		// Check if we should drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
		if(head == tail) return false;
		#if OLD_MAIN
			ready_schedule_lock();
			ret = __cfa_io_drain( proc );
			ready_schedule_unlock();
		#else
			ret = __cfa_io_drain( proc );
		#endif
	#endif
	return ret;
}
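// head == tail on the io_uring completion queue means there is nothing to drain, so the common
// case of an idle ring skips the call into __cfa_io_drain() entirely.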

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS().this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed-up
		// this doesn't solve all problems but does solve many
		// so it's probably good enough
		uint_fast32_t last_size = ready_mutate_lock();

		crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
		crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
	}

	void print_stats_now( cluster & this, int flags ) {
		crawl_cluster_stats( this );
		__print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //