source: libcfa/src/concurrency/kernel.cfa @ 7dd98b6

Last change on this file since 7dd98b6 was c33c2af, checked in by Thierry Delisle <tdelisle@…>, 4 years ago:

Made some of the schedule locking more fine-grained.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

// C Includes
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
extern "C" {
	#include <sys/eventfd.h>
}

// CFA Includes
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS( ...) __VA_ARGS__
#else
	#define __STATS( ...)
#endif
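
// For example, a statistics update later in this file is written
//     __STATS( __tls_stats()->ready.sleep.halts++; )
// and expands to its argument when statistics are compiled in, to nothing otherwise.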

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr),\
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			::"m" (__mxcr),\
			  "m" (__fcw) \
		)

#elif defined( __x86_64 )
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr),\
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr),\
			   "m" (__fcw) \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	#define __x87_store \
		uint32_t __fpcntl[2]; \
		__asm__ volatile ( \
			"mrs x9, FPCR\n" \
			"mrs x10, FPSR\n" \
			"stp x9, x10, %0\n" \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"ldp x9, x10, %0\n" \
			"msr FPSR, x10\n" \
			"msr FPCR, x9\n" \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif

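// These macros bracket the context switch in returnToKernel() below; in sketch
// form:
//
//     int local_errno = *__volatile_errno();
//     __x87_store;                                          // save FP control state
//     __cfactx_switch( &thrd_src->context, &proc_cor->context );
//     __x87_load;                                           // restore FP control state
//     *__volatile_errno() = local_errno;
//
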
extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);
static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

extern void __cfa_io_start( processor * );
extern bool __cfa_io_drain( processor * );
extern void __cfa_io_flush( processor * );
extern void __cfa_io_stop ( processor * );
static inline bool __maybe_io_drain( processor * );

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();

static inline void __disable_interrupts_checked() {
	/* paranoid */ verify( __preemption_enabled() );
	disable_interrupts();
	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	enable_interrupts( poll );
	/* paranoid */ verify( __preemption_enabled() );
}
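
// The checked variants assert the preemption-state transition on both sides;
// e.g. park() below brackets returnToKernel() with
// __disable_interrupts_checked() / __enable_interrupts_checked().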

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	__cfaabi_tls.rand_seed ^= rdtscl();
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	__cfa_io_start( this );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		thread$ * readyThread = 0p;
		MAIN_LOOP:
		for() {
			#define OLD_MAIN 1
			#if OLD_MAIN
			// Check if there is pending io
			__maybe_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				ready_schedule_lock();
				__cfa_io_flush( this );
				ready_schedule_unlock();

				readyThread = __next_thread_slow( this->cltr );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.sleep.halts++;
				#endif

				// Push self to idle stack
				mark_idle(this->cltr->procs, * this);

				// Confirm the ready-queue is empty
				readyThread = __next_thread_slow( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					#if !defined(__CFA_NO_STATISTICS__)
						__tls_stats()->ready.sleep.cancels++;
					#endif

					// continue the main loop
					break HALT;
				}

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
					}
				#endif

				__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

				{
					eventfd_t val;
					ssize_t ret = read( this->idle, &val, sizeof(val) );
					if(ret < 0) {
						switch((int)errno) {
						case EAGAIN:
						#if EAGAIN != EWOULDBLOCK
						case EWOULDBLOCK:
						#endif
						case EINTR:
							// No need to do anything special here, just assume it's a legitimate wake-up
							break;
						default:
							abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
						}
					}
				}
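				// (The matching wake-up is the single eventfd_write( p->idle, 1 )
				// performed by __wake_one() / __wake_proc() below.)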

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
					}
				#endif

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(this->io.pending && !this->io.dirty) {
				ready_schedule_lock();
				__cfa_io_flush( this );
				ready_schedule_unlock();
			}

			#else
				#warning new kernel loop
			SEARCH: {
				/* paranoid */ verify( ! __preemption_enabled() );

				// First, lock the scheduler since we are searching for a thread
				ready_schedule_lock();

				// Try to get the next thread
				readyThread = pop_fast( this->cltr );
				if(readyThread) { ready_schedule_unlock(); break SEARCH; }

				// If we can't find a thread, might as well flush any outstanding I/O
				if(this->io.pending) { __cfa_io_flush( this ); }

				// Spin a little on I/O, just in case
				for(5) {
					__maybe_io_drain( this );
					readyThread = pop_fast( this->cltr );
					if(readyThread) { ready_schedule_unlock(); break SEARCH; }
				}

				// no luck, try stealing a few times
				for(5) {
					if( __maybe_io_drain( this ) ) {
						readyThread = pop_fast( this->cltr );
					} else {
						readyThread = pop_slow( this->cltr );
					}
					if(readyThread) { ready_schedule_unlock(); break SEARCH; }
				}

				// still no luck, search for a thread
				readyThread = pop_search( this->cltr );
				if(readyThread) { ready_schedule_unlock(); break SEARCH; }

				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
					ready_schedule_unlock();
					break MAIN_LOOP;
				}

				__STATS( __tls_stats()->ready.sleep.halts++; )

				// Push self to idle stack
				ready_schedule_unlock();
				mark_idle(this->cltr->procs, * this);
				ready_schedule_lock();

				// Confirm the ready-queue is empty
				__maybe_io_drain( this );
				readyThread = pop_search( this->cltr );
				ready_schedule_unlock();

				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS( __tls_stats()->ready.sleep.cancels++; )

					// continue the main loop
					break SEARCH;
				}

				__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
				__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

				{
					eventfd_t val;
					ssize_t ret = read( this->idle, &val, sizeof(val) );
					if(ret < 0) {
						switch((int)errno) {
						case EAGAIN:
						#if EAGAIN != EWOULDBLOCK
						case EWOULDBLOCK:
						#endif
						case EINTR:
							// No need to do anything special here, just assume it's a legitimate wake-up
							break;
						default:
							abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
						}
					}
				}

				__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			RUN_THREAD:
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(this->io.pending && !this->io.dirty) {
				__cfa_io_flush( this );
			}

			ready_schedule_lock();
			__maybe_io_drain( this );
			ready_schedule_unlock();
			#endif
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	__cfa_io_stop( this );

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set
		// and it makes sense for it to be set in all other cases except here
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// __run_thread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	coroutine$ * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING: while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;
		thrd_dst->state = Active;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

		// set context switch to the thread that the processor is executing
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// Reset global state
		kernelTLS().this_thread = 0p;

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst, UNPARK_LOCAL );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// The thread has halted, it should never be scheduled/run again
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.threads.threads++;
				#endif
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.threads.threads--;
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
}
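
// Thread-state transitions seen from this function, in sketch form (not exhaustive):
//
//     Ready --__run_thread--> Active --park--------------> Blocked --unpark--> Ready
//                               |    --preemption--------> (still Active, rescheduled above)
//                               +----__cfactx_thrd_leave-> Halting (then finished)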

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	thread$ * thrd_src = kernelTLS().this_thread;

	__STATS( thrd_src->last_proc = kernelTLS().this_processor; )

	// Run the thread on this processor
	{
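		// errno is thread-local to the underlying kernel thread, not to this
		// user thread, and the FP control words are only preserved across
		// function calls (see __x87_store above); both are therefore saved and
		// restored by hand around the context switch.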
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}
566
[8def349]567//-----------------------------------------------------------------------------
568// Scheduler routines
[14a61b5]569// KERNEL ONLY
[24e321c]570static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
[8fc652e0]571 /* paranoid */ verify( ! __preemption_enabled() );
[254ad1b]572 /* paranoid */ verify( ready_schedule_islocked());
[6a490b2]573 /* paranoid */ verify( thrd );
574 /* paranoid */ verify( thrd->state != Halted );
[9d6e1b8a]575 /* paranoid */ verify( thrd->curr_cluster );
[3381ed7]576 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
[504a7dc]577 /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
578 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
[ff79d5e]579 /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
[504a7dc]580 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
[3381ed7]581 /* paranoid */ #endif
[6a490b2]582 /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
[ac12f1f]583 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
[b4b63e8]584
[b808625]585 const bool local = thrd->state != Start;
[ae7be7a]586 if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
[6b4cdd3]587
[ec43cf9]588 // Dereference the thread now because once we push it, there is not guaranteed it's still valid.
589 struct cluster * cl = thrd->curr_cluster;
[24e321c]590 __STATS(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
[32a8b61]591
[254ad1b]592 // push the thread to the cluster ready-queue
[24e321c]593 push( cl, thrd, hint );
[32a8b61]594
[254ad1b]595 // variable thrd is no longer safe to use
[734908c]596 thrd = 0xdeaddeaddeaddeadp;
[32a8b61]597
[254ad1b]598 // wake the cluster using the save variable.
599 __wake_one( cl );
[1c273d0]600
[ec43cf9]601 #if !defined(__CFA_NO_STATISTICS__)
602 if( kernelTLS().this_stats ) {
603 __tls_stats()->ready.threads.threads++;
[89eff25]604 if(outside) {
605 __tls_stats()->ready.threads.extunpark++;
606 }
[ec43cf9]607 }
608 else {
609 __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
[89eff25]610 __atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
[ec43cf9]611 }
612 #endif
613
[254ad1b]614 /* paranoid */ verify( ready_schedule_islocked());
[8fc652e0]615 /* paranoid */ verify( ! __preemption_enabled() );
[db6f06a]616}
617
[24e321c]618void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
[254ad1b]619 ready_schedule_lock();
[24e321c]620 __schedule_thread( thrd, hint );
[254ad1b]621 ready_schedule_unlock();
622}
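
// __schedule_thread expects the ready-queue lock to already be held (and
// preemption disabled); schedule_thread$ is the self-locking wrapper, used for
// example to reschedule preempted threads in __run_thread above.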

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd;
	for(25) {
		thrd = pop_slow( this );
		if(thrd) goto RET;
	}
	thrd = pop_search( this );

	RET:
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
			abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
	}
}

void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());

	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		// Wake lost the race, the thread must be scheduled here
		__schedule_thread( thrd, hint );
	}

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd, unpark_hint hint ) {
	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		disable_interrupts();
		// Wake lost the race, the thread must be scheduled here
		schedule_thread$( thrd, hint );
		enable_interrupts(false);
	}
}

void park( void ) {
	__disable_interrupts_checked();
	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
	returnToKernel();
	__enable_interrupts_checked();
}
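
// The intended pairing, as a hypothetical sketch (not part of this file): a
// blocking primitive publishes its waiter, parks, and the signaller unparks it;
// the ticket protocol above makes the unavoidable park/unpark race benign.
//
//     thread$ * volatile waiter = 0p;
//
//     void wait() {
//         __atomic_store_n( &waiter, active_thread(), __ATOMIC_SEQ_CST );
//         park();                         // returns once a matching unpark occurs
//     }
//
//     void notify() {
//         unpark( waiter, UNPARK_LOCAL ); // safe even if wait() has not parked yet
//     }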

extern "C" {
	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __cfactx_thrd_leave() {
		thread$ * thrd = active_thread();
		monitor$ * this = &thrd->self_mon;

		// Lock the monitor now
		lock( this->lock __cfaabi_dbg_ctx2 );

		disable_interrupts();

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( thrd->state == Active );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
		/* paranoid */ verify( kernelTLS().this_thread == thrd );
		/* paranoid */ verify( thrd->context.SP );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

		thrd->state = Halting;
		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

		// Leave the thread
		returnToKernel();

		// Control flow should never reach here!
		abort();
	}
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	__disable_interrupts_checked();
	thread$ * thrd = kernelTLS().this_thread;
	/* paranoid */ verify(thrd->state == Active);

	// SKULLDUGGERY: It is possible that we are preempting this thread just before
	// it was going to park itself. If that is the case and it is already using the
	// intrusive fields then we can't use them to preempt the thread
	// If that is the case, abandon the preemption.
	bool preempted = false;
	if(thrd->link.next == 0p) {
		preempted = true;
		thrd->preempted = reason;
		returnToKernel();
	}
	__enable_interrupts_checked( false );
	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake a processor from the front if there are any
static void __wake_one(cluster * this) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	processor * p;
	unsigned idle;
	unsigned total;
	[idle, total, p] = query_idles(this->procs);

	// If no one is sleeping, we are done
	if( idle == 0 ) return;

	// We found a processor, wake it up
	eventfd_t val;
	val = 1;
	eventfd_write( p->idle, val );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.sleep.wakes++;
		}
		else {
			__atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	__disable_interrupts_checked();
	/* paranoid */ verify( ! __preemption_enabled() );
	eventfd_t val;
	val = 1;
	eventfd_write( this->idle, val );
	__enable_interrupts_checked();
}

static void mark_idle(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
	this.idle++;
	/* paranoid */ verify( this.idle <= this.total );
	remove(proc);
	insert_first(this.idles, proc);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
	this.idle--;
	/* paranoid */ verify( this.idle >= 0 );
	remove(proc);
	insert_last(this.actives, proc);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	for() {
		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
		if( 1 == (l % 2) ) { Pause(); continue; }
		unsigned idle = this.idle;
		unsigned total = this.total;
		processor * proc = &this.idles`first;
		// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
		asm volatile("": : :"memory");
		if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
		return [idle, total, proc];
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );
}
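
// query_idles is the read side of a sequence lock: the writers (presumably the
// lock()/unlock() used by mark_idle/mark_awake) leave this.lock odd while
// mutating and even when done, so a reader retries whenever it sees an odd
// value or the value changed across its reads.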

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	thread$ * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

static inline bool __maybe_io_drain( processor * proc ) {
	bool ret = false;
	#if defined(CFA_HAVE_LINUX_IO_URING_H)
		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);

		// Check if we should drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
		if(head == tail) return false;
		#if OLD_MAIN
			ready_schedule_lock();
			ret = __cfa_io_drain( proc );
			ready_schedule_unlock();
		#else
			ret = __cfa_io_drain( proc );
		#endif
	#endif
	return ret;
}
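
// The head/tail peek follows io_uring's shared completion-queue ring: the
// kernel advances *cq.tail as completions arrive and user space advances
// *cq.head as it consumes them, so head == tail means the ring is empty and
// the locked drain can be skipped.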

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS().this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed-up
		// this doesn't solve all problems but does solve many
		// so it's probably good enough
		disable_interrupts();
		uint_fast32_t last_size = ready_mutate_lock();

		crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
		crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
		enable_interrupts();
	}

	void print_stats_now( cluster & this, int flags ) {
		crawl_cluster_stats( this );
		__print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //