source: libcfa/src/concurrency/kernel.cfa@ 4233338b

Last change on this file since 4233338b was 7d0ebd0, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Processors should now correctly be unconditionally woken up on termination

1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// kernel.c --
8//
9// Author : Thierry Delisle
10// Created On : Tue Jan 17 12:27:26 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Mon Aug 31 07:08:20 2020
13// Update Count : 71
14//
15
16#define __cforall_thread__
17#define _GNU_SOURCE
18
19// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__
20
21//C Includes
22#include <errno.h>
23#include <stdio.h>
24#include <string.h>
25#include <signal.h>
26#include <unistd.h>
27extern "C" {
28 #include <sys/eventfd.h>
29 #include <sys/uio.h>
30}
31
32//CFA Includes
33#include "kernel_private.hfa"
34#include "preemption.hfa"
35#include "strstream.hfa"
36#include "device/cpu.hfa"
37#include "io/types.hfa"
38
39//Private includes
40#define __CFA_INVOKE_PRIVATE__
41#include "invoke.h"
42
43#if !defined(__CFA_NO_STATISTICS__)
44 #define __STATS( ...) __VA_ARGS__
45#else
46 #define __STATS( ...)
47#endif
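// __STATS(...) expands to its arguments only when statistics are compiled in,
// and to nothing otherwise. For example:
//     __STATS( __tls_stats()->ready.sleep.halts++; )
// costs nothing in builds that define __CFA_NO_STATISTICS__.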
48
49//-----------------------------------------------------------------------------
50// Some assembly required
51#if defined( __i386 )
52 // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
53 // fcw : X87 FPU control word (preserved across function calls)
54 #define __x87_store \
55 uint32_t __mxcr; \
56 uint16_t __fcw; \
57 __asm__ volatile ( \
58 "stmxcsr %0\n" \
59 "fnstcw %1\n" \
60 : "=m" (__mxcr),\
61 "=m" (__fcw) \
62 )
63
64 #define __x87_load \
65 __asm__ volatile ( \
66 "fldcw %1\n" \
67 "ldmxcsr %0\n" \
68 ::"m" (__mxcr),\
69 "m" (__fcw) \
70 )
71
72#elif defined( __x86_64 )
73 #define __x87_store \
74 uint32_t __mxcr; \
75 uint16_t __fcw; \
76 __asm__ volatile ( \
77 "stmxcsr %0\n" \
78 "fnstcw %1\n" \
79 : "=m" (__mxcr),\
80 "=m" (__fcw) \
81 )
82
83 #define __x87_load \
84 __asm__ volatile ( \
85 "fldcw %1\n" \
86 "ldmxcsr %0\n" \
87 :: "m" (__mxcr),\
88 "m" (__fcw) \
89 )
90
91#elif defined( __arm__ )
92 #define __x87_store
93 #define __x87_load
94
95#elif defined( __aarch64__ )
96 #define __x87_store \
97 uint32_t __fpcntl[2]; \
98 __asm__ volatile ( \
99 "mrs x9, FPCR\n" \
100 "mrs x10, FPSR\n" \
101 "stp x9, x10, %0\n" \
102 : "=m" (__fpcntl) : : "x9", "x10" \
103 )
104
105 #define __x87_load \
106 __asm__ volatile ( \
107 "ldp x9, x10, %0\n" \
108 "msr FPSR, x10\n" \
109 "msr FPCR, x9\n" \
110 : "=m" (__fpcntl) : : "x9", "x10" \
111 )
112
113#else
114 #error unsupported hardware architecture
115#endif
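// __x87_store / __x87_load snapshot and restore the floating-point control state
// (MXCSR and the x87 control word on x86/x86_64, FPCR/FPSR on aarch64; empty on
// 32-bit ARM) around a context switch, presumably because __cfactx_switch itself
// does not preserve these registers; see their use in returnToKernel below.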
116
117extern thread$ * mainThread;
118extern processor * mainProcessor;
119
120//-----------------------------------------------------------------------------
121// Kernel Scheduling logic
122static thread$ * __next_thread(cluster * this);
123static thread$ * __next_thread_slow(cluster * this);
124static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
125static void __run_thread(processor * this, thread$ * dst);
126static void __wake_one(cluster * cltr);
127
128static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
129static bool mark_idle (__cluster_proc_list & idles, processor & proc);
130static void mark_awake(__cluster_proc_list & idles, processor & proc);
131
132extern void __cfa_io_start( processor * );
133extern bool __cfa_io_drain( processor * );
134extern bool __cfa_io_flush( processor *, int min_comp );
135extern void __cfa_io_stop ( processor * );
136static inline bool __maybe_io_drain( processor * );
137
138#if defined(CFA_WITH_IO_URING_IDLE)
139 extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
140#endif
141
142extern void __disable_interrupts_hard();
143extern void __enable_interrupts_hard();
144
145
146//=============================================================================================
147// Kernel Scheduling logic
148//=============================================================================================
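// Overview of the processor main loop below: drain any pending io_uring
// completions, try the fast ready-queue pop, and if that fails flush I/O and
// try the slower pop. Only when no work is found does the processor go through
// the idle-sleep protocol (mark_idle / re-check / idle_sleep / mark_awake).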
149//Main of the processor contexts
150void main(processorCtx_t & runner) {
151 // Because of a bug, we couldn't initialize the seed on construction,
152 // so do it here.
153 __cfaabi_tls.rand_seed ^= rdtscl();
154 __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
155 __tls_rand_advance_bck();
156
157 processor * this = runner.proc;
158 verify(this);
159
160 io_future_t future; // used for idle sleep when io_uring is present
161 future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
162 eventfd_t idle_val;
163 iovec idle_iovec = { &idle_val, sizeof(idle_val) };
164
165 __cfa_io_start( this );
166
167 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
168 #if !defined(__CFA_NO_STATISTICS__)
169 if( this->print_halts ) {
170 __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
171 }
172 #endif
173
174 {
175 // Setup preemption data
176 preemption_scope scope = { this };
177
178 // if we need to run some special setup, now is the time to do it.
179 if(this->init.thrd) {
180 this->init.thrd->curr_cluster = this->cltr;
181 __run_thread(this, this->init.thrd);
182 }
183
184 __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);
185
186 thread$ * readyThread = 0p;
187 MAIN_LOOP:
188 for() {
189 #define OLD_MAIN 1
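// OLD_MAIN selects the loop below; the #else branch is an alternative,
// seemingly in-progress rework of the same loop (note the #warning) that keeps
// the ready-queue lock held while searching and stealing.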
190 #if OLD_MAIN
191 // Check if there is pending io
192 __maybe_io_drain( this );
193
194 // Try to get the next thread
195 readyThread = __next_thread( this->cltr );
196
197 if( !readyThread ) {
198 __cfa_io_flush( this, 0 );
199
200 readyThread = __next_thread_slow( this->cltr );
201 }
202
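// Idle-sleep protocol: mark_idle publishes this processor (and its eventfd) on
// the cluster's idle list, then the ready queue is checked one more time so a
// thread scheduled concurrently is not missed, and only then does the processor
// block in idle_sleep. mark_awake undoes the registration on wake-up.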
203 HALT:
204 if( !readyThread ) {
205 // Don't block if we are done
206 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
207
208 #if !defined(__CFA_NO_STATISTICS__)
209 __tls_stats()->ready.sleep.halts++;
210 #endif
211
212 // Push self to idle stack
213 if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
214
215 // Confirm the ready-queue is empty
216 readyThread = __next_thread_slow( this->cltr );
217 if( readyThread ) {
218 // A thread was found, cancel the halt
219 mark_awake(this->cltr->procs, * this);
220
221 #if !defined(__CFA_NO_STATISTICS__)
222 __tls_stats()->ready.sleep.cancels++;
223 #endif
224
225 // continue the main loop
226 break HALT;
227 }
228
229 idle_sleep( this, future, idle_iovec );
230
231 // We were woken up, remove self from idle
232 mark_awake(this->cltr->procs, * this);
233
234 // DON'T just proceed, start looking again
235 continue MAIN_LOOP;
236 }
237
238 /* paranoid */ verify( readyThread );
239
240 // Reset io dirty bit
241 this->io.dirty = false;
242
243 // We found a thread, run it
244 __run_thread(this, readyThread);
245
246 // Are we done?
247 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
248
249 if(this->io.pending && !this->io.dirty) {
250 __cfa_io_flush( this, 0 );
251 }
252
253 #else
254 #warning new kernel loop
255 SEARCH: {
256 /* paranoid */ verify( ! __preemption_enabled() );
257
258 // First, lock the scheduler since we are searching for a thread
259 ready_schedule_lock();
260
261 // Try to get the next thread
262 readyThread = pop_fast( this->cltr );
263 if(readyThread) { ready_schedule_unlock(); break SEARCH; }
264
265 // If we can't find a thread, might as well flush any outstanding I/O
266 if(this->io.pending) { __cfa_io_flush( this, 0 ); }
267
268 // Spin a little on I/O, just in case
269 for(5) {
270 __maybe_io_drain( this );
271 readyThread = pop_fast( this->cltr );
272 if(readyThread) { ready_schedule_unlock(); break SEARCH; }
273 }
274
275 // no luck, try stealing a few times
276 for(5) {
277 if( __maybe_io_drain( this ) ) {
278 readyThread = pop_fast( this->cltr );
279 } else {
280 readyThread = pop_slow( this->cltr );
281 }
282 if(readyThread) { ready_schedule_unlock(); break SEARCH; }
283 }
284
285 // still no luck, search for a thread
286 readyThread = pop_search( this->cltr );
287 if(readyThread) { ready_schedule_unlock(); break SEARCH; }
288
289 // Don't block if we are done
290 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
291 ready_schedule_unlock();
292 break MAIN_LOOP;
293 }
294
295 __STATS( __tls_stats()->ready.sleep.halts++; )
296
297 // Push self to idle stack
298 ready_schedule_unlock();
299 if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
300 ready_schedule_lock();
301
302 // Confirm the ready-queue is empty
303 __maybe_io_drain( this );
304 readyThread = pop_search( this->cltr );
305 ready_schedule_unlock();
306
307 if( readyThread ) {
308 // A thread was found, cancel the halt
309 mark_awake(this->cltr->procs, * this);
310
311 __STATS( __tls_stats()->ready.sleep.cancels++; )
312
313 // continue the main loop
314 break SEARCH;
315 }
316
317 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
318 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
319
320 {
321 eventfd_t val;
322 ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
323 if(ret < 0) {
324 switch((int)errno) {
325 case EAGAIN:
326 #if EAGAIN != EWOULDBLOCK
327 case EWOULDBLOCK:
328 #endif
329 case EINTR:
330 // No need to do anything special here, just assume it's a legitimate wake-up
331 break;
332 default:
333 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
334 }
335 }
336 }
337
338 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
339
340 // We were woken up, remove self from idle
341 mark_awake(this->cltr->procs, * this);
342
343 // DON'T just proceed, start looking again
344 continue MAIN_LOOP;
345 }
346
347 RUN_THREAD:
348 /* paranoid */ verify( ! __preemption_enabled() );
349 /* paranoid */ verify( readyThread );
350
351 // Reset io dirty bit
352 this->io.dirty = false;
353
354 // We found a thread, run it
355 __run_thread(this, readyThread);
356
357 // Are we done?
358 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
359
360 if(this->io.pending && !this->io.dirty) {
361 __cfa_io_flush( this, 0 );
362 }
363
364 ready_schedule_lock();
365 __maybe_io_drain( this );
366 ready_schedule_unlock();
367 #endif
368 }
369
370 __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
371 }
372
373 for(int i = 0; !available(future); i++) {
374 if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has been spinning on a flush after exit loop.\n", 60);
375 __cfa_io_flush( this, 1 );
376 }
377
378 __cfa_io_stop( this );
379
380 post( this->terminated );
381
382 if(this == mainProcessor) {
383 // HACK : the coroutine context switch expects this_thread to be set
384 // and it makes sense for it to be set in all other cases except here,
385 // so fake it.
386 __cfaabi_tls.this_thread = mainThread;
387 }
388
389 __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
390}
391
392static int * __volatile_errno() __attribute__((noinline));
393static int * __volatile_errno() { asm(""); return &errno; }
394
395// KERNEL ONLY
396// runThread runs a thread by context switching
397// from the processor coroutine to the target thread
398static void __run_thread(processor * this, thread$ * thrd_dst) {
399 /* paranoid */ verify( ! __preemption_enabled() );
400 /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
401 /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
402 __builtin_prefetch( thrd_dst->context.SP );
403
404 __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);
405
406 coroutine$ * proc_cor = get_coroutine(this->runner);
407
408 // set state of processor coroutine to inactive
409 verify(proc_cor->state == Active);
410 proc_cor->state = Blocked;
411
412 // Actually run the thread
413 RUNNING: while(true) {
414 thrd_dst->preempted = __NO_PREEMPTION;
415 thrd_dst->state = Active;
416
417 // Update global state
418 kernelTLS().this_thread = thrd_dst;
419
420 /* paranoid */ verify( ! __preemption_enabled() );
421 /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
422 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
423 /* paranoid */ verify( thrd_dst->context.SP );
424 /* paranoid */ verify( thrd_dst->state != Halted );
425 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
426 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
427 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
428
429
430
431 // set context switch to the thread that the processor is executing
432 __cfactx_switch( &proc_cor->context, &thrd_dst->context );
433 // when __cfactx_switch returns we are back in the processor coroutine
434
435
436
437 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
438 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
439 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
440 /* paranoid */ verify( thrd_dst->context.SP );
441 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
442 /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
443 /* paranoid */ verify( ! __preemption_enabled() );
444
445 // Reset global state
446 kernelTLS().this_thread = 0p;
447
448 // We just finished running a thread, there are a few things that could have happened.
449 // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
450 // 2 - Racy case : the thread has blocked but someone has already tried to schedule it.
451 // 3 - Preempted
452 // In case 1, we may have won a race so we can't write to the state again.
453 // In case 2, we lost the race so we now own the thread.
454
455 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
456 // The thread was preempted, reschedule it and reset the flag
457 schedule_thread$( thrd_dst, UNPARK_LOCAL );
458 break RUNNING;
459 }
460
461 if(unlikely(thrd_dst->state == Halting)) {
462 // The thread has halted, it should never be scheduled/run again
463 // finish the thread
464 __thread_finish( thrd_dst );
465 break RUNNING;
466 }
467
468 /* paranoid */ verify( thrd_dst->state == Active );
469 thrd_dst->state = Blocked;
470
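// The ticket is a two-party handshake with unpark (see __must_unpark): this
// side decrements after the thread has blocked, unpark increments, and
// whichever side acts second is responsible for getting the thread running again.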
471 // set state of processor coroutine to active and the thread to inactive
472 int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
473 switch(old_ticket) {
474 case TICKET_RUNNING:
475 // This is case 1, the regular case, nothing more is needed
476 break RUNNING;
477 case TICKET_UNBLOCK:
478 #if !defined(__CFA_NO_STATISTICS__)
479 __tls_stats()->ready.threads.threads++;
480 #endif
481 // This is case 2, the racy case, someone tried to run this thread before it finished blocking
482 // In this case, just run it again.
483 continue RUNNING;
484 default:
485 // This makes no sense, something is wrong, abort
486 abort();
487 }
488 }
489
490 // Just before returning to the processor, set the processor coroutine to active
491 proc_cor->state = Active;
492
493 __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);
494
495 #if !defined(__CFA_NO_STATISTICS__)
496 __tls_stats()->ready.threads.threads--;
497 #endif
498
499 /* paranoid */ verify( ! __preemption_enabled() );
500}
501
502// KERNEL_ONLY
503void returnToKernel() {
504 /* paranoid */ verify( ! __preemption_enabled() );
505 coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
506 thread$ * thrd_src = kernelTLS().this_thread;
507
508 __STATS( thrd_src->last_proc = kernelTLS().this_processor; )
509
510 // Run the thread on this processor
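// errno and the FP control state are logically per-thread, so they are saved
// into locals on this thread's stack here and restored after the switch back;
// other threads may run on this processor in between and change both.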
511 {
512 int local_errno = *__volatile_errno();
513 #if defined( __i386 ) || defined( __x86_64 )
514 __x87_store;
515 #endif
516 /* paranoid */ verify( proc_cor->context.SP );
517 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
518 __cfactx_switch( &thrd_src->context, &proc_cor->context );
519 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
520 #if defined( __i386 ) || defined( __x86_64 )
521 __x87_load;
522 #endif
523 *__volatile_errno() = local_errno;
524 }
525
526 #if !defined(__CFA_NO_STATISTICS__)
527 /* paranoid */ verify( thrd_src->last_proc != 0p );
528 if(thrd_src->last_proc != kernelTLS().this_processor) {
529 __tls_stats()->ready.threads.migration++;
530 }
531 #endif
532
533 /* paranoid */ verify( ! __preemption_enabled() );
534 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
535 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
536}
537
538//-----------------------------------------------------------------------------
539// Scheduler routines
540// KERNEL ONLY
541static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
542 /* paranoid */ verify( ! __preemption_enabled() );
543 /* paranoid */ verify( ready_schedule_islocked());
544 /* paranoid */ verify( thrd );
545 /* paranoid */ verify( thrd->state != Halted );
546 /* paranoid */ verify( thrd->curr_cluster );
547 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
548 /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
549 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
550 /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
551 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
552 /* paranoid */ #endif
553 /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
554 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
555
556 const bool local = thrd->state != Start;
557 if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
558
559 // Dereference the thread now because once we push it, there is no guarantee it's still valid.
560 struct cluster * cl = thrd->curr_cluster;
561 __STATS(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
562
563 // push the thread to the cluster ready-queue
564 push( cl, thrd, hint );
565
566 // variable thrd is no longer safe to use
567 thrd = 0xdeaddeaddeaddeadp;
568
569 // wake the cluster using the saved variable.
570 __wake_one( cl );
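// Note the ordering: the thread is pushed before __wake_one. A processor going
// idle publishes its eventfd and then re-checks the ready queue (see the main
// loop), so either the sleeper finds the thread on its re-check or this call
// sees its eventfd and wakes it; the intent is that a wake-up is never lost.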
571
572 #if !defined(__CFA_NO_STATISTICS__)
573 if( kernelTLS().this_stats ) {
574 __tls_stats()->ready.threads.threads++;
575 if(outside) {
576 __tls_stats()->ready.threads.extunpark++;
577 }
578 }
579 else {
580 __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
581 __atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
582 }
583 #endif
584
585 /* paranoid */ verify( ready_schedule_islocked());
586 /* paranoid */ verify( ! __preemption_enabled() );
587}
588
589void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
590 ready_schedule_lock();
591 __schedule_thread( thrd, hint );
592 ready_schedule_unlock();
593}
594
595// KERNEL ONLY
596static inline thread$ * __next_thread(cluster * this) with( *this ) {
597 /* paranoid */ verify( ! __preemption_enabled() );
598
599 ready_schedule_lock();
600 thread$ * thrd = pop_fast( this );
601 ready_schedule_unlock();
602
603 /* paranoid */ verify( ! __preemption_enabled() );
604 return thrd;
605}
606
607// KERNEL ONLY
608static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
609 /* paranoid */ verify( ! __preemption_enabled() );
610
611 ready_schedule_lock();
612 thread$ * thrd;
613 for(25) {
614 thrd = pop_slow( this );
615 if(thrd) goto RET;
616 }
617 thrd = pop_search( this );
618
619 RET:
620 ready_schedule_unlock();
621
622 /* paranoid */ verify( ! __preemption_enabled() );
623 return thrd;
624}
625
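// Unpark side of the ticket handshake: incrementing the ticket tells a thread
// that is still in the middle of blocking to run itself again (return false),
// while a thread that has fully blocked must be scheduled by the caller
// (return true).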
626static inline bool __must_unpark( thread$ * thrd ) {
627 int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
628 switch(old_ticket) {
629 case TICKET_RUNNING:
630 // Wake won the race, the thread will reschedule/rerun itself
631 return false;
632 case TICKET_BLOCKED:
633 /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
634 /* paranoid */ verify( thrd->state == Blocked );
635 return true;
636 default:
637 // This makes no sense, something is wrong, abort
638 abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
639 }
640}
641
642void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
643 /* paranoid */ verify( ! __preemption_enabled() );
644 /* paranoid */ verify( ready_schedule_islocked());
645
646 if( !thrd ) return;
647
648 if(__must_unpark(thrd)) {
649 // Wake lost the race, we must schedule the thread ourselves
650 __schedule_thread( thrd, hint );
651 }
652
653 /* paranoid */ verify( ready_schedule_islocked());
654 /* paranoid */ verify( ! __preemption_enabled() );
655}
656
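// Public variant of __kernel_unpark: callable with interrupts enabled, it
// disables them and takes the scheduler lock through schedule_thread$ instead
// of assuming both are already held.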
657void unpark( thread$ * thrd, unpark_hint hint ) {
658 if( !thrd ) return;
659
660 if(__must_unpark(thrd)) {
661 disable_interrupts();
662 // Wake lost the race, we must schedule the thread ourselves
663 schedule_thread$( thrd, hint );
664 enable_interrupts(false);
665 }
666}
667
668void park( void ) {
669 __disable_interrupts_checked();
670 /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
671 returnToKernel();
672 __enable_interrupts_checked();
673
674}
675
676extern "C" {
677 // Leave the thread monitor
678 // last routine called by a thread.
679 // Should never return
680 void __cfactx_thrd_leave() {
681 thread$ * thrd = active_thread();
682 monitor$ * this = &thrd->self_mon;
683
684 // Lock the monitor now
685 lock( this->lock __cfaabi_dbg_ctx2 );
686
687 disable_interrupts();
688
689 /* paranoid */ verify( ! __preemption_enabled() );
690 /* paranoid */ verify( thrd->state == Active );
691 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
692 /* paranoid */ verify( kernelTLS().this_thread == thrd );
693 /* paranoid */ verify( thrd->context.SP );
694 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
695 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
696
697 thrd->state = Halting;
698 if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
699 if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
700 if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
701
702 // Leave the thread
703 returnToKernel();
704
705 // Control flow should never reach here!
706 abort();
707 }
708}
709
710// KERNEL ONLY
711bool force_yield( __Preemption_Reason reason ) {
712 __disable_interrupts_checked();
713 thread$ * thrd = kernelTLS().this_thread;
714 /* paranoid */ verify(thrd->state == Active);
715
716 // SKULLDUGGERY: It is possible that we are preempting this thread just before
717 // it was going to park itself. If that is the case and it is already using its
718 // intrusive fields, then we can't use them to preempt it,
719 // so abandon the preemption.
720 bool preempted = false;
721 if(thrd->link.next == 0p) {
722 preempted = true;
723 thrd->preempted = reason;
724 returnToKernel();
725 }
726 __enable_interrupts_checked( false );
727 return preempted;
728}
729
730//=============================================================================================
731// Kernel Idle Sleep
732//=============================================================================================
733 // Wake the processor at the front of the idle list, if there is one
734static void __wake_one(cluster * this) {
735 /* paranoid */ verify( ! __preemption_enabled() );
736 /* paranoid */ verify( ready_schedule_islocked() );
737
738 // Check if there is a sleeping processor
739 int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST);
740
741 // If no one is sleeping, we are done
742 if( fd == 0 ) return;
743
744 // We found a processor, wake it up
745 eventfd_t val;
746 val = 1;
747 eventfd_write( fd, val );
748
749 #if !defined(__CFA_NO_STATISTICS__)
750 if( kernelTLS().this_stats ) {
751 __tls_stats()->ready.sleep.wakes++;
752 }
753 else {
754 __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
755 }
756 #endif
757
758 /* paranoid */ verify( ready_schedule_islocked() );
759 /* paranoid */ verify( ! __preemption_enabled() );
760
761 return;
762}
763
764 // Unconditionally wake a processor
765void __wake_proc(processor * this) {
766 /* paranoid */ verify( ! __preemption_enabled() );
767
768 __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
769
770 eventfd_t val;
771 val = 1;
772 eventfd_write( this->idle_fd, val );
773
774 /* paranoid */ verify( ! __preemption_enabled() );
775}
776
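// Two idle strategies: without CFA_WITH_IO_URING_IDLE the processor blocks in a
// plain read of its idle eventfd (EAGAIN/EWOULDBLOCK/EINTR are treated as
// legitimate wake-ups); with it, the eventfd read is submitted through io_uring
// (__kernel_read) and the processor parks inside __cfa_io_flush, presumably so
// it can be woken by either I/O completions or an explicit eventfd write.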
777static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
778 #if !defined(CFA_WITH_IO_URING_IDLE)
779 #if !defined(__CFA_NO_STATISTICS__)
780 if(this->print_halts) {
781 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
782 }
783 #endif
784
785 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
786
787 {
788 eventfd_t val;
789 ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
790 if(ret < 0) {
791 switch((int)errno) {
792 case EAGAIN:
793 #if EAGAIN != EWOULDBLOCK
794 case EWOULDBLOCK:
795 #endif
796 case EINTR:
797 // No need to do anything special here, just assume it's a legitimate wake-up
798 break;
799 default:
800 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
801 }
802 }
803 }
804
805 #if !defined(__CFA_NO_STATISTICS__)
806 if(this->print_halts) {
807 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
808 }
809 #endif
810 #else
811 // Do we already have a pending read
812 if(available(future)) {
813 // There is no pending read, we need to add one
814 reset(future);
815
816 __kernel_read(this, future, iov, this->idle_fd );
817 }
818
819 __cfa_io_flush( this, 1 );
820 #endif
821}
822
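// The cluster caches, in procs.fd, the eventfd of the processor at the front of
// the idle list: mark_idle publishes the newly idle processor there, mark_awake
// replaces it with the next idle processor (or 0 when none remain), and
// __wake_one wakes at most one sleeper with a single atomic load of that field.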
823static bool mark_idle(__cluster_proc_list & this, processor & proc) {
824 /* paranoid */ verify( ! __preemption_enabled() );
825 if(!try_lock( this )) return false;
826 this.idle++;
827 /* paranoid */ verify( this.idle <= this.total );
828 remove(proc);
829 insert_first(this.idles, proc);
830
831 __atomic_store_n(&this.fd, proc.idle_fd, __ATOMIC_SEQ_CST);
832 unlock( this );
833 /* paranoid */ verify( ! __preemption_enabled() );
834
835 return true;
836}
837
838static void mark_awake(__cluster_proc_list & this, processor & proc) {
839 /* paranoid */ verify( ! __preemption_enabled() );
840 lock( this );
841 this.idle--;
842 /* paranoid */ verify( this.idle >= 0 );
843 remove(proc);
844 insert_last(this.actives, proc);
845
846 {
847 int fd = 0;
848 if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;
849 __atomic_store_n(&this.fd, fd, __ATOMIC_SEQ_CST);
850 }
851
852 unlock( this );
853 /* paranoid */ verify( ! __preemption_enabled() );
854}
855
856//=============================================================================================
857// Unexpected Terminating logic
858//=============================================================================================
859void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
860 thread$ * thrd = __cfaabi_tls.this_thread;
861
862 if(thrd) {
863 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
864 __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
865
866 if ( &thrd->self_cor != thrd->curr_cor ) {
867 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
868 __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
869 }
870 else {
871 __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
872 }
873 }
874 else {
875 int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
876 __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
877 }
878}
879
880int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
881 return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
882}
883
884static __spinlock_t kernel_debug_lock;
885
886extern "C" {
887 void __cfaabi_bits_acquire() {
888 lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
889 }
890
891 void __cfaabi_bits_release() {
892 unlock( kernel_debug_lock );
893 }
894}
895
896//=============================================================================================
897// Kernel Utilities
898//=============================================================================================
899#if defined(CFA_HAVE_LINUX_IO_URING_H)
900#include "io/types.hfa"
901#endif
902
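// Cheap pre-check before draining: peek at the io_uring completion-queue head
// and tail without any lock, and only take the ready-queue lock and call
// __cfa_io_drain when completions are actually pending.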
903static inline bool __maybe_io_drain( processor * proc ) {
904 bool ret = false;
905 #if defined(CFA_HAVE_LINUX_IO_URING_H)
906 __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
907
908 // Check if we should drain the queue
909 $io_context * ctx = proc->io.ctx;
910 unsigned head = *ctx->cq.head;
911 unsigned tail = *ctx->cq.tail;
912 if(head == tail) return false;
913 #if OLD_MAIN
914 ready_schedule_lock();
915 ret = __cfa_io_drain( proc );
916 ready_schedule_unlock();
917 #else
918 ret = __cfa_io_drain( proc );
919 #endif
920 #endif
921 return ret;
922}
923
924//-----------------------------------------------------------------------------
925// Debug
926__cfaabi_dbg_debug_do(
927 extern "C" {
928 void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
929 this.prev_name = prev_name;
930 this.prev_thrd = kernelTLS().this_thread;
931 }
932 }
933)
934
935//-----------------------------------------------------------------------------
936// Debug
937bool threading_enabled(void) __attribute__((const)) {
938 return true;
939}
940
941//-----------------------------------------------------------------------------
942// Statistics
943#if !defined(__CFA_NO_STATISTICS__)
944 void print_halts( processor & this ) {
945 this.print_halts = true;
946 }
947
948 static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
949 /* paranoid */ verify( cltr->stats );
950
951 processor * it = &list`first;
952 for(unsigned i = 0; i < count; i++) {
953 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
954 /* paranoid */ verify( it->local_data->this_stats );
955 // __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
956 __tally_stats( cltr->stats, it->local_data->this_stats );
957 it = &(*it)`next;
958 }
959 }
960
961 void crawl_cluster_stats( cluster & this ) {
962 // Stop the world, otherwise stats could get really messed-up
963 // this doesn't solve all problems but does solve many
964 // so it's probably good enough
965 disable_interrupts();
966 uint_fast32_t last_size = ready_mutate_lock();
967
968 crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
969 crawl_list(&this, this.procs.idles , this.procs.idle );
970
971 // Unlock the RWlock
972 ready_mutate_unlock( last_size );
973 enable_interrupts();
974 }
975
976
977 void print_stats_now( cluster & this, int flags ) {
978 crawl_cluster_stats( this );
979 __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
980 }
981#endif
982// Local Variables: //
983// mode: c //
984// tab-width: 4 //
985// End: //