source: libcfa/src/concurrency/kernel.cfa @ ec421636

Last change on this file was ec421636, checked in by Thierry Delisle <tdelisle@…>, 4 years ago:

No longer using eventfd_read to avoid the poor interrupt handling.
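
For context, the idle loop below now calls read() on the eventfd directly, so errno is visible and EINTR/EAGAIN can be treated as benign wake-ups. A minimal sketch of the pattern (idle_fd stands in for the processor's idle eventfd; eventfd_read() is the glibc wrapper being dropped):

    eventfd_t val;
    ssize_t ret = read( idle_fd, &val, sizeof(val) );
    if( ret < 0 && errno != EINTR && errno != EAGAIN && errno != EWOULDBLOCK )
        abort( "read failure on idle eventfd" );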

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

//C Includes
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
extern "C" {
    #include <sys/eventfd.h>
}

//CFA Includes
#include "kernel_private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

#if !defined(__CFA_NO_STATISTICS__)
    #define __STATS( ...) __VA_ARGS__
#else
    #define __STATS( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
    // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
    // fcw  : X87 FPU control word (preserved across function calls)
    #define __x87_store \
        uint32_t __mxcr; \
        uint16_t __fcw; \
        __asm__ volatile ( \
            "stmxcsr %0\n" \
            "fnstcw  %1\n" \
            : "=m" (__mxcr), \
              "=m" (__fcw) \
        )

    #define __x87_load \
        __asm__ volatile ( \
            "fldcw  %1\n" \
            "ldmxcsr %0\n" \
            :: "m" (__mxcr), \
               "m" (__fcw) \
        )

#elif defined( __x86_64 )
    #define __x87_store \
        uint32_t __mxcr; \
        uint16_t __fcw; \
        __asm__ volatile ( \
            "stmxcsr %0\n" \
            "fnstcw  %1\n" \
            : "=m" (__mxcr), \
              "=m" (__fcw) \
        )

    #define __x87_load \
        __asm__ volatile ( \
            "fldcw  %1\n" \
            "ldmxcsr %0\n" \
            :: "m" (__mxcr), \
               "m" (__fcw) \
        )

#elif defined( __arm__ )
    #define __x87_store
    #define __x87_load

#elif defined( __aarch64__ )
    #define __x87_store \
        uint32_t __fpcntl[2]; \
        __asm__ volatile ( \
            "mrs x9, FPCR\n" \
            "mrs x10, FPSR\n" \
            "stp x9, x10, %0\n" \
            : "=m" (__fpcntl) : : "x9", "x10" \
        )

    #define __x87_load \
        __asm__ volatile ( \
            "ldp x9, x10, %0\n" \
            "msr FPSR, x10\n" \
            "msr FPCR, x9\n" \
            : : "m" (__fpcntl) : "x9", "x10" \
        )

#else
    #error unsupported hardware architecture
#endif
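
// Illustrative sketch (not additional runtime code): the store/load pair is meant
// to bracket a context switch so the FP control state, which the calling
// convention assumes is preserved, survives the switch. returnToKernel() below
// does exactly this:
//
//     __x87_store;                                         // save mxcsr/fcw (or FPCR/FPSR)
//     __cfactx_switch( &thrd->context, &proc->context );   // switch stacks
//     __x87_load;                                          // restore the saved control state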

extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);
static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

extern void __cfa_io_start( processor * );
extern bool __cfa_io_drain( processor * );
extern void __cfa_io_flush( processor * );
extern void __cfa_io_stop ( processor * );
static inline bool __maybe_io_drain( processor * );

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();

static inline void __disable_interrupts_checked() {
    /* paranoid */ verify( __preemption_enabled() );
    disable_interrupts();
    /* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    enable_interrupts( poll );
    /* paranoid */ verify( __preemption_enabled() );
}

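// A typical use of the checked pair brackets a blocking call, asserting the
// preemption state on both sides; this is exactly the shape of park() below:
//
//     __disable_interrupts_checked();   // asserts preemption was enabled
//     returnToKernel();                 // block the current thread
//     __enable_interrupts_checked();    // asserts preemption is still disabled
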
//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
    // Because of a bug, we couldn't initialize the seed during construction
    // Do it here
    __cfaabi_tls.rand_seed ^= rdtscl();
    __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
    __tls_rand_advance_bck();

    processor * this = runner.proc;
    verify(this);

    __cfa_io_start( this );

    __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
    #if !defined(__CFA_NO_STATISTICS__)
        if( this->print_halts ) {
            __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
        }
    #endif

    {
        // Setup preemption data
        preemption_scope scope = { this };

        // if we need to run some special setup, now is the time to do it.
        if(this->init.thrd) {
            this->init.thrd->curr_cluster = this->cltr;
            __run_thread(this, this->init.thrd);
        }

        __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

        thread$ * readyThread = 0p;
        MAIN_LOOP:
        for() {
            #define OLD_MAIN 1
            #if OLD_MAIN
            // Check if there is pending io
            __maybe_io_drain( this );

            // Try to get the next thread
            readyThread = __next_thread( this->cltr );

            if( !readyThread ) {
                __cfa_io_flush( this );
                readyThread = __next_thread_slow( this->cltr );
            }

            HALT:
            if( !readyThread ) {
                // Don't block if we are done
                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

                #if !defined(__CFA_NO_STATISTICS__)
                    __tls_stats()->ready.sleep.halts++;
                #endif

                // Push self to idle stack
                mark_idle(this->cltr->procs, * this);

                // Confirm the ready-queue is empty
                readyThread = __next_thread_slow( this->cltr );
                if( readyThread ) {
                    // A thread was found, cancel the halt
                    mark_awake(this->cltr->procs, * this);

                    #if !defined(__CFA_NO_STATISTICS__)
                        __tls_stats()->ready.sleep.cancels++;
                    #endif

                    // continue the main loop
                    break HALT;
                }

                #if !defined(__CFA_NO_STATISTICS__)
                    if(this->print_halts) {
                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
                    }
                #endif

                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

                {
                    eventfd_t val;
                    ssize_t ret = read( this->idle, &val, sizeof(val) );
                    if(ret < 0) {
                        switch((int)errno) {
                        case EAGAIN:
                        #if EAGAIN != EWOULDBLOCK
                        case EWOULDBLOCK:
                        #endif
                        case EINTR:
                            // No need to do anything special here, just assume it's a legitimate wake-up
                            break;
                        default:
                            abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
                        }
                    }
                }

                #if !defined(__CFA_NO_STATISTICS__)
                    if(this->print_halts) {
                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
                    }
                #endif

                // We were woken up, remove self from idle
                mark_awake(this->cltr->procs, * this);

                // DON'T just proceed, start looking again
                continue MAIN_LOOP;
            }
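
            // The sleep above is free of lost wake-ups because idleness is published
            // (mark_idle) before the final ready-queue check; a sketch of the two sides
            // (idle_fd standing in for this->idle):
            //     sleeper : mark_idle(); if( no work ) read( idle_fd ); else mark_awake();
            //     waker   : push( thrd ); if( idle != 0 ) eventfd_write( idle_fd, 1 );
            // At least one side always observes the other (cf. __wake_one below).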

            /* paranoid */ verify( readyThread );

            // Reset io dirty bit
            this->io.dirty = false;

            // We found a thread, run it
            __run_thread(this, readyThread);

            // Are we done?
            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

            if(this->io.pending && !this->io.dirty) {
                __cfa_io_flush( this );
            }

            #else
                #warning new kernel loop
            SEARCH: {
                /* paranoid */ verify( ! __preemption_enabled() );

                // First, lock the scheduler since we are searching for a thread
                ready_schedule_lock();

                // Try to get the next thread
                readyThread = pop_fast( this->cltr );
                if(readyThread) { ready_schedule_unlock(); break SEARCH; }

                // If we can't find a thread, might as well flush any outstanding I/O
                if(this->io.pending) { __cfa_io_flush( this ); }

                // Spin a little on I/O, just in case
                for(5) {
                    __maybe_io_drain( this );
                    readyThread = pop_fast( this->cltr );
                    if(readyThread) { ready_schedule_unlock(); break SEARCH; }
                }

                // no luck, try stealing a few times
                for(5) {
                    if( __maybe_io_drain( this ) ) {
                        readyThread = pop_fast( this->cltr );
                    } else {
                        readyThread = pop_slow( this->cltr );
                    }
                    if(readyThread) { ready_schedule_unlock(); break SEARCH; }
                }

                // still no luck, search for a thread
                readyThread = pop_search( this->cltr );
                if(readyThread) { ready_schedule_unlock(); break SEARCH; }

                // Don't block if we are done
                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

                __STATS( __tls_stats()->ready.sleep.halts++; )

                // Push self to idle stack
                ready_schedule_unlock();
                mark_idle(this->cltr->procs, * this);
                ready_schedule_lock();

                // Confirm the ready-queue is empty
                __maybe_io_drain( this );
                readyThread = pop_search( this->cltr );
                ready_schedule_unlock();

                if( readyThread ) {
                    // A thread was found, cancel the halt
                    mark_awake(this->cltr->procs, * this);

                    __STATS( __tls_stats()->ready.sleep.cancels++; )

                    // continue the main loop
                    break SEARCH;
                }

                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

                {
                    eventfd_t val;
                    ssize_t ret = read( this->idle, &val, sizeof(val) );
                    if(ret < 0) {
                        switch((int)errno) {
                        case EAGAIN:
                        #if EAGAIN != EWOULDBLOCK
                        case EWOULDBLOCK:
                        #endif
                        case EINTR:
                            // No need to do anything special here, just assume it's a legitimate wake-up
                            break;
                        default:
                            abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
                        }
                    }
                }

                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )

                // We were woken up, remove self from idle
                mark_awake(this->cltr->procs, * this);

                // DON'T just proceed, start looking again
                continue MAIN_LOOP;
            }

        RUN_THREAD:
            /* paranoid */ verify( ! __preemption_enabled() );
            /* paranoid */ verify( readyThread );

            // Reset io dirty bit
            this->io.dirty = false;

            // We found a thread, run it
            __run_thread(this, readyThread);

            // Are we done?
            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

            if(this->io.pending && !this->io.dirty) {
                __cfa_io_flush( this );
            }

            ready_schedule_lock();
            __maybe_io_drain( this );
            ready_schedule_unlock();
            #endif
        }

        __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
    }

    __cfa_io_stop( this );

    post( this->terminated );

    if(this == mainProcessor) {
        // HACK : the coroutine context switch expects this_thread to be set
        // and it makes sense for it to be set in all other cases except here
        // fake it
        __cfaabi_tls.this_thread = mainThread;
    }

    __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
    /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
    __builtin_prefetch( thrd_dst->context.SP );

    int curr = __kernel_getcpu();
    if(thrd_dst->last_cpu != curr) {
        int64_t l = thrd_dst->last_cpu;
        int64_t c = curr;
        int64_t v = (l << 32) | c;
        __push_stat( __tls_stats(), v, false, "Processor", this );
    }

    thrd_dst->last_cpu = curr;

    __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

    coroutine$ * proc_cor = get_coroutine(this->runner);

    // set state of processor coroutine to inactive
    verify(proc_cor->state == Active);
    proc_cor->state = Blocked;

    // Actually run the thread
    RUNNING: while(true) {
        thrd_dst->preempted = __NO_PREEMPTION;
        thrd_dst->state = Active;

        // Update global state
        kernelTLS().this_thread = thrd_dst;

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
        /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
        /* paranoid */ verify( thrd_dst->context.SP );
        /* paranoid */ verify( thrd_dst->state != Halted );
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

        // set context switch to the thread that the processor is executing
        __cfactx_switch( &proc_cor->context, &thrd_dst->context );
        // when __cfactx_switch returns we are back in the processor coroutine

        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
        /* paranoid */ verify( thrd_dst->context.SP );
        /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
        /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
        /* paranoid */ verify( ! __preemption_enabled() );

        // Reset global state
        kernelTLS().this_thread = 0p;

        // We just finished running a thread; there are a few things that could have happened.
        // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
        // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
        // 3 - Preempted
        // In case 1, we may have won a race so we can't write to the state again.
        // In case 2, we lost the race so we now own the thread.

        if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
            // The thread was preempted, reschedule it and reset the flag
            schedule_thread$( thrd_dst );
            break RUNNING;
        }

        if(unlikely(thrd_dst->state == Halting)) {
            // The thread has halted, it should never be scheduled/run again
            // finish the thread
            __thread_finish( thrd_dst );
            break RUNNING;
        }

        /* paranoid */ verify( thrd_dst->state == Active );
        thrd_dst->state = Blocked;

        // set state of processor coroutine to active and the thread to inactive
        int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
        switch(old_ticket) {
            case TICKET_RUNNING:
                // This is case 1, the regular case, nothing more is needed
                break RUNNING;
            case TICKET_UNBLOCK:
                #if !defined(__CFA_NO_STATISTICS__)
                    __tls_stats()->ready.threads.threads++;
                #endif
                // This is case 2, the racy case, someone tried to run this thread before it finished blocking
                // In this case, just run it again.
                continue RUNNING;
            default:
                // This makes no sense, something is wrong, abort
                abort();
        }
    }
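
    // The ticket exchange above in brief (TICKET_* values from kernel_private.hfa):
    //     parker   (here)           : old = fetch_sub( &ticket, 1 );  // gives up the processor
    //     unparker (__must_unpark)  : old = fetch_add( &ticket, 1 );  // tries to hand it back
    // Whichever side runs second observes the other's update and owns the next
    // step, matching cases 1 and 2 of the comment above.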

    // Just before returning to the processor, set the processor coroutine to active
    proc_cor->state = Active;

    __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

    #if !defined(__CFA_NO_STATISTICS__)
        __tls_stats()->ready.threads.threads--;
    #endif

    /* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
void returnToKernel() {
    /* paranoid */ verify( ! __preemption_enabled() );
    coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
    thread$ * thrd_src = kernelTLS().this_thread;

    __STATS( thrd_src->last_proc = kernelTLS().this_processor; )

    // Run the thread on this processor
    {
        int local_errno = *__volatile_errno();
        #if defined( __i386 ) || defined( __x86_64 )
            __x87_store;
        #endif
        /* paranoid */ verify( proc_cor->context.SP );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
        __cfactx_switch( &thrd_src->context, &proc_cor->context );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
        #if defined( __i386 ) || defined( __x86_64 )
            __x87_load;
        #endif
        *__volatile_errno() = local_errno;
    }

    #if !defined(__CFA_NO_STATISTICS__)
        /* paranoid */ verify( thrd_src->last_proc != 0p );
        if(thrd_src->last_proc != kernelTLS().this_processor) {
            __tls_stats()->ready.threads.migration++;
        }
    #endif

    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked());
    /* paranoid */ verify( thrd );
    /* paranoid */ verify( thrd->state != Halted );
    /* paranoid */ verify( thrd->curr_cluster );
    /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
    /* paranoid */     if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
                           "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
    /* paranoid */     if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
                           "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    /* paranoid */ #endif
    /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
    /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

    const bool local = thrd->state != Start;
    if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

    // Dereference the thread now because once we push it, there is no guarantee it's still valid.
    struct cluster * cl = thrd->curr_cluster;
    __STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

    // push the thread to the cluster ready-queue
    push( cl, thrd, local );

    // variable thrd is no longer safe to use
    thrd = 0xdeaddeaddeaddeadp;

    // wake the cluster using the saved variable.
    __wake_one( cl );

    #if !defined(__CFA_NO_STATISTICS__)
        if( kernelTLS().this_stats ) {
            __tls_stats()->ready.threads.threads++;
            if(outside) {
                __tls_stats()->ready.threads.extunpark++;
            }
        }
        else {
            __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
            __atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
        }
    #endif

    /* paranoid */ verify( ready_schedule_islocked());
    /* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd ) {
    ready_schedule_lock();
    __schedule_thread( thrd );
    ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
    /* paranoid */ verify( ! __preemption_enabled() );

    ready_schedule_lock();
    thread$ * thrd = pop_fast( this );
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
    return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
    /* paranoid */ verify( ! __preemption_enabled() );

    ready_schedule_lock();
    thread$ * thrd;
    for(25) {
        thrd = pop_slow( this );
        if(thrd) goto RET;
    }
    thrd = pop_search( this );

    RET:
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
    return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
    int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
    switch(old_ticket) {
        case TICKET_RUNNING:
            // Wake won the race, the thread will reschedule/rerun itself
            return false;
        case TICKET_BLOCKED:
            /* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
            /* paranoid */ verify( thrd->state == Blocked );
            return true;
        default:
            // This makes no sense, something is wrong, abort
            abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
    }
}

void __kernel_unpark( thread$ * thrd ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked());

    if( !thrd ) return;

    if(__must_unpark(thrd)) {
        // Wake lost the race,
        __schedule_thread( thrd );
    }

    /* paranoid */ verify( ready_schedule_islocked());
    /* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd ) {
    if( !thrd ) return;

    if(__must_unpark(thrd)) {
        disable_interrupts();
        // Wake lost the race,
        schedule_thread$( thrd );
        enable_interrupts(false);
    }
}

void park( void ) {
    __disable_interrupts_checked();
    /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
    returnToKernel();
    __enable_interrupts_checked();
}
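
// A minimal usage sketch of the park()/unpark() pair (hypothetical user-level
// code, not part of the kernel); the ticket handshake makes the order of the
// two calls irrelevant, so publishing then parking is safe:
//
//     thread$ * waiter = active_thread();
//     publish( waiter );          // hypothetical handoff location
//     park();                     // blocks until the consumer unparks us
//     ...
//     unpark( consume() );        // consumer side; racing with park() is fine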

extern "C" {
    // Leave the thread monitor
    // last routine called by a thread.
    // Should never return
    void __cfactx_thrd_leave() {
        thread$ * thrd = active_thread();
        monitor$ * this = &thrd->self_mon;

        // Lock the monitor now
        lock( this->lock __cfaabi_dbg_ctx2 );

        disable_interrupts();

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( thrd->state == Active );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
        /* paranoid */ verify( kernelTLS().this_thread == thrd );
        /* paranoid */ verify( thrd->context.SP );
        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

        thrd->state = Halting;
        if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
        if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
        if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

        // Leave the thread
        returnToKernel();

        // Control flow should never reach here!
        abort();
    }
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
    __disable_interrupts_checked();
    thread$ * thrd = kernelTLS().this_thread;
    /* paranoid */ verify(thrd->state == Active);

    // SKULLDUGGERY: It is possible that we are preempting this thread just before
    // it was going to park itself. If that is the case and it is already using the
    // intrusive fields, then we can't use them to preempt the thread;
    // in that case, abandon the preemption.
    bool preempted = false;
    if(thrd->link.next == 0p) {
        preempted = true;
        thrd->preempted = reason;
        returnToKernel();
    }
    __enable_interrupts_checked( false );
    return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake one sleeping processor from the front of the idle list, if there are any
static void __wake_one(cluster * this) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked() );

    // Check if there is a sleeping processor
    processor * p;
    unsigned idle;
    unsigned total;
    [idle, total, p] = query_idles(this->procs);

    // If no one is sleeping, we are done
    if( idle == 0 ) return;

    // We found a processor, wake it up
    eventfd_t val;
    val = 1;
    eventfd_write( p->idle, val );

    #if !defined(__CFA_NO_STATISTICS__)
        if( kernelTLS().this_stats ) {
            __tls_stats()->ready.sleep.wakes++;
        }
        else {
            __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
        }
    #endif

    /* paranoid */ verify( ready_schedule_islocked() );
    /* paranoid */ verify( ! __preemption_enabled() );

    return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
    __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

    __disable_interrupts_checked();
    /* paranoid */ verify( ! __preemption_enabled() );
    eventfd_t val;
    val = 1;
    eventfd_write( this->idle, val );
    __enable_interrupts_checked();
}

static void mark_idle(__cluster_proc_list & this, processor & proc) {
    /* paranoid */ verify( ! __preemption_enabled() );
    lock( this );
    this.idle++;
    /* paranoid */ verify( this.idle <= this.total );
    remove(proc);
    insert_first(this.idles, proc);
    unlock( this );
    /* paranoid */ verify( ! __preemption_enabled() );
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
    /* paranoid */ verify( ! __preemption_enabled() );
    lock( this );
    this.idle--;
    /* paranoid */ verify( this.idle >= 0 );
    remove(proc);
    insert_last(this.actives, proc);
    unlock( this );
    /* paranoid */ verify( ! __preemption_enabled() );
}

static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked() );

    for() {
        uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
        if( 1 == (l % 2) ) { Pause(); continue; }
        unsigned idle    = this.idle;
        unsigned total   = this.total;
        processor * proc = &this.idles`first;
        // The compiler fence should be unnecessary, but gcc-8 and older incorrectly reorder the reads without it
        asm volatile("": : :"memory");
        if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
        return [idle, total, proc];
    }

    /* paranoid */ verify( ready_schedule_islocked() );
    /* paranoid */ verify( ! __preemption_enabled() );
}
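
// query_idles reads optimistically, sequence-lock style: an odd lock value means a
// writer (mark_idle/mark_awake, via lock/unlock above) is mid-update, and a changed
// value means an update raced with the read. The writer side is assumed to behave as:
//
//     __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );   // enter: count becomes odd
//     /* mutate idle / total / the idles list */
//     __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );   // exit: count becomes even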

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
    thread$ * thrd = __cfaabi_tls.this_thread;

    if(thrd) {
        int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );

        if ( &thrd->self_cor != thrd->curr_cor ) {
            len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
            __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
        }
        else {
            __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
        }
    }
    else {
        int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
    }
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
    void __cfaabi_bits_acquire() {
        lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
    }

    void __cfaabi_bits_release() {
        unlock( kernel_debug_lock );
    }
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

static inline bool __maybe_io_drain( processor * proc ) {
    bool ret = false;
    #if defined(CFA_HAVE_LINUX_IO_URING_H)
        __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);

        // Check if we should drain the queue
        $io_context * ctx = proc->io.ctx;
        unsigned head = *ctx->cq.head;
        unsigned tail = *ctx->cq.tail;
        if(head == tail) return false;
        #if OLD_MAIN
            ready_schedule_lock();
            ret = __cfa_io_drain( proc );
            ready_schedule_unlock();
        #else
            ret = __cfa_io_drain( proc );
        #endif
    #endif
    return ret;
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
    extern "C" {
        void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
            this.prev_name = prev_name;
            this.prev_thrd = kernelTLS().this_thread;
        }
    }
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
    return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
    void print_halts( processor & this ) {
        this.print_halts = true;
    }

    static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
        /* paranoid */ verify( cltr->stats );

        processor * it = &list`first;
        for(unsigned i = 0; i < count; i++) {
            /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
            /* paranoid */ verify( it->local_data->this_stats );
            __tally_stats( cltr->stats, it->local_data->this_stats );
            it = &(*it)`next;
        }
    }

    void crawl_cluster_stats( cluster & this ) {
        // Stop the world, otherwise stats could get really messed-up.
        // This doesn't solve all problems, but it does solve many,
        // so it's probably good enough.
        uint_fast32_t last_size = ready_mutate_lock();

        crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
        crawl_list(&this, this.procs.idles,   this.procs.idle);

        // Unlock the RWlock
        ready_mutate_unlock( last_size );
    }

    void print_stats_now( cluster & this, int flags ) {
        crawl_cluster_stats( this );
        __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
    }
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //