source: libcfa/src/concurrency/kernel.cfa@ 708ae38

Last change on this file since 708ae38 was 708ae38, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Some more cleanup and grow/shrink now readjusts io timestamps.
(They are still unused).

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"

//C Includes
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

extern "C" {
	#include <sys/eventfd.h>
	#include <sys/uio.h>
}

//CFA Includes
#include "kernel/private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"
#include "io/types.hfa"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"
#pragma GCC diagnostic pop

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS_DEF( ...) __VA_ARGS__
#else
	#define __STATS_DEF( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr), \
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr), \
			   "m" (__fcw) \
		)

#elif defined( __x86_64 )
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr), \
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr), \
			   "m" (__fcw) \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	#define __x87_store \
		uint32_t __fpcntl[2]; \
		__asm__ volatile ( \
			"mrs x9, FPCR\n" \
			"mrs x10, FPSR\n" \
			"stp x9, x10, %0\n" \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"ldp x9, x10, %0\n" \
			"msr FPSR, x10\n" \
			"msr FPCR, x9\n" \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif
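// Note: __x87_store/__x87_load are used in returnToKernel() below, around the context
// switch back to the processor coroutine. They save and restore the floating-point
// control state (mxcsr and the x87 control word on x86, FPCR/FPSR on aarch64),
// presumably so that one user thread's settings (e.g. rounding mode) do not leak into another.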

extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static thread$ * __next_thread_search(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
static bool mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);

extern void __cfa_io_start( processor * );
extern bool __cfa_io_drain( processor * );
extern bool __cfa_io_flush( processor *, int min_comp );
extern void __cfa_io_stop ( processor * );
static inline bool __maybe_io_drain( processor * );

#if defined(CFA_WITH_IO_URING_IDLE)
	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
#endif

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();


//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	__cfaabi_tls.rand_seed ^= rdtscl();
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	io_future_t future; // used for idle sleep when io_uring is present
	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
	eventfd_t idle_val;
	iovec idle_iovec = { &idle_val, sizeof(idle_val) };

	__cfa_io_start( this );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		thread$ * readyThread = 0p;
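		// Scheduling loop: each iteration drains pending io completions, then tries the fast
		// pop of the ready-queue. If nothing is found, io is flushed and the slower pop paths
		// are retried a few times before the processor marks itself idle, re-checks the
		// ready-queue one last time and, if it is still empty, goes to sleep.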
		MAIN_LOOP:
		for() {
			// Check if there is pending io
			__maybe_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this, 0 );

				readyThread = __next_thread( this->cltr );
			}

			if( !readyThread ) for(5) {
				__IO_STATS__(true, io.flush.idle++; )

				readyThread = __next_thread_slow( this->cltr );

				if( readyThread ) break;

				__cfa_io_flush( this, 0 );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				// Push self to idle stack
				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;

				// Confirm the ready-queue is empty
				readyThread = __next_thread_search( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS__(true, ready.sleep.cancels++; )

					// continue the main loop
					break HALT;
				}

				idle_sleep( this, future, idle_iovec );

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
				__IO_STATS__(true, io.flush.dirty++; )
				__cfa_io_flush( this, 0 );
			}
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	for(int i = 0; !available(future); i++) {
		if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has been spinning on a flush after exit loop.\n", 60);
		__cfa_io_flush( this, 1 );
	}

	__cfa_io_stop( this );

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set
		// and it makes sense for it to be set in all other cases except here
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	coroutine$ * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING: while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;
		thrd_dst->state = Active;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );



		// set context switch to the thread that the processor is executing
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine



		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// Reset global state
		kernelTLS().this_thread = 0p;

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst, UNPARK_LOCAL );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// The thread has halted, it should never be scheduled/run again
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				__STATS__(true, ready.threads.threads++; )
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	__STATS__(true, ready.threads.threads--; )

	/* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	thread$ * thrd_src = kernelTLS().this_thread;

	__STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )

	// Run the thread on this processor
	{
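		// Save errno and (on x86) the floating-point control state across the switch:
		// between switching out here and being switched back in, this processor may have
		// run other threads that changed them, so both are restored below on resumption.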
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( thrd->curr_cluster );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
					"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
					"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	// Dereference the thread now because once we push it, there is no guarantee it's still valid.
	struct cluster * cl = thrd->curr_cluster;
	__STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

	// push the thread to the cluster ready-queue
	push( cl, thrd, hint );

	// variable thrd is no longer safe to use
	thrd = 0xdeaddeaddeaddeadp;

	// wake the cluster using the saved variable.
	__wake_one( cl );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.threads.threads++;
			if(outside) {
				__tls_stats()->ready.threads.extunpark++;
			}
		}
		else {
			__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
	ready_schedule_lock();
	__schedule_thread( thrd, hint );
	ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd = pop_slow( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	thread$ * thrd = pop_search( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
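	// Park/unpark handshake: unparking increments the ticket, parking (the fetch_sub in
	// __run_thread above) decrements it. If the old value here is TICKET_RUNNING the thread
	// has not finished blocking yet; it will see TICKET_UNBLOCK when it does and re-run
	// itself, so the unparker has nothing to do. If it was TICKET_BLOCKED the unparker owns
	// the thread and must reschedule it.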
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
532 abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
533 }
534}
535
536void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
537 /* paranoid */ verify( ! __preemption_enabled() );
538 /* paranoid */ verify( ready_schedule_islocked());
539
540 if( !thrd ) return;
541
542 if(__must_unpark(thrd)) {
543 // Wake lost the race,
544 __schedule_thread( thrd, hint );
545 }
546
547 /* paranoid */ verify( ready_schedule_islocked());
548 /* paranoid */ verify( ! __preemption_enabled() );
549}
550
551void unpark( thread$ * thrd, unpark_hint hint ) {
552 if( !thrd ) return;
553
554 if(__must_unpark(thrd)) {
555 disable_interrupts();
556 // Wake lost the race,
557 schedule_thread$( thrd, hint );
558 enable_interrupts(false);
559 }
560}
561
562void park( void ) {
563 __disable_interrupts_checked();
564 /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
565 returnToKernel();
566 __enable_interrupts_checked();
567
568}
569
570extern "C" {
571 // Leave the thread monitor
572 // last routine called by a thread.
573 // Should never return
574 void __cfactx_thrd_leave() {
575 thread$ * thrd = active_thread();
576 monitor$ * this = &thrd->self_mon;
577
578 // Lock the monitor now
579 lock( this->lock __cfaabi_dbg_ctx2 );
580
581 disable_interrupts();
582
583 /* paranoid */ verify( ! __preemption_enabled() );
584 /* paranoid */ verify( thrd->state == Active );
585 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
586 /* paranoid */ verify( kernelTLS().this_thread == thrd );
587 /* paranoid */ verify( thrd->context.SP );
588 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
589 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
590
591 thrd->state = Halting;
592 if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
593 if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
594 if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
595
596 // Leave the thread
597 returnToKernel();
598
599 // Control flow should never reach here!
600 abort();
601 }
602}
603
604// KERNEL ONLY
605bool force_yield( __Preemption_Reason reason ) {
606 __disable_interrupts_checked();
607 thread$ * thrd = kernelTLS().this_thread;
608 /* paranoid */ verify(thrd->state == Active);
609
610 // SKULLDUGGERY: It is possible that we are preempting this thread just before
611 // it was going to park itself. If that is the case and it is already using the
612 // intrusive fields then we can't use them to preempt the thread
613 // If that is the case, abandon the preemption.
614 bool preempted = false;
615 if(thrd->link.next == 0p) {
616 preempted = true;
617 thrd->preempted = reason;
618 returnToKernel();
619 }
620 __enable_interrupts_checked( false );
621 return preempted;
622}
623
624//=============================================================================================
625// Kernel Idle Sleep
626//=============================================================================================
// Wake an idle processor from the front of the idle list, if there is one
static void __wake_one(cluster * this) {
	eventfd_t val;

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);

	// If no one is sleeping: we are done
	if( fdp == 0p ) return;

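	// Claim the wake-up by swapping 1 into the wait context's fd. The old value tells us
	// whether the processor was not yet committed to sleeping (0), someone else already
	// claimed the wake-up (1), or it is sleeping on that eventfd (any other value).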
	int fd = 1;
	if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
		fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
	}

	switch(fd) {
		case 0:
			// If the processor isn't ready to sleep then the exchange will already wake it up
			#if !defined(__CFA_NO_STATISTICS__)
				if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
				} else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
			#endif
			break;
		case 1:
			// If someone else already said they will wake them: we are done
			#if !defined(__CFA_NO_STATISTICS__)
				if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
				} else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
			#endif
			break;
		default:
			// If the processor was ready to sleep, we need to wake it up with an actual write
			val = 1;
			eventfd_write( fd, val );

			#if !defined(__CFA_NO_STATISTICS__)
				if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
				} else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
			#endif
			break;
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	this->idle_wctx.fd = 1;

	eventfd_t val;
	val = 1;
	eventfd_write( this->idle_fd, val );

	/* paranoid */ verify( ! __preemption_enabled() );
}

static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
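	// idle_wctx.fd acts as a small state machine: 0 means this processor is preparing to
	// sleep (set in mark_idle), 1 means a wake-up was requested (set by __wake_one/__wake_proc),
	// and the processor's own idle_fd means it has committed to sleeping on that eventfd.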
	// Tell everyone we are ready to go to sleep
	for() {
		int expected = this->idle_wctx.fd;

		// Someone already told us to wake-up! No time for a nap.
		if(expected == 1) { return; }

		// Try to mark that we are going to sleep
		if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
			// Everyone agreed, taking a nap
			break;
		}
	}


	#if !defined(CFA_WITH_IO_URING_IDLE)
		#if !defined(__CFA_NO_STATISTICS__)
			if(this->print_halts) {
				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
			}
		#endif

		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);

		{
			eventfd_t val;
			ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
			if(ret < 0) {
				switch((int)errno) {
					case EAGAIN:
					#if EAGAIN != EWOULDBLOCK
						case EWOULDBLOCK:
					#endif
					case EINTR:
						// No need to do anything special here, just assume it's a legitimate wake-up
						break;
					default:
						abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
				}
			}
		}

		#if !defined(__CFA_NO_STATISTICS__)
			if(this->print_halts) {
				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
			}
		#endif
	#else
		// Do we already have a pending read
		if(available(future)) {
			// There is no pending read, we need to add one
			reset(future);

			__kernel_read(this, future, iov, this->idle_fd );
		}

		__cfa_io_flush( this, 1 );
	#endif
}

static bool mark_idle(__cluster_proc_list & this, processor & proc) {
	__STATS__(true, ready.sleep.halts++; )

	proc.idle_wctx.fd = 0;

	/* paranoid */ verify( ! __preemption_enabled() );
	if(!try_lock( this )) return false;
		this.idle++;
		/* paranoid */ verify( this.idle <= this.total );
		remove(proc);
		insert_first(this.idles, proc);

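		// Publish this processor's wait context so __wake_one() can find a processor to wake.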
		__atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );

	return true;
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle--;
		/* paranoid */ verify( this.idle >= 0 );
		remove(proc);
		insert_last(this.actives, proc);

		{
			struct __fd_waitctx * wctx = 0;
			if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
			__atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
		}

	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	thread$ * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

static inline bool __maybe_io_drain( processor * proc ) {
	bool ret = false;
	#if defined(CFA_HAVE_LINUX_IO_URING_H)
		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);

		// Check if we should drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
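		// head == tail means the completion queue is empty, so we can skip both the drain
		// and the ready-scheduler read-lock it would require.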
		if(head == tail) return false;
		ready_schedule_lock();
		ret = __cfa_io_drain( proc );
		ready_schedule_unlock();
	#endif
	return ret;
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS().this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed-up
		// this doesn't solve all problems but does solve many
		// so it's probably good enough
		disable_interrupts();
		uint_fast32_t last_size = ready_mutate_lock();

			crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
			crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
		enable_interrupts();
	}


	void print_stats_now( cluster & this, int flags ) {
		crawl_cluster_stats( this );
		__print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //