source: libcfa/src/concurrency/kernel.cfa@ a9172b5

Last change: a9172b5, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Removed push stats in alternate main

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

// C Includes
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
extern "C" {
    #include <sys/eventfd.h>
}

// CFA Includes
#include "kernel_private.hfa"
#include "preemption.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

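// __STATS(...) expands to its arguments only when statistics are compiled in,
// so statistic updates can be sprinkled through the code without repeating
// #if !defined(__CFA_NO_STATISTICS__) blocks at every use site.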
#if !defined(__CFA_NO_STATISTICS__)
    #define __STATS( ...) __VA_ARGS__
#else
    #define __STATS( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
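// The machine-context switch (__cfactx_switch) does not save the
// floating-point control state: mxcsr and the x87 control word on x86,
// FPCR/FPSR on aarch64. The ABI preserves these across ordinary calls, but
// another thread may change them while this one is switched out, so the
// kernel saves and restores them explicitly around the switch (see
// returnToKernel below).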
#if defined( __i386 )
    // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
    // fcw  : X87 FPU control word (preserved across function calls)
    #define __x87_store \
        uint32_t __mxcr; \
        uint16_t __fcw; \
        __asm__ volatile ( \
            "stmxcsr %0\n" \
            "fnstcw %1\n" \
            : "=m" (__mxcr), \
              "=m" (__fcw) \
        )

    #define __x87_load \
        __asm__ volatile ( \
            "fldcw %1\n" \
            "ldmxcsr %0\n" \
            :: "m" (__mxcr), \
               "m" (__fcw) \
        )

#elif defined( __x86_64 )
    #define __x87_store \
        uint32_t __mxcr; \
        uint16_t __fcw; \
        __asm__ volatile ( \
            "stmxcsr %0\n" \
            "fnstcw %1\n" \
            : "=m" (__mxcr), \
              "=m" (__fcw) \
        )

    #define __x87_load \
        __asm__ volatile ( \
            "fldcw %1\n" \
            "ldmxcsr %0\n" \
            :: "m" (__mxcr), \
               "m" (__fcw) \
        )

#elif defined( __arm__ )
    #define __x87_store
    #define __x87_load

#elif defined( __aarch64__ )
    // FPCR/FPSR are read through 64-bit x registers; "stp x9, x10" writes
    // 16 bytes, so the save buffer must hold two 64-bit words.
    #define __x87_store \
        uint64_t __fpcntl[2]; \
        __asm__ volatile ( \
            "mrs x9, FPCR\n" \
            "mrs x10, FPSR\n" \
            "stp x9, x10, %0\n" \
            : "=m" (__fpcntl) : : "x9", "x10" \
        )

    #define __x87_load \
        __asm__ volatile ( \
            "ldp x9, x10, %0\n" \
            "msr FPSR, x10\n" \
            "msr FPCR, x9\n" \
            :: "m" (__fpcntl) : "x9", "x10" \
        )

#else
    #error unsupported hardware architecture
#endif

extern $thread * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static $thread * __next_thread(cluster * this);
static $thread * __next_thread_slow(cluster * this);
static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, $thread * dst);
static void __wake_one(cluster * cltr);

static void mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);
static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

extern void __cfa_io_start( processor * );
extern bool __cfa_io_drain( processor * );
extern void __cfa_io_flush( processor * );
extern void __cfa_io_stop ( processor * );
static inline bool __maybe_io_drain( processor * );

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();

static inline void __disable_interrupts_checked() {
    /* paranoid */ verify( __preemption_enabled() );
    disable_interrupts();
    /* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    enable_interrupts( poll );
    /* paranoid */ verify( __preemption_enabled() );
}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
// Main of the processor contexts
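// Each processor runs this coroutine main. One iteration of MAIN_LOOP:
//   1. drain any completed I/O for this processor,
//   2. try the fast-path pop from the cluster ready-queue,
//   3. on failure, flush submitted I/O and try the slow path,
//   4. if still idle, publish this processor on the idle list and block on its
//      eventfd until __wake_one()/__wake_proc() writes to it.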
void main(processorCtx_t & runner) {
    // Because of a bug, the random-number seed could not be initialized at
    // construction, so initialize it here.
    __cfaabi_tls.rand_seed ^= rdtscl();
    __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
    __tls_rand_advance_bck();

    processor * this = runner.proc;
    verify(this);

    __cfa_io_start( this );

    __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
    #if !defined(__CFA_NO_STATISTICS__)
        if( this->print_halts ) {
            __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
        }
    #endif

    {
        // Setup preemption data
        preemption_scope scope = { this };

        // if we need to run some special setup, now is the time to do it.
        if(this->init.thrd) {
            this->init.thrd->curr_cluster = this->cltr;
            __run_thread(this, this->init.thrd);
        }

        __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

        $thread * readyThread = 0p;
        MAIN_LOOP:
        for() {
            #if 1
            // Check if there is pending io
            __maybe_io_drain( this );

            // Try to get the next thread
            readyThread = __next_thread( this->cltr );

            if( !readyThread ) {
                __cfa_io_flush( this );
                readyThread = __next_thread_slow( this->cltr );
            }

            HALT:
            if( !readyThread ) {
                // Don't block if we are done
                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

                #if !defined(__CFA_NO_STATISTICS__)
                    __tls_stats()->ready.sleep.halts++;
                #endif

                // Push self to idle stack
                mark_idle(this->cltr->procs, * this);

                // Confirm the ready-queue is empty
                readyThread = __next_thread_slow( this->cltr );
                if( readyThread ) {
                    // A thread was found, cancel the halt
                    mark_awake(this->cltr->procs, * this);

                    #if !defined(__CFA_NO_STATISTICS__)
                        __tls_stats()->ready.sleep.cancels++;
                    #endif

                    // continue the main loop
                    break HALT;
                }

                #if !defined(__CFA_NO_STATISTICS__)
                    if(this->print_halts) {
                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
                    }
                #endif

                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

                __disable_interrupts_hard();
                eventfd_t val;
                eventfd_read( this->idle, &val );
                __enable_interrupts_hard();

                #if !defined(__CFA_NO_STATISTICS__)
                    if(this->print_halts) {
                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
                    }
                #endif

                // We were woken up, remove self from idle
                mark_awake(this->cltr->procs, * this);

                // DON'T just proceed, start looking again
                continue MAIN_LOOP;
            }

            /* paranoid */ verify( readyThread );

            // Reset io dirty bit
            this->io.dirty = false;

            // We found a thread, run it
            __run_thread(this, readyThread);

            // Are we done?
            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

            if(this->io.pending && !this->io.dirty) {
                __cfa_io_flush( this );
            }

            #else

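            // NOTE: the following SEARCH-based loop is an alternate scheduler
            // main; it is dead code while the `#if 1` above is in effect and
            // appears to be kept for comparison and experimentation.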
            SEARCH: {
                /* paranoid */ verify( ! __preemption_enabled() );

                // First, lock the scheduler since we are searching for a thread
                ready_schedule_lock();

                // Try to get the next thread
                readyThread = pop_fast( this->cltr );
                if(readyThread) { ready_schedule_unlock(); break SEARCH; }

                // If we can't find a thread, might as well flush any outstanding I/O
                if(this->io.pending) { __cfa_io_flush( this ); }

                // Spin a little on I/O, just in case
                for(25) {
                    __maybe_io_drain( this );
                    readyThread = pop_fast( this->cltr );
                    if(readyThread) { ready_schedule_unlock(); break SEARCH; }
                }

                // no luck, try stealing a few times
                for(25) {
                    if( __maybe_io_drain( this ) ) {
                        readyThread = pop_fast( this->cltr );
                    } else {
                        readyThread = pop_slow( this->cltr );
                    }
                    if(readyThread) { ready_schedule_unlock(); break SEARCH; }
                }

                // still no luck, search for a thread
                readyThread = pop_search( this->cltr );
                if(readyThread) { ready_schedule_unlock(); break SEARCH; }

                // Don't block if we are done
                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

                __STATS( __tls_stats()->ready.sleep.halts++; )

                // Push self to idle stack
                ready_schedule_unlock();
                mark_idle(this->cltr->procs, * this);
                ready_schedule_lock();

                // Confirm the ready-queue is empty
                __maybe_io_drain( this );
                readyThread = pop_search( this->cltr );
                ready_schedule_unlock();

                if( readyThread ) {
                    // A thread was found, cancel the halt
                    mark_awake(this->cltr->procs, * this);

                    __STATS( __tls_stats()->ready.sleep.cancels++; )

                    // continue the main loop
                    break SEARCH;
                }

                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

                // __disable_interrupts_hard();
                eventfd_t val;
                eventfd_read( this->idle, &val );
                // __enable_interrupts_hard();

                __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )

                // We were woken up, remove self from idle
                mark_awake(this->cltr->procs, * this);

                // DON'T just proceed, start looking again
                continue MAIN_LOOP;
            }

            RUN_THREAD:
            /* paranoid */ verify( ! __preemption_enabled() );
            /* paranoid */ verify( readyThread );

            // Reset io dirty bit
            this->io.dirty = false;

            // We found a thread, run it
            __run_thread(this, readyThread);

            // Are we done?
            if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

            if(this->io.pending && !this->io.dirty) {
                __cfa_io_flush( this );
            }

            ready_schedule_lock();
            __maybe_io_drain( this );
            ready_schedule_unlock();
            #endif
        }

        __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
    }

    __cfa_io_stop( this );

    post( this->terminated );

    if(this == mainProcessor) {
        // HACK: the coroutine context switch expects this_thread to be set,
        // and it makes sense for it to be set in all other cases except here;
        // fake it.
        __cfaabi_tls.this_thread = mainThread;
    }

    __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

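// Accessing errno through a noinline function containing an asm barrier keeps
// the compiler from caching the errno value or its address across the context
// switch in returnToKernel() below.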
static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// __run_thread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, $thread * thrd_dst) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
    /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
    __builtin_prefetch( thrd_dst->context.SP );

    __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

    $coroutine * proc_cor = get_coroutine(this->runner);

    // set state of processor coroutine to inactive
    verify(proc_cor->state == Active);
    proc_cor->state = Blocked;

    // Actually run the thread
    RUNNING: while(true) {
        thrd_dst->preempted = __NO_PREEMPTION;
        thrd_dst->state = Active;

        // Update global state
        kernelTLS().this_thread = thrd_dst;

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
        /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
        /* paranoid */ verify( thrd_dst->context.SP );
        /* paranoid */ verify( thrd_dst->state != Halted );
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

        // context switch to the thread that the processor should execute
        __cfactx_switch( &proc_cor->context, &thrd_dst->context );
        // when __cfactx_switch returns we are back in the processor coroutine

        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
        /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
        /* paranoid */ verify( thrd_dst->context.SP );
        /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
        /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
        /* paranoid */ verify( ! __preemption_enabled() );

        // Reset global state
        kernelTLS().this_thread = 0p;

        // We just finished running a thread; there are a few things that could have happened.
        // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
        // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
        // 3 - Preempted
        // In case 1, we may have won a race so we can't write to the state again.
        // In case 2, we lost the race so we now own the thread.

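        // The ticket field disambiguates cases 1 and 2: the fetch_sub below
        // pairs with the fetch_add in __must_unpark(). Whichever side performs
        // its atomic operation second sees the other's value and takes
        // responsibility for rescheduling the thread.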
        if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
            // The thread was preempted, reschedule it and reset the flag
            schedule_thread$( thrd_dst );
            break RUNNING;
        }

        if(unlikely(thrd_dst->state == Halting)) {
            // The thread has halted, it should never be scheduled/run again
            // finish the thread
            __thread_finish( thrd_dst );
            break RUNNING;
        }

        /* paranoid */ verify( thrd_dst->state == Active );
        thrd_dst->state = Blocked;

        // set state of processor coroutine to active and the thread to inactive
        int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
        switch(old_ticket) {
            case TICKET_RUNNING:
                // This is case 1, the regular case, nothing more is needed
                break RUNNING;
            case TICKET_UNBLOCK:
                #if !defined(__CFA_NO_STATISTICS__)
                    __tls_stats()->ready.threads.threads++;
                    __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this );
                #endif
                // This is case 2, the racy case: someone tried to run this thread before it finished blocking.
                // In this case, just run it again.
                continue RUNNING;
            default:
                // This makes no sense; something is wrong, abort
                abort();
        }
    }

    // Just before returning to the processor, set the processor coroutine to active
    proc_cor->state = Active;

    __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

    #if !defined(__CFA_NO_STATISTICS__)
        __tls_stats()->ready.threads.threads--;
        __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this );
    #endif

    /* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
void returnToKernel() {
    /* paranoid */ verify( ! __preemption_enabled() );
    $coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
    $thread * thrd_src = kernelTLS().this_thread;

    __STATS( thrd_src->last_proc = kernelTLS().this_processor; )

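    // errno is per kernel thread, but conceptually belongs to the user thread:
    // while this thread is parked, other threads run on this processor and may
    // clobber errno. Likewise, the floating-point control state is not saved
    // by __cfactx_switch, so both are saved here and restored after the switch.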
    // Run the thread on this processor
    {
        int local_errno = *__volatile_errno();
        #if defined( __i386 ) || defined( __x86_64 )
            __x87_store;
        #endif
        /* paranoid */ verify( proc_cor->context.SP );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
        __cfactx_switch( &thrd_src->context, &proc_cor->context );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
        #if defined( __i386 ) || defined( __x86_64 )
            __x87_load;
        #endif
        *__volatile_errno() = local_errno;
    }

    #if !defined(__CFA_NO_STATISTICS__)
        /* paranoid */ verify( thrd_src->last_proc != 0p );
        if(thrd_src->last_proc != kernelTLS().this_processor) {
            __tls_stats()->ready.threads.migration++;
        }
    #endif

    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( $thread * thrd ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked());
    /* paranoid */ verify( thrd );
    /* paranoid */ verify( thrd->state != Halted );
    /* paranoid */ verify( thrd->curr_cluster );
    /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
    /* paranoid */     if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
                           "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
    /* paranoid */     if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
                           "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    /* paranoid */ #endif
    /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
    /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

    if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

    // Read from the thread now, because once it is pushed there is no guarantee it is still valid.
    struct cluster * cl = thrd->curr_cluster;
    __STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

    // push the thread to the cluster ready-queue
    push( cl, thrd );

    // variable thrd is no longer safe to use
    thrd = 0xdeaddeaddeaddeadp;

    // wake the cluster using the saved variable
    __wake_one( cl );

    #if !defined(__CFA_NO_STATISTICS__)
        if( kernelTLS().this_stats ) {
            __tls_stats()->ready.threads.threads++;
            if(outside) {
                __tls_stats()->ready.threads.extunpark++;
            }
            __push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", kernelTLS().this_processor );
        }
        else {
            __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
            __atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
            __push_stat( cl->stats, cl->stats->ready.threads.threads, true, "Cluster", cl );
        }
    #endif

    /* paranoid */ verify( ready_schedule_islocked());
    /* paranoid */ verify( ! __preemption_enabled() );
}

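// Public variant: acquires the ready-queue reader lock around __schedule_thread.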
void schedule_thread$( $thread * thrd ) {
    ready_schedule_lock();
    __schedule_thread( thrd );
    ready_schedule_unlock();
}

// KERNEL ONLY
static inline $thread * __next_thread(cluster * this) with( *this ) {
    /* paranoid */ verify( ! __preemption_enabled() );

    ready_schedule_lock();
    $thread * thrd = pop_fast( this );
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
    return thrd;
}

// KERNEL ONLY
static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
    /* paranoid */ verify( ! __preemption_enabled() );

    ready_schedule_lock();
    $thread * thrd;
    for(25) {
        thrd = pop_slow( this );
        if(thrd) goto RET;
    }
    thrd = pop_search( this );

    RET:
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
    return thrd;
}

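// Decide whether an unpark must actually schedule the thread. The fetch_add
// pairs with the fetch_sub in __run_thread(): if the old ticket is
// TICKET_RUNNING, the thread has not finished blocking yet and will see our
// increment and reschedule itself; if it is TICKET_BLOCKED, the thread is
// fully blocked and the caller owns the wakeup.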
static inline bool __must_unpark( $thread * thrd ) {
    int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
    switch(old_ticket) {
        case TICKET_RUNNING:
            // Wake won the race, the thread will reschedule/rerun itself
            return false;
        case TICKET_BLOCKED:
            /* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
            /* paranoid */ verify( thrd->state == Blocked );
            return true;
        default:
            // This makes no sense; something is wrong, abort
            abort("Thread %p (%s) has mismatched park()/unpark()\n", thrd, thrd->self_cor.name);
    }
}

void __kernel_unpark( $thread * thrd ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked());

    if( !thrd ) return;

    if(__must_unpark(thrd)) {
        // Wake lost the race, schedule the thread
        __schedule_thread( thrd );
    }

    /* paranoid */ verify( ready_schedule_islocked());
    /* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( $thread * thrd ) {
    if( !thrd ) return;

    if(__must_unpark(thrd)) {
        disable_interrupts();
        // Wake lost the race, schedule the thread
        schedule_thread$( thrd );
        enable_interrupts(false);
    }
}

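// park/unpark form the low-level blocking primitive. Thanks to the ticket
// handshake above, an unpark that arrives while the thread is still running is
// not lost: the thread observes it when it finishes blocking and reruns itself.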
void park( void ) {
    __disable_interrupts_checked();
    /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
    returnToKernel();
    __enable_interrupts_checked();
}

extern "C" {
    // Leave the thread monitor
    // last routine called by a thread.
    // Should never return
    void __cfactx_thrd_leave() {
        $thread * thrd = active_thread();
        $monitor * this = &thrd->self_mon;

        // Lock the monitor now
        lock( this->lock __cfaabi_dbg_ctx2 );

        disable_interrupts();

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( thrd->state == Active );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
        /* paranoid */ verify( kernelTLS().this_thread == thrd );
        /* paranoid */ verify( thrd->context.SP );
        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );

        thrd->state = Halting;
        if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
        if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
        if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

        // Leave the thread
        returnToKernel();

        // Control flow should never reach here!
        abort();
    }
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
    __disable_interrupts_checked();
    $thread * thrd = kernelTLS().this_thread;
    /* paranoid */ verify(thrd->state == Active);

    // SKULLDUGGERY: It is possible that we are preempting this thread just before
    // it was going to park itself. If that is the case and it is already using the
    // intrusive fields, then we cannot use them to preempt the thread, so abandon
    // the preemption.
    bool preempted = false;
    if(thrd->link.next == 0p) {
        preempted = true;
        thrd->preempted = reason;
        returnToKernel();
    }
    __enable_interrupts_checked( false );
    return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
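// Idle processors publish themselves through mark_idle() and block on a
// per-processor eventfd in the main loop above; waking a processor is simply a
// write to that eventfd. query_idles() snapshots the idle list under a
// sequence-lock-style retry loop so wakers never block.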
// Wake one idle processor from the front of the idle list, if there are any
static void __wake_one(cluster * this) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked() );

    // Check if there is a sleeping processor
    processor * p;
    unsigned idle;
    unsigned total;
    [idle, total, p] = query_idles(this->procs);

    // If no one is sleeping, we are done
    if( idle == 0 ) return;

    // We found a processor, wake it up
    eventfd_t val;
    val = 1;
    eventfd_write( p->idle, val );

    #if !defined(__CFA_NO_STATISTICS__)
        if( kernelTLS().this_stats ) {
            __tls_stats()->ready.sleep.wakes++;
        }
        else {
            __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
        }
    #endif

    /* paranoid */ verify( ready_schedule_islocked() );
    /* paranoid */ verify( ! __preemption_enabled() );

    return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
    __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

    __disable_interrupts_checked();
    /* paranoid */ verify( ! __preemption_enabled() );
    eventfd_t val;
    val = 1;
    eventfd_write( this->idle, val );
    __enable_interrupts_checked();
}

static void mark_idle(__cluster_proc_list & this, processor & proc) {
    /* paranoid */ verify( ! __preemption_enabled() );
    lock( this );
    this.idle++;
    /* paranoid */ verify( this.idle <= this.total );
    remove(proc);
    insert_first(this.idles, proc);
    unlock( this );
    /* paranoid */ verify( ! __preemption_enabled() );
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
    /* paranoid */ verify( ! __preemption_enabled() );
    lock( this );
    this.idle--;
    /* paranoid */ verify( this.idle >= 0 );
    remove(proc);
    insert_last(this.actives, proc);
    unlock( this );
    /* paranoid */ verify( ! __preemption_enabled() );
}

static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ready_schedule_islocked() );

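    // Seqlock-style read: the low bit of the lock word marks a writer in
    // progress. Read the fields, then re-check the lock word; retry if a
    // writer was active or intervened.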
    for() {
        uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
        if( 1 == (l % 2) ) { Pause(); continue; }
        unsigned idle = this.idle;
        unsigned total = this.total;
        processor * proc = &this.idles`first;
        // A compiler fence should be unnecessary here, but gcc-8 and older incorrectly reorder the code without it
        asm volatile("": : :"memory");
        if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
        return [idle, total, proc];
    }

    /* paranoid */ verify( ready_schedule_islocked() );
    /* paranoid */ verify( ! __preemption_enabled() );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
    $thread * thrd = __cfaabi_tls.this_thread;

    if(thrd) {
        int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );

        if ( &thrd->self_cor != thrd->curr_cor ) {
            len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
            __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
        }
        else {
            __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
        }
    }
    else {
        int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
    }
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
    void __cfaabi_bits_acquire() {
        lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
    }

    void __cfaabi_bits_release() {
        unlock( kernel_debug_lock );
    }
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

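// Peek at the io_uring completion queue; if the head and tail pointers differ,
// completions are pending, so take the scheduler read-lock and drain them.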
static inline bool __maybe_io_drain( processor * proc ) {
    bool ret = false;
    #if defined(CFA_HAVE_LINUX_IO_URING_H)
        __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);

        // Check if we should drain the queue
        $io_context * ctx = proc->io.ctx;
        unsigned head = *ctx->cq.head;
        unsigned tail = *ctx->cq.tail;
        if(head == tail) return false;
        ready_schedule_lock();
        ret = __cfa_io_drain( proc );
        ready_schedule_unlock();
    #endif
    return ret;
}

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
    extern "C" {
        void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
            this.prev_name = prev_name;
            this.prev_thrd = kernelTLS().this_thread;
        }
    }
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
    return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
    void print_halts( processor & this ) {
        this.print_halts = true;
    }

    static void crawl_list( cluster * cltr, dlist(processor, processor) & list, unsigned count ) {
        /* paranoid */ verify( cltr->stats );

        processor * it = &list`first;
        for(unsigned i = 0; i < count; i++) {
            /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
            /* paranoid */ verify( it->local_data->this_stats );
            __tally_stats( cltr->stats, it->local_data->this_stats );
            it = &(*it)`next;
        }
    }

    void crawl_cluster_stats( cluster & this ) {
        // Stop the world; otherwise the stats could get badly inconsistent.
        // This does not solve every race, but it does solve most of them, so
        // it is probably good enough.
        uint_fast32_t last_size = ready_mutate_lock();

        crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
        crawl_list(&this, this.procs.idles  , this.procs.idle );

        // Unlock the RWlock
        ready_mutate_unlock( last_size );
    }

    void print_stats_now( cluster & this, int flags ) {
        crawl_cluster_stats( this );
        __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
    }
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //