source: libcfa/src/concurrency/kernel.cfa@ a9172b5

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

// C Includes
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
extern "C" {
	#include <sys/eventfd.h>
}

// CFA Includes
#include "kernel_private.hfa"
#include "preemption.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS( ...) __VA_ARGS__
#else
	#define __STATS( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store       \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"    \
			"fnstcw  %1\n"    \
			: "=m" (__mxcr),  \
			  "=m" (__fcw)    \
		)

	#define __x87_load        \
		__asm__ volatile (    \
			"fldcw  %1\n"     \
			"ldmxcsr %0\n"    \
			:: "m" (__mxcr),  \
			   "m" (__fcw)    \
		)

#elif defined( __x86_64 )
	#define __x87_store       \
		uint32_t __mxcr;      \
		uint16_t __fcw;       \
		__asm__ volatile (    \
			"stmxcsr %0\n"    \
			"fnstcw  %1\n"    \
			: "=m" (__mxcr),  \
			  "=m" (__fcw)    \
		)

	#define __x87_load        \
		__asm__ volatile (    \
			"fldcw  %1\n"     \
			"ldmxcsr %0\n"    \
			:: "m" (__mxcr),  \
			   "m" (__fcw)    \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	#define __x87_store               \
		uint32_t __fpcntl[2];         \
		__asm__ volatile (            \
			"mrs x9, FPCR\n"          \
			"mrs x10, FPSR\n"         \
			"stp x9, x10, %0\n"       \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	// __fpcntl is read here, so it is an input operand ("m"), not an output
	#define __x87_load                \
		__asm__ volatile (            \
			"ldp x9, x10, %0\n"       \
			"msr FPSR, x10\n"         \
			"msr FPCR, x9\n"          \
			: : "m" (__fpcntl) : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif
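
// Note: __x87_store / __x87_load are used in matched pairs around a context
// switch (see returnToKernel below): save the FP control state before
// switching away, reload it once control returns. A minimal sketch of the
// intended pattern (example_switch is hypothetical, not part of this file):
//
//     __x87_store;        // declares the locals and saves MXCSR/FPCR state
//     example_switch();   // control leaves this kernel thread
//     __x87_load;         // restores the saved control state
//
// Both macros must appear in the same scope, since __x87_store declares the
// locals that __x87_load reads.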

extern $thread * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static $thread * __next_thread(cluster * this);
static $thread * __next_thread_slow(cluster * this);
static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, $thread * dst);
static void __wake_one(cluster * cltr);

static void mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);
static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

extern void __cfa_io_start( processor * );
extern bool __cfa_io_drain( processor * );
extern void __cfa_io_flush( processor * );
extern void __cfa_io_stop ( processor * );
static inline bool __maybe_io_drain( processor * );

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();

static inline void __disable_interrupts_checked() {
	/* paranoid */ verify( __preemption_enabled() );
	disable_interrupts();
	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	enable_interrupts( poll );
	/* paranoid */ verify( __preemption_enabled() );
}
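
// A minimal sketch of how the checked helpers above are meant to be used
// (example_critical_section is hypothetical, not part of this file):
//
//     static void example_critical_section() {
//         __disable_interrupts_checked();   // asserts preemption was enabled
//         // ... touch processor-local state without fear of preemption ...
//         __enable_interrupts_checked();    // asserts preemption was disabled
//     }
//
// The verifies turn mismatched enable/disable pairs into immediate failures
// in debug builds instead of silent state corruption.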

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	__cfaabi_tls.rand_seed ^= rdtscl();
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	__cfa_io_start( this );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		$thread * readyThread = 0p;
		MAIN_LOOP:
		for() {
			#if 1
			// Check if there is pending io
			__maybe_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				__cfa_io_flush( this );
				readyThread = __next_thread_slow( this->cltr );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.sleep.halts++;
				#endif

				// Push self to idle stack
				mark_idle(this->cltr->procs, * this);

				// Confirm the ready-queue is empty
				readyThread = __next_thread_slow( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					#if !defined(__CFA_NO_STATISTICS__)
						__tls_stats()->ready.sleep.cancels++;
					#endif

					// continue the main loop
					break HALT;
				}

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
					}
				#endif

				__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

				__disable_interrupts_hard();
				eventfd_t val;
				eventfd_read( this->idle, &val );
				__enable_interrupts_hard();

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
					}
				#endif

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(this->io.pending && !this->io.dirty) {
				__cfa_io_flush( this );
			}

			#else

			SEARCH: {
				/* paranoid */ verify( ! __preemption_enabled() );

				// First, lock the scheduler since we are searching for a thread
				ready_schedule_lock();

				// Try to get the next thread
				readyThread = pop_fast( this->cltr );
				if(readyThread) { ready_schedule_unlock(); break SEARCH; }

				// If we can't find a thread, might as well flush any outstanding I/O
				if(this->io.pending) { __cfa_io_flush( this ); }

				// Spin a little on I/O, just in case
				for(25) {
					__maybe_io_drain( this );
					readyThread = pop_fast( this->cltr );
					if(readyThread) { ready_schedule_unlock(); break SEARCH; }
				}

				// no luck, try stealing a few times
				for(25) {
					if( __maybe_io_drain( this ) ) {
						readyThread = pop_fast( this->cltr );
					} else {
						readyThread = pop_slow( this->cltr );
					}
					if(readyThread) { ready_schedule_unlock(); break SEARCH; }
				}

				// still no luck, search for a thread
				readyThread = pop_search( this->cltr );
				if(readyThread) { ready_schedule_unlock(); break SEARCH; }

				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				__STATS( __tls_stats()->ready.sleep.halts++; )

				// Push self to idle stack
				ready_schedule_unlock();
				mark_idle(this->cltr->procs, * this);
				ready_schedule_lock();

				// Confirm the ready-queue is empty
				__maybe_io_drain( this );
				readyThread = pop_search( this->cltr );
				ready_schedule_unlock();

				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS( __tls_stats()->ready.sleep.cancels++; )

					// continue the main loop
					break SEARCH;
				}

				__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
				__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);

				// __disable_interrupts_hard();
				eventfd_t val;
				eventfd_read( this->idle, &val );
				// __enable_interrupts_hard();

				__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			RUN_THREAD:
			/* paranoid */ verify( ! __preemption_enabled() );
			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(this->io.pending && !this->io.dirty) {
				__cfa_io_flush( this );
			}

			ready_schedule_lock();
			__maybe_io_drain( this );
			ready_schedule_unlock();
			#endif
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	__cfa_io_stop( this );

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set
		// and it makes sense for it to be set in all other cases except here
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// __run_thread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, $thread * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	$coroutine * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING: while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;
		thrd_dst->state = Active;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

		// context switch to the thread that the processor is to execute
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// Reset global state
		kernelTLS().this_thread = 0p;

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// The thread has halted, it should never be scheduled/run again
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.threads.threads++;
					__push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this );
				#endif
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.threads.threads--;
		__push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", this );
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	$coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	$thread * thrd_src = kernelTLS().this_thread;

	__STATS( thrd_src->last_proc = kernelTLS().this_processor; )

	// Run the thread on this processor
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}
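
// Note on the errno dance above: errno is thread-local to the *kernel* thread,
// but after __cfactx_switch the user thread may resume on a different processor
// (the ready.threads.migration counter above tracks exactly that case). Saving
// errno into a stack local before the switch and writing it back afterwards
// makes errno behave as if it belonged to the user thread. A sketch of the
// failure this prevents (values are illustrative):
//
//     open(...) fails on processor A, errno = ENOENT
//     the thread blocks, is unparked, and resumes on processor B
//     without the save/restore, the thread would read B's stale errno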

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( $thread * thrd ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( thrd->curr_cluster );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
	/* paranoid */ 		"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ 	if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
	/* paranoid */ 		"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	// Dereference the thread now because once we push it, there is no guarantee it is still valid.
	struct cluster * cl = thrd->curr_cluster;
	__STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

	// push the thread to the cluster ready-queue
	push( cl, thrd );

	// variable thrd is no longer safe to use
	thrd = 0xdeaddeaddeaddeadp;

	// wake the cluster using the saved variable.
	__wake_one( cl );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.threads.threads++;
			if(outside) {
				__tls_stats()->ready.threads.extunpark++;
			}
			__push_stat( __tls_stats(), __tls_stats()->ready.threads.threads, false, "Processor", kernelTLS().this_processor );
		}
		else {
			__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
			__push_stat( cl->stats, cl->stats->ready.threads.threads, true, "Cluster", cl );
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( $thread * thrd ) {
	ready_schedule_lock();
	__schedule_thread( thrd );
	ready_schedule_unlock();
}

// KERNEL ONLY
static inline $thread * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	$thread * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
	$thread * thrd;
	for(25) {
		thrd = pop_slow( this );
		if(thrd) goto RET;
	}
	thrd = pop_search( this );

	RET:
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( $thread * thrd ) {
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
			abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
	}
}
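
// The ticket protocol in a nutshell: thrd->ticket acts as a tiny state machine
// shared by the parking side (__run_thread, fetch_sub) and the waking side
// (__must_unpark, fetch_add). A sketch of the two interleavings, using the
// ticket names from the source:
//
//     park completes first, then unpark (case 1):
//         run side  : fetch_sub sees TICKET_RUNNING -> thread is fully blocked
//         wake side : fetch_add sees TICKET_BLOCKED -> returns true, reschedules it
//
//     unpark races ahead of park (case 2):
//         wake side : fetch_add sees TICKET_RUNNING -> returns false, does nothing
//         run side  : fetch_sub sees TICKET_UNBLOCK -> reruns the thread itself
//
// Either way exactly one side takes responsibility for the next run, so no
// wakeup is lost and no thread is queued twice.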

void __kernel_unpark( $thread * thrd ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());

	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		// Wake lost the race,
		__schedule_thread( thrd );
	}

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( $thread * thrd ) {
	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		disable_interrupts();
		// Wake lost the race,
		schedule_thread$( thrd );
		enable_interrupts(false);
	}
}

void park( void ) {
	__disable_interrupts_checked();
	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
	returnToKernel();
	__enable_interrupts_checked();
}

extern "C" {
	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __cfactx_thrd_leave() {
		$thread * thrd = active_thread();
		$monitor * this = &thrd->self_mon;

		// Lock the monitor now
		lock( this->lock __cfaabi_dbg_ctx2 );

		disable_interrupts();

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( thrd->state == Active );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
		/* paranoid */ verify( kernelTLS().this_thread == thrd );
		/* paranoid */ verify( thrd->context.SP );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );

		thrd->state = Halting;
		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

		// Leave the thread
		returnToKernel();

		// Control flow should never reach here!
		abort();
	}
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	__disable_interrupts_checked();
	$thread * thrd = kernelTLS().this_thread;
	/* paranoid */ verify(thrd->state == Active);

	// SKULLDUGGERY: It is possible that we are preempting this thread just before
	// it was going to park itself. If that is the case and it is already using the
	// intrusive fields then we can't use them to preempt the thread
	// If that is the case, abandon the preemption.
	bool preempted = false;
	if(thrd->link.next == 0p) {
		preempted = true;
		thrd->preempted = reason;
		returnToKernel();
	}
	__enable_interrupts_checked( false );
	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake a processor from the front of the idle list, if there is one
static void __wake_one(cluster * this) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	processor * p;
	unsigned idle;
	unsigned total;
	[idle, total, p] = query_idles(this->procs);

	// If no one is sleeping, we are done
	if( idle == 0 ) return;

	// We found a processor, wake it up
	eventfd_t val;
	val = 1;
	eventfd_write( p->idle, val );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.sleep.wakes++;
		}
		else {
			__atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	__disable_interrupts_checked();
	/* paranoid */ verify( ! __preemption_enabled() );
	eventfd_t val;
	val = 1;
	eventfd_write( this->idle, val );
	__enable_interrupts_checked();
}
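
// The idle/wake handshake above is plain Linux eventfd: a parked processor
// blocks in eventfd_read() (see the MAIN_LOOP HALT path) and a waker posts
// eventfd_write(fd, 1). The following stand-alone sketch demonstrates just
// that handshake with two pthreads; it is illustrative only and compiled out
// (example_main and sleeper are hypothetical, not part of the kernel).
#if 0
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

static void * sleeper( void * arg ) {
	int fd = *(int *)arg;
	eventfd_t val;
	eventfd_read( fd, &val );        // blocks until the counter is non-zero
	printf( "woken, counter was %llu\n", (unsigned long long)val );
	return 0;
}

static int example_main() {
	int fd = eventfd( 0, 0 );        // counter starts at 0, so reads block
	pthread_t t;
	pthread_create( &t, 0, sleeper, &fd );
	sleep( 1 );                      // let the sleeper park
	eventfd_write( fd, 1 );          // the wake: add 1 to the counter
	pthread_join( t, 0 );
	close( fd );
	return 0;
}
#endif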

static void mark_idle(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle++;
		/* paranoid */ verify( this.idle <= this.total );
		remove(proc);
		insert_first(this.idles, proc);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle--;
		/* paranoid */ verify( this.idle >= 0 );
		remove(proc);
		insert_last(this.actives, proc);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	for() {
		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
		if( 1 == (l % 2) ) { Pause(); continue; }
		unsigned idle    = this.idle;
		unsigned total   = this.total;
		processor * proc = &this.idles`first;
		// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
		asm volatile("": : :"memory");
		if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
		return [idle, total, proc];
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );
}
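
// query_idles is a sequence-lock (seqlock) style reader: an odd lock value
// means a writer is mid-update, and an unchanged value across the two loads
// means the idle/total/first-idle snapshot is consistent. The generic reader
// shape, for reference (field names illustrative):
//
//     for() {
//         uint64_t seq = __atomic_load_n(&lock, __ATOMIC_SEQ_CST);
//         if( seq % 2 ) { Pause(); continue; }      // writer active, retry
//         ... read the protected fields ...
//         asm volatile("": : :"memory");            // keep the reads above the re-check
//         if( seq != __atomic_load_n(&lock, __ATOMIC_SEQ_CST) ) { Pause(); continue; }
//         return ...;                               // snapshot is consistent
//     }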

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	$thread * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

static inline bool __maybe_io_drain( processor * proc ) {
	bool ret = false;
	#if defined(CFA_HAVE_LINUX_IO_URING_H)
		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);

		// Check if we should drain the queue
		$io_context * ctx = proc->io.ctx;
		unsigned head = *ctx->cq.head;
		unsigned tail = *ctx->cq.tail;
		if(head == tail) return false;
		ready_schedule_lock();
		ret = __cfa_io_drain( proc );
		ready_schedule_unlock();
	#endif
	return ret;
}
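
// __maybe_io_drain does a lock-free emptiness check (head == tail) before
// paying for ready_schedule_lock(); only a ring with visible completions takes
// the lock. This check-then-lock shape is safe here because a stale "empty"
// answer merely delays draining until the next loop iteration.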

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS().this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor, processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed-up
		// this doesn't solve all problems but does solve many
		// so it's probably good enough
		uint_fast32_t last_size = ready_mutate_lock();

		crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
		crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
	}

	void print_stats_now( cluster & this, int flags ) {
		crawl_cluster_stats( this );
		__print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
	}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //