source: libcfa/src/concurrency/kernel.cfa @ 878cfcc

Last change on this file since 878cfcc was 878cfcc, checked in by Thierry Delisle <tdelisle@…>, 22 months ago

Added extra check to make sure threads is never double executed

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"

//C Includes
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

extern "C" {
        #include <sys/eventfd.h>
        #include <sys/uio.h>
}

//CFA Includes
#include "kernel/private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"
#include "io/types.hfa"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"
#pragma GCC diagnostic pop

#if !defined(__CFA_NO_STATISTICS__)
        #define __STATS_DEF( ...) __VA_ARGS__
#else
        #define __STATS_DEF( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
        // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
        // fcw  : X87 FPU control word (preserved across function calls)
        #define __x87_store         \
                uint32_t __mxcr;      \
                uint16_t __fcw;       \
                __asm__ volatile (    \
                        "stmxcsr %0\n"  \
                        "fnstcw  %1\n"  \
                        : "=m" (__mxcr),\
                                "=m" (__fcw)  \
                )

        #define __x87_load         \
                __asm__ volatile (   \
                        "fldcw  %1\n"  \
                        "ldmxcsr %0\n" \
                        ::"m" (__mxcr),\
                                "m" (__fcw)  \
                )

#elif defined( __x86_64 )
        #define __x87_store         \
                uint32_t __mxcr;      \
                uint16_t __fcw;       \
                __asm__ volatile (    \
                        "stmxcsr %0\n"  \
                        "fnstcw  %1\n"  \
                        : "=m" (__mxcr),\
                                "=m" (__fcw)  \
                )

        #define __x87_load          \
                __asm__ volatile (    \
                        "fldcw  %1\n"   \
                        "ldmxcsr %0\n"  \
                        :: "m" (__mxcr),\
                                "m" (__fcw)  \
                )

#elif defined( __arm__ )
        #define __x87_store
        #define __x87_load

#elif defined( __aarch64__ )
        // FPCR/FPSR are 64-bit system registers; the save buffer must hold two
        // 64-bit values for the stp below (uint64_t, not uint32_t)
        #define __x87_store              \
                uint64_t __fpcntl[2];    \
                __asm__ volatile (    \
                        "mrs x9, FPCR\n" \
                        "mrs x10, FPSR\n"  \
                        "stp x9, x10, %0\n"  \
                        : "=m" (__fpcntl) : : "x9", "x10" \
                )

        #define __x87_load         \
                __asm__ volatile (    \
                        "ldp x9, x10, %0\n"  \
                        "msr FPSR, x10\n"  \
                        "msr FPCR, x9\n" \
                :: "m" (__fpcntl) : "x9", "x10" \
                )

#else
        #error unsupported hardware architecture
#endif
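
// Editorial note (not in the original source): these macros exist because the
// FP control/status words are, by ABI convention, preserved across ordinary
// function calls, but a CFA thread can be switched out and resumed on a
// different kernel thread, so the state is carried by hand around
// __cfactx_switch (see returnToKernel below for the real pairing). A minimal
// sketch of the intended usage, assuming a hypothetical switch_away():
#if 0
void fp_state_example() {
        __x87_store;       // capture FP control/status state into locals
        switch_away();     // hypothetical: execution may resume on another kernel thread
        __x87_load;        // restore the captured state after resuming
}
#endif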

extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static thread$ * __next_thread_search(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void idle_sleep(processor * proc);
static bool mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);

extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));

#if defined(CFA_WITH_IO_URING_IDLE)
        extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
#endif

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();


//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
        // Because of a bug, we couldn't initialize the seed on construction,
        // so do it here
        __cfaabi_tls.rand_seed ^= rdtscl();
        __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
        __tls_rand_advance_bck();

        processor * this = runner.proc;
        verify(this);

        /* paranoid */ verify( this->idle_wctx.ftr   != 0p );
        /* paranoid */ verify( this->idle_wctx.rdbuf != 0p );

        // used for idle sleep when io_uring is present
        // mark it as already fulfilled so we know if there is a pending request or not
        this->idle_wctx.ftr->self.ptr = 1p;

        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
        #if !defined(__CFA_NO_STATISTICS__)
                if( this->print_halts ) {
                        __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
                }
        #endif

        {
                // Setup preemption data
                preemption_scope scope = { this };

                // if we need to run some special setup, now is the time to do it.
                if(this->init.thrd) {
                        this->init.thrd->curr_cluster = this->cltr;
                        __run_thread(this, this->init.thrd);
                }

                __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

                thread$ * readyThread = 0p;
                MAIN_LOOP:
                for() {
                        // Check if there is pending io
                        __cfa_io_drain( this );

                        // Try to get the next thread
                        readyThread = __next_thread( this->cltr );

                        if( !readyThread ) {
                                // there is no point in holding submissions if we are idle
                                __IO_STATS__(true, io.flush.idle++; )
                                __cfa_io_flush( this );

                                // drain again in case something showed up
                                __cfa_io_drain( this );

                                readyThread = __next_thread( this->cltr );
                        }

                        if( !readyThread ) for(5) {
                                readyThread = __next_thread_slow( this->cltr );

                                if( readyThread ) break;

                                // It's unlikely we still have I/O to submit, but the arbiter could have some
                                __IO_STATS__(true, io.flush.idle++; )
                                __cfa_io_flush( this );

                                // drain again in case something showed up
                                __cfa_io_drain( this );
                        }

                        HALT:
                        if( !readyThread ) {
                                // Don't block if we are done
                                if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

                                // Push self to idle stack
                                if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;

                                // Confirm the ready-queue is empty
                                readyThread = __next_thread_search( this->cltr );
                                if( readyThread ) {
                                        // A thread was found, cancel the halt
                                        mark_awake(this->cltr->procs, * this);

                                        __STATS__(true, ready.sleep.cancels++; )

                                        // continue the main loop
                                        break HALT;
                                }

                                idle_sleep( this );

                                // We were woken up, remove self from idle
                                mark_awake(this->cltr->procs, * this);

                                // DON'T just proceed, start looking again
                                continue MAIN_LOOP;
                        }

                        /* paranoid */ verify( readyThread );

                        // Reset io dirty bit
                        this->io.dirty = false;

                        // We found a thread, run it
                        __run_thread(this, readyThread);

                        // Are we done?
                        if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

                        if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
                                __IO_STATS__(true, io.flush.dirty++; )
                                __cfa_io_flush( this );
                        }
                }

                __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
        }

        post( this->terminated );

        if(this == mainProcessor) {
                // HACK : the coroutine context switch expects this_thread to be set
                // and it makes sense for it to be set in all other cases except here;
                // fake it
                __cfaabi_tls.this_thread = mainThread;
        }

        __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// __run_thread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
        /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
        __builtin_prefetch( thrd_dst->context.SP );

        __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

        coroutine$ * proc_cor = get_coroutine(this->runner);

        // set state of processor coroutine to inactive
        verify(proc_cor->state == Active);
        proc_cor->state = Blocked;

        // Actually run the thread
        RUNNING:  while(true) {
                thrd_dst->preempted = __NO_PREEMPTION;

                // Update global state
                kernelTLS().this_thread = thrd_dst;

                // Update the state after setting this_thread
                // so that the debugger can find all active threads
                // in tls storage
                thrd_dst->state = Active;

                /* paranoid */ verify( ! __preemption_enabled() );
                /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
                /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
                /* paranoid */ verify( thrd_dst->context.SP );
                /* paranoid */ verify( thrd_dst->state != Halted );
                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
                /* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, this, __ATOMIC_SEQ_CST) == 0p );
                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );



                // context switch to the thread that the processor should now execute
                __cfactx_switch( &proc_cor->context, &thrd_dst->context );
                // when __cfactx_switch returns we are back in the processor coroutine



                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
                /* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, 0p, __ATOMIC_SEQ_CST) == this );
                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
                /* paranoid */ verify( thrd_dst->state != Halted );
                /* paranoid */ verify( thrd_dst->context.SP );
                /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
                /* paranoid */ verify( ! __preemption_enabled() );

                // We just finished running a thread, there are a few things that could have happened.
                // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
                // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
                // 3 - Preempted
                // In case 1, we may have won a race so we can't write to the state again.
                // In case 2, we lost the race so we now own the thread.

                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
                        // Reset the this_thread now that we know
                        // the state isn't active anymore
                        kernelTLS().this_thread = 0p;

                        // The thread was preempted, reschedule it and reset the flag
                        schedule_thread$( thrd_dst, UNPARK_LOCAL );
                        break RUNNING;
                }

                if(unlikely(thrd_dst->state == Halting)) {
                        // Reset the this_thread now that we know
                        // the state isn't active anymore
                        kernelTLS().this_thread = 0p;

                        // The thread has halted, it should never be scheduled/run again
                        // finish the thread
                        __thread_finish( thrd_dst );
                        break RUNNING;
                }

                /* paranoid */ verify( thrd_dst->state == Active );
                thrd_dst->state = Blocked;

                // Reset the this_thread now that we know
                // the state isn't active anymore
                kernelTLS().this_thread = 0p;

                // set state of processor coroutine to active and the thread to inactive
                int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
                switch(old_ticket) {
                        case TICKET_RUNNING:
                                // This is case 1, the regular case, nothing more is needed
                                break RUNNING;
                        case TICKET_UNBLOCK:
                                __STATS__(true, ready.threads.threads++; )
                                // This is case 2, the racy case, someone tried to run this thread before it finished blocking
                                // In this case, just run it again.
                                continue RUNNING;
                        default:
                                // This makes no sense; something is wrong, abort
                                abort();
                }
        }

        // Just before returning to the processor, set the processor coroutine to active
        proc_cor->state = Active;

        __cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

        __STATS__(true, ready.threads.threads--; )

        /* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
static void returnToKernel() {
        /* paranoid */ verify( ! __preemption_enabled() );
        coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
        thread$ * thrd_src = kernelTLS().this_thread;

        __STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )

        // Run the thread on this processor
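        // (editorial note) errno and the FP control state are per-kernel-thread,
        // but after the switch below this CFA thread may resume on a different
        // kernel thread, so both are saved and restored by hand around the switch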
        {
                int local_errno = *__volatile_errno();
                #if defined( __i386 ) || defined( __x86_64 )
                        __x87_store;
                #endif
                /* paranoid */ verify( proc_cor->context.SP );
                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
                __cfactx_switch( &thrd_src->context, &proc_cor->context );
                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
                #if defined( __i386 ) || defined( __x86_64 )
                        __x87_load;
                #endif
                *__volatile_errno() = local_errno;
        }

        #if !defined(__CFA_NO_STATISTICS__)
                /* paranoid */ verify( thrd_src->last_proc != 0p );
                if(thrd_src->last_proc != kernelTLS().this_processor) {
                        __tls_stats()->ready.threads.migration++;
                }
        #endif

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( ready_schedule_islocked());
        /* paranoid */ verify( thrd );
        /* paranoid */ verify( thrd->state != Halted );
        /* paranoid */ verify( thrd->curr_cluster );
        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
        /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
                                        "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
        /* paranoid */  if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
                                        "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
        /* paranoid */ #endif
        /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

        if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

        // Dereference the thread now because once we push it, it is not guaranteed to still be valid.
        struct cluster * cl = thrd->curr_cluster;
        __STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

        // push the thread to the cluster ready-queue
        push( cl, thrd, hint );

        // variable thrd is no longer safe to use
        thrd = 0xdeaddeaddeaddeadp;

        // wake the cluster using the saved variable.
        __wake_one( cl );

        #if !defined(__CFA_NO_STATISTICS__)
                if( kernelTLS().this_stats ) {
                        __tls_stats()->ready.threads.threads++;
                        if(outside) {
                                __tls_stats()->ready.threads.extunpark++;
                        }
                }
                else {
                        __atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
                        __atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
                }
        #endif

        /* paranoid */ verify( ready_schedule_islocked());
        /* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
        ready_schedule_lock();
                __schedule_thread( thrd, hint );
        ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
        /* paranoid */ verify( ! __preemption_enabled() );

        ready_schedule_lock();
                thread$ * thrd = pop_fast( this );
        ready_schedule_unlock();

        /* paranoid */ verify( ! __preemption_enabled() );
        return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
        /* paranoid */ verify( ! __preemption_enabled() );

        ready_schedule_lock();
                thread$ * thrd = pop_slow( this );
        ready_schedule_unlock();

        /* paranoid */ verify( ! __preemption_enabled() );
        return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
        /* paranoid */ verify( ! __preemption_enabled() );

        ready_schedule_lock();
                thread$ * thrd = pop_search( this );
        ready_schedule_unlock();

        /* paranoid */ verify( ! __preemption_enabled() );
        return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
        int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
        switch(old_ticket) {
                case TICKET_RUNNING:
                        // Wake won the race, the thread will reschedule/rerun itself
                        return false;
                case TICKET_BLOCKED:
                        /* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
                        /* paranoid */ verify( thrd->state == Blocked );
                        return true;
                default:
                        // This makes no sense; something is wrong, abort
                        abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
        }
}
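
// Editorial sketch (restating the comments in __must_unpark and __run_thread):
// the park/unpark race is resolved by treating thrd->ticket as a tiny state
// machine. The parking side (__run_thread) does fetch_sub(ticket, 1); the
// waking side (__must_unpark) does fetch_add(ticket, 1). Whichever side
// observes the other's update takes responsibility for the thread, so it is
// rescheduled exactly once:
//
//   park side, fetch_sub saw TICKET_RUNNING : no unpark yet, stay blocked (case 1)
//   park side, fetch_sub saw TICKET_UNBLOCK : unpark already happened, rerun immediately (case 2)
//   wake side, fetch_add saw TICKET_BLOCKED : thread fully blocked, schedule it here
//   wake side, fetch_add saw TICKET_RUNNING : park still in progress, it will rerun itself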

void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( ready_schedule_islocked());

        if( !thrd ) return;

        if(__must_unpark(thrd)) {
                // Wake lost the race, so we must schedule the thread
                __schedule_thread( thrd, hint );
        }

        /* paranoid */ verify( ready_schedule_islocked());
        /* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
        if( !thrd ) return;

        if(__must_unpark(thrd)) {
                disable_interrupts();
                        // Wake lost the race, so we must schedule the thread
                        schedule_thread$( thrd, hint );
                enable_interrupts(false);
        }
}
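
// Minimal usage sketch of the park()/unpark() pair (editorial, not part of
// this file; `done` and `waiter` are hypothetical variables shared by two
// threads):
#if 0
volatile bool done = false;
thread$ * waiter = 0p;

// thread A: publish itself, then block until signalled
waiter = active_thread();
while( !done ) park();                  // returns once some thread unparks us

// thread B: signal, then wake A; __must_unpark above resolves the race where
// A has not finished blocking yet
done = true;
unpark( waiter, UNPARK_LOCAL );
#endif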

void park( void ) libcfa_public {
        __disable_interrupts_checked();
                /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
                returnToKernel();
        __enable_interrupts_checked();

}

extern "C" {
        // Leave the thread monitor
        // last routine called by a thread.
        // Should never return
        void __cfactx_thrd_leave() {
                thread$ * thrd = active_thread();
                monitor$ * this = &thrd->self_mon;

                // Lock the monitor now
                lock( this->lock __cfaabi_dbg_ctx2 );

                disable_interrupts();

                /* paranoid */ verify( ! __preemption_enabled() );
                /* paranoid */ verify( thrd->state == Active );
                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
                /* paranoid */ verify( kernelTLS().this_thread == thrd );
                /* paranoid */ verify( thrd->context.SP );
                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

                thrd->state = Halting;
                if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
                if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
                if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

                // Leave the thread
                returnToKernel();

                // Control flow should never reach here!
                abort();
        }
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) libcfa_public {
        __disable_interrupts_checked();
                thread$ * thrd = kernelTLS().this_thread;
                /* paranoid */ verify(thrd->state == Active);

                // SKULLDUGGERY: It is possible that we are preempting this thread just before
                // it was going to park itself. If that is the case and it is already using the
                // intrusive fields then we can't use them to preempt the thread.
                // In that case, abandon the preemption.
                bool preempted = false;
                if(thrd->link.next == 0p) {
                        preempted = true;
                        thrd->preempted = reason;
                        returnToKernel();
                }
        __enable_interrupts_checked( false );
        return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake a sleeping processor from the front of the idle list, if there is one
static void __wake_one(cluster * this) {
        eventfd_t val;

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( ready_schedule_islocked() );

        // Check if there is a sleeping processor
        struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);

        // If no one is sleeping: we are done
        if( fdp == 0p ) return;

        int fd = 1;
        if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
                fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
        }

        switch(fd) {
                __attribute__((unused)) int ret;
        case 0:
                // If the processor isn't ready to sleep then the exchange will already wake it up
                #if !defined(__CFA_NO_STATISTICS__)
                        if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
                        } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
                #endif
                break;
        case 1:
                // If someone else already said they will wake them: we are done
                #if !defined(__CFA_NO_STATISTICS__)
                        if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
                        } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
                #endif
                break;
        default:
                // If the processor was ready to sleep, we need to wake it up with an actual write
                val = 1;
                ret = eventfd_write( fd, val );
                /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );

                #if !defined(__CFA_NO_STATISTICS__)
                        if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
                        } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
                #endif
                break;
        }

        /* paranoid */ verify( ready_schedule_islocked() );
        /* paranoid */ verify( ! __preemption_enabled() );

        return;
}

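// Standalone sketch of the eventfd handshake used above (editorial; the real
// sleeper side is idle_sleep below, which uses a plain read() on the eventfd):
#if 0
#include <sys/eventfd.h>
#include <unistd.h>

void eventfd_handshake_example() {
        int fd = eventfd( 0, 0 );       // kernel-side counter, starts at 0
        eventfd_t val = 1;
        eventfd_write( fd, val );       // waker: adds 1 to the counter
        eventfd_read( fd, &val );       // sleeper: blocks until non-zero, then resets it
        close( fd );
}
#endif
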
// Unconditionally wake a processor
void __wake_proc(processor * this) {
        /* paranoid */ verify( ! __preemption_enabled() );

        __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

        this->idle_wctx.sem = 1;

        this->idle_wctx.wake__time = rdtscl();

        eventfd_t val;
        val = 1;
        __attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );

        /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
        /* paranoid */ verify( ! __preemption_enabled() );
}
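
// Editorial note (inferred from __wake_one, __wake_proc and idle_sleep): the
// idle_wctx.sem field is a small 3-state handshake rather than a semaphore:
//   0    : the processor is running normally
//   evfd : the processor has committed to sleeping; wake it via eventfd_write(evfd)
//   1    : a wake-up was requested; the processor must not (or need not) sleep
// __wake_one exchanges the field with 1 and, if the old value was an fd, does
// the actual eventfd write; idle_sleep publishes the fd with a CAS from 0.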

static void idle_sleep(processor * this) {
        /* paranoid */ verify( this->idle_wctx.evfd != 1 );
        /* paranoid */ verify( this->idle_wctx.evfd != 2 );

        // Tell everyone we are ready to go to sleep
        for() {
                int expected = this->idle_wctx.sem;

                // Someone already told us to wake-up! No time for a nap.
                if(expected == 1) { return; }

                // Try to mark that we are going to sleep
                if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
                        // Everyone agreed, taking a nap
                        break;
                }
        }


        #if !defined(CFA_WITH_IO_URING_IDLE)
                #if !defined(__CFA_NO_STATISTICS__)
                        if(this->print_halts) {
                                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
                        }
                #endif

                __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_wctx.evfd);

                {
                        eventfd_t val;
                        ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
                        if(ret < 0) {
                                switch((int)errno) {
                                case EAGAIN:
                                #if EAGAIN != EWOULDBLOCK
                                        case EWOULDBLOCK:
                                #endif
                                case EINTR:
                                        // No need to do anything special here, just assume it's a legitimate wake-up
                                        break;
                                default:
                                        abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
                                }
                        }
                }

                #if !defined(__CFA_NO_STATISTICS__)
                        if(this->print_halts) {
                                __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
                        }
                #endif
        #else
                __cfa_io_idle( this );
        #endif
}

static bool mark_idle(__cluster_proc_list & this, processor & proc) {
        __STATS__(true, ready.sleep.halts++; )

        proc.idle_wctx.sem = 0;

        /* paranoid */ verify( ! __preemption_enabled() );
        if(!try_lock( this )) return false;
                this.idle++;
                /* paranoid */ verify( this.idle <= this.total );
                remove(proc);
                insert_first(this.idles, proc);

                __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
        unlock( this );
        /* paranoid */ verify( ! __preemption_enabled() );

        return true;
}
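
// Editorial note: this.fdw points at the wait context of the idle processor to
// wake next (the most recently idled one), or is null when none is sleeping.
// __wake_one reads it without holding the lock, hence the atomic stores here
// and in mark_awake below.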

static void mark_awake(__cluster_proc_list & this, processor & proc) {
        /* paranoid */ verify( ! __preemption_enabled() );
        lock( this );
                this.idle--;
                /* paranoid */ verify( this.idle >= 0 );
                remove(proc);
                insert_last(this.actives, proc);

                {
                        struct __fd_waitctx * wctx = 0;
                        if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
                        __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
                }

        unlock( this );
        /* paranoid */ verify( ! __preemption_enabled() );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
        thread$ * thrd = __cfaabi_tls.this_thread;

        if(thrd) {
                int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
                __cfaabi_bits_write( STDERR_FILENO, abort_text, len );

                if ( &thrd->self_cor != thrd->curr_cor ) {
                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
                        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
                }
                else {
                        __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
                }
        }
        else {
                int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
                __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
        }
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
        return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
        void __cfaabi_bits_acquire() {
                lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
        }

        void __cfaabi_bits_release() {
                unlock( kernel_debug_lock );
        }
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) libcfa_public {
        return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
        void print_halts( processor & this ) libcfa_public {
                this.print_halts = true;
        }

        static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
                /* paranoid */ verify( cltr->stats );

                processor * it = &list`first;
                for(unsigned i = 0; i < count; i++) {
                        /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
                        /* paranoid */ verify( it->local_data->this_stats );
                        // __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
                        __tally_stats( cltr->stats, it->local_data->this_stats );
                        it = &(*it)`next;
                }
        }

        static void crawl_cluster_stats( cluster & this ) {
                // Stop the world, otherwise stats could get really messed-up
                // this doesn't solve all problems but does solve many
                // so it's probably good enough
                disable_interrupts();
                uint_fast32_t last_size = ready_mutate_lock();

                        crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
                        crawl_list(&this, this.procs.idles  , this.procs.idle );

                // Unlock the RWlock
                ready_mutate_unlock( last_size );
                enable_interrupts();
        }


        void print_stats_now( cluster & this, int flags ) libcfa_public {
                crawl_cluster_stats( this );
                __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
        }
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //