source: libcfa/src/concurrency/kernel.cfa @ 4994d67

Last change on this file since 4994d67 was 4ccc150, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Fix the verifys I just added.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Aug 31 07:08:20 2020
// Update Count     : 71
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"

// C Includes
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

extern "C" {
	#include <sys/eventfd.h>
	#include <sys/uio.h>
}

// CFA Includes
#include "kernel/private.hfa"
#include "preemption.hfa"
#include "strstream.hfa"
#include "device/cpu.hfa"
#include "io/types.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"
#pragma GCC diagnostic pop

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS_DEF( ...) __VA_ARGS__
#else
	#define __STATS_DEF( ...)
#endif

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store         \
		uint32_t __mxcr;        \
		uint16_t __fcw;         \
		__asm__ volatile (      \
			"stmxcsr %0\n"      \
			"fnstcw  %1\n"      \
			: "=m" (__mxcr),    \
			  "=m" (__fcw)      \
		)

	#define __x87_load          \
		__asm__ volatile (      \
			"fldcw  %1\n"       \
			"ldmxcsr %0\n"      \
			:: "m" (__mxcr),    \
			   "m" (__fcw)      \
		)

#elif defined( __x86_64 )
	#define __x87_store         \
		uint32_t __mxcr;        \
		uint16_t __fcw;         \
		__asm__ volatile (      \
			"stmxcsr %0\n"      \
			"fnstcw  %1\n"      \
			: "=m" (__mxcr),    \
			  "=m" (__fcw)      \
		)

	#define __x87_load          \
		__asm__ volatile (      \
			"fldcw  %1\n"       \
			"ldmxcsr %0\n"      \
			:: "m" (__mxcr),    \
			   "m" (__fcw)      \
		)

#elif defined( __arm__ )
	#define __x87_store
	#define __x87_load

#elif defined( __aarch64__ )
	// FPCR and FPSR are read into two 64-bit registers and stored as a pair,
	// so the save area must hold two 64-bit words
	#define __x87_store          \
		uint64_t __fpcntl[2];    \
		__asm__ volatile (       \
			"mrs x9, FPCR\n"     \
			"mrs x10, FPSR\n"    \
			"stp x9, x10, %0\n"  \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load           \
		__asm__ volatile (       \
			"ldp x9, x10, %0\n"  \
			"msr FPSR, x10\n"    \
			"msr FPCR, x9\n"     \
			:: "m" (__fpcntl) : "x9", "x10" \
		)

#else
	#error unsupported hardware architecture
#endif
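
// Editorial note: the C calling convention treats the x87 control word and the
// MXCSR control bits as preserved across calls, but a context switch resumes a
// different call chain entirely, so the runtime must carry this state from the
// outgoing user thread to the incoming one explicitly. A minimal usage sketch
// (hypothetical names; see returnToKernel below for the real call site):
//
//      __x87_store;                                      // save outgoing thread's FP control state
//      __cfactx_switch( &src->context, &dst->context );  // switch stacks
//      __x87_load;                                       // restore it when this thread resumes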

extern thread$ * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static thread$ * __next_thread(cluster * this);
static thread$ * __next_thread_slow(cluster * this);
static thread$ * __next_thread_search(cluster * this);
static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
static void __run_thread(processor * this, thread$ * dst);
static void __wake_one(cluster * cltr);

static void idle_sleep(processor * proc);
static bool mark_idle (__cluster_proc_list & idles, processor & proc);
static void mark_awake(__cluster_proc_list & idles, processor & proc);

extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));

#if defined(CFA_WITH_IO_URING_IDLE)
	extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
#endif

extern void __disable_interrupts_hard();
extern void __enable_interrupts_hard();

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction.
	// Do it here instead.
	__cfaabi_tls.rand_seed ^= rdtscl();
	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	/* paranoid */ verify( this->idle_wctx.ftr   != 0p );
	/* paranoid */ verify( this->idle_wctx.rdbuf != 0p );

	// used for idle sleep when io_uring is present;
	// mark it as already fulfilled so we know if there is a pending request or not
	this->idle_wctx.ftr->self.ptr = 1p;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		// if we need to run some special setup, now is the time to do it.
		if(this->init.thrd) {
			this->init.thrd->curr_cluster = this->cltr;
			__run_thread(this, this->init.thrd);
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		thread$ * readyThread = 0p;
		MAIN_LOOP:
		for() {
			// Check if there is pending io
			__cfa_io_drain( this );

			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				// there is no point in holding submissions if we are idle
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this );

				// drain again in case something showed up
				__cfa_io_drain( this );

				readyThread = __next_thread( this->cltr );
			}

			if( !readyThread ) for(5) {
				readyThread = __next_thread_slow( this->cltr );

				if( readyThread ) break;

				// It's unlikely we still have I/O to submit, but the arbiter could have given us some
				__IO_STATS__(true, io.flush.idle++; )
				__cfa_io_flush( this );

				// drain again in case something showed up
				__cfa_io_drain( this );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				// Push self to idle stack
				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;

				// Confirm the ready-queue is empty
				readyThread = __next_thread_search( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					mark_awake(this->cltr->procs, * this);

					__STATS__(true, ready.sleep.cancels++; )

					// continue the main loop
					break HALT;
				}

				idle_sleep( this );

				// We were woken up, remove self from idle
				mark_awake(this->cltr->procs, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// Reset io dirty bit
			this->io.dirty = false;

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

			if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
				__IO_STATS__(true, io.flush.dirty++; )
				__cfa_io_flush( this );
			}
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	post( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set,
		// and it makes sense for it to be set in all other cases except here;
		// fake it
		__cfaabi_tls.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, thread$ * thrd_dst) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

	coroutine$ * proc_cor = get_coroutine(this->runner);

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING:  while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;
		thrd_dst->state = Active;

		// Update global state
		kernelTLS().this_thread = thrd_dst;

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->state != Halted );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

		// set context switch to the thread that the processor is executing
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( thrd_dst->context.SP );
		/* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
		/* paranoid */ verify( ! __preemption_enabled() );

		// Reset global state
		kernelTLS().this_thread = 0p;

		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			schedule_thread$( thrd_dst, UNPARK_LOCAL );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halting)) {
			// The thread has halted, it should never be scheduled/run again;
			// finish the thread
			__thread_finish( thrd_dst );
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		switch(old_ticket) {
			case TICKET_RUNNING:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case TICKET_UNBLOCK:
				__STATS__(true, ready.threads.threads++; )
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking.
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;

	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);

	__STATS__(true, ready.threads.threads--; )

	/* paranoid */ verify( ! __preemption_enabled() );
}

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! __preemption_enabled() );
	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
	thread$ * thrd_src = kernelTLS().this_thread;

	__STATS_DEF( thrd_src->last_proc = kernelTLS().this_processor; )

	// Switch back to the processor coroutine, preserving errno and the FP control state
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		/* paranoid */ verify( proc_cor->context.SP );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_src->canary );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		/* paranoid */ verify( thrd_src->last_proc != 0p );
		if(thrd_src->last_proc != kernelTLS().this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}
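
// Editorial note: errno is thread-local at the pthread level, but a CFA user
// thread can park on one processor (one pthread) and later unpark on another,
// so returnToKernel above saves errno into a stack local before the switch and
// writes it back after, making errno effectively user-thread-local as well.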

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
static void __schedule_thread( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( thrd->curr_cluster );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
					"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */	if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
					"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
	/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	// Dereference the thread now because once we push it, there is no guarantee it's still valid.
	struct cluster * cl = thrd->curr_cluster;
	__STATS_DEF(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )

	// push the thread to the cluster ready-queue
	push( cl, thrd, hint );

	// variable thrd is no longer safe to use
	thrd = 0xdeaddeaddeaddeadp;

	// wake the cluster using the saved variable.
	__wake_one( cl );

	#if !defined(__CFA_NO_STATISTICS__)
		if( kernelTLS().this_stats ) {
			__tls_stats()->ready.threads.threads++;
			if(outside) {
				__tls_stats()->ready.threads.extunpark++;
			}
		}
		else {
			__atomic_fetch_add(&cl->stats->ready.threads.threads, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cl->stats->ready.threads.extunpark, 1, __ATOMIC_RELAXED);
		}
	#endif

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void schedule_thread$( thread$ * thrd, unpark_hint hint ) {
	ready_schedule_lock();
		__schedule_thread( thrd, hint );
	ready_schedule_unlock();
}

// KERNEL ONLY
static inline thread$ * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_fast( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_slow( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

// KERNEL ONLY
static inline thread$ * __next_thread_search(cluster * this) with( *this ) {
	/* paranoid */ verify( ! __preemption_enabled() );

	ready_schedule_lock();
		thread$ * thrd = pop_search( this );
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return thrd;
}

static inline bool __must_unpark( thread$ * thrd ) {
	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	switch(old_ticket) {
		case TICKET_RUNNING:
			// Wake won the race, the thread will reschedule/rerun itself
			return false;
		case TICKET_BLOCKED:
			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );
			return true;
		default:
			// This makes no sense, something is wrong, abort
			abort("Thread %p (%s) has mismatched park/unpark\n", thrd, thrd->self_cor.name);
	}
}
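
// Editorial note: park/unpark are made race-safe by the ticket field alone. A
// transition sketch using only names from this file (concrete TICKET_* values
// are not assumed):
//
//   parking side (__run_thread):   old = fetch_sub( &thrd->ticket, 1 )
//      old == TICKET_RUNNING  =>  normal block, the processor moves on
//      old == TICKET_UNBLOCK  =>  an unpark raced ahead, rerun the thread now
//   waking side (__must_unpark):   old = fetch_add( &thrd->ticket, 1 )
//      old == TICKET_RUNNING  =>  thread has not finished blocking, it will
//                                 observe the increment and rerun itself
//      old == TICKET_BLOCKED  =>  thread is fully blocked, the caller must
//                                 reschedule it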

void __kernel_unpark( thread$ * thrd, unpark_hint hint ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked());

	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		// Wake lost the race,
		__schedule_thread( thrd, hint );
	}

	/* paranoid */ verify( ready_schedule_islocked());
	/* paranoid */ verify( ! __preemption_enabled() );
}

void unpark( thread$ * thrd, unpark_hint hint ) {
	if( !thrd ) return;

	if(__must_unpark(thrd)) {
		disable_interrupts();
			// Wake lost the race,
			schedule_thread$( thrd, hint );
		enable_interrupts(false);
	}
}

void park( void ) {
	__disable_interrupts_checked();
		/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
		returnToKernel();
	__enable_interrupts_checked();
}
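
// Editorial usage sketch (hypothetical, not part of this file): park pairs
// one-to-one with unpark, and the ticket protocol makes their order irrelevant,
// so a one-shot handoff between two threads can be as simple as:
//
//      thread$ * volatile waiter = 0p;
//
//      // waiting side
//      __atomic_store_n( &waiter, active_thread(), __ATOMIC_SEQ_CST );
//      park();        // returns promptly even if the unpark already happened
//
//      // signalling side (unpark tolerates a null argument)
//      unpark( __atomic_exchange_n( &waiter, 0p, __ATOMIC_SEQ_CST ), UNPARK_LOCAL );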

extern "C" {
	// Leave the thread monitor:
	// last routine called by a thread.
	// Should never return
	void __cfactx_thrd_leave() {
		thread$ * thrd = active_thread();
		monitor$ * this = &thrd->self_mon;

		// Lock the monitor now
		lock( this->lock __cfaabi_dbg_ctx2 );

		disable_interrupts();

		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( thrd->state == Active );
		/* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
		/* paranoid */ verify( kernelTLS().this_thread == thrd );
		/* paranoid */ verify( thrd->context.SP );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
		/* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

		thrd->state = Halting;
		if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
		if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
		if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

		// Leave the thread
		returnToKernel();

		// Control flow should never reach here!
		abort();
	}
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	__disable_interrupts_checked();
		thread$ * thrd = kernelTLS().this_thread;
		/* paranoid */ verify(thrd->state == Active);

		// SKULLDUGGERY: It is possible that we are preempting this thread just before
		// it was going to park itself. If that is the case and it is already using the
		// intrusive fields, then we can't use them to preempt the thread and we must
		// abandon the preemption.
		bool preempted = false;
		if(thrd->link.next == 0p) {
			preempted = true;
			thrd->preempted = reason;
			returnToKernel();
		}
	__enable_interrupts_checked( false );
	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake one sleeping processor from the front of the idle list, if there is one
static void __wake_one(cluster * this) {
	eventfd_t val;

	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ready_schedule_islocked() );

	// Check if there is a sleeping processor
	struct __fd_waitctx * fdp = __atomic_load_n(&this->procs.fdw, __ATOMIC_SEQ_CST);

	// If no one is sleeping: we are done
	if( fdp == 0p ) return;

	int fd = 1;
	if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
		fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
	}

	switch(fd) {
		__attribute__((unused)) int ret;
	case 0:
		// If the processor isn't ready to sleep then the exchange will already wake it up
		#if !defined(__CFA_NO_STATISTICS__)
			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
			} else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
		#endif
		break;
	case 1:
		// If someone else already said they will wake them: we are done
		#if !defined(__CFA_NO_STATISTICS__)
			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
			} else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
		#endif
		break;
	default:
		// If the processor was ready to sleep, we need to wake it up with an actual write
		val = 1;
		ret = eventfd_write( fd, val );
		/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );

		#if !defined(__CFA_NO_STATISTICS__)
			if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
			} else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
		#endif
		break;
	}

	/* paranoid */ verify( ready_schedule_islocked() );
	/* paranoid */ verify( ! __preemption_enabled() );

	return;
}

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	this->idle_wctx.sem = 1;

	this->idle_wctx.wake__time = rdtscl();

	eventfd_t val;
	val = 1;
	__attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );

	/* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
	/* paranoid */ verify( ! __preemption_enabled() );
}

static void idle_sleep(processor * this) {
	/* paranoid */ verify( this->idle_wctx.evfd != 1 );
	/* paranoid */ verify( this->idle_wctx.evfd != 2 );

	// Tell everyone we are ready to go to sleep
	for() {
		int expected = this->idle_wctx.sem;

		// Someone already told us to wake up! No time for a nap.
		if(expected == 1) { return; }

		// Try to mark that we are going to sleep
		if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
			// Everyone agreed, taking a nap
			break;
		}
	}

	#if !defined(CFA_WITH_IO_URING_IDLE)
		#if !defined(__CFA_NO_STATISTICS__)
			if(this->print_halts) {
				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
			}
		#endif

		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);

		{
			eventfd_t val;
			ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
			if(ret < 0) {
				switch((int)errno) {
				case EAGAIN:
				#if EAGAIN != EWOULDBLOCK
					case EWOULDBLOCK:
				#endif
				case EINTR:
					// No need to do anything special here, just assume it's a legitimate wake-up
					break;
				default:
					abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
				}
			}
		}

		#if !defined(__CFA_NO_STATISTICS__)
			if(this->print_halts) {
				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
			}
		#endif
	#else
		__cfa_io_idle( this );
	#endif
}
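
// Editorial note: idle_wctx.sem acts as a small state machine shared between
// idle_sleep, __wake_one and __wake_proc: 0 means the processor is awake (or
// preparing to sleep), 1 means a wake-up is pending or already consumed, and
// any other value is the eventfd the sleeping processor is blocked on.
// __wake_one exchanges the field with 1 and, only if it finds an fd there,
// pays for the eventfd_write system call that actually wakes the processor.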

static bool mark_idle(__cluster_proc_list & this, processor & proc) {
	__STATS__(true, ready.sleep.halts++; )

	proc.idle_wctx.sem = 0;

	/* paranoid */ verify( ! __preemption_enabled() );
	if(!try_lock( this )) return false;
		this.idle++;
		/* paranoid */ verify( this.idle <= this.total );
		remove(proc);
		insert_first(this.idles, proc);

		__atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );

	return true;
}

static void mark_awake(__cluster_proc_list & this, processor & proc) {
	/* paranoid */ verify( ! __preemption_enabled() );
	lock( this );
		this.idle--;
		/* paranoid */ verify( this.idle >= 0 );
		remove(proc);
		insert_last(this.actives, proc);

		{
			struct __fd_waitctx * wctx = 0;
			if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
			__atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
		}

	unlock( this );
	/* paranoid */ verify( ! __preemption_enabled() );
}

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
	thread$ * thrd = __cfaabi_tls.this_thread;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
#if defined(CFA_HAVE_LINUX_IO_URING_H)
#include "io/types.hfa"
#endif

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS().this_thread;
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) {
		this.print_halts = true;
	}

	static void crawl_list( cluster * cltr, dlist(processor) & list, unsigned count ) {
		/* paranoid */ verify( cltr->stats );

		processor * it = &list`first;
		for(unsigned i = 0; i < count; i++) {
			/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
			/* paranoid */ verify( it->local_data->this_stats );
			// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
			__tally_stats( cltr->stats, it->local_data->this_stats );
			it = &(*it)`next;
		}
	}

	void crawl_cluster_stats( cluster & this ) {
		// Stop the world, otherwise stats could get really messed up;
		// this doesn't solve all problems but does solve many,
		// so it's probably good enough
		disable_interrupts();
		uint_fast32_t last_size = ready_mutate_lock();

			crawl_list(&this, this.procs.actives, this.procs.total - this.procs.idle);
			crawl_list(&this, this.procs.idles  , this.procs.idle );

		// Unlock the RWlock
		ready_mutate_unlock( last_size );
		enable_interrupts();
	}

	void print_stats_now( cluster & this, int flags ) {
		crawl_cluster_stats( this );
		__print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //