source: libcfa/src/concurrency/kernel/startup.cfa @ 262fafd9

Last change on this file was 262fafd9, checked in by Thierry Delisle <tdelisle@…>, 2 years ago:

Added debugging information to help find deadlock.

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/startup.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Jul 30 15:12:54 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

// C Includes
#include <errno.h>                                      // errno
#include <signal.h>
#include <string.h>                                     // strerror
#include <unistd.h>                                     // sysconf

extern "C" {
        #include <limits.h>                             // PTHREAD_STACK_MIN
        #include <unistd.h>                             // syscall
        #include <sys/eventfd.h>                        // eventfd
        #include <sys/mman.h>                           // mprotect
        #include <sys/resource.h>                       // getrlimit
}

// CFA Includes
#include "kernel/private.hfa"
#include "iofwd.hfa"
#include "startup.hfa"                                  // STARTUP_PRIORITY_XXX
#include "limits.hfa"
#include "math.hfa"

#define CFA_PROCESSOR_USE_MMAP 0

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
        #define CtxGet( ctx ) __asm__ volatile ( \
                "movl %%esp,%0\n" \
                "movl %%ebp,%1\n" \
                : "=rm" (ctx.SP), \
                  "=rm" (ctx.FP) \
        )
#elif defined( __x86_64 )
        #define CtxGet( ctx ) __asm__ volatile ( \
                "movq %%rsp,%0\n" \
                "movq %%rbp,%1\n" \
                : "=rm" (ctx.SP), \
                  "=rm" (ctx.FP) \
        )
#elif defined( __aarch64__ )
        #define CtxGet( ctx ) __asm__ volatile ( \
                "mov %0, sp\n" \
                "mov %1, fp\n" \
                : "=rm" (ctx.SP), \
                  "=rm" (ctx.FP) \
        )
#else
        #error unknown hardware architecture
#endif
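
// Note: CtxGet copies the executing thread's stack and frame pointers into a
// __stack_context_t; it is how the runtime fabricates a coroutine context on
// top of a stack it did not allocate. A minimal sketch of its use (the same
// pattern appears in the current_stack_info_t constructor below):
//
//     __stack_context_t ctx;
//     CtxGet( ctx );          // ctx.SP / ctx.FP now describe this very stack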

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Static Forward Declarations
struct current_stack_info_t;

static void * __invoke_processor(void * arg);
static void __kernel_first_resume( processor * this );
static void __kernel_last_resume ( processor * this );
static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT);
static void deinit(processor & this);
static void doregister( struct cluster & cltr );
static void unregister( struct cluster & cltr );
static void register_tls( processor * this );
static void unregister_tls( processor * this );
static void ?{}( coroutine$ & this, current_stack_info_t * info);
static void ?{}( thread$ & this, current_stack_info_t * info);
static void ?{}(processorCtx_t & this) {}
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info);

#if defined(__CFA_WITH_VERIFY__)
        static bool verify_fwd_bck_rng(void);
#endif

//-----------------------------------------------------------------------------
// Forward Declarations for other modules
extern void __kernel_alarm_startup(void);
extern void __kernel_alarm_shutdown(void);
extern void __cfa_io_start( processor * );
extern void __cfa_io_stop ( processor * );

//-----------------------------------------------------------------------------
// Other Forward Declarations
extern void __wake_proc(processor *);
extern int cfa_main_returned;                                                   // from interpose.cfa
uint32_t __global_random_prime = 4_294_967_291u, __global_random_mask = false;

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster,              mainCluster);
KERNEL_STORAGE(processor,            mainProcessor);
KERNEL_STORAGE(thread$,              mainThread);
KERNEL_STORAGE(__stack_t,            mainThreadCtx);
KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
KERNEL_STORAGE(io_future_t,          mainIdleFuture);
#if !defined(__CFA_NO_STATISTICS__)
KERNEL_STORAGE(__stats_t, mainProcStats);
#endif
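
// Note: KERNEL_STORAGE reserves raw, statically allocated storage named
// storage_<name> for each of these objects; they are then constructed in
// place during __kernel_startup, e.g.
//
//     mainCluster = (cluster *)&storage_mainCluster;
//     (*mainCluster){"Main Cluster", 0};
//
// presumably so the runtime avoids heap allocation during bootstrap.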

cluster              * mainCluster;
processor            * mainProcessor;
thread$              * mainThread;
__scheduler_RWLock_t * __scheduler_lock;

extern "C" {
        struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

extern size_t __page_size;
extern int __map_prot;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" ))) @= {
        NULL,                                                                                           // cannot use 0p
        NULL,
        false,
        { 1, false, false },
        0,
        { 0, 0 },
        NULL,
        #ifdef __CFA_WITH_VERIFY__
                false,
                0,
        #endif
};

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
        // No data needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
        extern "Cforall" {
                __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq @= {
                        .cpu_id : RSEQ_CPU_ID_UNINITIALIZED,
                };
        }
#else
        // No data needed
#endif

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
        __stack_t * storage;  // pointer to stack object
        void * base;          // base of stack
        void * limit;         // stack grows towards stack limit
        void * context;       // address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
        __stack_context_t ctx;
        CtxGet( ctx );
        this.base = ctx.FP;

        rlimit r;
        getrlimit( RLIMIT_STACK, &r);
        size_t size = r.rlim_cur;

        this.limit = (void *)(((intptr_t)this.base) - size);
        this.context = &storage_mainThreadCtx;
}
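
// A sketch of the layout this constructor recovers: with a downward-growing
// stack, the frame pointer of the running thread approximates the stack base
// and RLIMIT_STACK bounds its extent.
//
//     base  = current FP           (high address)
//     limit = base - rlim_cur      (low address; the stack grows towards it)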


//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void __kernel_startup(void) {
        /* paranoid */ verify( ! __preemption_enabled() );
        __cfadbg_print_safe(runtime_core, "Kernel : Starting\n");

        __cfa_dbg_global_clusters.list{ __get };
        __cfa_dbg_global_clusters.lock{};

        /* paranoid */ verify( verify_fwd_bck_rng() );

        // Initialize the global scheduler lock
        __scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
        (*__scheduler_lock){};

        // Initialize the main cluster
        mainCluster = (cluster *)&storage_mainCluster;
        (*mainCluster){"Main Cluster", 0};

        __cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n");

        // Construct the processor context of the main processor
        void ?{}(processorCtx_t & this, processor * proc) {
                (this.__cor){ "Processor" };
                this.__cor.starter = 0p;
                this.proc = proc;
        }

        void ?{}(processor & this) with( this ) {
                ( this.terminated ){};
                ( this.runner ){};
                init( this, "Main Processor", *mainCluster, 0p );
                kernel_thread = pthread_self();

                runner{ &this };
                __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
        }

        // Initialize the main processor and the main processor ctx
        // (the coroutine that contains the processing control flow)
        mainProcessor = (processor *)&storage_mainProcessor;
        (*mainProcessor){};

        mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
        mainProcessor->idle_wctx.ftr   = (io_future_t*)&storage_mainIdleFuture;
        /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );

        __cfa_io_start( mainProcessor );
        register_tls( mainProcessor );

        // Start by initializing the main thread
        // SKULLDUGGERY: the mainThread steals the process main thread
        // which will then be scheduled by the mainProcessor normally
        mainThread = (thread$ *)&storage_mainThread;
        current_stack_info_t info;
        info.storage = (__stack_t*)&storage_mainThreadCtx;
        (*mainThread){ &info };

        __cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n");

        // Initialize the global state variables
        __cfaabi_tls.this_processor = mainProcessor;
        __cfaabi_tls.this_thread    = mainThread;

        #if !defined( __CFA_NO_STATISTICS__ )
                __cfaabi_tls.this_stats = (__stats_t *)& storage_mainProcStats;
                __init_stats( __cfaabi_tls.this_stats );
        #endif
        mainProcessor->local_data = &__cfaabi_tls;

        // Enable preemption
        __kernel_alarm_startup();

        // Add the main thread to the ready queue
        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
        schedule_thread$(mainThread, UNPARK_LOCAL);

        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
        // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
        // mainThread is on the ready queue when this call is made.
        __kernel_first_resume( __cfaabi_tls.this_processor );


        // THE SYSTEM IS NOW COMPLETELY RUNNING

        __cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n");

        /* paranoid */ verify( ! __preemption_enabled() );
        enable_interrupts();
        /* paranoid */ verify( __preemption_enabled() );

}

static void __kernel_shutdown(void) {
        if(!cfa_main_returned) return;
        /* paranoid */ verify( __preemption_enabled() );
        disable_interrupts();
        /* paranoid */ verify( ! __preemption_enabled() );

        __cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n");

        // SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
        // When its coroutine terminates, it returns control to the mainThread,
        // which is currently here
        /* paranoid */ verify( !__atomic_load_n(&mainProcessor->do_terminate, __ATOMIC_ACQUIRE) );
        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
        __wake_proc( mainProcessor );
        __kernel_last_resume( __cfaabi_tls.this_processor );
        mainThread->self_cor.state = Halted;

        // THE SYSTEM IS NOW COMPLETELY STOPPED

        // Disable preemption
        __kernel_alarm_shutdown();

        #if !defined( __CFA_NO_STATISTICS__ )
                __stats_t * st = (__stats_t *)& storage_mainProcStats;
                __tally_stats(mainCluster->stats, st);
                if( 0 != mainProcessor->print_stats ) {
                        __print_stats( st, mainProcessor->print_stats, "Processor ", mainProcessor->name, (void*)mainProcessor );
                }
                #if defined(CFA_STATS_ARRAY)
                        __flush_stat( st, "Processor", mainProcessor );
                #endif
        #endif

        mainProcessor->local_data = 0p;

        unregister_tls( mainProcessor );
        __cfa_io_stop( mainProcessor );

        // Destroy the main processor and its context in reverse order of construction
        // These were manually constructed so we need to manually destroy them
        void ^?{}(processor & this) with( this ){
                deinit( this );

                /* paranoid */ verify( this.do_terminate == true );
                __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
        }

        ^(*mainProcessor){};

        // Final step, destroy the main thread since it is no longer needed

        // Since we provided a stack to this task, it will not destroy anything
        /* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
        ^(*mainThread){};

        ^(*mainCluster){};

        ^(*__scheduler_lock){};

        ^(__cfa_dbg_global_clusters.list){};
        ^(__cfa_dbg_global_clusters.lock){};

        __cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n");
}
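
// Note the local redefinition of ^?{}(processor &) above: mainProcessor was
// constructed manually into static storage, so its matching destruction only
// tears down the members via deinit() and must not free the storage itself.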

//=============================================================================================
// Kernel Initial Scheduling logic
//=============================================================================================

// Context invoker for processors
// This is the entry point for processors (kernel threads) *except* for the main processor
// It effectively constructs a coroutine by stealing the pthread stack
static void * __invoke_processor(void * arg) {
        #if !defined( __CFA_NO_STATISTICS__ )
                __stats_t local_stats;
                __init_stats( &local_stats );
                __cfaabi_tls.this_stats = &local_stats;
        #endif

        processor * proc = (processor *) arg;
        __cfaabi_tls.this_processor = proc;
        __cfaabi_tls.this_thread    = 0p;
        __cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1];
        proc->local_data = &__cfaabi_tls;

        __cfa_io_start( proc );
        register_tls( proc );

        // used for idle sleep when io_uring is present
        io_future_t future;
        eventfd_t idle_buf;
        proc->idle_wctx.ftr = &future;
        proc->idle_wctx.rdbuf = &idle_buf;


        // SKULLDUGGERY: We want to create a context for the processor coroutine
        // which is needed for the 2-step context switch. However, there is no reason
        // to waste the perfectly valid stack created by pthread.
        current_stack_info_t info;
        __stack_t ctx;
        info.storage = &ctx;
        (proc->runner){ proc, &info };

        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

        // Set global state
        __cfaabi_tls.this_thread = 0p;

        // We now have a proper context from which to schedule threads
        __cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

        // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
        // resume it to start it like we normally would; it would just context switch
        // back to here. Instead, directly call the main since we are already on the
        // appropriate stack.
        get_coroutine(proc->runner)->state = Active;
        main( proc->runner );
        get_coroutine(proc->runner)->state = Halted;

        // Main routine of the core returned, the core is now fully terminated
        __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);

        #if !defined(__CFA_NO_STATISTICS__)
                __tally_stats(proc->cltr->stats, &local_stats);
                if( 0 != proc->print_stats ) {
                        __print_stats( &local_stats, proc->print_stats, "Processor ", proc->name, (void*)proc );
                }
                #if defined(CFA_STATS_ARRAY)
                        __flush_stat( &local_stats, "Processor", proc );
                #endif
        #endif

        proc->local_data = 0p;

        unregister_tls( proc );
        __cfa_io_stop( proc );

        return 0p;
}
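
// The direct call to main( proc->runner ) above stands in for the usual
// resume: because the runner coroutine reuses the pthread's own stack, a
// resume would merely context switch back to this exact point. Conceptually:
//
//     state = Active;       // pretend the coroutine was resumed
//     main( proc->runner ); // run the scheduling loop on this very stack
//     state = Halted;       // pretend it terminated normally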

static void __kernel_first_resume( processor * this ) {
        thread$ * src = mainThread;
        coroutine$ * dst = get_coroutine(this->runner);

        /* paranoid */ verify( ! __preemption_enabled() );

        __cfaabi_tls.this_thread->curr_cor = dst;
        __stack_prepare( &dst->stack, DEFAULT_STACK_SIZE );
        __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

        /* paranoid */ verify( ! __preemption_enabled() );

        dst->last = &src->self_cor;
        dst->starter = dst->starter ? dst->starter : &src->self_cor;

        // make sure the current state is still correct
        /* paranoid */ verify(src->state == Ready);
        src->corctx_flag = true;

        // context switch to specified coroutine
        verify( dst->context.SP );
        __cfactx_switch( &src->context, &dst->context );
        // when __cfactx_switch returns we are back in the src coroutine

        mainThread->curr_cor = &mainThread->self_cor;

        // make sure the current state has been updated
        /* paranoid */ verify(src->state == Active);

        /* paranoid */ verify( ! __preemption_enabled() );
}
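
// Unlike a regular resume, __kernel_first_resume starts the runner coroutine
// with __cfactx_start on a freshly prepared stack and only then switches to
// it with __cfactx_switch. When that switch eventually returns, the main
// thread's context has been captured as the current UNIX context, which is
// the trick described at the call site in __kernel_startup.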

// KERNEL_ONLY
static void __kernel_last_resume( processor * this ) {
        coroutine$ * src = &mainThread->self_cor;
        coroutine$ * dst = get_coroutine(this->runner);

        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( dst->starter == src );
        /* paranoid */ verify( dst->context.SP );

        // SKULLDUGGERY: in debug, the processors check that the
        // stack is still within its limits after running a thread.
        // That check doesn't make sense if we context switch to the processor using the
        // coroutine semantics. Since this is a special case, use the current context
        // info to populate these fields.
        __cfaabi_dbg_debug_do(
                __stack_context_t ctx;
                CtxGet( ctx );
                mainThread->context.SP = ctx.SP;
                mainThread->context.FP = ctx.FP;
        )

        // context switch to the processor
        __cfactx_switch( &src->context, &dst->context );
}


//=============================================================================================
// Kernel Object Constructors logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Main thread construction
static void ?{}( coroutine$ & this, current_stack_info_t * info) with( this ) {
        stack.storage = info->storage;
        with(*stack.storage) {
                limit     = info->limit;
                base      = info->base;
        }
        __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
        *istorage |= 0x1;
        name = "Main Thread";
        state = Start;
        starter = 0p;
        last = 0p;
        cancellation = 0p;
}
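
// The "*istorage |= 0x1" above tags the low bit of the storage pointer to
// record that the stack was user supplied, so destruction skips freeing it;
// the shutdown path checks for exactly this tagged value:
//
//     verify( mainThread->self_cor.stack.storage
//             == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx) | 0x1) );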

static void ?{}( thread$ & this, current_stack_info_t * info) with( this ) {
        ticket = TICKET_RUNNING;
        state = Start;
        self_cor{ info };
        curr_cor = &self_cor;
        curr_cluster = mainCluster;
        self_mon.owner = &this;
        self_mon.recursion = 1;
        self_mon_p = &self_mon;
        link.next = 0p;
        link.ts   = -1llu;
        preferred = ready_queue_new_preferred();
        last_proc = 0p;
        random_state = __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl();
        #if defined( __CFA_WITH_VERIFY__ )
                canary = 0x0D15EA5E0D15EA5Ep;
        #endif

        node.next = 0p;
        node.prev = 0p;
        doregister(curr_cluster, this);

        monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor
// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
        (this.__cor){ info };
        this.proc = proc;
}

static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT) with( this ) {
        this.name = name;
        this.cltr = &_cltr;
        this.rdq.its = 0;
        this.rdq.itr = 0;
        this.rdq.id  = 0;
        this.rdq.target = MAX;
        this.rdq.last = MAX;
        this.rdq.cpu = 0;
        // this.rdq.cutoff = 0ull;
        do_terminate = false;
        preemption_alarm = 0p;
        pending_preemption = false;

        this.io.ctx = 0p;
        this.io.pending = false;
        this.io.dirty   = false;

        this.init.thrd = initT;

        this.local_data = 0p;

        idle_wctx.evfd = eventfd(0, 0);
        if (idle_wctx.evfd < 0) {
                abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno));
        }

        idle_wctx.sem = 0;
        idle_wctx.wake_time = 0;

        // File descriptors 0 and 1 are reserved for standard input and output,
        // so they can serve as sentinel values with idle_wctx.
        /* paranoid */ verify( idle_wctx.evfd != 0 );
        /* paranoid */ verify( idle_wctx.evfd != 1 );

        #if !defined(__CFA_NO_STATISTICS__)
                print_stats = 0;
                print_halts = false;
        #endif

        __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
}

// Not a dtor, it just preps the destruction but should not destroy members
static void deinit(processor & this) {
        close(this.idle_wctx.evfd);
}

void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
        ( this.terminated ){};
        ( this.runner ){};

        disable_interrupts();
                init( this, name, _cltr, initT );
        enable_interrupts();

        __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);

        this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
}

void ?{}(processor & this, const char name[], cluster & _cltr) {
        (this){name, _cltr, 0p};
}
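
// A minimal usage sketch (illustrative only; the cluster constructor
// arguments below are assumed from the signature further down): declaring a
// processor on a cluster spawns its kernel thread in the constructor above,
// and the destructor requests termination and joins it.
//
//     io_context_params params;        // assumed default-constructible
//     cluster clu = { "my cluster", 10`ms, 0, params };
//     processor procs[2] = { { "p0", clu }, { "p1", clu } };
//     // ... threads created on clu are scheduled on p0/p1 ...
//     // destructors run in reverse order at end of scope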

extern size_t __page_size;
void ^?{}(processor & this) with( this ){
        /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
        __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);

        __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
        __disable_interrupts_checked();
                __wake_proc( &this );
        __enable_interrupts_checked();

        wait( terminated );
        /* paranoid */ verify( active_processor() != &this);

        __destroy_pthread( kernel_thread, this.stack, 0p );

        disable_interrupts();
                deinit( this );
        enable_interrupts();
}

//-----------------------------------------------------------------------------
// Cluster
static void ?{}(__cluster_proc_list & this) {
        this.fdw   = 0p;
        this.idle  = 0;
        this.total = 0;
}

void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
        this.name = name;
        this.preemption_rate = preemption_rate;
        this.sched.readyQ.data = 0p;
        this.sched.readyQ.tscs = 0p;
        this.sched.readyQ.count = 0;
        this.sched.io.tscs = 0p;
        this.sched.caches = 0p;

        #if !defined(__CFA_NO_STATISTICS__)
                print_stats = 0;
                stats = alloc();
                __init_stats( stats );
        #endif

        threads{ __get };

        io.arbiter = create();
        io.params = io_params;

        doregister(this);

        // Lock the RWlock so no-one pushes/pops while we are changing the queue
        disable_interrupts();
        uint_fast32_t last_size = ready_mutate_lock();

                // Adjust the ready queue size
                ready_queue_grow( &this );

        // Unlock the RWlock
        ready_mutate_unlock( last_size );
        enable_interrupts( false ); // Don't poll, could be in main cluster
}

void ^?{}(cluster & this) {
        destroy(this.io.arbiter);

        // Lock the RWlock so no-one pushes/pops while we are changing the queue
        disable_interrupts();
        uint_fast32_t last_size = ready_mutate_lock();

                // Adjust the ready queue size
                ready_queue_shrink( &this );

        // Unlock the RWlock
        ready_mutate_unlock( last_size );

        ready_queue_close( &this );
        /* paranoid */ verify( this.sched.readyQ.data == 0p );
        /* paranoid */ verify( this.sched.readyQ.tscs == 0p );
        /* paranoid */ verify( this.sched.readyQ.count == 0 );
        /* paranoid */ verify( this.sched.io.tscs == 0p );
        /* paranoid */ verify( this.sched.caches == 0p );

        enable_interrupts( false ); // Don't poll, could be in main cluster


        #if !defined(__CFA_NO_STATISTICS__)
                if( 0 != this.print_stats ) {
                        __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
                }
                #if defined(CFA_STATS_ARRAY)
                        __flush_stat( this.stats, "Cluster", &this );
                #endif
                free( this.stats );
        #endif

        unregister(this);
}

//=============================================================================================
// Miscellaneous Initialization
//=============================================================================================
//-----------------------------------------------------------------------------
// Global Queues
static void doregister( cluster     & cltr ) {
        lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
        push_front( __cfa_dbg_global_clusters.list, cltr );
        unlock    ( __cfa_dbg_global_clusters.lock );
}

static void unregister( cluster     & cltr ) {
        lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
        remove( __cfa_dbg_global_clusters.list, cltr );
        unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, thread$ & thrd ) {
        lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
        cltr->nthreads += 1;
        push_front(cltr->threads, thrd);
        unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, thread$ & thrd ) {
        lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
        remove(cltr->threads, thrd );
        cltr->nthreads -= 1;
        unlock(cltr->thread_list_lock);
}

static void register_tls( processor * this ) {
        // Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
        uint_fast32_t last_size;
        [this->unique_id, last_size] = ready_mutate_register();

                this->rdq.cpu = __kernel_getcpu();

                this->cltr->procs.total += 1u;
                insert_last(this->cltr->procs.actives, *this);

                // Adjust the ready queue size
                ready_queue_grow( this->cltr );

        // Unlock the RWlock
        ready_mutate_unlock( last_size );
}


static void unregister_tls( processor * this ) {
        // Lock the RWlock so no-one pushes/pops while we are changing the queue
        uint_fast32_t last_size = ready_mutate_lock();
                this->cltr->procs.total -= 1u;
                remove(*this);

                // clear the cluster so nothing gets pushed to local queues
                cluster * cltr = this->cltr;
                this->cltr = 0p;

                // Adjust the ready queue size
                ready_queue_shrink( cltr );

        // Unlock the RWlock and unregister: we don't need the read_lock any more
        ready_mutate_unregister( this->unique_id, last_size );
}

static void check( int ret, const char func[] ) {
        if ( ret ) {                                                                            // pthread routines return errno values
                abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
        } // if
} // check

void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
        pthread_attr_t attr;

        check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute

        size_t stacksize = max( PTHREAD_STACK_MIN, DEFAULT_STACK_SIZE );

        void * stack;
        #if CFA_PROCESSOR_USE_MMAP
                stacksize = ceiling( stacksize, __page_size ) + __page_size;
                stack = mmap(0p, stacksize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
                if(stack == ((void*)-1)) {
                        abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
                }
                if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
                        abort( "pthread stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
                } // if
        #else
                __cfaabi_dbg_debug_do(
                        stack = memalign( __page_size, stacksize + __page_size );
                        // pthread has no mechanism to create the guard page in user supplied stack.
                        if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
                                abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
                        } // if
                );
                __cfaabi_dbg_no_debug_do(
                        stack = malloc( stacksize );
                );
        #endif

        check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
        check( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
        return stack;
}
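
// Stack layout produced above when CFA_PROCESSOR_USE_MMAP is enabled (and by
// the memalign path in debug builds): one extra page is allocated and the
// lowest page is made PROT_NONE, so running off the end of the
// downward-growing stack faults immediately instead of silently corrupting
// adjacent memory.
//
//     low address  [ guard page (PROT_NONE) | usable stack ... ]  high address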

void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ) {
        int err = pthread_join( pthread, retval );
        if( err != 0 ) abort("KERNEL ERROR: joining pthread %p caused error %s\n", (void*)pthread, strerror(err));

        #if CFA_PROCESSOR_USE_MMAP
                pthread_attr_t attr;

                check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute

                size_t stacksize;
                // default stack size, normally defined by shell limit
                check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
                assert( stacksize >= PTHREAD_STACK_MIN );
                stacksize += __page_size;

                if(munmap(stack, stacksize) == -1) {
                        abort( "pthread stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
                }
        #else
                __cfaabi_dbg_debug_do(
                        // pthread has no mechanism to create the guard page in user supplied stack.
                        if ( mprotect( stack, __page_size, __map_prot ) == -1 ) {
                                abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
                        } // if
                );
                free( stack );
        #endif
}

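// Sanity check for the reversible per-thread RNG: seed the forward stream,
// draw ten values forward, rewind once, and confirm the backward stream
// replays them in reverse. The multiplier 25214903917 (0x5DEECE66D) is the
// LCG constant also used by POSIX drand48.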
#if defined(__CFA_WITH_VERIFY__)
static bool verify_fwd_bck_rng(void) {
        __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&verify_fwd_bck_rng);

        unsigned values[10];
        for(i; 10) {
                values[i] = __tls_rand_fwd();
        }

        __tls_rand_advance_bck();

        for ( i; 9 -~= 0 ) {
                if(values[i] != __tls_rand_bck()) {
                        return false;
                }
        }

        return true;
}
#endif