source: libcfa/src/concurrency/kernel/private.hfa @ 1756e08

Last change on this file since 1756e08 was 2284d20, checked in by Thierry Delisle <tdelisle@…>, 21 months ago

Added some verifys to make sure atomic_acquire isn't used outside the kernel.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#if !defined(__cforall_thread__)
	#error kernel/private.hfa should only be included in libcfathread source
#endif

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

extern "C" {
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	#include <rseq/rseq.h>
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	#include <linux/rseq.h>
#else
	#ifndef _GNU_SOURCE
	#error kernel/private requires gnu_source
	#endif
	#include <sched.h>
#endif
}

// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
// #define CFA_WANT_IO_URING_IDLE

// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
#if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
	#if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
		#define CFA_WITH_IO_URING_IDLE
	#endif
#endif
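
// Illustrative note (a sketch, not part of this header): opting in is a build-time
// choice; uncommenting the CFA_WANT_IO_URING_IDLE define above or passing it on the
// compile line has the same effect, e.g.
//     cfa -DCFA_WANT_IO_URING_IDLE ...
// CFA_WITH_IO_URING_IDLE is then defined only when <linux/io_uring.h> also provides
// a usable read opcode (IORING_OP_READ, or READV together with IORING_OP_READV).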

//-----------------------------------------------------------------------------
// Scheduler
extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts( bool poll = true );
}

void schedule_thread$( thread$ *, unpark_hint hint ) __attribute__((nonnull (1)));

extern bool __preemption_enabled();

enum {
	PREEMPT_NORMAL    = 0,
	PREEMPT_TERMINATE = 1,
	PREEMPT_IO        = 2,
};

static inline void __disable_interrupts_checked() {
	/* paranoid */ verify( __preemption_enabled() );
	disable_interrupts();
	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	enable_interrupts( poll );
	/* paranoid */ verify( __preemption_enabled() );
}
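
// Illustrative usage (a sketch, not part of the API): the checked variants bracket a
// region that must run without preemption, verifying the expected state on both sides:
//     __disable_interrupts_checked();
//     ... work that must not be preempted ...
//     __enable_interrupts_checked();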

// Release/wake-up the following resources
void __thread_finish( thread$ * thrd );

//-----------------------------------------------------------------------------
// Hardware

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No data needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	extern "Cforall" {
		extern __attribute__((aligned(64))) __thread volatile struct rseq __cfaabi_rseq;
	}
#else
	// No data needed
#endif

static inline int __kernel_getcpu() {
	/* paranoid */ verify( ! __preemption_enabled() );
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	return rseq_current_cpu();
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	int r = __cfaabi_rseq.cpu_id;
	/* paranoid */ verify( r >= 0 );
	return r;
#else
	return sched_getcpu();
#endif
}

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t &);
static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
	extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block
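
// Illustrative sketch only: these tickets mediate the park/unpark race. Assuming a
// hypothetical 'ticket' field on thread$, unparking a thread that has not parked yet
// moves RUNNING -> UNBLOCK so its next block request is ignored, e.g.
//     int expected = TICKET_RUNNING;
//     if( !__atomic_compare_exchange_n( &thrd->ticket, &expected, TICKET_UNBLOCK,
//                                       false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
//         ... thread is already TICKET_BLOCKED: wake it instead ...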

//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct thread$ & thrd );
void unregister( struct cluster * cltr, struct thread$ & thrd );

//-----------------------------------------------------------------------------
// I/O
io_arbiter$ * create(void);
void destroy(io_arbiter$ *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
unsigned register_proc_id( void );

// Unregister a processor from a given cluster using its id
void unregister_proc_id( unsigned );

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);

	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			Pause();
	}
	/* paranoid */ verify(*ll);
	/* paranoid */ verify( ! __preemption_enabled() );
}

// Non-Blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);

	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
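
// Illustrative usage (a sketch, not part of the API): a critical section pairs the
// acquire and unlock on the same flag, with preemption already disabled by the caller:
//     static volatile bool flag = false;   // hypothetical lock instance
//     __atomic_acquire( &flag );
//     ... short critical section ...
//     __atomic_unlock( &flag );
// The acquire loop is test-and-test-and-set: the relaxed inner load spins locally in
// cache, so only the outer exchange generates cross-core write traffic.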

//-----------------------------------------------------------------------
// Reader-Writer lock protecting the ready-queues
// While this lock is mostly generic, some aspects have been hard-coded
// for the ready-queue, for simplicity and performance.
union __attribute__((aligned(64))) __scheduler_RWLock_t {
	struct {
		__attribute__((aligned(64))) char padding;

		// total cachelines allocated
		__attribute__((aligned(64))) unsigned int max;

		// cachelines currently in use
		volatile unsigned int alloc;

		// cachelines ready to iterate over
		// (!= to alloc when thread is in second half of doregister)
		volatile unsigned int ready;

		// writer lock
		volatile bool write_lock;

		// data pointer
		volatile bool * volatile * data;
	} lock;
	char pad[192];
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
		Pause();

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &kernelTLS().sched_lock );
	/* paranoid */ verify(kernelTLS().sched_lock);

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		kernelTLS().in_sched_lock = true;
	#endif
}

static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
	/* paranoid */ verify( kernelTLS().sched_lock );
	/* paranoid */ verify( kernelTLS().in_sched_lock );
	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		kernelTLS().in_sched_lock = false;
	#endif
	__atomic_unlock(&kernelTLS().sched_lock);
}
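
// Illustrative usage (a sketch, not part of the API): readers bracket each
// ready-queue operation:
//     ready_schedule_lock();
//     push( cltr, thrd, hint );            // any ready-queue access
//     ready_schedule_unlock();
// Entry reads the shared write_lock flag and then takes the reader's own cacheline
// lock (kernelTLS().sched_lock), so uncontended readers never write to shared state.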

#ifdef __CFA_WITH_VERIFY__
	static inline bool ready_schedule_islocked(void) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
		return kernelTLS().sched_lock;
	}

	static inline bool ready_mutate_islocked() {
		return __scheduler_lock.lock.write_lock;
	}
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
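
// Illustrative usage (a sketch, not part of the API): a writer keeps the value
// returned by the lock and hands it back on unlock:
//     uint_fast32_t last = ready_mutate_lock();
//     ready_queue_grow( cltr );            // structural change to the ready queues
//     ready_mutate_unlock( last );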

//-----------------------------------------------------------------------
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return.
// For convenience, also acquires the write lock.
static inline [unsigned, uint_fast32_t] ready_mutate_register() {
	unsigned id = register_proc_id();
	uint_fast32_t last = ready_mutate_lock();
	return [id, last];
}

// Unregister a processor from a given cluster using its id.
// Assumes the write lock is acquired.
static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
	ready_mutate_unlock( last_s );
	unregister_proc_id( id );
}
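
// Illustrative usage (a sketch, not part of the API): the pair brackets a structural
// change made by a caller that does not yet hold a processor id:
//     unsigned id; uint_fast32_t last;
//     [id, last] = ready_mutate_register();
//     ... mutate the ready queues ...
//     ready_mutate_unregister( id, last );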

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	lock( this.lock __cfaabi_dbg_ctx2 );

	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline bool try_lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	if(try_lock( this.lock __cfaabi_dbg_ctx2 )) {
		// success
		/* paranoid */ verify( ! __preemption_enabled() );
		return true;
	}

	// failed to lock
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return false;
}

static inline void unlock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	unlock(this.lock);

	// Release the global lock, which we acquired when locking
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
}
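
// Illustrative usage (a sketch, not part of the API): the try_lock variant lets the
// idle path back off instead of spinning; the 'procs' field name is an assumption:
//     if( try_lock( cltr->procs ) ) {
//         ... adjust the idle processor list ...
//         unlock( cltr->procs );
//     }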

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint);

//-----------------------------------------------------------------------
// pop thread from the local queues of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from any ready queue of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

//-----------------------------------------------------------------------
// search all ready queues of a cluster for any thread
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);
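
// Illustrative usage (a sketch, not part of the API): schedulers typically try the
// cheap pop first and escalate only on failure, all under the reader lock:
//     thread$ * thrd = pop_fast( cltr );      // local queues, may fail spuriously
//     if( ! thrd ) thrd = pop_slow( cltr );   // any queue, may still fail spuriously
//     if( ! thrd ) thrd = pop_search( cltr ); // exhaustive; finds pre-existing threads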

//-----------------------------------------------------------------------
// get preferred ready queue for a new thread
unsigned ready_queue_new_preferred();

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

//-----------------------------------------------------------------------
// Close the ready queue of a cluster
void ready_queue_close(struct cluster * cltr);

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //