source: libcfa/src/concurrency/kernel_private.hfa @ 7d0ebd0

Last change on this file was 7d0ebd0, checked in by Thierry Delisle <tdelisle@…>, 9 months ago

Processors should now correctly be unconditionally woken up on termination

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#if !defined(__cforall_thread__)
    #error kernel_private.hfa should only be included in libcfathread source
#endif

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

extern "C" {
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    #include <rseq/rseq.h>
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
    #include <linux/rseq.h>
#else
    #ifndef _GNU_SOURCE
    #error kernel_private requires gnu_source
    #endif
    #include <sched.h>
#endif
}

// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
#define CFA_WANT_IO_URING_IDLE

// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
#if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
    #if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
        #define CFA_WITH_IO_URING_IDLE
    #endif
#endif

//-----------------------------------------------------------------------------
// Scheduler
extern "C" {
    void disable_interrupts() OPTIONAL_THREAD;
    void enable_interrupts( bool poll = true );
}

void schedule_thread$( thread$ *, unpark_hint hint ) __attribute__((nonnull (1)));

extern bool __preemption_enabled();

static inline void __disable_interrupts_checked() {
    /* paranoid */ verify( __preemption_enabled() );
    disable_interrupts();
    /* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
    /* paranoid */ verify( ! __preemption_enabled() );
    enable_interrupts( poll );
    /* paranoid */ verify( __preemption_enabled() );
}
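
// Example (illustrative sketch only, not part of this header): the checked wrappers
// are meant to bracket a short region that must run with preemption disabled;
// 'touch_scheduler_state' is a hypothetical helper.
//
//     __disable_interrupts_checked();     // asserts preemption was enabled, then disables it
//     touch_scheduler_state();            // runs with preemption disabled
//     __enable_interrupts_checked();      // asserts preemption was disabled, then re-enables it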

// release/wake-up the following resources
void __thread_finish( thread$ * thrd );

//-----------------------------------------------------------------------------
// Hardware

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    // No data needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
    extern "Cforall" {
        extern __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq;
    }
#else
    // No data needed
#endif

static inline int __kernel_getcpu() {
    /* paranoid */ verify( ! __preemption_enabled() );
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    return rseq_current_cpu();
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
    int r = __cfaabi_rseq.cpu_id;
    /* paranoid */ verify( r >= 0 );
    return r;
#else
    return sched_getcpu();
#endif
}

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
    void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
    extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
    extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block

//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct thread$ & thrd );
void unregister( struct cluster * cltr, struct thread$ & thrd );

//-----------------------------------------------------------------------------
// I/O
$io_arbiter * create(void);
void destroy($io_arbiter *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
unsigned register_proc_id( void );

// Unregister a processor from a given cluster using its id, getting back the original pointer
void unregister_proc_id( unsigned );

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
    while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
        while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
            Pause();
    }
    /* paranoid */ verify(*ll);
}

// Non-Blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
    return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
    /* paranoid */ verify(*ll);
    __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
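
// Example (illustrative sketch only): together these primitives form a
// test-and-test-and-set spinlock over a plain bool; 'flag' is a hypothetical lock variable.
//
//     static volatile bool flag = false;
//
//     __atomic_acquire( &flag );               // spin until the flag is ours
//     /* ... critical section ... */
//     __atomic_unlock( &flag );                // hand it back
//
//     if( __atomic_try_acquire( &flag ) ) {    // non-blocking attempt
//         /* ... critical section ... */
//         __atomic_unlock( &flag );
//     }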

//-----------------------------------------------------------------------
// Reader-Writer lock protecting the ready-queues
// While this lock is mostly generic, some aspects have been
// hard-coded to the ready-queue for simplicity and performance.
struct __scheduler_RWLock_t {
    // total cachelines allocated
    unsigned int max;

    // cachelines currently in use
    volatile unsigned int alloc;

    // cachelines ready to iterate over
    // (!= to alloc when thread is in second half of doregister)
    volatile unsigned int ready;

    // writer lock
    volatile bool write_lock;

    // data pointer
    volatile bool * volatile * data;
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t * __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ! kernelTLS().in_sched_lock );
    /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

    // Step 1 : make sure no writer is in the middle of the critical section
    while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
        Pause();

    // Fence needed because we don't want to start trying to acquire the lock
    // before we read a false.
    // Not needed on x86
    // std::atomic_thread_fence(std::memory_order_seq_cst);

    // Step 2 : acquire our local lock
    __atomic_acquire( &kernelTLS().sched_lock );
    /* paranoid */ verify(kernelTLS().sched_lock);

    #ifdef __CFA_WITH_VERIFY__
        // Debug, check if this is owned for reading
        kernelTLS().in_sched_lock = true;
    #endif
}

static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
    /* paranoid */ verify( kernelTLS().sched_lock );
    /* paranoid */ verify( kernelTLS().in_sched_lock );
    #ifdef __CFA_WITH_VERIFY__
        // Debug, check if this is owned for reading
        kernelTLS().in_sched_lock = false;
    #endif
    __atomic_unlock(&kernelTLS().sched_lock);
}
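
// Example (illustrative sketch only): the reader side brackets any use of the
// ready queue for scheduling; preemption must already be disabled when taking it.
//
//     disable_interrupts();
//     ready_schedule_lock();
//     /* ... push/pop threads on the ready queue ... */
//     ready_schedule_unlock();
//     enable_interrupts();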

#ifdef __CFA_WITH_VERIFY__
    static inline bool ready_schedule_islocked(void) {
        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
        return kernelTLS().sched_lock;
    }

    static inline bool ready_mutate_islocked() {
        return __scheduler_lock->write_lock;
    }
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );

//-----------------------------------------------------------------------
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
// For convenience, also acquires the lock
static inline [unsigned, uint_fast32_t] ready_mutate_register() {
    unsigned id = register_proc_id();
    uint_fast32_t last = ready_mutate_lock();
    return [id, last];
}

// Unregister a processor from a given cluster using its id, getting back the original pointer
// assumes the lock is acquired
static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
    ready_mutate_unlock( last_s );
    unregister_proc_id( id );
}
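
// Example (illustrative sketch only): the write lock returns a value that must be
// handed back on unlock, which the register/unregister helpers do on the caller's
// behalf together with processor-id management.
//
//     unsigned id; uint_fast32_t last;
//     [id, last] = ready_mutate_register();     // register + take the write lock
//     /* ... add or remove ready queues ... */
//     ready_mutate_unregister( id, last );      // release the lock + unregister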

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
    /* paranoid */ verify( ! __preemption_enabled() );

    // Start by locking the global RWlock so that we know no-one is
    // adding/removing processors while we mess with the idle lock
    ready_schedule_lock();

    lock( this.lock __cfaabi_dbg_ctx2 );

    /* paranoid */ verify( ! __preemption_enabled() );
}

static inline bool try_lock(__cluster_proc_list & this) {
    /* paranoid */ verify( ! __preemption_enabled() );

    // Start by locking the global RWlock so that we know no-one is
    // adding/removing processors while we mess with the idle lock
    ready_schedule_lock();

    if(try_lock( this.lock __cfaabi_dbg_ctx2 )) {
        // success
        /* paranoid */ verify( ! __preemption_enabled() );
        return true;
    }

    // failed to lock
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
    return false;
}

static inline void unlock(__cluster_proc_list & this) {
    /* paranoid */ verify( ! __preemption_enabled() );

    unlock(this.lock);

    // Release the global lock, which we acquired when locking
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
}
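
// Example (illustrative sketch only): callers see a single lock/unlock pair; the
// global reader lock is taken underneath and released again on unlock. 'procs'
// stands for some cluster's __cluster_proc_list and preemption must be disabled.
//
//     lock( procs );       // also takes ready_schedule_lock()
//     /* ... inspect or modify the idle-processor list ... */
//     unlock( procs );     // also releases ready_schedule_lock()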

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint);

//-----------------------------------------------------------------------
// pop thread from the local queues of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from any ready queue of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

//-----------------------------------------------------------------------
// search all ready queues of a cluster for any thread
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);
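
// Example (illustrative sketch only): a scheduler typically tries the pops from
// cheapest to most thorough before deciding to idle; 'cltr' is a hypothetical
// cluster pointer.
//
//     thread$ * thrd = pop_fast( cltr );          // local queues first
//     if( ! thrd ) thrd = pop_slow( cltr );       // then any queue of the cluster
//     if( ! thrd ) thrd = pop_search( cltr );     // exhaustive search before idling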
//-----------------------------------------------------------------------
// get preferred ready-queue for a new thread
unsigned ready_queue_new_preferred();

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);


// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //