//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#if !defined(__cforall_thread__)
	#error kernel/private.hfa should only be included in libcfathread source
#endif

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

extern "C" {
	#if defined(CFA_HAVE_LINUX_LIBRSEQ)
		#include <rseq/rseq.h>
	#elif defined(CFA_HAVE_LINUX_RSEQ_H)
		#include <linux/rseq.h>
	#else
		#ifndef _GNU_SOURCE
		#error kernel/private requires gnu_source
		#endif
		#include <sched.h>
	#endif
}

// Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
// #define CFA_WANT_IO_URING_IDLE

// Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
#if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
	#if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
		#define CFA_WITH_IO_URING_IDLE
	#endif
#endif

//-----------------------------------------------------------------------------
// Scheduler
extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts( bool poll = true );
}

void schedule_thread$( thread$ *, unpark_hint hint ) __attribute__((nonnull (1)));

extern bool __preemption_enabled();

enum {
	PREEMPT_NORMAL    = 0,
	PREEMPT_TERMINATE = 1,
	PREEMPT_IO        = 2,
};
static inline void __disable_interrupts_checked() {
	/* paranoid */ verify( __preemption_enabled() );
	disable_interrupts();
	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	enable_interrupts( poll );
	/* paranoid */ verify( __preemption_enabled() );
}
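
// Example usage (illustrative only, not code from this file): the checked variants
// wrap a non-preemptible critical section and verify the preemption state on both
// sides, catching unbalanced disable/enable pairs in debug builds.
//   __disable_interrupts_checked();
//       ... code that must not be preempted ...
//   __enable_interrupts_checked();   // polls for deferred preemption by default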

// Release/wake-up the resources owned by the finishing thread
void __thread_finish( thread$ * thrd );

//-----------------------------------------------------------------------------
// Hardware

#if defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No data needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	extern "Cforall" {
		extern __attribute__((aligned(64))) __thread volatile struct rseq __cfaabi_rseq;
	}
#else
	// No data needed
#endif

static inline int __kernel_getcpu() {
	/* paranoid */ verify( ! __preemption_enabled() );
	#if defined(CFA_HAVE_LINUX_LIBRSEQ)
		return rseq_current_cpu();
	#elif defined(CFA_HAVE_LINUX_RSEQ_H)
		int r = __cfaabi_rseq.cpu_id;
		/* paranoid */ verify( r >= 0 );
		return r;
	#else
		return sched_getcpu();
	#endif
}

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t &);
static inline coroutine$ * get_coroutine(processorCtx_t & this) { return &this.self; }

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
	extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block

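// Illustrative sketch of the park/unpark handshake these ticket values support
// (assumed semantics, not code from this file): an unpark atomically bumps the
// ticket; if the target was already TICKET_BLOCKED it is rescheduled, otherwise
// the ticket becomes TICKET_UNBLOCK and the target skips its next block.
//   int old = __atomic_fetch_add( &thrd->ticket, 1, __ATOMIC_SEQ_CST );
//   if( old == TICKET_BLOCKED ) schedule_thread$( thrd, /* some unpark_hint */ hint );
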
//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct thread$ & thrd );
void unregister( struct cluster * cltr, struct thread$ & thrd );

//-----------------------------------------------------------------------------
// I/O
io_arbiter$ * create(void);
void destroy(io_arbiter$ *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-Free registering/unregistering of threads
// Register a processor with a given cluster and get its unique id in return
unsigned register_proc_id( void );

// Unregister a processor from a given cluster using its id
void unregister_proc_id( unsigned );

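// Example (illustrative only): the two calls pair around the lifetime of a
// processor's participation in the reader-writer lock below.
//   unsigned id = register_proc_id();   // allocates this thread's slot in the lock
//   ... id is visible as kernelTLS().sched_id while registered ...
//   unregister_proc_id( id );
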
//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
// i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);

	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			Pause();
	}
	/* paranoid */ verify(*ll);
	/* paranoid */ verify( ! __preemption_enabled() );
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);

	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}

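// Example usage (illustrative only): protect a short critical section, assuming
// the caller has already disabled preemption as the verifies above require.
//   static volatile bool lock = false;
//   __atomic_acquire( &lock );   // spin (test-and-test-and-set) until owned
//   ... critical section ...
//   __atomic_unlock ( &lock );   // store-release publishes the writes first
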
//-----------------------------------------------------------------------
// Reader-writer lock protecting the ready-queues
// While this lock is mostly generic, some aspects have been hard-coded
// for the ready-queue, for simplicity and performance.
union __attribute__((aligned(64))) __scheduler_RWLock_t {
	struct {
		__attribute__((aligned(64))) char padding;

		// total cachelines allocated
		__attribute__((aligned(64))) unsigned int max;

		// cachelines currently in use
		volatile unsigned int alloc;

		// cachelines ready to iterate over
		// (!= alloc when a thread is in the second half of doregister)
		volatile unsigned int ready;

		// writer lock
		volatile bool write_lock;

		// data pointer
		volatile bool * volatile * data;
	} lock;
	char pad[192];
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
		Pause();

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &kernelTLS().sched_lock );
	/* paranoid */ verify(kernelTLS().sched_lock);

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		kernelTLS().in_sched_lock = true;
	#endif
}

static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
	/* paranoid */ verify( kernelTLS().sched_lock );
	/* paranoid */ verify( kernelTLS().in_sched_lock );
	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		kernelTLS().in_sched_lock = false;
	#endif
	__atomic_unlock(&kernelTLS().sched_lock);
}
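
// Example (illustrative only): a reader-side critical section. Preemption must
// already be disabled, matching the verifies above.
//   ready_schedule_lock();
//       ... schedule on the ready queues, no structural changes ...
//   ready_schedule_unlock();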

#ifdef __CFA_WITH_VERIFY__
	static inline bool ready_schedule_islocked(void) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
		return kernelTLS().sched_lock;
	}

	static inline bool ready_mutate_islocked() {
		return __scheduler_lock.lock.write_lock;
	}
#endif


//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );

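// Example (illustrative only): a writer-side critical section. The value returned
// by the lock must be handed back to the matching unlock.
//   uint_fast32_t last_size = ready_mutate_lock();
//       ... structural changes, e.g. ready_queue_grow / ready_queue_shrink ...
//   ready_mutate_unlock( last_size );
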
//-----------------------------------------------------------------------
// Lock-Free registering/unregistering of threads
// Register a processor with a given cluster and get its unique id in return
// For convenience, also acquires the lock
static inline [unsigned, uint_fast32_t] ready_mutate_register() {
	unsigned id = register_proc_id();
	uint_fast32_t last = ready_mutate_lock();
	return [id, last];
}

// Unregister a processor from a given cluster using its id
// Assumes the lock is acquired
static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
	ready_mutate_unlock( last_s );
	unregister_proc_id( id );
}

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	lock( this.lock __cfaabi_dbg_ctx2 );

	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline bool try_lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	if(try_lock( this.lock __cfaabi_dbg_ctx2 )) {
		// success
		/* paranoid */ verify( ! __preemption_enabled() );
		return true;
	}

	// failed to lock
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return false;
}

static inline void unlock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	unlock(this.lock);

	// Release the global lock, which we acquired when locking
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
}
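
// Example (illustrative sketch, assuming the cluster's processor list field is
// named procs): on try_lock success, the matching unlock releases both the idle
// lock and the global RWlock acquired on entry; on failure nothing is held.
//   if( try_lock( cltr->procs ) ) {
//       ... inspect/modify the idle processor list ...
//       unlock( cltr->procs );
//   }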

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint);

//-----------------------------------------------------------------------
// pop thread from the local queues of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from any ready queue of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

//-----------------------------------------------------------------------
// search all ready queues of a cluster for any thread
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);

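// Example (illustrative only, ordering assumed): a processor looking for work can
// try the pops from cheapest to most thorough before deciding to idle; only
// pop_search is guaranteed not to miss a previously added thread.
//   thread$ * thrd = pop_fast( cltr );            // local queues first
//   if( 0p == thrd ) thrd = pop_slow  ( cltr );   // then any ready queue
//   if( 0p == thrd ) thrd = pop_search( cltr );   // exhaustive search last
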
//-----------------------------------------------------------------------
// get preferred ready queue for a new thread
unsigned ready_queue_new_preferred();

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

//-----------------------------------------------------------------------
// Close the ready queue of a cluster
void ready_queue_close(struct cluster * cltr);

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //