source: libcfa/src/concurrency/kernel/fwd.hfa@ baad96e

Last change on this file since baad96e was 8bc67cf, checked in by Peter A. Buhr <pabuhr@…>, 2 years ago

formatting, SKULLDUGGERY to remove spurious free-nonheap-object warning

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/fwd.hfa -- PUBLIC
// Fundamental code needed to implement threading M.E.S. algorithms.
//
// Author           : Thierry Delisle
// Created On       : Thu Jul 30 16:46:41 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include "bits/defs.hfa"
#include "bits/debug.hfa"

#ifdef __cforall
#include "bits/random.hfa"
#endif

struct thread$;
struct processor;
struct cluster;

enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };

#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
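
// Usage sketch (illustrative; the name 'mainProcessor' is hypothetical): the
// macro declares a static, suitably aligned raw buffer into which a T can
// later be constructed in place, e.g.
//   KERNEL_STORAGE( processor, mainProcessor );
//   // expands to: static char storage_mainProcessor[sizeof(processor)], aligned like processor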

#ifdef __cforall
extern "C" {
	extern "Cforall" {
		extern __attribute__((aligned(64))) __thread struct KernelThreadData {
			struct thread$ * volatile this_thread;
			struct processor * volatile this_processor;
			volatile bool sched_lock;

			struct {
				volatile unsigned short disable_count;
				volatile bool enabled;
				volatile bool in_progress;
			} preemption_state;

			PRNG_STATE_T random_state;

			struct {
				uint64_t fwd_seed;
				uint64_t bck_seed;
			} ready_rng;

			struct __stats_t * volatile this_stats;

			#ifdef __CFA_WITH_VERIFY__
				// Debug: check if the rwlock is owned for reading
				bool in_sched_lock;
				unsigned sched_id;
			#endif
		} __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" )));

		extern bool __preemption_enabled();

		static inline KernelThreadData & kernelTLS( void ) {
			/* paranoid */ verify( ! __preemption_enabled() );
			return __cfaabi_tls;
		}

		extern uintptr_t __cfatls_get( unsigned long int member );
		#define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))
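
		// Usage sketch (illustrative): read a single TLS member through the
		// public accessor instead of touching __cfaabi_tls directly, e.g.
		//   struct thread$ * me = publicTLS_get( this_thread );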

		static inline
		#ifdef __x86_64__ // 64-bit architecture
			uint64_t
		#else // 32-bit architecture
			uint32_t
		#endif // __x86_64__
		__tls_rand() {
			return PRNG_NAME( kernelTLS().random_state );
		}
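
		// Usage sketch (illustrative; 'num_queues' is hypothetical): the
		// per-thread PRNG is typically used for cheap, contention-free random
		// choices, e.g. picking a ready-queue at random:
		//   unsigned idx = __tls_rand() % num_queues;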

		static inline unsigned __tls_rand_fwd() {
			return LCGBI_fwd( kernelTLS().ready_rng.fwd_seed );
		}

		static inline unsigned __tls_rand_bck() {
			return LCGBI_bck( kernelTLS().ready_rng.bck_seed );
		}

		static inline void __tls_rand_advance_bck(void) {
			kernelTLS().ready_rng.bck_seed = kernelTLS().ready_rng.fwd_seed;
		}
	}

	extern void disable_interrupts();
	extern void enable_interrupts( bool poll = false );

	extern "Cforall" {
		enum unpark_hint { UNPARK_LOCAL, UNPARK_REMOTE };

		extern void park( void );
		extern void unpark( struct thread$ *, unpark_hint );
		static inline void unpark( struct thread$ * thrd ) { unpark(thrd, UNPARK_LOCAL); }

		static inline struct thread$ * active_thread () {
			struct thread$ * t = publicTLS_get( this_thread );
			/* paranoid */ verify( t );
			return t;
		}
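
		// Usage sketch (illustrative): park/unpark form the basic blocking
		// protocol; a thread publishes its handle and parks itself, then a
		// peer wakes it through the published handle, e.g.
		//   thread$ * self = active_thread();   // blocker: publish 'self' somewhere, then park()
		//   unpark( self );                     // waker: wake the published thread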

		extern bool force_yield( enum __Preemption_Reason );

		static inline void yield() {
			force_yield(__MANUAL_PREEMPTION);
		}

		// Yield: yield N times
		static inline void yield( size_t times ) {
			for ( times ) {
				yield();
			}
		}

		// Semaphore which only supports a single thread
		struct single_sem {
			struct thread$ * volatile ptr;
		};

		static inline {
			void ?{}(single_sem & this) {
				this.ptr = 0p;
			}

			void ^?{}(single_sem &) {}

			bool wait(single_sem & this) {
				for () {
					struct thread$ * expected = this.ptr;
					if (expected == 1p) {
						if (__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							return false;
						}
					}
					else {
						/* paranoid */ verify( expected == 0p );
						if (__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							park();
							return true;
						}
					}
				}
			}

			bool post(single_sem & this) {
				for () {
					struct thread$ * expected = this.ptr;
					if (expected == 1p) return false;
					if (expected == 0p) {
						if (__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							return false;
						}
					}
					else {
						if (__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							unpark( expected );
							return true;
						}
					}
				}
			}
		}
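
		// Usage sketch (illustrative): one waiter and one poster; the CAS loops
		// make the two calls safe in either order, e.g.
		//   single_sem sem;
		//   if ( wait( sem ) ) { /* parked until post */ }   // waiter side
		//   post( sem );   // poster side: wakes the waiter, or pre-posts if none yet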

		// Synchronization primitive which only supports a single thread and one post.
		// Similar to a binary semaphore with a 'one-shot' semantic:
		// it is expected to be discarded after each party calls its side.
		enum(struct thread$ *) { oneshot_ARMED = 0p, oneshot_FULFILLED = 1p };
		struct oneshot {
			// Internal state :
			//     armed      : initial state, wait will block
			//     fulfilled  : wait won't block
			//     any thread : a thread is currently waiting
			struct thread$ * volatile ptr;
		};

		static inline {
			void ?{}(oneshot & this) {
				this.ptr = oneshot_ARMED;
			}

			void ^?{}(oneshot &) {}

			// Wait for the post, return immediately if it already happened.
			// return true if the thread was parked
			bool wait(oneshot & this) {
				for () {
					struct thread$ * expected = this.ptr;
					if (expected == oneshot_FULFILLED) return false;
					if (__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						park();
						/* paranoid */ verify( this.ptr == oneshot_FULFILLED );
						return true;
					}
				}
			}

			// Mark as fulfilled, wake thread if needed
			// returns the unparked thread, or 0p if no thread was waiting
			thread$ * post(oneshot & this, bool do_unpark = true) {
				struct thread$ * got = __atomic_exchange_n( &this.ptr, oneshot_FULFILLED, __ATOMIC_SEQ_CST);
				if ( got == oneshot_ARMED || got == oneshot_FULFILLED ) return 0p;
				if (do_unpark) unpark( got );
				return got;
			}
		}
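
		// Usage sketch (illustrative): each side is used at most once per
		// instance; the calls may happen in either order, e.g.
		//   oneshot signal;
		//   wait( signal );   // waiter: parks unless the post already happened
		//   post( signal );   // poster: wakes the waiter if one is parked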

		// Base type for futures to build upon.
		// It is based on the 'oneshot' type, which allows multiple futures
		// to share the same wait context, permitting users to block a single
		// thread on "any of" [a given set of] futures.
		// It does not support multiple threads waiting on the same future.
		enum(struct oneshot *) { future_ARMED = 0p, future_FULFILLED = 1p, future_PROGRESS = 2p, future_ABANDONED = 3p };
		struct future_t {
			// Internal state :
			//     armed       : initial state, wait will block
			//     fulfilled   : result is ready, wait won't block
			//     progress    : someone else is in the process of fulfilling this
			//     abandoned   : client no longer cares, server should delete
			//     any oneshot : a context has been setup to wait, a thread could wait on it
			struct oneshot * volatile ptr;
		};

		static inline {
			void ?{}(future_t & this) {
				this.ptr = future_ARMED;
			}

			void ^?{}(future_t &) {}

			void reset(future_t & this) {
				// the future must be in the ARMED (0p) or FULFILLED (1p) state
				__atomic_exchange_n( &this.ptr, future_ARMED, __ATOMIC_SEQ_CST);
			}

			// check if the future is available
			bool available( future_t & this ) {
				while( this.ptr == future_PROGRESS ) Pause();
				return this.ptr == future_FULFILLED;
			}

			// Prepare the future to be waited on
			// intended to be used by wait, wait_any, waitfor, etc. rather than called directly
			bool setup( future_t & this, oneshot & wait_ctx ) {
				/* paranoid */ verify( wait_ctx.ptr == oneshot_ARMED || wait_ctx.ptr == oneshot_FULFILLED );
				// The future needs to set the wait context
				for () {
					struct oneshot * expected = this.ptr;
					// Is the future already fulfilled?
					if (expected == future_FULFILLED) return false; // Yes, just return false (didn't block)

					// The future is not fulfilled, try to setup the wait context
					if (__atomic_compare_exchange_n(&this.ptr, &expected, &wait_ctx, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						return true;
					}
				}
			}

			// Stop waiting on a future.
			// When multiple futures are waited for together in an "any of" pattern,
			// futures that weren't fulfilled before the thread woke up
			// should retract their wait ctx.
			// intended to be used by wait, wait_any, waitfor, etc. rather than called directly
			bool retract( future_t & this, oneshot & wait_ctx ) {
				struct oneshot * expected = &wait_ctx;

				// attempt to remove the context so it doesn't get consumed.
				if (__atomic_compare_exchange_n( &this.ptr, &expected, future_ARMED, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
					// we still had the original context, so no one else saw it
					return false;
				}

				// expected == ARMED: future was never actually setup, just return
				if ( expected == future_ARMED ) return false;

				// expected == FULFILLED: the future is ready and the context was fully consumed
				// the server won't use the pointer again
				// It is safe to delete (which could happen after the return)
				if ( expected == future_FULFILLED ) return true;

				// expected == PROGRESS: the future is ready but the context hasn't been fully consumed
				// spin until it is safe to move on
				if ( expected == future_PROGRESS ) {
					while( this.ptr != future_FULFILLED ) Pause();
					/* paranoid */ verify( this.ptr == future_FULFILLED );
					return true;
				}

				// anything else: the future was setup with a different context ?!?!
				// something went wrong here, abort
				abort("Future in unexpected state");
			}

			// Mark the future as abandoned, meaning it will be deleted by the server
			bool abandon( future_t & this ) {
				/* paranoid */ verify( this.ptr != future_ABANDONED );

				// Mark the future as abandoned
				struct oneshot * got = __atomic_exchange_n( &this.ptr, future_ABANDONED, __ATOMIC_SEQ_CST);

				// If the future isn't already fulfilled, let the server delete it
				if ( got == future_ARMED ) return false;

				// got == PROGRESS: the future is ready but the context hasn't been fully consumed
				// spin until it is safe to move on
				if ( got == future_PROGRESS ) {
					while( this.ptr != future_FULFILLED ) Pause();
					got = future_FULFILLED;
				}

				// The future is completed, delete it now
				/* paranoid */ verify( this.ptr != future_FULFILLED );
				free( &this );
				return true;
			}

			// from the server side, mark the future as fulfilled
			// delete it if needed
			thread$ * fulfil( future_t & this, bool do_unpark = true ) {
				for () {
					struct oneshot * expected = this.ptr;

					#if defined(__GNUC__) && __GNUC__ >= 7
						// SKULLDUGGERY: gcc bug does not handle push/pop for -Wfree-nonheap-object
						//#pragma GCC diagnostic push
						#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
					#endif

					if ( expected == future_ABANDONED ) { free( &this ); return 0p; }

					#if defined(__GNUC__) && __GNUC__ >= 7
						//#pragma GCC diagnostic pop
					#endif

					/* paranoid */ verify( expected != future_FULFILLED ); // Future is already fulfilled, should not happen
					/* paranoid */ verify( expected != future_PROGRESS ); // Future is being fulfilled by someone else, which is even less supported than the previous case.

					// If there is a wait context, we need to consume it and mark it as consumed after
					// If there is no context then we can skip the in-progress phase
					struct oneshot * want = expected == future_ARMED ? future_FULFILLED : future_PROGRESS;
					if (__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						if ( expected == future_ARMED ) { return 0p; }
						thread$ * ret = post( *expected, do_unpark );
						__atomic_store_n( &this.ptr, future_FULFILLED, __ATOMIC_SEQ_CST);
						return ret;
					}
				}
			}

			// Wait for the future to be fulfilled
			bool wait( future_t & this ) {
				oneshot temp;
				if ( !setup(this, temp) ) return false;

				// Wait context is setup, just wait on it
				bool ret = wait( temp );

				// Wait for the future to finish being fulfilled
				while( this.ptr == future_PROGRESS ) Pause();

				// Make sure the state makes sense
				// Should be fulfilled, could be in progress but it's out of date if so,
				// since in that case the oneshot was fulfilled (unparking this thread)
				// and the oneshot should not be needed any more
				__attribute__((unused)) struct oneshot * was = this.ptr;
				/* paranoid */ verifyf( was == future_FULFILLED, "Expected this.ptr to be 1p, was %p\n", was );

				// Mark the future as fulfilled, to be consistent
				// with potential calls to available
				// this.ptr = 1p;
				return ret;
			}

			// Wait for any future to be fulfilled
			forall(T& | sized(T) | { bool setup( T&, oneshot & ); bool retract( T&, oneshot & ); })
			T & wait_any( T * futures, size_t num_futures ) {
				oneshot temp;

				// setup all futures
				// if any are already satisfied return
				for ( i; num_futures ) {
					if ( !setup(futures[i], temp) ) return futures[i];
				}

				// Wait context is setup, just wait on it
				wait( temp );

				size_t ret;
				// attempt to retract all futures
				for ( i; num_futures ) {
					if ( retract( futures[i], temp ) ) ret = i;
				}

				return futures[ret];
			}
		}
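
		// Usage sketch (illustrative; the future must be heap-allocated since
		// fulfil/abandon may free it): a client waits while a server
		// eventually fulfils, e.g.
		//   future_t * f = alloc(); (*f){};
		//   wait( *f );     // client: blocks until the server fulfils
		//   fulfil( *f );   // server: wakes the client once the result is ready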

		//-----------------------------------------------------------------------
		// Statistics: called at the end of each thread to register its statistics
		#if !defined(__CFA_NO_STATISTICS__)
			static inline struct __stats_t * __tls_stats() {
				/* paranoid */ verify( ! __preemption_enabled() );
				/* paranoid */ verify( kernelTLS().this_stats );
				return kernelTLS().this_stats;
			}

			#define __STATS__(in_kernel, ...) { \
				if ( !(in_kernel) ) disable_interrupts(); \
				with ( *__tls_stats() ) { \
					__VA_ARGS__ \
				} \
				if ( !(in_kernel) ) enable_interrupts(); \
			}
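
			// Usage sketch (illustrative; the counter name is hypothetical):
			// the statement list runs inside a 'with' on the thread-local stats,
			// with interrupts disabled unless already in the kernel, e.g.
			//   __STATS__( false, ready.sleep.halts++; )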
			#if defined(CFA_HAVE_LINUX_IO_URING_H)
				#define __IO_STATS__(in_kernel, ...) { \
					if ( !(in_kernel) ) disable_interrupts(); \
					with ( *__tls_stats() ) { \
						__VA_ARGS__ \
					} \
					if ( !(in_kernel) ) enable_interrupts(); \
				}
			#else
				#define __IO_STATS__(in_kernel, ...)
			#endif
		#else
			#define __STATS__(in_kernel, ...)
			#define __IO_STATS__(in_kernel, ...)
		#endif
	}
}
#endif // __cforall