source: libcfa/src/concurrency/kernel/fwd.hfa @ fa2e183

Last change on this file since fa2e183 was 3fcb5921, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Fixed what appears to be a bug in retract.
Test coverage for this looks lacking.

//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/fwd.hfa -- PUBLIC
// Fundamental code needed to implement threading M.E.S. (mutual-exclusion/synchronization) algorithms.
//
// Author           : Thierry Delisle
// Created On       : Thu Jul 30 16:46:41 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include "bits/defs.hfa"
#include "bits/debug.hfa"

#ifdef __cforall
#include "bits/random.hfa"
#endif

struct thread$;
struct processor;
struct cluster;

enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };

#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
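
// Usage sketch (illustration only, not part of this header): KERNEL_STORAGE
// reserves raw, suitably aligned static storage for a T without running its
// constructor, so the kernel can construct the object explicitly later:
//
//	KERNEL_STORAGE(cluster, mainCluster);           // declares: static char storage_mainCluster[sizeof(cluster)]
//	...
//	cluster * c = (cluster *)&storage_mainCluster;  // view the raw storage as a cluster
//	(*c){};                                         // CFA constructor call at a time of our choosing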

#ifdef __cforall
extern "C" {
	extern "Cforall" {
		extern __attribute__((aligned(64))) __thread struct KernelThreadData {
			struct thread$    * volatile this_thread;
			struct processor  * volatile this_processor;
			volatile bool sched_lock;

			struct {
				volatile unsigned short disable_count;
				volatile bool enabled;
				volatile bool in_progress;
			} preemption_state;

			#if defined(__SIZEOF_INT128__)
				__uint128_t rand_seed;
			#else
				uint64_t rand_seed;
			#endif
			struct {
				uint64_t fwd_seed;
				uint64_t bck_seed;
			} ready_rng;

			struct __stats_t * volatile this_stats;

			#ifdef __CFA_WITH_VERIFY__
				// Debug, check if the rwlock is owned for reading
				bool in_sched_lock;
				unsigned sched_id;
			#endif
		} __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" )));

		extern bool __preemption_enabled();

		static inline KernelThreadData & kernelTLS( void ) {
			/* paranoid */ verify( ! __preemption_enabled() );
			return __cfaabi_tls;
		}

		extern uintptr_t __cfatls_get( unsigned long int member );
		#define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))

		static inline uint64_t __tls_rand() {
			return
			#if defined(__SIZEOF_INT128__)
				lehmer64( kernelTLS().rand_seed );
			#else
				xorshift_13_7_17( kernelTLS().rand_seed );
			#endif
		}

		static inline unsigned __tls_rand_fwd() {
			return LCGBI_fwd( kernelTLS().ready_rng.fwd_seed );
		}

		static inline unsigned __tls_rand_bck() {
			return LCGBI_bck( kernelTLS().ready_rng.bck_seed );
		}

		static inline void __tls_rand_advance_bck(void) {
			kernelTLS().ready_rng.bck_seed = kernelTLS().ready_rng.fwd_seed;
		}
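
		// Usage sketch (illustration only): kernelTLS(), and therefore __tls_rand(),
		// may only be used with preemption disabled (see the verify in kernelTLS).
		// 'num_queues' is a hypothetical value, not part of this header:
		//
		//	disable_interrupts();
		//	uint64_t r = __tls_rand();      // cheap per-processor PRNG, no shared state
		//	unsigned idx = r % num_queues;  // e.g. pick a random ready-queue to steal from
		//	enable_interrupts();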
	}

	extern void disable_interrupts();
	extern void enable_interrupts( bool poll = false );

	extern "Cforall" {
		enum unpark_hint { UNPARK_LOCAL, UNPARK_REMOTE };

		extern void park( void );
		extern void unpark( struct thread$ *, unpark_hint );
		static inline void unpark( struct thread$ * thrd ) { unpark(thrd, UNPARK_LOCAL); }
		static inline struct thread$ * active_thread () {
			struct thread$ * t = publicTLS_get( this_thread );
			/* paranoid */ verify( t );
			return t;
		}

		extern bool force_yield( enum __Preemption_Reason );

		static inline void yield() {
			force_yield(__MANUAL_PREEMPTION);
		}

		// Yield: yield N times
		static inline void yield( unsigned times ) {
			for( times ) {
				yield();
			}
		}
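
		// Usage sketch (illustration only): park/unpark are the fundamental
		// blocking primitives the types below are built on; a thread makes
		// itself visible to its waker, parks, and is woken by a matching
		// unpark. 'publish' and 'take' are hypothetical helpers:
		//
		//	// blocking side:
		//	publish( active_thread() );   // hand this thread's pointer to the waker
		//	park();                       // block until unparked
		//	// waking side:
		//	unpark( take() );             // wake the published thread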

		// Semaphore which only supports a single waiting thread
		struct single_sem {
			// Internal state :
			//   0p         : empty, no pending post
			//   1p         : a post happened with no waiter, next wait returns immediately
			//   any thread : a thread is currently blocked waiting
			struct thread$ * volatile ptr;
		};

		static inline {
			void ?{}(single_sem & this) {
				this.ptr = 0p;
			}

			void ^?{}(single_sem &) {}

			// Wait for a post; returns true if the thread parked
			bool wait(single_sem & this) {
				for() {
					struct thread$ * expected = this.ptr;
					if(expected == 1p) {
						// a post is pending, consume it and return without blocking
						if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							return false;
						}
					}
					else {
						/* paranoid */ verify( expected == 0p );
						// publish this thread as the waiter, then block
						if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							park();
							return true;
						}
					}
				}
			}

			// Post the semaphore; returns true if a thread was unparked
			bool post(single_sem & this) {
				for() {
					struct thread$ * expected = this.ptr;
					if(expected == 1p) return false; // a post is already pending
					if(expected == 0p) {
						// no waiter, leave a pending post
						if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							return false;
						}
					}
					else {
						// a thread is waiting, wake it
						if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							unpark( expected );
							return true;
						}
					}
				}
			}
		}
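
		// Usage sketch (illustration only): a single consumer and a producer,
		// e.g. an idle processor waiting for work, might pair wait/post as:
		//
		//	single_sem sem;   // starts empty (0p)
		//	// consumer side:
		//	if( !wait(sem) ) { /* a post was already pending, did not block */ }
		//	// producer side:
		//	if( post(sem) ) { /* a waiting thread was unparked */ }
		//
		// At most one thread may wait at a time; concurrent waiters are unsupported.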

		// Synchronization primitive which only supports a single thread and one post
		// Similar to a binary semaphore with a 'one shot' semantic
		// It is expected to be discarded after each party calls their side
		enum(struct thread$ *) { oneshot_ARMED = 0p, oneshot_FULFILLED = 1p };
		struct oneshot {
			// Internal state :
			//   armed      : initial state, wait will block
			//   fulfilled  : wait won't block
			//   any thread : a thread is currently waiting
			struct thread$ * volatile ptr;
		};

		static inline {
			void ?{}(oneshot & this) {
				this.ptr = oneshot_ARMED;
			}

			void ^?{}(oneshot &) {}

			// Wait for the post, return immediately if it already happened.
			// Returns true if the thread was parked
			bool wait(oneshot & this) {
				for() {
					struct thread$ * expected = this.ptr;
					if(expected == oneshot_FULFILLED) return false;
					if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						park();
						/* paranoid */ verify( this.ptr == oneshot_FULFILLED );
						return true;
					}
				}
			}

			// Mark as fulfilled, wake the waiting thread if needed
			// Returns the unparked thread, or 0p if no thread was waiting
			thread$ * post(oneshot & this, bool do_unpark = true) {
				struct thread$ * got = __atomic_exchange_n( &this.ptr, oneshot_FULFILLED, __ATOMIC_SEQ_CST);
				if( got == oneshot_ARMED || got == oneshot_FULFILLED ) return 0p;
				if(do_unpark) unpark( got );
				return got;
			}
		}
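
		// Usage sketch (illustration only): a oneshot is a single-use rendezvous;
		// one thread waits, another posts, and the object is then discarded:
		//
		//	oneshot chan;
		//	// waiting side:
		//	wait( chan );   // parks unless the post already happened
		//	// posting side:
		//	post( chan );   // wakes the waiter if one is parked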

		// Base type for futures to build upon
		// It is based on the 'oneshot' type to allow multiple futures
		// to block on the same instance, permitting users to block a single
		// thread on "any of" [a given set of] futures.
		// It does not support multiple threads waiting on the same future
		enum(struct oneshot *) { future_ARMED = 0p, future_FULFILLED = 1p, future_PROGRESS = 2p, future_ABANDONED = 3p };
		struct future_t {
			// Internal state :
			//   armed       : initial state, wait will block
			//   fulfilled   : result is ready, wait won't block
			//   progress    : someone else is in the process of fulfilling this
			//   abandoned   : client no longer cares, server should delete
			//   any oneshot : a context has been set up to wait, a thread could wait on it
			struct oneshot * volatile ptr;
		};

		static inline {
			void ?{}(future_t & this) {
				this.ptr = future_ARMED;
			}

			void ^?{}(future_t &) {}

			void reset(future_t & this) {
				// state must currently be armed (0p) or fulfilled (1p)
				__atomic_exchange_n( &this.ptr, future_ARMED, __ATOMIC_SEQ_CST);
			}

			// Check if the future is available
			bool available( future_t & this ) {
				while( this.ptr == future_PROGRESS ) Pause();
				return this.ptr == future_FULFILLED;
			}

			// Prepare the future to be waited on
			// Intended to be used by wait, wait_any, waitfor, etc. rather than called directly
			bool setup( future_t & this, oneshot & wait_ctx ) {
				/* paranoid */ verify( wait_ctx.ptr == oneshot_ARMED || wait_ctx.ptr == oneshot_FULFILLED );
				// The future needs to set the wait context
				for() {
					struct oneshot * expected = this.ptr;
					// Is the future already fulfilled?
					if(expected == future_FULFILLED) return false; // Yes, just return false (didn't block)

					// The future is not fulfilled, try to setup the wait context
					if(__atomic_compare_exchange_n(&this.ptr, &expected, &wait_ctx, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						return true;
					}
				}
			}

			// Stop waiting on a future
			// When multiple futures are waited on together in an "any of" pattern,
			// futures that weren't fulfilled before the thread woke up
			// should retract their wait ctx
			// Intended to be used by wait, wait_any, waitfor, etc. rather than called directly
			// Returns true if the future was fulfilled before it could be retracted
			bool retract( future_t & this, oneshot & wait_ctx ) {
				struct oneshot * expected = &wait_ctx;

				// attempt to remove the context so it doesn't get consumed.
				if(__atomic_compare_exchange_n( &this.ptr, &expected, future_ARMED, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
					// we still have the original context, so no one else saw it
					return false;
				}

				// expected == ARMED: future was never actually setup, just return
				if( expected == future_ARMED ) return false;

				// expected == FULFILLED: the future is ready and the context was fully consumed
				// the server won't use the pointer again
				// It is safe to delete (which could happen after the return)
				if( expected == future_FULFILLED ) return true;

				// expected == PROGRESS: the future is ready but the context hasn't been fully consumed
				// spin until it is safe to move on
				if( expected == future_PROGRESS ) {
					while( this.ptr != future_FULFILLED ) Pause();
					/* paranoid */ verify( this.ptr == future_FULFILLED );
					return true;
				}

				// anything else: the future was setup with a different context ?!?!
				// something went wrong here, abort
				abort("Future in unexpected state");
			}

			// Mark the future as abandoned, meaning it will be deleted by the server
			bool abandon( future_t & this ) {
				/* paranoid */ verify( this.ptr != future_ABANDONED );

				// Mark the future as abandoned
				struct oneshot * got = __atomic_exchange_n( &this.ptr, future_ABANDONED, __ATOMIC_SEQ_CST);

				// If the future isn't already fulfilled, let the server delete it
				if( got == future_ARMED ) return false;

				// got == PROGRESS: the future is ready but the context hasn't been fully consumed
				// spin until it is safe to move on
				if( got == future_PROGRESS ) {
					while( this.ptr != future_FULFILLED ) Pause();
					got = future_FULFILLED;
				}

				// The future is fulfilled, delete it now
				/* paranoid */ verify( this.ptr != future_FULFILLED );
				free( &this );
				return true;
			}

			// From the server side, mark the future as fulfilled
			// delete it if needed
			thread$ * fulfil( future_t & this, bool do_unpark = true ) {
				for() {
					struct oneshot * expected = this.ptr;
					// was this abandoned?
					#if defined(__GNUC__) && __GNUC__ >= 7
						#pragma GCC diagnostic push
						#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
					#endif
					if( expected == future_ABANDONED ) { free( &this ); return 0p; }
					#if defined(__GNUC__) && __GNUC__ >= 7
						#pragma GCC diagnostic pop
					#endif

					/* paranoid */ verify( expected != future_FULFILLED ); // Future is already fulfilled, should not happen
					/* paranoid */ verify( expected != future_PROGRESS ); // Future is being fulfilled by someone else, which is even less supported than the previous case.

					// If there is a wait context, we need to consume it and mark it as consumed after
					// If there is no context then we can skip the in-progress phase
					struct oneshot * want = expected == future_ARMED ? future_FULFILLED : future_PROGRESS;
					if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						if( expected == future_ARMED ) { return 0p; }
						thread$ * ret = post( *expected, do_unpark );
						__atomic_store_n( &this.ptr, future_FULFILLED, __ATOMIC_SEQ_CST);
						return ret;
					}
				}
			}

			// Wait for the future to be fulfilled
			bool wait( future_t & this ) {
				oneshot temp;
				if( !setup(this, temp) ) return false;

				// Wait context is setup, just wait on it
				bool ret = wait( temp );

				// Wait for the future to settle
				while( this.ptr == future_PROGRESS ) Pause();
				// Make sure the state makes sense
				// Should be fulfilled, could be in progress but it's out of date if so
				// since if that is the case, the oneshot was fulfilled (unparking this thread)
				// and the oneshot should not be needed any more
				__attribute__((unused)) struct oneshot * was = this.ptr;
				/* paranoid */ verifyf( was == future_FULFILLED, "Expected this.ptr to be 1p, was %p\n", was );

				// Mark the future as fulfilled, to be consistent
				// with potential calls to available
				// this.ptr = 1p;
				return ret;
			}

			// Wait for any future in the set to be fulfilled
			forall(T& | sized(T) | { bool setup( T&, oneshot & ); bool retract( T&, oneshot & ); })
			T & wait_any( T * futures, size_t num_futures ) {
				oneshot temp;

				// setup all futures
				// if any are already satisfied return
				for ( i; num_futures ) {
					if( !setup(futures[i], temp) ) return futures[i];
				}

				// Wait context is setup, just wait on it
				wait( temp );

				size_t ret;
				// attempt to retract all futures
				// at least one future was fulfilled (it posted the oneshot), so ret is always set
				for ( i; num_futures ) {
					if ( retract( futures[i], temp ) ) ret = i;
				}

				return futures[ret];
			}
		}
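
		// Usage sketch (illustration only): a client/server pair around one future,
		// e.g. an I/O submission whose completion is delivered by another processor:
		//
		//	future_t f;
		//	// client side:
		//	wait( f );                     // blocks unless the future is already fulfilled
		//	// server side, once the result is ready:
		//	thread$ * woke = fulfil( f );  // wakes the client if it was parked
		//
		// A client that loses interest calls abandon( f ), transferring ownership of
		// the (heap-allocated) future to the server, which then frees it in fulfil.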

		//-----------------------------------------------------------------------
		// Statistics: calls at the end of each thread to register statistics
		#if !defined(__CFA_NO_STATISTICS__)
			static inline struct __stats_t * __tls_stats() {
				/* paranoid */ verify( ! __preemption_enabled() );
				/* paranoid */ verify( kernelTLS().this_stats );
				return kernelTLS().this_stats;
			}

			#define __STATS__(in_kernel, ...) { \
				if( !(in_kernel) ) disable_interrupts(); \
				with( *__tls_stats() ) { \
					__VA_ARGS__ \
				} \
				if( !(in_kernel) ) enable_interrupts(); \
			}
			#if defined(CFA_HAVE_LINUX_IO_URING_H)
				#define __IO_STATS__(in_kernel, ...) { \
					if( !(in_kernel) ) disable_interrupts(); \
					with( *__tls_stats() ) { \
						__VA_ARGS__ \
					} \
					if( !(in_kernel) ) enable_interrupts(); \
				}
			#else
				#define __IO_STATS__(in_kernel, ...)
			#endif
		#else
			#define __STATS__(in_kernel, ...)
			#define __IO_STATS__(in_kernel, ...)
		#endif
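
		// Usage sketch (illustration only): __STATS__ runs its statements with the
		// current processor's stats brought into scope by with(); when in_kernel is
		// false it brackets them with disable/enable_interrupts. The counter name
		// below is hypothetical:
		//
		//	__STATS__( false, ready.sleep.halts++; )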
	}
}
#endif