source: libcfa/src/concurrency/kernel/fwd.hfa@ c7015e6b

ADT arm-eh ast-experimental enum forall-pointer-decay jacob/cs343-translation new-ast-unique-expr pthread-emulation qualifiedEnum
Last change on this file since c7015e6b was a76efc8, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

futures now optionally don't unpark the target thread

  • Property mode set to 100644
File size: 12.3 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// kernel/fwd.hfa -- PUBLIC
8// Fundamental code needed to implement threading M.E.S. algorithms.
9//
10// Author : Thierry Delisle
11// Created On : Thu Jul 30 16:46:41 2020
12// Last Modified By :
13// Last Modified On :
14// Update Count :
15//
16
17#pragma once
18
19#include "bits/defs.hfa"
20#include "bits/debug.hfa"
21
22#ifdef __cforall
23#include "bits/random.hfa"
24#endif
25
// Forward declarations of the core runtime types; full definitions live elsewhere in the kernel.
struct $thread;
struct processor;
struct cluster;

// Why a yield/preemption happened; passed to force_yield (see below).
enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };

// Reserve properly aligned static storage big enough to construct a T in place later.
#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
33
34#ifdef __cforall
35extern "C" {
36 extern "Cforall" {
		// Per-kernel-thread (per-processor) data block.
		// NOTE(review): aligned(128) — presumably to keep each processor's TLS on its
		// own cache lines and avoid false sharing; confirm against the cluster layout.
		// Access via kernelTLS() (preemption disabled) or publicTLS_get (by member offset).
		extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
			struct $thread * volatile this_thread;             // user-level thread currently running here
			struct processor * volatile this_processor;        // processor executing that thread
			struct __processor_id_t * volatile this_proc_id;
			struct __stats_t * volatile this_stats;            // statistics buffer, see __tls_stats()

			struct {
				volatile unsigned short disable_count;     // nesting depth of disable_interrupts()
				volatile bool enabled;                     // true when preemption is currently allowed
				volatile bool in_progress;                 // a preemption is being handled right now
			} preemption_state;

			// General-purpose PRNG state, consumed by __tls_rand().
			#if defined(__SIZEOF_INT128__)
				__uint128_t rand_seed;
			#else
				uint64_t rand_seed;
			#endif
			// Paired forward/backward LCG state, see __tls_rand_fwd/__tls_rand_bck.
			struct {
				uint64_t fwd_seed;
				uint64_t bck_seed;
			} ready_rng;
		} __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" )));
59
		extern bool __preemption_enabled();

		// Direct access to the TLS block.
		// Only legal with preemption disabled: otherwise the thread could migrate
		// to another processor between obtaining the reference and using it.
		static inline KernelThreadData & kernelTLS( void ) {
			/* paranoid */ verify( ! __preemption_enabled() );
			return __cfaabi_tls;
		}

		// Fetch a single TLS member by its byte offset, through an out-of-line helper.
		// NOTE(review): the "public" name suggests this path is safe even with
		// preemption enabled, unlike kernelTLS() — confirm against the implementation.
		extern uintptr_t __cfatls_get( unsigned long int member );
		#define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))
69
		// Per-processor 64-bit pseudo-random number; advances kernelTLS().rand_seed,
		// so it must be called with preemption disabled (via kernelTLS()).
		// Uses Lehmer's generator when 128-bit arithmetic is available, xorshift otherwise.
		static inline uint64_t __tls_rand() {
			#if defined(__SIZEOF_INT128__)
				return __lehmer64( kernelTLS().rand_seed );
			#else
				return __xorshift64( kernelTLS().rand_seed );
			#endif
		}
77
		// Constants for a 48-bit linear-congruential generator
		// (same multiplier A and increment C as POSIX drand48).
		// AI is the multiplicative inverse of A modulo 2^64, which lets the
		// sequence be stepped backwards as well as forwards.
		#define M (1_l64u << 48_l64u)
		#define A (25214903917_l64u)
		#define AI (18446708753438544741_l64u)
		#define C (11_l64u)
		#define D (16_l64u)

		// Advance the forward seed one LCG step and return its top bits.
		static inline unsigned __tls_rand_fwd() {

			kernelTLS().ready_rng.fwd_seed = (A * kernelTLS().ready_rng.fwd_seed + C) & (M - 1);
			return kernelTLS().ready_rng.fwd_seed >> D;
		}

		// Replay the forward sequence in reverse: return the value the current
		// backward seed encodes, then step the seed one LCG step backwards
		// (multiply by the inverse AI after undoing the increment).
		static inline unsigned __tls_rand_bck() {
			unsigned int r = kernelTLS().ready_rng.bck_seed >> D;
			kernelTLS().ready_rng.bck_seed = AI * (kernelTLS().ready_rng.bck_seed - C) & (M - 1);
			return r;
		}

		#undef M
		#undef A
		#undef AI
		#undef C
		#undef D

		// Synchronize the backward iterator with the forward one, so everything
		// generated by __tls_rand_fwd so far can now be replayed by __tls_rand_bck.
		static inline void __tls_rand_advance_bck(void) {
			kernelTLS().ready_rng.bck_seed = kernelTLS().ready_rng.fwd_seed;
		}
105 }
106
107
108
109 extern void disable_interrupts();
110 extern void enable_interrupts( bool poll = false );
111
112 extern "Cforall" {
113 extern void park( void );
114 extern void unpark( struct $thread * this );
115 static inline struct $thread * active_thread () {
116 struct $thread * t = publicTLS_get( this_thread );
117 /* paranoid */ verify( t );
118 return t;
119 }
120
		// Yield the processor for the given reason (alarm, poll, manual, ...).
		extern bool force_yield( enum __Preemption_Reason );

		// Voluntarily give up the processor once.
		static inline void yield() {
			force_yield(__MANUAL_PREEMPTION);
		}
126
127 // Yield: yield N times
128 static inline void yield( unsigned times ) {
129 for( times ) {
130 yield();
131 }
132 }
133
134 extern uint64_t thread_rand();
135
		// Semaphore which only supports a single thread
		struct single_sem {
			// Encodes the semaphore state (see wait/post below):
			//     0p     : empty — no pending token, no waiter
			//     1p     : one token pending, no waiter
			// any thread : pointer to the single blocked waiter
			struct $thread * volatile ptr;
		};
140
		static inline {
			// Start empty: no token and no waiter.
			void ?{}(single_sem & this) {
				this.ptr = 0p;
			}

			void ^?{}(single_sem &) {}

			// Take a token, blocking until one is posted.
			// Returns true if the thread actually parked,
			// false if a token was already pending and the wait completed immediately.
			bool wait(single_sem & this) {
				for() {
					struct $thread * expected = this.ptr;
					if(expected == 1p) {
						// A token is pending: consume it without blocking.
						if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							return false;
						}
					}
					else {
						/* paranoid */ verify( expected == 0p );
						// Publish ourselves as the (single) waiter, then block.
						if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							park();
							return true;
						}
					}
					// CAS failed: state changed under us, retry with the fresh value.
				}
			}

			// Post one token.
			// Returns true if a blocked thread was woken,
			// false if the token was merely stored (or one was already pending).
			bool post(single_sem & this) {
				for() {
					struct $thread * expected = this.ptr;
					if(expected == 1p) return false; // a token is already pending, nothing to add
					if(expected == 0p) {
						// No waiter: leave a token for the next wait().
						if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							return false;
						}
					}
					else {
						// A thread is parked: claim it and wake it.
						if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
							unpark( expected );
							return true;
						}
					}
				}
			}
		}
185
		// Synchronization primitive which only supports a single thread and one post
		// Similar to a binary semaphore with a 'one shot' semantic
		// is expected to be discarded after each party call their side
		struct oneshot {
			// Internal state :
			//     0p     : is initial state (wait will block)
			//     1p     : fulfilled (wait won't block)
			// any thread : a thread is currently waiting
			struct $thread * volatile ptr;
		};
196
		static inline {
			void ?{}(oneshot & this) {
				this.ptr = 0p;
			}

			void ^?{}(oneshot &) {}

			// Wait for the post, return immediately if it already happened.
			// return true if the thread was parked
			bool wait(oneshot & this) {
				for() {
					struct $thread * expected = this.ptr;
					if(expected == 1p) return false; // already fulfilled, no need to block
					/* paranoid */ verify( expected == 0p );
					// Install ourselves as the waiter, then block until post() wakes us.
					if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						park();
						/* paranoid */ verify( this.ptr == 1p );
						return true;
					}
				}
			}

			// Mark as fulfilled, wake the waiting thread if needed.
			// Returns the waiting thread, or 0p if no thread was parked.
			// When do_unpark is false the thread is NOT unparked here; the caller
			// takes responsibility for unparking the returned thread.
			$thread * post(oneshot & this, bool do_unpark = true) {
				struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
				if( got == 0p ) return 0p;
				if(do_unpark) unpark( got );
				return got;
			}
		}
228
		// base types for future to build upon
		// It is based on the 'oneshot' type to allow multiple futures
		// to block on the same instance, permitting users to block a single
		// thread on "any of" [a given set of] futures.
		// does not support multiple threads waiting on the same future
		struct future_t {
			// Internal state :
			//     0p      : is initial state (wait will block)
			//     1p      : fulfilled (wait won't block)
			//     2p      : in progress (fulfil is consuming the wait context)
			//     3p      : abandoned, server should delete
			// any oneshot : a context has been setup to wait, a thread could wait on it
			struct oneshot * volatile ptr;
		};
243
		static inline {
			void ?{}(future_t & this) {
				this.ptr = 0p;
			}

			void ^?{}(future_t &) {}

			// Return the future to its initial (empty) state so it can be reused.
			void reset(future_t & this) {
				// needs to be in 0p or 1p
				__atomic_exchange_n( &this.ptr, 0p, __ATOMIC_SEQ_CST);
			}

			// check if the future is available
			bool available( future_t & this ) {
				return this.ptr == 1p;
			}

			// Prepare the future to be waited on.
			// Returns true if the wait context was installed (caller may block on it),
			// false if the future is already fulfilled (no blocking needed).
			// intended to be used by wait, wait_any, waitfor, etc. rather than used directly
			bool setup( future_t & this, oneshot & wait_ctx ) {
				/* paranoid */ verify( wait_ctx.ptr == 0p );
				// The future needs to set the wait context
				for() {
					struct oneshot * expected = this.ptr;
					// Is the future already fulfilled?
					if(expected == 1p) return false; // Yes, just return false (didn't block)

					// The future is not fulfilled, try to setup the wait context
					/* paranoid */ verify( expected == 0p );
					if(__atomic_compare_exchange_n(&this.ptr, &expected, &wait_ctx, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						return true;
					}
				}
			}

			// Stop waiting on a future
			// When multiple futures are waited for together in "any of" pattern
			// futures that weren't fulfilled before the thread woke up
			// should retract the wait ctx
			// intended to be used by wait, wait_any, waitfor, etc. rather than used directly
			void retract( future_t & this, oneshot & wait_ctx ) {
				// Remove the wait context
				struct oneshot * got = __atomic_exchange_n( &this.ptr, 0p, __ATOMIC_SEQ_CST);

				// got == 0p: future was never actually setup, just return
				if( got == 0p ) return;

				// got == wait_ctx: since fulfil does an atomic_swap,
				// if we got back the original then no one else saw context
				// It is safe to delete (which could happen after the return)
				if( got == &wait_ctx ) return;

				// got == 1p: the future is ready and the context was fully consumed
				// the server won't use the pointer again
				// It is safe to delete (which could happen after the return)
				if( got == 1p ) return;

				// got == 2p: the future is ready but the context hasn't fully been consumed
				// spin until it is safe to move on
				if( got == 2p ) {
					while( this.ptr != 1p ) Pause();
					return;
				}

				// got == any thing else, something went wrong here, abort
				abort("Future in unexpected state");
			}

			// Mark the future as abandoned, meaning it will be deleted by the server
			bool abandon( future_t & this ) {
				/* paranoid */ verify( this.ptr != 3p );

				// Mark the future as abandoned
				struct oneshot * got = __atomic_exchange_n( &this.ptr, 3p, __ATOMIC_SEQ_CST);

				// If the future isn't already fulfilled, let the server delete it
				if( got == 0p ) return false;

				// got == 2p: the future is ready but the context hasn't fully been consumed
				// spin until it is safe to move on
				if( got == 2p ) {
					while( this.ptr != 1p ) Pause();
					got = 1p;
				}

				// The future is completed delete it now
				/* paranoid */ verify( this.ptr != 1p );
				free( &this );
				return true;
			}

			// from the server side, mark the future as fulfilled
			// delete it if needed
			// Returns the thread made ready (when do_unpark is false the caller must
			// unpark it itself), or 0p if no thread was waiting on the future.
			$thread * fulfil( future_t & this, bool do_unpark = true ) {
				for() {
					struct oneshot * expected = this.ptr;
					// was this abandoned?
					#if defined(__GNUC__) && __GNUC__ >= 7
						#pragma GCC diagnostic push
						#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
					#endif
						if( expected == 3p ) { free( &this ); return 0p; }
					#if defined(__GNUC__) && __GNUC__ >= 7
						#pragma GCC diagnostic pop
					#endif

					/* paranoid */ verify( expected != 1p ); // Future is already fulfilled, should not happen
					/* paranoid */ verify( expected != 2p ); // Future is being fulfilled by someone else, this is even less supported than the previous case.

					// If there is a wait context, we need to consume it and mark it as consumed after
					// If there is no context then we can skip the in progress phase
					struct oneshot * want = expected == 0p ? 1p : 2p;
					if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
						if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return 0p; }
						// Consume the wait context (2p marks it in-progress), then
						// publish the final fulfilled state.
						$thread * ret = post( *expected, do_unpark );
						__atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
						return ret;
					}
				}

			}

			// Wait for the future to be fulfilled.
			// Returns true if the thread parked, false if the future was already ready.
			bool wait( future_t & this ) {
				oneshot temp;
				if( !setup(this, temp) ) return false;

				// Wait context is setup, just wait on it
				bool ret = wait( temp );

				// Wait for the future to leave the transient in-progress (2p) state
				while( this.ptr == 2p ) Pause();
				// Make sure the state makes sense
				// Should be fulfilled, could be in progress but it's out of date if so
				// since if that is the case, the oneshot was fulfilled (unparking this thread)
				// and the oneshot should not be needed any more
				__attribute__((unused)) struct oneshot * was = this.ptr;
				/* paranoid */ verifyf( was == 1p, "Expected this.ptr to be 1p, was %p\n", was );

				// Mark the future as fulfilled, to be consistent
				// with potential calls to avail
				// this.ptr = 1p;
				return ret;
			}
		}
389
		//-----------------------------------------------------------------------
		// Statistics: called at the end of each thread to register statistics
		#if !defined(__CFA_NO_STATISTICS__)
			// Access the per-processor statistics buffer.
			// Requires preemption disabled (goes through kernelTLS) and a
			// buffer to have been installed in this_stats.
			static inline struct __stats_t * __tls_stats() {
				/* paranoid */ verify( ! __preemption_enabled() );
				/* paranoid */ verify( kernelTLS().this_stats );
				return kernelTLS().this_stats;
			}

			// Run the given statements with the TLS stats struct opened via 'with'.
			// Pass in_kernel = true when interrupts are already disabled, so they
			// are not toggled a second time; compiles to nothing when statistics
			// are disabled (__CFA_NO_STATISTICS__).
			#define __STATS__(in_kernel, ...) { \
				if( !(in_kernel) ) disable_interrupts(); \
				with( *__tls_stats() ) { \
					__VA_ARGS__ \
				} \
				if( !(in_kernel) ) enable_interrupts(); \
			}
		#else
			#define __STATS__(in_kernel, ...)
		#endif
409 }
410}
411#endif
Note: See TracBrowser for help on using the repository browser.