source: libcfa/src/concurrency/locks.hfa@ af67ee1

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphores

// '0-nary' semaphore
// Similar to a counting semaphore, except the value of one is never reached:
// a V() that would bring the value to 1 instead *spins* until a P() consumes it.
struct Semaphore0nary {
	__spinlock_t lock; // protects the consumer side of the queue
	mpsc_queue(thread$) queue;
};

static inline bool P(Semaphore0nary & this, thread$ * thrd) {
	/* paranoid */ verify(!thrd`next);
	/* paranoid */ verify(!(&(*thrd)`next));

	push(this.queue, thrd);
	return true;
}

static inline bool P(Semaphore0nary & this) {
	thread$ * thrd = active_thread();
	P(this, thrd);
	park();
	return true;
}

static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
	thread$ * next;
	lock(this.lock __cfaabi_dbg_ctx2);
	for (;;) {
		next = pop(this.queue);
		if (next) break;
		Pause();
	}
	unlock(this.lock);

	if (doUnpark) unpark(next);
	return next;
}
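
// Example (illustrative sketch, not part of the original header): the 0-nary
// semaphore pairs each P() with exactly one V(); the P() side parks after
// enqueuing itself and the V() side spins until it finds a waiter to hand off to.
//   Semaphore0nary sem;
//   // waiting thread:
//   P( sem );   // enqueue self and park
//   // signalling thread:
//   V( sem );   // pop a waiter and unpark it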

// Wrapper used on top of any semaphore to avoid unnecessary blocking when uncontended
struct BinaryBenaphore {
	volatile ssize_t counter;
};

static inline {
	void ?{}(BinaryBenaphore & this) { this.counter = 0; }
	void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
	void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }

	// returns true if no blocking needed
	bool P(BinaryBenaphore & this) {
		return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
	}

	bool tryP(BinaryBenaphore & this) {
		ssize_t c = this.counter;
		/* paranoid */ verify( c > MIN );
		return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
	}

	// returns true if no notify is needed, i.e. no blocked thread must be woken
	bool V(BinaryBenaphore & this) {
		ssize_t c = 0;
		for () {
			/* paranoid */ verify( this.counter < MAX );
			if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
				if (c == 0) return true;
				/* paranoid */ verify(c < 0);
				return false;
			} else {
				if (c == 1) return true;
				/* paranoid */ verify(c < 1);
				Pause();
			}
		}
	}
}
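
// Example (illustrative sketch, not part of the original header): the benaphore
// counter acts as a fast path in front of a real semaphore, so the semaphore is
// only touched under contention.
//   BinaryBenaphore ben = { 1 };   // one_t constructor: starts "available"
//   bool fast = P( ben );          // true: acquired without blocking
//   bool quiet = V( ben );         // true: no blocked thread needs to be woken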

// Binary Semaphore based on the BinaryBenaphore on top of the 0-nary Semaphore
struct ThreadBenaphore {
	BinaryBenaphore ben;
	Semaphore0nary sem;
};

static inline void ?{}(ThreadBenaphore & this) {}
static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }

static inline bool P(ThreadBenaphore & this) { return P(this.ben) ? false : P(this.sem); }
static inline bool tryP(ThreadBenaphore & this) { return tryP(this.ben); }
static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); }

static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
	if (V(this.ben)) return 0p;
	return V(this.sem, doUnpark);
}
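
// Example (illustrative sketch, not part of the original header): P()/V() take
// the benaphore fast path first and only fall back to the 0-nary semaphore
// (park/unpark) under contention.
//   ThreadBenaphore tb = { 1 };   // starts "available"
//   P( tb );                      // uncontended: no park
//   V( tb );                      // only unparks a thread if one is blocked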

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );
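
// Example (illustrative sketch, not part of the original header): a classic
// counting semaphore used to hand work from a producer to a consumer.
//   semaphore ready = { 0 };
//   // consumer:
//   P( ready );   // blocks until the producer signals
//   // producer:
//   V( ready );   // wakes the blocked consumer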

//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
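
// Example (illustrative sketch, not part of the original header): a simple
// non-reentrant blocking lock; acquiring it twice from the same thread is an error.
//   single_acquisition_lock l;
//   lock( l );
//   // ... critical section ...
//   unlock( l );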

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
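
// Example (illustrative sketch, not part of the original header): unlike
// single_acquisition_lock, the owner_lock is constructed with ownership and
// multi-acquisition enabled, so the owning thread may acquire it recursively.
//   owner_lock l;
//   lock( l );
//   lock( l );     // re-acquisition by the owner is permitted
//   unlock( l );
//   unlock( l );   // released once every acquisition is matched by an unlock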

struct fast_lock {
	thread$ * volatile owner;
	ThreadBenaphore sem;
};

static inline void ?{}(fast_lock & this) { this.owner = 0p; }

static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
	thread$ * exp = 0p;
	return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}

static inline void lock( fast_lock & this ) __attribute__((artificial));
static inline void lock( fast_lock & this ) {
	thread$ * thrd = active_thread();
	/* paranoid */ verify(thrd != this.owner);

	for (;;) {
		if ($try_lock(this, thrd)) return;
		P(this.sem);
	}
}

static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
static inline bool try_lock ( fast_lock & this ) {
	thread$ * thrd = active_thread();
	/* paranoid */ verify(thrd != this.owner);
	return $try_lock(this, thrd);
}

static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));
static inline thread$ * unlock( fast_lock & this ) {
	/* paranoid */ verify(active_thread() == this.owner);

	// clear 'owner' before waking anyone,
	// so newly arriving and newly woken threads don't park incorrectly.
	// This may require additional fencing on ARM.
	this.owner = 0p;

	return V(this.sem);
}

static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }
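
// Example (illustrative sketch, not part of the original header): fast_lock
// takes ownership with a single CAS on 'owner' and only falls back to the
// ThreadBenaphore when that CAS fails.
//   fast_lock l;
//   lock( l );     // uncontended: one CAS, no parking
//   // ... critical section ...
//   unlock( l );   // clears 'owner', then wakes a waiter if one is blocked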

struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}
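
// Example (illustrative sketch, not part of the original header): an MCS lock
// queues waiters on caller-provided nodes, so each acquisition needs its own
// mcs_node and the same node must be passed to the matching unlock.
//   mcs_lock l;
//   mcs_node n;
//   lock( l, n );
//   // ... critical section ...
//   unlock( l, n );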

struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// Current thread owning the lock
	struct thread$ * owner;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;

	// used for linear backoff spinning
	int spin_start;
	int spin_end;
	int spin_count;

	// after unsuccessful linear backoff yield this many times
	int yield_count;
};

static inline void ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
	this.spin_start = spin_start;
	this.spin_end = spin_end;
	this.spin_count = spin_count;
	this.yield_count = yield_count;
}
static inline void ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
	// if already the owner, return immediately
	if (active_thread() == owner) return true;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return true;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;
	}

	// linear backoff bounded by spin_count
	spin = spin_start;
	int spin_counter = 0;
	int yield_counter = 0;
	for ( ;; ) {
		if(try_lock_contention(this)) return true;
		if(spin_counter < spin_count) {
			for (int i = 0; i < spin; i++) Pause();
			if (spin < spin_end) spin += spin;
			else spin_counter++;
		} else if (yield_counter < yield_count) {
			// after linear backoff, yield yield_count times
			yield_counter++;
			yield();
		} else { break; }
	}

	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return true;

	// this should never be reached as block(this) always returns true
	return false;
}

static inline bool lock_improved(linear_backoff_then_block_lock & this) with(this) {
	// if already the owner, return immediately
	if (active_thread() == owner) return true;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return true;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;
	}

	// linear backoff bounded by spin_count
	spin = spin_start;
	int spin_counter = 0;
	int yield_counter = 0;
	for ( ;; ) {
		compare_val = 0;
		if(internal_try_lock(this, compare_val)) return true;
		if (2 == compare_val) break;
		if(spin_counter < spin_count) {
			for (int i = 0; i < spin; i++) Pause();
			if (spin < spin_end) spin += spin;
			else spin_counter++;
		} else if (yield_counter < yield_count) {
			// after linear backoff, yield yield_count times
			yield_counter++;
			yield();
		} else { break; }
	}

	if(2 != compare_val && try_lock_contention(this)) return true;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return true;

	// this should never be reached as block(this) always returns true
	return false;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	verify(lock_value > 0);
	owner = 0p;
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); }
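
// Example (illustrative sketch, not part of the original header): lock_value
// encodes the contention state (0 = free, 1 = held, 2 = held with waiters);
// acquisition spins with doubling backoff, then yields, then blocks on the
// internal list.
//   linear_backoff_then_block_lock l;   // defaults: {4, 1024, 16, 0}
//   lock( l );
//   // ... critical section ...
//   unlock( l );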

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// To set recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};
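
// Example (illustrative sketch, not part of the original header): any sized type
// providing these three operations satisfies the trait; single_acquisition_lock
// and owner_lock above qualify. A hypothetical user-defined lock would need:
//   struct my_lock { ... };
//   void on_notify( my_lock &, struct thread$ * );
//   size_t on_wait( my_lock & );
//   void on_wakeup( my_lock &, size_t recursion );
//   // my_lock can now instantiate condition_variable(my_lock) below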

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// // for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};


	void ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty ( condition_variable(L) & this );
	int counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
}
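
// Example (illustrative sketch, not part of the original header): a condition
// variable parameterized by the lock protecting the shared state.
//   single_acquisition_lock m;
//   condition_variable( single_acquisition_lock ) cv;
//   // waiter:
//   lock( m );
//   wait( cv, m );      // releases m while blocked, re-acquires it on wakeup
//   unlock( m );
//   // signaller:
//   lock( m );
//   notify_one( cv );
//   unlock( m );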