source: libcfa/src/concurrency/locks.hfa@ 441a6a7

Last change on this file since 441a6a7 was 7d9598d8, checked in by caparson <caparson@…>, 3 years ago

small lock cleanup

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include <fstream.hfa>


// futex headers
#include <linux/futex.h>   /* Definition of FUTEX_* constants */
#include <sys/syscall.h>   /* Definition of SYS_* constants */
#include <unistd.h>

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
    __spinlock_t lock;
    int count;
    __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

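// Usage sketch (illustrative, not part of the original header): a counting
// semaphore initialized to 1 behaves like a binary lock; P() is assumed from
// the declarations above to block the caller when the count is exhausted and
// V() to release one waiter.
//   semaphore sem{ 1 };          // count of 1 => mutual exclusion
//   P( sem );                    // acquire (may block)
//   ... critical section ...
//   V( sem );                    // release, waking one waiter if any
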
//----------
struct single_acquisition_lock {
    inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
    inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

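// Usage sketch (illustrative, not part of the original header): both wrappers
// forward to the underlying blocking_lock; the two constructor flags are
// assumed to select multi-acquisition and ownership behaviour, so an
// owner_lock is presumed to tolerate nested acquisition by its owner while a
// single_acquisition_lock does not.
//   owner_lock m;
//   lock( m );
//   lock( m );      // presumed safe: the owner reacquires
//   unlock( m );
//   unlock( m );
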
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
    mcs_node * volatile next;
    single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
    return node->next;
}

struct mcs_lock {
    mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
    if(push(l.queue, &n))
        wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
    mcs_node * next = advance(l.queue, &n);
    if(next) post(next->sem);
}

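// Usage sketch (illustrative, not part of the original header): an MCS lock
// threads waiters through caller-supplied nodes, so each acquisition needs a
// node that stays alive until the matching unlock; a stack-allocated node in
// the acquiring function is the usual pattern.
//   mcs_lock l;
//   void critical_section() {
//       mcs_node n;          // one node per acquisition
//       lock( l, n );
//       ... critical section ...
//       unlock( l, n );      // must pass the same node
//   }
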
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
    mcs_spin_node * volatile next;
    volatile bool locked;
};

struct mcs_spin_queue {
    mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
    return node->next;
}

struct mcs_spin_lock {
    mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
    n.locked = true;
    if(prev == 0p) return;
    prev->next = &n;
    while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * n_ptr = &n;
    if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
    while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
    n.next->locked = false;
}

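// Usage sketch (illustrative, not part of the original header): the spinning
// variant follows the same node-handoff protocol as mcs_lock, but a waiter
// busy-waits on its own node instead of blocking on a semaphore, so it is only
// appropriate for short critical sections.
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   ... short critical section ...
//   unlock( l, n );
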
//-----------------------------------------------------------------------------
// futex_mutex

// - No cond var support
// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
struct futex_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

    // stores a lock state
    int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
    return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
    return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
    int state = 0;    // expect the lock to be unlocked on the first attempt

    // // linear backoff omitted for now
    // for( int spin = 4; spin < 1024; spin += spin) {
    //     state = 0;
    //     // if unlocked, lock and return
    //     if (internal_try_lock(this, state)) return;
    //     if (2 == state) break;
    //     for (int i = 0; i < spin; i++) Pause();
    // }

    // no contention, try to acquire
    if (internal_try_lock(this, state)) return;

    // if not in contended state, set to be in contended state
    if (state != 2) state = internal_exchange(this);

    // block and spin until we win the lock
    while (state != 0) {
        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
        state = internal_exchange(this);
    }
}

static inline void unlock(futex_mutex & this) with(this) {
    // if uncontended do atomic unlock and then return
    if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel

    // otherwise threads are blocked so we must wake one
    __atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
    futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) { unlock(f); return 0; }

// to set recursion count after getting signalled
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}

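// Usage sketch (illustrative, not part of the original header): the lock word
// moves between 0 (unlocked), 1 (locked, uncontended) and 2 (locked,
// contended); only the contended path makes futex syscalls, so an uncontended
// lock/unlock pair stays entirely in user space.
//   futex_mutex m;
//   lock( m );       // CAS 0 -> 1, or FUTEX_WAIT when contended
//   ... critical section ...
//   unlock( m );     // 1 -> 0 silently, 2 -> 0 plus FUTEX_WAKE of one waiter
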
//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
    volatile bool * volatile tail;
};

static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
    thread$ * curr_thd = active_thread();
    *(curr_thd->clh_node) = false;
    volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
    while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
    curr_thd->clh_prev = prev;
}

static inline void unlock(clh_lock & l) {
    thread$ * curr_thd = active_thread();
    __atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
    curr_thd->clh_node = curr_thd->clh_prev;
}

static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(clh_lock & this, size_t recursion ) {
    #ifdef REACQ
        lock(this);
    #endif
}

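// Usage sketch (illustrative, not part of the original header): unlike the MCS
// locks above, the CLH lock takes no explicit node argument because each
// thread$ carries its own clh_node/clh_prev (the fields referenced above); a
// thread spins on its predecessor's flag and adopts the predecessor's node on
// release.
//   clh_lock l;
//   lock( l );       // enqueue this thread's node, spin on the predecessor
//   ... short critical section ...
//   unlock( l );     // publish "released" and recycle the predecessor's node
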
//-----------------------------------------------------------------------------
// Linear backoff then block lock
struct linear_backoff_then_block_lock {
    // Spin lock used for mutual exclusion
    __spinlock_t spinlock;

    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Used for comparing and exchanging
    volatile size_t lock_value;
};

static inline void ?{}( linear_backoff_then_block_lock & this ) {
    this.spinlock{};
    this.blocked_threads{};
    this.lock_value = 0;
}
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
    if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
        return true;
    }
    return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
    if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
        return true;
    }
    return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
    if (lock_value != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park( );
    return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
    size_t compare_val = 0;
    int spin = 4;
    // linear backoff
    for( ;; ) {
        compare_val = 0;
        if (internal_try_lock(this, compare_val)) return;
        if (2 == compare_val) break;
        for (int i = 0; i < spin; i++) Pause();
        if (spin >= 1024) break;
        spin += spin;
    }

    if(2 != compare_val && try_lock_contention(this)) return;
    // block until signalled
    while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
    #ifdef REACQ
        lock(this);
    #endif
}

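// Acquisition protocol sketch (illustrative, not part of the original header):
// lock_value moves between 0 (free), 1 (held, uncontended) and 2 (held,
// contended). A thread first spins with doubling pause counts (4, 8, ...,
// 1024); only once the backoff budget is spent, or contention is observed,
// does it mark the lock contended and park on the blocked_threads list.
//   linear_backoff_then_block_lock l;
//   lock( l );       // spin briefly, then block if still unavailable
//   ... critical section ...
//   unlock( l );     // 1 -> 0, or wake one parked thread when contended
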
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // flag showing if lock is held
    bool held:1;

    #ifdef __CFA_DEBUG__
    // for deadlock detection
    struct thread$ * owner;
    #endif
};

static inline void ?{}( fast_block_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    #ifdef __CFA_DEBUG__
    assert(!(held && owner == active_thread()));
    #endif
    if ( held ) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park( );
        return;
    }
    held = true;
    #ifdef __CFA_DEBUG__
    owner = active_thread();
    #endif
    unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
    thread$ * t = &try_pop_front( blocked_threads );
    held = ( t ? true : false );
    #ifdef __CFA_DEBUG__
    owner = ( t ? t : 0p );
    #endif
    unpark( t );
    unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
    #ifdef REACQ
        lock( lock __cfaabi_dbg_ctx2 );
        insert_last( blocked_threads, *t );
        unlock( lock );
    #else
        unpark(t);
    #endif
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

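// Usage sketch (illustrative, not part of the original header): the held flag
// is guarded by the internal spinlock, so unlock() hands the lock directly to
// the oldest blocked thread (held stays true) rather than releasing and
// re-contending; nested acquisition by the same thread deadlocks, which the
// debug build catches via the owner field.
//   fast_block_lock l;
//   lock( l );
//   ... critical section ...
//   unlock( l );     // direct handoff to the oldest waiter, if any
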
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // owner showing if lock is held
    struct thread$ * owner;

    size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    owner = 0p;
    recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
    if (owner == active_thread()) {
        recursion_count++;
        return;
    }
    lock( lock __cfaabi_dbg_ctx2 );

    if (owner != 0p) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park( );
        return;
    }
    owner = active_thread();
    recursion_count = 1;
    unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
//     thread$ * t = &try_pop_front( blocked_threads );
//     owner = t;
//     recursion_count = ( t ? 1 : 0 );
//     unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
    // if recursion count reaches zero, release the lock and set a new owner if one is waiting
    recursion_count--;
    if ( recursion_count == 0 ) {
        // pop_and_set_new_owner( this );
        thread$ * t = &try_pop_front( blocked_threads );
        owner = t;
        recursion_count = ( t ? 1 : 0 );
        unpark( t );
    }
    unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    // lock held
    if ( owner != 0p ) {
        insert_last( blocked_threads, *t );
    }
    // lock not held
    else {
        owner = t;
        recursion_count = 1;
        unpark( t );
    }
    unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

    size_t ret = recursion_count;

    // pop_and_set_new_owner( this );

    thread$ * t = &try_pop_front( blocked_threads );
    owner = t;
    recursion_count = ( t ? 1 : 0 );
    unpark( t );

    unlock( lock );
    return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

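// Usage sketch (illustrative, not part of the original header): this lock
// records its owner and a recursion count, so the owning thread may nest
// acquisitions and must balance them with the same number of unlocks; the
// on_wait/on_wakeup pair preserves the count across a condition-variable wait.
//   simple_owner_lock m;
//   lock( m );
//   lock( m );       // nested acquire by the owner: recursion_count == 2
//   unlock( m );
//   unlock( m );     // count reaches 0, lock handed to the next waiter
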
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
    // Spin lock used for mutual exclusion
    mcs_spin_lock lock;

    // flag showing if lock is held
    volatile bool held;

    #ifdef __CFA_DEBUG__
    // for deadlock detection
    struct thread$ * owner;
    #endif
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
    mcs_spin_node node;
    lock( lock, node );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
    unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
    unpark(t);
}
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
    #ifdef REACQ
        lock(this);
    #endif
}

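// Design note sketch (illustrative, not part of the original header): the
// internal MCS spin lock only orders access to the held flag, so unlock() is a
// single release store with no queue manipulation; the next acquirer wins the
// MCS queue and then spins until held clears.
//   spin_queue_lock l;
//   lock( l );       // MCS queue entry + spin on held
//   ... short critical section ...
//   unlock( l );     // plain release store
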
//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct mcs_block_spin_lock {
    // Spin lock used for mutual exclusion
    mcs_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
    mcs_node node;
    lock( lock, node );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
    unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
    #ifdef REACQ
        lock(this);
    #endif
}

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct block_spin_lock {
    // Spin lock used for mutual exclusion
    fast_block_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
    lock( lock );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_RELEASE);
    unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
    #ifdef REACQ
        // first we acquire the internal fast_block_lock
        lock( lock __cfaabi_dbg_ctx2 );
        if ( held ) { // if internal fast_block_lock is held
            insert_last( blocked_threads, *t );
            unlock( lock );
            return;
        }
        // if internal fast_block_lock is not held
        held = true;
        #ifdef __CFA_DEBUG__
        owner = t;
        #endif
        unlock( lock );

    #endif
    unpark(t);

}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
    #ifdef REACQ
        // now we acquire the entire block_spin_lock upon waking up
        while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
        __atomic_store_n(&held, true, __ATOMIC_RELEASE);
        unlock( lock ); // now we release the internal fast_block_lock
    #endif
}

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
    // For synchronization locks to use when acquiring
    void on_notify( L &, struct thread$ * );

    // For synchronization locks to use when releasing
    size_t on_wait( L & );

    // to set recursion count after getting signalled
    void on_wakeup( L &, size_t recursion );
};

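// Illustrative note (not part of the original header): every lock above that
// defines on_notify/on_wait/on_wakeup satisfies this trait, which is what
// allows it to be paired with the condition variables declared below, e.g.
//   condition_variable( simple_owner_lock ) c;   // any is_blocking_lock type works
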
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
    struct info_thread;

    // // for use by sequence
    // info_thread(L) *& Back( info_thread(L) * this );
    // info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

    //-----------------------------------------------------------------------------
    // condition_variable

    // The multi-tool condition variable
    // - can pass timeouts to wait for either a signal or timeout
    // - can wait without passing a lock
    // - can have waiters reacquire different locks while waiting on the same cond var
    // - has shadow queue
    // - can be signalled outside of critical sections with no locks held
    struct condition_variable {
        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;

        // Count of current blocked threads
        int count;
    };


    void  ?{}( condition_variable(L) & this );
    void ^?{}( condition_variable(L) & this );

    bool notify_one( condition_variable(L) & this );
    bool notify_all( condition_variable(L) & this );

    uintptr_t front( condition_variable(L) & this );

    bool empty  ( condition_variable(L) & this );
    int  counter( condition_variable(L) & this );

    void wait( condition_variable(L) & this );
    void wait( condition_variable(L) & this, uintptr_t info );
    bool wait( condition_variable(L) & this, Duration duration );
    bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

    void wait( condition_variable(L) & this, L & l );
    void wait( condition_variable(L) & this, L & l, uintptr_t info );
    bool wait( condition_variable(L) & this, L & l, Duration duration );
    bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

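    // Usage sketch (illustrative, not part of the original header): wait()
    // with a lock releases it via on_wait, parks the caller, and is expected
    // to restore the lock and its recursion state through on_wakeup once the
    // waiter is signalled or times out; 'ready' is a hypothetical predicate.
    //   simple_owner_lock m;
    //   condition_variable( simple_owner_lock ) c;
    //   lock( m );
    //   while ( !ready ) wait( c, m );
    //   unlock( m );
    //   // elsewhere: lock( m ); ready = true; notify_one( c ); unlock( m );
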
    //-----------------------------------------------------------------------------
    // fast_cond_var

    // The trimmed and slim condition variable
    // - no internal lock so you must hold a lock while using this cond var
    // - signalling without holding branded lock is UNSAFE!
    // - only allows usage of one lock, cond var is branded after usage

    struct fast_cond_var {
        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;
        #ifdef __CFA_DEBUG__
        L * lock_used;
        #endif
    };

    void  ?{}( fast_cond_var(L) & this );
    void ^?{}( fast_cond_var(L) & this );

    bool notify_one( fast_cond_var(L) & this );
    bool notify_all( fast_cond_var(L) & this );

    uintptr_t front( fast_cond_var(L) & this );
    bool empty ( fast_cond_var(L) & this );

    void wait( fast_cond_var(L) & this, L & l );
    void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


    //-----------------------------------------------------------------------------
    // pthread_cond_var
    //
    // - cond var with minimal footprint
    // - supports operations needed for pthread cond

    struct pthread_cond_var {
        dlist( info_thread(L) ) blocked_threads;
        __spinlock_t lock;
    };

    void  ?{}( pthread_cond_var(L) & this );
    void ^?{}( pthread_cond_var(L) & this );

    bool notify_one( pthread_cond_var(L) & this );
    bool notify_all( pthread_cond_var(L) & this );

    uintptr_t front( pthread_cond_var(L) & this );
    bool empty ( pthread_cond_var(L) & this );

    void wait( pthread_cond_var(L) & this, L & l );
    void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
    bool wait( pthread_cond_var(L) & this, L & l, timespec t );
    bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
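
// Usage sketch (illustrative, not part of the original header): the timed
// waits take a timespec and are assumed to mirror pthread_cond_timedwait,
// i.e. an absolute deadline with the boolean result presumably reporting
// whether the waiter was signalled before timing out.
//   simple_owner_lock m;
//   pthread_cond_var( simple_owner_lock ) c;
//   lock( m );
//   timespec abs_deadline = ...;                 // hypothetical absolute deadline
//   bool signalled = wait( c, m, abs_deadline ); // false presumably means timeout
//   unlock( m );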