source: libcfa/src/concurrency/locks.hfa@ 2f6a9391

Last change on this file since 2f6a9391 was d30e3eb, checked in by caparson <caparson@…>, 3 years ago

cleaned up exp_backoff lock and rewrote parts of channels to improve performance

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include <fstream.hfa>


// futex headers
#include <linux/futex.h>	/* Definition of FUTEX_* constants */
#include <sys/syscall.h>	/* Definition of SYS_* constants */
#include <unistd.h>

// C_TODO: clean up this file and locks.cfa
// - appropriate separation of interface and implementation
// - clean up unused/unneeded locks
// - change messy big blocking lock from inheritance to composition to remove the need for flags

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );
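
// Illustrative usage sketch (comment only, not part of the interface): a simple
// producer/consumer hand-off using the P/V operations declared above.
//   semaphore s = { 0 };           // count 0: the first P() blocks until a V()
//   // consumer thread:  P( s );   // wait for a resource/signal
//   // producer thread:  V( s );   // release one blocked waiter, or bank a count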

//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
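
// Illustrative difference between the two wrappers (comment sketch only): both are
// thin shells over blocking_lock, constructed with different flags.
//   owner_lock o;
//   lock( o ); lock( o );      // owner_lock tracks its owner, so repeated acquisition by that thread is permitted
//   unlock( o ); unlock( o );
//   single_acquisition_lock s;
//   lock( s ); ... ; unlock( s );   // single_acquisition_lock is not recursive: release before locking again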

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}
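
// Illustrative usage sketch (comment only): each acquisition supplies its own queue
// node, and the same node must be passed to the matching unlock.
//   mcs_lock l;
//   mcs_node n;                 // typically a stack-allocated node per acquisition
//   lock( l, n );               // blocks on n.sem if another thread holds the lock
//   ... critical section ...
//   unlock( l, n );             // posts the semaphore of the next queued node, if any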

//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}
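
// Illustrative note (comment only): this variant follows the same node-per-acquisition
// pattern as mcs_lock, but a waiter spins locally on its own node's `locked` flag
// instead of blocking on a semaphore.
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );       // spins on n.locked if a predecessor holds the lock
//   ... critical section ...
//   unlock( l, n );     // clears the successor's flag, or resets the tail if the queue is empty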

//-----------------------------------------------------------------------------
// futex_mutex

// - No cond var support
// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
	return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
	int state = 0;	// expected value for the CAS: assume UNLOCKED

	// // exponential backoff omitted for now
	// for( int spin = 4; spin < 1024; spin += spin) {
	// 	state = 0;
	// 	// if unlocked, lock and return
	// 	if (internal_try_lock(this, state)) return;
	// 	if (2 == state) break;
	// 	for (int i = 0; i < spin; i++) Pause();
	// }

	// no contention, try to acquire
	if (internal_try_lock(this, state)) return;

	// if not in contended state, set to be in contended state
	if (state != 2) state = internal_exchange(this);

	// block and spin until we win the lock
	while (state != 0) {
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange(this);
	}
}

static inline void unlock(futex_mutex & this) with(this) {
	// if uncontended, do an atomic unlock and return
	if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel

	// otherwise threads are blocked so we must wake one
	__atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
	futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) { unlock(f); return 0; }

// to set recursion count after getting signalled
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
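
// Illustrative usage and state encoding (comment sketch only): val follows the
// commented-out enum above: 0 = unlocked, 1 = locked/uncontended, 2 = locked/contended.
//   futex_mutex m;
//   lock( m );      // fast path: CAS 0 -> 1; contended path: force 2 and FUTEX_WAIT until the exchange observes 0
//   ... critical section ...
//   unlock( m );    // old value 1 means no waiters; otherwise reset to 0 and FUTEX_WAKE one blocked thread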

//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
	volatile bool * volatile tail;
	volatile bool * volatile head;
};

static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
	curr_thd->clh_node = prev;
}

static inline void unlock(clh_lock & l) {
	__atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
}

static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}

static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
	return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
}

static inline bool block(exp_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(exp_backoff_then_block_lock & this) with(this) {
	size_t compare_val = 0;
	int spin = 4;

	// exponential backoff: the spin count doubles each iteration, capped at 1024
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= 1024) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
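
// Illustrative note (comment only): lock_value uses the same 0 / 1 / 2 encoding as
// futex_mutex (unlocked / uncontended / contended). lock() spins with a doubling
// backoff while the lock looks uncontended; only after observing contention does it
// set the state to 2 and park on blocked_threads, to be unparked by unlock().
//   exp_backoff_then_block_lock l;
//   lock( l ); ... critical section ... unlock( l );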

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
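
// Illustrative note (comment only): unlock() performs a direct handoff -- when a
// waiter is queued, `held` stays true and the popped thread is unparked already
// owning the lock, so a woken thread never has to re-contend with new arrivals.
//   fast_block_lock l;
//   lock( l ); ... critical section ... unlock( l );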

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner thread if the lock is held, 0p otherwise
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
// 	thread$ * t = &try_pop_front( blocked_threads );
// 	owner = t;
// 	recursion_count = ( t ? 1 : 0 );
// 	unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// decrement the recursion count; if it reaches zero, release the lock and set a new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );

	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
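
// Illustrative usage sketch (comment only):
//   simple_owner_lock m;
//   lock( m );
//   lock( m );        // recursive acquisition by the owner just bumps recursion_count
//   unlock( m );
//   unlock( m );      // count reaches 0: ownership passes to the next blocked thread, if any
// on_wait()/on_wakeup() save and restore recursion_count, so a condition variable can
// fully release the lock during a wait and later reacquire it at the same recursion depth.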

//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking or read-modify-write atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
	unpark(t);
}
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }
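
// Illustrative note (comment only): acquirers are first serialized through the internal
// mcs_spin_lock, so at most one thread at a time spins on `held`, and unlock() is a
// single release store. The spin_queue_lock, mcs_block_spin_lock and block_spin_lock
// variants below share this structure and differ only in how the internal queue lock
// waits: pure spinning, blocking on a semaphore, or blocking on a fast_block_lock.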

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
	// first acquire the internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if the internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// the internal fast_block_lock is not held
	held = true;
	unlock( lock );

	unpark(t);
}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
	// now acquire the entire block_spin_lock upon waking up
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock ); // now release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};
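
// Illustrative note (comment only): any lock in this file that defines on_notify,
// on_wait and on_wakeup satisfies is_blocking_lock and can be used with the condition
// variables declared below. During a wait the protocol is roughly:
//   size_t rec = on_wait( l );   // cond var releases the lock on behalf of the waiter
//   ... waiter parks until notified ...
//   on_notify( l, waiter );      // signaller hands the waiter back to the lock
//   on_wakeup( l, rec );         // waiter restores its recursion state once it runs again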

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
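
	// Illustrative usage sketch (comment only; assumes a single_acquisition_lock and a
	// shared `ready` flag set by the signalling thread):
	//   single_acquisition_lock m;
	//   condition_variable( single_acquisition_lock ) c;
	//   // waiter:                        // signaller:
	//   lock( m );                        //   lock( m );
	//   while ( ! ready )                 //   ready = true;
	//       wait( c, m );                 //   notify_one( c );
	//   unlock( m );                      //   unlock( m );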

	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock, so you must hold a lock while using this cond var
	// - signalling without holding the branded lock is UNSAFE!
	// - only allows usage of one lock; the cond var is branded by its first use

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty  ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports the operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}